blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0c0c96329a80a01c75b885a1de44d8ad5ef2138d | 64965365bfeb800c9b4facae7d707f752c922d9c | /args_aula.py | 9d4788c4db0121d903f357ba664099c15d84c3df | [] | no_license | RafaelNGP/Curso-Python | 585b535f1383931286d897fe2745a1a976e3a3b8 | 0300566578b176071abe7083b222d4fa8a338e90 | refs/heads/master | 2020-12-03T17:33:35.907478 | 2020-06-26T18:33:27 | 2020-06-26T18:33:27 | 231,409,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,942 | py | """
Entendendo o *args
- Eh um parametro, como outro qualquer. Isso significa que voce podera chamar de qualquer
coisa, desde que comece com *
por ex:
*xis
mas por convencao, todos utilizam *args para defini-li.
O QUE EH O ARGS??
O parametro *args utilizado em uma funcao, coloca os valores extras informados como entrada
em uma tupla, entao desde ja lembre-se que as tuplas sao imutaveis.
"""
def soma_todos_os_numeros(num1, num2, num3):
return num1 + num2 + num3
print(soma_todos_os_numeros(4, 6, 9))
# Entendendo o Args
def soma_todos_os_atributos(*args):
return sum(args)
print(soma_todos_os_atributos())
print(soma_todos_os_atributos(8))
print(soma_todos_os_atributos(9, 8))
print(soma_todos_os_atributos(9, 5, 3))
print(soma_todos_os_atributos(7, 8, 2, 9))
# print(soma_todos_os_atributos(8, 6, "AHAL"))
def cadastro_usuario(*args):
nome = input("Qual o nome que sera cadastrado? ")
email = input("Informe seu email: ")
idade = int(input("Qual sua idade? "))
tipo_conta = input("Que tipo de contavai abrir? (User/ADM) ")
if idade >= 18:
idade = "Maior de idade"
else:
idade = "Menor de idade"
print(f'\n{tipo_conta} {nome} cadastrado com sucesso!\n'
f'{idade}\n'
f'Email para contato: {email}')
print(args)
# print(cadastro_usuario(nome, email, idade, tipo_conta, "Periodo de Testes", 3.14))
def verifica_info(*args):
if 'Geek' in args and 'University' in args:
return print("Seja bem vindo, Geek!")
return print("Nao tenho certeza de quem eh voce")
# verifica_info()
# verifica_info("Geek", "University")
# verifica_info("University", 3.14, 'Geek')
# O * serve para que informemos ao python que estamos passando como argumento
# uma colecao de dados. Desta forma, ele sabera que vai precisar desempacotar os dados antes.
numeros = [1, 2, 3, 4, 5, 6, 7]
print(soma_todos_os_atributos(*numeros))
| [
"rafaelferreira.dev3@gmail.com"
] | rafaelferreira.dev3@gmail.com |
51f55bc16f6ed44f56ff1aebecc74e8ef660f3e9 | 222b17dacb95640499ebd484697ead32e83b9ac1 | /find_defining_class.py | 3bf7302beb99684035cd35f6b235fee80a90520b | [] | no_license | cicekozkan/python-examples | 08330ef0fb1678cace17716ac2f490a3c5b95dd2 | 01b0e654c884946f8353995333a6946062c9c158 | refs/heads/master | 2021-01-14T14:06:37.585963 | 2014-12-26T07:55:13 | 2014-12-26T07:55:13 | 25,510,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 17 11:46:43 2014
@author: ocicek
"""
def find_defining_class(obj, meth_name):
    """Return the class that provides the definition of *meth_name*.

    Walks the method resolution order of obj's type and returns the
    first class whose own __dict__ contains the attribute; returns
    None when no class in the hierarchy defines it.
    """
    for klass in type(obj).mro():
        if meth_name in vars(klass):
            return klass
    return None
"cicekozkan@gmail.com"
] | cicekozkan@gmail.com |
663f935d7eb0b3d622d212ba615d6a7387719c88 | c4cb90afb658a822c4ab867eec979227c0a25a6d | /testdemo/settings.py | 752c0a3676d4faf49f9a97caa9ee3abc5b89683d | [] | no_license | Contraz/demosys-py-test | 81afb3dd801c0deb6046ddb0e7836de61182a36f | 2aa760cb94ea34e3fb610ca8c43f1549ba9b53de | refs/heads/master | 2021-01-19T16:58:33.608630 | 2018-07-13T07:59:34 | 2018-07-13T07:59:34 | 88,294,443 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,823 | py | import os
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
DEBUG = False
SCREENSHOT_PATH = os.path.join(PROJECT_DIR, 'screenshots')
# Profile: any, core, compat
OPENGL = {
"version": (3, 3),
}
WINDOW = {
"size": (1280, 720),
"vsync": True,
"resizable": True,
"fullscreen": False,
"title": "demosys-py",
"cursor": True,
}
# MUSIC = os.path.join(PROJECT_DIR, 'resources/music/tg2035.mp3')
TIMER = 'demosys.timers.Timer'
# TIMER = 'demosys.timers.RocketTimer'
# TIMER = 'demosys.timers.RocketMusicTimer'
# TIMER = 'demosys.timers.MusicTimer'
ROCKET = {
'mode': 'project',
# 'mode': 'editor',
'rps': 60,
'project': os.path.join(PROJECT_DIR, 'resources', 'cube.xml'),
'files': os.path.join(PROJECT_DIR, 'resources', 'tracks'),
}
# What effects to load
EFFECTS = (
# 'testdemo.plain',
# 'testdemo.bouncingcubes',
# 'testdemo.bouncingcubes_instanced',
# 'testdemo.cube',
# 'testdemo.deferred',
# 'demosys.deferred',
'testdemo.feedback',
# 'testdemo.multilayer',
# 'testdemo.rockettest',
)
SHADER_DIRS = (
os.path.join(PROJECT_DIR, 'resources/shaders'),
)
SHADER_FINDERS = (
'demosys.core.shaderfiles.finders.FileSystemFinder',
'demosys.core.shaderfiles.finders.EffectDirectoriesFinder',
)
# Hardcoded paths to shader dirs
TEXTURE_DIRS = (
os.path.join(PROJECT_DIR, 'resource/textures'),
)
# Finder classes
TEXTURE_FINDERS = (
'demosys.core.texturefiles.finders.FileSystemFinder',
'demosys.core.texturefiles.finders.EffectDirectoriesFinder'
)
# Tell demosys how to find shaders split into multiple files
SHADERS = {
'vertex_shader_suffix': ('vert', '_vs.glsl', '.glslv'),
'fragment_shader_suffix': ('frag', '_fs.glsl', '.glslf'),
'geometry_shader_suffix': ('geom', '_gs.glsl', '.glslg'),
}
| [
"eforselv@gmail.com"
] | eforselv@gmail.com |
f1ef29d00b9e612458bdb8429ac6cc2833dcfeb1 | cd58faaffc84a4b1194fa55206ecce3458289edb | /setup.py | 00f05e0c3c24ac0059253c0b709c8ccd9fd0b61a | [
"MIT"
] | permissive | danieleteti/revelation | 89327833d896c7350d41a7983d4781d980134a79 | de4f8221e6c78aca174600dd333b0f9a5f62baa2 | refs/heads/master | 2020-03-21T08:10:47.420032 | 2018-07-17T18:05:17 | 2018-07-17T18:05:17 | 138,326,204 | 0 | 0 | MIT | 2018-06-22T16:43:33 | 2018-06-22T16:43:33 | null | UTF-8 | Python | false | false | 2,222 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""revelation setup file"""
import os
import re
from setuptools import find_packages, setup
PACKAGE = "revelation"
REQUIREMENTS = [
"Jinja2==2.10",
"Werkzeug==0.14.1",
"click==6.7",
"gevent-websocket==0.10.1",
"gevent==1.3.4",
"watchdog==0.8.3",
]
TEST_REQUIREMENTS = [
"coverage==4.5.1",
"coveralls==1.3.0",
"flake8==3.5.0",
"mock",
"nose==1.3.7",
]
with open("README.md", "r") as f:
README = f.read()
with open(os.path.join(PACKAGE, "__init__.py")) as init_file:
INIT = init_file.read()
VERSION = re.search(
"^__version__ = ['\"]([^'\"]+)['\"]", INIT, re.MULTILINE
).group(1)
AUTHOR = re.search(
"^__author__ = ['\"]([^'\"]+)['\"]", INIT, re.MULTILINE
).group(1)
EMAIL = re.search(
"^__email__ = ['\"]([^'\"]+)['\"]", INIT, re.MULTILINE
).group(1)
setup(
name=PACKAGE,
version=VERSION,
description="Make awesome reveal.js presentations with revelation",
long_description=README,
long_description_content_type="text/markdown",
author=AUTHOR,
author_email=EMAIL,
url="https://github.com/humrochagf/revelation",
license="MIT",
packages=find_packages(),
package_data={PACKAGE: ["templates/presentation.html"]},
zip_safe=False,
install_requires=REQUIREMENTS,
entry_points=dict(console_scripts=["revelation=revelation.cli:cli"]),
platforms="any",
keywords="presentation slides reveal.js markdown",
classifiers=[
"Environment :: Console",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Multimedia :: Graphics :: Presentation",
"Topic :: Text Processing :: Markup :: HTML",
],
test_suite="tests",
tests_require=TEST_REQUIREMENTS,
extras_require={"test": TEST_REQUIREMENTS},
)
| [
"humrochagf@gmail.com"
] | humrochagf@gmail.com |
48f3584a790de2d2e0b3bcbc8564f738a07d7db1 | 4760101a6c297c4b4b0e96f5ae7fb8c94c2abda6 | /cs_591/hw1/Swell.py | 0922c9bccb5bb0bd5ed7e2c7ba7ab360210abbc9 | [] | no_license | ssikdar1/grad_school | 6fc739c7638e64a0a9974c920ac808c3989c9109 | cfcdcd70fab5b4083515abb2afe10c9dd5c27923 | refs/heads/master | 2016-09-06T11:46:42.754021 | 2014-10-27T16:17:10 | 2014-10-27T16:17:10 | 13,286,075 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,295 | py | #Problem 3: Write a program Swell.py which prompts the user for the name of a mono input file, and produces a mono output file which takes the musical signal from the input file and changes the amplitude in the following way: the signal will start at amplitude 0, increase linearly until exactly half way through the file, at which point it should be at the same amplitude as the original signal, and then decrease linearly until it becomes 0 at the end of the file. It should work no matter how long the input file is. (Hint: You can find the number of frames in the input file, and hence the length; you just need to scale the amplitude using a factor which changes at each iteration through the loop.)
import array
from struct import pack # This creates the binary data structure for frames
from math import sin, pi
import wave # The main library for manipulating wave files
# Python 2 script (raw_input, str-as-bytes frames).
# Prompt user for the name of the mono input file.
inputFile = raw_input("Enter name of input file: ")
# open wave file for reading
inWaveFile = wave.open(inputFile, 'r')
# get the parameters
(numChannels, sampleWidth, sampleRate, numFrames, compressionType, nameOfCompression) = inWaveFile.getparams()
#output file
outputFile = "Swell_mono_ouput_file.wav"
outWaveFile = wave.open(outputFile, 'w')
outWaveFile.setparams((numChannels, sampleWidth, sampleRate, 0, 'NONE', 'not compressed'))
waveData = ""  # holds the frames to be written to file, in binary form
print("Number of Channels: " + str(numChannels))
print("# of frames:" + str(numFrames))
# how many frames to read each loop iteration?
numFramesToRead = 1
# Frame index where the swell peaks at full amplitude.  max(1, ...) avoids
# a ZeroDivisionError on degenerate files with fewer than 2 frames.
midpoint = max(1, numFrames // 2)
for i in range(0, numFrames):
    # in mono, a frame holds one sample; in stereo, two
    frame = inWaveFile.readframes(numFramesToRead)
    # unpack binary string into an array of signed 16-bit ints
    data = array.array('h', frame)
    if i <= midpoint:
        # first half: ramp linearly up from 0 to 1
        amplitude = float(i) / midpoint
    else:
        # second half: ramp linearly down from 1 back to 0.
        # BUGFIX: the original divided by numFrames here, which made the
        # volume jump from 1 down to ~0.5 right after the midpoint
        # instead of decreasing smoothly from full amplitude.
        amplitude = float(numFrames - 1 - i) / midpoint
    # Scale the first sample.  struct.pack('h', ...) requires an int in
    # Python 2.7 (a float raises struct.error), so round explicitly.
    newAmplitude = int(round(data[0] * amplitude))
    # write the scaled sample to wavedata
    waveData += pack('h', newAmplitude)
    # write any remaining samples of the frame back unchanged
    # (uses j so the outer loop index i is not shadowed)
    for j in range(1, len(data)):
        waveData += pack('h', data[j])
# write wavedata to the output file
outWaveFile.writeframes(waveData)
outWaveFile.close()
inWaveFile.close()
| [
"shan.sikdar@gmail.com"
] | shan.sikdar@gmail.com |
233a03f97ecdb2657432694ef3554ae545d0dda9 | 13adab7e3e09f69dfc74ac0fad96d3a4a350db9d | /src/game/data_structures/states.py | 25efe554d83825e6886957f1bb48628844731861 | [
"MIT"
] | permissive | richlanc/Game_of_life | 5f4150c2258a97778021b8841309a03c826ca01c | 3784b694c5d89df05a2b9d5cbf2a0596d8bf5a86 | refs/heads/master | 2020-12-03T05:10:41.430658 | 2014-01-05T14:23:06 | 2014-01-05T14:23:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | '''
Created on 20 Oct 2013
@author: Richard and Michael
This is a module containing the State class and all classes derived from it.
'''
class State(object):
    """
    Abstract base class describing the state of a cell at a given time.

    Concrete states are distinguished purely by their class: two state
    objects compare equal exactly when they are instances of the same
    class, which lets the game engine compare cells directly.
    """

    def __init__(self):
        """
        Ctor (Constructor)

        Prepare all state data for use by the game engine (placeholder).
        """
        pass

    def __eq__(self, other):
        """
        Equality comparison: true when *other* belongs to this class.
        """
        return isinstance(other, type(self))

    def __ne__(self, other):
        """
        Inequality comparison: true when *other* is not of this class.
        """
        return not isinstance(other, type(self))
| [
"mw362@kent.ac.uk"
] | mw362@kent.ac.uk |
c389de36a0b5295a4cec6e9ccfb15f6e11845d20 | 1f3cfc88c3b9d51d626f3e71301d6fd25ef2738b | /vegefoods/accounts/migrations/0009_auto_20190812_1913.py | 67ef3e510a576d5d99a0e5f43c3e59622a1b60c2 | [] | no_license | farzan-web/django-vegetableshop | cde071b8fab98901e193eec198c3b0e95efc3dcd | 7713808046ca2b2cd009765a21c61713352eb014 | refs/heads/master | 2020-07-05T20:35:24.087796 | 2019-08-16T16:58:16 | 2019-08-16T16:58:16 | 202,766,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | # Generated by Django 2.2.1 on 2019-08-12 14:43
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: loosens three fields on the
    # accounts.Profile model (blank-able address and phone_number,
    # choice-limited state_country).  Generated migrations should not be
    # edited by hand once applied; observations below are review notes.

    # Must run after migration 0008 of the accounts app.
    dependencies = [
        ('accounts', '0008_auto_20190812_1909'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='address',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='profile',
            name='phone_number',
            # NOTE(review): blank=True without null=True means an empty
            # form value still needs a stored integer — confirm intended.
            field=models.PositiveIntegerField(blank=True),
        ),
        migrations.AlterField(
            model_name='profile',
            # NOTE(review): max_length=10 is shorter than several choice
            # keys ('philippines', 'unitedkingdom', 'unitedstates'), and
            # default='France' is a display label, not a choice key
            # ('france') — both look like latent validation bugs.
            name='state_country',
            field=models.CharField(blank=True, choices=[('france', 'France'), ('iran', 'Iran'), ('philippines', 'Philippines'), ('hongkong', 'Hongkong'), ('japan', 'Japan'), ('unitedkingdom', 'United kingdom'), ('unitedstates', 'United states')], default='France', max_length=10),
        ),
    ]
| [
"farzan.moradi71@gmail.com"
] | farzan.moradi71@gmail.com |
df5e27dbf8300206dc4e4fdd743705abeca3d79a | 42d4fd4b5ecf073ed7eda60bb12347a7e2281a60 | /uvfits_to_ms.py | 856f0ceaf02f321096560bd1157dac9f6e28c034 | [
"BSD-3-Clause"
] | permissive | tyler-a-cox/HERA_beam_mapping | 6d0d57cf344262f58d2e5c0cd75f5df161b5da81 | ef45242b73219017c0403b18048136f2feafa234 | refs/heads/master | 2023-07-13T02:37:46.356293 | 2021-08-12T20:03:04 | 2021-08-12T20:03:04 | 177,019,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | """
Example:
casa -c uvfits_to_ms.py /path/to/data/*.uv
"""
from casa_imaging import convert_uvfits
if __name__ == "__main__":
    # BUGFIX: `sys` was referenced below but never imported, so the
    # script died with a NameError before doing any work.
    import sys

    # Under "casa -c uvfits_to_ms.py file1.uv ..." the user's arguments
    # start at index 3 (after the casa binary, -c, and the script path).
    folders = sys.argv[3:]
    if not folders:
        # BUGFIX: the original guarded this with `except IndexError`,
        # but slicing never raises IndexError, so the message could
        # never be printed.  Test for an empty argument list instead.
        print("No file specified for conversion from uvfits to ms")
    for folder in folders:
        convert_uvfits(folder)
| [
"tyler.a.cox@berkeley.edu"
] | tyler.a.cox@berkeley.edu |
40442ff6a3e8d55c9463efb03745c7f4f7599364 | 3b14022e319adc1d51583bf323b52d60b6bdcae3 | /deepfield/field/rates/__init__.py | 05a1c2af9b673a70c2fd21c73c81c939c03b3708 | [
"Apache-2.0"
] | permissive | scuervo91/DeepField | 0b7008f76e0789cd80ad3bbdf65027b5815921f9 | 3b336ed110ff806316f1f6a99b212f99256a6b56 | refs/heads/main | 2023-07-07T13:03:16.869291 | 2021-08-14T09:55:52 | 2021-08-14T09:55:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | """Init file"""
from .blocks_info_upd import apply_perforations, calculate_cf
from .plot_rates import show_rates, show_rates2, show_blocks_dynamics
from .calc_rates import calc_rates, calc_rates_multiprocess
| [
"egor.mypost@gmail.com"
] | egor.mypost@gmail.com |
f25824b60d5d366a1303a8ecdeb33674f8eee4e7 | de12b4939bc67ba4a1438a1b90fb2dc83725930c | /emg_hom_iso_unbound/export_as_pandas.py | 1440ba5e1d5077524915eaf97ba820c9d7bc062f | [
"Apache-2.0"
] | permissive | UAS-Embedded-Systems-Biomechatronics/EMG-concentrated-current-sources | d239a9080e224868a0c61dc90af1bbd94ad2ba2b | 567280d71c6bc32f6afd1f12c08091361a5b3ab6 | refs/heads/main | 2023-06-07T10:56:55.772829 | 2023-06-01T14:03:08 | 2023-06-01T14:25:48 | 609,161,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,763 | py | # Copyright 2023 Malte Mechtenberg
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import pandas as pd
import numpy as np
import pickle
from tqdm import tqdm
import vtk
from vtk.util.numpy_support import vtk_to_numpy
from glob import glob
def load_vtu(str_file):
    """Read a VTK XML unstructured-grid (.vtu) file.

    Returns a dict with 'points' (node coordinates) and 'phi' (the
    first point-data array), both converted to numpy arrays.
    """
    reader = vtk.vtkXMLUnstructuredGridReader()
    reader.SetFileName(str_file)
    reader.Update()
    grid = reader.GetOutput()

    np_points = vtk_to_numpy(grid.GetPoints().GetData())
    np_phi = vtk_to_numpy(grid.GetPointData().GetArray(0))

    return {'points': np_points, 'phi': np_phi}
# Index every electrode/source result file into a DataFrame and derive
# bookkeeping columns from the path components.
file_list = glob("./*_motor_unit_* idx_muscle_fiber_*_1/t_*.vtu")
df = pd.DataFrame({'file_name' : file_list})
# fibre index: 4th '_'-separated token of the last space-separated part
# of the directory name ("idx_muscle_fiber_<N>_1")
df['muscle_fiber_id'] = df.file_name.map(lambda x: int( x.split(' ')[-1].split('_')[3] ))
# electrode files are named "t_<step>.vtu", source files "sources_<step>.vtu"
df['electrodes'] = df.file_name.map(lambda x: x.split('/')[-1].split('_')[0] == 't')
df['sources'] = df.file_name.map(lambda x: x.split('/')[-1].split('_')[0] == 'sources')
# timestep: the number between the last '_' and the '.vtu' extension
df['time'] = df.file_name.map(lambda x: int(x.split('/')[-1].split('_')[-1].split('.')[0]))
muscle_ids = df.muscle_fiber_id.unique()
muscle_fiber_phi = {}
def import_muscle_data(muscle_id : '[int]', df : 'pd.DataFrame' ) -> 'dict':
    """Collect the electrode potential arrays of one muscle fibre.

    For each timestep present in the electrode rows of *df* that belong
    to *muscle_id*, loads the referenced .vtu file and keeps its 'phi'
    array.  Returns {muscle_id: {timestep: phi_array}}.
    """
    sel = (df.muscle_fiber_id == muscle_id)
    df_local = df[sel & df.electrodes]

    muscle_fiber_phi = {}
    muscle_fiber_phi[muscle_id] = {}
    for t in df_local.time:
        file_selected = df_local.file_name[df_local.time == t]
        assert len(file_selected) == 1 , 'more than one file selected'
        # BUGFIX: the original wrapped this call in a bare `except:` that
        # started pdb.set_trace(), which hangs unattended runs and left
        # `data_dict` unbound if execution was continued.  Let a failing
        # read propagate to the caller instead.
        data_dict = load_vtu(file_selected.item())
        muscle_fiber_phi[muscle_id][t] = data_dict['phi']
    return muscle_fiber_phi
def import_electrode_positions(df : 'pd.DataFrame') -> 'nd.array' :
    """Return the electrode coordinates stored in the first listed file.

    Electrode positions are identical across timesteps, so reading the
    first file's 'points' array is sufficient.
    """
    first_file = df.file_name[0]
    return load_vtu(first_file)['points']
def sort_time(d):
    """Stack d's values into an array, ordered by ascending key.

    *d* maps timestep indices to per-timestep arrays; the result is one
    array whose first axis runs over the sorted timesteps.
    """
    frames = [d[key] for key in sorted(d)]
    return np.array(frames)
# Load every fibre's electrode potentials (lazily, with a progress bar)
# and merge the per-fibre dicts into one {fibre_id: time-sorted array}.
iterator = tqdm(map(lambda x : import_muscle_data(x, df), muscle_ids), total = len(muscle_ids))
muscle_fiber_phi = { k: sort_time(v) for d in iterator for k , v in d.items() }
# Flatten everything into one long-format DataFrame: one row per
# (timestep, electrode, fibre) with the electrode's xyz position.
electtrode_df = pd.DataFrame({'timepoint' : [], 'phi': [], 'electrode_id': [], 'muscle_id' : []})
electrode_positions = import_electrode_positions(df)
for e_id in tqdm( range(len(muscle_fiber_phi[0][0,:])) ):
    for m_id in muscle_fiber_phi:
        df_length = len(muscle_fiber_phi[m_id][:,e_id])
        electtrode_df_m = pd.DataFrame({
            'timepoint': [t for t in range(len(muscle_fiber_phi[m_id][:,e_id]))]
            , 'phi': muscle_fiber_phi[m_id][:,e_id]
            , 'muscle_id': [m_id] * df_length
            , 'electrode_id': [e_id] * df_length
            , 'p_electrode_x' : [electrode_positions[e_id,0]] * df_length
            , 'p_electrode_y' : [electrode_positions[e_id,1]] * df_length
            , 'p_electrode_z' : [electrode_positions[e_id,2]] * df_length
        })
        # NOTE(review): pd.concat inside a nested loop recopies the
        # accumulated frame each iteration (quadratic); collecting the
        # pieces in a list and concatenating once would be linear.
        electtrode_df = pd.concat([electtrode_df, electtrode_df_m], ignore_index = True )
pd.to_pickle(electtrode_df, "./pandas_full_data.pkl")
| [
"mmechtenberg@fh-bielefeld.de"
] | mmechtenberg@fh-bielefeld.de |
30dc3b2da46f46ecc794ac6ddebb62d32642eee8 | 933450b76a6ebfc06f5ee8bd09a03e5eecd79081 | /itm-413/final/connection.py | a06d769ff728e980eeb287f6515e69f6bd5b8cfa | [
"MIT"
] | permissive | bradyhouse/iit | 7a2e0e1d81a0d02dc432bc7a5a2b76fde7bd8865 | 7d1e91864467f56fa6dc901a83e97977630132b3 | refs/heads/master | 2022-07-07T01:20:45.808092 | 2016-08-27T17:11:46 | 2016-08-27T17:11:46 | 66,312,487 | 1 | 1 | MIT | 2022-03-25T15:57:02 | 2016-08-22T22:22:26 | JavaScript | UTF-8 | Python | false | false | 105 | py | db_host_name="127.0.0.1"
# Remaining connection settings for the local MySQL test database
# (the host, db_host_name="127.0.0.1", is assigned just above).
# NOTE(review): credentials are hard-coded in source — acceptable for a
# class exercise, but real code should read them from the environment.
db_name="TestDB"
db_user="testuser"
db_password="test123"
db_table_name="brady"
| [
"bradyhouse@gmail.com"
] | bradyhouse@gmail.com |
e2f505d0488a2ed6b0f324d7951de077e1ea375f | 9530809bf3772806290305b82171faf84fe7c500 | /books/migrations/0003_add_page_year_cover_to_book_model.py | 2ff994507851925a0aa76a1af7ea759af2f6b741 | [
"Beerware"
] | permissive | tomasz-rzesikowski/books_poc | a764fbd939b5215bda7189290f63e9498874bcf5 | 9f253db1ef6c35fe91a6b8d65317a85532986344 | refs/heads/master | 2023-03-23T05:17:50.536208 | 2021-03-23T19:41:17 | 2021-03-23T19:41:17 | 349,726,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | # Generated by Django 3.1.7 on 2021-03-20 16:36
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('books', '0002_change_isbn_in_book_model'),
]
operations = [
migrations.AddField(
model_name='book',
name='cover',
field=models.URLField(default=''),
preserve_default=False,
),
migrations.AddField(
model_name='book',
name='page_count',
field=models.PositiveIntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='book',
name='publication_year',
field=models.IntegerField(
default=1500,
validators=[
django.core.validators.MinValueValidator(1000),
django.core.validators.MaxValueValidator(9999)
]
),
preserve_default=False,
),
]
| [
"tomasz.rzesikowski@gmail.com"
] | tomasz.rzesikowski@gmail.com |
1763b815e273475b326541c6b358d67877c9a015 | dc375c2e22ebf0a56703ad84d09494a49d994c7c | /genrxkt0.py | c8e87d48c9439d6e5f396a1cab7c2633365364ab | [] | no_license | chen17/rxkt0 | ad7a0b32453890745092121eb8063a41c3cb11e1 | 66713aecd2f82395e56ec360747b79b3500b0bba | refs/heads/master | 2021-01-13T10:56:03.973637 | 2016-11-14T05:30:45 | 2016-11-14T05:30:45 | 72,267,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | import fontforge
import subprocess
import urllib
# Create new font TypographyKT.
newfont = fontforge.font()
newfont.fontname="TypographyKT"
newfont.save("TypographyKT.sfd")
font = fontforge.open("TypographyKT.sfd")
# Get Char_List.txt
charlist = urllib.urlopen("https://raw.githubusercontent.com/chen17/rxkt0/master/Char_List.txt").readlines()
for i in range(len(charlist)):
url = 'http://taiwancamping.club/RX/kt0_bw/' + charlist[i].split()[0]
char = int(charlist[i].split()[2])
charjpg = charlist[i].split()[2] + '.jpg'
charbmp = charlist[i].split()[2] + '.bmp'
charsvg = charlist[i].split()[2] + '.svg'
print 'Working on ' + charlist[i].split()[3]
# Get jpg file.
urllib.urlretrieve(url, charjpg)
# Convert into bmp.
subprocess.check_call(['/usr/bin/convert', charjpg, charbmp])
# Convert into svg.
subprocess.check_call(['/usr/bin/potrace', '-s', charbmp])
# Paste svg into fonts.
glyph = font.createChar(char)
glyph.importOutlines(charsvg)
# Remove process files.
subprocess.check_call(['rm', charjpg, charbmp, charsvg])
font.generate("TypographyKT.ttf")
| [
"noreply@github.com"
] | chen17.noreply@github.com |
8f2f0eec2b1efbe86d15896aed5fdbab5ccb68a4 | 190193fbf79b2a755c1fe7bc2db604d6e2c7dac3 | /clovece nezlob se.py | 2d855e23aa89af91526f9e7e3775ead4934bc162 | [] | no_license | marekdrab/clovece_nezlob_se | 2843a22fc4333990437429b43065ba59e358b5b1 | a43046149787d027583a839986eed0ef1a9b6e6c | refs/heads/main | 2023-07-17T00:11:10.498624 | 2021-08-20T14:44:38 | 2021-08-20T14:44:38 | 398,307,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,800 | py | """
n=rozmer hracej plochy
m=počet pozícii domčeka ( ak n=9, tak m=3 čo znamená, že berieme do úvahy 3. riadok/stlpec
v podmienkach pre pohyb)
k=najväčší počet "*" vedľa seba v riadku (použité pri generovaní hracej plochy)
r1,r2,r3,r4=riadky v hracej ploche
r5=riadok plný medzier potrebný pre pohyb
sach=samotná hracia plocha (neskôr používané ako: s)
x_cur=aktualna suradnica riadka
y_cur=aktualna suradnica stlpca
x_next=ďalšia suradnica riadka
y_next=ďalšia suradnica stlpca
step_count=zostavajuci počet riadkov (potrebné pre vyhadzovanie)
x=súradnica riadka hráča A
y=súradnica stĺpca hráča A
xb=súradnica riadka hráča B
yb=súradnica stĺpca hráča B
ha=hod kockou hráča A
hb=hod kockou hráča B
sur_A=súradnice hráča A zapísané v liste + počet figuriek v domčeku + súradnica riadka v domčeku
sur_B=súradnice hráča B zapísané v liste + počet figuriek v domčeku + súradnica riadka v domčeku
"""
# Ask for the board size n; it must be odd and greater than 5.
n=int(input("Zadaj velkost pola, musi byt neparne a vacsie ako 5: "))
# NOTE(review): an invalid n is only reported, execution continues
# with the bad value anyway.
if n%2==0:
    print("Zadal si parne cislo")
if n<5:
    print("Zadal si cislo mensie ako 5")
#--------------------------------------------------------------------------------------------------------------
# m: number of home-column cells per player; k: half the track width.
m=(n-3)//2
k=(n-1)//2
def gensachovnicu(n):
    """Build the n x n Clovece-nezlob-sa ("Ludo") board as a list of lists.

    '*' marks ordinary track cells, 'D' home/goal cells, 'x' the centre
    cell; every row carries one trailing space cell, and an extra
    all-space row is appended at the bottom (used by the movement code).
    Each row in the returned board is an independent list.
    """
    m = (n - 3) // 2
    k = (n - 1) // 2

    # Row templates; each appended row below is a fresh copy.
    edge_row = [" "] * m + ["*"] * 3 + [" "] * m + [" "]          # top/bottom row
    arm_row = [" "] * m + ["*", "D", "*"] + [" "] * m + [" "]     # vertical arm row
    track_row = ["*"] * k + ["D"] + ["*"] * k + [" "]             # full track row with one D
    centre_row = ["*"] + ["D"] * m + ["x"] + ["D"] * m + ["*"] + [" "]  # middle row with x
    blank_row = [" "] * n                                         # spare all-space row

    board = [list(edge_row)]
    for _ in range(m - 1):
        board.append(list(arm_row))
    board.append(list(track_row))
    board.append(list(centre_row))
    board.append(list(track_row))
    for _ in range(m - 1):
        board.append(list(arm_row))
    board.append(list(edge_row))
    board.append(list(blank_row))
    return board
#-------------------------------------------------------------------------------------------
#vypíš hráčov na plochu a tlačí šachovnicu
s = gensachovnicu(n)
# starting coordinates of player A (top arm)
x=0
y=m+2
# starting coordinates of player B (bottom arm)
xb=n-1
yb=m
# place A and B on their starting squares
s[x][y]="A"
s[n-1][m]="B"
# coordinate records for players A and B
sur_A=[x,y]
sur_B=[xb,yb]
# printing the board
def tlacsachovnicu(n):
    """Print the global board `s`, one row per line, cells space-separated."""
    for row in s:
        print(" ".join(str(cell) for cell in row))
#-------------------------------------------------------------------------------------------
#hod kockou hráč A a hráč B
import random
def krokA():
    """Roll the die for player A: a uniform integer from 1 to 6."""
    roll = random.randint(1, 6)
    return roll
def krokB():
    """Roll the die for player B: a uniform integer from 1 to 6."""
    roll = random.randint(1, 6)
    return roll
#-------------------------------------------------------------------------------------------
#funkcie pre vyhadzovanie a pohyb
#funkcia pre vyhodenie
def vyhod(x_cur,y_cur,x_next,y_next, hrac_na_vyhodenie,s):
    """Capture the opponent's piece standing on the destination square.

    The attacker vacates (x_cur, y_cur) and occupies (x_next, y_next);
    the captured player's piece is reset to that player's start square
    and the shared coordinate records are updated accordingly.

    hrac_na_vyhodenie -- "A" or "B": the player whose piece is captured.
    """
    global n,m,sur_A,sur_B
    s[x_cur][y_cur]="*"
    if hrac_na_vyhodenie == "B":
        # player A captures B: B goes back to its start at (n-1, m)
        s[x_next][y_next]="A"
        sur_B[0]=n-1
        sur_B[1]=m
        s[sur_B[0]][sur_B[1]]="B"
    else:
        # player B captures A: A goes back to its start at (0, m+2)
        s[x_next][y_next]="B"
        sur_A[0]=0
        sur_A[1]=m+2
        s[sur_A[0]][sur_A[1]]="A"
#funkcia pre pohyb
def krok(x_cur,y_cur,x_next,y_next, hybajuci_sa_hrac, nehybajuci_sa_hrac,s,step_count):
    """Advance the moving player's piece by a single square on board *s*.

    hybajuci_sa_hrac   -- mark of the moving player ("A" or "B")
    nehybajuci_sa_hrac -- mark of the opponent
    step_count         -- squares still to travel this turn; a capture
                          only happens on the move's final square
                          (step_count == 1)
    """
    if (s[x_next][y_next] == nehybajuci_sa_hrac):
        # destination square is occupied by the opponent
        # in case a capture happens
        if (step_count == 1):
            vyhod(x_cur,y_cur,x_next,y_next,nehybajuci_sa_hrac,s)
        else:
            # only passing over the opponent: clear our old square,
            # but do not overwrite the opponent's mark
            if (s[x_next][y_next]==nehybajuci_sa_hrac):
                s[x_cur][y_cur]="*"
    else:
        if s[x_cur][y_cur] == nehybajuci_sa_hrac:
            # our previous square is now held by the opponent (we were
            # passing over them): only draw ourselves on the new square
            s[x_next][y_next] = hybajuci_sa_hrac
        else:
            # normal step: vacate the old square, occupy the new one
            s[x_cur][y_cur] = "*"
            s[x_next][y_next] = hybajuci_sa_hrac
#-------------------------------------------------------------------------------------------
#pohyb hráča A
pocet_v_domceku_A=0
x_domcek_A=m
def pohyb(s,x,y,ha,n,sur_A,pocet_v_domceku_A,x_domcek_A):
    """Move player A's piece *ha* single steps along the track of board *s*.

    x, y               -- current row/column of A's piece (board mutated in place)
    ha                 -- die roll: number of steps to take this turn
    pocet_v_domceku_A  -- how many of A's pieces have already reached home
    x_domcek_A         -- row index of the next free cell in A's home column

    Returns the updated [x, y, pocet_v_domceku_A, x_domcek_A].
    """
    m=(n-3)//2
    k=(n-1)//2
    for i in range(ha):
        # piece reached the entrance of A's home column: park it there
        # and bring out a new piece (if any remain)
        if s[0][m+1]=="A":
            pocet_v_domceku_A+=1
            print("Hráč má A v domčeku",pocet_v_domceku_A,"figuriek.")
            s[x_domcek_A][m+1]="A"
            x_domcek_A-=1
            s[0][m+1]="*"
            if pocet_v_domceku_A<m:
                # pieces left: start a new one on A's start square
                s[0][m+1]="*"
                s[0][m+2]="A"
                x=0
                y=m+2
                break
            else:
                break
        # turn in the lower half: head upwards
        elif x==n-1 and y==m:
            krok(x,y,x-1,y,"A","B",s,ha-i)
            x-=1
        # turn in the right half: head downwards
        elif x==m+2 and y==m+2:
            krok(x,y,x+1,y,"A","B",s,ha-i)
            x+=1
        # turn in the upper half: head right
        elif x==m and y==m+2:
            krok(x,y,x,y+1,"A","B",s,ha-i)
            y+=1
        # turn in the right half: head downwards
        elif x==m and y==n-1:
            krok(x,y,x+1,y,"A","B",s,ha-i)
            x+=1
        # turn in the right half: head left
        elif x==m+2 and y==n-1:
            krok(x,y,x,y-1,"A","B",s,ha-i)
            y-=1
        # turn in the lower half: head left
        elif x==n-1 and y==m+2:
            krok(x,y,x,y-1,"A","B",s,ha-i)
            y-=1
        # turn in the left half: head left
        elif x==m+2 and y==m:
            krok(x,y,x,y-1,"A","B",s,ha-i)
            y-=1
        # turn in the left half: head upwards
        elif x==m+2 and y==0:
            krok(x,y,x-1,y,"A","B",s,ha-i)
            x-=1
        # turn in the left half: head right
        elif x==m and y==0:
            krok(x,y,x,y+1,"A","B",s,ha-i)
            y+=1
        # turn in the left half: head upwards
        elif x==m and y==m:
            krok(x,y,x-1,y,"A","B",s,ha-i)
            x-=1
        # move left along the bottom arm
        elif x==n-1 and y==m+1:
            krok(x,y,x,y-1,"A","B",s,ha-i)
            y-=1
        # move left in the lower half
        elif (x==n-1 or x==m+2) and y!=0:
            if y>m+2 or 0<y<=m:
                krok(x,y,x,y-1,"A","B",s,ha-i)
                y-=1
        # move right in the upper half
        # NOTE(review): (y!=m or y!=n-1) is always true since m != n-1
        elif x==m and (y!=m or y!=n-1):
            krok(x,y,x,y+1,"A","B",s,ha-i)
            y+=1
        # move down in the right half
        elif (y==m+2 or y==n-1) and (x!=m or x!=m+2 or x!=n-1):
            krok(x,y,x+1,y,"A","B",s,ha-i)
            x+=1
        # move right along the top arm
        elif x==0 and y==m:
            krok(x,y,x,y+1,"A","B",s,ha-i)
            y+=1
        # move up in the left half
        elif (y==0 or y==m) and (x!=m+2 or x!=m or x!=0):
            krok(x,y,x-1,y,"A","B",s,ha-i)
            x-=1
    print()
    return [x,y,pocet_v_domceku_A,x_domcek_A]
#-------------------------------------------------------------------------------------------
#pohyb hráča B po ploche
pocet_v_domceku_B=0
x_domcek_B=m+2
def pohyb_B(s,xb,yb,hb,n,sur_B,pocet_v_domceku_B,x_domcek_B):
    """Move player B's piece *hb* single steps along the track of board *s*.

    xb, yb             -- current row/column of B's piece (board mutated in place)
    hb                 -- die roll: number of steps to take this turn
    pocet_v_domceku_B  -- how many of B's pieces have already reached home
    x_domcek_B         -- row index of the next free cell in B's home column

    Returns the updated [xb, yb, pocet_v_domceku_B, x_domcek_B].
    """
    m=(n-3)//2
    k=(n-1)//2
    for i in range(hb):
        # piece reached the entrance of B's home column: park it there
        # and bring out a new piece (if any remain)
        if xb==n-1 and yb==m+1:
            pocet_v_domceku_B+=1
            print("Hráč B má v domčeku",pocet_v_domceku_B,"figuriek.")
            s[x_domcek_B][m+1]="B"
            x_domcek_B+=1
            s[n-1][m+1]="*"
            if pocet_v_domceku_B<m:
                # pieces left: start a new one on B's start square
                s[n-1][m+1]="*"
                s[n-1][m]="B"
                xb=n-1
                yb=m
                break
            else:
                break
        # turn in the upper half: head right
        elif xb==0 and yb==m:
            krok(xb,yb,xb,yb+1,"B","A",s,hb-i)
            yb+=1
        # turn in the upper half: head downwards
        elif xb==0 and yb==m+2:
            krok(xb,yb,xb+1,yb,"B","A",s,hb-i)
            xb+=1
        # turn in the left half: head right
        elif xb==m and yb==0:
            krok(xb,yb,xb,yb+1,"B","A",s,hb-i)
            yb+=1
        # turn in the left half: head upwards
        elif xb==m and yb==m:
            krok(xb,yb,xb-1,yb,"B","A",s,hb-i)
            xb-=1
        # turn in the right half: head right
        elif xb==m and yb==m+2:
            krok(xb,yb,xb,yb+1,"B","A",s,hb-i)
            yb+=1
        # turn in the right half: head downwards
        elif xb==m and yb==n-1:
            krok(xb,yb,xb+1,yb,"B","A",s,hb-i)
            xb+=1
        # turn in the left half: head upwards
        elif xb==m+2 and yb==0:
            krok(xb,yb,xb-1,yb,"B","A",s,hb-i)
            xb-=1
        # turn in the lower half: head left
        elif xb==m+2 and yb==m:
            krok(xb,yb,xb,yb-1,"B","A",s,hb-i)
            yb-=1
        # turn in the lower half: head downwards
        elif xb==m+2 and yb==m+2:
            krok(xb,yb,xb+1,yb,"B","A",s,hb-i)
            xb+=1
        # turn in the right half: head left
        elif xb==m+2 and yb==n-1:
            krok(xb,yb,xb,yb-1,"B","A",s,hb-i)
            yb-=1
        # turn in the lower half: head upwards
        elif xb==n-1 and yb==m:
            krok(xb,yb,xb-1,yb,"B","A",s,hb-i)
            xb-=1
        # turn in the lower half: head left
        elif xb==n-1 and yb==m+2:
            krok(xb,yb,xb,yb-1,"B","A",s,hb-i)
            yb-=1
        # move up in the left half
        elif (yb==0 or yb==m) and (xb!=m+2 or xb!=m or xb!=0):
            krok(xb,yb,xb-1,yb,"B","A",s,hb-i)
            xb-=1
        # move left in the lower half
        elif (xb==n-1 or xb==m+2) and yb!=0:
            if yb>m+2 or 0<yb<=m:
                krok(xb,yb,xb,yb-1,"B","A",s,hb-i)
                yb-=1
        # move down in the right half
        elif (yb==m+2 or yb==n-1) and (xb!=0 or xb!=m+2 or xb!=n-1):
            krok(xb,yb,xb+1,yb,"B","A",s,hb-i)
            xb+=1
        # move right in the upper half
        # NOTE(review): (yb!=m or yb!=n-1) is always true since m != n-1
        elif (xb==m or xb==0) and (yb!=m or yb!=n-1):
            krok(xb,yb,xb,yb+1,"B","A",s,hb-i)
            yb+=1
    print()
    return [xb,yb,pocet_v_domceku_B,x_domcek_B]
#-------------------------------------------------------------------------------------------
#Two-player game: alternate dice rolls for A and B until one player
#has all m pieces home.  ``pohyb`` (player A's move, defined above) and
#``pohyb_B`` return [x, y, pieces_home, home_column_row], which is
#unpacked back into the loop state below.
print("Máte ",(n-3)//2,"panáčikov")
tlacsachovnicu(n)
print()
while True:
    # win check happens before each round
    if pocet_v_domceku_A==m:
        print("Hráč A vyhral hru")
        break
    elif pocet_v_domceku_B==m:
        print("Hráč B vyhral hru")
        break
    # roll both dice up front, then play A's turn, then B's
    ha=krokA()
    hb=krokB()
    vstup_pre_krok=input("Pre pokračovanie stlač ENTER: ")
    print("Hráč A hodil: ",ha)
    sur_A=pohyb(s,x,y,ha,n,sur_A,pocet_v_domceku_A,x_domcek_A)
    tlacsachovnicu(n)
    vstup_pre_krok_1=input("Pre pokračovanie stlač ENTER: ")
    print("Hráč B hodil: ",hb)
    sur_B=pohyb_B(s,xb,yb,hb,n,sur_B,pocet_v_domceku_B,x_domcek_B)
    tlacsachovnicu(n)
    # unpack the returned state for the next round
    x=sur_A[0]
    y=sur_A[1]
    pocet_v_domceku_A=sur_A[2]
    x_domcek_A=sur_A[3]
    xb=sur_B[0]
    yb=sur_B[1]
    pocet_v_domceku_B=sur_B[2]
    x_domcek_B=sur_B[3]
| [
"noreply@github.com"
] | marekdrab.noreply@github.com |
7cbcab569a1f959cdee14d726d23c6b0440e977e | cc274ad0f1ce4e9a968fd6057f4fb0fd8a08f988 | /hw21.py | 4001c1dc59a457beeaec6ee8bf0d7f87fcb38259 | [] | no_license | MikaPY/HomeworkPY | 51b216de30d7b2b33aa8847cd3ae46e421ea6db4 | c1ecc0f718ededb60aa375dd1a7a6231a7668a32 | refs/heads/master | 2023-01-22T11:51:30.248030 | 2020-11-22T17:25:28 | 2020-11-22T17:25:28 | 304,744,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | # def max_num(num1,num2):
# if num1 > num2:
# return num1
# else:
# return num2
# num1 = 10
# num2 = 21
# print(max_num(num1,num2))
# def sum_num(*numbers):
# pack = 0
# for x in numbers:
# pack += x
# print(pack)
# sum_num(1,21,9,3,5,7)
# def multiply_num(*numbers):
# pack = 1
# for x in numbers:
# pack *= x
# print(pack)
# multiply_num(5,5,2,4)
# # def passengers(string):
# # a = 0
# # b = 0
# # for i in string:
# # a.isdigit(i)
# # print(passengers('python: '))
# age = 16
# def peaoples(*args):
# for i in args:
# if i < age:
# print('Got out: ')
# break
# else:
# print('It is ok: ')
# peaoples(18,18,19)
def convert(numbers):
    """Scale *numbers* by 1.6 (presumably miles -> kilometres — confirm)."""
    scaled = 1.6 * numbers
    return scaled

print(convert(5))
| [
"72929883+MikaPY@users.noreply.github.com"
] | 72929883+MikaPY@users.noreply.github.com |
49810965fe88053aecf6f8d41bc56ca22e857633 | 8f14190c9a9c4060a17a5d8cafc5a54bc2c79300 | /companies/views.py | 14a54af61ff6ad243004abc0276fa378f9519f18 | [] | no_license | rcdigital/kanban-server | f486672627fcbb11c8ae377187cac455e45ea14a | 905f071ff963b9bad61610e944b1cef01fc95b33 | refs/heads/master | 2019-01-02T04:14:04.573196 | 2015-03-23T22:00:59 | 2015-03-23T22:00:59 | 31,366,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,871 | py | from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from django.http import Http404
from companies.serializers import MembersSerializer, MembersRetrieveSerializer, RolesSerializer
from companies.models import Members, Roles
class CompanyMembersList(APIView):
    """Create a member (POST) or list a company's members (GET)."""

    def post(self, request, company_pk, format=None):
        # Reject invalid payloads early; otherwise persist and echo back.
        member_serializer = MembersSerializer(data=request.data)
        if not member_serializer.is_valid():
            return Response(member_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        member_serializer.save()
        return Response(member_serializer.data, status=status.HTTP_201_CREATED)

    def get(self, request, company_pk, format=None):
        # All members belonging to the company in the URL.
        members = Members.objects.filter(company__id=company_pk)
        return Response(MembersRetrieveSerializer(members, many=True).data)
class MemberDetails(APIView):
    """Retrieve (GET) or delete (DELETE) one member of a company."""

    def get_object(self, company_pk, member_id):
        """Return the member queryset, raising Http404 when it is empty.

        QuerySet.filter() never raises ``Members.DoesNotExist`` — it just
        returns an empty queryset — so the original try/except here could
        never fire and a missing member silently produced a 200/204.
        Check emptiness explicitly so a bad id really yields a 404.
        """
        queryset = Members.objects.filter(id=member_id, company__id=company_pk)
        if not queryset.exists():
            raise Http404
        return queryset

    def get(self, request, company_pk, member_id, format=None):
        member = self.get_object(company_pk, member_id)
        serializer = MembersRetrieveSerializer(member, many=True)
        return Response(serializer.data)

    def delete(self, request, company_pk, member_id, format=None):
        member = self.get_object(company_pk, member_id)
        member.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class RolesList(APIView):
    """Create a role (POST) or list a company's roles (GET)."""

    def post(self, request, company_pk, format=None):
        # Reject invalid payloads early; otherwise persist and echo back.
        role_serializer = RolesSerializer(data=request.data)
        if not role_serializer.is_valid():
            return Response(role_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        role_serializer.save()
        return Response(role_serializer.data, status=status.HTTP_201_CREATED)

    def get(self, request, company_pk, format=None):
        # All roles belonging to the company in the URL.
        roles = Roles.objects.filter(company__id=company_pk)
        return Response(RolesSerializer(roles, many=True).data)
class MemberRoleDetail(APIView):
    """Update the role of a single member (PUT)."""

    def get_object(self, company_pk, member_id):
        # Fetch exactly one member or translate the ORM miss into a 404.
        try:
            return Members.objects.get(id = member_id, company__id = company_pk)
        except Members.DoesNotExist:
            raise Http404
    def put(self, request, company_pk, member_id, format=None):
        # NOTE(review): an unknown role id makes Roles.objects.get raise
        # DoesNotExist, which surfaces as a 500 rather than a 404 — confirm
        # whether that is intended.
        role = Roles.objects.get(id = request.data['role'])
        member = self.get_object(company_pk, member_id)
        # Assign in memory, then let the serializer validate and persist.
        # NOTE(review): whether this assignment survives depends on
        # MembersRetrieveSerializer's fields/save behaviour — verify.
        member.role = role
        serializer = MembersRetrieveSerializer(member, data = request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| [
"erick.belf@gmail.com"
] | erick.belf@gmail.com |
5fb0d6de6e07ff397e5a483f3a634518532a6424 | 427cb811a465677542172b59f5e5f102e3cafb1a | /python/print/printContent.py | 6a213db972abd85fe761285d5c7b5bbb5ae57cdd | [] | no_license | IzaakWN/CodeSnippets | 1ecc8cc97f18f77a2fbe980f322242c04dacfb89 | 07ad94d9126ea72c1a8ee5b7b2af176c064c8854 | refs/heads/master | 2023-07-26T21:57:10.660979 | 2023-07-20T20:35:59 | 2023-07-20T20:35:59 | 116,404,943 | 18 | 4 | null | null | null | null | UTF-8 | Python | false | false | 4,361 | py | # https://docs.python.org/2/library/optparse.html
# http://www.macworld.com/article/1132219/software-utilities/termfoldercomp.html
# https://automatetheboringstuff.com/chapter7/
# TODO: function to replace patterns https://docs.python.org/2/library/re.html
# TODO: add month and year to fileName
# TODO .bundle
import os, sys
from argparse import ArgumentParser
import re
import time
argv = sys.argv
parser = ArgumentParser(description="Make textfile with hierarchy of subdir for a given dir")
parser.add_argument( "file",
type=str, action='store',
metavar="DIRECTORY", help="Input directory" )
parser.add_argument( "-o", "--output", dest="fileName",
default=None, action='store',
metavar="FILE_NAME", help="file name to print subdirs hierarchy" )
parser.add_argument( "-t", "--extensions", dest="extensions",
nargs='+', default=None, action='store',
metavar="EXT", help="only specified extensions" )
parser.add_argument( "-d","--depth", dest="maxDepth",
type=int, default=None, action='store',
metavar="MAX_DEPTH", help="set maximum subdir depth" )
parser.add_argument( "-e", "--excludeFiles", dest="excludeFiles",
default=False, action='store_true',
help="exclude files" )
parser.add_argument( "-a", "--all", dest="showAll",
default=False, action='store_true',
help="show hidden files and directories" )
args = parser.parse_args()
fileName = args.fileName
extensions = args.extensions
maxDepth = args.maxDepth
includeFiles = not args.excludeFiles
showAll = args.showAll
print args.file
tab = " "
def replacePattern2(string,pattern,replaceString):
    """Scan *string* for the '*'-separated parts of *pattern* in order.

    Returns *string* unchanged as soon as a part is missing; returns
    ``None`` when every part is found.
    NOTE(review): this definition is shadowed by the second
    ``replacePattern2`` below and is effectively dead code; only the
    NameError (``sting`` -> ``string``) is fixed here, behaviour is
    otherwise preserved.
    """
    parts = pattern.split("*")
    a = 0
    for part in parts:
        if part in string[a:]:
            # was: sting[a:] — a NameError at runtime
            a = string[a:].index(part)
        else:
            return string
def replacePattern2(string,patterns,replaceString=""):
    """Remove (or replace) every substring of *string* matching a pattern.

    Each entry of *patterns* is a glob-like pattern where ``*`` is a
    non-greedy wildcard, e.g. ``"[720*BluRay*YIFY]"``.  Matches are
    replaced one at a time with *replaceString* and the result returned.

    Fixes two defects in the original: ``findall`` was called as a bare
    name (NameError instead of ``comp.findall``) and the function never
    returned the rewritten string.
    """
    for pattern in patterns:
        # Escape literal brackets, then turn '*' into a non-greedy wildcard.
        regex = re.compile(pattern.replace("[", r"\[").replace("]", r"\]").replace("*", ".*?"))
        for match in regex.findall(string):
            string = string.replace(match, replaceString, 1)
    return string
def listSubDirs(dir, extensions=[], indent="", depth=0):
    """Recursively build an indented listing of *dir*.

    Directories recurse (until the module-level ``maxDepth``); files and
    ``.app`` bundles are listed when the module-level ``includeFiles``
    allows, optionally filtered by *extensions*.  Hidden entries are
    skipped unless the module-level ``showAll`` is set.  ``tab`` (also
    module-level) supplies one indentation step.
    """
    entries = []
    for name in os.listdir(dir):
        # Skip dotfiles unless explicitly requested.
        if name.startswith(".") and not showAll:
            continue
        full_path = dir + "/" + name
        is_app_bundle = name[-4:] == ".app"
        if os.path.isdir(full_path) and not is_app_bundle:
            entries.append(indent + name)
            if maxDepth == None or depth < maxDepth:
                entries += listSubDirs(full_path,
                                       extensions=extensions,
                                       indent=tab + indent,
                                       depth=depth + 1)
        elif includeFiles or is_app_bundle:
            if extensions:
                # Keep the entry only when its name ends in a wanted extension.
                for ext in extensions:
                    if name[-len(ext):] == ext:
                        entries.append(indent + name)
                        break
            else:
                entries.append(indent + name)
    return entries
def main(dir):
    """Print the hierarchy of *dir* and write it to a text file (Python 2).

    Uses the module globals ``extensions`` and ``fileName``; when no
    output name was given, one is derived from the directory name and
    today's date.
    """
    global fileName
    path = "/"
    if "/" in dir:
        # Strip a trailing slash and remember the parent path prefix.
        if dir[-1] == "/":
            dir = dir[:-1]
        path = dir[:dir.rfind("/")+1]
    hierarchy = listSubDirs(dir,extensions=extensions)
    for i in hierarchy:
        print i
    if not fileName:
        t = time.struct_time(time.localtime())
        fileName = "%s hierarchy %i-%i-%i.txt" % (dir.replace(path,""), t.tm_mday, t.tm_mon, t.tm_year)
    # NOTE(review): 'write' is not a standard open() mode string — verify
    # this actually opens for writing on the targeted Python 2 runtime.
    file = open(fileName,'write')
    file.write(dir+"\n\n")
    for i in hierarchy:
        file.write(i+"\n")
    print ">>> %s written" % fileName
    file.close()
# Script entry point (Python 2): first CLI argument is the directory to list.
if __name__ == '__main__':
    if len(sys.argv) > 1:
        dir = str(sys.argv[1])
        if os.path.isdir(dir):
            main(dir)
        else:
            # NOTE(review): this inner check is redundant — we are already
            # in the branch where isdir(dir) is false.
            if not os.path.isdir(dir):
                print ">>> ERROR: argument is not a directory: %s" % dir
    else:
        print ">>> ERROR: Needs an arguments"
    print ">>> done"
| [
"iwn_@hotmail.com"
] | iwn_@hotmail.com |
b596f6be091373758522a35be25f02f7e2e9bb57 | 6e43f45ff6eb4d3388b20b0af18982abc3d58993 | /migrations/versions/d0695a26d06a_.py | f4ccbd8279dcff1a1533112d88a58743c231d4a2 | [] | no_license | mechanicalhack/Fyyur | 8ab910950c5576730205638bb6477528347b9972 | c2dfbad1ab7de3d5d721422037e048e468833196 | refs/heads/master | 2022-11-13T07:27:53.209134 | 2020-07-13T21:41:25 | 2020-07-13T21:41:25 | 276,451,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | """empty message
Revision ID: d0695a26d06a
Revises: 2afc362f9fb7
Create Date: 2020-07-06 17:35:52.150821
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd0695a26d06a'
down_revision = '2afc362f9fb7'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: add a nullable ``genres`` string-array column to ``venue``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('venue', sa.Column('genres', sa.ARRAY(sa.String()), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the ``genres`` column from ``venue``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('venue', 'genres')
    # ### end Alembic commands ###
| [
"rlloyd28@gmail.com"
] | rlloyd28@gmail.com |
3464a1d12deb2429b216bf21a75ff381d2e395ba | 7e4b8098a108e8396961bb0b7c208914d41231be | /webscraper.py | 591fec861ee1ca3a07f485c8351921c6907e9822 | [] | no_license | marceloslacerda/webscraper | 2e4c20bee96f5cc228f052ec4928610e1a2dc3a5 | 11ecb899aa5b5d95f2c21c3fdaa76ee572ac3969 | refs/heads/master | 2021-04-15T04:42:58.401080 | 2018-04-01T20:36:08 | 2018-04-01T20:36:08 | 126,224,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,600 | py | #!/usr/bin/env python3.6
import json
import sys
from io import StringIO
from os.path import join
from urllib.parse import urljoin
from typing import Dict, Any, AnyStr
import requests
from lxml.html import parse
class PageError(ValueError):
    """Raised when a scraped page fails its xpath integrity check.

    Attributes:
        pname: name of the page that was expected next.
    """

    def __init__(self, pname):
        # Forward to ValueError so str(err) and err.args carry the page
        # name; the original skipped super().__init__, leaving the
        # exception message empty.
        super().__init__(pname)
        self.pname = pname
def process_page(meta_data: Dict[AnyStr, Any], tree):
    """Validate *tree* against its expected xpath fingerprint.

    Raises PageError(next page name) when the test query does not return
    the expected result; otherwise returns (next href, next page name).
    """
    if tree.xpath(meta_data['xpath_test_query']) != meta_data['xpath_test_result']:
        raise PageError(meta_data['next_page_expected'])
    next_href = tree.xpath(meta_data['xpath_button_to_click'] + '/@href')[0]
    return (next_href, meta_data['next_page_expected'])
def get_tree(url: AnyStr, name: AnyStr):
    """Fetch *url* over HTTP basic auth and return its parsed lxml tree.

    Also dumps the raw HTML to ``debug/<name>`` for offline inspection.
    NOTE(review): credentials are hard-coded, and the ``debug`` directory
    must already exist or the ``open()`` call raises — confirm both.
    """
    text = requests.get(url, auth=('Thumb', 'Scraper')).text
    with open(join('debug', name), 'w') as f:
        f.write(text)
    return parse(StringIO(text))
def scrape_text(text_input: AnyStr):
    """Follow the chain of pages described by *text_input* (a JSON map).

    Starting from '/', each page is fetched, verified with process_page,
    and the link it yields is followed.  On the first integrity failure an
    ALERT is printed and the walk stops.
    """
    pages = json.loads(text_input)
    base_url = 'https://yolaw-tokeep-hiring-env.herokuapp.com/'
    current_url = '/'
    current_name = '0'
    # One hop per page description; stops early on tampering.
    for _ in pages:
        try:
            dom = get_tree(urljoin(base_url, current_url), current_name)
            current_url, current_name = process_page(pages[current_name], dom)
            print(f"Move to page {current_name}")
        except PageError as err:
            print(
                f'ALERT - Can’t move to page {err.pname}: '
                f'page {current_name} link has been malevolently tampered with!!')
            return
# Entry point: the JSON page descriptions are piped in on stdin.
if __name__ == '__main__':
    scrape_text(sys.stdin.read())
| [
"marceloslacerda@gmail.com"
] | marceloslacerda@gmail.com |
70e4b33a9519977afd5f4c27c746af80bf80095b | d69212d8f0f63937db5a3f3376d634a3adca9276 | /app.py | aeab5527d0b6e58de265f848d77f7a6e5b6c5da5 | [
"MIT"
] | permissive | the-mocode/Heroku_test | 82dea28d6df8b158959a46a5c34cce98e8fd4860 | 97e48c546d65fc339f83d7eeea69eefc68d6f5c0 | refs/heads/main | 2023-07-26T12:02:45.724092 | 2021-09-09T11:34:06 | 2021-09-09T11:34:06 | 404,654,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | from flask import Flask
import os
import psycopg2
# Configuration comes from the environment on Heroku, with local-dev fallbacks.
DB_URL = os.environ.get("DATABASE_URL", "dbname=i_love_bigmac_db")
SECRET_KEY = os.environ.get("SECRET_KEY", "pretend key for testing only")

# Create the application exactly once.  The original created a second
# Flask instance right after configuring the first, which silently
# discarded the SECRET_KEY configuration.
app = Flask(__name__)
app.config['SECRET_KEY'] = SECRET_KEY
@app.route('/')
def index():
    """Index view that doubles as a health check: verifies the DB is reachable."""
    conn = psycopg2.connect(DB_URL)
    try:
        cur = conn.cursor()
        cur.execute('SELECT 1', [])  # query to check that the DB connected
    finally:
        # Close even when the query raises; the original leaked the
        # connection on any database error.
        conn.close()
    return 'Hello, world Yang!'
if __name__ == "__main__":
app.run(debug=True) | [
"Antar@Mohamads-MacBook-Air.local"
] | Antar@Mohamads-MacBook-Air.local |
a5c7326e28f20fc08a463bfb04e69b82c6be461e | 466c185dd064d0a1fb8f20e72b21e227e2cb3efc | /individual_tutorials/pygamestartercode-PghTrickster-master/00-IntroToPython/01_expressions.py | ce823f663fc15dfaa6c674cfdb136a7c8bc9ae00 | [
"MIT"
] | permissive | rhit-catapult/2021-session1 | 3e937235fe48cb03a1dc69b5573879a17b2e81eb | 60c70abeb90ab7edc8d6ddb2c6beb12243a244fc | refs/heads/main | 2023-06-15T20:34:34.449474 | 2021-07-16T20:15:15 | 2021-07-16T20:15:15 | 386,752,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,282 | py | """
Permits exploration of EXPRESSIONS, e.g. 3 + (5 * 2) and "hello" + "goodbye",
and NAMES and ASSIGNMENT, e.g. n = n + 1
Authors: David Mutchler, Sana Ebrahimi, Mohammed Noureddine, Vibha Alangar,
Matt Boutell, Dave Fisher, their colleagues, and
"""
import random
import math
###############################################################################
# Done 1: Smile
###############################################################################
# You Fool, I AM Smiling
###############################################################################
# Done 2:
# Write a statement that prints your name
###############################################################################
print("My Name is:")
print("Alexander Atticus Trick")
###############################################################################
# Part 1: Numbers, Arithmetic, and Precedence.
###############################################################################
###############################################################################
# Done: 3.
# Uncomment the following and then run the program, paying close attention
# to what gets printed.
# _
# Then type an example of your own for each of:
# -- subtraction
# -- division
# and run the program, checking that what gets printed is what you expect.
###############################################################################
print()
print("_TODO 3:")
print("4 + 8 evaluates to: ", 4 + 8)
print("7 * 10 evaluates to: ", 7 * 10)
print("1.53 + 8 evaluates to:", 1.53 + 8)
###############################################################################
# Done: 4.
# Uncomment the following and then run the program,
# paying close attention to what gets printed.
###############################################################################
print()
print("_TODO 4:")
print("(4 + 2) * 3 evaluates to:", (4 + 2) * 3)
print("4 + (2 * 3) evaluates to:", 4 + (2 * 3))
print("4 + 2 * 3 evaluates to:", 4 + 2 * 3)
print("(4 - 2) + 3 evaluates to:", (4 - 2) + 3)
print("4 - (2 + 3) evaluates to:", 4 - (2 + 3))
print("4 - 2 + 3 evaluates to:", 4 - 2 + 3)
###############################################################################
# Done: 5.
# Uncomment the following and then run the program,
# paying close attention to what gets printed.
###############################################################################
print()
print("_TODO 5:")
print("2 ** 10 evaluates to:", 2 ** 10)
print("10 ** 2 evaluates to:", 10 ** 2)
print("2 ** 0.5 evaluates to:", 2 ** 0.5)
print("10 ** -2 evaluates to:", 10 ** -2)
print("10 ** -0.5 evaluates to:", 10 ** -0.5, "(do you see why?)")
###############################################################################
# Done: 6.
# Type some expressions of your own choosing that use combinations of:
# -- addition, subtraction
# -- multiplication, division
# -- exponentiation
# using parentheses to make clear the order of operations.
# Then run the program, checking that what gets printed is what you expect.
# _
###############################################################################
print()
print("_TODO 6:")
print((1 ** 1000) * 21 - 42 + (84 / 4))
###############################################################################
# Part 2: Exceptions: Syntax and Run-Time Errors.
###############################################################################
###############################################################################
# Done: 7.
# Uncomment the following and then run the program,
# paying close attention to what gets printed.
# _
# Then comment-out the line that causes the syntax error.
# _
# Now type some other statement that causes a syntax error,
# for example a statement that is missing a required parenthesis.
# Run again to see the error-message from your syntax error,
# and finally comment-out your statement to continue to the next _TODO.
###############################################################################
print()
print("_TODO 7:")
# This is crazy! Python will make no sense of it!
###############################################################################
# Done: 8.
# Uncomment the following and then run the program,
# paying close attention to what gets printed, especially the last red line.
# Note that the error-output (in red) may (or may not) appear BEFORE the
# ordinary output from previously executed PRINT statements.
# _
# Then comment-out the line that causes the run-time error.
###############################################################################
print()
print("_TODO 8:")
print("3 + 2 evaluates to:", 3 + 2)
# print("3 / 0 evaluates to:", 3 / 0)
###############################################################################
# Done: 9.
# Uncomment the following and then run the program,
# paying close attention to what gets printed, especially the last red line.
# Again note that the error-output (in red) may PRECEDE ordinary output.
# _
# Then comment-out the first line that causes the run-time error
# and run the program again to see the result of running the line below it.
###############################################################################
print()
print("_TODO 9:")
# print("3 / 'hello' evaluates to:", 3 / 'hello')
###############################################################################
# Done: 10.
# Type some expressions of your own choosing that cause error messages.
# Then run the program, paying close attention to the last line
# of each error message (in red).
# _
###############################################################################
# print()
# print("_TODO 10:")
# print(four)
# print(four / 3)
# print(four / 3 / 0)
###############################################################################
# Part 3: Objects, Types, and Values.
###############################################################################
###############################################################################
# Done: 11.
# READ the following statements and PREDICT what they will produce as output.
# Then, uncomment them and run the program, checking your predictions
# and learning from any predictions that you got wrong
###############################################################################
print()
print("_TODO 11:")
print("The type of 482 is:", type(482))
print("The type of 48.203 is:", type(48.203))
print('The type of "blah blah blah" is:', type("blah blah blah"))
print("The type of 'blah blah blah' is:", type('blah blah blah'))
print("The type of [4, 2, 9] is:", type([4, 2, 9]))
print("The type of (4, 2, 9) is:", type((4, 2, 9)))
print("The type of min is:", type(min))
print("The type of 'min' is:", type('min'))
print("The type of min(4, 6, 2, 12, 10) is:", type(min(4, 6, 2, 12, 10)))
print("The type of min(4, 6, 2.0, 12, 10) is:", type(min(4, 6, 2.0, 12, 10)))
###############################################################################
# Done: 12.
# Type an expression that involves addition, subtraction and multiplication
# (but NOT division, yet), using whole numbers (which are of type int).
# Then run the program, checking that what gets printed is what you expect.
# _
# Next, repeat the above, but making just a single one of the numbers in
# your expression a float, by appending a decimal point to it, like this:
# instead of 2 (which is an int), write 2.0 (which is a float).
# _
# Finally, try division by uncommenting the following and then run the program,
# paying close attention to what gets printed. What do you notice about the
# type that results from division, even if both arguments are int objects?
###############################################################################
print()
print("_TODO 12:")
print("4.2 / 2.0 evaluates to:", 4.2 / 2.0)
print("4.2 / 2 evaluates to:", 4.2 / 2)
print("4 / 2 evaluates to:", 4 / 2)
print("3 / 2 evaluates to:", 3 / 2)
###############################################################################
# Done: 13.
# Uncomment the following and then run the program,
# paying close attention to what gets printed.
# _
# Then try more expressions involving the // and % operators
# until you understand what those operators do.
###############################################################################
print()
print("_TODO 13:")
print("17 // 5 evaluates to:", 17 // 5)
print("17 % 5 evaluates to:", 17 % 5)
###############################################################################
# Done: 14.
# Uncomment the following and then run the program,
# paying close attention to what gets printed.
# _
# Then try more expressions involving string arithmetic as needed, until you
# understand what the + and * operators do when applied to strings.
###############################################################################
print()
print("_TODO 14:")
#
print("hello" + "goodbye girl")
print("big" * 20)
print(("hello " + "goodbye ") * 4)
###############################################################################
# Done: 15.
# Type a statement that prints:
# I'm not a bug, that's right!
# and then run the program, checking that it printed the above sentence
# (including the punctuation exactly as written above).
# _
# Then repeat the above for the sentence:
# What does "yarborough" mean?
# _
# Then repeat the above for the sentence:
# I'm on "pins and needles" about '"'".
# Hint: consider using the + operator as part of your solution.
# _
###############################################################################
print()
print("_TODO 15:")
print("I'm not a bug, that's right! ")
print('What does "yarborough " mean?')
print("I'm on " + '"pins and needles" about' + "'" + '"' + "'" + '"' )
###############################################################################
# Part 4: Names, Variables, and Assignment.
###############################################################################
###############################################################################
# Done: 16.
# Uncomment the following and then run the program,
# paying close attention to what gets printed.
# _
# Then comment-out the line that causes the run-time error,
# PREDICT what the subsequent lines will print,
# and run again to check your predictions.
# _
# Finally, practice assignment as suggested by the examples below, that is:
# choose your own names, given them values by using the assignment (=)
# operator, and define new names by using expressions that include names
# that you defined previously.
###############################################################################
print()
print("_TODO 16:")
first_program = "Hello, world!"
print(first_program)
#print(greeting)
#
greeting = "Hello, earthlings"
print(greeting)
print(first_program + (greeting * 2))
#
n = 3
print(first_program * n)
n = 2 * first_program
print(n + greeting)
###############################################################################
# Done: 17.
# Uncomment the following and then run the program,
# paying close attention to what gets printed.
# _
# Throughout this program, remember that error-output may (or may not)
# PRECEDE ordinary output from previous PRINT statements. Be sure to scroll
# up to see if any error message (in red) appears higher up in the Console.
# _
# Then repeatedly:
# -- comment-out the line that causes a run-time error
# -- run again to see the output from the statements that follow it.
# until you see the output from the last statement below,
# noting its perhaps-surprising output.
# _
# Finally, try out your own assignment statements that yield run-time errors.
###############################################################################
# print()
# print("_TODO 17:")
# r = 0
# s = -9
# t = s / r
# y = "oops" + s
# u = math.sqrt(-2)
# v = (-2) ** 0.5
# print(v)
###############################################################################
# Done: 18.
# Uncomment the following and then run the program,
# paying close attention to what gets printed.
# _
# Then comment-out the line that causes the run-time error,
# PREDICT what the subsequent lines will print,
# and run again to check your predictions.
###############################################################################
print()
print("_TODO 18:")
a = 45
# 45 = a
b = 10
c = b + 20
b = c
print(a, b, c)
###############################################################################
# Done: 19.
# Uncomment the following and PREDICT what will get printed.
# Then run the program, checking to see whether your prediction is correct.
###############################################################################
print()
print("_TODO 19:")
x = 5
x = x + 1
print(x)
#
x = x + 1
print(x)
#
x = x + 1
print(x)
###############################################################################
# Done: 20.
# Uncomment the following and PREDICT what will get printed.
# (Hint: what gets printed is NOT 75 10.)
# Then run the program, checking to see whether your prediction is correct.
###############################################################################
print()
print("_TODO 20:")
x = 10
y = 75
x = y
y = x
print(x, y)
###############################################################################
# Done.
# The statements below make x and y refer to random integers between 1 and 99,
# then prints the values of x and y.
# _
# Challenge: can you write statements below the following that causes the
# values of x and y to SWAP? For example, if the values of x and y are set
# randomly to 40 and 33, so that the given print statement prints: 40 33
# then your code should print: 33 40
# _
# Spend up to 1 minute on this challenge, typing your code and running the
# program to try out your solution.
# _
###############################################################################
print()
print("_TODO 22:")
x = random.randint(1, 99)
y = random.randint(1, 99)
print(x, y)
# Challenge Area
z = x
x = y
y = z
print(x, y)
| [
"fisherds@rose-hulman.edu"
] | fisherds@rose-hulman.edu |
4e22770993cda1b6d0349f0ad9c0668bd56e8065 | 3041138130a9feda9ee28166ec8c4b6dff593fac | /commandblog.py | 6daa24f16c9ac3ed47f11c58abfc54682b1e7f8c | [
"MIT"
] | permissive | IssaIan/commandlineblog | 1b23f68bbdd6eafed627d00654def752f52488f8 | 2cfe197ed85bec3a24b31f55d43e8d1c54d9f7e4 | refs/heads/master | 2020-04-10T16:14:17.991212 | 2018-12-10T12:15:20 | 2018-12-10T12:15:20 | 161,138,508 | 0 | 0 | MIT | 2018-12-10T12:35:52 | 2018-12-10T07:58:53 | Python | UTF-8 | Python | false | false | 3,273 | py | import datetime
# In-memory store of comments created during this session (not persisted).
comments = []
# Hard-coded demo accounts.
# NOTE(review): passwords are stored in plain text; "steve" lacks the
# "lastLoginAt" key the other users have; and the "admin" role has no
# branch in login(), so kenn can log in but do nothing.
users = [
    {
        "name": "kenn",
        "password": "1234",
        "role": "admin",
        "lastLoginAt": ""
    },
    {
        "name": "issa",
        "password": "1234",
        "role": "moderator",
        "lastLoginAt": ""
    },
    {
        "name": "eric",
        "password": "1234",
        "role": "normal",
        "lastLoginAt": ""
    },
    {
        "name": "steve",
        "password": "1234",
        "role": "normal"
    }
]
def login():
    """Interactive login followed by one comment action for the matched user.

    Prompts for a username/password, records the login time, then offers a
    role-dependent menu ("normal" can create/edit, "moderator" can also
    delete).  Returns the global ``comments`` list after a successful
    action, an error string on bad input, or ``None`` when the username is
    unknown or the role (e.g. "admin") has no menu.
    NOTE(review): the create/edit branches are duplicated between roles,
    and the "logout" option recurses into login() instead of returning.
    """
    username = input("please input username: ")
    for user in users:
        if user['name'] == username:
            # return user['password']
            password = input("please input password: ")
            if user['password'] != password:
                return 'Wrong password'
            user["lastLoginAt"] = datetime.datetime.now()
            if user['role'] == "normal":
                userinput = input("1. create comment \n 2.Edit comment \n 3. logout ")
                if userinput == str("1"):
                    # Create: append a new comment with a 1-based sequential id.
                    comment = input("Enter your comment:")
                    data = {'comment_id': len(comments) +1,
                    'comment': comment,
                    'timestamp': datetime.datetime.now() ,
                    'created_by': username
                    }
                    comments.append(data)
                    return comments
                elif userinput == str("2"):
                    # Edit: locate the comment by id, then overwrite its text.
                    comment_id = int(input('Enter comment id:'))
                    if not comment_id:
                        return "Enter comment id"
                    comment = next((comment for comment in comments if comment["comment_id"] == comment_id), False)
                    if comment == False:
                        return "No comment found"
                    edit = input("Enter your comment here:")
                    comment["comment"] = edit
                    return comments
                else:
                    login()
            if user['role'] == "moderator":
                userinput = input("1. create comment \n 2. edit comment \n 3. delete comment \n 4. logout \n ")
                if userinput == str("1"):
                    # Create (same as the "normal" branch above).
                    comment = input("Enter your comment:")
                    data = {'comment_id': len(comments) +1,
                    'comment': comment,
                    'timestamp': datetime.datetime.now() ,
                    'created_by': username
                    }
                    comments.append(data)
                    return comments
                elif userinput == str("2"):
                    # Edit (same as the "normal" branch above).
                    comment_id = int(input('Enter comment id:'))
                    if not comment_id:
                        return "Enter comment id: "
                    comment = next((comment for comment in comments if comment["comment_id"] == comment_id), False)
                    if comment == False:
                        return "No comment found"
                    edit = input("Enter your comment here:")
                    comment["comment"] = edit
                    return comments
                elif userinput == str("3"):
                    # Delete: moderators only.
                    comment_id = int(input('Enter comment id'))
                    if not comment_id:
                        return 'Enter comment id'
                    comment = next((comment for comment in comments if comment["comment_id"] == comment_id), False)
                    if comment == False:
                        return "No comment found"
                    comments.remove(comment)
                    return comments
                else:
                    login()
print(login()) | [
"issamwangi@gmail.com"
] | issamwangi@gmail.com |
fda2f73dcd940252430b681bada9eeb30c5c875e | 625bcba8922a6d0bd4c34721354f1f87977437ad | /scripts/make_sec_qc/linear_reg.py | 309f3e5019cf48ba311d00c0967ec399d1c0fd81 | [] | no_license | toth12/data_processing_lts | 2970805d101154b5aa2283061da970d45538571a | 831c3d3ccfd745606f4cc855623086b3d3c07a01 | refs/heads/master | 2023-04-16T06:19:18.410693 | 2021-05-04T07:55:53 | 2021-05-04T07:55:53 | 353,973,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,729 | py | import pandas as pd
import numpy as np
import itertools
from itertools import chain, combinations
import statsmodels.formula.api as smf
import scipy.stats as scipystats
import statsmodels.api as sm
import statsmodels.stats.stattools as stools
import statsmodels.stats as stats
from statsmodels.graphics.regressionplots import *
import matplotlib.pyplot as plt
import seaborn as sns
import copy
from sklearn.cross_validation import train_test_split
import math
import time
import pdb
import os,sys
helper_path = os.path.join("..", "..", "utils")
sys.path.insert(0, helper_path)
import text
import numpy as np
# Load the QC base data; rows with an empty 'video_lenght' are skipped below.
# (NOTE: 'video_lenght' is the actual CSV column name — do not "fix" it.)
input_data=text.ReadCSVasDict('base_data_for_second_qc.csv')
original_x=[record['video_lenght'] for record in input_data if record['video_lenght']!='']
# Log-transform both variables before fitting the regression.
x1=np.array([np.log(float(record['video_lenght'])) for record in input_data if record['video_lenght']!=''])
y1=np.array([np.log(int(record['token_in_current_folia'])) for record in input_data if record['video_lenght']!=''])
pdb.set_trace()  # deliberate debugger stop to inspect the arrays
plt.rcParams['figure.figsize'] = (12, 8)
'''
np.random.seed(0)
x1 = np.random.normal(20, 3, 20)
y0 = 5 + 0.5 * x1
y1 = 5 + 0.5 * x1 + np.random.normal(0, 1, 20)
'''
# Ordinary least squares: log(tokens) ~ log(video length).
lm = sm.OLS(y1, sm.add_constant(x1)).fit()
print "The rsquared values is " + str(lm.rsquared)
plt.scatter(np.sort(x1), y1[np.argsort(x1)])
plt.scatter(np.mean(x1), np.mean(y1), color = "green")
#plt.plot(np.sort(x1), y0[np.argsort(x1)], label = "actual")
plt.plot(np.sort(x1), lm.predict()[np.argsort(x1)], label = "regression")
plt.title("Linear Regression plots with the regression line")
plt.legend()
fig, ax = plt.subplots(figsize=(12,8))
# Influence plot flags high-leverage / high-Cook's-distance observations.
fig = sm.graphics.influence_plot(lm, alpha = 0.05, ax = ax, criterion="cooks")
plt.show()
pdb.set_trace()  # second stop: inspect lm / figures before exiting
"gabor.toth@yale.edu"
] | gabor.toth@yale.edu |
935fca9aeb9e49495333cec1cba3e0da776c35f3 | 3170ea59f57a214285ea3e0a12f839ca9c64a4f8 | /python/BOJ/1000/1305.py | c67ec2711280427d2e66e1587fbdb08c91a03a27 | [] | no_license | dahyun1226/ProblemSolving | 9d02bed487f19abfcbd79934b166a2e59cd51c69 | 523cc84d70efe84e4574de84e50161563ccdb6d6 | refs/heads/master | 2023-07-26T18:18:45.468548 | 2021-09-08T12:52:03 | 2021-09-08T12:52:03 | 255,228,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | # 백준 온라인 저지 1305번: 광고
# fail의 이해
def makeFail(n: str):
    """Build the KMP failure table for string n.

    fail[i] holds the length of the longest proper prefix of n[:i + 1]
    that is also a suffix of it.
    """
    fail = [0] * len(n)
    border = 0  # length of the border matched so far
    for pos in range(1, len(n)):
        # Fall back through shorter borders until one can be extended by n[pos].
        while border > 0 and n[pos] != n[border]:
            border = fail[border - 1]
        if n[pos] == n[border]:
            border += 1
        fail[pos] = border
    return fail
l = int(input())  # advertised total length L
ad = input()  # the text captured from the looping advertisement
# Shortest billboard = L minus the longest proper prefix of `ad` that is
# also its suffix (the last entry of the KMP failure table).
print(l - makeFail(ad)[-1])
| [
"dahyun1226@naver.com"
] | dahyun1226@naver.com |
0ef11b9c31f93e9991b2d05c6ea0958092ae630c | 523a47a9519b548680cdd2e100d7d3bd871158d1 | /DP/14003_가장긴증가하는부분수열.py | 51db0adb36e9f2d9d5b0785b5451e48605b6e6aa | [] | no_license | bernard-choi/Algorithm_Training | 19c2b3063a506d45931d8948de765efc53dd93aa | 4e16533d0cd9ea1e79485f0a6d34bc680064c111 | refs/heads/master | 2022-12-31T01:20:53.361406 | 2020-10-20T03:00:04 | 2020-10-20T03:00:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | n = int(input())
input_list = list(map(int,input().split()))
# d[i] = length of the longest strictly increasing subsequence ending at i.
d = [1] * n
d[0]=1  # redundant (already 1 from the initialiser), kept as-is
for i in range(1,n): ## i is the index the subsequence must end at
    # print(i)
    max_value = 1
    for j in range(0,i):
        # print(j)
        # Extend any shorter increasing run that ends on a smaller value.
        if input_list[j] < input_list[i]:
            temp = d[j] + 1
            if temp > max_value:
                max_value = temp
    d[i] = max_value
# NOTE(review): this prints only the O(n^2) LIS length; BOJ 14003 also asks
# for the sequence itself with n up to 1e6 — confirm this meets the limits.
print(max(d))
| [
"gafield8785@naver.com"
] | gafield8785@naver.com |
b7acd23168a4cac8d3afab25692d93f940e90c97 | bc97fbbfa0078304fa45c386700ddba278c34e37 | /BoardManip.py | 8105cabbe4d1f0e28a5332790d6fecfe961d34e9 | [] | no_license | nayr31/Connect4 | f0d0c4fdce29b03b4286ebb671e4fe7a454d750c | 1be25dd07525f0bf3c4d7f02ddb6e0b6caef92ee | refs/heads/main | 2023-08-31T13:07:46.370699 | 2021-10-11T15:12:38 | 2021-10-11T15:12:38 | 405,181,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,434 | py | # This file is for anything that relates to the board. The main program should not know what it needs to do, only what it wants to.
# This could just be my Java seeping into my Python, but this is how I like it.
from PVector import PVector
import random
# Constants
width, height = 7, 6
top_string = "+ 0 1 2 3 4 5 6 "
# Cell encoding: 2 = player piece, 1 = empty, 0 = AI piece.
player_token, empty_token, ai_token = 2, 1, 0
# Deepest empty row of every column (row 5 is the floor of an empty board).
lowest_in_column = [5, 5, 5, 5, 5, 5, 5]
player_turn = True
game_over = False
moves = []
brain_depth = 1
# Board state: `height` rows by `width` columns, every cell starts empty.
board = [[empty_token for _ in range(width)] for _ in range(height)]
# Prints the board according to the example given (without the pieces):
## + 0 1 2 3 4 5 6
## 0| | | | | | | |
## 1| | | | | | | |
## 2| |X|O| | | | |
## 3| |O|X|X| | | |
## 4| |O|X|O| | | |
## 5|X|O|O|O|X| | |
## The spots in the board are converted using the ternary operation
## Why not just store the board like this? Because reasons.
def printBoard():
    """Print the column header, then every row as |-separated cells.

    X marks a player piece, 0 an AI piece, a blank an empty cell.
    """
    print(top_string)
    for row_number, row in enumerate(board):
        cells = []
        for spot in row:
            if str(spot) == str(player_token):
                cells.append("X")
            elif str(spot) == str(empty_token):
                cells.append(" ")
            else:
                cells.append("0")
        print(str(row_number) + "|" + "|".join(cells) + "|")
# Confirms that there is an open space at the top of the board. If there is, then it can't be full.
def is_valid_drop(column):
# First, check if its inside the range of our bounds
if column > width-1 or column < 0:
print("Selected column is too " + "high" if column > width-1 else + "low" + ". Try again.")
return False
# Then we can check for column validity
## A "1" indicates an empty space, meaning we can place something there
if board[0][column] != empty_token:
print("Column is full! Try another.")
return False
## Yes this could have just returned board[0][column] == 1, but I wanted error specific messages
return True
def take_player_turn():
    """Keep prompting until the player names a droppable column, then drop."""
    while True:
        # Error messages for bad columns are printed inside is_valid_drop().
        choice = int(input("Which column to drop into?: "))
        if is_valid_drop(choice):
            break
    make_move(choice, player_token)
def take_ai_turn():
    """Ask the minimax for a [value, column] pair and play that column."""
    prediction = minimax(brain_depth)
    print("I've seen the future: " + str(prediction))
    # The column in the prediction is already validated by the search.
    make_move(prediction[1], ai_token)
def check_four_winner():
    """Check the board for a finished game and announce the result.

    Sets the global game_over flag on a tie ([-99, -99] sentinel from
    check_for_four) or on any real four-in-a-row point.
    """
    global game_over
    received_point = check_for_four()
    if received_point == [-99, -99]:  # board is full with no run: a tie
        printBoard()
        game_over = True
        print("Tie.")
    elif received_point != [-1, -1]:  # a real point: somebody connected four
        printBoard()
        # The winner is whoever made the last recorded move.
        # FIX: compare against player_token instead of the magic number 2.
        winner = "Player " if moves[-1].data == player_token else "AI "
        print(winner + "has won.")
        game_over = True
def print_moves():
    """Write the whole recorded move history to stdout."""
    print("These were the games moves:")
    for recorded_move in moves:
        print(recorded_move)
# Refreshes the lowest known row per column for all columns
def refresh_lowest_all():
    """Recompute the deepest empty row for every column on the board."""
    for column in range(width):
        refresh_lowest_at(column)
# Refreshes the entry of the lowest row at a certain column
def refresh_lowest_at(col):
    """Recompute the deepest empty row in `col` (-1 when the column is full)."""
    lowest = -1
    for row in range(height):
        if board[row][col] != empty_token:
            break  # everything below this cell is occupied
        lowest = row
    lowest_in_column[col] = lowest
def minimax(depth):
    """Depth-limited negamax search over the droppable columns.

    Returns a [value, column] pair: `value` is the score of the best line
    found for the side to move (tracked by the global `player_turn` flag,
    which is toggled around each recursive call), and `column` is the drop
    that achieves it.  Ties between equally-scored columns are broken at
    random so the AI does not always prefer the leftmost column.
    """
    # Base condition, evaluate the final board state
    if depth == 0:
        return eval()
    # Check for valid columns
    valid = valid_cols()
    if len(valid) == 0: # Full board
        four_check = check_for_four()
        # [-99, -99] is a tie, no winner
        if four_check == [-99, -99]:
            return [0, 0]
        # This means that someone won, making this a bad move
        elif not four_check == [-1, -1]:
            return [-999999, 0]
        # [-1, -1] should never happen if the board isn't full, but just in case
        return [0, 0]
    # These values keep track of the best of the best
    best_eval = [-999999, -999999] # Temp best single
    best_list = [] # Best of all equally-scored columns
    # The reason for these is because otherwise the ai would prefer filling leftmost columns
    # Board is not full, run through each valid column to get the highest score
    for i in range(len(valid)):
        # Make a possible move
        global player_turn # Needed because the flag is reassigned below
        move = predict_move(valid[i], player_token if player_turn else ai_token)
        # Grab the value of the next deeper board state from the other player's turn
        player_turn = not player_turn
        test_eval = minimax(depth - 1)
        player_turn = not player_turn # Make sure to reset it back to our instance's turn
        test_eval[0] *= -1 # Negative because it would be the other turn always
        # If the value we tested is the same as the best we currently have, then we store that one as well
        if test_eval[0] == best_eval[0]:
            best_list.append(test_eval)
        # If it isn't equal, but is instead larger, then we clear the list and add the new value
        elif test_eval[0] > best_eval[0]:
            best_list.clear()
            best_list.append(test_eval)
            # Not sure if I need to deep copy here or not, so I did it anyway
            best_eval[0] = test_eval[0]
            best_eval[1] = test_eval[1]
        # Unmake the move
        unmake_move(move)
    # Now we have a list of the best columns
    if len(best_list) == 1: # If there was a clear winner, return it
        return best_eval
    else:
        # Get a random entry from the list and return it
        return best_list[random.randint(0, len(best_list) - 1)]
# Returns a list of the indexes of valid droppable columns
def valid_cols():
    """Indexes of the columns that can still accept a piece."""
    return [col for col, lowest in enumerate(lowest_in_column) if lowest != -1]
# Evaluates the board state into a score
# Does this by grabbing the normalized best column score of each player
# Then subtracts the player from the ai to get a difference
# Multiples the result by which turn it is
def eval():
    """Score the board from the mover's perspective.

    Returns [score, column]: the AI's best straight minus the player's
    best straight, sign-flipped when it is the player's turn, plus the
    column where the dominant side scored it.
    NOTE: this intentionally shadows the builtin eval() inside this module.
    """
    ai_best = best_in_score(ai_token)
    player_best = best_in_score(player_token)
    difference = ai_best[0] - player_best[0]
    perspective = 1 if player_turn else -1
    winning_col = ai_best[1] if difference >= 0 else player_best[1]
    return [difference * perspective, winning_col]
# Returns the best value in a list of columns, also returning the column it was found at
def best_in_score(token):
    """Return [best score, its column] across score_board(token)."""
    best_val = -1
    best_col = -1
    for col, val in enumerate(score_board(token)):
        if val > best_val:
            best_val = val
            best_col = col
    return [best_val, best_col]
# Returns a list of each possible drop point and its respective score to the simple leads
def score_board(token):
    """Per-column potential scores for `token`; -1 marks a full column."""
    col_score = []
    for column in range(width):
        # Make sure the drop row is current before scoring.
        refresh_lowest_at(column)
        row = lowest_in_column[column]
        if row != -1:
            col_score.append(score_col(token, row, column, 0, -1))
        else:
            col_score.append(-1)
    return col_score
# Looks around the current location for a given token. If it is, then it will proceed down that path.
# Dir is used to avoid circles because I am bad a coding recursion
## [0] [1] [2] (1 is unused)
## [7] [-1] [3]
## [6] [5] [4]
def score_col(token, row, column, length, dir):
    """Recursively measure the longest run of `token` reachable from (row, column).

    `dir` is -1 at the root call; recursive calls pass one of the direction
    codes from the comment block above so a branch never doubles back.
    At the root, opposite directions are bridged into straights before the
    best length is returned.  (Note: the parameter `dir` shadows the
    builtin of the same name within this function.)
    """
    # Store the initial best score from each direction
    best_score_in_dir = [-1, -1, -1, -1, -1, -1, -1, -1]
    ## Left 3 (up-left, left, down-left)
    if column != 0:
        ## up-left
        if row - 1 >= 0:
            if dir == -1 or dir == 0:
                if board[row - 1][column - 1] == token:
                    best_score_in_dir[0] = score_col(token, row - 1, column - 1, length + 1, 0)
        ## left
        if dir == -1 or dir == 7:
            if board[row][column - 1] == token:
                best_score_in_dir[7] = score_col(token, row, column - 1, length + 1, 7)
        ## down-left
        if row + 1 < height:
            if dir == -1 or dir == 6:
                if board[row + 1][column - 1] == token:
                    best_score_in_dir[6] = score_col(token, row + 1, column - 1, length + 1, 6)
    ## down
    if row + 1 < height:
        if dir == -1 or dir == 5:
            if board[row + 1][column] == token:
                best_score_in_dir[5] = score_col(token, row + 1, column, length + 1, 5)
    ## Right 3 (right-down, right, right-up)
    if column != width-1:
        ## right-down
        if row + 1 < height:
            if dir == -1 or dir == 4:
                if board[row + 1][column + 1] == token:
                    best_score_in_dir[4] = score_col(token, row + 1, column + 1, length + 1, 4)
        ## right
        if dir == -1 or dir == 3:
            if board[row][column + 1] == token:
                best_score_in_dir[3] = score_col(token, row, column + 1, length + 1, 3)
        ## right-up
        if row - 1 >= 0:
            if dir == -1 or dir == 2:
                if board[row - 1][column + 1] == token:
                    best_score_in_dir[2] = score_col(token, row - 1, column + 1, length + 1, 2)
    best_len = -1 # Best length of this column, overall score
    # Now we have the best score (common tokens) in each direction, although we want to bridge some as well
    ## [0] = left-down (0 and 4)
    ## [1] = straight right (7 and 3)
    ## [2] = left-up (6 and 2)
    ## [3] = down (down doesn't share a straight)
    # If I set the initial values in the best_in_score_dir to 0, it might break something. Something to improve/investigate if performance is bad
    if dir == -1: # This step should only be performed at the root call
        best_in_common = [-1, -1, -1, best_score_in_dir[5]]
        best_in_common[0] = (best_score_in_dir[0] if best_score_in_dir[0] != -1 else 0) + (best_score_in_dir[4] if best_score_in_dir[4] != -1 else 0)
        best_in_common[1] = (best_score_in_dir[7] if best_score_in_dir[7] != -1 else 0) + (best_score_in_dir[3] if best_score_in_dir[3] != -1 else 0)
        best_in_common[2] = (best_score_in_dir[6] if best_score_in_dir[6] != -1 else 0) + (best_score_in_dir[2] if best_score_in_dir[2] != -1 else 0)
        # Iterate through the now scored directions for the best one
        for i in range(4):
            if best_in_common[i] > best_len:
                best_len = best_in_common[i]
        #print("At root node return. BL=" + str(best_len) + " " + str(best_in_common))
        return best_len
    # Otherwise, we are at a recursion node
    # Since we are at a node, we need to return our best found length
    for i in range(8):
        if best_score_in_dir[i] > best_len:
            best_len = best_score_in_dir[i]
    # If no deeper branch beat our current run, our own length is the score
    if best_len < length:
        return length
    # This line should only run if we are at a node, but have run branches past this.
    return best_len
def check_for_four():
    """Scan the whole board for four equal, non-empty tokens in a row.

    Returns [row, column] of the anchor cell of a found run,
    [-99, -99] when the board is full with no run (a tie),
    or [-1, -1] when the game is still undecided.
    """
    ## Horizontal: the anchor needs three cells to its right.
    for row in range(height):
        for column in range(width - 3):
            cell = board[row][column]
            if cell != empty_token and (cell == board[row][column + 1]
                    == board[row][column + 2] == board[row][column + 3]):
                return [row, column]
    ## Vertical: the anchor needs three cells below it.
    for row in range(height - 3):
        for column in range(width):
            cell = board[row][column]
            if cell != empty_token and (cell == board[row + 1][column]
                    == board[row + 2][column] == board[row + 3][column]):
                return [row, column]
    ## Diagonal down-right.
    for row in range(height - 3):
        for column in range(width - 3):
            cell = board[row][column]
            if cell != empty_token and (cell == board[row + 1][column + 1]
                    == board[row + 2][column + 2] == board[row + 3][column + 3]):
                return [row, column]
    ## Diagonal up-right: the anchor needs three rows above it, so it can
    ## sit on any row from 3 up to height - 1.
    ## BUG FIX: the previous loop was range(height - 1, height - 3, -1),
    ## which stopped before row 3 and therefore missed every up-right
    ## four-in-a-row that reaches the top row.
    for row in range(height - 1, 2, -1):
        for column in range(width - 3):
            cell = board[row][column]
            if cell != empty_token and (cell == board[row - 1][column + 1]
                    == board[row - 2][column + 2] == board[row - 3][column + 3]):
                return [row, column]
    ## No run found: any empty cell means the game is still in play,
    ## otherwise the full board is a tie.
    for row in range(height):
        for column in range(width):
            if board[row][column] == empty_token:
                return [-1, -1]
    return [-99, -99]
# Returns the value on the board when given a point (list of two numbers)
def val_at(point):
    """Return the board cell at point == [row, column]."""
    row, column = point[0], point[1]
    return board[row][column]
# Test method; returns the current board score of the player [token_type]
def test_score_player():
    """Debug helper: the player's current per-column board scores."""
    return score_board(player_token)
# The following methods use the concept of a "move".
## A move is defined as the position and piece that was placed.
## The PVector object stores the location and piece data in a single object
# This concept allows us to make and unmake moves easily for recursive tree searching
# Yes, these could be only two methods with an argument for both, but I like this more
def make_move(column, token):
    """Drop `token` into `column`, record it in the history, return the move.

    Normal entry point for real moves; the board supplies the row itself.
    Delegates the board mutation to predict_move() so the placement logic
    lives in exactly one place (it was duplicated here before).
    """
    move = predict_move(column, token)
    moves.append(move)
    return move
def predict_move(column, token):
    """Place `token` in `column` WITHOUT recording it in the move history.

    Used by the search to try out positions; pair with unmake_move().
    """
    row = lowest_in_column[column]
    move = PVector(column, row, token)
    board[row][column] = token
    refresh_lowest_at(column)
    return move
def undo_move():
    """Pop the most recent recorded move and clear its cell."""
    last_move = moves.pop()
    board[last_move.y][last_move.x] = empty_token
    refresh_lowest_at(last_move.x)
def unmake_move(move): # Reverts a certain move to an empty state
    """Clear the cell `move` occupied; the move history is left untouched."""
    #print("Unmaking " + str(move))
    board[move.y][move.x] = empty_token
    refresh_lowest_at(move.x)
# Gets the desired depth of the minimax from the user
def get_depth():
    """Ask the user for the minimax search depth and store it in brain_depth."""
    global brain_depth
    # Keep pinging for a response until a non-negative integer is entered.
    while True:
        raw = input("Please enter the depth of minimax: ")
        try:
            depth = int(raw)
        except ValueError:
            # BUG FIX: this was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only bad numbers belong here.
            print("Please enter a number.")
            continue
        if depth >= 0:
            break
        print("Cannot be negative")
    brain_depth = depth
"re17wj@brocku.ca"
] | re17wj@brocku.ca |
f4569cb68df5952191b3a6f8ad24a70db8240a73 | ac87ed7653f277c82ed80235d4598847e96102b4 | /scripts/date_parse.py | ee71750e7cd12b49575588c7145222009957e894 | [] | no_license | ucancallmealicia/aspace_data_audit_demo | da561017bae6287ac14c3b5a86055091ebd4bf36 | de96822d7af24c004d223772f2b0c4bd81f2ffb3 | refs/heads/master | 2021-08-07T05:45:09.214166 | 2017-11-07T16:34:05 | 2017-11-07T16:34:05 | 108,923,900 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | from dateutil.parser import parse
import csv
csvin = input('Please enter path to input csv: ')
file = open(csvin, 'r', encoding='utf-8')
reader = csv.reader(file)
next(reader, None)
for row in reader:
date_expression = row[0]
try:
parse_it = parse(date_expression)
print(parse_it)
except ValueError:
print('String does not contain a parse-able date: ' + row[0])
continue
| [
"noreply@github.com"
] | ucancallmealicia.noreply@github.com |
c6815d6ecd0404d950bfe91a9e4cd29990e054be | f5a6f1ecc4e9ecdf0ef607707ca7d725cbe7aab2 | /day6_1.py | 3c10375d99157fc40a06d37d94021ca652bcb763 | [] | no_license | juliaschatz/adventofcode19 | 89911c0bc5f6b2dc72a3ac93d130a046c3bf7353 | c2a0fd0a4c53f7916444accb2a0bb25f66257b50 | refs/heads/master | 2020-09-27T07:34:52.581548 | 2019-12-17T22:03:39 | 2019-12-17T22:03:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | #!/usr/bin/python
with open("day6_input") as f:
orbits = {}
for l in f.readlines():
o = l.replace("\n","").split(")")
orbits[o[1]] = o[0]
def count(p):
if orbits[p] == "COM":
return 1
return 1 + count(orbits[p])
print(sum(map(count, orbits.keys()))) | [
"schat127@umn.edu"
] | schat127@umn.edu |
61bd7c3ce1c3aa60ee4f2247f0294133557a21b5 | dcc82e255c50ddcae9489e2524e43c1d51dc0daf | /schematic.py | ffb16d56cf23290de52dd83ff0b3925c922349af | [
"MIT"
] | permissive | BenediktSeidl/fzpzf | 5f46bda1b09fd74a79892808ebe3541d01ec7162 | 69581beb36e9f9cc62713559f7212c9fc81ba462 | refs/heads/master | 2020-12-04T05:37:59.281232 | 2011-10-03T18:03:16 | 2011-10-03T18:03:16 | 2,462,234 | 9 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,056 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011 Benedikt Seidl
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import svg
def create(s, file_name):
    """Render the schematic SVG for part `s` and write it to `file_name`.

    `s` exposes getSide(side) for "R", "L", "T", "B" (per-side lists of
    pin numbers, falsy entries for gaps), `s.pins.data` mapping a pin
    number to (name, description), and `s.meta.name` for the part label.
    """
    r,l,t,b = [getattr(s,"getSide")(i) for i in "RLTB"]
    p = 21.25 # grid pitch
    # Body size grows with the longest pin row on each axis.
    height = p*(max(len(r), len(l)))
    width = p*(max(len(t), len(b)))
    svg_width = width +2*p
    svg_height = height+2*p
    # NOTE(review): 127/360 presumably converts internal units to mm —
    # confirm against the svg module's start() signature.
    svg.start(svg_width, svg_height, svg_width*127/360, svg_height*127/360, "mm")
    svg.add_start_g(id="schematic")
    def addPin(nr,c,name,direction):
        # Position/rotation for pin `nr`, the c-th slot on side `direction`.
        if direction == "R":
            x = width +p
            y = (1.5+ c)*p
            rot = 0
            f = 0
        elif direction == "L":
            x = p
            y = p*(1.5 +c)
            rot = 0
            f = 1
        elif direction == "T":
            x = (1.5+ c)*p
            rot = 90
            y = p
            f = 1
        elif direction == "B":
            x = p*(1.5 +c)
            y = height +p
            rot = 90
            f = 0
        # f flips the leg and the text anchors so they point away from the body.
        if f == 0:
            name_anchor = "end"
            nr_anchor = "start"
            direction = 1
        else:
            name_anchor = "start"
            nr_anchor = "end"
            direction = -1
        svg.add_start_g(transform="rotate({r}, {x}, {y}) translate({x},{y}) ".format(x=x,y=y,r=rot))
        svg.add_path(d=(
            svg.M(0,-1.2),
            svg.H(p*direction),
            svg.V(1.2),
            svg.H(0),
            svg.z()), id="connector{0}pin".format(nr))
        svg.add_path(d=(
            svg.M((p)*direction-2.4*direction,-1.2),
            svg.h(2.4*direction),
            svg.v(2.4),
            svg.h(-2.4*direction),
            svg.z()), id="connector{0}terminal".format(nr))
        svg.add_text(name, x=-3*direction, y=3, font_size=10, font_family="DroidSans", text_anchor=name_anchor)
        svg.add_text(nr, x=+3*direction, y=-2, font_size=7, font_family="DroidSans", text_anchor=nr_anchor)
        svg.add_end_g()
    for data,direction in [(r,"R"), (l,"L"), (t,"T"), (b,"B")]:
        for i,pin in enumerate(data):
            if pin:
                name, description = s.pins.data[pin]
                addPin(pin, i, name, direction)
    # Component body outline.
    svg.add_rect(p,p,width,height, fill="none", stroke_width=2.4, stroke="#000000")
    # Place the part name on whichever side has no pins (default: centred).
    # BUG FIX: the rotation used to be stored in `r`, clobbering the
    # right-side pin list, so `any(r)` below raised TypeError whenever both
    # the top and bottom rows carried pins; the rotation now lives in `rot`.
    x = svg_width/2
    rot = 0
    if not any(t):
        y = 18
    elif not any(b):
        y = svg_height - 1
    elif not any(r):
        rot = 90
        y = svg_height/2
        x = svg_width -18
    elif not any(l):
        rot = 270
        y = svg_height/2
        x = 18
    else:
        x = svg_width/2
        y = svg_height/2
        rot = 270 if width < height else 0
    if rot != 0:
        rotate = dict(rotate="rotate({r},{x},{y})".format(r=rot,x=x,y=y))
    else:
        rotate = dict()
    svg.add_text(s.meta.name,
            font_size=18, font_family="DroidSans", text_anchor="middle",
            x=x, y=y, **rotate) # part name label
    svg.add_end_g()
    svg.end()
    svg.write(file_name)
| [
"Benedikt.Seidl@gmx.de"
] | Benedikt.Seidl@gmx.de |
376d6b0ccb6509c96d3c340f24977524379fc444 | 45de3aa97525713e3a452c18dcabe61ac9cf0877 | /src/secondaires/diligence/fonctions/diligences.py | 0c9dd2f3eb7a0ab9f84de366ec3c7a1105448876 | [
"BSD-3-Clause"
] | permissive | stormi/tsunami | 95a6da188eadea3620c70f7028f32806ee2ec0d1 | bdc853229834b52b2ee8ed54a3161a1a3133d926 | refs/heads/master | 2020-12-26T04:27:13.578652 | 2015-11-17T21:32:38 | 2015-11-17T21:32:38 | 25,606,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,016 | py | # -*-coding:Utf-8 -*
# Copyright (c) 2014 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la fonction diligences."""
from primaires.scripting.fonction import Fonction
from primaires.scripting.instruction import ErreurExecution
class ClasseFonction(Fonction):
"""Retourne les diligences (salle d'entrée)."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.toutes_diligences)
@staticmethod
def toutes_diligences():
"""Retourne toutes les diligences de l'univers.
Cette fonction retourne toutes les diligences sous la forme
d'une liste. Cette liste contient des salles. Les fonctions
et actions manipulant les diligencs attendent une salle
comme paramètre : les salles retournées sont les salles
d'entrées (celles de mnémonique "1"). La diligence possède
normalement une sortie "bas" menant vers la salle permettant
d'accéder à la diligence.
Cette fonction n'attend aucun paramètre.
Exemple d'utilisation :
diligences = diligences()
pour chaque entree dans diligences:
exterieur = destination(entree, "bas")
# exterieur contient la salle à l'extérieur de la diligence
fait
"""
zones = importeur.diligence.zones
entrees = []
for zone in zones:
salle = importeur.salle.salles.get("{}:1".format(zone.cle))
if salle:
entrees.append(salle)
return entrees
| [
"vincent.legoff.srs@gmail.com"
] | vincent.legoff.srs@gmail.com |
8fc69ea6d952ef1e4cfc879a40a170fe9c897d6c | d9fd9c6329461235f140393f1e934362d0f645df | /Unidad 2/Módulo 6/Sección 4/eje_09.py | e3cf1510314a26331adc0b550e3c13291c3325ad | [
"MIT"
] | permissive | angelxehg/utzac-python | e6b5ee988d1d76c549ab0fa49717eb042fa7d91f | fb88bcc661518bb35c08a102a67c20d0659f71db | refs/heads/main | 2022-12-02T11:16:27.134741 | 2020-08-14T19:38:33 | 2020-08-14T19:38:33 | 265,944,612 | 0 | 0 | MIT | 2020-08-07T21:23:53 | 2020-05-21T20:25:24 | Python | UTF-8 | Python | false | false | 375 | py | class MiClase:
pass
obj = MiClase()
obj.a = 1
obj.b = 2
obj.i = 3
obj.ireal = 3.5
obj.entero = 4
obj.z = 5
def incIntsI(obj):
    """Add 1 to every attribute of obj that starts with 'i' and holds an int.

    Note: isinstance(value, int) is also True for bools, so a boolean
    attribute named i... would be incremented too (behaviour preserved).
    """
    for attr_name, attr_value in vars(obj).items():
        if attr_name.startswith('i') and isinstance(attr_value, int):
            setattr(obj, attr_name, attr_value + 1)
setattr(obj, name, val + 1)
print(obj.__dict__)
incIntsI(obj)
print(obj.__dict__)
| [
"50889225+angelxehg@users.noreply.github.com"
] | 50889225+angelxehg@users.noreply.github.com |
bb936e36f73b3022e5fc4ff938b2e48d6d89e8c1 | 4273f162abb12ef1939271c2aabee9547ac6afee | /studio_usd_pipe/test/ver.py | e3d449cb801732082a041c7c123caf699f61c94a | [] | no_license | xiyuhao/subins_tutorials | 2717c47aac0adde099432e5dfd231606bf45a266 | acbe4fe16483397e9b0f8e240ca23bdca652b92d | refs/heads/master | 2023-07-28T13:42:41.445399 | 2021-09-12T11:02:37 | 2021-09-12T11:02:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,366 | py | input_data = {
"exe": [
"KONSOLE_EXE",
"/venture/source_code/subins_tutorials/studio_usd_pipe/bin/build-in/konsole/main.sh"
],
"name": [
"APPLICATION_NAME",
"konsole2.10.5"
],
"version": [
"KONSOLE_VERSION",
"konsole2.10.5"
],
"path": [
"KONSOLE_PATH",
"/venture/source_code/subins_tutorials/studio_usd_pipe/bin/build-in/konsole"
],
"order": 0,
"bash": "/venture/source_code/subins_tutorials/studio_usd_pipe/bin/build-in/konsole/main.sh",
"icon": [
"KONSOLE_ICON",
"/venture/source_code/subins_tutorials/studio_usd_pipe/resource/icons/konsole.png"
]
}
import os
import json
os.environ['KONSOLE_EXE'] = "/venture/source_code/subins_tutorials/studio_usd_pipe/bin/build-in/konsole/main.sh:subin"
for each in input_data:
if not isinstance(input_data[each], list):
continue
env_name = input_data[each][0]
env_value = input_data[each][1]
if isinstance(env_value, list):
env_value = ':'.join(env_value)
else:
env_value = str(env_value)
if os.getenv(env_name):
envrons = os.getenv(env_name).split(':')
envrons.append(env_value)
envrons = list(set(envrons))
env_value = os.environ[':'.join(envrons))
else:
env_value = str(env_value) | [
"subing85@gmail.com"
] | subing85@gmail.com |
43b850a09eb1b800bf3a04b4a7b97fbca79ba6f3 | af64febbe756c6eef9201bbdec675d0956376ba8 | /day 6-nested loop/pattern 17.py | a202763a59f4904da3935b6d4bcca470da079ecd | [] | no_license | lokitha0427/pythonproject | 34bf1e6d301b5adc3330d79294ce3cf3aa170a22 | 7eac677378ac7d2f631466e98b646a0f928e3852 | refs/heads/main | 2023-08-19T21:01:58.297201 | 2021-09-21T08:08:14 | 2021-09-21T08:08:14 | 380,134,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | #pattern 17
'''
1 3 5 7 9
3 5 7 9 11
5 7 9 11 13
7 9 11 13 15
9 11 13 15 17
'''
k=1  # NOTE(review): this seed value is never read; k is reassigned below
for i in range(1,6):  # i = row number (1..5)
    for j in range(1,6):  # j = column number (1..5)
        # Odd values: +2 per column and +2 per row, starting at 1.
        k=2*(i+j)-3
        if k<10:
            # Pad single digits with a leading space to keep columns aligned.
            print(" "+str(k),end=" ")
        else:
            print(k,end=" ")
    print()
| [
"noreply@github.com"
] | lokitha0427.noreply@github.com |
16b0e2d4b028d65145d6962bb8cfe453c22ffd6b | 13d06305d64ee4705a8f9ba0c3801b89fa8c742c | /timeflow/placeholders/prediction.py | 987ef175ce54c7fe5722b8125fdc13d4f931554d | [
"MIT"
] | permissive | genesiscrew/TensorFlow-Predictor | 45fc43f5c85bac2fbc137fb6a23fc7e458248e3b | d129172b064d9e73e9118ac7164eb826a1263100 | refs/heads/master | 2021-01-20T05:53:25.935780 | 2017-05-02T03:05:10 | 2017-05-02T03:05:10 | 89,820,153 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,497 | py | # Input and output placeholders for prediction placeholders
import tensorflow as tf
def input_placeholder(input_dim, name='inputs'):
    """Create a float32 input placeholder of shape [None, input_dim].

    Parameters
    ----------
    input_dim : integer
        Number of input features per sample.
    name : string
        Name of the placeholder in the graph.

    Returns
    ----------
    tf.placeholder
        Placeholder accepting any batch size.
    """
    return tf.placeholder(tf.float32, shape=[None, input_dim], name=name)
def input_batch_placeholder(input_dim, batch_size, name='inputs'):
    """Create a float32 placeholder shaped [batch_size, None, input_dim]
    for batch training.

    Parameters
    ----------
    input_dim : integer
        Number of input features per sample.
    batch_size : integer
        Number of sequences per batch (the leading dimension).
    name : string
        Name of the placeholder in the graph.

    Returns
    ----------
    tf.placeholder
        Placeholder with a free middle (time) dimension.
    """
    shape = [batch_size, None, input_dim]
    return tf.placeholder(tf.float32, shape=shape, name=name)
def output_placeholder(output_dim, name='outputs'):
"""Initialize output placeholder for prediction networks
Parameters
----------
output_dim : integer
Input dimensions
name : string
Placeholder name
Returns
----------
tf.placeholder
Input placeholder
"""
output = tf.placeholder(tf.float32, shape=[None, output_dim],
name=name)
return output
| [
"abhishekmalali@gmail.com"
] | abhishekmalali@gmail.com |
cd6178fde5a2f050f81387635719ed84a41f15cb | 8218b3ba6abf291bad3f29b1dce3cd1c9bb51a27 | /doctype/contract/contract.py | 3a0d291a55f1b1a77376215d04470e89b53cafab | [] | no_license | Kozlov-V/contracts | 1ec3c7a6e794789b2fde4f426f4369008c1c53f8 | 8928c1bdbe4eb1f9a5e2db8f263e150b81865525 | refs/heads/master | 2016-08-12T19:08:51.832553 | 2016-01-13T20:31:34 | 2016-01-13T20:31:34 | 49,599,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,039 | py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, getdate
from frappe import _
from frappe.model.document import Document
class Contract(Document):
def get_feed(self):
return '{0}: {1}'.format(_(self.status), self.contract_name)
def onload(self):
"""Load contract tasks for quick view"""
if not self.get("tasks"):
for task in self.get_tasks():
self.append("tasks", {
"title": task.subject,
"status": task.status,
"start_date": task.exp_start_date,
"end_date": task.exp_end_date,
"description": task.description,
"task_id": task.name
})
def __setup__(self):
self.onload()
def get_tasks(self):
return frappe.get_all("Task", "*", {"contract": self.name}, order_by="exp_start_date asc")
def validate(self):
self.validate_dates()
self.sync_tasks()
self.tasks = []
def validate_dates(self):
if self.expected_start_date and self.expected_end_date:
if getdate(self.expected_end_date) < getdate(self.expected_start_date):
frappe.throw(_("Expected End Date can not be less than Expected Start Date"))
def sync_tasks(self):
"""sync tasks and remove table"""
if self.flags.dont_sync_tasks: return
task_names = []
for t in self.tasks:
if t.task_id:
task = frappe.get_doc("Task", t.task_id)
else:
task = frappe.new_doc("Task")
task.contract = self.name
task.update({
"subject": t.title,
"status": t.status,
"exp_start_date": t.start_date,
"exp_end_date": t.end_date,
"description": t.description,
})
task.flags.ignore_links = True
task.flags.from_contract = True
task.save(ignore_permissions = True)
task_names.append(task.name)
# delete
for t in frappe.get_all("Task", ["name"], {"contract": self.name, "name": ("not in", task_names)}):
frappe.delete_doc("Task", t.name)
self.update_percent_complete()
self.update_costing()
def update_contract(self):
self.update_percent_complete()
self.update_costing()
self.flags.dont_sync_tasks = True
self.save()
def update_percent_complete(self):
total = frappe.db.sql("""select count(*) from tabTask where contract=%s""", self.name)[0][0]
if total:
completed = frappe.db.sql("""select count(*) from tabTask where
contract=%s and status in ('Closed', 'Cancelled')""", self.name)[0][0]
self.percent_complete = flt(flt(completed) / total * 100, 2)
def update_costing(self):
from_time_log = frappe.db.sql("""select
sum(costing_amount) as costing_amount,
sum(billing_amount) as billing_amount,
min(from_time) as start_date,
max(to_time) as end_date,
sum(hours) as time
from `tabTime Log` where contract = %s and docstatus = 1""", self.name, as_dict=1)[0]
from_expense_claim = frappe.db.sql("""select
sum(total_sanctioned_amount) as total_sanctioned_amount
from `tabExpense Claim` where contract = %s and approval_status='Approved'
and docstatus = 1""",
self.name, as_dict=1)[0]
self.actual_start_date = from_time_log.start_date
self.actual_end_date = from_time_log.end_date
self.total_costing_amount = from_time_log.costing_amount
self.total_billing_amount = from_time_log.billing_amount
self.actual_time = from_time_log.time
self.total_expense_claim = from_expense_claim.total_sanctioned_amount
self.gross_margin = flt(self.total_billing_amount) - flt(self.total_costing_amount)
if self.total_billing_amount:
self.per_gross_margin = (self.gross_margin / flt(self.total_billing_amount)) *100
def update_purchase_costing(self):
total_purchase_cost = frappe.db.sql("""select sum(base_net_amount)
from `tabPurchase Invoice Item` where contract_name = %s and docstatus=1""", self.name)
self.total_purchase_cost = total_purchase_cost and total_purchase_cost[0][0] or 0
@frappe.whitelist()
def get_cost_center_name(contract_name):
return frappe.db.get_value("Contract", contract_name, "cost_center")
| [
"kozlov-ter@yandex.ru"
] | kozlov-ter@yandex.ru |
ec574f915bd463b70acdc5f5ee19c252bfd21b95 | f8221492ca64481c84680d46f617de6168719e7a | /frontpage/migrations/0002_auto_20180509_2100.py | 7631280fa55bcc3b9a999aed10cc5bb76f95bd18 | [] | no_license | JohannLieb/BD | 046a1e7e1be665c66fcc1c5e22f7f7fbf9d0e5a4 | c880fd697692fa3501afba52aa43031e5b253e75 | refs/heads/master | 2020-03-18T10:37:29.003428 | 2018-06-09T17:01:22 | 2018-06-09T17:01:22 | 134,623,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | # Generated by Django 2.0.4 on 2018-05-09 18:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('frontpage', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='orders',
options={},
),
]
| [
"gerbertwells101@gmail.com"
] | gerbertwells101@gmail.com |
4229eb3d57d5f03b46b944d86271693266461296 | e73a2ff9458effe038ebabfe9db6cdaf0c5bc473 | /order_food_online_project/order_food_online/urls.py | c5771206f24064f500d0c904aa8232d203cf5dcb | [
"MIT"
] | permissive | MaksNech/django_order_food_ingredients | fcad5668b92b90776715d39e3f241577cf4364fa | 3578e36570ce99b25136942320fbcd7df956d435 | refs/heads/master | 2020-04-20T21:20:38.496108 | 2019-04-06T15:17:29 | 2019-04-06T15:17:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | """order_food_online URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from django.shortcuts import redirect
from django.conf.urls.static import static
urlpatterns = [
path('api/v1/', include('order_food_online.api_urls')),
path('', lambda request: redirect('foods/', permanent=True)),
path('i18n/', include('django.conf.urls.i18n')),
path('admin/', admin.site.urls),
path('foods/', include('foods.urls')),
path('notes/', include('notes.urls')),
path('authentication/', include('authentication.urls')),
path('accounts/', include('django.contrib.auth.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"nechypurenko.shag@gmail.com"
] | nechypurenko.shag@gmail.com |
5e373df7e1c7c7ded6daed67b822f087b1d68048 | 290e455134baa42a177d16564442524d6152ef40 | /src/.ipynb_checkpoints/model_maker-checkpoint.py | 8fbb4f92490906adcce68441768253d09b9b5931 | [] | no_license | hoffm386/customer_churn_project | caeeef745e036707723a056b2966e19c4ce261ee | 8038e34ebd31dd5296be49b57bf4d6d54f2dc55a | refs/heads/master | 2021-05-24T16:12:26.486060 | 2020-03-31T04:24:48 | 2020-03-31T04:24:48 | 253,650,393 | 0 | 0 | null | 2020-04-07T00:48:00 | 2020-04-07T00:47:59 | null | UTF-8 | Python | false | false | 3,470 | py | import pickle
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, StackingClassifier, GradientBoostingClassifier, ExtraTreesClassifier
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
file_name = '../../data/Customer Churn Data.csv'
def pipeline(pickle = True):
X_train, X_test, y_train, y_test = get_train_and_test_data()
model = make_model(X_train, y_train)
if pickle:
pickler(model, 'model.pickle')
return model
def get_train_and_test_data():
'''
Returns testing and training data
'''
data = get_data()
return split_data(data)
def get_data():
'''
Gets data from datafile and does some pruning.
Drops columns that worsen the model and agregates the charges columns (This helps the model)
Returns
-------
Returns the data frame to be used in making the model
'''
df = pd.read_csv(file_name)
df['international plan'] = (df['international plan'] == 'yes').astype(int)
df['voice mail plan'] = (df['voice mail plan'] == 'yes').astype(int)
df['total charge'] = df['total day charge'] + df['total eve charge'] + df['total intl charge'] + df['total night charge']
df = df.drop(['total day charge', 'total eve charge', 'total intl charge', 'total night charge'], axis = 1)
df = df.drop(['area code', 'phone number', 'state'], axis = 1)
return df
def split_data(data):
'''
Does a train test split on the passed in with churn as the target
Parameters
----------
data: churn data to be split
Returns
-------
Training predictors, test predictor, training target, test target
'''
target = data['churn']
X = data.copy()
X = X.drop(['churn'], axis = 1)
return train_test_split(X, target, test_size = 0.30, random_state = 42)
def make_model(X_train, y_train):
'''
fits and returns a stacking model based on the data passed in
'''
estimators = [('rf', RandomForestClassifier()),
('log', LogisticRegression(solver = 'liblinear')),
('grad', GradientBoostingClassifier())]
stack = StackingClassifier(estimators = estimators, final_estimator = LogisticRegression(), cv = 5)
stack.fit(X_train, y_train)
return stack
def metrics(y_true, y_pred):
'''
returns some metrics
'''
metric_dictionary = {}
metric_dictionary['Accuracy'] = str(accuracy_score(y_true, y_pred))
metric_dictionary['Precision'] = str(precision_score(y_true, y_pred))
metric_dictionary['Recall'] = str(recall_score(y_true, y_pred))
metric_dictionary['F1'] = str(f1_score(y_true, y_pred))
metric_dictionary['confusion_matrix'] = confusion_matrix(y_true, y_pred)
return metric_dictionary
def pickler(model, file_name):
'''
turns a model into a pickle file
'''
output_file = open(file_name, 'wb')
pickle.dump(model, output_file)
output_file.close()
def read_pickle(file_name):
'''
reads a pickle file
'''
model_file = open(file_name, "rb")
model = pickle.load(model_file)
model_file.close()
return model | [
"jarodc333@gmail.com"
] | jarodc333@gmail.com |
2a7a72f33498109a975dad72cce5024127501aae | 0eaffa74bf33d54a30a16ca85a2771090a225747 | /apps/hosts/tests.py | 6121c0f272b1909c8e90e972fbea59fd54b4f9e6 | [] | no_license | tom2jack/fdommp | b6e1466df3ae041f730cb9c7a0f6257b6902023a | 264150666c621268d0d05686cc445ace91993419 | refs/heads/master | 2022-04-12T23:42:52.255825 | 2020-03-05T08:37:08 | 2020-03-05T08:37:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,598 | py | import sys
import os
from django.conf import settings
pwd = os.path.dirname(os.path.realpath(__file__))
sys.path.append(pwd + "../../")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fdommp.settings")
import django
django.setup()
# git
# from git import Repo
#
# url = 'https://github.com/fq47523/fdommp-dockerfile.git'
# gitpath = '/tmp/testgit'
#
#
# # Repo.clone_from(url, gitpath,multi_options=['--depth=1'])
# git_repo = Repo(gitpath)
# print (git_repo.branches)
# print (git_repo.tags)
from rest_framework_jwt.utils import jwt_payload_handler
# from django.contrib.auth.models import User
# from rest_framework_jwt.settings import api_settings
# userobj = User.objects.get(username='fuqing')
# jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
# jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
#
# payload = jwt_payload_handler(userobj)
# token = jwt_encode_handler(payload)
# print (token)
import json
from assets.dao import AssetManage
from django.core import serializers
from assets.models import Tag,Asset,Server
from hosts.models import Service
# from utils.deploy.git import GitTools
#
# print (GitTools('/home/fuqing/PycharmProjects/fdommp').tag())
import subprocess,json
# ret = subprocess.getstatusoutput("echo 'select * from opsmanage.auth_user' | /opt/soar -online-dsn 'root:redhat@192.168.79.134:3307/opsmanage' -report-type json")
# print (json.loads(ret[1]))
# print (ret[1].split('\n'))
from django.contrib import auth
# from django.contrib.auth.models import User
# user = User.objects.get(username='admin')
# user.set_password('admin')
# user.save() | [
"289557905@qq,com"
] | 289557905@qq,com |
9b5a036d68b3466bed0b435be8c763ad69490521 | 10b1f4d80f4453972918f8de42a5b6212047dac9 | /submissions/exercise4/doron/main.py | 86994c093b9644a9187cdc9a110e5a4de9ef497a | [
"MIT"
] | permissive | tjmonsi/cmsc129-2016-repo | e6a87cab1c6adb093f9b339271cf3e8d2c48726c | 5a1833265b17f9079d5a256909296d363db9179b | refs/heads/master | 2021-01-22T09:55:37.484477 | 2017-06-01T16:24:19 | 2017-06-01T16:24:19 | 52,767,672 | 0 | 14 | null | 2017-06-01T16:24:20 | 2016-02-29T05:55:01 | JavaScript | UTF-8 | Python | false | false | 594 | py | #!/usr/bin/python
import tokengenerator
import grammar
import execute
filename = input("Enter Filename: ")
tokengenerator.tokengenerator(filename)
print("\n\nCHECK GRAMMAR\n\n")
grammar.addGrammars()
ret = grammar.checkGrammar("START", 0)
if len(ret.grammar) >0:
print("GRAMMAR CONSTRUCTION")
for gram in ret.grammar:
print(gram.grammar+"-> "+gram.token+" "+str(gram.type))
if len(ret.error_messages) > 0:
print("ERROR MESSAGES")
for message in ret.error_messages:
print(message)
print("\n\nCONSOLE INPUT")
execute.execute(ret.grammar)
| [
"mddoron@up.edu.ph"
] | mddoron@up.edu.ph |
fc547eff6a8ec530b219d1a57a2a7005de4c279a | c3941a9a631511f7785f49ad0de8b584c3f4a146 | /dataset_utilities/create_yrly_datasets.py | ac020edcd6cca9258e4ff9b1f2fdef1b73850161 | [] | no_license | spurs229/advanced-ml-final-proj-original-paper-code | 02df3b469240b30d4431912f56b74b54713751e6 | b5c9016e5a093e531a5ec21bc0b4638d2451b6c1 | refs/heads/main | 2023-07-15T12:54:16.870585 | 2021-08-30T20:05:06 | 2021-08-30T20:05:06 | 400,199,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,875 | py | # from xml.etree import cElementTree as ET
import os
import re
def clean_string(s):
s = s.lower()
s = re.sub( '\s+', ' ', s ).strip()
sp =''
for w in s.split():
if len(w) < 20 and len(re.sub('[^a-z]+', '', w)) == len(w):
sp+=w + " "
# s = s.lower()
# s = re.sub('[^a-zA-Z\s]+', '', s) #TODO just discard words with non-alpha characters
# s = re.sub(r'\b\w{20,10000}\b', '', s) #remove words that are more than 20 chaacters
# s = re.sub(r'\b\w{1,1}\b', '', s) #remove single letter words
return sp
def parse_xml_file(file, print_file = False):
with open(file, 'r') as f:
s = f.read()
s= s[s.index('<block class="full_text">')+25:]
s = s[:s.find('</block>')].replace('<p>','').replace('</p>','')
s = clean_string(s)
return s
def parse_txt_file(file, print_file = False):
with open(file, 'r') as f:
s = f.read()
s = clean_string(s)
return s
def yr_directory(dir, yr, lab, xml = True, outputdir ='../databyyr/'):
with open(lab+'failed_to_parse.txt', 'w') as failed:
with open(outputdir + lab + '/' + str(yr) + '.txt', 'w') as f:
for subdir, dirs, files in os.walk(dir):
for file in files:
ext = os.path.splitext(file)[-1].lower()
if xml is False:
try:
f.write(parse_txt_file(os.path.join(subdir, file)) + '\n')
except Exception as e:
print(e)
failed.write(os.path.join(subdir, file) + '\n')
failed.flush()
continue
elif ext == '.xml':
try:
f.write(parse_xml_file(os.path.join(subdir, file)) + '\n')
except:
failed.write(os.path.join(subdir, file) + '\n')
failed.flush()
continue
def main_nyt():
for yr in range(1987, 2008):
print(yr)
folder = '../LDC2008T19_The-New-York-Times-Annotated-Corpus/data/' + str(yr)
yr_directory(folder, yr, 'nyt')
def main_coha():
for yr in range(1810, 2010, 10):
print(yr)
folder = '../COHA text/' + str(yr) + 's'
yr_directory(folder, yr, 'coha', False)
def main_ldc95(outlet):
for yr in range(1994, 1997, 1):
print(yr)
folder = '../../LDC95T21-North-American-News/{}/{}/'.format(outlet,yr)
yr_directory(folder, yr, 'ldc95_'+outlet, False, outputdir = '../../LDC95T21-North-American-News/ldc95_databyyr/')
for outlet in ['NYT', 'LATWP', 'REUFF', 'REUTE', 'WSJ']:
main_ldc95(outlet)
# main_nyt()
# main_coha()
# parse_xml_file('examplearticle.xml', print_file = True)
| [
"neta-caspi@MacBook-Pro.local"
] | neta-caspi@MacBook-Pro.local |
4d845798bb70aeb5b3dc8d3806363ab292d44cc0 | 037bd176ab73aeaf139be5811a7605167d3032e0 | /app/server.py | 09be70a52e9832db64e841fd4cbebe9401a2d94e | [] | no_license | MoyaCar/BugsEyesDetector | 7db319c3c1c63dbaef45cf1b7859f39adcbcc9d0 | 1a89cef03205a1cb4b3cb7b743a7e579a7d84941 | refs/heads/master | 2022-07-15T08:57:43.956593 | 2019-09-30T01:54:01 | 2019-09-30T01:54:01 | 211,749,310 | 0 | 0 | null | 2022-06-21T23:00:13 | 2019-09-30T01:10:40 | Python | UTF-8 | Python | false | false | 2,346 | py | import aiohttp
import asyncio
import uvicorn
from fastai import *
from fastai.vision import *
from io import BytesIO
from starlette.applications import Starlette
from starlette.middleware.cors import CORSMiddleware
from starlette.responses import HTMLResponse, JSONResponse
from starlette.staticfiles import StaticFiles
export_file_url = 'https://drive.google.com/uc?export=download&id=113xue-mkys22GLYCDnx5eb8SJRMnyvWH'
export_file_name = 'export.pkl'
classes = ['fly','bee','spider']
path = Path(__file__).parent
app = Starlette()
app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type'])
app.mount('/static', StaticFiles(directory='app/static'))
async def download_file(url, dest):
if dest.exists(): return
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
data = await response.read()
with open(dest, 'wb') as f:
f.write(data)
async def setup_learner():
await download_file(export_file_url, path / export_file_name)
try:
learn = load_learner(path, export_file_name)
return learn
except RuntimeError as e:
if len(e.args) > 0 and 'CPU-only machine' in e.args[0]:
print(e)
message = "\n\nThis model was trained with an old version of fastai and will not work in a CPU environment.\n\nPlease update the fastai library in your training environment and export your model again.\n\nSee instructions for 'Returning to work' at https://course.fast.ai."
raise RuntimeError(message)
else:
raise
loop = asyncio.get_event_loop()
tasks = [asyncio.ensure_future(setup_learner())]
learn = loop.run_until_complete(asyncio.gather(*tasks))[0]
loop.close()
@app.route('/')
async def homepage(request):
html_file = path / 'view' / 'index.html'
return HTMLResponse(html_file.open().read())
@app.route('/analyze', methods=['POST'])
async def analyze(request):
img_data = await request.form()
img_bytes = await (img_data['file'].read())
img = open_image(BytesIO(img_bytes))
prediction = learn.predict(img)[0]
return JSONResponse({'result': str(prediction)})
if __name__ == '__main__':
if 'serve' in sys.argv:
uvicorn.run(app=app, host='0.0.0.0', port=5000, log_level="info")
| [
"carlosmoya@gmail.com"
] | carlosmoya@gmail.com |
820c2bd2006c8b43d126a6d5226df4dd461d5814 | b6ef959b538e4bffec92998a553175248bd72a77 | /06-Machine_Learning/brain.py | 1376cf1f411443947b708acf7499cd6bdf52de49 | [
"MIT"
] | permissive | suzynakayama/udemy-python-dev | 9e384e3683a300f07c14d2a5862003038a4b169c | fbb35d00f94296da1281e6042a4efe506f79dddb | refs/heads/main | 2023-02-10T11:50:47.650049 | 2021-01-07T22:46:52 | 2021-01-07T22:46:52 | 307,135,927 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | import os
from imageai.Classification import ImageClassification
# get current directory
execution_path = os.getcwd()
prediction = ImageClassification()
prediction.setModelTypeAsMobileNetV2()
prediction.setModelPath(os.path.join(execution_path, "mobilenet_v2.h5"))
prediction.loadModel()
predictions, probabilities = prediction.classifyImage(os.path.join(execution_path, "giraffe.jpg"), result_count=5 )
for eachPrediction, eachProbability in zip(predictions, probabilities):
print(eachPrediction , " : " , eachProbability) | [
"suzy.nakayama@gmail.com"
] | suzy.nakayama@gmail.com |
a433ae84fb074b61840e19b067915bc4fc1b848c | 490ffe1023a601760ae7288e86723f0c6e366bba | /kolla-docker/patching/zun_compute_api/provideraccount.py | a338bd5fd861592d8f7f624b5913d613b42fd69c | [] | no_license | bopopescu/Cloud-User-Management | 89696a5ea5d2f95191327fbeab6c3e400bbfb2b8 | 390988bf4915a276c7bf8d96b62c3051c17d9e6e | refs/heads/master | 2022-11-19T10:09:36.662906 | 2018-11-07T20:28:31 | 2018-11-07T20:28:31 | 281,786,345 | 0 | 0 | null | 2020-07-22T21:26:07 | 2020-07-22T21:26:06 | null | UTF-8 | Python | false | false | 1,679 | py | def provideraccount_update(self, context, container, *args):
if direct_action:
return self.manager.provideraccount_update(context, container, *args)
else:
return self.rpcapi.provideraccount_update(context, container, *args)
def provideraccount_show(self, context, container, *args):
if direct_action:
return self.manager.provideraccount_show(context, container)
else:
return self.rpcapi.provideraccount_show(context, container)
def provideraccount_create(self, context, new_provideraccount, extra_spec,
requested_networks):
host_state = None
try:
host_state = {} # self._schedule_container(context, new_provideraccount, extra_spec)
except Exception as exc:
# new_provideraccount.status = consts.ERROR
# new_provideraccount.status_reason = str(exc)
# new_provideraccount.save(context)
return
if direct_action:
self.manager.provideraccount_create(context, "", requested_networks, new_provideraccount)
else:
self.rpcapi.provideraccount_create(context, "", new_provideraccount, "", requested_networks)
# self.rpcapi.provideraccount_create(context, host_state['host'],
# new_provideraccount, host_state['limits'],
# requested_networks)
def provideraccount_delete(self, context, container, *args):
return self.manager.provideraccount_delete(context, container, True)
# return self.rpcapi.provideraccount_delete(context, container, *args)
| [
"Mr.Qinlichao@hotmail.com"
] | Mr.Qinlichao@hotmail.com |
0265f009c0e4e8d5a39c8b1b2673b159407c7b02 | 9868ee5c9b121097f824dac306b678a2761498af | /blog/models.py | dc0a3603c0a5f7c86bdbd2ee46799a8bf7570834 | [] | no_license | ShaikhHussain/portfolio_project | c5fbcbda4ea59f4db41d329de96c6b65eaf5bacc | 3064008d9d6f6083b65d5ab13c4ae2c230f76910 | refs/heads/master | 2020-07-08T20:08:41.462786 | 2019-08-22T09:45:42 | 2019-08-22T09:45:42 | 203,763,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | from django.db import models
# Create your models here.
class Blog(models.Model):
title = models.CharField(max_length=200)
publish_date = models.DateTimeField()
image = models.ImageField(upload_to='images/')
blogs_body = models.TextField()
def __str__(self):
return self.title
def summary(self):
return self.blogs_body[:100]
def modified_publish_date(self):
return self.publish_date.strftime("%d %B, %Y")
| [
"abdulshaikhhussain@gmail.com"
] | abdulshaikhhussain@gmail.com |
fd2f69e09e71f5891978da89ed3adc6501a7dd84 | a6ba42d4c838a2e1790984f12add463293bae942 | /todolist/urls.py | 5dc33cb0775ec3c9f346137650055ffe6998d0a8 | [] | no_license | santhoshcharan-001/todolist | 9614abb0d03d9d891d02a78f7d89486e07774bca | cd0fe620a784aefdcce174285f310b9a8f8af927 | refs/heads/main | 2023-08-01T22:49:43.474760 | 2021-09-15T09:41:35 | 2021-09-15T09:41:35 | 393,067,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | """todolist URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.admin.sites import site
from django.urls import path,include
from django.views.static import serve
from django.conf import settings
from django.conf.urls import url
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('applications.taskingapp.urls')),
url(r'^media/(?P<path>.*)$', serve,{'document_root': settings.MEDIA_ROOT}),
url(r'^static/(?P<path>.*)$', serve,{'document_root': settings.STATIC_ROOT}),
]
| [
"2019287@iiitdmj.ac.in"
] | 2019287@iiitdmj.ac.in |
9c477960dc9369da5c01e71b845d5177847a00cb | 8d582e36f3a86113d6eac6dec10d0c16b0c5595a | /ui.py | e84da1c8a2fd673d644229a36721a5804f443e6e | [] | no_license | jackz314/speech-synthesizer | c1a7e1d211e65101fc5f562fd9859b3f5fcfce25 | d42fb69d482a788b3d7157a83e0f8d258b9e80e3 | refs/heads/master | 2023-03-05T03:05:54.842687 | 2021-02-05T10:01:08 | 2021-02-05T10:01:08 | 332,403,218 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,278 | py | import multiprocessing
from multiprocessing import Process
import locale
import multiprocessing
import os
import sys
from multiprocessing import Process
from threading import Thread
import PySide2
import zmq
from PySide2.QtCore import QUrl, Qt, Signal, Slot
from PySide2.QtGui import QColor, QDesktopServices, QIcon, QTextCursor
from PySide2.QtWidgets import QApplication, QMainWindow, QTextEdit, QGridLayout, QPlainTextEdit, QWidget, \
QPushButton, QComboBox, QVBoxLayout, QFileDialog, QMessageBox, QCheckBox
from converter import ConverterController
# Pick the UI language from the system locale. The app ships only English
# and Chinese strings, so anything that is not English falls back to Chinese.
_loc = locale.getdefaultlocale()[0]
# locale.getdefaultlocale() can return (None, None) on misconfigured systems;
# `"en" in None` would raise TypeError, so default to English in that case.
sys_lang = "en" if _loc is None or "en" in _loc else "zh"

# Localized download page for Calibre (used as an e-book -> text converter).
calibre_link = f"https://calibre-ebook.com{'' if sys_lang == 'en' else '/zh_CN'}/download"

DATA_DIR = "./synthesizer_data"
RES_DIR = DATA_DIR + "/resources/"
class LogWidget(QTextEdit):
    """Log console that colorizes each appended message by its leading tag."""

    def log_message(self, msg):
        """Append *msg* at the end of the log, colored by severity, and
        autoscroll only when the view is already at the bottom."""
        self.moveCursor(QTextCursor.End)
        stripped = msg.lstrip()
        if stripped.startswith("[ERROR]"):
            color = Qt.red
        elif stripped.startswith("[DONE]"):
            color = Qt.green
        else:
            color = Qt.black
        self.setTextColor(color)
        self.insertPlainText(msg)
        self.moveCursor(QTextCursor.End)
        bar = self.verticalScrollBar()
        # Keep following new output only if the user has not scrolled up
        # to read older messages (small tolerance of 5 units).
        if bar.value() >= bar.maximum() - 5:
            bar.setValue(bar.maximum())
class InputTextEdit(QPlainTextEdit):
    """Plain-text input box that also accepts a single file dropped onto it.

    The dropped file's local path is forwarded through an externally supplied
    signal (see :meth:`set_signal`) instead of being inserted as text.
    """

    def set_signal(self, signal):
        """Store the signal used to publish dropped file paths."""
        self.signal = signal

    def dragEnterEvent(self, e: PySide2.QtGui.QDragEnterEvent) -> None:
        """Accept the drag only when it carries exactly one URL."""
        mime = e.mimeData()
        if mime.hasUrls() and len(mime.urls()) == 1:
            e.acceptProposedAction()

    def dropEvent(self, e: PySide2.QtGui.QDropEvent) -> None:
        """Emit the local filesystem path of the single dropped file."""
        mime = e.mimeData()
        if not mime.hasUrls() or len(mime.urls()) != 1:
            print("ERROR Drop Event", e)
            return
        url = mime.urls()[0]
        if not url:
            return
        path = url.toLocalFile()
        print("Got dropped file:", path)
        self.signal.emit(path)
class Main(QMainWindow):
new_log = Signal(str)
new_input = Signal(str)
new_download = Signal(str)
new_file = Signal(str)
conversion_status = Signal()
def msg_listener(self):
if not self.sub_socket:
self.sub_socket = zmq.Context().socket(zmq.SUB)
self.sub_socket.connect("tcp://127.0.0.1:10290")
self.sub_socket.setsockopt(zmq.SUBSCRIBE, b"")
self.sub_socket.setsockopt(zmq.LINGER, 0)
while True:
msg = self.sub_socket.recv_string()
cmd, data = msg.split("|", maxsplit=1)
# print("Got converter msg", cmd, data)
if cmd == "[file-content]":
self.emit_input(data)
elif cmd == "[log]":
self.emit_log(data, end="")
elif cmd == "[download]":
self.emit_log(data)
self.new_download.emit(data)
elif cmd == "[conversion-done]":
self.conversion_status.emit()
elif cmd == "[crash]":
self.emit_log(f"Converter crashed, exiting... ({data})")
else:
print("Unknown message:", msg)
continue
# QApplication.processEvents()
def emit_log(self, msg, end="\n"):
self.new_log.emit(msg + end)
def emit_input(self, s):
self.new_input.emit(s)
@Slot(str)
def log(self, msg):
self.status_output.log_message(msg)
@Slot(str)
def update_text_input(self, s):
self.text_input.setPlainText(s)
self.text_input.setPlaceholderText("Paste in text you want to hear, or select a file to see its content here." if sys_lang == "en" else "输入文本或选择文件来预览")
self.from_file = True
@Slot(str)
def show_download_dialog(self, s):
reply = QMessageBox.information(self, "Download Calibre", s, QMessageBox.Ok | QMessageBox.Cancel)
if reply == QMessageBox.Ok:
QDesktopServices.openUrl(QUrl(calibre_link))
# msgBox.setIcon(QMessageBox.Information)
# msgBox.setText(s)
# msgBox.setWindowTitle("Download Calibre")
# msgBox.setStandardButtons(QMessageBox.Cancel | QMessageBox.Ok)
# downBtn = msgBox.button(QMessageBox.Ok)
# downBtn.setText("Download")
@Slot()
def conversion_done(self):
self.result_btn.setEnabled(True)
def log_with_end(self, msg, end="\n"):
self.log(msg + end)
def msg_sender(self, cmd, msg):
try:
self.pub_socket.send_string(cmd + "|" + msg, zmq.NOBLOCK)
except zmq.error.Again:
print("No subscriber. Send again later:", cmd, msg)
def start_convert(self):
print("Starting conversion...")
self.msg_sender("[lang]", self.language_dropdown.currentData())
self.msg_sender("[esp-model]", self.esp_model_dropdown.currentData())
self.msg_sender("[vocoder-model]", self.vocoder_model_dropdown.currentData())
# self.msg_sender("[calibre]", "1" if self.calibre_checkbox.isChecked() else "0")
self.msg_sender("[convert]", ("" if self.from_file else self.text_input.toPlainText()))
self.cfg.set('main', 'lang', str(self.language_dropdown.currentIndex()))
self.cfg.set('main', 'esp', str(self.esp_model_dropdown.currentIndex()))
self.cfg.set('main', 'vocoder', str(self.vocoder_model_dropdown.currentIndex()))
self.cfg.set('main', 'calibre', str(self.calibre_checkbox.isChecked()))
with open('./config.ini', encoding="utf-8", mode="w") as f:
self.cfg.write(f)
# self.converter_executor.submit(self.converter.convert)
def select_file(self):
fileName, _ = QFileDialog.getOpenFileName(self,"Select a file" if sys_lang == "en" else "选择一个文档",
"","All Files (*);;Documents (*.txt *.pdf *.doc *.docx *.rtf *.htm *.html);;")
if not fileName: return
self.load_file(fileName)
@Slot(str)
def load_file(self, fileName):
self.text_input.clear()
self.log_with_end("Reading from " + fileName)
self.msg_sender("[file]", fileName)
self.text_input.setPlaceholderText("Loading file..." if sys_lang == "en" else "加载文件中。。")
def select_save_folder(self):
dir_name = QFileDialog.getExistingDirectory(self,"Choose a place to save the output" if sys_lang == "en" else "选择输出文件夹",self.cfg.get("main", "out_dir", fallback=""))
if not dir_name: return
self.log_with_end("Saving to " + dir_name)
self.msg_sender("[out-dir]", dir_name)
self.cfg.set('main', 'out_dir', dir_name)
with open('./config.ini', encoding="utf-8", mode="w") as f:
self.cfg.write(f)
def text_changed(self):
self.from_file = False
if self.text_input.document().isEmpty() or self.text_input.toPlainText().isspace():
self.convert_btn.setEnabled(False)
else:
self.convert_btn.setEnabled(True)
def open_result(self):
out_dir = self.cfg.get("main", "out_dir", fallback=".")
if not out_dir: out_dir = "."
QDesktopServices.openUrl(QUrl.fromLocalFile(out_dir + "/out.wav"))
def change_lang(self, idx):
lang = self.language_dropdown.itemData(idx)
self.msg_sender("[lang]", lang)
def change_esp_model(self, idx):
model = self.esp_model_dropdown.itemData(idx)
self.msg_sender("[esp-model]", model)
def change_vocoder_model(self, idx):
model = self.vocoder_model_dropdown.itemData(idx)
self.msg_sender("[vocoder-model]", model)
def calibre_change(self, checked):
self.msg_sender("[calibre]", "1" if checked else "0")
def create_control_group(self):
group = QVBoxLayout()
self.language_dropdown = QComboBox()
self.language_dropdown.addItem("Auto detect", "")
self.language_dropdown.addItem("English", "en")
self.language_dropdown.addItem("中文", "zh")
self.language_dropdown.setCurrentIndex(int(self.cfg.get("main", "lang", fallback='0')))
# self.language_dropdown.currentIndexChanged.connect(self.change_lang)
self.esp_model_dropdown = QComboBox()
self.esp_model_dropdown.addItem("conformer+fastspeech2", "conformer_fastspeech2")
self.esp_model_dropdown.addItem("tacotron2", "tacotron2")
self.esp_model_dropdown.addItem("fastspeech2", "fastspeech2")
self.esp_model_dropdown.addItem("fastspeech", "fastspeech")
self.esp_model_dropdown.setCurrentIndex(int(self.cfg.get("main", "esp", fallback='0')))
# self.esp_model_dropdown.currentIndexChanged.connect(self.change_esp_model)
self.vocoder_model_dropdown = QComboBox()
self.vocoder_model_dropdown.addItem("multi-band melgan", "multi_band_melgan")
self.vocoder_model_dropdown.addItem("parallel wavegan", "parallel_wavegan")
self.vocoder_model_dropdown.addItem("full-band melgan", "full_band_melgan")
self.vocoder_model_dropdown.setCurrentIndex(int(self.cfg.get("main", "vocoder", fallback='0')))
# self.vocoder_model_dropdown.currentIndexChanged.connect(self.change_vocoder_model)
self.calibre_checkbox = QCheckBox("Always use Calibre" if sys_lang == "en" else "强制使用Calibre")
self.calibre_checkbox.setChecked(self.cfg.get("main", "calibre", fallback='False') == "True")
self.calibre_checkbox.stateChanged.connect(self.calibre_change)
self.file_btn = QPushButton("Open File" if sys_lang == "en" else "打开文档")
self.file_btn.clicked.connect(self.select_file)
self.save_btn = QPushButton("Output Folder" if sys_lang == "en" else "输出文件夹")
self.save_btn.clicked.connect(self.select_save_folder)
self.convert_btn = QPushButton("Convert" if sys_lang == "en" else "转换")
self.convert_btn.setEnabled(False)
self.convert_btn.clicked.connect(self.start_convert)
self.result_btn = QPushButton("Open Result" if sys_lang == "en" else "打开结果")
self.result_btn.setEnabled(False)
self.result_btn.clicked.connect(self.open_result)
group.addWidget(self.language_dropdown)
group.addWidget(self.esp_model_dropdown)
group.addWidget(self.vocoder_model_dropdown)
group.addWidget(self.calibre_checkbox)
group.addWidget(self.file_btn)
group.addWidget(self.save_btn)
group.addSpacing(100)
group.addWidget(self.convert_btn)
group.addSpacing(100)
group.addWidget(self.result_btn)
group.addStretch(1)
return group
def init_converter(self):
self.converter_process = Process(target=ConverterController)
self.converter_process.daemon = True
self.converter_process.start()
ctx = zmq.Context()
self.pub_socket = ctx.socket(zmq.PUB)
self.pub_socket.bind("tcp://127.0.0.1:10289")
self.pub_socket.setsockopt(zmq.LINGER, 0)
self.sub_socket = None
# self.converter = Converter(comm=self.log)
def resizeEvent(self, event:PySide2.QtGui.QResizeEvent) -> None:
QMainWindow.resizeEvent(self, event)
print("New size:", event.size())
def __init__(self):
print("UI RUNNING!")
QMainWindow.__init__(self)
self.new_log.connect(self.log)
self.new_input.connect(self.update_text_input)
self.new_download.connect(self.show_download_dialog)
self.conversion_status.connect(self.conversion_done)
from configparser import ConfigParser
self.cfg = ConfigParser()
self.cfg.read("./config.ini")
if 'main' not in self.cfg.sections():
self.cfg.add_section('main')
screen_rect = QApplication.primaryScreen().geometry()
print("Screen size:", screen_rect.width(), screen_rect.height())
self.resize(screen_rect.width() * 0.45, screen_rect.height() * 0.67)
self.setWindowTitle("Speech Synthesizer")
self.setWindowIcon(QIcon(RES_DIR + '/speech_synthesizer.svg'))
self.setCentralWidget(QWidget())
self.main_layout = QGridLayout()
self.centralWidget().setLayout(self.main_layout)
self.text_input = InputTextEdit()
self.text_input.set_signal(self.new_file)
self.new_file.connect(self.load_file)
self.text_input.resize(70,100)
self.text_input.setPlaceholderText("Paste in text you want to hear, or select a file to see its content here." if sys_lang == "en" else "输入文本或选择文件来预览")
font = self.text_input.font()
font.setPointSize(12)
self.text_input.setFont(font)
self.text_input.textChanged.connect(self.text_changed)
self.status_output = LogWidget()
font = self.status_output.font()
font.setPointSize(10)
self.status_output.setFont(font)
self.status_output.setReadOnly(True)
self.control_group = self.create_control_group()
self.main_layout.addWidget(self.text_input, 0, 0)
self.main_layout.addWidget(self.status_output, 2, 0, 1, 3)
self.main_layout.addLayout(self.control_group, 0, 1)
self.main_layout.setRowStretch(0, 2)
self.main_layout.setRowStretch(2, 1)
self.main_layout.setColumnStretch(0, 5)
self.main_layout.setColumnStretch(1, 1)
self.show()
self.log_with_end("Initializing converter" if sys_lang == 'en' else '正在初始化转换器')
self.init_converter()
self.msg_thread = Thread(target=self.msg_listener)
self.msg_thread.setDaemon(True)
self.msg_thread.start()
# self.converter_executor = ThreadPoolExecutor(max_workers=1)
# self.converter_executor.submit(self.init_converter)
# def closeEvent(self, event:PySide2.QtGui.QCloseEvent) -> None:
# self.__del__()
# event.accept()
def terminate(self):
if self.pub_socket:
self.pub_socket.send_string("[exit]")
self.pub_socket.close()
if self.sub_socket:
self.sub_socket.close()
if self.converter_process and self.converter_process.is_alive():
self.converter_process.terminate()
self.converter_process.join(timeout=2)
if self.converter_process.is_alive():
self.converter_process.kill()
# if self.msg_thread:
# self.msg_thread.join(timeout=2)
# print("Exiting")
def __del__(self):
self.terminate()
if __name__ == '__main__':
if os.name == "nt": # Windows quirks
multiprocessing.freeze_support()
import ctypes
myappid = u'com.jackz314.speech_synthesizer'
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
app = QApplication(sys.argv)
main = Main()
sys.exit(app.exec_()) | [
"zhangmengyu10@gmail.com"
] | zhangmengyu10@gmail.com |
87dcdc1f187f0619115ef51295c60468005bd5f3 | dcce56815dca2b18039e392053376636505ce672 | /dumpscripts/itertools_filterfalse.py | 4db9836daa58ad384b41f161c27d4886ab93f22c | [] | no_license | robertopauletto/PyMOTW-it_3.0 | 28ff05d8aeccd61ade7d4107a971d9d2576fb579 | c725df4a2aa2e799a969e90c64898f08b7eaad7d | refs/heads/master | 2021-01-20T18:51:30.512327 | 2020-01-09T19:30:14 | 2020-01-09T19:30:14 | 63,536,756 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | # itertools_filterfalse.py
from itertools import *
def check_item(x):
print('Verifica:', x)
return x < 1
for i in filterfalse(check_item, [-1, 0, 1, 2, -2]):
print('Trattengo:', i)
| [
"roberto.pauletto@gmail.com"
] | roberto.pauletto@gmail.com |
866315edcd99aed6f352e4f094a4bec0b268966f | 8fb3dad615426e7d7dd5b37f385138f38c7dedf2 | /spark-1.1.0/spark-base/files/etcd-service/Constants.py | 805270223cb0bdb58b58a88bd0733445a6e227aa | [
"Apache-2.0"
] | permissive | LuqmanSahaf/etcd-spark | 53ee6c0d3d7c315e99f9c9dd8b7a214efbe6fd3b | 0f3b9db820b243645365cd8a442943611be758ef | refs/heads/master | 2021-01-22T02:34:18.542297 | 2014-10-29T10:49:52 | 2014-10-29T10:49:52 | 24,637,843 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | ETCD_PORT = '4001'
ETCD_KEYS_DIRECTORY = 'etcd_spark/etcd_service'
HOSTS_FILE = '/etc/hosts'
TTL=60
| [
"lgsahaf@gmail.com"
] | lgsahaf@gmail.com |
da159561015fdb97e791d62db109c2cf4ba71be4 | cd6694d484744fe5be8ccdaf9be50005fb6e956b | /spoj/even_numbers.py | a5a94ac4026ababeb9ec054eaf8276c53d52aba7 | [] | no_license | MrDm1try/Euler | aa40d8d52774b79c76b332306b43f28da8a4fba1 | ef311da6072c953284606b09e1117cf2f3837033 | refs/heads/master | 2020-02-26T15:03:32.842419 | 2018-04-11T16:00:13 | 2018-04-11T16:00:13 | 83,286,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | def revert(num):
res = 0
while num:
res <<= 1
res |= num & 1
num >>= 1
return res
n = int(input())
numbers = []
for i in range(n):
num = int(input())
numbers.append(num if num % 2 == 1 else revert(num))
for i in numbers:
print(i) | [
"dmr@kamstrup.com"
] | dmr@kamstrup.com |
629f6895bc1d0a0101bef7c2b69ca4bb864c172b | 83f85eb8247e994ccdd08c6caac359fd10147478 | /hips.py | 2f24596186abf8ffb0426d95f3aefc78f707afb4 | [] | no_license | bufferbandit/hips | 0e9ef021fa8a36b0cd043d287a9dcf78270acc4a | 822257aafc169b8f4120cf2ffa1fe500fd5aba9b | refs/heads/master | 2020-04-04T16:29:28.926782 | 2020-01-27T09:39:08 | 2020-01-27T09:39:08 | 156,080,248 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,573 | py | # H.I.P.S, by BufferBandit.
# Hide your HTML source code by encoding it with tabs and spaces...
import binascii
class HIPS:
def __init__(self,raw_html):
self.space,self.tab = " "," "
self.raw_html = raw_html
def text_to_bits(self,text, encoding='utf-8', errors='surrogatepass'):
bits = bin(int(binascii.hexlify(text.encode(encoding, errors)), 16))[2:]
return bits.zfill(8 * ((len(bits) + 7) // 8))
def get_encoded_bits(self,raw_html):
bits = self.text_to_bits(self.raw_html)
invisible_bits = bits.replace("0",self.space).replace("1",self.tab)
return invisible_bits
def create_html_file(self):
invisible_bits = self.get_encoded_bits(self.raw_html )
#html_string = "<script>"+ "/"+invisible_bits +"""/.source.replace(/ /g, "0").replace(/ /g, "1").replace(/(\d{8})/g, '$1 ').replace(/(^\s+|\s+$)/,'').replace(/\d+./g,str=>String.fromCharCode('0b'+str)).replace(/.{7}/g,function(w){document.write(w)})""" +"</script>"
html_string = "<script>"+ "/"+invisible_bits +"""/.source.replace(/ /g, "0").replace(/ /g, "1").match(/.{8}/g).join(' ').replace(/\d+./g,str=>String.fromCharCode('0b'+str)).split().map(_var =>document.write(_var)) """ +"</script>"
return html_string
def write_html(self,out_file="hips.html"):
with open(out_file,"w") as f:
f.write(self.create_html_file())
if __name__ == "__main__":
hips = HIPS("<svg/onload=alert('Hidden in plain sight!')>")
hips.write_html()
| [
"noreply@github.com"
] | bufferbandit.noreply@github.com |
4261d8986f56637cd42771daa6474eafbddebbf2 | 384e9cfef73a8f3374cb048ee608d3268b739d66 | /lora_abp/config.py | 715e0de47f741d1c653185603e0c75e78c895abd | [] | no_license | rbraggaar/sensor-city-delft | 005ed7ccf60e6420d73903c48ac497dc6f11eb54 | 9b580a7232e010ed70512eccb7aebe2117461cad | refs/heads/master | 2021-01-19T23:32:11.160937 | 2017-06-12T13:17:37 | 2017-06-12T13:17:37 | 88,988,205 | 16 | 9 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | """
Change these for your own keys (see https://loradeveloper.mendixcloud.com/)
Type has to be string
"""
DEV_ADDR = 'dev addr'
NWKS_KEY = 'network shared key'
APPS_KEY = 'applications shared key'
| [
"r.c.braggaar@outlook.com"
] | r.c.braggaar@outlook.com |
42b4ad526ebe9485065450c2f0c41f7a0afd5a4f | 5d98fd658cff0fd5aa5b4f912d34dc7610f6980a | /src/core/cli/commands/__init__.py | d4d506dcbcac52d934f377536ff4f8c2cc7a4aee | [] | no_license | lxbrvr/aiohttp-chat | 6de33e98a5537dbe1439b34178a01a618a36e201 | d5cb8fa3c3f294c7c00d0a09b73ddd06342efda9 | refs/heads/master | 2022-06-19T10:42:58.952457 | 2019-09-18T09:40:09 | 2019-09-18T09:40:09 | 208,893,619 | 5 | 0 | null | 2022-05-25T02:58:24 | 2019-09-16T20:38:58 | Python | UTF-8 | Python | false | false | 123 | py | command_classes = [
'core.cli.commands.clean_db.CleanDbAppCommand',
'core.cli.commands.run.RunServerAppCommand',
]
| [
"to100100100@gmail.com"
] | to100100100@gmail.com |
fa168001eed0986c591712b65a78378669b9066b | d7851c13caeec784de188e97c912ab3edb2b10fd | /classifier/__init__.py | e265178fd1e40900289a101d7ed781fe05b19502 | [] | no_license | caozhang1996/CS231N_Two_Layer_Neural_Network | 5b7f2737b460ac3f50b11527efe63dfc03aac9d0 | 4a3d1027c20210f22ae538cb69a03aaedb175d2c | refs/heads/master | 2020-05-24T20:53:54.115208 | 2019-05-19T10:50:07 | 2019-05-19T10:50:07 | 187,464,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 4 16:43:54 2019
@author: caozhang
"""
| [
"caozhang1996@gmail.com"
] | caozhang1996@gmail.com |
747ca14a18296d4beabd473f554d3da345152774 | 847273de4b1d814fab8b19dc651c651c2d342ede | /.history/sok2_20180606104430.py | e3d724e0922456fec8afc2db0669485e5ed3545c | [] | no_license | Los4U/sudoku_in_python | 0ba55850afcffeac4170321651620f3c89448b45 | 7d470604962a43da3fc3e5edce6f718076197d32 | refs/heads/master | 2020-03-22T08:10:13.939424 | 2018-07-04T17:21:13 | 2018-07-04T17:21:13 | 139,749,483 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | row1 = [0,0,0,0,0,0,0,0,0]
row2 = [0,0,0,5,0,6,0,0,0]
row3 = [0,0,1,0,0,0,0,3,0]
row4 = [0,9,5,0,0,0,2,0,0]
row5 = [0,0,0,0,0,1,6,0,7]
row6 = [1,0,6,0,0,9,0,0,5]
row7 = [7,0,0,8,0,3,9,0,0]
row8 = [0,3,8,9,0,0,0,2,0]
row9 = [0,5,0,0,2,0,7,0,0]
print(row1)
print(row2)
print(row3)
print("")
print(row4)
print(row5)
print(row6)
print("")
print(row7)
print(row8)
print(row9)
while True:
x = input("Wprowadz x y z:")
try:
if int(x[0])==1:
row1[int(x[2])-1]=x[4]
print("ok")
except ValueError: # przechwytuje wyjątek literę i kończy program.
print("Wprowadz cyfrę!")
continue
print(row1[0],row1[1],row1[2], sep=' ', end=" - ")
print(row1[3],row1[4],row1[5], sep=' ', end=" - ")
print(row1[6],row1[7],row1[8], sep=' ')
print(row1[0],row1[1],row1[2], sep=' ', end=" - ")
print(row1[3],row1[4],row1[5], sep=' ', end=" - ")
print(row1[6],row1[7],row1[8], sep=' ')
#print(str(*r11, sep='') + "-" + str(r12) + " - " + str(r13))
print(row2)
print(row3)
print(""),
print(row4)
print(row5)
print(row6)
print("")
print(row7)
print(row8)
print(row9)
#print(new)
#rds.insert(index, "is") | [
"inz.kamil.wos@gmail.com"
] | inz.kamil.wos@gmail.com |
e5853a5dc4dcb39d89592c1c7218567fabf657b8 | ab7909a49f372c82baa1e8031eb46a1a913796aa | /selenium_spider.py/src/driver.py | c449e19872e9d30d26625008a2b70914ba883c4e | [] | no_license | Bortolosso/extract-data-tjsp | 5fde991cd6e8f71e86f6b4ee686bac9659d3deaf | 2a8c94a0d1e5582ccde1353fcaa795c77adf0ebc | refs/heads/master | 2020-06-14T05:55:40.530546 | 2019-08-16T13:45:15 | 2019-08-16T13:45:15 | 194,918,850 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | #!/usr/bin/python -tt
# coding: utf-8
from selenium import webdriver
class Driver:
def __init__(self):
local_dir_pdf = "/home/bortolossohurst/Documents/ambv_boot/selenium_spider.py/temp/pdf"
local_dir_driver = "/home/bortolossohurst/Documents/ambv_boot/selenium_spider.py/driver/chromedriver75"
url_tjsp = "https://www.tjsp.jus.br/cac/scp/webmenupesquisa.aspx"
options = webdriver.ChromeOptions()
options.add_experimental_option('prefs', {
"download.default_directory": local_dir_pdf, #Alterar diretório padrão para downloads
"download.prompt_for_download": False, #Para baixar automaticamente o arquivo
"download.directory_upgrade": True,
"plugins.always_open_pdf_externally": True, #Não mostrará PDF diretamente no chrome
})
self.__driver = webdriver.Chrome( options=options, executable_path = local_dir_driver)
| [
"bortolossojoao@gmail.com"
] | bortolossojoao@gmail.com |
3b15be8b188528ce122516e650d067191f2ec45e | 47f4ba73b25ebc1e920f85295c51171c11ec78f4 | /Tkinter/ProgessBarHttpDownload.py | cb4c017af1e3f71b65704c54ad442c134f1d06e9 | [] | no_license | pedroazcona/PythonLearning | 45e04e76d94bfea304e1924d2f7661ed497a5312 | 514e2851defce4cf911d338542c7f738167f2a57 | refs/heads/master | 2021-04-30T15:36:48.423600 | 2018-02-13T10:39:04 | 2018-02-13T10:39:04 | 121,242,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,827 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 16 09:42:36 2018
@author: poaa
"""
import tkinter as tk
from tkinter import ttk
from threading import Thread
from urllib.request import urlretrieve, urlcleanup
class Application(ttk.Frame):
def __init__(self, main_window):
super().__init__(main_window)
main_window.title("Barra de progreso en Tk")
self.progressbar = ttk.Progressbar(self)
self.progressbar.place(x=30, y=60, width=200)
self.download_button = ttk.Button(
self, text="Descargar", command=self.download_button_clicked)
self.download_button.place(x=30, y=20)
self.progres_label=ttk.Label(self, text='')
self.progres_label.place(x=250, y=60)
self.place(width=300, height=200)
main_window.geometry("300x200")
def download(self):
url = "https://4.img-dpreview.com/files/p/TS5504x8256~sample_galleries/5682971467/6143475567.jpg"
urlretrieve(url, r'C:\Users\poaa\Documents\Python Scripts\Tkinter\foto.jpg', self.download_status)
urlcleanup()
self.progres_label['text']='Done'
def download_button_clicked(self):
# Descargar el archivo en un nuevo hilo.
Thread(target=self.download).start()
def download_status(self, count, data_size, total_data):
if count == 0:
# Establecer el máximo valor para la barra de progreso.
self.progressbar.configure(maximum=total_data)
else:
# Aumentar el progreso.
self.progressbar.step(data_size)
self.progres_label['text']=str(int((self.progressbar['value']/self.progressbar['maximum'])*100))+'%'
main_window = tk.Tk()
app = Application(main_window)
app.mainloop()
| [
"pedroazcona@gmail.com"
] | pedroazcona@gmail.com |
4958e340325e2b1f63b19ed5aa66405c1d5d7cf5 | 3b7930868ea6b052e10202d4ba723de753d71853 | /todoauth/todo/urls.py | d431f23916cbf4a5d3ed6c35543e28d325708219 | [] | no_license | Daniel3M/ToDoAuth | c6b444ac485d8e5048f480856f14f7a6d6a9c599 | abf2ca13ddf55576e58ae268cc12f3832694638f | refs/heads/master | 2023-06-24T21:43:12.774994 | 2021-07-18T08:54:29 | 2021-07-18T08:54:29 | 387,129,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | from django.urls import path
from .views import CustomLoginView, TaskList, TaskDetail, TaskCreate, TaskUpdate, DeleteView, RegisterPage
from django.contrib.auth.views import LogoutView
urlpatterns = [
path('login/', CustomLoginView.as_view(), name='login'),
path('logout/', LogoutView.as_view(next_page='login'), name='logout'),
path('register/', RegisterPage.as_view(), name= 'register'),
path('', TaskList.as_view(), name='tasks'),
path('task/<int:pk>/', TaskDetail.as_view(), name='task'),
path('task-create/', TaskCreate.as_view(), name='task-create'),
path('task-update/<int:pk>/', TaskUpdate.as_view(), name='task-update'),
path('task-delete/<int:pk>/', DeleteView.as_view(), name='task-delete'),
] | [
"daniel.muhammad96@gmail.com"
] | daniel.muhammad96@gmail.com |
6f34903b427e2c0394314d9a1fb819e582ee7026 | 590de10b365aeb28cfffde5fde02fd36ef18e9d5 | /src/covcov/infrastructure/db/schema/company_domain.py | cc294456d65b0a8cf27cd3028b15d693ed82a429 | [] | no_license | W-Alphonse/covid-app | 1b0c6b4ef19d274016a97c65eea557f1327caadf | 5f8b3953c6fd5c2bd2afd3077a20c90aa1feb396 | refs/heads/master | 2023-07-23T15:23:31.287461 | 2021-05-13T21:10:44 | 2021-05-13T21:10:44 | 321,175,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,138 | py | import datetime
from sqlalchemy import Column, Integer, Unicode, String, ForeignKey, Boolean, DateTime, update
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.orm import relationship, Session
from sqlalchemy_serializer import SerializerMixin
from sqlalchemy_utils import EmailType
from sqlalchemy import BLOB
from covcov.application.BusinessException import BusinessException
from covcov.infrastructure.db import Base
from covcov.infrastructure.db.schema.base_domain import BaseTable
from covcov.infrastructure.kmscrypt import crypto
sql_current_zone_count = "select count(*) as current_zone_count from zone where deleted = False and room_id in (select id from room where company_id = '{company_id}' and deleted = False)"
def local_execute_after_select(db, payload_attr:dict, company_id:str) -> dict:
current_zone_count = db.native_select_rows([sql_current_zone_count.format(company_id=company_id)])[0]
payload_attr.update({"current_zone_count": current_zone_count["current_zone_count"][0]})
return payload_attr
# ========
# Company
# ========
class Company(Base, BaseTable, SerializerMixin):
__tablename__ = 'company'
__table_args__ = {'extend_existing': True}
OFFER_FREE = 'FREE'
OFFER_DISCOV = 'DISCOV'
OFFER_STD = 'STD'
OFFER_ENTR = 'ENTR'
OFFER_PREM = 'PREM'
# OFFER_PREM_PUS = 'PREM_P'
#
DISCOV_VISITOR_PM = 10
id = Column(Unicode(BaseTable.SUB_SIZE), primary_key=True) # cognito:sub
name = Column(Unicode(50), nullable=False) # custom:company_name
type = Column(Unicode(10)) # ENTRE / ADMIN / REST / ... # custom:etablissement
siret = Column(Unicode(14))
address = Column(Unicode(300))
zip_code = Column(Unicode(10))
country_code = Column(Unicode(2), default='FR')
phone_number = Column(Unicode(20))
email = Column(EmailType(length=64), unique=True, nullable=False)
contact_fname = Column(Unicode(20))
contact_lname = Column(Unicode(20))
url = Column(Unicode(128))
encrypted_data_key = Column(BLOB)
iv = Column(BLOB)
#
offer = Column(Unicode(10), default=OFFER_FREE, nullable=False) # FREE | DISCOV | STD | ENTR | PREM
contractual_visitor_pmonth = Column(Integer, default=DISCOV_VISITOR_PM, nullable=False)
visitor_on_last_count = Column(Integer, default=0, nullable=False)
visit_on_last_count = Column(Integer, default=0, nullable=False)
last_count_dt = Column(DateTime, default=datetime.datetime.now, nullable=False)
#
# contractual_visit_per_month = Column(Integer, default=VISIT_PM_DISCOV, nullable=False)
# cumulative_visit_per_month = Column(Integer, default=0, nullable=False)
# visit_threshold_readched = Column(Boolean(), default=False, nullable=False) # TODO: Remove it
max_zone = Column(Integer, default=10000, nullable=False)
#
deleted = Column(Boolean(), default=False, nullable=False)
creation_dt = Column(DateTime, default=datetime.datetime.now, nullable=False)
activation_dt = Column(DateTime, default=datetime.datetime.now, nullable=False)
deletion_dt = Column(DateTime)
#
rooms = relationship("Room", cascade="all,delete-orphan", backref="company", primaryjoin="and_(Room.company_id==Company.id, Room.deleted==False)", lazy="select" ) # https://gist.github.com/davewsmith/ab41cc4c2a189ecd4677c624ee594db3
@classmethod
def enhance_payload_with_auth_token(cls, payload_attr:dict, auth_claims:dict):
payload_attr.update({'email': auth_claims['email']})
@classmethod
def execute_after_select(cls, db, payload_attr:dict):
return local_execute_after_select(db, payload_attr, payload_attr['id'])
@classmethod
def execute_before_insert(cls, payload_attr:dict, additionnal_ctx):
encrypted_data_key, iv = crypto.generate_data_key(additionnal_ctx.kms_clt, additionnal_ctx.kms_key_id, cls.get_encryption_context(payload_attr['id']) )
payload_attr['encrypted_data_key'] = encrypted_data_key
payload_attr['iv'] = iv
@classmethod
def get_encryption_context(cls, id:str) -> {}:
return {}
# return {'company' : id }
@classmethod
def get_serialize_rules(cls):
return ('-encrypted_data_key', '-iv')
def __repr__(self):
return f"{self.__tablename__}({self.id}, {self.name}, {self.address}, {self.zip_code}, {self.country_code})"
#======
# ROOM
#======
class Room(Base, BaseTable, SerializerMixin):
__tablename__ = 'room'
__table_args__ = {'extend_existing': True}
id = Column(String(10), primary_key=True)
description = Column(String(30), nullable=False)
company_id = Column(Unicode(BaseTable.SUB_SIZE), ForeignKey("company.id", ondelete='CASCADE'), nullable=False)
deleted = Column(Boolean(), default=False, nullable=False)
creation_dt = Column(DateTime, default=datetime.datetime.now, nullable=False)
activation_dt = Column(DateTime, default=datetime.datetime.now, nullable=False)
deletion_dt = Column(DateTime)
#
zones = relationship("Zone", cascade="all, delete-orphan", backref="room", primaryjoin="and_( and_(Zone.room_id==Room.id, Room.deleted==False) , Zone.deleted==False)", lazy="joined") # select=>lazy | joined=>eager
serialize_rules = ('-company',)
@classmethod
def enhance_payload_with_auth_token(cls, payload_attr:dict, auth_claims:dict):
payload_attr.update({'company_id': auth_claims['sub']})
@classmethod
def execute_before_upsert(cls, payload_attr:dict):
handle_delete_flag(payload_attr)
@classmethod
def execute_on_update(cls, session:Session, id:str, cloned_payload:dict):
if 'deleted' in cloned_payload :
company_id = cloned_payload.pop('company_id')
session.execute( update(Zone).where(Zone.room_id == id).values(cloned_payload) )
@classmethod
def execute_after_update(cls, db, company_id:str, cloned_payload:dict):
if 'deleted' in cloned_payload :
return local_execute_after_select(db, cloned_payload, company_id)
def __repr__(self):
return f"{self.__tablename__}({self.id}, {self.description}, FK.company_id={self.company_id})"
def handle_delete_flag(payload_attr:dict):
if 'deleted' in payload_attr :
if payload_attr.get('deleted') :
payload_attr['deletion_dt'] = datetime.datetime.now()
else :
payload_attr['deletion_dt'] = None
payload_attr['activation_dt'] = datetime.datetime.now()
#======
# ZONE
#======
''' result can be 1 or 2 values dataset; It interpretation follow the folowing priorities:
dataset == 2 rows => Max Zone not reached yet
dataset == 1 row => Max Zone reached and tentative to excced max_zone '''
max_zone_sql = "select false as tentative_exceeding_max_zone, " \
"(select count(*) from zone where deleted = False and room_id in (select id from room where company_id = '{company_id}' and deleted = False)) as current_zone_count " \
" from company c where c.id = '{company_id}' and ( c.max_zone = -1 or c.max_zone > " \
"(select count(*) from zone where deleted = False and room_id in (select id from room where company_id = '{company_id}' and deleted = False)) - {p_is_row} ) " \
"union select true as tentative_exceeding_max_zone, max_zone as current_zone_count from company c where c.id = '{company_id}' " \
"order by tentative_exceeding_max_zone"
class Zone(Base, BaseTable, SerializerMixin):
__tablename__ = 'zone'
__table_args__ = {'extend_existing': True}
id = Column(String(10), primary_key=True)
description = Column(String(30), nullable=False)
room_id = Column(Unicode(10), ForeignKey("room.id", ondelete='CASCADE'), nullable=False)
deleted = Column(Boolean(), default=False, nullable=False)
creation_dt = Column(DateTime, default=datetime.datetime.now, nullable=False)
activation_dt = Column(DateTime, default=datetime.datetime.now, nullable=False)
deletion_dt = Column(DateTime)
#
serialize_rules = ('-room',)
@classmethod
def execute_before_upsert(cls, payload_attr:dict):
handle_delete_flag(payload_attr)
def __repr__(self):
return f"{self.__tablename__}({self.id}, {self.description}, FK.room_id={self.room_id})"
@classmethod
def is_max_zone_contract(cls, db, payload: dict, company_id:str, table:DeclarativeMeta) -> bool:
return False;
@classmethod
def check_exists(cls, db, payload: dict, company_id:str, table:DeclarativeMeta) -> (bool, bool, int): # (row_exists, tentative_exceeding_max_zone, current_zone_count)
row_exists = super().check_exists(db, payload, company_id, table)[0]
is_delete_zone = payload.get("deleted") == True
max_zone_list = db.native_select_rows([max_zone_sql.format(company_id=company_id, p_is_row= 1 if row_exists else 0)])[0] \
if cls.is_max_zone_contract(db, payload, company_id, table) else \
{ "tentative_exceeding_max_zone":[False,None], "current_zone_count": [1,None] }
if (len(max_zone_list["tentative_exceeding_max_zone"]) == 2) or is_delete_zone : # <-- => max_zone not reached yet
return row_exists, max_zone_list["tentative_exceeding_max_zone"][0], \
max_zone_list["current_zone_count"][0] -1 if is_delete_zone else max_zone_list["current_zone_count"][0] \
if row_exists else max_zone_list["current_zone_count"][0] + 1
else : # <-- len(max_zone_list) == 1 => max_zone reached
raise BusinessException( {"row_exists": row_exists, "tentative_exceeding_max_zone":
max_zone_list["tentative_exceeding_max_zone"][0], "current_zone_count": max_zone_list["current_zone_count"][0] } )
#-- "company sub": "caf13bd0-6a7d-4c7b-aa87-6b6f3833fe1e" | "...f" | "...g" --# {pfix}
def create_company(comp_id:str, comp_name:str, comp_email:str, url:str, pfix='X'):
from covcov.infrastructure.db.database import Database
db = Database("database")
# Creation Company_1
db.insert_value([f'{{"id":"{comp_id}", "name": "{comp_name}", "address": "1 - 24 Avenue Frayce", "zip_code":"93401", "phone_number":"+33661794641", "email":"{comp_email}", "url":"{url}" }}'], [Company])
db.insert_value([f'{{"id":"{pfix}room_0.1", "description":"ROOM_0.1_", "company_id":"{comp_id}" }}'], [Room])
db.insert_value([f'{{"id":"{pfix}z_0.1.1", "description":"Z_0.1.1", "room_id":"{pfix}room_0.1"}}'], [Zone])
db.insert_value([f'{{"id":"{pfix}z_0.1.2", "description":"Z_0.1.2", "room_id":"{pfix}room_0.1"}}'], [Zone])
db.insert_value([f'{{"id":"{pfix}z_0.1.3", "description":"Z_0.1.3", "room_id":"{pfix}room_0.1"}}'], [Zone])
#
db.insert_value([f'{{"id":"{pfix}room_0.2", "description":"ROOM_0.2_", "company_id":"{comp_id}" }}'], [Room])
db.insert_value([f'{{"id":"{pfix}z_0.2.1", "description":"Z_0.2.1", "room_id":"{pfix}room_0.2"}}'], [Zone])
db.insert_value([f'{{"id":"{pfix}z_0.2.2", "description":"Z_0.2.2", "room_id":"{pfix}room_0.2"}}'], [Zone])
#
db.insert_value([f'{{"id":"{pfix}room_0.3", "description":"ROOM_0.3_", "company_id":"{comp_id}" }}'], [Room])
db.insert_value([f'{{"id":"{pfix}z_0.3.1", "description":"Z_0.3.1", "room_id":"{pfix}room_0.3"}}'], [Zone])
# Creation Company_2
db.insert_value([f'{{"id":"{comp_id[:-1]}+", "name": "2_{comp_name}", "address": "2 - 24 Avenue Frayce", "zip_code":"93402", "phone_number":"+33661794642", "email":"2_{comp_email}" }}'], [Company])
db.insert_value([f'{{"id":"{pfix}room_2.1", "description":"ROOM_2.1_", "company_id":"{comp_id[:-1]}+" }}'], [Room])
db.insert_value([f'{{"id":"{pfix}z_2.1.1", "description":"Z_2.1.1", "room_id":"{pfix}room_2.1"}}'], [Zone])
db.insert_value([f'{{"id":"{pfix}z_2.1.2", "description":"Z_2.1.2", "room_id":"{pfix}room_2.1"}}'], [Zone])
db.insert_value([f'{{"id":"{pfix}z_2.1.3", "description":"Z_2.1.3", "room_id":"{pfix}room_2.1"}}'], [Zone])
#
db.insert_value([f'{{"id":"{pfix}room_2.2", "description":"ROOM_2.2_", "company_id":"{comp_id[:-1]}+" }}'], [Room])
db.insert_value([f'{{"id":"{pfix}z_2.2.1", "description":"Z_2.2.1", "room_id":"{pfix}room_2.2"}}'], [Zone])
db.insert_value([f'{{"id":"{pfix}z_2.2.2", "description":"Z_2.2.2", "room_id":"{pfix}room_2.2"}}'], [Zone])
# Creation Company_3
db.insert_value([f'{{"id":"{comp_id[:-2]}-", "name": "3_{comp_name}", "address": "3 - 24 Avenue Frayce", "zip_code":"93403", "phone_number":"+33661794643", "email":"3_{comp_email}"}}'], [Company])
db.insert_value([f'{{"id":"{pfix}room_3.1", "description":"ROOM_3.1_", "company_id":"{comp_id[:-2]}-" }}'], [Room])
db.insert_value([f'{{"id":"{pfix}z_3.1.1", "description":"Z_3.1.1", "room_id":"{pfix}room_3.1"}}'], [Zone])
db.insert_value([f'{{"id":"{pfix}z_3.1.2", "description":"Z_3.1.2", "room_id":"{pfix}room_3.1"}}'], [Zone])
if __name__ == '__main__':
pass
# create_company("7c791fa9-5774-46d4-88f3-1134d08ef212", "alf", "wharouny.tp@gmail.com", "https://www.lemonde.fr/", "")
# create_company("57976c93-cd46-44c4-82c1-6271abc0c319", "covcov", "yohan.obadia@gmail.com", "https://www.mcdonalds.fr/", "Y")
#
# db.insert_value([f'{{"id":"comp_2", "address": "24 Avenue Frayce", "zip_code":"93400"}}'], [Company])
# session = sessionmaker(bind=engine)
# session = Session(engine)
# session.add(Company(f'{{"id":"comp_1", "address": "24 Avenue Frayce", "zip_code":"93400"}}'))
# session.commit() | [
"wharouny.tp@gmail.com"
] | wharouny.tp@gmail.com |
779f544e7eb9cb8e40c4b4995687d4dfb4bea3fc | 74479f8586558f8a95a00c28b14d0e1421d6dc50 | /ping_thread.py | 66b91faf477237b37c4a9092e70b71a2f1a23b5d | [] | no_license | xiaoyecent/ping_threading_Queue | 39b8b6023fa5efd2b4e53b00b47a9a8e77bac96a | 625fd5f542afff12dff7083a1c36dccc33ad690f | refs/heads/master | 2021-01-21T10:59:25.422929 | 2017-03-01T03:49:02 | 2017-03-01T03:49:02 | 83,507,654 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | '''
Created on 2017-2-27
@author: xiaoye
'''
#coding: utf-8
import thread
import time
from subprocess import Popen,PIPE
def scan_ip(ip):
    """Ping *ip* twice and print a liveness message if it replies.

    The command is passed as an argument list with shell=False, which
    avoids shell injection through a crafted *ip* string (the original
    interpolated *ip* into a shell command line).
    """
    process = Popen(['ping', '-c', '2', ip], stdin=PIPE, stdout=PIPE)
    # read() blocks until ping closes stdout, i.e. until it finishes.
    data = process.stdout.read()
    if 'ttl' in data:
        print('%s is live ,now time is %s' % (ip, time.strftime('%H:%M:%S')))
if __name__ == '__main__':
    # Python 2 script: read a seed IPv4 address from stdin and sweep the
    # whole /24, pinging each host in its own thread.
    #scan_ip('111.13.147.229')
    ips = raw_input()
    # Keep the first three octets; the last octet is enumerated below.
    ip_header = '.'.join(ips.split('.')[:3])
    for i in range(1,255):
        ip = ip_header + '.' + str(i)
        #print ip
        thread.start_new_thread(scan_ip, (ip,))
        # Small delay so hundreds of threads are not spawned at once.
        time.sleep(0.1)
| [
"biqqanquan@163.com"
] | biqqanquan@163.com |
7b57e8d45ffc337b723d2242be7cf1abb54c1482 | 0f1dc76f801a307c0566c8fe610323bdcaa700ff | /nbsearch/Test/nb_search.py | 4bf35612f93750f898a5fdabda085db1205bb5c7 | [] | no_license | jkitchin/nb_search | 3843d0b8ed810de902a7cf193b1b523707229c23 | 50179495e71a0ba7b47a0845c881abf1da52c020 | refs/heads/master | 2022-12-04T20:53:53.033087 | 2020-08-28T14:30:56 | 2020-08-28T14:30:56 | 291,049,650 | 0 | 0 | null | 2020-08-28T13:09:34 | 2020-08-28T13:09:34 | null | UTF-8 | Python | false | false | 11,403 | py | import sys
import os
from IPython.display import HTML, display
import nbformat
import argparse
import re
import pandas as pd
# HELPFUL FUNCTIONS
def search_util(root='.'):
    """Recursively collect all .ipynb files under *root*.

    *root* may be a directory path (walked recursively) or an explicit
    list of file paths to filter.  Checkpoint notebooks are skipped.
    """
    if isinstance(root, list):
        return [p for p in root
                if p.endswith('.ipynb') and 'checkpoint.ipynb' not in p]
    found = []
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            if name.endswith('.ipynb') and 'checkpoint.ipynb' not in name:
                found.append(os.path.join(dirpath, name))
    return found
def show_files(nb_files):
    """Render each notebook path as a clickable HTML link."""
    for path in nb_files:
        display(HTML('<a href="{0}">{0}</a>'.format(path)))
def show_files_tags(nb_files,nb_tags,tag): # tag line format: %TODO [due date] optional description
    """Display each notebook whose %TODO tag line contains *tag*.

    When the tag carries a bracketed due date, print the description with a
    colour-coded countdown (green = still due, red = past due); otherwise
    print the raw tag line.  *nb_files* and *nb_tags* are parallel lists as
    returned by search_todo_util.
    """
    count = 0  # NOTE(review): never used or returned — looks like leftover scaffolding
    for i,f in enumerate(nb_files):
        # Skip the leading '%' of '%TODO ...' before matching the tag text.
        if tag in nb_tags[i][1:].strip():
            if '[' in nb_tags[i]:
                # Capture the text between the first '[' and ']' (the due date).
                m = re.search("[^[]*\[([^]]*)\]", nb_tags[i])
                # Drop the bracketed date and the %TODO marker, leaving the description.
                ss = ''.join(nb_tags[i].split('['+m.groups(1)[0] + ']'))
                description = ''.join(ss.split('%TODO')).strip()
                due_date = pd.to_datetime([m.groups(1)[0]])
                df = pd.DataFrame({'Date':due_date})
                # Days remaining relative to midnight today.
                df['diff'] = df - pd.Timestamp.now().normalize()
                due_days = df['diff'][0].days
                if due_days >= 0:
                    print(description + color.BOLD + color.GREEN + ' (Due in: ' + str(due_days) + ' days)' + color.END)
                    display(HTML(f'<a href="{f}">{f}</a>'))
                else:
                    print(description + color.BOLD + color.RED + ' (Past due by: ' + str(abs(due_days)) + ' days)' + color.END)
                    display(HTML(f'<a href="{f}">{f}</a>'))
            else:
                # No due date: just echo the tag line and link the notebook.
                print(nb_tags[i])
                display(HTML(f'<a href="{f}">{f}</a>'))
def search_notebook_util(pattern, cell_type, root='.'):
    """Return the notebooks under *root* that contain *pattern* in at
    least one cell of the given *cell_type* ('code' or 'markdown')."""
    matches = []
    for path in search_util(root):
        notebook = nbformat.read(path, as_version=4)
        for cell in notebook['cells']:
            if cell['cell_type'] == cell_type and pattern in cell['source']:
                matches.append(path)
                break
    return matches
def search_heading_util(pattern, root='.'):
    """Return the set of notebooks under *root* that have a markdown
    heading line containing *pattern*.

    A heading line is a non-blank line whose first non-space character
    is '#'.  Blank lines are skipped explicitly instead of relying on the
    original bare ``except`` (which silently swallowed the IndexError
    raised by indexing an empty stripped line — and anything else).
    """
    matched = []
    for path in search_util(root):
        notebook = nbformat.read(path, as_version=4)
        for cell in notebook['cells']:
            if cell['cell_type'] != 'markdown':
                continue
            for line in cell['source'].split('\n'):
                stripped = line.strip()
                if stripped and stripped[0] == '#' and pattern in line:
                    matched.append(path)
                    break
    return set(matched)
def heading_list(file):
    """Return every markdown heading line (stripped) found in notebook
    *file*, in document order.

    Blank lines are skipped with an explicit emptiness check rather than
    the original bare ``except`` (which hid the IndexError from indexing
    an empty stripped line — and any other error).
    """
    headings = []
    nb = nbformat.read(file, as_version=4)
    for cell in nb['cells']:
        if cell['cell_type'] != 'markdown':
            continue
        for line in cell['source'].split('\n'):
            stripped = line.strip()
            if stripped and stripped[0] == '#':
                headings.append(stripped)
    return headings
class color:
    """ANSI terminal escape sequences used to colour/format printed output."""
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'  # resets all attributes
def pretty_print_headings(heading_list):
    """Print each heading bold and green, indented one tab per heading
    level below the top level."""
    for heading in heading_list:
        stripped = heading.strip()
        # The run of leading '#' characters encodes the heading level.
        level = len(stripped.split()[0])
        text = stripped[level + 1:]
        print(color.BOLD + color.GREEN + '\t' * (level - 1) + text + '\n' + color.END)
def search_data_util(props,root='.'):
    """Return notebooks under *root* whose %%properties cell satisfies every
    requirement in *props*.

    Each requirement is either '< x' / '> x' (compared against Max_H) or a
    metal name (case-insensitively compared against Metal_A / Metal_B).
    """
    requirements = len(props)
    files = search_util(root)
    file_list = []
    for file in files:
        nb = nbformat.read(file,as_version=4)
        for i in nb['cells']:
            if i['cell_type'] == 'code':
                if i['source'].startswith('%%properties'):
                    # The %%properties cell layout is fixed: line 1 = Metal_A,
                    # line 2 = Metal_B, line 3 = Max_H, value last on each line.
                    Metal_A = i['source'].split('\n')[1].split()[-1]
                    Metal_B = i['source'].split('\n')[2].split()[-1]
                    Max_H = float(i['source'].split('\n')[3].split()[-1])
                    require = 0
                    for prop in props:
                        if '<' in prop:
                            if Max_H < float(prop.split('<')[-1].strip()):
                                require += 1
                        elif '>' in prop:
                            if Max_H > float(prop.split('>')[-1].strip()):
                                require += 1
                        else: # Assumed the user entered a metal name
                            if prop.upper() == Metal_A.upper() or prop.upper() == Metal_B.upper():
                                require += 1
                    # Every requirement must hold for the notebook to match.
                    if require == requirements:
                        file_list.append(file)
                        break
    return file_list
def Get_props(file):
    """Extract the metal/activity properties recorded in a notebook's
    %%properties cell.

    Returns a dict with keys 'Metal_A', 'Metal_B', 'Max_H', or None when
    the notebook has no %%properties cell.  If several such cells exist,
    the last one wins (matching the original behaviour).
    """
    result = None
    nb = nbformat.read(file, as_version=4)
    for cell in nb['cells']:
        if cell['cell_type'] == 'code' and cell['source'].startswith('%%properties'):
            lines = cell['source'].split('\n')
            result = {
                'Metal_A': lines[1].split()[-1],
                'Metal_B': lines[2].split()[-1],
                'Max_H': float(lines[3].split()[-1]),
            }
    return result
def search_todo_util(root='.'):
    """Scan code cells of notebooks under *root* for %TODO tag lines.

    Returns two parallel lists: the matching notebook paths and the first
    %TODO line found in each (one tag per notebook).
    """
    files = search_util(root)
    file_list = []
    tag_list = []
    for file in files:
        nb = nbformat.read(file,as_version=4)
        for i in nb['cells']:
            # A notebook is recorded at most once; stop scanning its cells
            # as soon as it has been added.
            if file in file_list:
                break
            if i['cell_type'] == 'code':
                for line in i['source'].split('\n'):
                    # Exclude magic lines that merely contain the marker text.
                    if line.startswith('%TODO') and '%%properties' not in line and '%matplotlib' not in line:
                        tag_list.append(line)
                        file_list.append(file)
                        break
    return file_list,tag_list
class NB:
    """Lightweight notebook wrapper pairing a path with its parsed
    %%properties dict (None when the notebook has no such cell)."""
    def __init__(self,filename):
        self.filename = filename
        self.property = Get_props(filename)
def fsearch_util(f, root='.'):
    """Return notebooks under *root* with a %%properties cell whose NB
    wrapper satisfies predicate *f*."""
    selected = []
    for path in search_util(root):
        notebook = NB(path)
        # Notebooks without a properties cell are never passed to f.
        if notebook.property is not None and f(notebook):
            selected.append(path)
    return selected
# The Main Functions
def search_files(root='.'):
    """Collect every notebook under *root*, display them as links, and
    return the list of paths."""
    found = search_util(root)
    show_files(found)
    return found
def search_notebook(string_pattern, cell_type, root='.'):
    """Find, display, and return notebooks containing *string_pattern* in
    a cell of the given *cell_type* ('code' or 'markdown')."""
    matches = search_notebook_util(string_pattern, cell_type, root)
    show_files(matches)
    return matches
def search_heading(pattern, root='.'):
    """Find, display, and return the notebooks under *root* with a
    markdown heading containing *pattern*."""
    matches = search_heading_util(pattern, root)
    show_files(matches)
    return matches
def headings_pprint(file):
    """Pretty-print the headings of notebook *file*, indented by their
    heading level."""
    pretty_print_headings(heading_list(file))
def search_data(props, root='.'):
    """Search notebooks whose %%properties cell satisfies *props*.

    *props* may be a list of requirements, or a single string where several
    requirements are joined by the word 'and' (e.g. "Pt and Max_H < 0.5").
    Matching notebooks are displayed as links and returned as a list.
    """
    if not isinstance(props, list):
        # Split only on the stand-alone word 'and' so that element names
        # merely containing the letters "and" (e.g. "Scandium") survive;
        # the original split on the substring and broke such names.
        props = [p.strip() for p in re.split(r'\band\b', props)]
    nb_files = search_data_util(props, root)
    show_files(nb_files)
    return nb_files
def search_todo(tag='TODO', root='.'):
    """Display notebooks whose code cells carry a %TODO tag matching *tag*
    (with description and colour-coded due date) and return their paths."""
    nb_files, nb_tags = search_todo_util(root)
    show_files_tags(nb_files, nb_tags, tag)
    return nb_files
def fsearch(f, root='.'):
    """Find, display, and return notebooks whose properties satisfy
    predicate *f*."""
    matches = fsearch_util(f, root)
    show_files(matches)
    return matches
if __name__ == '__main__':
    # Collecting the Command Line Inputs
    parser = argparse.ArgumentParser(description='Search Jupyter Notebooks')
    parser.add_argument('--all', nargs='?', const='.')
    parser.add_argument('--markdown',nargs='+')
    parser.add_argument('--code',nargs='+')
    parser.add_argument('--heading',nargs='+')
    parser.add_argument('--heading_pp',nargs='+')
    parser.add_argument('--property',nargs='+')
    parser.add_argument('--todo',nargs='+')
    args = parser.parse_args()
    # For most options the first value is the search root and the rest is
    # the pattern to look for.
    if args.all: # If you selected "all" you want a list of all of the files in the directory
        root = args.all # If a root is not given the root is assumed to be the current dir.
    if args.markdown:
        root = args.markdown[0]
        string_pattern = args.markdown[1:]
    if args.code:
        root = args.code[0]
        string_pattern = args.code[1:]
    if args.heading:
        root = args.heading[0]
        string_pattern = args.heading[1:]
    if args.heading_pp:
        file_name = args.heading_pp[0]
    if args.property:
        root = args.property[0]
        x = ''.join(args.property[1:])
        # NOTE(review): splitting on the substring 'and' also splits element
        # names that contain the letters "and" (e.g. "Scandium") — confirm
        # whether a word-boundary split is intended here.
        if 'and' in x:
            List_of_desired_props = x.split('and')
        else:
            List_of_desired_props = [args.property[1]]
    if args.todo:
        if len(args.todo) == 1:
            root = args.todo[0]
            tag = 'TODO'
    # Dispatch to the matching search helper (first matching option wins).
    if args.all: # If you selected "all" you want a list of all of the files in the directory
        search_files(root)
    elif args.code:
        search_notebook(string_pattern[0],'code',root)
    elif args.markdown:
        search_notebook(string_pattern[0],'markdown',root)
    elif args.heading:
        search_heading(string_pattern[0],root)
    elif args.heading_pp:
        headings_pprint(file_name)
    elif args.property:
        search_data(List_of_desired_props,root)
    elif args.todo:
        search_todo(tag,root)
"59749099+loevlie@users.noreply.github.com"
] | 59749099+loevlie@users.noreply.github.com |
92f3ee7e26c3ee1406bd8042cee27fc0d7f8f4c2 | d115cf7a1b374d857f6b094d4b4ccd8e9b1ac189 | /tags/pygccxml_dev_1.0.0/unittests/plain_c_tester.py | c26b2581fbaca21e9f350c66801aeb71c9acd90f | [
"BSL-1.0"
] | permissive | gatoatigrado/pyplusplusclone | 30af9065fb6ac3dcce527c79ed5151aade6a742f | a64dc9aeeb718b2f30bd6a5ff8dcd8bfb1cd2ede | refs/heads/master | 2016-09-05T23:32:08.595261 | 2010-05-16T10:53:45 | 2010-05-16T10:53:45 | 700,369 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,430 | py | # Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import unittest
import autoconfig
import parser_test_case
from pygccxml import utils
from pygccxml import parser
from pygccxml import declarations
class tester_t( parser_test_case.parser_test_case_t ):
    """Parses plain_c.c once and checks that plain-C free functions are
    exposed through the declarations query API (Python 2 test case)."""
    def __init__(self, *args ):
        parser_test_case.parser_test_case_t.__init__( self, *args )
        self.header = 'plain_c.c'
        self.global_ns = None
    def setUp(self):
        # Parse lazily and cache the global namespace across test methods.
        if not self.global_ns:
            decls = parser.parse( [self.header], self.config )
            self.global_ns = declarations.get_global_namespace( decls )
            self.global_ns.init_optimizer()
    def test( self ):
        # free_fun raises if the function is missing from the parsed namespace.
        self.global_ns.free_fun( 'hello_sum' )
        self.global_ns.free_fun( 'hello_print' )
        declarations.print_declarations( self.global_ns )
        f = self.global_ns.free_fun( 'do_smth' )
        for arg in f.arguments:
            print arg.type.decl_string
def create_suite():
    """Build a TestSuite holding every test method of tester_t."""
    return unittest.TestSuite([unittest.makeSuite(tester_t)])
def run_suite():
    """Run the module's suite with a verbose text runner."""
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(create_suite())
if __name__ == "__main__":
run_suite()
| [
"roman_yakovenko@dc5859f9-2512-0410-ae5c-dd123cda1f76"
] | roman_yakovenko@dc5859f9-2512-0410-ae5c-dd123cda1f76 |
c08920406c3451c83af6edf3fa87dfab8ec032c3 | 0709766ad33c3d46f0b91afb65acce1c2188d777 | /tools/export_serving_model.py | 368ee157599eb08087f4f344b8cb09aa84ab0138 | [
"Apache-2.0"
] | permissive | netidol/PaddleDetection | 37a511f3cf93d929ad986317b3294dcee95c9f62 | 365d81367bf3f42a4b1941a89e84000dc8b14223 | refs/heads/master | 2021-06-28T16:03:29.069173 | 2021-04-22T12:34:15 | 2021-04-22T12:34:15 | 223,561,417 | 0 | 0 | Apache-2.0 | 2019-11-23T09:09:26 | 2019-11-23T09:09:26 | null | UTF-8 | Python | false | false | 3,991 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys
# add python path of PadleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
if parent_path not in sys.path:
sys.path.append(parent_path)
import paddle
from paddle import fluid
from ppdet.core.workspace import load_config, merge_config, create
from ppdet.utils.cli import ArgsParser
from ppdet.utils.check import check_config, check_version, enable_static_mode
import ppdet.utils.checkpoint as checkpoint
import yaml
import logging
from ppdet.utils.export_utils import dump_infer_config, prune_feed_vars
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
def save_serving_model(FLAGS, exe, feed_vars, test_fetches, infer_prog):
    """Export *infer_prog* as a Paddle Serving model.

    Writes the client-side config and the server-side model under
    <output_dir>/<config name>/serving_client|serving_server, derived
    from the program's feed and fetch variables.
    """
    cfg_name = os.path.basename(FLAGS.config).split('.')[0]
    save_dir = os.path.join(FLAGS.output_dir, cfg_name)
    feed_var_names = [var.name for var in feed_vars.values()]
    # Deterministic fetch ordering: sort by fetch name.
    fetch_list = sorted(test_fetches.items(), key=lambda i: i[0])
    target_vars = [var[1] for var in fetch_list]
    # Drop feed vars the fetched outputs do not actually depend on.
    feed_var_names = prune_feed_vars(feed_var_names, target_vars, infer_prog)
    serving_client = os.path.join(FLAGS.output_dir, 'serving_client')
    serving_server = os.path.join(FLAGS.output_dir, 'serving_server')
    logger.info(
        "Export serving model to {}, client side: {}, server side: {}. input: {}, output: "
        "{}...".format(FLAGS.output_dir, serving_client, serving_server,
                       feed_var_names, [str(var.name) for var in target_vars]))
    feed_dict = {x: infer_prog.global_block().var(x) for x in feed_var_names}
    fetch_dict = {x.name: x for x in target_vars}
    # Imported lazily so the tool works without paddle-serving installed
    # unless this export path is actually taken.
    import paddle_serving_client.io as serving_io
    # NOTE(review): the save paths below (under save_dir) differ from the
    # paths logged above (under output_dir) — confirm which is intended.
    serving_client = os.path.join(save_dir, 'serving_client')
    serving_server = os.path.join(save_dir, 'serving_server')
    serving_io.save_model(
        client_config_folder=serving_client,
        server_model_folder=serving_server,
        feed_var_dict=feed_dict,
        fetch_var_dict=fetch_dict,
        main_program=infer_prog)
def main():
    """Build the inference program for the configured architecture and
    export it in Paddle Serving format together with its infer config."""
    cfg = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    check_config(cfg)
    check_version()
    main_arch = cfg.architecture
    # Use CPU for exporting inference model instead of GPU
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    model = create(main_arch)
    startup_prog = fluid.Program()
    infer_prog = fluid.Program()
    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            inputs_def = cfg['TestReader']['inputs_def']
            # Feed via plain variables (not a DataLoader) for export.
            inputs_def['use_dataloader'] = False
            feed_vars, _ = model.build_inputs(**inputs_def)
            test_fetches = model.test(feed_vars)
    # clone(True): clone the program for inference (test mode).
    infer_prog = infer_prog.clone(True)
    exe.run(startup_prog)
    checkpoint.load_params(exe, infer_prog, cfg.weights)
    save_serving_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)
    dump_infer_config(FLAGS, cfg)
if __name__ == '__main__':
    # CLI entry point: switch to static-graph mode, parse arguments
    # (config path and overrides come from ArgsParser), then export.
    enable_static_mode()
    parser = ArgsParser()
    parser.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="Directory for storing the output model files.")
    FLAGS = parser.parse_args()
    main()
| [
"noreply@github.com"
] | netidol.noreply@github.com |
1c1722d15f2ee8dde90347013662ca30cd87c6a3 | 0269037acc7785a58f8786c60be8ccea8ef3f6f3 | /indico/modules/attachments/models/folders_test.py | 71309414a40429ae60741e7457815421438a6ce8 | [
"MIT"
] | permissive | bebusl/cbnu_indico | 1ffa7042a1f706da953214b39827cbdbb1387cce | 60b37c2bf54cd7f17092b2a9ad21311762729601 | refs/heads/master | 2023-01-18T22:22:09.655751 | 2020-12-02T09:04:06 | 2020-12-02T09:04:06 | 281,068,896 | 0 | 0 | MIT | 2020-07-20T09:09:44 | 2020-07-20T09:09:43 | null | UTF-8 | Python | false | false | 1,890 | py | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from indico.modules.attachments import AttachmentFolder
def test_update_principal(dummy_user, dummy_event):
    """update_principal should create, keep, or remove an ACL entry only
    when the requested access actually changes."""
    folder = AttachmentFolder(object=dummy_event, is_default=True)
    assert not folder.acl_entries
    # not changing anything -> shouldn't be added to acl
    entry = folder.update_principal(dummy_user)
    assert entry is None
    assert not folder.acl_entries
    # adding user with read access -> new acl entry since the user isn't in there yet
    entry = initial_entry = folder.update_principal(dummy_user, read_access=True)
    assert folder.acl_entries == {entry}
    # not changing anything on existing principal -> shouldn't modify acl
    entry = folder.update_principal(dummy_user)
    assert entry is initial_entry
    assert folder.acl_entries == {entry}
    # granting permission which is already present -> shouldn't modify acl
    entry = folder.update_principal(dummy_user, read_access=True)
    assert entry is initial_entry
    assert folder.acl_entries == {entry}
    # removing read access -> acl entry is removed
    entry = folder.update_principal(dummy_user, read_access=False)
    assert entry is None
    assert not folder.acl_entries
def test_remove_principal(dummy_user, dummy_event):
    """remove_principal should drop the user's ACL entry and be a no-op
    when the user has no entry."""
    folder = AttachmentFolder(object=dummy_event, is_default=True)
    assert not folder.acl_entries
    entry = folder.update_principal(dummy_user, read_access=True)
    assert folder.acl_entries == {entry}
    folder.remove_principal(dummy_user)
    assert not folder.acl_entries
    # doesn't do anything but must not fail either
    folder.remove_principal(dummy_user)
    assert not folder.acl_entries
"adrian.moennich@cern.ch"
] | adrian.moennich@cern.ch |
a40856233a9964baf4c68babb5fface0b95472e3 | 045cb1a5638c3575296f83471758dc09a8065725 | /harpiya/service/model.py | d5f3733a91592649bfbd91be8592000f5bcf3b43 | [] | no_license | marionumza/saas | 7236842b0db98d1a0d0c3c88df32d268509629cb | 148dd95d991a348ebbaff9396759a7dd1fe6e101 | refs/heads/main | 2023-03-27T14:08:57.121601 | 2021-03-20T07:59:08 | 2021-03-20T07:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,683 | py | # -*- coding: utf-8 -*-
from contextlib import closing
from functools import wraps
import logging
from psycopg2 import IntegrityError, OperationalError, errorcodes
import random
import threading
import time
import harpiya
from harpiya.exceptions import UserError, ValidationError, QWebException
from harpiya.models import check_method_name
from harpiya.tools.translate import translate, translate_sql_constraint
from harpiya.tools.translate import _
from . import security
from ..tools import traverse_containers, lazy
_logger = logging.getLogger(__name__)
PG_CONCURRENCY_ERRORS_TO_RETRY = (errorcodes.LOCK_NOT_AVAILABLE, errorcodes.SERIALIZATION_FAILURE, errorcodes.DEADLOCK_DETECTED)
MAX_TRIES_ON_CONCURRENCY_FAILURE = 5
def dispatch(method, params):
    """RPC dispatcher: authenticate with the (db, uid, password) triple
    taken from the first three *params* and route the remaining ones to
    execute/execute_kw.

    Raises NameError for unknown or discontinued RPC methods.
    """
    (db, uid, passwd ) = params[0], int(params[1]), params[2]
    # set uid tracker - cleaned up at the WSGI
    # dispatching phase in harpiya.service.wsgi_server.application
    threading.current_thread().uid = uid
    params = params[3:]
    if method == 'obj_list':
        raise NameError("obj_list has been discontinued via RPC as of 6.0, please query ir.model directly!")
    if method not in ['execute', 'execute_kw']:
        raise NameError("Method not available %s" % method)
    security.check(db,uid,passwd)
    registry = harpiya.registry(db).check_signaling()
    # Resolve the target function by name from this module's globals.
    fn = globals()[method]
    with registry.manage_changes():
        res = fn(db, uid, *params)
    return res
def check(f):
    """Decorate a model-service entry point so that transient PostgreSQL
    concurrency errors are retried with randomised exponential backoff and
    IntegrityError is converted into a translated ValidationError."""
    @wraps(f)
    def wrapper(___dbname, *args, **kwargs):
        """ Wraps around OSV functions and normalises a few exceptions
        """
        dbname = ___dbname # NOTE: this forbids using "___dbname" as an argument name in http routes
        def tr(src, ttype):
            # We try to do the same as the _(), but without the frame
            # inspection, since we aready are wrapping an osv function
            # trans_obj = self.get('ir.translation') cannot work yet :(
            ctx = {}
            if not kwargs:
                if args and isinstance(args[-1], dict):
                    ctx = args[-1]
            elif isinstance(kwargs, dict):
                if 'context' in kwargs:
                    ctx = kwargs['context']
                elif 'kwargs' in kwargs and kwargs['kwargs'].get('context'):
                    # http entry points such as call_kw()
                    ctx = kwargs['kwargs'].get('context')
                else:
                    try:
                        from harpiya.http import request
                        ctx = request.env.context
                    except Exception:
                        pass
            lang = ctx and ctx.get('lang')
            if not (lang or hasattr(src, '__call__')):
                return src
            # We open a *new* cursor here, one reason is that failed SQL
            # queries (as in IntegrityError) will invalidate the current one.
            with closing(harpiya.sql_db.db_connect(dbname).cursor()) as cr:
                if ttype == 'sql_constraint':
                    res = translate_sql_constraint(cr, key=key, lang=lang)
                else:
                    res = translate(cr, name=False, source_type=ttype,
                                    lang=lang, source=src)
                return res or src
        def _(src):
            return tr(src, 'code')
        # Retry loop: only the concurrency errors listed in
        # PG_CONCURRENCY_ERRORS_TO_RETRY are retried, up to
        # MAX_TRIES_ON_CONCURRENCY_FAILURE attempts.
        tries = 0
        while True:
            try:
                if harpiya.registry(dbname)._init and not harpiya.tools.config['test_enable']:
                    raise harpiya.exceptions.Warning('Currently, this database is not fully loaded and can not be used.')
                return f(dbname, *args, **kwargs)
            except (OperationalError, QWebException) as e:
                # Unwrap an OperationalError hidden inside a QWebException.
                if isinstance(e, QWebException):
                    cause = e.qweb.get('cause')
                    if isinstance(cause, OperationalError):
                        e = cause
                    else:
                        raise
                # Automatically retry the typical transaction serialization errors
                if e.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY:
                    raise
                if tries >= MAX_TRIES_ON_CONCURRENCY_FAILURE:
                    _logger.info("%s, maximum number of tries reached" % errorcodes.lookup(e.pgcode))
                    raise
                # Randomised exponential backoff before the next attempt.
                wait_time = random.uniform(0.0, 2 ** tries)
                tries += 1
                _logger.info("%s, retry %d/%d in %.04f sec..." % (errorcodes.lookup(e.pgcode), tries, MAX_TRIES_ON_CONCURRENCY_FAILURE, wait_time))
                time.sleep(wait_time)
            except IntegrityError as inst:
                registry = harpiya.registry(dbname)
                key = inst.diag.constraint_name
                if key in registry._sql_constraints:
                    raise ValidationError(tr(key, 'sql_constraint') or inst.pgerror)
                if inst.pgcode in (errorcodes.NOT_NULL_VIOLATION, errorcodes.FOREIGN_KEY_VIOLATION, errorcodes.RESTRICT_VIOLATION):
                    msg = _('The operation cannot be completed:')
                    _logger.debug("IntegrityError", exc_info=True)
                    try:
                        # Get corresponding model and field
                        model = field = None
                        for name, rclass in registry.items():
                            if inst.diag.table_name == rclass._table:
                                model = rclass
                                field = model._fields.get(inst.diag.column_name)
                                break
                        if inst.pgcode == errorcodes.NOT_NULL_VIOLATION:
                            # This is raised when a field is set with `required=True`. 2 cases:
                            # - Create/update: a mandatory field is not set.
                            # - Delete: another model has a not nullable using the deleted record.
                            msg += '\n'
                            msg += _(
                                '- Create/update: a mandatory field is not set.\n'
                                '- Delete: another model requires the record being deleted. If possible, archive it instead.'
                            )
                            if model:
                                msg += '\n\n{} {} ({}), {} {} ({})'.format(
                                    _('Model:'), model._description, model._name,
                                    _('Field:'), field.string if field else _('Unknown'), field.name if field else _('Unknown'),
                                )
                        elif inst.pgcode == errorcodes.FOREIGN_KEY_VIOLATION:
                            # This is raised when a field is set with `ondelete='restrict'`, at
                            # unlink only.
                            msg += _(' another model requires the record being deleted. If possible, archive it instead.')
                            constraint = inst.diag.constraint_name
                            if model or constraint:
                                msg += '\n\n{} {} ({}), {} {}'.format(
                                    _('Model:'), model._description if model else _('Unknown'), model._name if model else _('Unknown'),
                                    _('Constraint:'), constraint if constraint else _('Unknown'),
                                )
                    except Exception:
                        pass
                    raise ValidationError(msg)
                else:
                    raise ValidationError(inst.args[0])
    return wrapper
def execute_cr(cr, uid, obj, method, *args, **kw):
    """Invoke *method* on model *obj* within an existing cursor/environment.

    Raises UserError when the model does not exist.
    """
    harpiya.api.Environment.reset() # clean cache etc if we retry the same transaction
    recs = harpiya.api.Environment(cr, uid, {}).get(obj)
    if recs is None:
        raise UserError(_("Object %s doesn't exist") % obj)
    result = harpiya.api.call_kw(recs, method, args, kw)
    # force evaluation of lazy values before the cursor is closed, as it would
    # error afterwards if the lazy isn't already evaluated (and cached)
    for l in traverse_containers(result, lazy):
        _0 = l._value
    return result
def execute_kw(db, uid, obj, method, args, kw=None):
    """Thin RPC adapter: unpack positional/keyword arguments and delegate
    to execute()."""
    keywords = kw or {}
    return execute(db, uid, obj, method, *args, **keywords)
@check
def execute(db, uid, obj, method, *args, **kw):
threading.currentThread().dbname = db
with harpiya.registry(db).cursor() as cr:
check_method_name(method)
res = execute_cr(cr, uid, obj, method, *args, **kw)
if res is None:
_logger.info('The method %s of the object %s can not return `None` !', method, obj)
return res
| [
"yasir@harpiya.com"
] | yasir@harpiya.com |
977da3579e8f87f1655e64f2de8938f2c1adc395 | 1207d50126d4d59966573927c5eadd94db6aeb59 | /svggen/library/Rectangle.py | cb7e78caee0de5d274f55684375712ff71248bc0 | [] | no_license | christianwarloe/robotBuilder | aee03c189972f1d305c6e13d106b362b5d26d187 | 3f8fbc267ac7b9bbae534d1208278541a7b5eaa5 | refs/heads/master | 2021-06-13T02:42:24.834816 | 2017-04-07T01:01:52 | 2017-04-07T01:01:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | from svggen.api.FoldedComponent import FoldedComponent
from svggen.api.composables.graph.Face import Rectangle as Rect
from svggen.api.composables.GraphComposable import Graph
from svggen.api.ports.EdgePort import EdgePort
from svggen.api.ports.FacePort import FacePort
class Rectangle(FoldedComponent):
    """A single rectangular face (length l, width w) exposing its face and
    its four edges (b/r/t/l) as connection interfaces."""
    _test_params = {
        'l': 100,
        'w': 400,
    }
    def define(self, **kwargs):
        # Declare the two positive dimensions with their defaults.
        FoldedComponent.define(self, **kwargs)
        self.addParameter("l", 100, positive=True)
        self.addParameter("w", 400, positive=True)
    def assemble(self):
        dx = self.getParameter("l")
        dy = self.getParameter("w")
        self.addFace(Rect("r", dx, dy))
        self.place()
        # Expose the face plus the four edges e0..e3 as
        # bottom/right/top/left ports.
        self.addInterface("face", FacePort(self, "r"))
        self.addInterface("b", EdgePort(self, "e0"))
        self.addInterface("r", EdgePort(self, "e1"))
        self.addInterface("t", EdgePort(self, "e2"))
        self.addInterface("l", EdgePort(self, "e3"))
if __name__ == "__main__":
h = Rectangle()
#h._make_test()
| [
"christian.warloe@gmail.com"
] | christian.warloe@gmail.com |
3552b52bbdcda04755cd1fd787a1dd6a4b657cdb | f292e51314479f0d8195818667b7f5a7a2f1b410 | /Python/pset6/problems-sentiments/analyzer.py | 3538abacc3b9c5e7170e941a3a8f321a804ea6c6 | [
"MIT"
] | permissive | NSKBpro/cs50-solutions | d3f906bcef614dcbcb55ce46d05b62d3249bbcb8 | ebb13dc4861d85e6c64b235bbc6b1313c290ace5 | refs/heads/master | 2020-03-19T03:48:38.567215 | 2017-12-22T23:26:54 | 2017-12-22T23:26:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | import nltk
class Analyzer():
"""Implements sentiment analysis."""
def __init__(self, positives, negatives):
#Init sets
self.positives = set()
self.negatives = set()
#Read positive words from data set
with open(positives) as words:
for word in words:
if not word.startswith(';'):
self.positives.add(word.strip('\n'))
#Read negative words from data set
with open(negatives) as words:
for word in words:
if not word.startswith(';'):
self.negatives.add(word.strip('\n'))
def analyze(self, text):
#Will hold sum of score for every word
totalScore = 0
#Tokenize words from argument string
argWords = nltk.tokenize.TweetTokenizer().tokenize(text)
#Check score of every word
for argWord in argWords:
alreadyFound = False
for positiveWord in self.positives:
if argWord.lower() == positiveWord:
totalScore += 1
alreadyFound = True
break
if not alreadyFound:
for negativeWord in self.negatives:
if argWord.lower() == negativeWord:
totalScore -= 1
break
return totalScore
| [
"alekski64@icloud.com"
] | alekski64@icloud.com |
061a12b6590aeeeea3ffcea01282b6cc2a5fe06b | 1bc6195a6d6577266c5722854b6342b22f5d914d | /BusInquirySystem/settings.py | 4defc6751684f62ac237cef63e400d3243485d29 | [] | no_license | zhangfangyong/BusInquirySystem | fe2711ce893097246646b3ea425e7de36e0c8107 | 84c5c9055db1c84b15d4d4f7d8ccd7d97deddef9 | refs/heads/master | 2020-07-31T23:23:39.217615 | 2019-10-13T13:04:42 | 2019-10-13T13:04:42 | 210,785,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,244 | py | """
Django settings for BusInquirySystem project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p0s1%@2twr0o(a8k&_!0@5r=!r1-d=$ks8&_y3#mmuoyqx-6)9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['115.159.211.43','127.0.0.1','0.0.0.0:8000']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Bus.apps.BusConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'BusInquirySystem.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'BusInquirySystem.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"15209805296@163.com"
] | 15209805296@163.com |
96eaba8baa60786fa762b5a9ed86e115dfb96fb2 | b5ba12d4dcb240ba6069964380f6a3aede79f448 | /mixins/simulation.py | 7ccaefcd9da8089d5c296d7cfa10fab98b594edc | [] | no_license | 70-6C-65-61-73-75-72-65h/erp | 9e1a6f20a15d16794043f583022b1e04a9435b20 | 0e088c767d0d0c0e5515be703ed71252d55b70d9 | refs/heads/master | 2022-03-27T21:12:52.305257 | 2019-12-17T15:41:59 | 2019-12-17T15:41:59 | 224,333,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | # from datetime import datetime
# # from string to datetime: time.strptime
# # from datetime to string: time.strftime
# def today():
# # simulational date_today:
# def datetime_date_today():
# """ only date returned """
# month, day, year = today()
# datetime_str = f'{month}/{day}/{year}'
# datetime_object = datetime.strptime(datetime_str, '%m/%d/%y')
# return datetime_object | [
"max.ulshin.max@istat.com.ua"
] | max.ulshin.max@istat.com.ua |
bc5ad557d4f626a81e3b4e15f4bf084bb239d1a7 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-vod/huaweicloudsdkvod/v1/model/show_asset_detail_request.py | 310a07633cd81850d55c262f5845bd24add26eb3 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,328 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowAssetDetailRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'asset_id': 'str',
'categories': 'list[str]'
}
attribute_map = {
'asset_id': 'asset_id',
'categories': 'categories'
}
def __init__(self, asset_id=None, categories=None):
"""ShowAssetDetailRequest - a model defined in huaweicloud sdk"""
self._asset_id = None
self._categories = None
self.discriminator = None
self.asset_id = asset_id
if categories is not None:
self.categories = categories
@property
def asset_id(self):
"""Gets the asset_id of this ShowAssetDetailRequest.
媒资ID。
:return: The asset_id of this ShowAssetDetailRequest.
:rtype: str
"""
return self._asset_id
@asset_id.setter
def asset_id(self, asset_id):
"""Sets the asset_id of this ShowAssetDetailRequest.
媒资ID。
:param asset_id: The asset_id of this ShowAssetDetailRequest.
:type: str
"""
self._asset_id = asset_id
@property
def categories(self):
"""Gets the categories of this ShowAssetDetailRequest.
查询的信息类型。 - 为空时表示查询所有信息。 - 不为空时支持同时查询一个或者多个类型的信息,取值如下: - - base_info:媒资基本信息。 - - transcode_info:转码结果信息。 - - thumbnail_info:截图结果信息。 - - review_info:审核结果信息。
:return: The categories of this ShowAssetDetailRequest.
:rtype: list[str]
"""
return self._categories
@categories.setter
def categories(self, categories):
"""Sets the categories of this ShowAssetDetailRequest.
查询的信息类型。 - 为空时表示查询所有信息。 - 不为空时支持同时查询一个或者多个类型的信息,取值如下: - - base_info:媒资基本信息。 - - transcode_info:转码结果信息。 - - thumbnail_info:截图结果信息。 - - review_info:审核结果信息。
:param categories: The categories of this ShowAssetDetailRequest.
:type: list[str]
"""
self._categories = categories
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowAssetDetailRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
50bbc66e4a19925e3c5c7c5c929e89be44671cac | fd32b1f3d3003a70e7220482d34e2c20c16940b3 | /node_modules/watchpack-chokidar2/node_modules/fsevents/build/config.gypi | a69878fb189bafde4c65b6e4fb6abd1119b502c2 | [
"MIT"
] | permissive | predeezRav/CountriesGroup2 | 12a6fe51e7ac63f277ecc7e4d0292fe1b59f8684 | 6b231f6baf9688d4fd9f31297eb5734d2e4bb42f | refs/heads/main | 2023-03-15T11:39:52.270571 | 2021-03-16T08:20:47 | 2021-03-16T08:20:47 | 348,105,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,059 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt67l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "7",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/var/folders/sp/npgpkhp906b_197jz2xlkm2w0000gn/T/.node-gyp/14.15.0",
"standalone_static_library": 1,
"metrics_registry": "https://registry.npmjs.org/",
"globalconfig": "/usr/local/etc/npmrc",
"init.module": "/Users/pratheesravindrakumar/.npm-init.js",
"init_module": "/Users/pratheesravindrakumar/.npm-init.js",
"userconfig": "/Users/pratheesravindrakumar/.npmrc",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"cache": "/Users/pratheesravindrakumar/.npm",
"user_agent": "npm/7.5.3 node/v14.15.0 darwin x64",
"prefix": "/usr/local"
}
}
| [
"prat0025@edu.ucl.dk"
] | prat0025@edu.ucl.dk |
5d5b5c72b46a23b4384971602e86d7719b885892 | b8bde9a346685e1428a8284f7ffb14f15e35fb78 | /deploy/pinax.fcgi | 43f92ff0c8e98da7b3b5d94b6afd6d72456e3420 | [] | no_license | bhaugen/pinax-groups-experiments | 9302762c8e7379f067385a7280ef9af4dc4c5e8f | d520ccbfdb8228e10b6e547df6f64106caa6f0ec | refs/heads/master | 2020-04-05T22:49:04.750605 | 2009-11-13T19:36:20 | 2009-11-13T19:36:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | fcgi | # pinax.fcgi is configured to live in projects/pinax_groups/deploy.
import os
import sys
from os.path import abspath, dirname, join
# NOTE(review): addsitedir appears unused in this script -- confirm before removing.
from site import addsitedir

# FastCGI entry point for the Pinax project.  Statement order below is
# load-bearing: sys.path must be extended BEFORE importing django.conf /
# the settings module.

# Make the directory two levels up (the projects root) importable so that
# "pinax_groups.settings" resolves.
sys.path.insert(0, abspath(join(dirname(__file__), "../../")))
from django.conf import settings

os.environ["DJANGO_SETTINGS_MODULE"] = "pinax_groups.settings"

# Expose both Pinax's bundled apps and the project's own apps.
sys.path.insert(0, join(settings.PINAX_ROOT, "apps"))
sys.path.insert(0, join(settings.PROJECT_ROOT, "apps"))

from django.core.servers.fastcgi import runfastcgi

# Threaded (non-prefork) worker, running in the foreground.
runfastcgi(method="threaded", daemonize="false")
| [
"bob.haugen@gmail.com"
] | bob.haugen@gmail.com |
f7d60b29b9a189ec5ac1378575f96b53edd6566e | 84ec7888f0df87582a8394b7cc1354d50177a765 | /StatusHQ/applications/models.py | 7718c2eded1e66503dc048506b9b38ccd5305c09 | [] | no_license | StatusHQ/StatusHQ | 5a5998d0c5b3628a1e88e13cb13fff4ed0f84cc2 | 6d0b1be1ed3e8d7dffc6628681c1aa16392e093e | refs/heads/master | 2020-04-12T00:27:40.291328 | 2018-12-19T02:59:27 | 2018-12-19T02:59:27 | 162,200,717 | 0 | 0 | null | 2019-01-04T17:58:51 | 2018-12-17T23:03:17 | Python | UTF-8 | Python | false | false | 1,245 | py | from django.db import models
from django.urls import reverse # Used to generate URLs by reversing the URL patterns
from django.contrib.auth.models import User
#class CurrentApplications(models.Model):
'''Model represting all current applications.'''
# Create your models here.
class Application(models.Model):
    '''Model representing a single job application tracked by a user.'''
    # Core descriptive fields.
    company = models.CharField(max_length = 50)
    position = models.CharField(max_length = 50)
    # Both dates are optional: an application may be drafted before it is
    # submitted, and may have no known deadline.
    date_applied = models.DateField(null=True, blank=True)
    deadline = models.DateField(null=True, blank=True)
    # SET_NULL keeps the application row if the owning user is deleted.
    owner = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)

    # (stored code, human-readable label) pairs for the pipeline stage below.
    APP_STATUS = (
        ('p', 'In Progress'),
        ('a', 'Applied'),
        ('i', 'Interviewing'),
        ('o', 'Offer'),
        ('d', 'Denied'),
        ('r', 'Remove'),
    )

    # Single-character stage code; defaults to "In Progress".
    status = models.CharField(
        max_length=1,
        choices=APP_STATUS,
        blank=True,
        default='p',
        help_text='Application status')

    class Meta:
        # Default queryset ordering: alphabetical by company name.
        ordering = ['company']

    def __str__(self):
        '''String for representing the Model object'''
        return f'{self.id} ({self.company} {self.position})'

    def get_absolute_url(self):
        '''Returns url to access a detail record for this application'''
        return reverse('application-detail', args=[str(self.id)])
| [
"nrdell3@gmail.com"
] | nrdell3@gmail.com |
e5bd7290b39a8424517b15b4e042703972bc4d2b | be29d94b112ac4fd687d84dcc0d40c4a1557687d | /utils/adni_utils.py | fb14daf62c174df94e75f2515f442e6d88c7ed8d | [] | no_license | Amerosa/Multimodal-Alzheimer-s | 10d5cefe974cbf7d7a4506b72c7c1c9fb8f06516 | 7118d4ce99f24aa12ca6da1f497d74c21d59af1f | refs/heads/master | 2023-01-01T09:12:48.493480 | 2020-10-21T22:34:17 | 2020-10-21T22:34:17 | 306,163,794 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | from utils.basic_utils import get_project_root
import csv
def parse_adni_subject(pth):
    """Split a standard ADNI ``.nii.gz`` file name and return its first two
    underscore-separated components: the subject id and the session id.
    """
    components = pth.split('_')
    return components[:2]
def generate_mappings():
    """Parse ``data/labels.tsv`` and build a dictionary that maps
    ``"<subject_id>_<session_id>"`` to a numerical encoding of the four
    diagnostic classes (CN, nMCI, cMCI, AD).
    """
    label_to_code = {'CN': 0, 'nMCI': 1, 'cMCI': 2, 'AD': 3}
    labels_file = get_project_root() / 'data/labels.tsv'
    subject_session_to_code = {}
    with open(labels_file) as handle:
        rows = csv.reader(handle, delimiter='\t')
        next(rows)  # skip the header row
        for subject_id, session_id, _, _, diagnosis in rows:
            key = f"{subject_id}_{session_id}"
            subject_session_to_code[key] = label_to_code[diagnosis]
    return subject_session_to_code
| [
"giuseppe.volpe.grossi@gmail.com"
] | giuseppe.volpe.grossi@gmail.com |
44d6d5ca913a59c3d9994a8daa7e405e4705e698 | 00c9495013a8f010ffcc9f47268869dabf03f28c | /config/settings/production.py | b12cd4ded508034ac72b0eb8f2fcfc0a041b8849 | [] | no_license | fishvi/xiaoyuanwen | 3d5cbbeeea64fffb751f52b92a7f186c614a0bd9 | 32b96e139b4ddbc089a5a9113864d7b815a538f2 | refs/heads/main | 2023-05-10T07:24:15.817206 | 2021-06-12T04:48:46 | 2021-06-12T04:48:46 | 374,678,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,166 | py | from .base import * # noqa
from .base import env

# Production-only overrides layered on top of config.settings.base.
# All secrets and host-specific values are read from the environment.

# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
# NOTE(review): the fallback below looks like an e-mail address rather than a
# hostname -- confirm the intended default production host.
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["fishvi@foxmail.com"])

# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405

# CACHES
# ------------------------------------------------------------------------------
# Redis-backed cache; uses database 0 of the configured Redis instance.
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": f'{env("REDIS_URL", default="redis://127.0.0.1:6379")}/0',
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
            # Mimicing memcache behavior.
            # http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
            "IGNORE_EXCEPTIONS": True,
        },
    }
}

# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
    "DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
    "DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)

# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
# Cache compiled templates in production (avoids per-request template loading).
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
    (
        "django.template.loaders.cached.Loader",
        [
            "django.template.loaders.filesystem.Loader",
            "django.template.loaders.app_directories.Loader",
        ],
    )
]

# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL", default="admin/")

# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_ENABLED
COMPRESS_ENABLED = env.bool("COMPRESS_ENABLED", default=True)
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_URL
COMPRESS_URL = STATIC_URL

# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
    "formatters": {
        "verbose": {
            "format": "%(levelname)s %(asctime)s %(module)s "
            "%(process)d %(thread)d %(message)s"
        }
    },
    "handlers": {
        "mail_admins": {
            "level": "ERROR",
            "filters": ["require_debug_false"],
            "class": "django.utils.log.AdminEmailHandler",
        },
        "console": {
            "level": "DEBUG",
            "class": "logging.StreamHandler",
            "formatter": "verbose",
        },
    },
    "root": {"level": "INFO", "handlers": ["console"]},
    "loggers": {
        "django.request": {
            "handlers": ["mail_admins"],
            "level": "ERROR",
            "propagate": True,
        },
        "django.security.DisallowedHost": {
            "level": "ERROR",
            "handlers": ["console", "mail_admins"],
            "propagate": True,
        },
    },
}

# Your stuff...
# ------------------------------------------------------------------------------
| [
"fishvi@foxmail.com"
] | fishvi@foxmail.com |
5756338cb6fc8c1265dcba6437dce7333023f4e4 | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/System/Windows/Forms/__init___parts/RichTextBoxSelectionTypes.py | 9924fd6d966d8eeaba9fa14927670259ceddad2d | [
"MIT"
] | permissive | shnlmn/Rhino-Grasshopper-Scripts | a9411098c5d1bbc55feb782def565d535b27b709 | 0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823 | refs/heads/master | 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | class RichTextBoxSelectionTypes(Enum,IComparable,IFormattable,IConvertible):
"""
Specifies the type of selection in a System.Windows.Forms.RichTextBox control.
enum (flags) RichTextBoxSelectionTypes,values: Empty (0),MultiChar (4),MultiObject (8),Object (2),Text (1)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Empty=None
MultiChar=None
MultiObject=None
Object=None
Text=None
value__=None
| [
"magnetscoil@gmail.com"
] | magnetscoil@gmail.com |
9091f98df3dff4ab938bd0ab9d306ef2b2ca9621 | f6f15809ac70089ef4cfb1ade40e2dc58d239f81 | /test/functional/data/invalid_txs.py | 1f19ffe59a0e3a5e593440e7030364022a6315d2 | [
"MIT"
] | permissive | lamyaim/bitgesell | fcc96f6765d3907ce923f411a1b2c6c4de9d55d6 | 64c24348f1ba8788fbffaf663b3df38d9b49a5d1 | refs/heads/master | 2023-04-30T08:16:40.735496 | 2020-12-10T05:23:08 | 2020-12-10T05:23:08 | 369,859,996 | 1 | 0 | MIT | 2021-05-22T16:50:56 | 2021-05-22T16:48:32 | null | UTF-8 | Python | false | false | 7,089 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Templates for constructing various sorts of invalid transactions.
These templates (or an iterator over all of them) can be reused in different
contexts to test using a number of invalid transaction types.
Hopefully this makes it easier to get coverage of a full variety of tx
validation checks through different interfaces (AcceptBlock, AcceptToMemPool,
etc.) without repeating ourselves.
Invalid tx cases not covered here can be found by running:
$ diff \
<(grep -IREho "bad-txns[a-zA-Z-]+" src | sort -u) \
<(grep -IEho "bad-txns[a-zA-Z-]+" test/functional/data/invalid_txs.py | sort -u)
"""
import abc
from test_framework.messages import CTransaction, CTxIn, CTxOut, COutPoint
from test_framework import script as sc
from test_framework.blocktools import create_tx_with_script, MAX_BLOCK_SIGOPS
from test_framework.script import (
CScript,
OP_CAT,
OP_SUBSTR,
OP_LEFT,
OP_RIGHT,
OP_INVERT,
OP_AND,
OP_OR,
OP_XOR,
OP_2MUL,
OP_2DIV,
OP_MUL,
OP_DIV,
OP_MOD,
OP_LSHIFT,
OP_RSHIFT
)
basic_p2sh = sc.CScript([sc.OP_HASH160, sc.hash160(sc.CScript([sc.OP_0])), sc.OP_EQUAL])
class BadTxTemplate(metaclass=abc.ABCMeta):
    """Allows simple construction of a certain kind of invalid tx. Base class to be subclassed.

    Subclasses override the class attributes below to describe how the node is
    expected to react, and implement get_tx() to build the invalid transaction.
    """
    # NOTE: the previous ``__metaclass__ = abc.ABCMeta`` attribute is the
    # Python 2 spelling and is a silent no-op on Python 3, so the class was
    # never actually abstract; declaring the metaclass in the class header
    # makes @abc.abstractmethod effective again.

    # The expected error code given by BGLd upon submission of the tx.
    reject_reason = ""

    # Only specified if it differs from mempool acceptance error.
    block_reject_reason = ""

    # Do we expect to be disconnected after submitting this tx?
    expect_disconnect = False

    # Is this tx considered valid when included in a block, but not for acceptance into
    # the mempool (i.e. does it violate policy but not consensus)?
    valid_in_block = False

    def __init__(self, *, spend_tx=None, spend_block=None):
        # Prefer the coinbase of the supplied block; otherwise spend the
        # explicitly given transaction.
        self.spend_tx = spend_block.vtx[0] if spend_block else spend_tx
        self.spend_avail = sum(o.nValue for o in self.spend_tx.vout)
        self.valid_txin = CTxIn(COutPoint(self.spend_tx.sha256, 0), b"", 0xffffffff)

    @abc.abstractmethod
    def get_tx(self, *args, **kwargs):
        """Return a CTransaction that is invalid per the subclass."""
        pass
class OutputMissing(BadTxTemplate):
    """Transaction with a valid input but an empty vout list."""
    reject_reason = "bad-txns-vout-empty"
    expect_disconnect = True

    def get_tx(self):
        tx = CTransaction()
        tx.vin.append(self.valid_txin)
        tx.calc_sha256()
        return tx
class InputMissing(BadTxTemplate):
    """Completely blank transaction: empty vin (and vout)."""
    reject_reason = "bad-txns-vin-empty"
    expect_disconnect = True

    # We use a blank transaction here to make sure
    # it is interpreted as a non-witness transaction.
    # Otherwise the transaction will fail the
    # "superfluous witness" check during deserialization
    # rather than the input count check.
    def get_tx(self):
        tx = CTransaction()
        tx.calc_sha256()
        return tx
# The following check prevents exploit of lack of merkle
# tree depth commitment (CVE-2017-12842)
class SizeTooSmall(BadTxTemplate):
    """Transaction below the minimum serialized size (policy-only rejection)."""
    reject_reason = "tx-size-small"
    expect_disconnect = False
    valid_in_block = True

    def get_tx(self):
        tx = CTransaction()
        tx.vin.append(self.valid_txin)
        # A single minimal anyone-can-spend output keeps the tx tiny.
        tx.vout.append(CTxOut(0, sc.CScript([sc.OP_TRUE])))
        tx.calc_sha256()
        return tx
class BadInputOutpointIndex(BadTxTemplate):
    """Transaction spending an out-of-range output index of a real tx."""
    # Won't be rejected - nonexistent outpoint index is treated as an orphan since the coins
    # database can't distinguish between spent outpoints and outpoints which never existed.
    reject_reason = None
    expect_disconnect = False

    def get_tx(self):
        # Point well past the last real output of the spend tx.
        num_indices = len(self.spend_tx.vin)
        bad_idx = num_indices + 100

        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.spend_tx.sha256, bad_idx), b"", 0xffffffff))
        tx.vout.append(CTxOut(0, basic_p2sh))
        tx.calc_sha256()
        return tx
class DuplicateInput(BadTxTemplate):
    """Transaction listing the exact same input twice."""
    reject_reason = 'bad-txns-inputs-duplicate'
    expect_disconnect = True

    def get_tx(self):
        tx = CTransaction()
        tx.vin.append(self.valid_txin)
        tx.vin.append(self.valid_txin)
        tx.vout.append(CTxOut(1, basic_p2sh))
        tx.calc_sha256()
        return tx
class NonexistentInput(BadTxTemplate):
    """Transaction spending a txid that does not exist (plus one valid input)."""
    reject_reason = None # Added as an orphan tx.
    expect_disconnect = False

    def get_tx(self):
        tx = CTransaction()
        # sha256 + 1 yields a txid that is almost surely unknown to the node.
        tx.vin.append(CTxIn(COutPoint(self.spend_tx.sha256 + 1, 0), b"", 0xffffffff))
        tx.vin.append(self.valid_txin)
        tx.vout.append(CTxOut(1, basic_p2sh))
        tx.calc_sha256()
        return tx
class SpendTooMuch(BadTxTemplate):
    """Transaction whose output value exceeds the available input value."""
    reject_reason = 'bad-txns-in-belowout'
    expect_disconnect = True

    def get_tx(self):
        # Spend one more satoshi than the inputs provide.
        return create_tx_with_script(
            self.spend_tx, 0, script_pub_key=basic_p2sh, amount=(self.spend_avail + 1))
class SpendNegative(BadTxTemplate):
    """Transaction with a negative output value."""
    reject_reason = 'bad-txns-vout-negative'
    expect_disconnect = True

    def get_tx(self):
        return create_tx_with_script(self.spend_tx, 0, amount=-1)
class InvalidOPIFConstruction(BadTxTemplate):
    """Transaction whose scriptSig is a run of unterminated OP_IF opcodes."""
    reject_reason = "mandatory-script-verify-flag-failed (Invalid OP_IF construction)"
    expect_disconnect = True
    valid_in_block = True

    def get_tx(self):
        # 0x64 is OP_NOTIF; 35 of them with no matching OP_ENDIF.
        return create_tx_with_script(
            self.spend_tx, 0, script_sig=b'\x64' * 35,
            amount=(self.spend_avail // 2))
class TooManySigops(BadTxTemplate):
    """Transaction whose output script packs MAX_BLOCK_SIGOPS sig operations."""
    reject_reason = "bad-txns-too-many-sigops"
    block_reject_reason = "bad-blk-sigops, out-of-bounds SigOpCount"
    expect_disconnect = False

    def get_tx(self):
        lotsa_checksigs = sc.CScript([sc.OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
        return create_tx_with_script(
            self.spend_tx, 0,
            script_pub_key=lotsa_checksigs,
            amount=1)
def getDisabledOpcodeTemplate(opcode):
    """ Creates disabled opcode tx template class"""
    def get_tx(self):
        tx = CTransaction()
        # NOTE(review): vin is an alias, not a copy -- this mutates
        # self.valid_txin's scriptSig in place before appending it.
        vin = self.valid_txin
        vin.scriptSig = CScript([opcode])
        tx.vin.append(vin)
        tx.vout.append(CTxOut(1, basic_p2sh))
        tx.calc_sha256()
        return tx

    # Build a uniquely named BadTxTemplate subclass for this opcode.
    return type('DisabledOpcode_' + str(opcode), (BadTxTemplate,), {
        'reject_reason': "disabled opcode",
        'expect_disconnect': True,
        'get_tx': get_tx,
        'valid_in_block' : True
    })
# Disabled opcode tx templates (CVE-2010-5137)
# One dynamically generated BadTxTemplate subclass per disabled opcode.
DisabledOpcodeTemplates = [getDisabledOpcodeTemplate(opcode) for opcode in [
    OP_CAT,
    OP_SUBSTR,
    OP_LEFT,
    OP_RIGHT,
    OP_INVERT,
    OP_AND,
    OP_OR,
    OP_XOR,
    OP_2MUL,
    OP_2DIV,
    OP_MUL,
    OP_DIV,
    OP_MOD,
    OP_LSHIFT,
    OP_RSHIFT]]
def iter_all_templates():
    """Iterate through all bad transaction template types.

    Returns every direct subclass of BadTxTemplate, which includes the
    dynamically generated DisabledOpcode_* classes.
    """
    return BadTxTemplate.__subclasses__()
| [
"wuemma@protonmail.com"
] | wuemma@protonmail.com |
28d300d1315fa0c773c0509994b4837170cba797 | a9d23a63c099b8eab64ec9b264ac40935ce44c7b | /textutils/textutils/asgi.py | 7be58b29927d86c520604b4a3364635047c88f42 | [] | no_license | Hibasajid96/DjangoTextUtils | e178bbb44a2cb32238398dd4a0445999a4d245a1 | d2c1b3fc6bd0c4c4ec64fa2dc202f7b4dd2e90d2 | refs/heads/master | 2022-10-04T21:36:33.543383 | 2020-06-05T11:51:59 | 2020-06-05T11:51:59 | 269,154,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | """
ASGI (Asynchronous Server Gateway Interface) config for the textutils project.

It exposes the ASGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'textutils.settings')

application = get_asgi_application()
| [
"hibasajid96@gmail.com"
] | hibasajid96@gmail.com |
acb591fee17c1c080e2096b2524f5c892e639042 | 6a865d085519fc3f667317cd92de8332a2edf2cf | /sample-apps/segmentation_spleen_scribbles/lib/scribbles.py | 23382e99349bb8ac367e841b6e0b8920c96d43dd | [
"Apache-2.0"
] | permissive | LaplaceKorea/MONAILabel | bcd82b56a11d52d25faeb77dda1e3dcd4eb25898 | 5520bda8eedd726bba3172122e1cce416634b63d | refs/heads/main | 2023-07-18T15:21:38.383660 | 2021-08-27T11:25:26 | 2021-08-27T11:25:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,154 | py | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from monai.transforms import AddChanneld, Compose, LoadImaged, ScaleIntensityRanged, Spacingd
from monailabel.interfaces.tasks import InferTask, InferType
from monailabel.utils.others.post import BoundingBoxd, Restored
from .transforms import (
ApplyCRFOptimisationd,
ApplyGraphCutOptimisationd,
ApplyISegGraphCutPostProcd,
ApplySimpleCRFOptimisationd,
MakeISegUnaryd,
SoftenProbSoftmax,
)
class SpleenPostProc(InferTask):
    """
    Defines a generic post processing task for Spleen segmentation.

    Concrete subclasses supply the actual optimisation pipeline via
    ``inferer()``; this base class only provides the shared pre/post
    transform pipelines.
    """

    def __init__(
        self,
        dimension,
        description,
    ):
        # No trained network is involved here: the task only refines an
        # existing segmentation using user scribbles.
        super().__init__(
            path=None, network=None, labels=None, type=InferType.SCRIBBLES, dimension=dimension, description=description
        )

    def pre_transforms(self):
        # Loads "image" (volume), "logits" (model output) and "label"
        # (user scribbles), then resamples and normalises intensities.
        return [
            LoadImaged(keys=["image", "logits", "label"]),
            AddChanneld(keys=["image", "label"]),
            # at the moment optimisers are bottleneck taking a long time,
            # therefore scaling non-isotropic with big spacing
            Spacingd(keys=["image", "logits"], pixdim=[2.5, 2.5, 5.0]),
            Spacingd(keys=["label"], pixdim=[2.5, 2.5, 5.0], mode="nearest"),
            ScaleIntensityRanged(keys="image", a_min=-300, a_max=200, b_min=0.0, b_max=1.0, clip=True),
        ]

    def post_transforms(self):
        # Restore "pred" to the original image space and report its bbox.
        return [
            Restored(keys="pred", ref_image="image"),
            BoundingBoxd(keys="pred", result="result", bbox="bbox"),
        ]

    def inferer(self):
        # Subclasses must override this with the concrete optimiser pipeline.
        raise NotImplementedError("inferer not implemented in base post proc class")
class SpleenISegCRF(SpleenPostProc):
    """
    Defines ISeg+CRF based post processing task for Spleen segmentation from the following paper:

    Wang, Guotai, et al. "Interactive medical image segmentation using deep learning with image-specific fine tuning."
    IEEE transactions on medical imaging 37.7 (2018): 1562-1573. (preprint: https://arxiv.org/pdf/1710.04043.pdf)

    This task takes as input 1) original image volume 2) logits from model and 3) scribbles from user
    indicating corrections for initial segmentation from model. User-scribbles are incorporated using
    Equation 7 on page 4 of the paper.

    MONAI's CRF layer is used to optimise Equation 5 from the paper, where unaries come from Equation 7
    and pairwise is the original input volume.
    """

    def __init__(
        self,
        dimension=3,
        description="A post processing step with ISeg + MONAI's CRF for Spleen segmentation",
    ):
        super().__init__(dimension, description)

    def pre_transforms(self):
        # Reuse the shared pipeline from SpleenPostProc instead of duplicating
        # it verbatim, then additionally soften the logits into a probability
        # map ("prob") that MakeISegUnaryd consumes below.
        return super().pre_transforms() + [
            SoftenProbSoftmax(logits="logits", prob="prob"),
        ]

    def inferer(self):
        return Compose(
            [
                # unary term maker (Equation 7); note it reads the softened
                # probabilities ("prob"), not the raw logits
                MakeISegUnaryd(
                    image="image",
                    logits="prob",
                    scribbles="label",
                    unary="unary",
                    scribbles_bg_label=2,
                    scribbles_fg_label=3,
                ),
                # optimiser (Equation 5), run on CPU
                ApplyCRFOptimisationd(unary="unary", pairwise="image", post_proc_label="pred", device="cpu"),
            ]
        )
class SpleenISegGraphCut(SpleenPostProc):
    """
    Defines ISeg+GraphCut based post processing task for Spleen segmentation from the following paper:

    Wang, Guotai, et al. "Interactive medical image segmentation using deep learning with image-specific fine tuning."
    IEEE transactions on medical imaging 37.7 (2018): 1562-1573. (preprint: https://arxiv.org/pdf/1710.04043.pdf)

    This task takes as input 1) original image volume 2) logits from model and 3) scribbles from user
    indicating corrections for initial segmentation from model. User-scribbles are incorporated using
    Equation 7 on page 4 of the paper.

    SimpleCRF's GraphCut MaxFlow is used to optimise Equation 5 from the paper,
    where unaries come from Equation 7 and pairwise is the original input volume.
    """

    def __init__(
        self,
        dimension=3,
        description="A post processing step with ISeg + SimpleCRF's GraphCut for Spleen segmentation",
    ):
        super().__init__(dimension, description)

    def inferer(self):
        return Compose(
            [
                # unary term maker (Equation 7); uses the raw logits directly
                MakeISegUnaryd(
                    image="image",
                    logits="logits",
                    scribbles="label",
                    unary="unary",
                    scribbles_bg_label=2,
                    scribbles_fg_label=3,
                ),
                # optimiser (Equation 5)
                ApplyGraphCutOptimisationd(
                    unary="unary",
                    pairwise="image",
                    post_proc_label="pred",
                    lamda=10.0,
                    sigma=15.0,
                ),
            ]
        )
class SpleenInteractiveGraphCut(SpleenPostProc):
    """
    Defines ISeg+GraphCut based post processing task for Spleen segmentation from the following paper:

    Wang, Guotai, et al. "Interactive medical image segmentation using deep learning with image-specific fine tuning."
    IEEE transactions on medical imaging 37.7 (2018): 1562-1573. (preprint: https://arxiv.org/pdf/1710.04043.pdf)

    This task takes as input 1) original image volume 2) logits from model and 3) scribbles from user
    indicating corrections for initial segmentation from model. User-scribbles are incorporated using
    Equation 7 on page 4 of the paper.

    SimpleCRF's interactive GraphCut MaxFlow is used to optimise Equation 5 from the paper,
    where unaries come from Equation 7 and pairwise is the original input volume.
    """

    def __init__(
        self,
        dimension=3,
        description="A post processing step with SimpleCRF's Interactive ISeg GraphCut for Spleen segmentation",
    ):
        super().__init__(dimension, description)

    def inferer(self):
        # Single fused transform: unary construction and GraphCut
        # optimisation happen inside ApplyISegGraphCutPostProcd.
        return Compose(
            [
                ApplyISegGraphCutPostProcd(
                    image="image",
                    logits="logits",
                    scribbles="label",
                    post_proc_label="pred",
                    scribbles_bg_label=2,
                    scribbles_fg_label=3,
                    lamda=10.0,
                    sigma=15.0,
                ),
            ]
        )
class SpleenISegSimpleCRF(SpleenPostProc):
    """
    Defines ISeg+SimpleCRF's CRF based post processing task for Spleen segmentation from the following paper:

    Wang, Guotai, et al. "Interactive medical image segmentation using deep learning with image-specific fine tuning."
    IEEE transactions on medical imaging 37.7 (2018): 1562-1573. (preprint: https://arxiv.org/pdf/1710.04043.pdf)

    This task takes as input 1) original image volume 2) logits from model and 3) scribbles from user
    indicating corrections for initial segmentation from model. User-scribbles are incorporated using
    Equation 7 on page 4 of the paper.

    SimpleCRF's CRF is used to optimise Equation 5 from the paper,
    where unaries come from Equation 7 and pairwise is the original input volume.
    """

    def __init__(
        self,
        dimension=3,
        description="A post processing step with ISeg + SimpleCRF's CRF for Spleen segmentation",
    ):
        super().__init__(dimension, description)

    def inferer(self):
        return Compose(
            [
                # unary term maker (Equation 7)
                MakeISegUnaryd(
                    image="image",
                    logits="logits",
                    scribbles="label",
                    unary="unary",
                    scribbles_bg_label=2,
                    scribbles_fg_label=3,
                ),
                # optimiser (Equation 5)
                ApplySimpleCRFOptimisationd(
                    unary="unary",
                    pairwise="image",
                    post_proc_label="pred",
                ),
            ]
        )
| [
"noreply@github.com"
] | LaplaceKorea.noreply@github.com |
e5d6877f5ad971d0129f929e76feae87e1c15651 | 9b37fdf52cb2eb9795bb7cead33dd0ae053fa80f | /catkin_ws/src/opencv_img_processing/src/opencv3_parse_camera_data_colorspaces.py | e7486324a6b91977095b29a3362d872fc4810eba | [
"MIT"
] | permissive | a-yildiz/ROS-Simple-Sample-Packages | 9d3e01371e750a9489eb20487f524b380e34e0f7 | eb6b18adcd1fe26e2b5e644c42922e8102867cd9 | refs/heads/master | 2023-08-22T17:18:52.148201 | 2021-09-19T07:29:43 | 2021-09-19T07:29:43 | 390,817,521 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,583 | py | #!/usr/bin/env python3
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import cv2
"""
Run this script/node after `roslaunch opencv_img_processing turtlebot3_and_cone.launch`.
When you look at `rostopic list`, you can see a topic named `/camera/rgb/image_raw`.
After inspecting `rostopic type /camera/rgb/image_raw` we observe that it is `sensor_msgs/Image`.
We will convert (a.k.a. bridge) camera raw image into visuals suited for OpenCV. Hence, we import `CvBridge`.
We use `cv2` for data-processing.
"""
class RobotCamera():
    """Subscribe to the robot's RGB camera topic and display several colour-space views."""

    def __init__(self):
        # Register the ROS node, hook the raw image topic to our callback,
        # build the ROS<->OpenCV bridge, then block in the ROS event loop.
        rospy.init_node("camera_processing")
        rospy.Subscriber("/camera/rgb/image_raw", Image, self.cameraCallback)
        self.bridge = CvBridge()
        rospy.spin()

    def cameraCallback(self, raw_input):
        # Convert the ROS image message into an OpenCV BGR frame.
        bgr_frame = self.bridge.imgmsg_to_cv2(raw_input, "bgr8")
        gray_frame = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2GRAY)
        hsv_frame = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2HSV)
        # Separate the BGR channels; cv2.merge((b, g, r)) would rebuild the original.
        b, g, r = cv2.split(bgr_frame)
        # Show every view in its own window.
        for title, frame in (
            ("Robot Camera BGR", bgr_frame),
            ("Robot Camera Gray", gray_frame),
            ("Robot Camera HSV", hsv_frame),
            ("Robot Camera Blue", b),
            ("Robot Camera Green", g),
            ("Robot Camera Red", r),
        ):
            cv2.imshow(title, frame)
        cv2.waitKey(1)  # refresh every 1 ms.
# Instantiating the class starts the node and blocks inside rospy.spin().
if __name__ == "__main__":
    RobotCamera()
"yildiz@stanford.edu"
] | yildiz@stanford.edu |
32bb579c27b7605eff988140489dd7395c378388 | 69900305fad4cb9d3a4192c74e51349b411c08fc | /其他/15_二进制中的1的个数.py | ffec0763710e022d90e66c613836dc8b62223cb0 | [] | no_license | MingjunGuo/python__offer | d0b9b766ef02e72e4aced37f47127c52ecf24092 | c6e50be39292f8eefd7d3312ac5d0141bbe06f5b | refs/heads/master | 2020-04-16T06:43:58.574485 | 2019-01-12T07:13:03 | 2019-01-12T07:13:03 | 165,358,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # -*- coding:utf-8 -*-
class Solution:
    def NumberOf1(self, n):
        """Count the 1-bits in the 32-bit two's-complement representation of n.

        Negative inputs are first masked down to their unsigned 32-bit form so
        the bit-clearing loop terminates.
        """
        value = n & 0xffffffff if n < 0 else n
        count = 0
        while value:
            value &= value - 1  # Kernighan's trick: clear the lowest set bit
            count += 1
        return count
"mingjun1000@gmail.com"
] | mingjun1000@gmail.com |
0432710ac0c48dbb2dae12113b61c0ce92474132 | 33ea1916b6e4574caea36ab65bffc214a613a57c | /test_platform/exta_apps/xadmin/plugins/refresh.py | 66e4a318fa0d41ad3ee94a7cdb5007a72a87c108 | [] | no_license | alvinsss/apitest_platform | fd1303d141c8c7fe6bc667684c52ea16e8e86229 | 86b171926041743e2cd3ed955032360270f7483c | refs/heads/master | 2020-04-21T20:59:40.770741 | 2019-10-31T10:37:54 | 2019-10-31T10:37:54 | 169,864,327 | 15 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | # coding=utf-8
from django.template import loader
from xadmin.plugins.utils import get_context_dict
from xadmin.sites import site
from xadmin.views import BaseAdminPlugin, ListAdminView
# Query-string parameter that selects the auto-refresh interval.
REFRESH_VAR = '_refresh'
class RefreshPlugin(BaseAdminPlugin):
    """
    xadmin list-view plugin that adds an auto-refresh toolbar.

    It is activated by setting `refresh_times` (a list of refresh intervals) on
    the admin view; while the list is empty the plugin is inert.
    """
    refresh_times = []
    # Media
    def get_media(self, media):
        # Only ship the refresh JS when refreshing is configured AND currently requested.
        if self.refresh_times and self.request.GET.get(REFRESH_VAR):
            media = media + self.vendor('xadmin.plugin.refresh.js')
        return media
    # Block Views
    def block_top_toolbar(self, context, nodes):
        if self.refresh_times:
            current_refresh = self.request.GET.get(REFRESH_VAR)
            context.update({
                'has_refresh': bool(current_refresh),
                # URL with the refresh parameter stripped (the "stop refreshing" link).
                'clean_refresh_url': self.admin_view.get_query_string(remove=(REFRESH_VAR,)),
                'current_refresh': current_refresh,
                # One toolbar menu entry per configured interval.
                'refresh_times': [{
                    'time': r,
                    'url': self.admin_view.get_query_string({REFRESH_VAR: r}),
                    'selected': str(r) == current_refresh,
                } for r in self.refresh_times],
            })
            nodes.append(loader.render_to_string('xadmin/blocks/model_list.top_toolbar.refresh.html',
                                                 get_context_dict(context)))
# Attach the plugin to every model list view.
site.register_plugin(RefreshPlugin, ListAdminView)
| [
"6449694@qq.com"
] | 6449694@qq.com |
d4b4b0b2f8c115e61a51e233ebc4e6909e86ff12 | 823e69d6685f75c88d400d4539c02ae576bd730f | /GENDA - Padroes/Singleton/Singleton.py | ebe18c75ca7f880ece05cc58d32101a36d513768 | [] | no_license | PSFREITASUEA/padroes-de-projeto | 8c1be86d903a04fa37a5657aad305750ecb0ca10 | 5398214ec44ffe35e7b67b73adde498f73f6de4b | refs/heads/main | 2023-06-12T21:59:24.678112 | 2021-07-06T23:10:48 | 2021-07-06T23:10:48 | 381,531,134 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 880 | py | import random
import hashlib
class Singleton(object):
    """Base class guaranteeing one shared instance per concrete subclass."""
    instances = {}

    def __new__(cls, *args, **kwargs):
        # Create (and remember) the instance only on the first construction;
        # every later call hands back the cached object.
        if cls not in cls.instances:
            created = super(Singleton, cls).__new__(cls, *args, **kwargs)
            cls.instances[cls] = created
            cls.hash = ""
            cls.int = 1
        return cls.instances[cls]
class MyClass(Singleton):
    """Singleton subclass that generates one shared random hash on first construction."""

    def GiveEachInstanceAUniqueValue(self):
        # Flag the shared state as initialised, then build a random SHA-1 digest.
        self.int = 2
        seed = random.randint(0, 1000)
        digest = hashlib.sha1(b'%d' % seed)
        return digest.hexdigest()

    def __init__(self):
        # Only the very first construction generates (and prints) the hash;
        # subsequent constructions see self.int == 2 and do nothing.
        if self.int == 1:
            self.hash = str(self.GiveEachInstanceAUniqueValue())
            print(self.hash)
# All four constructions return the very same singleton instance.
a = MyClass()
b = MyClass()
c = MyClass()
d = MyClass()
# id() equality confirms the four names alias one object, so the four
# prints below all emit the identical hash.
if id(a) == id(b) == id(c) == id(d):
    print(a.hash)
    print(b.hash)
    print(c.hash)
    print(d.hash)
| [
"psdsfj.snf20@uea.edu.br"
] | psdsfj.snf20@uea.edu.br |
f146849d951a5b88e50b16b8ea8fe901f7de6f8f | a934ecf1f08d0254e5e272db5ab0e87122ad9b8c | /home/views.py | 0ece1d3c9c4ec21c6f4aea39b0db24d3dea19013 | [
"MIT"
] | permissive | wsoliveira/borsocontrolo | 7957415099c5288d6e2d45fc7c9735237644b063 | 61a4e2aac738a766b6919e30c08fc967fe96fb40 | refs/heads/master | 2023-08-16T22:25:57.622831 | 2020-07-06T18:25:52 | 2020-07-06T18:25:52 | 255,434,213 | 0 | 0 | MIT | 2021-09-22T18:51:55 | 2020-04-13T20:26:02 | JavaScript | UTF-8 | Python | false | false | 226 | py | from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def home(request):
    """Render the landing page; @login_required sends anonymous users to the login view."""
    return render(request, 'home.html')
| [
"ws.oliveira@gmail.com"
] | ws.oliveira@gmail.com |
22afe83259f60017e1dc9fe21c5ac9a3a81184ba | 77ed8744975cdf7d26396a705590a3b58c440fe1 | /dovecollector/main_app/admin.py | 432981b0788f2ed4c5c6707e802cff3c4de0eac8 | [] | no_license | raihanmorshed/dovecollector | 801de49484e174c80073116ed3950f3755f2046f | eb726d1e8bf8becef3a569dcef22a2c124fefe02 | refs/heads/master | 2022-11-21T21:48:36.241493 | 2020-07-10T01:40:49 | 2020-07-10T01:40:49 | 277,176,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from django.contrib import admin
from .models import Dove, Feeding
# Register your models here.
admin.site.register(Dove)     # make Dove records editable in the Django admin
admin.site.register(Feeding)  # make Feeding records editable in the Django admin
"raihanmorshed@Raihans-MacBook-Air.local"
] | raihanmorshed@Raihans-MacBook-Air.local |
93176c49b6e7643b4b231a3331904fc3cddc78ba | 4c1434ee17c203c007e8dcd0f525386028fed86b | /driver.py | 76a5b6c5813b1e81b3088d64389dcc1dabc19617 | [] | no_license | Heislandmine/CrossdresserFromXtube | ad321cc7b468e9e46755b2a72c04bba93f60e8f6 | 7357eb64843be851ff11740d8a127a24d23ccfe5 | refs/heads/master | 2022-11-12T20:46:31.662725 | 2020-07-11T11:34:02 | 2020-07-11T11:34:02 | 272,945,636 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | from selenium import webdriver
class Driver:
    """Holds a headless Chrome session obtained from a remote Selenium server."""

    def __init__(self):
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument("--headless")
        # Connect to the Selenium Server's remote WebDriver endpoint.
        self.driver = webdriver.Remote(
            command_executor="http://localhost:4444/wd/hub",
            desired_capabilities=chrome_options.to_capabilities(),
            options=chrome_options,
        )
| [
"heislandmine@protonmail.com"
] | heislandmine@protonmail.com |
f27c50dc81bfd9c7856aaa1f0f9af9ce8f819815 | 1596ebabdc32196d50e2cd9f776b0aaf460f28d5 | /oldScripts/20181002_bgSub_ClusterTrack_ImStack.py | 6dd96a66c179e24a1c6f1bf36102662ced5f48f5 | [
"MIT"
] | permissive | crackmech/flyclimb | 1394a81ef7fae98c13eaadc18a36543a378571c1 | 551621d1d2747d22b407a6b640d7ccaf680b53e5 | refs/heads/master | 2021-04-26T22:38:34.720421 | 2019-09-21T08:30:51 | 2019-09-21T08:30:51 | 124,124,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,438 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 14 18:46:57 2017
@author: aman
"""
import cv2
import os
import numpy as np
import re
from datetime import datetime
import Tkinter as tk
import tkFileDialog as tkd
import multiprocessing as mp
import time
import glob
#import trackpy as tp
import random
import csv
import itertools
from sklearn import cluster
flyParams = cv2.SimpleBlobDetector_Params()
flyParams.blobColor = 0
flyParams.minThreshold = 5
flyParams.maxThreshold = 240#120 120 for original image, 250 for bg subtracted images
flyParams.filterByArea = True
flyParams.filterByCircularity = True
flyParams.minCircularity = 0
flyParams.filterByConvexity = False
flyParams.filterByInertia = False
flyParams.minArea = 200# 200 for flyClimbing, 1000 for fly walking
flyParams.maxArea = 8000
# Create a detector with the parameters
ver = (cv2.__version__).split('.')
if int(ver[0]) < 3 :
detector = cv2.SimpleBlobDetector(flyParams)
else :
detector = cv2.SimpleBlobDetector_create(flyParams)
nImThresh = 100# if number of images in a folder is less than this, then the folder is not processed
imgDatafolder = 'imageData'
def present_time():
    """Return the current local time formatted as YYYYMMDD_HHMMSS."""
    return datetime.now().strftime('%Y%m%d_%H%M%S')
def natural_sort(l):
    """Sort strings so embedded numbers compare numerically (e.g. file2 < file10)."""
    def sort_key(text):
        # Split into digit and non-digit runs; digits compare as ints,
        # everything else case-insensitively.
        return [int(piece) if piece.isdigit() else piece.lower()
                for piece in re.split('([0-9]+)', text)]
    return sorted(l, key=sort_key)
def getFolder(initialDir):
    '''
    Pop up a Tk directory-selection dialog (starting at initialDir) and return
    the chosen directory path with a trailing '/' appended.
    '''
    root = tk.Tk()
    initialDir = tkd.askdirectory(parent=root,
                                initialdir = initialDir, title='Please select a directory')
    # Destroy the hidden Tk root window so no empty window lingers.
    root.destroy()
    return initialDir+'/'
def getDirList(folder):
    """Return naturally sorted full paths of the immediate subdirectories of `folder`."""
    subdirs = []
    for entry in os.listdir(folder):
        full_path = os.path.join(folder, entry)
        if os.path.isdir(full_path):
            subdirs.append(full_path)
    return natural_sort(subdirs)
def random_color():
    """Return a random colour tuple; each channel is a multiple of 32 in [0, 224]."""
    levels = list(range(0, 255, 32))
    return tuple(random.choice(levels) for _ in range(3))
# Colour palette used to draw tracked leg tips / cluster labels.
# The first two assignments are immediately superseded by the fixed palette below,
# which keeps colours deterministic across runs.
colors = [(0,200,200),(200,0,200),(200,200,0),(150,0,0),(0,0,200),(200,200,255)]
colors = [random_color() for x in xrange(1000)]
colors = \
[(64, 96, 32), (96, 0, 160), (96, 128, 32), (128, 192, 224), (128, 32, 0), (0, 224, 64),\
(224, 96, 0), (160, 0, 64), (32, 32, 64), (160, 192, 224), (160, 64, 96), (160, 96, 64),
(224, 160, 224), (192, 96, 128), (128, 160, 64), (192, 32, 192), (160, 96, 32), (32, 96, 32),
(32, 128, 96), (224, 32, 96), (128, 0, 160), (64, 224, 32), (32, 64, 32), (192, 96, 224),
(0, 192, 0), (0, 32, 0), (128, 96, 224), (32, 224, 64), (64, 32, 64), (224, 128, 32),
(32, 192, 96), (128, 96, 128), (32, 64, 224), (160, 160, 64), (32, 32, 160), (128, 192, 128),
(128, 128, 96), (192, 0, 32), (64, 192, 224), (64, 32, 128), (96, 32, 160), (160, 160, 32),
(224, 224, 96), (224, 192, 224), (96, 0, 64), (224, 224, 128), (32, 224, 128), (64, 64, 128),
(64, 64, 192), (64, 64, 64), (64, 192, 224), (96, 128, 64), (192, 64, 160), (96, 64, 0),
(192, 32, 0), (192, 96, 96), (192, 224, 0), (192, 224, 128), (224, 64, 0), (0, 96, 192)]
# One-off snippet used to dump the palette to CSV (kept for reference):
#csvOutFile = '/media/aman/data/thesis/colorPalette_20181004.csv'
#with open(csvOutFile, "wb") as f:
#    writer = csv.writer(f)
#    writer.writerows(colors)
def createTrack(trackData, img):
    '''
    Draw the fly's trajectory onto `img`.

    trackData: (nFrames, 2) array of x,y centroids.
    The trajectory is colour-coded by frame index (blue ramps up then down,
    green fades out, red ramps up) and every 100th frame is annotated with
    its index.  Returns the same `img` array, which can be saved or viewed
    independently of this function.
    '''
    #img = np.ones((imgShape[0], imgShape[1], 3), dtype = 'uint8')
    # Per-frame colour ramps (note: for an even number of frames `blue` has one
    # extra element, which is harmless since it is indexed by frame number).
    blue = np.hstack((np.linspace(0, 255, num = len(trackData)/2),np.linspace(255, 0, num = (len(trackData)/2)+1)))
    green = np.linspace(255, 0, num = len(trackData))
    red = np.linspace(0, 255, num = len(trackData))
    cv2.putText(img,'Total frames: '+str(len(trackData)), (10,30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0,255,255))
    for i in xrange(1,len(trackData)):
        cv2.circle(img,(int(trackData[i,0]), int(trackData[i,1])), 2, (blue[i], green[i], red[i]), thickness=2)#draw a circle on the detected body blobs
    # Second pass so the frame-number labels sit on top of the circles.
    for i in xrange(1,len(trackData)):
        if i%100==0:
            cv2.putText(img,'^'+str(i), (int(trackData[i,0]), int(trackData[i,1])), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0,255,255))
    #cv2.imshow('track', img); cv2.waitKey(); cv2.destroyAllWindows()
    return img
def getTrackData(imStack, Blobparams, blurParams):
    '''
    Detect the fly blob in every frame of `imStack` (nFrames x H x W uint8)
    and return an (nFrames, 2) array of x,y centroids; frames with no
    detection keep the initial (0, 0).

    Blobparams: cv2.SimpleBlobDetector_Params; blurParams: (kernelSize, sigma)
    for the Gaussian blur applied before detection.
    '''
    nFrames = imStack.shape[0]
    # SimpleBlobDetector construction differs between OpenCV 2.x and 3.x+.
    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3 :
        detector = cv2.SimpleBlobDetector(Blobparams)
    else : 
        detector = cv2.SimpleBlobDetector_create(Blobparams)
    trackData = np.zeros((nFrames,2))
    kernel, sigma = blurParams
    for f in xrange(nFrames):
        im = imStack[f]
        keypoints = detector.detect(cv2.GaussianBlur(im, (kernel, kernel), sigma))
        kp = None
        try:
            # If several keypoints exist, the last one wins.
            for kp in keypoints:
                trackData[f] = (kp.pt[0],kp.pt[1])
        except:
            pass
    return trackData
def getContours((idx, im, contourParams, blurParams)):
    '''
    Blur, threshold and contour one frame; return [idx, largestContour].

    The frame id `idx` is passed through so results of pool.map can be matched
    back to frames.  Returns [idx, []] when no contour is found.
    (Python 2 tuple-parameter syntax: pool.map supplies one zipped tuple per call.)
    '''
    kernel, sigma = blurParams
    #print idx, im.shape, contourParams, blurParams
    ret, th = cv2.threshold(cv2.GaussianBlur(im, (kernel,kernel), sigma), contourParams['threshLow'], contourParams['threshHigh'],cv2.THRESH_BINARY)
    # Invert so the (dark) fly becomes the foreground for findContours.
    th = cv2.bitwise_not(th)
    # findContours return signature differs between OpenCV 2.x and 3.x.
    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3 :
        contours, hierarchy = cv2.findContours(th, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
    else : 
        im2, contours, hierarchy = cv2.findContours(th, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
    try:
        # Keep only the largest contour (assumed to be the fly body).
        contours = max(contours, key = cv2.contourArea)
#        if not contourParams['minCntArea']<=cv2.contourArea(x)<=contourParams['maxCntArea']:
#            contours = []
    except:
        contours = []
    return [idx, contours]
def getContourData(imStack, fList, contourParams, blurParams, pool):
    '''
    Fit an ellipse to the fly in every frame of `imStack` (in parallel).

    Returns a list of [frameId, ellipse] pairs where ellipse is the
    cv2.fitEllipse output ((cx, cy), (minorAxis, majorAxis), angle), or the
    string 'noContourDetected' when the frame had no usable contour
    (fitEllipse needs at least 5 contour points).
    '''
#    imgStack = np.array(pool.map(imRead, flist), dtype=np.uint8)
#    poolArgList = itertools.izip(flist, itertools.repeat(params), np.arange(len(flist)))
#    imgWithCnt = pool.map(imReadNCnt, poolArgList)
    poolArgList = itertools.izip(fList, imStack, itertools.repeat(contourParams), itertools.repeat(blurParams))
    contours = pool.map(getContours, poolArgList)
    trackData = []
    for idx,cnt in enumerate(contours):
        if len(cnt[1])>=5:
            try:
                trackData.append([cnt[0], cv2.fitEllipse(cnt[1])])
            except:
                #print ('no contour detected in frame# %s'%cnt[0])
                trackData.append([cnt[0], 'noContourDetected'])
        else:
            #print ('no contour detected) in frame# %s'%cnt[0])
            trackData.append([cnt[0], 'noContourDetected'])
    return trackData
#    Earlier, single-process implementation kept for reference:
#    trackData = []
#    for idx, im in enumerate(imStack):
#        frId = flist[idx]
#        contours = getContours((frId, im, contourParams, blurParams))
#        if len(contours[1])!=0:
#            trackData.append([contours[0], cv2.fitEllipse(contours[1])])
#        else:
#            print ('no contour detected in frame# %d'%frId)
#            trackData.append([contours[0], 'noContourDetected'])
#    cv2.destroyAllWindows()
#    return trackData
def cropImstack(imStack, trackData, heightCropbox, widthCropbox, blurParams, ratTailParams):
    '''
    Crop a (2*heightCropbox x 2*widthCropbox) window around each tracked centroid
    and return a binarised "thin detail" mask per frame.

    For each frame a [frameIndex, mask] pair is appended; the mask is the XOR of
    the thresholded crop with its morphologically opened version, i.e. the thin
    structures (leg tips) removed by erosion/dilation.  Centroids too close to
    the border yield [frameIndex, 'NoCroppedImage']; frames whose centroid entry
    cannot be unpacked (e.g. 'noContourDetected' placeholders) are skipped, so
    callers must rely on the stored frame index.

    blurParams: (gaussianKernelSize, sigma); ratTailParams: (threshold,
    nIterations, erodeKernel) for the binarisation + morphology.
    '''
    kernel, sigma = blurParams
    thresh, nIterations, erodeKernel = ratTailParams
    ims = []
    for i in range(imStack.shape[0]):
        im = imStack[i]
        try:
            x, y = trackData[i]
            if (heightCropbox<=y<=imStack.shape[1]-heightCropbox and widthCropbox<=x<=imStack.shape[2]-widthCropbox):
                pts = [int(y)-heightCropbox, int(y)+heightCropbox, int(x)-widthCropbox,int(x)+widthCropbox]
                im_cropped = im[pts[0]:pts[1], pts[2]:pts[3]]
                # BUGFIX: use the kernel size supplied via blurParams instead of the
                # module-level global `kernelSize` (the two happened to agree here).
                _,th = cv2.threshold(cv2.GaussianBlur(im_cropped, (kernel, kernel), sigma), thresh, 255,cv2.THRESH_BINARY)
                th = cv2.bitwise_not(th)
                erosion = cv2.erode(th,erodeKernel,iterations = nIterations)
                dilation = cv2.dilate(erosion, erodeKernel, iterations = nIterations)
                ims.append([i,np.bitwise_xor(th, dilation)])
            else:
                ims.append([i, 'NoCroppedImage'])
        except Exception:
            # Bad/missing centroid entry for this frame (e.g. a placeholder
            # string that cannot be unpacked) -- deliberately skipped.
            pass
    return ims
def cropImstackGray(imStack, trackData, heightCropbox, widthCropbox):
    '''
    Crop a (2*heightCropbox x 2*widthCropbox) grayscale window around each
    tracked centroid.

    Returns a list of [frameIndex, crop] pairs; centroids too close to the
    border yield [frameIndex, 'NoCroppedImage'].  Frames whose centroid entry
    cannot be unpacked (e.g. 'noContourDetected' placeholders) are skipped --
    mirroring cropImstack so the two outputs stay frame-aligned.
    '''
    ims = []
    for i in range(imStack.shape[0]):
        im = imStack[i]
        try:
            x, y = trackData[i]
            if (heightCropbox<=y<=imStack.shape[1]-heightCropbox and widthCropbox<=x<=imStack.shape[2]-widthCropbox):
                pts = [int(y)-heightCropbox, int(y)+heightCropbox, int(x)-widthCropbox,int(x)+widthCropbox]
                ims.append([i, im[pts[0]:pts[1], pts[2]:pts[3]]])
            else:
                ims.append([i, 'NoCroppedImage'])
        except Exception:
            # Bad/missing centroid entry for this frame -- deliberately skipped.
            pass
    return ims
def saveCroppedIms(croppedStack, ImStack, saveDir, extension, hCropbox):
    '''
    Write each cropped frame and its original counterpart to disk.

    Creates two sibling directories ('<saveDir>_cropped/' and
    '<saveDir>_original_subIms/') and saves frame i of both stacks there with
    `extension` as the file suffix; frames flagged 'NoCroppedImage' are only
    printed.  Returns (cropDir, imDir).
    NOTE(review): `hCropbox` is unused, and croppedStack/ImStack are indexed in
    lockstep -- if frames were skipped upstream the pairing can drift; confirm
    with the caller.
    '''
    ext = extension
    outDir = saveDir
    cropDir = outDir+'_cropped/'
    imDir = outDir+'_original_subIms/'
    os.mkdir(imDir)
    os.mkdir(cropDir)
    for i in xrange(len(croppedStack)):
        if 'NoCroppedImage' not in croppedStack[i][1]:
            cv2.imwrite(cropDir+str(i)+ext, croppedStack[i][1])
            cv2.imwrite(imDir+str(i)+ext, ImStack[i])
        else:
            print i, croppedStack[i][1]
    return cropDir, imDir
def getFiles(dirname, extList):
    """Collect files in `dirname` matching any glob pattern in `extList`, naturally sorted."""
    matches = [path
               for pattern in extList
               for path in glob.glob(os.path.join(dirname, pattern))]
    return natural_sort(matches)
def displayImgs(imgs, fps):
    '''
    Play back a sequence of images at roughly `fps` frames per second.

    Keyboard controls: 'q' quits, 'p' pauses until the next key press,
    'n' shows the next frame and then waits indefinitely (f becomes 0).
    NOTE(review): 'n' indexes imgs[i+1] and will raise IndexError on the
    last frame -- confirm intended usage.
    '''
    f = 1000/fps
    for i, img in enumerate(imgs):
        cv2.imshow('123',img)
        key = cv2.waitKey(f) & 0xFF
        if key == ord("q"):
            break
        if key == ord("p"):
            f = 1000/fps
            cv2.waitKey(0)
        if key == ord("n"):
            cv2.imshow('123',imgs[i+1])
            f=0
            cv2.waitKey(f)
    cv2.destroyAllWindows()
def imRead(x):
    # Load image `x` as single-channel grayscale (colour is not needed for tracking).
    return cv2.imread(x, cv2.IMREAD_GRAYSCALE)
    #return cv2.rotate(cv2.imread(x, cv2.IMREAD_GRAYSCALE), cv2.ROTATE_90_COUNTERCLOCKWISE)
def getBgIm(imgs):
    '''
    Return a background image estimated as the per-pixel temporal median of the
    stack (the moving fly is rejected by the median), converted to uint8.
    '''
    avg = np.array((np.median(imgs, axis=0)))
    # convertScaleAbs rounds and saturates the float median back to uint8.
    return cv2.convertScaleAbs(avg)
def getBgSubImStack((inImgstack, bgIm)):
'''
returns the stack of images after subtracting the background image from the input imagestack
'''
subIms = np.zeros(np.shape(inImgstack), dtype=np.uint8)
for f in range(0, len(inImgstack)):
subIms[f] = cv2.bitwise_not(cv2.absdiff(inImgstack[f], bgIm))
return subIms
def getBgSubIm((inImg, bgIm)):
'''
returns the stack of images after subtracting the background image from the input imagestack
'''
return cv2.bitwise_not(cv2.absdiff(inImg, bgIm))
def getSubIms(dirname, imExts, pool, workers):
    '''
    Read every image in `dirname` matching `imExts`, compute a median background
    (in parallel, over horizontal strips) and background-subtract every frame.

    Returns (imgStack, subIms, flist): the raw uint8 stack, the background-
    subtracted uint8 stack and the naturally sorted file list.
    '''
    flist = getFiles(dirname, imExts)
    #startTime = time.time()
    imgStack = pool.map(imRead, flist)
    # Repack the list of 2-D frames into one contiguous uint8 array.
    ims = np.zeros((len(imgStack),imgStack[0].shape[0], imgStack[0].shape[1] ), dtype=np.uint8)
    for i,x in enumerate(imgStack):
        ims[i]=x
    imgStack = ims.copy()
    #t1 = time.time()-startTime
    #print("imRead time for %d frames: %s Seconds at %f FPS"%(len(flist),t1 ,len(flist)/float(t1)))
    #t1 = time.time()
    # Split the stack into horizontal strips so each worker computes the median
    # background of one strip; vstack reassembles the full background image.
    imStackChunks = np.array_split(imgStack, 4*workers, axis=1)
    imStackChunks = [x.copy() for x in imStackChunks if x.size > 0]
    bgImChunks = pool.map(getBgIm, imStackChunks)
    bgIm = np.array(np.vstack((bgImChunks)), dtype=np.uint8)
    #t2 = time.time()-t1
    #print("bg calculation time for %d frames: %s Seconds at %f FPS"%(len(flist),t2 ,len(flist)/float(t2)))
    #t2 = time.time()
    # Subtract the shared background from every frame in parallel.
    subIms = pool.map(getBgSubIm, itertools.izip(imgStack, itertools.repeat(bgIm)))
    ims = np.zeros((len(subIms),subIms[0].shape[0], subIms[0].shape[1] ), dtype=np.uint8)
    for i,x in enumerate(subIms):
        ims[i]=x
    subIms = ims.copy()
    #t = time.time()-t2
    #print("bg Subtraction time for %d frames: %s Seconds at %f FPS"%(len(flist),t ,len(flist)/float(t)))
    return imgStack, subIms, flist
def getEuDisCenter((x1,y1)):
    # Euclidean distance of the point from the crop-box centre.  Relies on the
    # module-level globals heightCrop and widthCrop.  NOTE(review): x is offset
    # by heightCrop and y by widthCrop, which looks swapped -- benign while both
    # are 100, but confirm before changing either value.
    return np.sqrt(np.square(x1-heightCrop)+np.square(y1-widthCrop))
def getEuDisCorner(pt):
    """Euclidean distance of point `pt` = (x, y) from the origin (top-left corner).

    The Python-2-only tuple-parameter syntax was replaced by an explicit unpack
    (PEP 3113) -- callers still pass a single (x, y) tuple, so the interface is
    unchanged while the function now also parses under Python 3.
    """
    x1, y1 = pt
    return np.sqrt(np.square(x1) + np.square(y1))
def getFarPoint(cnt):
    '''
    Return the contour's four axis-extreme points sorted by their distance from
    the crop-box centre (nearest first); the caller typically takes [-1], the
    extreme point farthest from the centre (a leg-tip candidate).
    '''
    # Extreme points of the contour along each image axis.
    leftmost = tuple(cnt[cnt[:,:,0].argmin()][0])
    rightmost = tuple(cnt[cnt[:,:,0].argmax()][0])
    topmost = tuple(cnt[cnt[:,:,1].argmin()][0])
    bottommost = tuple(cnt[cnt[:,:,1].argmax()][0])
    disSorted = sorted([leftmost, rightmost, topmost, bottommost], key=getEuDisCenter)
    return disSorted
def tracknCrop(dirname, imgExt, heightcrop, widthcrop, contourParams, outFname, \
               params, nImThreshold, blurParams, ratTailParams, pool, workers):
    '''
    Track the fly in every image of `dirname` (background subtraction + ellipse
    fit), crop a window around the centroid in both the raw and the subtracted
    stacks, and write the per-frame ellipse data to '<outFname>_centroids.csv'.

    Returns (cropImStack, cropSubImStack, flist), or None (implicitly) when the
    folder holds nImThreshold images or fewer.  `params` is accepted for
    interface compatibility but unused here.
    '''
    flist = natural_sort(os.listdir(dirname))
    if len(flist)<=nImThreshold:
        print('Less Images to process, not processing folder, nImages present: %i'%len(flist))
        pass
    else:
        imgs, subImgs, flist = getSubIms(dirname, imgExt, pool, workers)
        trackedData = getContourData(imStack = subImgs, fList = flist, contourParams= contourParams, blurParams=blurParams, pool=pool)
        # Ellipse centres per frame.  NOTE(review): for 'noContourDetected'
        # entries x[1][0] is the character 'n'; the crop functions silently skip
        # such frames via their except clause -- confirm this is intended.
        blobXYs = [x[1][0] for _,x in enumerate(trackedData)]
        cropSubImStack = cropImstack(imStack = subImgs, trackData = blobXYs, heightCropbox = heightcrop, widthCropbox = widthcrop,\
                                 blurParams=blurParams, ratTailParams=ratTailParams)
        cropImStack = cropImstackGray(imStack = imgs, trackData = blobXYs, heightCropbox = heightcrop, widthCropbox = widthcrop)
        # Flatten the ellipse tuples into CSV-friendly rows.
        moddedTrackedData = []
        for _,data in enumerate(trackedData):
            if data[1]!='noContourDetected':
                moddedTrackedData.append([data[0], data[1][0][0], data[1][0][1],data[1][1][0], data[1][1][1], data[1][2]])
            else:
                moddedTrackedData.append([data[0], data[1]])
        with open(outFname+"_centroids.csv", "wb") as f:
            writer = csv.writer(f)
            writer.writerow(['frame','X-Coord','Y-Coord','minorAxis',' majorAxis',' angle'])
            writer.writerows(moddedTrackedData)
        return cropImStack, cropSubImStack, flist
def getLegTipLocs(rawDir, trackParams, legContourThresh, outFname, pool):
    '''
    Track, crop and extract leg-tip candidate locations for one image folder.

    Returns (allLocs, croppedIms): per-frame arrays of leg-tip points sorted by
    distance from the origin, plus the cropped grayscale frames.
    NOTE(review): this passes one argument fewer than tracknCrop's signature
    requires (missing `workers`) and appears superseded by getAllLocs -- it
    would raise TypeError if called as-is; confirm before use.
    '''
    imExts, height, width, cntparams, \
        flyparams, nImThresh, blurParams, ratTailparams = trackParams
    croppedImStack, croppedSubImStack, fList = tracknCrop(rawDir, imExts, height,\
                        width, cntparams, outFname, flyparams,\
                        nImThresh, blurParams, ratTailparams,\
                        pool)
    # Keep only frames that actually produced a crop (drops 'NoCroppedImage').
    croppedSubIms = []
    croppedIms = []
    for i in xrange(len(croppedSubImStack)):
        if 'NoCroppedImage' not in croppedSubImStack[i][1]:
            croppedSubIms.append(croppedSubImStack[i][1])
            croppedIms.append(croppedImStack[i][1])
    croppedIms = np.array(croppedIms, dtype=np.uint8)
    croppedSubIms = np.array(croppedSubIms, dtype=np.uint8)
    allLocs = []
    for i, im in enumerate(croppedSubIms):
        _, contours, hierarchy = cv2.findContours(im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # Up to the six largest contours above the area threshold = leg candidates.
        contours = [x for x in sorted(contours, key = cv2.contourArea)[-6:] if cv2.contourArea(x)>=legContourThresh]
        locs = []
        for j,cnt in enumerate(contours):
            locs.append(getFarPoint(cnt)[-1])
        allLocs.append(np.array(sorted([x for x in locs], key=getEuDisCorner)))
    return allLocs, croppedIms
def getAllLocs(rawDir, trackParams, legContourThresh, outFname, pool, workers):
    '''
    Track, crop and extract leg-tip candidate locations for one image folder.

    Returns (allLocs, croppedIms) where allLocs is a list of
    [frameFileName, [(x, y), ...]] entries (one per frame that yielded a crop)
    and croppedIms is the matching stack of cropped grayscale frames.
    '''
    imExts, height, width, cntparams, \
        flyparams, nImThresh, blurParams, ratTailparams = trackParams
    croppedImStack, croppedSubImStack, fList = tracknCrop(rawDir, imExts, height,\
                        width, cntparams, outFname, flyparams,\
                        nImThresh, blurParams, ratTailparams,\
                        pool, workers)
    # Keep only frames that actually produced a crop (drops 'NoCroppedImage').
    croppedSubIms = []
    croppedIms = []
    frNames = []
    nCnts = 0
    for i in xrange(len(croppedSubImStack)):
        if 'NoCroppedImage' not in croppedSubImStack[i][1]:
            croppedSubIms.append(croppedSubImStack[i][1])
            croppedIms.append(croppedImStack[i][1])
            frNames.append(fList[i])
            nCnts+=1
    croppedIms = np.array(croppedIms, dtype=np.uint8)
    croppedSubIms = np.array(croppedSubIms, dtype=np.uint8)
    #displayImgs(croppedIms, 100)
    #displayImgs(croppedSubIms, 100)
    allLocs = []
    for i, im in enumerate(croppedSubIms):
        # findContours return signature differs between OpenCV 2.x and 3.x.
        ver = (cv2.__version__).split('.')
        if int(ver[0]) < 3 :
            contours, hierarchy = cv2.findContours(im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        else:
            _, contours, hierarchy = cv2.findContours(im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # Up to the six largest contours above the area threshold = leg candidates.
        contours = [x for x in sorted(contours, key = cv2.contourArea)[-6:] if cv2.contourArea(x)>=legContourThresh]
        locs = []
        for j,cnt in enumerate(contours):
            locs.append(getFarPoint(cnt)[-1])
#        allLocs.append(np.array(sorted([x for x in locs], key=getEuDisCorner)))
        allLocs.append([frNames[i],locs])
    print('Contours detected in %d of %d frames'%(nCnts, len(fList)))
    return allLocs, croppedIms
def assignLegTips(tipLocs, pxMvmntThresh, frmSkipThresh, saveFileName, crpImStack):
    '''
    Link leg-tip detections across frames with trackpy, write two CSVs
    ('<saveFileName>.csv' long format, '<saveFileName>_FramesTogether.csv' one
    row per frame) and return (trackedIds, dispIms) where dispIms are colour
    frames with the tracked ids drawn on.

    pxMvmntThresh: max pixel movement of a tip between frames;
    frmSkipThresh: number of frames a tip may disappear and still keep its id.
    '''
    t = tp.link_iter(tipLocs, search_range = pxMvmntThresh, memory=frmSkipThresh) #iterator of locations, distance moved between frames, memory of skipped frame
    trackedIds = []
    for idx,x in enumerate(t):
        trackedIds.append(x[1])
    # Long format: one row per detected tip.
    legTips = [['frame#','x','y','trackId']]
    for i,loc in enumerate(tipLocs):
        for j,l in enumerate(loc):
            legTips.append([i, l[0], l[1],trackedIds[i][j]])
    csvOutFile = saveFileName+'.csv'
    with open(csvOutFile, "wb") as f:
        writer = csv.writer(f)
        writer.writerows(legTips)
    # Wide format: all tips of one frame on a single row (up to six legs).
    legTipsFr = [['frame#',\
                  'x','y','trackId',\
                  'x','y','trackId',\
                  'x','y','trackId',\
                  'x','y','trackId',\
                  'x','y','trackId',\
                  'x','y','trackId']]
    for i,loc in enumerate(tipLocs):
        frLocs = [i]
        for j,l in enumerate(loc):
            frLocs.extend((l[0], l[1],trackedIds[i][j]))
        legTipsFr.append(frLocs)
    csvOutFile = saveFileName+'_FramesTogether.csv'
    with open(csvOutFile, "wb") as f:
        writer = csv.writer(f)
        writer.writerows(legTipsFr)
    # Visual overlay: each tracked id gets a fixed colour from the palette.
    dispIms = []
    for i, im in enumerate(crpImStack):
        img = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
        locs = tipLocs[i]
        for j,loc in enumerate(locs):
            cv2.circle(img, tuple(loc), 2, colors[trackedIds[i][j]], 2)
            cv2.putText(img, str(trackedIds[i][j]), tuple(loc), cv2.FONT_HERSHEY_COMPLEX, 0.4, colors[trackedIds[i][j]])
        dispIms.append(img)
    return trackedIds, dispIms
def getClusters(nclusters, locsArr, fname, imShape, workers):
    '''
    Spectral-cluster all leg-tip locations pooled over frames.

    locsArr: list of [frameLabel, [(x, y), ...]] entries (getAllLocs output).
    Writes '<fname>_legTipLocs.csv' (frame, frame#, cluster#, x, y, clusterId)
    and '<fname>_legTipLocs_black.png' (tips coloured by cluster on black).
    Returns (allLocs1, allFrLabels): the per-tip data matrix with cluster ids
    appended, and the per-tip frame labels.
    '''
    frLabels = [x[0] for x in locsArr]
    locs = [x[-1] for x in locsArr]
    # Repeat each frame label once per tip detected in that frame.
    allFrLabels = []
    for i,x in enumerate(locs):
        for j in xrange(len(x)):
            allFrLabels.append(frLabels[i])
    allVerts = np.vstack((locs))
    spectral = cluster.SpectralClustering(
        n_clusters=nclusters, eigen_solver='arpack',
        affinity="nearest_neighbors", n_jobs=workers)
    spectral.fit(allVerts)
    y_pred = spectral.labels_.astype(np.int)
    # Columns: frame index, reverse tip counter, x, y -- then the cluster id.
    allLocs1 = [np.hstack((np.zeros((len(x),1))+i,np.arange(len(x),0, -1).reshape((len(x),1)), x)) for i,x in enumerate(locs)]
    allLocs1 = np.vstack((allLocs1))
    allLocs1 = np.hstack((allLocs1, np.reshape(y_pred, (len(y_pred),1))))
    labArr = np.array((allFrLabels))
    labArr = labArr.reshape((len(labArr),1))
    outData = np.hstack((labArr, allLocs1))
    csvOutFile = fname+'_legTipLocs.csv'
    with open(csvOutFile, "wb") as f:
        writer = csv.writer(f)
        writer.writerow(['frame','frame#','cluster#','X-Coord',' Y-Coord',' clusterId'])
        writer.writerows(outData)
    # Paint every tip onto a black canvas with its cluster colour.
    blImBl = np.zeros((imShape[1], imShape[0],3), dtype=np.uint8)
    for i,v in enumerate(allVerts):
        blImBl[v[1],v[0]] = colors[y_pred[i]]
    cv2.imwrite(fname+'_legTipLocs_black.png', blImBl)
    return allLocs1, allFrLabels
# ---------------- run-time configuration ----------------
initialDir = '/media/pointgrey/data/flywalk/legTracking/data/all/'
#initialDir = '/media/aman/data/flyWalk_data/climbingData/gait/data/tmp/pythonTmp/'
#initialDir = '/media/aman/data/flyWalk_data/climbingData/gait/data/copiedLegTrackingTrackData/'
baseDir = getFolder(initialDir)
outDir = '/media/aman/data/flyWalk_data/climbingData/gait/data/tmp/'
legTipclusters = 20                 # number of spectral clusters for leg-tip positions
imExtensions = ['*.png', '*.jpeg']
heightCrop = 100                    # half-height of the crop box around the fly
widthCrop = 100                     # half-width of the crop box around the fly
legCntThresh = 2                    # minimum contour area for a leg-tip candidate
nThreads = 7
kernelSize = 5
gauBlurParams = (kernelSize,1)      # (Gaussian kernel size, sigma)
threshVal = 250
nIterations = 2
kernel = np.ones((kernelSize,kernelSize),np.uint8)
pxMvdByLegBwFrm = 50                # linking threshold (px) used by assignLegTips
legTipFrmSkipthresh = 40            # allowed skipped frames when linking leg tips
rattailparams = (threshVal, nIterations, kernel)
#baseDir = initialDir
print baseDir
cntParams = {'maxCntArea' : 7000,\
             'minCntArea' : 2000,\
             'threshLow' : 210,\
             'threshHigh' : 255}
trackparams = [imExtensions, heightCrop, widthCrop, cntParams, flyParams,\
                nImThresh, gauBlurParams, rattailparams]
rawDirs = getDirList(baseDir)
pool = mp.Pool(processes=nThreads)
procStartTime = time.time()
totalNFrames = 0
print "Started processing directories at "+present_time()
# Each raw directory holds an 'imageData' folder with one sub-folder per recording.
for _,rawDir in enumerate(rawDirs):
    d = os.path.join(rawDir, imgDatafolder)
    print rawDir
    imdirs = getDirList(d)
    for imdir in imdirs:
        startTime = time.time()
        nFrames = len(getFiles(imdir, imExtensions))
        if nFrames>nImThresh:
            fname = imdir.rstrip(os.sep)+'_legTipsClus_n'+str(legTipclusters)+'-Climbing'
            legTipLocs = getAllLocs(imdir, trackparams, legCntThresh, fname, pool, nThreads)
            allLocs, croppedIms = legTipLocs
            locs = [x[-1] for x in allLocs]
            if len(allLocs)>25:
                try:
                    # vstack raises when no tips were found; that guards getClusters.
                    np.vstack((locs))
                    lbldLocs, frLabelsAll = getClusters(nclusters = legTipclusters, locsArr = allLocs,\
                                                        fname = fname, imShape = (2*heightCrop, 2*widthCrop),\
                                                        workers = nThreads)
                except:
                    print('legTips not tracked properly in %s'%imdir)
        print('==> Processed %i frames in %0.3f seconds at: %05f FPS'\
                %(nFrames, time.time()-startTime, (nFrames/(time.time()-startTime))))
        totalNFrames +=nFrames
pool.close()
totSecs = time.time()-procStartTime
print('Finished processing %d frames at: %05s, in %sSeconds, total processing speed: %05f FPS\n'\
        %(totalNFrames, present_time(),totSecs , totalNFrames/totSecs))
# Show the crops of the last processed folder as a quick sanity check.
displayImgs(croppedIms,100)
#outData = []
#
#aa = lbldLocs.copy()
#aaa = np.array(aa, dtype=np.uint8)
#for i in xrange(len(aaa)):
# frData = [frLabelsAll[i]]
# for j,x in enumerate(aaa[i].astype(list)):
# frData.extend(list(aaa[i])[j])
# outData.append(frData)
#
#
#labArr = np.array((frLabelsAll)).reshape((len(frLabelsAll),1))
#
#out = np.hstack((labArr, lbldLocs))
#csvOutFile = fname+'_legTipLocs.csv'
#with open(csvOutFile, "wb") as f:
# writer = csv.writer(f)
# writer.writerows(out)
#
#allVerts = np.vstack((allLocs))
#X = allVerts.copy()
#
#spectral = cluster.SpectralClustering(
# n_clusters=params['n_clusters'], eigen_solver='arpack',
# affinity="nearest_neighbors")
#spectral.fit(X)
#y_pred = spectral.labels_.astype(np.int)
#
#labels = y_pred
#blIm = np.zeros((2*heightCrop, 2*widthCrop,3), dtype=np.uint8)
#for i,v in enumerate(allVerts):
# blIm[v[1],v[0]] = colors[labels[i]]
#cv2.imshow("Original", blIm)
#key = cv2.waitKey(0)
#cv2.destroyAllWindows()
#
#allLocs1 = [np.hstack((x, np.zeros((len(x),1))+i)) for i,x in enumerate(allLocs)]
#
#
#
#allVerts1 = np.vstack((allLocs1))
#X = allVerts1.copy()
#
#spectral = cluster.SpectralClustering(
# n_clusters=params['n_clusters'], eigen_solver='arpack',
# affinity="nearest_neighbors")
#spectral.fit(X)
#y_pred = spectral.labels_.astype(np.int)
#
#labels = y_pred
#blIm = np.zeros((2*heightCrop, 2*widthCrop,3), dtype=np.uint8)
#for i,v in enumerate(allVerts1):
# blIm[v[1],v[0]] = colors[labels[i]]
#cv2.imshow("Original", blIm)
#key = cv2.waitKey(0)
#cv2.destroyAllWindows()
#
#
#allVertsList = [list(x) for _,x in enumerate(allVerts)]
#frLegTipLabels = []
#for i, tips in enumerate(allLocs):
# ltlabels = []
# for j, tip in enumerate(tips):
# ltlabels.append(labels[allVertsList.index(list(tip))])
# frLegTipLabels.append(ltlabels)
#
#blIms = [cv2.cvtColor(x, cv2.COLOR_GRAY2BGR) for x in croppedIms.copy()]
#for idx,im in enumerate(blIms):
# for j, pt in enumerate(allLocs[idx]):
# cv2.circle(im, tuple(pt), 2, colors[frLegTipLabels[idx][j]+10], thickness=3)
#
#displayImgs(blIms,10)
| [
"crack.mech@gmail.com"
] | crack.mech@gmail.com |
48232ef8dae4207da9df14791e46fa8988f9cf3e | 9b74ef81e5c1c1dbecf847ada0e704c8e808db22 | /data/triviaqa/config.py | 16b933196b1639ff75fa8bd3e17f6de1e78d0420 | [
"MIT"
] | permissive | neufang/jack | 4464ddc1d7c97bd729e1f661734016bb0d0b1397 | 96a4e59be70ec7df382d26d5bf6d6eee2c94f5e7 | refs/heads/master | 2020-06-23T14:05:11.116345 | 2019-07-24T13:52:15 | 2019-07-24T13:52:15 | 198,644,166 | 0 | 0 | MIT | 2019-07-24T13:46:26 | 2019-07-24T13:46:25 | null | UTF-8 | Python | false | false | 275 | py | import os
from os.path import join
"""
Global config options
"""
TRIVIA_QA = os.environ.get('TRIVIAQA_HOME', None)
TRIVIA_QA_UNFILTERED = os.environ.get('TRIVIAQA_UNFILTERED_HOME', None)
CORPUS_DIR = join(os.environ.get('TRIVIAQA_HOME', ''), "preprocessed")
VEC_DIR = ''
| [
"dirk.weissenborn@gmail.com"
] | dirk.weissenborn@gmail.com |
a9e5e96d698f3e585ec37494c11d6d2ee96ce8df | e3743b27fc794762308958f6e9c6b594db39153f | /paper_examples/visualization_of_activationfunction.py | d1ab66c06340e3a1918cb19fa3a1935f02987d0c | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | raphaeldeimel/python-phasestatemachine | 41cc73d5659155f1b8088fa9893b8c2ec02f4064 | cead459a9b4776e403dec199fbc0dd5d8dd25494 | refs/heads/master | 2022-08-01T19:58:48.368729 | 2020-05-18T12:15:05 | 2020-05-18T12:15:05 | 264,927,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,890 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Raphael Deimel
@copyright 2018
@licence: 2-clause BSD licence
Visualizes the vector field in one 2D-plane of the phase-statemachine state space
"""
from numpy import *
from matplotlib.pylab import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from scipy.special import betainc
x0 = dot(linspace(0,1,500)[:,newaxis], ones((1,500)))
x1 = x0.T
n = 25
x0diag = betainc(5,5,linspace(-0.01,1.01,n))
isolinevalues = linspace(0,1, 10)
def f_proposed1(x0,x1, x2=0.001):
"""
proposed function:
Matrix X:
X = x^nx1 . 1^1xn
lambda = X @ X.T * 8 * (X*X + (X*X).T) / (X + X.T + 0.01)**4
"""
xxt = x1 * x0
L2_squared = x2**2 + x1**2 + x0**2 #xtx
L1 = abs(x2)+abs(x1)+abs(x0)
S_P = abs(x1)+abs(x0)
return 16 * xxt * (L2_squared) / (L1**4 + S_P**4)
def f_proposed2(x0,x1, x2=0.00001):
return 1-(1-f_proposed1(x0,x1, x2)**2)**2 #kumaraswamy(1,2)
def f_proposed3(x0,x1, x2=0.00001):
"""
proposed function:
Matrix X:
X = x^nx1 . 1^1xn
lambda = X @ X.T * 8 * (X*X + (X*X).T) / ((X + X.T)**4 -
"""
xxt = x1 * x0
L2_squared = x2**2 + x1**2 + x0**2 #xtx
L1 = abs(x2)+abs(x1)+abs(x0)
return 8 * xxt**3 * L2_squared / ( (L1)**4 * xxt**2)
def f_proposed1_cross(x1,x2, x0=0.3):
"""
proposed function:
Matrix X:
X = x^nx1 . 1^1xn
lambda = X @ X.T * 8 * (X*X + (X*X).T) / (sum(|X|) + 0.01)**4
"""
return f_proposed1(x0, x2, x2)
traces= []
for f, name in [(f_proposed1, 'proposed_activation_function_1'), (f_proposed2, 'proposed_activation_function_2'),(f_proposed3, 'proposed_activation_function_3'),(f_proposed1_cross, 'proposed_activation_function_1_competingstates') ]:
ax = Axes3D(figure(dpi=300))
ax.view_init(elev=20., azim=-165)
#ax.mouse_init(rotate_btn=1, zoom_btn=3)
ax.plot_surface(x0,x1,f(x0, x1), cmap=cm.coolwarm, linewidth=2.0, rcount=n, ccount=n)
#ax.set_xlim3d(0,1)
#ax.set_ylim3d(0,1)
ax.set_zlabel('transition activation')
ax.set_xlabel('successor')
ax.set_ylabel('predecessor')
for c in [0.5]:
a = x0diag**c
b = (1.0-x0diag)**c
ax.plot3D(a, b, f(a, b), color='gray')
savefig('figures/{0}.pdf'.format(name), bbox_inches='tight')
savefig('figures/{0}.jpg'.format(name), bbox_inches='tight')
figure()
traces.append((arctan2(a,b), f(a, b), name))
plot(traces[-1][0],traces[-1][1], marker='x')
savefig('figures/{0}_trace.pdf'.format(name), bbox_inches='tight')
figure()
for x,y,label in traces:
plot(x,y, marker='x', label=label)
legend()
savefig('figures/proposed_activation_function_alltraces.pdf'.format(name), bbox_inches='tight')
if sys.flags.interactive:
ion()
show()
| [
"raphael.deimel@tu-berlin.de"
] | raphael.deimel@tu-berlin.de |
6288d87565ff30fcae21bfa03159a2e8abac484d | d0e5128ad27ebd6c3be846e83c9678b077b071f0 | /directors.py | 4d8f09bf45cfeb497119d090f443ad275d85d0e5 | [] | no_license | iliakakurin/dir | c280b7bcef2026fa4f04a5b5172fb18765e4ee05 | bb5c995a32115e357771085786b0ea828b3db649 | refs/heads/main | 2022-12-28T12:15:52.544625 | 2020-10-10T12:24:07 | 2020-10-10T12:24:07 | 302,895,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,078 | py | winners = {1931: ['Norman Taurog'], 1932: ['Frank Borzage'], 1933: ['Frank Lloyd'], 1934: ['Frank Capra'], 1935: ['John Ford'], 1936: ['Frank Capra'], 1937: ['Leo McCarey'], 1938: ['Frank Capra'], 1939: ['Victor Fleming'], 1940: ['John Ford'], 1941: ['John Ford'], 1942: ['William Wyler'], 1943: ['Michael Curtiz'], 1944: ['Leo McCarey'], 1945: ['Billy Wilder'], 1946: ['William Wyler'], 1947: ['Elia Kazan'], 1948: ['John Huston'], 1949: ['Joseph L. Mankiewicz'], 1950: ['Joseph L. Mankiewicz'], 1951: ['George Stevens'], 1952: ['John Ford'], 1953: ['Fred Zinnemann'], 1954: ['Elia Kazan'], 1955: ['Delbert Mann'], 1956: ['George Stevens'], 1957: ['David Lean'], 1958: ['Vincente Minnelli'], 1959: ['William Wyler'], 1960: ['Billy Wilder'], 1961: ['Jerome Robbins', 'Robert Wise'], 1962: ['David Lean'], 1963: ['Tony Richardson'], 1964: ['George Cukor'], 1965: ['Robert Wise'], 1966: ['Fred Zinnemann'], 1967: ['Mike Nichols'], 1968: ['Carol Reed'], 1969: ['John Schlesinger'], 1970: ['Franklin J. Schaffner'], 1971: ['William Friedkin'], 1972: ['Bob Fosse'], 1973: ['George Roy Hill'], 1974: ['Francis Ford Coppola'], 1975: ['Milos Forman'], 1976: ['John G. Avildsen'], 1977: ['Woody Allen'], 1978: ['Michael Cimino'], 1979: ['Robert Benton'], 1980: ['Robert Redford'], 1981: ['Warren Beatty'], 1982: ['Richard Attenborough'], 1983: ['James L. 
Brooks'], 1984: ['Milos Forman'], 1985: ['Sydney Pollack'], 1986: ['Oliver Stone'], 1987: ['Bernardo Bertolucci'], 1988: ['Barry Levinson'], 1989: ['Oliver Stone'], 1990: ['Kevin Costner'], 1991: ['Jonathan Demme'], 1992: ['Clint Eastwood'], 1993: ['Steven Spielberg'], 1994: ['Robert Zemeckis'], 1995: ['Mel Gibson'], 1996: ['Anthony Minghella'], 1997: ['James Cameron'], 1998: ['Steven Spielberg'], 1999: ['Sam Mendes'], 2000: ['Steven Soderbergh'], 2001: ['Ron Howard'], 2002: ['Roman Polanski'], 2003: ['Peter Jackson'], 2004: ['Clint Eastwood'], 2005: ['Ang Lee'], 2006: ['Martin Scorsese'], 2007: ['Ethan Coen', 'Joel Coen'], 2008: ['Danny Boyle'], 2009: ['Kathryn Bigelow'], 2010: ['Tom Hooper']}
w = []
# создаем список режиссеров, выигравших оскар
for k,v in winners.items(): # пробегаем по элементам словаря
for x in v: # пробегаем по победителям за текущий год
w.append(x) # добавляем в словарь текущего победителя
# создаем счетчик побед
occ = {}
for director in w: # для каждого режиссера в списке победителей
if director in occ:
occ[director] += 1
else:
occ[director] = 1
print(occ) # выводим список побед
m = 0 # макс количество побед
person = '' # человек с макс количеством побед
for k,v in occ.items(): # пробегаем по словарю
if v > m: # обновляем максимум и человека
m = v
person = k
print(person, m) # выводим ответ
| [
"noreply@github.com"
] | iliakakurin.noreply@github.com |
71e02c2e6d1bce0c15ac9ed0cf1d4f5cd0eaff41 | 20318c1ce3d4a9b1233c877338e74f862e80c4e9 | /Python/main.py | 65409bdbd9de9f2b75b74b64b268132df91b64a2 | [] | no_license | jfvilleforceix/BenchmarkRugby | 91ed1b5da2ae1de2af7b5d1a0d7430e75cf9e3e3 | f33d8bb5be6bb12155aff761e915ed10abac19ba | refs/heads/master | 2020-04-16T23:45:06.969115 | 2019-02-18T22:00:31 | 2019-02-18T22:00:31 | 166,024,296 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,862 | py | ##! /usr/bin/python
# -*- coding: utf-8 -*-
import urllib.request
from bs4 import BeautifulSoup
sexton = "http://www.itsrugby.fr/joueur-4545.html"
vulivuli = "http://www.itsrugby.fr/joueur-17678.html"
ford = "http://www.itsrugby.fr/joueur-18596.html"
<<<<<<< HEAD
bobo = "http://www.itsrugby.fr/joueur_1859.html"
reponse = urllib.request.urlopen(sexton)
=======
reponse = urllib.request.urlopen(ford)
>>>>>>> origin/master
html = reponse.read().decode(reponse.headers.get_content_charset())
soup = BeautifulSoup(html, features="html.parser")
# kill all script and style elements
for script in soup(["script", "style"]):
script.extract() # rip it out
# get text
text = soup.get_text(separator="|")
# break into lines and remove leading and trailing space on each
lines = (line.strip() for line in text.splitlines())
# break multi-headlines into a line each
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
# drop blank lines
textBio = '\n'.join(chunk for chunk in chunks if chunk not in ["","|","|:|","| |"])
# print(textBio)
textStat = textBio
# print(textStat)
def getJoueurBio(text):
nom = text.split("Nom|\n|",1)[1].split("|\n| Age")[0].replace("\n","")
print("Nom:", nom)
age_date = text.split("Age|\n|",1)[1].split("|\n| Prenom")[0].replace("\n","")
# print(age_date)
age = age_date[0:2]
date = age_date[-11:-1]
print("Âge:", age)
print("Date de naissance:", date)
prenom = text.split("Prenom|\n|",1)[1].split("|\n| Poste")[0].replace("\n","")
print("Prénom:", prenom)
poste = text.split("Poste|\n",1)[1].split("\n| Nationalité")[0].replace("\n","")
print("Poste:", poste)
nation = text.split("Nationalité|\n|",1)[1].split("|\n| Mensuration")[0].replace("\n","").strip()
print("Nation:", nation)
poids_taille = text.split("Mensuration|\n",1)[1].split("\n|Recommander")[0].split("\n",1)
poids = int(poids_taille[0].split(" ")[0])
taille = int(poids_taille[1].split(" ")[0]+poids_taille[1].split(" ")[2])
print("Poids:", poids)
print("Taille:", taille)
def txt2value(str):
if str == "-":
value = 0
else:
value = int(str)
return value
<<<<<<< HEAD
def getJoueurClubStat(text):
table = text.split("|Min.|\n",1)[1]
table = table.split("|Copyright",1)[0]
# print(table)
bool_sp = False
for line in table.splitlines():
line_split = list(filter(None, line.split("|")))
quotient = len(line_split) // 11
reste = len(line_split) % 11
print(len(line_split), quotient, reste, line_split)
if len(line_split) == 1:
saison = line_split[0]
continue
elif reste == 2:
saison_prec = line_split[-1]
bool_sp = True
# print(saison)
saisonList, club, competition, points, joues, titularisations, essais, penalites, drops, transformations, cartons_jaunes, cartons_rouges, minutes = ([] for i in range(13))
for i in range(0, quotient):
saisonList.append(saison)
club.append(line_split[0])
competition.append(line_split[11*i+1])
points.append(txt2value(line_split[11*i+2]))
joues.append(txt2value(line_split[11*i+3]))
titularisations.append(txt2value(line_split[11*i+4]))
essais.append(txt2value(line_split[11*i+5]))
penalites.append(txt2value(line_split[11*i+6]))
drops.append(txt2value(line_split[11*i+7]))
transformations.append(txt2value(line_split[11*i+8]))
cartons_jaunes.append(txt2value(line_split[11*i+9]))
cartons_rouges.append(txt2value(line_split[11*i+10]))
minutes.append(txt2value(line_split[11*i+11]))
# print(saisonList, club, competition, points, joues, titularisations, essais, penalites, drops, transformations, cartons_jaunes, cartons_rouges, minutes)
resume_saison = []
for i in range(len(saisonList)):
compet = []
compet.append(saisonList[i])
compet.append(club[i])
compet.append(competition[i])
compet.append(points[i])
compet.append(joues[i])
compet.append(titularisations[i])
compet.append(essais[i])
compet.append(penalites[i])
compet.append(drops[i])
compet.append(transformations[i])
compet.append(cartons_jaunes[i])
compet.append(cartons_rouges[i])
compet.append(minutes[i])
resume_saison.append(compet)
if bool_sp:
saison = saison_prec
bool_sp = False
print(resume_saison)
# getJoueurBio(textBio)
getJoueurClubStat(textStat)
=======
def getJoueurStat(text):
table = text.split("|Min.|",1)[1]
table = table.split("|Copyright",1)[0]
print(table)
for line in table.splitlines():
if len(line) < 8:
saison = line.replace("|","")
else:
line_split = line.split("|")
club = line_split[0]
competition = line_split[1]
points = txt2value(line_split[2])
joues = txt2value(line_split[3])
titularisations = txt2value(line_split[4])
essais = txt2value(line_split[5])
penalites = txt2value(line_split[6])
drops = txt2value(line_split[7])
transformations = txt2value(line_split[8])
cartons_jaunes = txt2value(line_split[9])
cartons_rouges = txt2value(line_split[10])
minutes = txt2value(line_split[11])
if len(line_split) == 14: #Ligne du premier club dans la saison
saison_prec = line_split[13]
elif len(line_split) == 25: #Ligne avec deux compétitions
# getJoueurBio(textBio)
getJoueurStat(textStat)
>>>>>>> origin/master
| [
"jf.villeforceix2207@hotmail.fr"
] | jf.villeforceix2207@hotmail.fr |
283cc666d0bb18ceb259e7ce977b964c5a16d1a3 | 544d0cef14d6e003b91763b0d351494fbe191f5c | /es_site_config.py.example | 9c1e8301af0d400ae91ddec3581855bd6cdbf821 | [] | no_license | TTimo/es_build | e22e0e1dd7743e22847d16360b41e992a8c1259b | b1e596bb36392a6524c63b1dbed0ce75f2f306a0 | refs/heads/master | 2016-09-06T15:24:24.722752 | 2013-10-10T12:44:07 | 2013-10-10T12:44:07 | 12,408,799 | 3 | 0 | null | 2013-10-05T19:54:27 | 2013-08-27T14:49:14 | Python | UTF-8 | Python | false | false | 116 | example | #!/usr/bin/env python
# override the default configuration settings with a site configuration
FLAVOR = "ubuntu_10"
| [
"ttimo@ttimo.net"
] | ttimo@ttimo.net |
da441fe0d6c94bb2e2df9eb6f5089b6d7cb64770 | b65e09f870e3a6f99bd68f3fd7ea92523eff6b0f | /meituan/main.py | 5d3ccf6f5f410e2aededf1b36bc6de6898e3b23f | [] | no_license | f1025395916/untitled | bf079c810bce161693c127f81fbd8efe98508d1f | 8c1005b783a1029d80332fc4cf52074514ea7502 | refs/heads/master | 2020-05-03T02:36:00.963934 | 2019-08-28T08:43:16 | 2019-08-28T08:43:16 | 178,374,255 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,284 | py | import requests
import json
import jsonpath
import fontTools
headers = {
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',
'Accept': 'application/json',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'Referer': 'https://h5.waimai.meituan.com/waimai/mindex/home',
'Content-Type': 'application/x-www-form-urlencoded',
'Origin': 'https://h5.waimai.meituan.com',
'Connection': 'keep-alive',
}
cookies = {
'_lx_utm': 'utm_source%3DBaidu%26utm_medium%3Dorganic',
'_lxsdk_cuid': '16c2302ab6dc8-0a3d0f563d6c22-14367940-1aeaa0-16c2302ab6ec8',
'_lxsdk': '16c2302ab6dc8-0a3d0f563d6c22-14367940-1aeaa0-16c2302ab6ec8',
'wm_order_channel': 'default',
'utm_source': '',
'terminal': 'i',
'w_utmz': 'utm_campaign=(direct)&utm_source=5000&utm_medium=(none)&utm_content=(none)&utm_term=(none)',
'openh5_uuid': '16c2302ab6dc8-0a3d0f563d6c22-14367940-1aeaa0-16c2302ab6ec8',
'w_latlng': '36657209,117055413',
'w_actual_lat': '0',
'w_actual_lng': '0',
'cssVersion': '9968de10',
'au_trace_key_net': 'default',
'_lxsdk_s': '16c2c23ca54-8d5-31a-b59%7C%7C55',
'uuid': '16c2302ab6dc8-0a3d0f563d6c22-14367940-1aeaa0-16c2302ab6ec8',
'igateApp': 'custom',
'w_uuid': '16c2302ab6dc8-0a3d0f563d6c22-14367940-1aeaa0-16c2302ab6ec8',
'mtsi-real-ip': '60.208.74.98',
'mtsi-cur-time': '2019-07-26 11:02:55',
'w_visitid': '0a2aadc8-ed0a-419d-b086-8546a9a5e0c0',
}
params = (
('_', '1564110538926'),
('X-FOR-WITH', '4CH20e1RM5tFPC3ysgcEMB2eNihocq70OCgAZlOxM6ErimvsViyCQWHTX3s9O5sr5notM0IjX2yiTvX5yQOTNeumO+bn/AmDSF4ilrVVhVYYnuC+CUNIhWdAeJSq7u4Rz2GwjLsUrxG6B4u8Y0P7lw=='),
)
data = {
'startIndex': '4',
'sortId': '0',
'multiFilterIds': '',
'sliderSelectCode': '',
'sliderSelectMin': '',
'sliderSelectMax': '',
'geoType': '2',
'rankTraceId': 'E7BE070C8CAF616576DFC1828EAACE84',
'uuid': '16c2302ab6dc8-0a3d0f563d6c22-14367940-1aeaa0-16c2302ab6ec8',
'platform': '3',
'partner': '4',
'originUrl': 'https://h5.waimai.meituan.com/waimai/mindex/home',
'riskLevel': '71',
'optimusCode': '10',
'wm_latitude': '36657209',
'wm_longitude': '117055413',
'wm_actual_latitude': '0',
'wm_actual_longitude': '0',
'openh5_uuid': '16c2302ab6dc8-0a3d0f563d6c22-14367940-1aeaa0-16c2302ab6ec8',
'_token': 'eJxVj9uOokAQht+FW43dICdN5gJBAcEDIqhM5gKksRlsQGgRney7b092NptNKvmrvvorVfXFNXbKTXnIK5Afch1quCnHj+BI5oYcbVlHkkWeh9JYkBVpyJ3/Y+pEYCxpQoObvo9ZX5aVj2+wY/U/8M7LKhyyHfBjKIgsvj02s3CY0rqdAoCl0SPOSZyPCMrpPS5H54qAPwiQvExRD3BFEDuKY8Nkz4aZFj8a/yj9W6/YF8zb5peSZWj5vBYrenu8NG+XqVd9AH1/rNj7Yu7y4+ACHTlu2nVAWi9HGEcJdUAj1brvdtQvdfFzJU/y51YeI2TbeSqsr4uO2sWJksrvbVBhUoEXXy17EiI4e6F75K6LwsGyY+U4qMSj4jzF7UGu562pL3V06PG1UKV5glCtPqpFpXpWlu7v7u7S9wbeGHwhhE77ijUqR6FlmWnvoLqbVKv+VgPdN1IxN/3FHEMziAzPM17SKYg0TWk817bMZrdQwaTxyTHd8KYqiWG2KfdnFHebMqH3e1JcvEkZKb0mCpUZkVWIMYKxvgoVmklacM/EmaBnFw9CFOwNa5m4IIWlC0qE28lztj8r622X2vPn6Yi8wWCTAv/gHPMbWW/5RSo+P9cHeCvqg+uK2lHIE/h4e+N+/QYm5b9y'
}
response = requests.post('https://i.waimai.meituan.com/openh5/homepage/poilist', headers=headers, params=params, cookies=cookies, data=data)
js = json.loads(response.text)
print(js)
| [
"1025395916@qq.com"
] | 1025395916@qq.com |
ae3df4ef092d4514dccb7aab617de96786eb2497 | 8a640ca583db73c8af025c135d8f6c53afd2e9ba | /tr_sys/tr_ara_ncats/urls.py | 66bd73a7857e77ffbcd1da67a83cd0a2252a6feb | [
"MIT"
] | permissive | edeutsch/Relay | b6434096da3ab5b85d55eab1c14da1bf3bfe0a21 | fc4581e4a512ebd3a0459bbd06652eb4291133e9 | refs/heads/master | 2022-11-30T17:35:23.048297 | 2020-08-04T18:46:09 | 2020-08-04T18:46:09 | 282,288,237 | 0 | 0 | MIT | 2020-07-24T18:17:23 | 2020-07-24T18:17:23 | null | UTF-8 | Python | false | false | 246 | py | from django.urls import path, include
from . import api
apipatterns = [
path(r'', api.index, name='ara-ncats-api'),
path(r'runquery', api.runquery, name='ara-ncats-runquery')
]
urlpatterns = [
path(r'api/', include(apipatterns)),
]
| [
"markwilliams2755@gmail.com"
] | markwilliams2755@gmail.com |
73f40efcf57f9a5c4b0601d37814d3ce3733297a | 3587e4c248005c6df500caea7ee18675d8676022 | /DBS3/Server/Python/src/dbs/dao/Oracle/BranchHashe/Insert.py | 5d6e65f82d2c96c81b384b5cb91896b541d29153 | [] | no_license | bbockelm/DBS | 1a480e146010e3d6b234ba5ee471f7c87a4877e6 | 4e47d578610485e0503fc1270c7d828064643120 | refs/heads/master | 2021-01-16T18:18:29.989833 | 2012-08-21T15:01:05 | 2012-08-21T15:01:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | !/usr/bin/env python
""" DAO Object for BranchHashes table """
__revision__ = "$Revision: 1.7 $"
__version__ = "$Id: Insert.py,v 1.7 2010/03/05 15:41:53 yuyi Exp $ "
from WMCore.Database.DBFormatter import DBFormatter
class Insert(DBFormatter):
    """DAO object that inserts a single row into the BRANCH_HASHES table."""

    def __init__(self, logger, dbi, owner=""):
        """Bind the DB interface and pre-build the INSERT statement.

        :param logger: logger passed through to DBFormatter
        :param dbi: database interface used to execute the statement
        :param owner: Oracle schema owner used as a table prefix; empty
            string or the "__MYSQL__" sentinel means no prefix.
            NOTE(review): the original signature omitted ``owner``, so
            constructing this DAO raised NameError; it is now an optional
            parameter, which keeps existing two-argument callers working.
        """
        DBFormatter.__init__(self, logger, dbi)
        self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
        self.sql = """INSERT INTO %sBRANCH_HASHES ( BRANCH_HASH_ID, HASH, CONTENT) VALUES (:branch_hash_id, :branch_hash, :content)""" % (self.owner)

    def execute(self, conn, binds, transaction=False):
        """Run the prepared INSERT with ``binds`` on ``conn``.

        :param conn: database connection supplied by the upper layer (required)
        :param binds: bind variables for the INSERT statement
        :param transaction: whether to run inside an existing transaction
        :raises Exception: if no database connection is supplied
        :returns: the driver result from ``processData`` (the original bound
            it to an unused local and returned ``None``)
        """
        if not conn:
            raise Exception("dbs/dao/Oracle/BranchHashes expects db connection from upper layer.")
        result = self.dbi.processData(self.sql, binds, conn, transaction)
        return result
| [
""
] | |
2670a564756e2418d01354846cf57d5defcc1c20 | 460f981dfe1a05f14d2a4cdc6cc71e9ad798b785 | /3/amd64/envs/navigator/lib/python3.6/site-packages/xarray/core/common.py | 5b090bf0d2f077e52b70fdaa639249f070fb92b3 | [
"Python-2.0",
"LicenseRef-scancode-proprietary-license",
"BSD-3-Clause",
"Intel",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"LicenseRef-scancode-mit-old-style",
"dtoa",
"LicenseRef-scancode-public-domain-disclaimer",
"Zlib",
"LicenseRef-scancode-public-domain"
] | permissive | DFO-Ocean-Navigator/navigator-toolchain | d8c7351b477e66d674b50da54ec6ddc0f3a325ee | 930d26886fdf8591b51da9d53e2aca743bf128ba | refs/heads/master | 2022-11-05T18:57:30.938372 | 2021-04-22T02:02:45 | 2021-04-22T02:02:45 | 234,445,230 | 0 | 1 | BSD-3-Clause | 2022-10-25T06:46:23 | 2020-01-17T01:26:49 | C++ | UTF-8 | Python | false | false | 35,834 | py | from __future__ import absolute_import, division, print_function
from textwrap import dedent
import numpy as np
import pandas as pd
from . import dtypes, duck_array_ops, formatting, ops
from .arithmetic import SupportsArithmetic
from .options import _get_keep_attrs
from .pycompat import OrderedDict, basestring, dask_array_type, suppress
from .utils import Frozen, ReprObject, SortedKeysDict, either_dict_or_kwargs
# Used as a sentinel value to indicate a all dimensions
ALL_DIMS = ReprObject('<all-dims>')
class ImplementsArrayReduce(object):
    """Mixin supplying the factory used to generate array reduction methods.

    ``_reduce_method`` builds wrappers that forward to ``self.reduce``, so
    subclasses must provide a ``reduce`` method. The two docstring template
    attributes below are interpolated (via ``{name}``) into the docstrings
    of the generated reduction methods.
    """

    @classmethod
    def _reduce_method(cls, func, include_skipna, numeric_only):
        # Two wrapper variants are needed because only some reductions
        # accept a `skipna` argument; both always pass allow_lazy=True.
        if include_skipna:
            def wrapped_func(self, dim=None, axis=None, skipna=None,
                             **kwargs):
                return self.reduce(func, dim, axis,
                                   skipna=skipna, allow_lazy=True, **kwargs)
        else:
            def wrapped_func(self, dim=None, axis=None,
                             **kwargs):
                return self.reduce(func, dim, axis,
                                   allow_lazy=True, **kwargs)
        return wrapped_func

    # Parameter documentation inserted into generated reduction docstrings.
    _reduce_extra_args_docstring = dedent("""\
        dim : str or sequence of str, optional
            Dimension(s) over which to apply `{name}`.
        axis : int or sequence of int, optional
            Axis(es) over which to apply `{name}`. Only one of the 'dim'
            and 'axis' arguments can be supplied. If neither are supplied, then
            `{name}` is calculated over axes.""")

    # Same idea, for cumulative reductions, which take a single dimension.
    _cum_extra_args_docstring = dedent("""\
        dim : str or sequence of str, optional
            Dimension over which to apply `{name}`.
        axis : int or sequence of int, optional
            Axis over which to apply `{name}`. Only one of the 'dim'
            and 'axis' arguments can be supplied.""")
class ImplementsDatasetReduce(object):
    """Dataset counterpart of ``ImplementsArrayReduce``.

    The generated wrappers take no ``axis`` argument (datasets reduce by
    dimension name only) and always forward ``numeric_only`` to
    ``self.reduce``, so subclasses must provide a ``reduce`` method.
    """

    @classmethod
    def _reduce_method(cls, func, include_skipna, numeric_only):
        # Two wrapper variants: only some reductions accept `skipna`.
        if include_skipna:
            def wrapped_func(self, dim=None, skipna=None,
                             **kwargs):
                return self.reduce(func, dim, skipna=skipna,
                                   numeric_only=numeric_only, allow_lazy=True,
                                   **kwargs)
        else:
            def wrapped_func(self, dim=None, **kwargs):
                return self.reduce(func, dim,
                                   numeric_only=numeric_only, allow_lazy=True,
                                   **kwargs)
        return wrapped_func

    # Parameter documentation interpolated into generated reduction
    # docstrings ({name} is the reduction's name).
    _reduce_extra_args_docstring = \
        """dim : str or sequence of str, optional
            Dimension(s) over which to apply `{name}`. By default `{name}` is
            applied over all dimensions."""

    # Same, for cumulative reductions.
    _cum_extra_args_docstring = \
        """dim : str or sequence of str, optional
            Dimension over which to apply `{name}`.
        axis : int or sequence of int, optional
            Axis over which to apply `{name}`. Only one of the 'dim'
            and 'axis' arguments can be supplied."""
class AbstractArray(ImplementsArrayReduce, formatting.ReprMixin):
    """Shared base class for DataArray and Variable.

    Subclasses are expected to provide ``.values``, ``.dims``, ``.shape``,
    ``.ndim`` and ``.transpose()``, which the methods below build on.
    """

    # Scalar coercion: delegate to the underlying .values (only meaningful
    # for 0-d arrays; NumPy raises for larger ones).
    def __bool__(self):
        return bool(self.values)

    # Python 3 uses __bool__, Python 2 uses __nonzero__
    __nonzero__ = __bool__

    def __float__(self):
        return float(self.values)

    def __int__(self):
        return int(self.values)

    def __complex__(self):
        return complex(self.values)

    def __long__(self):
        return long(self.values)  # noqa

    # NumPy array protocol: lets np.asarray() and friends consume this
    # object directly.
    def __array__(self, dtype=None):
        return np.asarray(self.values, dtype=dtype)

    def __repr__(self):
        return formatting.array_repr(self)

    def _iter(self):
        # Generator over positional sub-arrays; kept separate so that
        # __iter__ can raise eagerly for 0-d arrays before any iteration.
        for n in range(len(self)):
            yield self[n]

    def __iter__(self):
        if self.ndim == 0:
            raise TypeError('iteration over a 0-d array')
        return self._iter()

    @property
    def T(self):
        """Shorthand for ``self.transpose()`` (NumPy-style)."""
        return self.transpose()

    def get_axis_num(self, dim):
        """Return axis number(s) corresponding to dimension(s) in this array.

        Parameters
        ----------
        dim : str or iterable of str
            Dimension name(s) for which to lookup axes.

        Returns
        -------
        int or tuple of int
            Axis number or numbers corresponding to the given dimensions.
        """
        # A single name yields a single int; any other iterable yields a
        # tuple with one axis number per name.
        if isinstance(dim, basestring):
            return self._get_axis_num(dim)
        else:
            return tuple(self._get_axis_num(d) for d in dim)

    def _get_axis_num(self, dim):
        # Translate one dimension name to its positional index, with a
        # clearer error than list.index's ValueError.
        try:
            return self.dims.index(dim)
        except ValueError:
            raise ValueError("%r not found in array dimensions %r" %
                             (dim, self.dims))

    @property
    def sizes(self):
        """Ordered mapping from dimension names to lengths.

        Immutable.

        See also
        --------
        Dataset.sizes
        """
        return Frozen(OrderedDict(zip(self.dims, self.shape)))
class AttrAccessMixin(object):
    """Mixin class that allows getting keys with attribute access
    """
    # While False (i.e. during subclass __init__), __setattr__ permits
    # setting arbitrary attributes; once flipped to True, only existing
    # attributes may be rebound (see __setattr__ below).
    _initialized = False

    @property
    def _attr_sources(self):
        """List of places to look-up items for attribute-style access"""
        return []

    @property
    def _item_sources(self):
        """List of places to look-up items for key-autocompletion """
        return []

    def __getattr__(self, name):
        """Fall back to item lookup in each mapping from ``_attr_sources``."""
        if name != '__setstate__':
            # this avoids an infinite loop when pickle looks for the
            # __setstate__ attribute before the xarray object is initialized
            for source in self._attr_sources:
                with suppress(KeyError):
                    return source[name]
        raise AttributeError("%r object has no attribute %r" %
                             (type(self).__name__, name))

    def __setattr__(self, name, value):
        """Disallow creating new attributes after initialization.

        This guards against the common mistake of writing ``ds.name = ...``
        when ``ds['name'] = ...`` (variable assignment) was intended.
        """
        if self._initialized:
            try:
                # Allow setting instance variables if they already exist
                # (e.g., _attrs). We use __getattribute__ instead of hasattr
                # to avoid key lookups with attribute-style access.
                self.__getattribute__(name)
            except AttributeError:
                raise AttributeError(
                    "cannot set attribute %r on a %r object. Use __setitem__ "
                    "style assignment (e.g., `ds['name'] = ...`) instead to "
                    "assign variables." % (name, type(self).__name__))
        object.__setattr__(self, name, value)

    def __dir__(self):
        """Provide method name lookup and completion. Only provide 'public'
        methods.
        """
        extra_attrs = [item
                       for sublist in self._attr_sources
                       for item in sublist
                       if isinstance(item, basestring)]
        return sorted(set(dir(type(self)) + extra_attrs))

    def _ipython_key_completions_(self):
        """Provide method for the key-autocompletions in IPython.

        See http://ipython.readthedocs.io/en/stable/config/integrating.html#tab-completion
        For the details.
        """  # noqa
        item_lists = [item
                      for sublist in self._item_sources
                      for item in sublist
                      if isinstance(item, basestring)]
        return list(set(item_lists))
def get_squeeze_dims(xarray_obj, dim, axis=None):
    """Return the list of dimension names to squeeze out.

    Parameters
    ----------
    xarray_obj : Dataset or DataArray
        Object whose ``sizes`` mapping is consulted.
    dim : None, str or iterable of str
        Explicit dimension name(s) to squeeze; each must have length 1.
    axis : None, int or iterable of int, optional
        Positional alternative to ``dim``; mutually exclusive with it.
        Previously only an int or a tuple of int was accepted; any iterable
        of ints (e.g. a list) now works as well, instead of crashing with an
        unrelated TypeError.

    Returns
    -------
    list of str
        Names of the length-one dimensions to drop.

    Raises
    ------
    ValueError
        If both ``dim`` and ``axis`` are supplied, if ``axis`` is not
        int(s), or if a selected dimension has length greater than one.
    """
    if dim is not None and axis is not None:
        raise ValueError('cannot use both parameters `axis` and `dim`')
    if dim is None and axis is None:
        # Default: squeeze every length-one dimension.
        return [d for d, s in xarray_obj.sizes.items() if s == 1]

    if dim is not None:
        if isinstance(dim, basestring):
            dim = [dim]
    else:
        # Normalize `axis` to a tuple of ints, accepting any iterable.
        if isinstance(axis, int):
            axis = (axis, )
        try:
            axis = tuple(axis)
        except TypeError:
            raise ValueError('parameter `axis` must be int or iterable of int.')
        if not all(isinstance(a, int) for a in axis):
            raise ValueError('parameter `axis` must be int or iterable of int.')
        alldims = list(xarray_obj.sizes.keys())
        dim = [alldims[a] for a in axis]

    if any(xarray_obj.sizes[k] > 1 for k in dim):
        raise ValueError('cannot select a dimension to squeeze out '
                         'which has length greater than one')
    return dim
class DataWithCoords(SupportsArithmetic, AttrAccessMixin):
"""Shared base class for Dataset and DataArray."""
def squeeze(self, dim=None, drop=False, axis=None):
"""Return a new object with squeezed data.
Parameters
----------
dim : None or str or tuple of str, optional
Selects a subset of the length one dimensions. If a dimension is
selected with length greater than one, an error is raised. If
None, all length one dimensions are squeezed.
drop : bool, optional
If ``drop=True``, drop squeezed coordinates instead of making them
scalar.
axis : int, optional
Select the dimension to squeeze. Added for compatibility reasons.
Returns
-------
squeezed : same type as caller
This object, but with with all or a subset of the dimensions of
length 1 removed.
See Also
--------
numpy.squeeze
"""
dims = get_squeeze_dims(self, dim, axis)
return self.isel(drop=drop, **{d: 0 for d in dims})
def get_index(self, key):
"""Get an index for a dimension, with fall-back to a default RangeIndex
"""
if key not in self.dims:
raise KeyError(key)
try:
return self.indexes[key]
except KeyError:
# need to ensure dtype=int64 in case range is empty on Python 2
return pd.Index(range(self.sizes[key]), name=key, dtype=np.int64)
def _calc_assign_results(self, kwargs):
results = SortedKeysDict()
for k, v in kwargs.items():
if callable(v):
results[k] = v(self)
else:
results[k] = v
return results
def assign_coords(self, **kwargs):
"""Assign new coordinates to this object.
Returns a new object with all the original data in addition to the new
coordinates.
Parameters
----------
kwargs : keyword, value pairs
keywords are the variables names. If the values are callable, they
are computed on this object and assigned to new coordinate
variables. If the values are not callable, (e.g. a DataArray,
scalar, or array), they are simply assigned.
Returns
-------
assigned : same type as caller
A new object with the new coordinates in addition to the existing
data.
Examples
--------
Convert longitude coordinates from 0-359 to -180-179:
>>> da = xr.DataArray(np.random.rand(4),
... coords=[np.array([358, 359, 0, 1])],
... dims='lon')
>>> da
<xarray.DataArray (lon: 4)>
array([0.28298 , 0.667347, 0.657938, 0.177683])
Coordinates:
* lon (lon) int64 358 359 0 1
>>> da.assign_coords(lon=(((da.lon + 180) % 360) - 180))
<xarray.DataArray (lon: 4)>
array([0.28298 , 0.667347, 0.657938, 0.177683])
Coordinates:
* lon (lon) int64 -2 -1 0 1
Notes
-----
Since ``kwargs`` is a dictionary, the order of your arguments may not
be preserved, and so the order of the new variables is not well
defined. Assigning multiple variables within the same ``assign_coords``
is possible, but you cannot reference other variables created within
the same ``assign_coords`` call.
See also
--------
Dataset.assign
Dataset.swap_dims
"""
data = self.copy(deep=False)
results = self._calc_assign_results(kwargs)
data.coords.update(results)
return data
def assign_attrs(self, *args, **kwargs):
"""Assign new attrs to this object.
Returns a new object equivalent to self.attrs.update(*args, **kwargs).
Parameters
----------
args : positional arguments passed into ``attrs.update``.
kwargs : keyword arguments passed into ``attrs.update``.
Returns
-------
assigned : same type as caller
A new object with the new attrs in addition to the existing data.
See also
--------
Dataset.assign
"""
out = self.copy(deep=False)
out.attrs.update(*args, **kwargs)
return out
def pipe(self, func, *args, **kwargs):
"""
Apply func(self, *args, **kwargs)
This method replicates the pandas method of the same name.
Parameters
----------
func : function
function to apply to this xarray object (Dataset/DataArray).
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the xarray object.
args : positional arguments passed into ``func``.
kwargs : a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
xarray or pandas objects, e.g., instead of writing
>>> f(g(h(ds), arg1=a), arg2=b, arg3=c)
You can write
>>> (ds.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (ds.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
See Also
--------
pandas.DataFrame.pipe
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
msg = ('%s is both the pipe target and a keyword argument'
% target)
raise ValueError(msg)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def groupby(self, group, squeeze=True):
"""Returns a GroupBy object for performing grouped operations.
Parameters
----------
group : str, DataArray or IndexVariable
Array whose unique values should be used to group this array. If a
string, must be the name of a variable contained in this dataset.
squeeze : boolean, optional
If "group" is a dimension of any arrays in this dataset, `squeeze`
controls whether the subarrays have a dimension of length 1 along
that dimension or if the dimension is squeezed out.
Returns
-------
grouped : GroupBy
A `GroupBy` object patterned after `pandas.GroupBy` that can be
iterated over in the form of `(unique_value, grouped_array)` pairs.
Examples
--------
Calculate daily anomalies for daily data:
>>> da = xr.DataArray(np.linspace(0, 1826, num=1827),
... coords=[pd.date_range('1/1/2000', '31/12/2004',
... freq='D')],
... dims='time')
>>> da
<xarray.DataArray (time: 1827)>
array([0.000e+00, 1.000e+00, 2.000e+00, ..., 1.824e+03, 1.825e+03, 1.826e+03])
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...
>>> da.groupby('time.dayofyear') - da.groupby('time.dayofyear').mean('time')
<xarray.DataArray (time: 1827)>
array([-730.8, -730.8, -730.8, ..., 730.2, 730.2, 730.5])
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...
dayofyear (time) int64 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 ...
See Also
--------
core.groupby.DataArrayGroupBy
core.groupby.DatasetGroupBy
""" # noqa
return self._groupby_cls(self, group, squeeze=squeeze)
def groupby_bins(self, group, bins, right=True, labels=None, precision=3,
include_lowest=False, squeeze=True):
"""Returns a GroupBy object for performing grouped operations.
Rather than using all unique values of `group`, the values are discretized
first by applying `pandas.cut` [1]_ to `group`.
Parameters
----------
group : str, DataArray or IndexVariable
Array whose binned values should be used to group this array. If a
string, must be the name of a variable contained in this dataset.
bins : int or array of scalars
If bins is an int, it defines the number of equal-width bins in the
range of x. However, in this case, the range of x is extended by .1%
on each side to include the min or max values of x. If bins is a
sequence it defines the bin edges allowing for non-uniform bin
width. No extension of the range of x is done in this case.
right : boolean, optional
Indicates whether the bins include the rightmost edge or not. If
right == True (the default), then the bins [1,2,3,4] indicate
(1,2], (2,3], (3,4].
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, string bin labels are assigned by
`pandas.cut`.
precision : int
The precision at which to store and display the bins labels.
include_lowest : bool
Whether the first interval should be left-inclusive or not.
squeeze : boolean, optional
If "group" is a dimension of any arrays in this dataset, `squeeze`
controls whether the subarrays have a dimension of length 1 along
that dimension or if the dimension is squeezed out.
Returns
-------
grouped : GroupBy
A `GroupBy` object patterned after `pandas.GroupBy` that can be
iterated over in the form of `(unique_value, grouped_array)` pairs.
The name of the group has the added suffix `_bins` in order to
distinguish it from the original variable.
References
----------
.. [1] http://pandas.pydata.org/pandas-docs/stable/generated/pandas.cut.html
""" # noqa
return self._groupby_cls(self, group, squeeze=squeeze, bins=bins,
cut_kwargs={'right': right, 'labels': labels,
'precision': precision,
'include_lowest': include_lowest})
def rolling(self, dim=None, min_periods=None, center=False, **dim_kwargs):
"""
Rolling window object.
Parameters
----------
dim: dict, optional
Mapping from the dimension name to create the rolling iterator
along (e.g. `time`) to its moving window size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). The default, None, is equivalent to
setting min_periods equal to the size of the window.
center : boolean, default False
Set the labels at the center of the window.
**dim_kwargs : optional
The keyword arguments form of ``dim``.
One of dim or dim_kwargs must be provided.
Returns
-------
Rolling object (core.rolling.DataArrayRolling for DataArray,
core.rolling.DatasetRolling for Dataset.)
Examples
--------
Create rolling seasonal average of monthly data e.g. DJF, JFM, ..., SON:
>>> da = xr.DataArray(np.linspace(0, 11, num=12),
... coords=[pd.date_range('15/12/1999',
... periods=12, freq=pd.DateOffset(months=1))],
... dims='time')
>>> da
<xarray.DataArray (time: 12)>
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])
Coordinates:
* time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...
>>> da.rolling(time=3, center=True).mean()
<xarray.DataArray (time: 12)>
array([nan, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., nan])
Coordinates:
* time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...
Remove the NaNs using ``dropna()``:
>>> da.rolling(time=3, center=True).mean().dropna('time')
<xarray.DataArray (time: 10)>
array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])
Coordinates:
* time (time) datetime64[ns] 2000-01-15 2000-02-15 2000-03-15 ...
See Also
--------
core.rolling.DataArrayRolling
core.rolling.DatasetRolling
""" # noqa
dim = either_dict_or_kwargs(dim, dim_kwargs, 'rolling')
return self._rolling_cls(self, dim, min_periods=min_periods,
center=center)
def resample(self, indexer=None, skipna=None, closed=None, label=None,
base=0, keep_attrs=None, loffset=None, **indexer_kwargs):
"""Returns a Resample object for performing resampling operations.
Handles both downsampling and upsampling. If any intervals contain no
values from the original object, they will be given the value ``NaN``.
Parameters
----------
indexer : {dim: freq}, optional
Mapping from the dimension name to resample frequency.
skipna : bool, optional
Whether to skip missing values when aggregating in downsampling.
closed : 'left' or 'right', optional
Side of each interval to treat as closed.
label : 'left or 'right', optional
Side of each interval to use for labeling.
base : int, optional
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '24H' frequency, base could
range from 0 through 23.
loffset : timedelta or str, optional
Offset used to adjust the resampled time labels. Some pandas date
offset strings are supported.
keep_attrs : bool, optional
If True, the object's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**indexer_kwargs : {dim: freq}
The keyword arguments form of ``indexer``.
One of indexer or indexer_kwargs must be provided.
Returns
-------
resampled : same type as caller
This object resampled.
Examples
--------
Downsample monthly time-series data to seasonal data:
>>> da = xr.DataArray(np.linspace(0, 11, num=12),
... coords=[pd.date_range('15/12/1999',
... periods=12, freq=pd.DateOffset(months=1))],
... dims='time')
>>> da
<xarray.DataArray (time: 12)>
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])
Coordinates:
* time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...
>>> da.resample(time="QS-DEC").mean()
<xarray.DataArray (time: 4)>
array([ 1., 4., 7., 10.])
Coordinates:
* time (time) datetime64[ns] 1999-12-01 2000-03-01 2000-06-01 2000-09-01
Upsample monthly time-series data to daily data:
>>> da.resample(time='1D').interpolate('linear')
<xarray.DataArray (time: 337)>
array([ 0. , 0.032258, 0.064516, ..., 10.935484, 10.967742, 11. ])
Coordinates:
* time (time) datetime64[ns] 1999-12-15 1999-12-16 1999-12-17 ...
References
----------
.. [1] http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
""" # noqa
# TODO support non-string indexer after removing the old API.
from .dataarray import DataArray
from .resample import RESAMPLE_DIM
from ..coding.cftimeindex import CFTimeIndex
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
# note: the second argument (now 'skipna') use to be 'dim'
if ((skipna is not None and not isinstance(skipna, bool))
or ('how' in indexer_kwargs and 'how' not in self.dims)
or ('dim' in indexer_kwargs and 'dim' not in self.dims)):
raise TypeError(
'resample() no longer supports the `how` or '
'`dim` arguments. Instead call methods on resample '
"objects, e.g., data.resample(time='1D').mean()")
indexer = either_dict_or_kwargs(indexer, indexer_kwargs, 'resample')
if len(indexer) != 1:
raise ValueError(
"Resampling only supported along single dimensions."
)
dim, freq = indexer.popitem()
dim_name = dim
dim_coord = self[dim]
if isinstance(self.indexes[dim_name], CFTimeIndex):
raise NotImplementedError(
'Resample is currently not supported along a dimension '
'indexed by a CFTimeIndex. For certain kinds of downsampling '
'it may be possible to work around this by converting your '
'time index to a DatetimeIndex using '
'CFTimeIndex.to_datetimeindex. Use caution when doing this '
'however, because switching to a DatetimeIndex from a '
'CFTimeIndex with a non-standard calendar entails a change '
'in the calendar type, which could lead to subtle and silent '
'errors.'
)
group = DataArray(dim_coord, coords=dim_coord.coords,
dims=dim_coord.dims, name=RESAMPLE_DIM)
# TODO: to_offset() call required for pandas==0.19.2
grouper = pd.Grouper(freq=freq, closed=closed, label=label, base=base,
loffset=pd.tseries.frequencies.to_offset(loffset))
resampler = self._resample_cls(self, group=group, dim=dim_name,
grouper=grouper,
resample_dim=RESAMPLE_DIM)
return resampler
def where(self, cond, other=dtypes.NA, drop=False):
"""Filter elements from this object according to a condition.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic.
Parameters
----------
cond : DataArray or Dataset with boolean dtype
Locations at which to preserve this object's values.
other : scalar, DataArray or Dataset, optional
Value to use for locations in this object where ``cond`` is False.
By default, these locations filled with NA.
drop : boolean, optional
If True, coordinate labels that only correspond to False values of
the condition are dropped from the result. Mutually exclusive with
``other``.
Returns
-------
Same type as caller.
Examples
--------
>>> import numpy as np
>>> a = xr.DataArray(np.arange(25).reshape(5, 5), dims=('x', 'y'))
>>> a.where(a.x + a.y < 4)
<xarray.DataArray (x: 5, y: 5)>
array([[ 0., 1., 2., 3., nan],
[ 5., 6., 7., nan, nan],
[ 10., 11., nan, nan, nan],
[ 15., nan, nan, nan, nan],
[ nan, nan, nan, nan, nan]])
Dimensions without coordinates: x, y
>>> a.where(a.x + a.y < 5, -1)
<xarray.DataArray (x: 5, y: 5)>
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, -1],
[10, 11, 12, -1, -1],
[15, 16, -1, -1, -1],
[20, -1, -1, -1, -1]])
Dimensions without coordinates: x, y
>>> a.where(a.x + a.y < 4, drop=True)
<xarray.DataArray (x: 4, y: 4)>
array([[ 0., 1., 2., 3.],
[ 5., 6., 7., nan],
[ 10., 11., nan, nan],
[ 15., nan, nan, nan]])
Dimensions without coordinates: x, y
See also
--------
numpy.where : corresponding numpy function
where : equivalent function
"""
from .alignment import align
from .dataarray import DataArray
from .dataset import Dataset
if drop:
if other is not dtypes.NA:
raise ValueError('cannot set `other` if drop=True')
if not isinstance(cond, (Dataset, DataArray)):
raise TypeError("cond argument is %r but must be a %r or %r" %
(cond, Dataset, DataArray))
# align so we can use integer indexing
self, cond = align(self, cond)
# get cond with the minimal size needed for the Dataset
if isinstance(cond, Dataset):
clipcond = cond.to_array().any('variable')
else:
clipcond = cond
# clip the data corresponding to coordinate dims that are not used
nonzeros = zip(clipcond.dims, np.nonzero(clipcond.values))
indexers = {k: np.unique(v) for k, v in nonzeros}
self = self.isel(**indexers)
cond = cond.isel(**indexers)
return ops.where_method(self, cond, other)
def close(self):
"""Close any files linked to this object
"""
if self._file_obj is not None:
self._file_obj.close()
self._file_obj = None
def isin(self, test_elements):
"""Tests each value in the array for whether it is in the supplied list.
Parameters
----------
test_elements : array_like
The values against which to test each value of `element`.
This argument is flattened if an array or array_like.
See numpy notes for behavior with non-array-like parameters.
Returns
-------
isin : same as object, bool
Has the same shape as this object.
Examples
--------
>>> array = xr.DataArray([1, 2, 3], dims='x')
>>> array.isin([1, 3])
<xarray.DataArray (x: 3)>
array([ True, False, True])
Dimensions without coordinates: x
See also
--------
numpy.isin
"""
from .computation import apply_ufunc
from .dataset import Dataset
from .dataarray import DataArray
from .variable import Variable
if isinstance(test_elements, Dataset):
raise TypeError(
'isin() argument must be convertible to an array: {}'
.format(test_elements))
elif isinstance(test_elements, (Variable, DataArray)):
# need to explicitly pull out data to support dask arrays as the
# second argument
test_elements = test_elements.data
return apply_ufunc(
duck_array_ops.isin,
self,
kwargs=dict(test_elements=test_elements),
dask='allowed',
)
    def __enter__(self):
        # Context-manager support: ``with obj: ...`` hands back the object
        # itself so it can be bound with ``as``.
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Leaving the ``with`` block closes any underlying file handle;
        # exceptions are not suppressed (implicit None return).
        self.close()
def full_like(other, fill_value, dtype=None):
    """Return a new object with the same shape and type as ``other``,
    filled with ``fill_value``.

    Parameters
    ----------
    other : DataArray, Dataset, or Variable
        The reference object in input.
    fill_value : scalar
        Value to fill the new object with before returning it.
    dtype : dtype, optional
        dtype of the new array. If omitted, it defaults to other.dtype.

    Returns
    -------
    out : same as object
        New object with the same shape and type as other, with the data
        filled with fill_value. Coords will be copied from other.
        If other is based on dask, the new one will be as well, and will be
        split in the same chunks.
    """
    from .dataarray import DataArray
    from .dataset import Dataset
    from .variable import Variable

    if isinstance(other, Dataset):
        # Fill every data variable, keeping coords and attrs intact.
        data_vars = OrderedDict(
            (name, _full_like_variable(var, fill_value, dtype))
            for name, var in other.data_vars.items())
        return Dataset(data_vars, coords=other.coords, attrs=other.attrs)
    if isinstance(other, DataArray):
        return DataArray(
            _full_like_variable(other.variable, fill_value, dtype),
            dims=other.dims, coords=other.coords, attrs=other.attrs,
            name=other.name)
    if isinstance(other, Variable):
        return _full_like_variable(other, fill_value, dtype)
    raise TypeError("Expected DataArray, Dataset, or Variable")
def _full_like_variable(other, fill_value, dtype=None):
    """Inner function of full_like, where ``other`` must be a Variable."""
    from .variable import Variable

    if isinstance(other.data, dask_array_type):
        import dask.array
        # dask.array.full needs an explicit dtype; default to the source's.
        target_dtype = other.dtype if dtype is None else dtype
        data = dask.array.full(other.shape, fill_value, dtype=target_dtype,
                               chunks=other.data.chunks)
    else:
        # np.full_like already treats dtype=None as "same as input".
        data = np.full_like(other, fill_value, dtype=dtype)
    return Variable(dims=other.dims, data=data, attrs=other.attrs)
def zeros_like(other, dtype=None):
    """Shorthand for ``full_like(other, 0, dtype)``."""
    return full_like(other, 0, dtype=dtype)
def ones_like(other, dtype=None):
    """Shorthand for ``full_like(other, 1, dtype)``."""
    return full_like(other, 1, dtype=dtype)
def is_np_datetime_like(dtype):
    """Check if ``dtype`` is a subclass of the numpy datetime types
    (datetime64 or timedelta64).
    """
    datetime_bases = (np.datetime64, np.timedelta64)
    return any(np.issubdtype(dtype, base) for base in datetime_bases)
def contains_cftime_datetimes(var):
    """Check if a variable contains cftime datetime objects."""
    try:
        from cftime import datetime as cftime_datetime
    except ImportError:
        # cftime is optional; without it the answer is trivially "no".
        return False

    # Only non-empty object-dtype arrays can hold cftime datetimes.
    if var.dtype != np.dtype('O') or var.data.size == 0:
        return False

    # Inspect a single sample element; assumes homogeneous contents.
    sample = var.data.ravel()[0]
    if isinstance(sample, dask_array_type):
        sample = sample.compute()
    if isinstance(sample, np.ndarray):
        sample = sample.item()
    return isinstance(sample, cftime_datetime)
def _contains_datetime_like_objects(var):
    """Check if a variable contains datetime like objects (either
    np.datetime64, np.timedelta64, or cftime.datetime)."""
    if is_np_datetime_like(var.dtype):
        return True
    return contains_cftime_datetimes(var)
| [
"dwayne.hart@gmail.com"
] | dwayne.hart@gmail.com |
0fee4123dd316b974c3fdd92e1ace45e6046c0e7 | 1f40a08ee85ef6f78384e6f6f53bcf3f86b8c44b | /shorten/app/views.py | fec1ecdf840fbfdd7d0588f916a668b2701fdb4d | [] | no_license | infsolution/EncurtUrl | bff4543fb17f3c2a6853c64abc24d307abcd04bf | 0f6d8aa23a2498a8bf5575797db9a5a8eb855403 | refs/heads/master | 2020-05-14T09:31:39.265337 | 2019-09-28T17:44:25 | 2019-09-28T17:44:25 | 181,741,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,523 | py | from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, InvalidPage
from django.shortcuts import render, redirect
from django.http import JsonResponse
from rest_framework import generics
from .models import *
from .forms import *
def index(request):
    """Render the landing page, attaching the logged-in profile if any."""
    context = {
        "title_page": "O melhor encurtador",
        "perfil_logado": get_perfil_logado(request),
    }
    return render(request, 'app/index.html', context)
def get_perfil_logado(request):
    """Return the Perfil linked to the authenticated user, or None.

    Returns None for anonymous users / users without a Perfil row instead
    of propagating the lookup error. The original caught a bare
    ``Exception``, which also hid genuine programming errors; only the
    expected lookup failures are swallowed now.
    """
    try:
        return Perfil.objects.get(user=request.user)
    except (Perfil.DoesNotExist, TypeError, ValueError):
        # DoesNotExist: no Perfil for this user.
        # TypeError/ValueError: AnonymousUser cannot be used as a query value.
        return None
def shorten(request):
    """Shorten the URL passed in the ``url`` query parameter.

    Optional query flags: ``private`` attaches an access code, ``preview``
    enables a preview page with the message from ``preview_msg``.
    Renders an error page when no URL was provided.
    """
    target_url = request.GET.get('url')
    if not target_url:
        missing_context = {
            "value": "Nenhuma url foi informada",
            "title_page": "Url Não encontrada",
            "perfil_logado": get_perfil_logado(request),
        }
        return render(request, 'app/urlnotfound.html', missing_context)

    short = Shortened(perfil=get_perfil_logado(request), url_user=target_url)
    short.shorten()
    if request.GET.getlist('private'):
        short.get_private_code()
    if request.GET.getlist('preview'):
        short.preview = True
        short.preview_message = request.GET.get('preview_msg')
    short.save()

    success_context = {
        "url_short": short.url_shortened,
        "perfil_logado": get_perfil_logado(request),
        "title_page": "TShort: Sua url encurtada",
    }
    return render(request, 'app/showurl.html', success_context)
@login_required
def shotened_report(request):
    """Render a paginated report of the current user's shortened URLs.

    Fix: the original called ``paginator.get_page(page)``, which never
    raises ``InvalidPage`` (it clamps invalid pages internally), so the
    ``except InvalidPage`` branch was dead code. Using ``paginator.page()``
    makes the explicit fallback to page 1 actually reachable.
    """
    ITEMS_PER_PAGE = 5
    perfil_logado = get_perfil_logado(request)
    shorteneds = Shortened.objects.filter(perfil=perfil_logado)
    paginator = Paginator(shorteneds, ITEMS_PER_PAGE)
    page = request.GET.get('page', 1)
    try:
        short_page = paginator.page(page)
    except InvalidPage:
        # Fall back to the first page for out-of-range or malformed numbers.
        short_page = paginator.page(1)
    return render(request, 'app/report.html',
                  {"shorteneds": short_page, "perfil_logado": perfil_logado})
@login_required
def detail(request, shortened_id):
    """Show the report-detail page for a single shortened URL."""
    shorten = Shortened.objects.get(id=shortened_id)
    context = {'shorten': shorten,
               'perfil_logado': get_perfil_logado(request)}
    return render(request, 'app/report_detail.html', context)
def go_to_url(request, shortened):
    """Resolve a shortened code: record the click, then either show the
    private-access form, the preview page, or redirect to the target URL.
    """
    if request.method != 'GET':
        # Non-GET requests were not handled before either (implicit None).
        return None
    try:
        short = Shortened.objects.get(url_shortened=shortened)
        get_click(request, short)
    except Exception as e:
        return render(request, 'app/urlnotfound.html',
                      {"value": shortened, "error": e,
                       "title_page": "Url Não encontrada"})
    if short.private_code != None:
        return render(request, 'app/private_access.html', {"short": short})
    if short.preview:
        return render(request, 'app/preview.html',
                      {'short': short,
                       'perfil_logado': get_perfil_logado(request)})
    return redirect(short.url_user)
def create_user(request):
    """Handle user sign-up: show the form on GET, create the User and its
    Perfil on a valid POST with matching passwords."""
    if request.method != 'POST':
        return render(request, 'app/add.html', {"form": UserModelForm()})

    form = UserModelForm(request.POST)
    if not form.is_valid():
        return render(request, 'app/add.html',
                      {'form': UserModelForm(request.POST),
                       'alert_type': 'danger',
                       'msg_confirm': 'Ocorreu um erro ao realizar o cadastro.'})

    if request.POST['last-password'] != request.POST['password']:
        return render(request, 'app/add.html',
                      {'form': UserModelForm(),
                       'alert_type': 'danger',
                       'msg_confirm': 'As senhas não são iguais'})

    user = User.objects.create_user(request.POST['username'],
                                    request.POST['email'],
                                    request.POST['last-password'])
    perfil = Perfil(name=user.username, user=user)
    perfil.save()
    return render(request, 'app/add.html',
                  {'form': UserModelForm(), 'alert_type': 'success',
                   'msg_confirm': 'Parabéns seu cadastro foi realizado.'})
'''def do_login(request):
if request.method == 'POST':
user = authenticate(username = request.POST['username'], password = request.POST['password'])
if user is not None:
login(request,user)
#return redirect('/app/'+str(user.id), user)
return redirect('index')
return render(request,'app/login.html' ,{"error_msg":"Usuário ou senha Invalidos"})
return render(request, 'app/login.html')'''
def do_logout(request):
    """Terminate the current session and redirect to the login page."""
    logout(request)
    return redirect('/login/')
def access_private(request):
    """Validate the access code for a private shortened URL.

    Fix: the original only handled POST and fell through returning None on
    other methods, which makes Django raise "view didn't return an
    HttpResponse". Stray non-POST requests are now redirected home.
    """
    if request.method != 'POST':
        return redirect('/')
    short = Shortened.objects.get(url_shortened=request.POST['url_shortened'])
    if request.POST.get('private_code') == short.private_code:
        return redirect(short.url_user)
    return render(request, 'app/private_access.html',
                  {"short": short, "error_msg": "Código inválido"})
@login_required
def get_contatos(request):
    """Render the contacts page for the authenticated user."""
    context = {"perfil_logado": get_perfil_logado(request)}
    return render(request, 'app/contatos.html', context)
def request_access(request, codeurl):
    """Show the access-request form; on POST, try to notify the owner of
    the private link identified by ``codeurl``."""
    if request.method == 'POST':
        target = Shortened.objects.get(url_shortened=codeurl)
        if send_message(target):
            return render(request, 'app/request_access.html',
                          {"code": codeurl,
                           "msg": "Sua solicitação foi enviada. Aquarde contato."})
    return render(request, 'app/request_access.html', {"code": codeurl})
def send_message(short):
    # TODO: placeholder — no notification is actually sent yet; callers
    # treat the True return value as "request delivered".
    return True
def get_click(request, shortened):
    """Record one click against the given Shortened instance.

    ``request`` is currently unused but kept for interface compatibility
    with callers.

    Fix: the original wrapped the save in ``print(shor.save())`` — a
    leftover debug statement that only printed ``None`` (``Model.save()``
    returns nothing).
    """
    click = Click(shortened=shortened)
    click.save()
def about(request):
    """Render the about page, attaching the profile when logged in.

    Fix: the original called ``get_perfil_logado`` twice (two database
    lookups); the result is now fetched once and reused.
    """
    perfil = get_perfil_logado(request)
    context = {"perfil_logado": perfil} if perfil else {}
    return render(request, 'app/about.html', context)
def help(request):
    """Render the help page, attaching the profile when logged in.

    NOTE: the name shadows the ``help`` builtin; it is kept unchanged
    because the URLconf references this view by name.

    Fix: the original called ``get_perfil_logado`` twice (two database
    lookups); the result is now fetched once and reused.
    """
    perfil = get_perfil_logado(request)
    context = {"perfil_logado": perfil} if perfil else {}
    return render(request, 'app/help.html', context)
def personalize(request, shortened_id):
    # TODO: not implemented — placeholder for per-link customization.
    pass
def valid(request, url):
    """Return a JSON payload saying whether ``url`` is a known shortened code.

    Fix: the original assigned the flag to a misspelled variable
    (``rersult``) but returned ``result``, raising NameError on every call.
    """
    try:
        Shortened.objects.get(url_shortened=url)
        result = True
    except Exception:
        # Typically Shortened.DoesNotExist; any lookup failure means
        # "not found" rather than a crash (best-effort, as before).
        result = False
    return JsonResponse({'result': result})
#API#
| [
"clsinfsolution@gmail.com"
] | clsinfsolution@gmail.com |
ce8203a37a0d73246f63399116e942a387aa6b19 | 38eb57300418e6f10433630437388f779ce50e09 | /rbac_permission/rbac/servers/permission.py | 4fc6af516966b9eb74fc2a0ed9e12b36cfe54973 | [] | no_license | SelfShadows/Django-Flask | f37839f763133f0d62bffad3128171c426a1c038 | 13e32d1c8aac1532b43323e1891c423fe78f2813 | refs/heads/master | 2021-01-04T12:31:18.018508 | 2020-02-14T16:29:27 | 2020-02-14T16:29:27 | 240,550,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,633 | py |
def init_session(request, user):
    """Populate the session with the user's id and permission structures.

    Stores three keys on ``request.session``:
      - ``user_id``: primary key of the logged-in user.
      - ``permissions_dict``: {group_id: {"urls": [...], "actions": [...]}}
        aggregated from all of the user's roles.
      - ``menu_permission_list``: (url, group name) pairs for permissions
        whose action is "list", used to build the navigation menu.

    Fix: removed the leftover debug ``print`` statements the original
    emitted on every login.
    """
    # Register the user id in the session.
    request.session["user_id"] = user.pk

    # Group the user's permissions by permission group.
    permissions = user.roles.all().values(
        "permissions__url", "permissions__action", "permissions__group_id")
    permissions_dict = {}
    for item in permissions:
        group_id = item["permissions__group_id"]
        entry = permissions_dict.setdefault(
            group_id, {"urls": [], "actions": []})
        entry["urls"].append(item["permissions__url"])
        entry["actions"].append(item["permissions__action"])
    request.session["permissions_dict"] = permissions_dict

    # Build the menu entries from "list"-action permissions.
    ret = user.roles.all().values(
        "permissions__url", "permissions__action", "permissions__group__name")
    menu_permission_list = []
    for item in ret:
        if item["permissions__action"] == "list":
            menu_permission_list.append(
                (item["permissions__url"], item["permissions__group__name"]))
    request.session["menu_permission_list"] = menu_permission_list
| [
"870670791@qq.com"
] | 870670791@qq.com |
7609d8654867171cc043ee30d5b4edc4ba5d48f2 | ed8db15dad4236ada32c0355e032dc996266a271 | /Advance_Python/8. Inheritance/4. ConstructorOverriding.py | 207ba7b1ce97664ee242397920676842b2750dc9 | [] | no_license | mukund1985/Python-Tutotrial | a01e0c3ea77690c23c6f30ba1a157c450e5a53ed | bfcf0c81029ce2bee4aa855d90661df25cc94ef9 | refs/heads/master | 2021-05-21T15:41:18.018660 | 2020-11-04T02:20:30 | 2020-11-04T02:20:30 | 309,857,690 | 1 | 0 | null | 2020-11-04T02:14:37 | 2020-11-04T02:11:38 | Python | UTF-8 | Python | false | false | 468 | py | # Constructor Overriding
class Father: # Parent Class
def __init__(self):
self.money = 1000
print("Father Class Constructor")
def show(self):
print("Father Class Instance Method")
class Son(Father): # Child Class
def __init__(self):
self.money = 5000
self.car = 'BMW'
print("Son Class Constructor")
def disp(self):
print("Son Class Instance Method")
s = Son()
print(s.money)
print(s.car)
s.disp()
s.show()
| [
"mukund.pandey@gmail.com"
] | mukund.pandey@gmail.com |
9586b10b08a6dad3fcc896702a405df1651e8e2b | 54a2dbd057fe471bae3763499eed9df66cd86af0 | /insight_testsuite/temp/src/graph.py | 77f060754c7b4743de49c02f108e454f41e25d37 | [] | no_license | ying4uang/digital-wallet | 42e03e0d71d1c06bccb2ce3fe75f287dff5dfb5e | 49c82d01df5987acb468f1e79c5ea4eb82a0b96e | refs/heads/master | 2020-12-24T11:17:28.899386 | 2016-11-11T05:06:57 | 2016-11-11T05:06:57 | 73,040,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,451 | py | #!/usr/bin/env python
from collections import deque
class Vertex:
    """A graph node identified by ``node_id`` (in this project, a user id).

    Keeps the ids of directly connected vertices — the user's first-degree
    connections — in a set.
    """

    def __init__(self, node_id):
        """Create a vertex with the given id and no connections yet."""
        self.id = node_id
        self.first_connections = set()

    def add_first_connections(self, node_id):
        """Register ``node_id`` as a first-degree connection of this vertex."""
        self.first_connections.add(node_id)

    def __str__(self):
        """Render the vertex as its id."""
        return str(self.id)

    def get_first_connections(self):
        """Return the set of first-degree connection ids."""
        return self.first_connections

    def get_id(self):
        """Return this vertex's id."""
        return self.id
class Graph:
    """
    Graph structure, consisted of zero to many vertices.

    Vertices are stored in ``vert_list`` keyed by node id; edges are kept
    implicitly as each vertex's first-degree connection set (undirected).
    """
    def __init__(self):
        """
        Construct a new, empty Graph object.
        :return: returns nothing
        """
        # node_id -> Vertex
        self.vert_list = {}
        self.numVertices = 0
    def add_vertex(self,node_id):
        """
        Add a vertex to the graph.
        :param node_id: int, id of the vertex, userid.
        :return: the newly created Vertex object
        """
        self.numVertices = self.numVertices + 1
        newVertex = Vertex(node_id)
        self.vert_list[node_id] = newVertex
        return newVertex
    def get_vertex(self,node_id):
        """
        Obtain a vertex object by its id.
        :param node_id: int, id of the vertex, userid.
        :return: the Vertex for node_id, or None if it is unknown
        """
        if node_id in self.vert_list:
            return self.vert_list[node_id]
        else:
            return None
    def add_edge(self,source_id,target_id):
        """
        Add an undirected edge to the graph, creating missing vertices.
        :param source_id: int, id of source node.
        :param target_id: int, id of target node.
        :return: returns nothing
        """
        if source_id not in self.vert_list:
            nv = self.add_vertex(source_id)
        if target_id not in self.vert_list:
            nv = self.add_vertex(target_id)
        # Record the connection in both directions (undirected edge).
        self.vert_list[source_id].add_first_connections(target_id)
        self.vert_list[target_id].add_first_connections(source_id)
    def bibfs_degree_between(self,source_id,target_id,level_limit):
        """
        Bidirectional breadth-first search for the degree of separation
        between two users: it alternately expands the frontier from the
        source side and the target side, returning as soon as one frontier
        reaches a node already seen from the other side.
        :param source_id: int, id of source node.
        :param target_id: int, id of target node.
        :param level_limit: int, the limit to the degree of connections we
            are searching; each side explores at most level_limit/2 levels.
        :return: int, degree between the two users, or 0 when no connection
            is found within the limit (0 is also returned when a dequeued
            target node has no Vertex in the graph — see the else branch).

        NOTE(review): ``dist_source``/``dist_target`` are incremented once
        per dequeued node rather than once per completed BFS level, and the
        two loops increment at different nesting depths — the reported
        degree looks suspect for branching graphs; confirm against known
        expected outputs before relying on the exact value.
        """
        # Frontier queues for each side; visited nodes are not re-enqueued.
        source_queue = deque()
        source_queue.append(source_id)
        target_queue = deque()
        target_queue.append(target_id)
        # Nodes already expanded from each side.
        source_visited = set()
        source_visited.add(source_id)
        target_visited = set()
        target_visited.add(target_id)
        # All nodes reachable so far from each side; the search stops when
        # a newly discovered node appears in the opposite side's set.
        source_connections = set()
        source_connections.add(source_id)
        target_connections = set()
        target_connections.add(target_id)
        # Since we search from both ends, level_limit/2 levels per side
        # suffice to cover level_limit degrees in total.
        current_level = 1
        # Per-side distance counters (see NOTE(review) above).
        dist_source = dist_target = 0
        while current_level <= level_limit/2:
            # Expand one step from the source side.
            while (source_queue):
                source_vert_id = source_queue.popleft()
                source_vert = self.get_vertex(source_vert_id)
                if(source_vert is not None):
                    for source_node in source_vert.get_first_connections():
                        if source_node not in source_visited:
                            if source_node in target_connections:
                                return dist_source + dist_target + 1
                            source_queue.append(source_node)
                            source_visited.add(source_node)
                            source_connections.add(source_node)
                dist_source = dist_source + 1
                #switching to target loop
                if current_level == dist_source:
                    break
            # Expand one step from the target side.
            while (target_queue):
                target_vert_id = target_queue.popleft()
                target_vert = self.get_vertex(target_vert_id)
                if(target_vert is not None):
                    for target_node in target_vert.get_first_connections():
                        if target_node not in target_visited:
                            if target_node in source_connections:
                                return dist_source + dist_target + 1
                            target_queue.append(target_node)
                            target_visited.add(target_node)
                            target_connections.add(target_node)
                    dist_target = dist_target+1
                else:
                    # Target node has no Vertex at all: treat as unreachable.
                    return 0
                if current_level == dist_target:
                    break
            current_level = current_level + 1
        return 0
| [
"ying@xandys-MacBook-Pro.local"
] | ying@xandys-MacBook-Pro.local |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.