max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
migrations/versions/03f014e8d128_added_basic_prefix.py | charlestondance/amoslims | 0 | 12770151 | <gh_stars>0
"""added basic prefix
Revision ID: 03f014e8d128
Revises: fed23b3ce53a
Create Date: 2017-01-15 10:25:37.768783
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '03f014e8d128'
down_revision = 'fed23b3ce53a'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``basic_``-prefixed workflow tables (each with its
    per-column indexes), drop the superseded un-prefixed tables, and add
    an indexed ``job_type`` column to ``job_table`` plus a ``job_types``
    lookup table.
    """

    def _s(name):
        # Nullable String(64) column -- the most common column shape here.
        return sa.Column(name, sa.String(length=64), nullable=True)

    def _i(name):
        # Nullable Integer column.
        return sa.Column(name, sa.Integer(), nullable=True)

    def _table(name, *columns):
        # Every table shares the same surrogate integer primary key.
        op.create_table(
            name,
            sa.Column('id', sa.Integer(), nullable=False),
            *columns,
            sa.PrimaryKeyConstraint('id'),
        )

    def _index(table, columns):
        # One non-unique index per column, using Alembic's default naming
        # convention ix_<table>_<column> (via op.f).
        for column in columns:
            op.create_index(op.f('ix_' + table + '_' + column), table, [column], unique=False)

    _table('basic_buffer_plate_wells',
           _s('unique_job_id'), _s('buffer_plate_well_id'),
           sa.Column('loading_volume', sa.Float(), nullable=True),
           _s('buffer_plate_barcode'), _i('buffer_plate_number'), _s('buffer_name'))
    _index('basic_buffer_plate_wells',
           ['buffer_name', 'buffer_plate_barcode', 'buffer_plate_number',
            'buffer_plate_well_id', 'loading_volume', 'unique_job_id'])

    _table('basic_clip_enzyme',
           _s('unique_job_id'), _s('clip_well_id'), _s('clip_barcode'),
           _s('concatenated_clip_id'), _i('clip_number'), _i('clip_batch_number'),
           _i('clip_plate_number'), _i('transfer_volume'), _s('enzyme_plate_barcode'),
           _s('enzyme_plate_well_id'), _s('enzyme_plate_number'))
    _index('basic_clip_enzyme',
           ['clip_barcode', 'clip_batch_number', 'clip_number', 'clip_plate_number',
            'clip_well_id', 'concatenated_clip_id', 'enzyme_plate_barcode',
            'enzyme_plate_number', 'enzyme_plate_well_id', 'transfer_volume',
            'unique_job_id'])

    _table('basic_clip_list',
           _s('unique_job_id'), _s('part_id'), _s('concatenated_part_id'),
           _s('job_master_well_id'), _s('job_master_barcode'), _i('part_id_sample_number'),
           _s('clip_well_id'), _s('clip_barcode'), _s('concatenated_clip_id'),
           _i('clip_number'), _i('clip_batch_number'), _i('destination_plate_number'),
           _i('transfer_volume'))
    _index('basic_clip_list',
           ['clip_barcode', 'clip_batch_number', 'clip_number', 'clip_well_id',
            'concatenated_clip_id', 'concatenated_part_id', 'destination_plate_number',
            'job_master_barcode', 'job_master_well_id', 'part_id',
            'part_id_sample_number', 'transfer_volume', 'unique_job_id'])

    _table('basic_clip_qc_fragment',
           _s('unique_job_id'), _s('clip_well_id'), _s('clip_barcode'),
           _s('concatenated_clip_id'), _i('clip_number'), _i('clip_batch_number'),
           _i('clip_plate_number'), _s('well_id_96'), _s('well_id_384'),
           _i('clip_qc_plate_number'), _s('clip_qc_barcode'), _i('well_number_96'))
    _index('basic_clip_qc_fragment',
           ['clip_barcode', 'clip_batch_number', 'clip_number', 'clip_plate_number',
            'clip_qc_barcode', 'clip_qc_plate_number', 'clip_well_id',
            'concatenated_clip_id', 'unique_job_id', 'well_id_384', 'well_id_96',
            'well_number_96'])

    _table('basic_clip_qc_plates_per_job',
           _s('unique_job_id'), _i('number_of_clip_plates'))
    _index('basic_clip_qc_plates_per_job',
           ['number_of_clip_plates', 'unique_job_id'])

    _table('basic_job_master',
           _s('unique_job_id'), _s('part_id'), _s('job_master_well_id'),
           _s('job_master_barcode'), _i('sample_number'), _s('uploaded_filename'))
    _index('basic_job_master',
           ['job_master_barcode', 'job_master_well_id', 'part_id', 'sample_number',
            'unique_job_id', 'uploaded_filename'])

    _table('basic_stitch_buffer',
           _s('unique_job_id'), _s('stitch_well_id'), _s('stitch_barcode'),
           _s('stitch_id'), _i('transfer_volume'), _s('enzyme_plate_barcode'),
           _s('enzyme_plate_well_id'), _s('enzyme_plate_number'))
    _index('basic_stitch_buffer',
           ['enzyme_plate_barcode', 'enzyme_plate_number', 'enzyme_plate_well_id',
            'stitch_barcode', 'stitch_id', 'stitch_well_id', 'transfer_volume',
            'unique_job_id'])

    _table('basic_stitch_list',
           _s('unique_job_id'), _s('stitch_id'), _i('clip_number'),
           _i('clip_batch_number'), _s('concatenated_clip_id'), _s('clip_well_id'),
           _s('clip_barcode'), _s('stitch_well_id'), _s('stitch_plate_barcode'),
           _i('stitch_plate_number'), _i('transfer_volume'))
    _index('basic_stitch_list',
           ['clip_barcode', 'clip_batch_number', 'clip_number', 'clip_well_id',
            'concatenated_clip_id', 'stitch_id', 'stitch_plate_barcode',
            'stitch_plate_number', 'stitch_well_id', 'transfer_volume',
            'unique_job_id'])

    _table('basic_unique_clip',
           _s('unique_job_id'), _i('clip_number'), _s('part_1_id'), _s('part_2_id'),
           _s('part_3_id'), _i('number_of_times_used'), _i('clip_batches_required'))
    _index('basic_unique_clip',
           ['clip_batches_required', 'clip_number', 'number_of_times_used',
            'part_1_id', 'part_2_id', 'part_3_id', 'unique_job_id'])

    _table('basic_unique_part',
           _s('unique_job_id'), _s('part_id'), _i('samples_required'),
           _i('number_of_times_used'), _i('total_volume_required'), _i('volume_per_part'))
    _index('basic_unique_part',
           ['number_of_times_used', 'part_id', 'samples_required',
            'total_volume_required', 'unique_job_id', 'volume_per_part'])

    _table('job_types', _s('job_type'))
    # job_type is a lookup value, so (unlike the rest) its index is unique.
    op.create_index(op.f('ix_job_types_job_type'), 'job_types', ['job_type'], unique=True)

    # Retire the old, un-prefixed tables that the basic_* tables replace.
    for old_table in ('clip_qc_plates_per_job', 'stitch_buffer', 'clip_qc_fragment',
                      'buffer_plate_wells', 'clip_enzyme', 'unique_part', 'unique_clip',
                      'job_master', 'clip_list', 'stitch_list'):
        op.drop_table(old_table)

    # Tag existing jobs with their type.
    op.add_column('job_table', sa.Column('job_type', sa.String(length=64), nullable=True))
    op.create_index(op.f('ix_job_table_job_type'), 'job_table', ['job_type'], unique=False)
def downgrade():
    """Inverse of :func:`upgrade`: restore the old un-prefixed tables (as
    reflected from PostgreSQL, hence the VARCHAR/INTEGER spellings and the
    named primary keys), remove ``job_types`` and ``job_table.job_type``,
    and drop every ``basic_``-prefixed table together with its indexes.
    """

    def _vc(name):
        # Nullable VARCHAR(64) column, as reflected from the old schema.
        return sa.Column(name, sa.VARCHAR(length=64), autoincrement=False, nullable=True)

    def _int(name):
        # Nullable INTEGER column, as reflected from the old schema.
        return sa.Column(name, sa.INTEGER(), autoincrement=False, nullable=True)

    def _table(name, *columns):
        # Recreate a legacy table with its original named primary key.
        op.create_table(
            name,
            sa.Column('id', sa.INTEGER(), nullable=False),
            *columns,
            sa.PrimaryKeyConstraint('id', name=name + '_pkey'),
        )

    def _drop(table, columns):
        # Drop the per-column indexes (reverse of upgrade's creation order),
        # then the table itself.
        for column in reversed(columns):
            op.drop_index(op.f('ix_' + table + '_' + column), table_name=table)
        op.drop_table(table)

    op.drop_index(op.f('ix_job_table_job_type'), table_name='job_table')
    op.drop_column('job_table', 'job_type')

    _table('stitch_list',
           _vc('unique_job_id'), _vc('stitch_id'), _int('clip_number'),
           _int('clip_batch_number'), _vc('concatenated_clip_id'), _vc('clip_well_id'),
           _vc('clip_barcode'), _vc('stitch_well_id'), _vc('stitch_plate_barcode'),
           _int('stitch_plate_number'), _int('transfer_volume'))
    _table('clip_list',
           _vc('unique_job_id'), _vc('part_id'), _vc('concatenated_part_id'),
           _vc('job_master_well_id'), _vc('job_master_barcode'),
           _int('part_id_sample_number'), _vc('clip_well_id'), _vc('clip_barcode'),
           _vc('concatenated_clip_id'), _int('clip_number'), _int('clip_batch_number'),
           _int('destination_plate_number'), _int('transfer_volume'))
    _table('job_master',
           _vc('unique_job_id'), _vc('part_id'), _vc('job_master_well_id'),
           _vc('job_master_barcode'), _int('sample_number'), _vc('uploaded_filename'))
    _table('unique_clip',
           _vc('unique_job_id'), _int('clip_number'), _vc('part_1_id'),
           _vc('part_2_id'), _vc('part_3_id'), _int('number_of_times_used'),
           _int('clip_batches_required'))
    _table('unique_part',
           _vc('unique_job_id'), _vc('part_id'), _int('samples_required'),
           _int('number_of_times_used'), _int('total_volume_required'),
           _int('volume_per_part'))
    _table('clip_enzyme',
           _vc('unique_job_id'), _vc('clip_well_id'), _vc('clip_barcode'),
           _vc('concatenated_clip_id'), _int('clip_number'), _int('clip_batch_number'),
           _int('clip_plate_number'), _int('transfer_volume'),
           _vc('enzyme_plate_barcode'), _vc('enzyme_plate_well_id'),
           _vc('enzyme_plate_number'))
    _table('buffer_plate_wells',
           _vc('unique_job_id'), _vc('buffer_plate_well_id'),
           sa.Column('loading_volume', postgresql.DOUBLE_PRECISION(precision=53),
                     autoincrement=False, nullable=True),
           _vc('buffer_plate_barcode'), _int('buffer_plate_number'), _vc('buffer_name'))
    _table('clip_qc_fragment',
           _vc('unique_job_id'), _vc('clip_well_id'), _vc('clip_barcode'),
           _vc('concatenated_clip_id'), _int('clip_number'), _int('clip_batch_number'),
           _int('clip_plate_number'), _vc('well_id_96'), _vc('well_id_384'),
           _int('clip_qc_plate_number'), _vc('clip_qc_barcode'), _int('well_number_96'))
    _table('stitch_buffer',
           _vc('unique_job_id'), _vc('stitch_well_id'), _vc('stitch_barcode'),
           _vc('stitch_id'), _int('transfer_volume'), _vc('enzyme_plate_barcode'),
           _vc('enzyme_plate_well_id'), _vc('enzyme_plate_number'))
    _table('clip_qc_plates_per_job',
           _vc('unique_job_id'), _int('number_of_clip_plates'))

    op.drop_index(op.f('ix_job_types_job_type'), table_name='job_types')
    op.drop_table('job_types')

    # Column lists below are in the (alphabetical) order upgrade() created
    # the indexes; _drop walks them in reverse, mirroring the autogenerated diff.
    _drop('basic_unique_part',
          ['number_of_times_used', 'part_id', 'samples_required',
           'total_volume_required', 'unique_job_id', 'volume_per_part'])
    _drop('basic_unique_clip',
          ['clip_batches_required', 'clip_number', 'number_of_times_used',
           'part_1_id', 'part_2_id', 'part_3_id', 'unique_job_id'])
    _drop('basic_stitch_list',
          ['clip_barcode', 'clip_batch_number', 'clip_number', 'clip_well_id',
           'concatenated_clip_id', 'stitch_id', 'stitch_plate_barcode',
           'stitch_plate_number', 'stitch_well_id', 'transfer_volume',
           'unique_job_id'])
    _drop('basic_stitch_buffer',
          ['enzyme_plate_barcode', 'enzyme_plate_number', 'enzyme_plate_well_id',
           'stitch_barcode', 'stitch_id', 'stitch_well_id', 'transfer_volume',
           'unique_job_id'])
    _drop('basic_job_master',
          ['job_master_barcode', 'job_master_well_id', 'part_id', 'sample_number',
           'unique_job_id', 'uploaded_filename'])
    _drop('basic_clip_qc_plates_per_job',
          ['number_of_clip_plates', 'unique_job_id'])
    _drop('basic_clip_qc_fragment',
          ['clip_barcode', 'clip_batch_number', 'clip_number', 'clip_plate_number',
           'clip_qc_barcode', 'clip_qc_plate_number', 'clip_well_id',
           'concatenated_clip_id', 'unique_job_id', 'well_id_384', 'well_id_96',
           'well_number_96'])
    _drop('basic_clip_list',
          ['clip_barcode', 'clip_batch_number', 'clip_number', 'clip_well_id',
           'concatenated_clip_id', 'concatenated_part_id', 'destination_plate_number',
           'job_master_barcode', 'job_master_well_id', 'part_id',
           'part_id_sample_number', 'transfer_volume', 'unique_job_id'])
    _drop('basic_clip_enzyme',
          ['clip_barcode', 'clip_batch_number', 'clip_number', 'clip_plate_number',
           'clip_well_id', 'concatenated_clip_id', 'enzyme_plate_barcode',
           'enzyme_plate_number', 'enzyme_plate_well_id', 'transfer_volume',
           'unique_job_id'])
    _drop('basic_buffer_plate_wells',
          ['buffer_name', 'buffer_plate_barcode', 'buffer_plate_number',
           'buffer_plate_well_id', 'loading_volume', 'unique_job_id'])
| 1.453125 | 1 |
# Codeforces "Linear Keyboard": for each test case, read a keyboard layout
# (a permutation of the 26 lowercase letters) and a word, then print the
# total finger travel -- the sum of absolute position differences between
# consecutive letters of the word on that keyboard.
for _ in range(int(input())):
    keyboard = input()
    word = input()
    # 1-based position of each letter on the keyboard (only differences matter).
    pos = {keyboard[i]: i + 1 for i in range(26)}
    # Pair each letter with its successor and sum the per-step distances.
    travel = [abs(pos[cur] - pos[nxt]) for cur, nxt in zip(word, word[1:])]
    print(sum(travel))
src/kata/data/io/network.py | FlorianKempenich/kata | 0 | 12770153 | import requests
from kata.domain.exceptions import ApiLimitReached, InvalidAuthToken
class GithubApi:
    """Thin convenience wrapper around the Github REST API.

    Translates HTTP failure modes into domain exceptions
    (``ApiLimitReached``, ``InvalidAuthToken``).
    """

    def __init__(self, auth_token: str):
        self._requests = requests
        self._auth_token = auth_token

    def contents(self, user, repo, path=''):
        """Return the JSON listing of *path* inside ``user/repo`` (repo root if empty)."""
        endpoint = f'https://api.github.com/repos/{user}/{repo}/contents'
        if path:
            endpoint += f'/{path}'
        return self._get_url(endpoint).json()

    def download_raw_text_file(self, raw_text_file_url: str):
        """Fetch a raw text file and return its body as a string."""
        return self._get_url(raw_text_file_url).text

    def _get_url(self, url: str):
        # Single choke point: every request is validated before being returned.
        result = self._requests.get(url, headers=self._headers())
        self._validate_response(result)
        return result

    def _headers(self):
        # Anonymous access is allowed: only send the header when a token exists.
        if self._auth_token:
            return {'Authorization': f'token {self._auth_token}'}
        return {}

    def _validate_response(self, response: requests.Response):
        # 403 with an exhausted rate-limit header means the API quota ran out.
        if response.status_code == 403 and int(response.headers.get('X-RateLimit-Remaining', -1)) == 0:
            raise ApiLimitReached()
        if response.status_code == 401:
            raise InvalidAuthToken(self._auth_token)
        # Any other non-2xx status surfaces as requests' own HTTPError.
        response.raise_for_status()
| 2.890625 | 3 |
utils.py | karthigb/mturk-object-detection | 0 | 12770154 | import boto3
import json
MTURK_SANDBOX = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com'


def get_mturk_client():
    """Build a boto3 MTurk client pointed at the requester sandbox.

    Credentials are read from the ``SANDBOX`` section of ``config.json``
    in the working directory.
    """
    with open('config.json', 'r') as config_file:
        settings = json.load(config_file)
    sandbox = settings['SANDBOX']
    return boto3.client(
        'mturk',
        aws_access_key_id=sandbox['aws_access_key_id'],
        aws_secret_access_key=sandbox['aws_secret_access_key'],
        region_name='us-east-1',
        endpoint_url=MTURK_SANDBOX,
    )
| 2.140625 | 2 |
cosmic_ray/plugins.py | jkblume/cosmic-ray | 0 | 12770155 | from stevedore import driver, ExtensionManager
def get_operator(name):
    """Fetch the operator class provided by a plugin.

    Args:
        name: The name of the plugin containing the operator class.

    Returns:
        The operator class object itself (never an instance) exposed by
        the plugin registered under *name*.
    """
    operators = ExtensionManager('cosmic_ray.operators')
    return operators[name].plugin
def operator_names():
    """Return an iterable with the name of every registered operator plugin."""
    operators = ExtensionManager('cosmic_ray.operators')
    return operators.names()
def get_test_runner(name, test_args):
    """Load and instantiate the test-runner plugin registered as *name*.

    *test_args* is forwarded to the plugin's constructor.
    """
    manager = driver.DriverManager(
        namespace='cosmic_ray.test_runners',
        name=name,
        invoke_on_load=True,
        invoke_args=(test_args,),
    )
    return manager.driver
def test_runner_names():
    """Return an iterable with the name of every test-runner plugin."""
    runners = ExtensionManager('cosmic_ray.test_runners')
    return runners.names()
| 2.6875 | 3 |
backend/lists/admin.py | xelam11/TaskPlanner | 0 | 12770156 | from django.contrib import admin
from .models import List
class ListAdmin(admin.ModelAdmin):
    """Django admin configuration for the List model."""

    # Sidebar filters shown on the admin change list: parent board and name.
    list_filter = ('board', 'name')


admin.site.register(List, ListAdmin)
| 1.554688 | 2 |
DEFLECTION_ANGLE.py | Sketos/PyGlafic | 0 | 12770157 | <reponame>Sketos/PyGlafic
import os as os
import sys as sys
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from PyGlafic import PyGlafic
class DEFLECTION_ANGLE(PyGlafic):
    """Compute a deflection-angle map for a single lens model via glafic.

    Builds a glafic input file (cosmology, lens/source redshifts, grid and
    lens model), runs the external ``glafic`` binary, and derives the 2D
    deflection-angle map from the gradient of the output lensing potential.
    """

    def __init__(self, COSMOLOGICAL_PARAMETERS = None, LEN_REDSHIFT = None, SRC_REDSHIFT = None, ANGULAR_DISTANCE = None, \
        LEN_PLANE_RESOLUTION = None, SRC_PLANE_RESOLUTION = None, MODEL_TYPE = None, MODEL_PARAMETERS = None):
        """Configure cosmology, lensing geometry, grid and lens model.

        Raises:
            ValueError: if the lens is not in front of the source, or if no
                source-plane resolution is supplied.
        """
        super(DEFLECTION_ANGLE, self).__init__()
        # COSMOLOGY
        self.COSMOLOGY(COSMOLOGICAL_PARAMETERS = COSMOLOGICAL_PARAMETERS, \
            UPDATE = False)
        # The lens must be at strictly lower redshift than the source.
        if LEN_REDSHIFT >= SRC_REDSHIFT:
            raise ValueError
        else:
            self.LENSING_CONFIGURATION(LEN_REDSHIFT = LEN_REDSHIFT, \
                SRC_REDSHIFT = SRC_REDSHIFT)
        self.SRC_PLANE_RESOLUTION = SRC_PLANE_RESOLUTION
        if self.SRC_PLANE_RESOLUTION is None:
            raise ValueError
        else:
            self.GRID(ANGULAR_DISTANCE = ANGULAR_DISTANCE, \
                SRC_PLANE_RESOLUTION = SRC_PLANE_RESOLUTION)
        self.LEN_MODEL(MODEL_TYPE = MODEL_TYPE, MODEL_PARAMETERS = MODEL_PARAMETERS, \
            UPDATE = False)
        # COMMAND
        self.COMMAND()

    def COMMAND(self, COMMAND = "writelens", COMMAND_ARGS = None, UPDATE = False):
        """Append a glafic command line (currently only ``writelens``) to the
        in-memory input file lines.

        NOTE(review): COMMAND_ARGS is accepted but unused, and UPDATE mode is
        still a TODO.
        """
        if UPDATE:
            pass # TODO :
        else:
            # Locate the blank line following "start_command" and open a slot
            # there for the new command.
            for i, line_i in enumerate(self.INPUTFILE_LINES):
                if line_i.split(" ")[0] == "start_command":
                    for j in range(i+1, len(self.INPUTFILE_LINES)):
                        if self.INPUTFILE_LINES[j].split(" ")[0] == "\n":
                            self.INPUTFILE_LINES.insert(j, "")
                            break
            # NOTE(review): `j` leaks out of the loop above; if "start_command"
            # is missing from the template this raises NameError -- confirm the
            # input file always contains that marker.
            if COMMAND is None:
                raise ValueError
            elif COMMAND == "writelens":
                # "writelens <z_source>" asks glafic to dump the lens maps.
                self.INPUTFILE_LINES[j] += COMMAND + \
                    " " + "\t" + str(self.SRC_REDSHIFT) + " " + "\n"
            else:
                raise ValueError

    def COMPUTE(self, BINSIZE = 5.0, DISPLAY = True):
        """Run glafic, build the 2D deflection-angle map and a radial profile.

        Args:
            BINSIZE: divisor controlling the number of radial bins used when
                averaging the 2D map into a 1D profile.
            DISPLAY: if True, show the 2D map with matplotlib.
        """
        self.CREATE_PyGlafic_INPUTFILE()
        if os.path.isfile("./PyGlafic.INPUT"):
            # Run the external glafic binary on the generated input file.
            os.system("./glafic PyGlafic.INPUT")
        else:
            # NOTE(review): bare `IOError` is a no-op expression -- this was
            # almost certainly meant to be `raise IOError`.
            IOError
        # Expected map size implied by the configured grid.
        N_PIXELS = int(2.0 * self.ANGULAR_DISTANCE / \
            self.SRC_PLANE_RESOLUTION)
        LENSING_PROPERTIES_HDU = fits.open("out_lens.fits")
        # if CLEAN:
        #     os.system("rm out_lens.fits")
        # Deflection angle = |grad(potential)|; plane index 2 of the glafic
        # output is assumed to hold the lensing potential -- TODO confirm.
        LENSING_POTENTIAL_GRAD_X, LENSING_POTENTIAL_GRAD_Y = \
            np.gradient(LENSING_PROPERTIES_HDU["PRIMARY"].data[2])
        DEFLECTION_ANGLE_2D = np.sqrt(LENSING_POTENTIAL_GRAD_X**2.0 + \
            LENSING_POTENTIAL_GRAD_Y**2.0) / self.SRC_PLANE_RESOLUTION
        if DISPLAY:
            figure, axes = plt.subplots()
            IMAGE = plt.imshow(DEFLECTION_ANGLE_2D, cmap = "inferno", extent = (-self.ANGULAR_DISTANCE, self.ANGULAR_DISTANCE, \
                -self.ANGULAR_DISTANCE, self.ANGULAR_DISTANCE), vmin = 0.0, vmax = 10.0)
            plt.text(-0.85 * self.ANGULAR_DISTANCE, -0.85 * self.ANGULAR_DISTANCE, "$z_l = $" + str(self.LEN_REDSHIFT) + \
                "$,$" + "$z_s = $" + str(self.SRC_REDSHIFT), fontsize = 15)
            plt.xticks([-0.75 * self.ANGULAR_DISTANCE, -0.5 * self.ANGULAR_DISTANCE, -0.25 * self.ANGULAR_DISTANCE, 0.0, \
                0.25 * self.ANGULAR_DISTANCE, 0.5 * self.ANGULAR_DISTANCE, 0.75 * self.ANGULAR_DISTANCE])
            plt.yticks([-0.75 * self.ANGULAR_DISTANCE, -0.5 * self.ANGULAR_DISTANCE, -0.25 * self.ANGULAR_DISTANCE, 0.0, \
                0.25 * self.ANGULAR_DISTANCE, 0.5 * self.ANGULAR_DISTANCE, 0.75 * self.ANGULAR_DISTANCE])
            plt.xlabel(r"$x \,\, (arcsec)$", fontsize = 15)
            plt.ylabel(r"$y \,\, (arcsec)$", fontsize = 15)
            COLORBAR = figure.colorbar(IMAGE, ticks=[1.0, 3.0, 5.0, 7.0, 9.0], \
                cax = figure.add_axes([0.225, 0.90, 0.575, 0.05]), orientation = "horizontal")
            COLORBAR.set_label(r"$Deflection \,\, Angle$", \
                color = "black", fontsize = 15)
        # Sanity-check that the loaded map matches the configured grid size.
        N_PIXELS_Y, N_PIXELS_X = \
            DEFLECTION_ANGLE_2D.shape
        if (N_PIXELS_X != N_PIXELS) or (N_PIXELS_Y != N_PIXELS):
            raise ValueError
        # Azimuthally average the 2D map into a 1D radial profile.
        Y_idx, X_idx = np.indices(DEFLECTION_ANGLE_2D.shape)
        CENTER = np.array([(X_idx.max() - X_idx.min()) / 2.0, \
            (Y_idx.max() - Y_idx.min()) / 2.0])
        R_2D = np.hypot(X_idx - CENTER[0], Y_idx - CENTER[1]) * \
            self.SRC_PLANE_RESOLUTION
        R_1D = np.linspace(R_2D.min(), R_2D.max(), \
            int(np.sqrt(N_PIXELS_X**2.0 + N_PIXELS_Y**2.0) / BINSIZE))
        DEFLECTION_ANGLE_1D = np.zeros((len(R_1D) - 1))
        for i in range(0, len(R_1D) - 1):
            # Mean deflection over each annulus (R_1D[i], R_1D[i+1]].
            idx = np.where((R_2D > R_1D[i]) & \
                (R_2D <= R_1D[i+1]))
            DEFLECTION_ANGLE_1D[i] = np.mean(DEFLECTION_ANGLE_2D[idx])
        # Use bin midpoints as the radial coordinate of the profile.
        R_1D = 0.5 * (R_1D[1:] + R_1D[:-1])
        # plt.figure()
        # plt.loglog(R_1D, DEFLECTION_ANGLE_1D, \
        #     marker = "o", linewidth = 4, alpha = 0.25)
        # print N_PIXELS * self.SRC_PLANE_RESOLUTION
        # plt.axvline(0.5 * N_PIXELS * self.SRC_PLANE_RESOLUTION)
        if DISPLAY:
            plt.show()
if __name__ == "__main__":
    # Example run: NFW halo (mass 1e14) lensing a z=2 source behind a z=0.5 lens.
    # DEFLECTION_ANGLE_obj = DEFLECTION_ANGLE(LEN_REDSHIFT = 0.5, SRC_REDSHIFT = 2.0, ANGULAR_DISTANCE = 5.0, SRC_PLANE_RESOLUTION = 0.01, \
    #     MODEL_TYPE = "sie", MODEL_PARAMETERS = [300.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    DEFLECTION_ANGLE_obj = DEFLECTION_ANGLE(LEN_REDSHIFT = 0.5, SRC_REDSHIFT = 2.0, ANGULAR_DISTANCE = 100.0, SRC_PLANE_RESOLUTION = 1.0, \
        MODEL_TYPE = "nfw", MODEL_PARAMETERS = [10**14.0, 0.0, 0.0, 0.0, 0.0, 10.0, 0.0])
    DEFLECTION_ANGLE_obj.COMPUTE()
| 2.328125 | 2 |
xlnet/datasets/squad_tfrecords.py | fanshiqing/DAPPLE | 50 | 12770158 | <filename>xlnet/datasets/squad_tfrecords.py
import tensorflow as tf
from tensorflow.contrib.distribute.python import prefetching_ops_v2
def input_fn_builder(input_glob, batch_size, is_training, seq_length=512):
    """Build an Estimator ``input_fn`` that reads SQuAD TFRecord files.

    Args:
        input_glob: path of the TFRecord file(s) to read.
        batch_size: number of examples per batch.
        is_training: if True, shuffle and repeat indefinitely and expect the
            training-only label features; otherwise read a single epoch.
        seq_length: fixed token length of each serialized example.

    Returns:
        A zero-argument ``input_fn`` that produces a ``tf.data.Dataset``.
    """
    # Fixed-length features shared by training and evaluation examples.
    name_to_features = {
        "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "input_mask": tf.FixedLenFeature([seq_length], tf.float32),
        "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "cls_index": tf.FixedLenFeature([], tf.int64),
        "p_mask": tf.FixedLenFeature([seq_length], tf.float32)
    }

    if is_training:
        # Answer-span labels exist only in training records.
        name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64)
        name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64)
        name_to_features["is_impossible"] = tf.FixedLenFeature([], tf.float32)
    else:
        # Evaluation records instead carry a unique example id.
        name_to_features["unique_ids"] = tf.FixedLenFeature([], tf.int64)

    def input_fn():
        d = tf.data.TFRecordDataset(input_glob)
        if is_training:
            d = d.shuffle(buffer_size=1024)
            d = d.repeat()
        else:
            d = d.repeat(1)

        def _decode_record(record, name_to_features):
            # Parse one serialized tf.Example into a dict of dense tensors.
            example = tf.parse_single_example(record, name_to_features)
            return example

        # Fused map+batch: decode and batch 8 batches in parallel.
        d = d.apply(
            tf.contrib.data.map_and_batch(
                lambda record: _decode_record(record, name_to_features),
                batch_size=batch_size,
                num_parallel_batches=8,
                drop_remainder=False))
        d = d.prefetch(1024)
        if False:
            # NOTE(review): dead code -- GPU prefetching is intentionally disabled.
            d = d.apply(tf.contrib.data.prefetch_to_device('/replica:0/task:0/device:GPU:0'))
        return d

    return input_fn
| 2.390625 | 2 |
src/utils/filesys.py | dmitry-vorobiev/nlp_headline_rus | 0 | 12770159 | <gh_stars>0
import json
import os
from pathlib import Path
def save_json(content, path, indent=4, **json_dump_kwargs):
    """Serialize *content* to *path* as JSON with sorted keys.

    Args:
        content: Any JSON-serializable object.
        path: Destination file path.
        indent: Indentation width forwarded to ``json.dump``.
        **json_dump_kwargs: Extra keyword arguments forwarded to ``json.dump``.
    """
    # Explicit UTF-8 avoids depending on the platform's locale encoding
    # (matters when callers pass ensure_ascii=False).
    with open(path, "w", encoding="utf-8") as f:
        json.dump(content, f, indent=indent, sort_keys=True, **json_dump_kwargs)
def write_txt_file(ordered_tgt, path):
    """Write each string of *ordered_tgt* to *path*, one per line.

    Fix: the file handle is now closed deterministically via a context
    manager (the original opened the file and never closed it).
    """
    with Path(path).open("w") as f:
        for ln in ordered_tgt:
            f.write(ln + "\n")
def check_output_dir(args, expected_items=0):
    """Abort early if ``args.output_dir`` already holds unexpected content.

    Needs the following attributes on *args*: ``output_dir``, ``do_train``
    and ``overwrite_output_dir``. ``expected_items`` is normally 0 (an empty
    directory), but a few pre-existing files may be expected, e.g. when
    recovering from an OOM.

    Raises:
        ValueError: when training would run, overwriting is not allowed, and
            the directory exists with more than *expected_items* entries.

    Source:
        https://github.com/huggingface/transformers/blob/c60e0e1ee45f4bf1017736b146c51729f120bb83/examples/seq2seq/utils.py#L632
    """
    # Guard clauses: nothing to check unless we train without overwrite.
    if not args.do_train or args.overwrite_output_dir:
        return
    if not os.path.exists(args.output_dir):
        return
    found = len(os.listdir(args.output_dir))
    if found > expected_items:
        raise ValueError(
            f"Output directory ({args.output_dir}) already exists and "
            f"has {found} items in it (expected {expected_items} items). "
            "Use --overwrite_output_dir to overcome."
        )
| 2.65625 | 3 |
Models/kNN.py | AlgoAIBoss/Data-Mining-Cup | 0 | 12770160 | import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity, manhattan_distances
def top_5(book, items, similarity_measure):
    """
    This function extracts the top-five similar books for a given book and
    similarity measure. This function takes the following arguments:

        book: the book for which five recommendations need to be provided
        items: data-frame that contains all the books with their corresponding
               engineered features.
        similarity_measure: possible values Euclidean, Cosine or Manhattan
    """
    ## Filter out books with same title but different publisher
    temp = items[items['itemID'] == book]
    temp_title = items.loc[items['itemID'] == book, 'title']
    items = items[~np.isin(items['title'], temp_title)]
    # Row 0 of `items` is now the query book itself.
    items = pd.concat([temp, items]).reset_index(drop = True)
    ## Selecting books based on the same language and topic
    items = items[np.isin(items['language'], temp['language'])].reset_index(drop = True)
    # Narrow by topic only when enough same-topic candidates remain (> 5).
    # NOTE(review): nesting of this if/else ladder was reconstructed from an
    # indentation-stripped source -- verify against the original file.
    if (items[np.isin(items['general_topic'], temp['general_topic'])].shape[0] > 5):
        if (sum(items['general_topic'] == 'Y') > 15000):
            # 'Y' (children's) is over-represented: narrow by a finer topic level.
            if (all(temp['general_topic_2'] == 'YF') == True):
                items = items[np.isin(items['general_topic_3'], temp['general_topic_3'])].reset_index(drop = True)
            else:
                if (items[np.isin(items['general_topic_2'], temp['general_topic_2'])].shape[0] >= 6):
                    items = items[np.isin(items['general_topic_2'], temp['general_topic_2'])].reset_index(drop = True)
        else:
            items = items[np.isin(items['general_topic'], temp['general_topic'])].reset_index(drop = True)
    ## Selecting variables of interest
    # Drop identifier/metadata columns so distances use only engineered features.
    to_remove = ['itemID', 'title', 'author', 'publisher', 'subtopics', 'general_topic', 'general_topic_2', 'general_topic_3', 'language', 'main topic']
    variables_of_interest = items.columns[~np.isin(items.columns, to_remove)]
    items_temp = items[variables_of_interest]
    ## Selecting top 5 similar books
    # Column 0 is the distance/similarity to the query book; [1:6] skips the
    # query itself. Cosine is a similarity, hence the negated sort.
    if (similarity_measure == 'Euclidean'):
        D = euclidean_distances(items_temp)
        to_select = np.argsort(D[:, 0])[1:6]
    elif (similarity_measure == 'Cosine'):
        D = cosine_similarity(items_temp)
        to_select = np.argsort(-D[:, 0])[1:6]
    elif (similarity_measure == 'Manhattan'):
        D = manhattan_distances(items_temp)
        to_select = np.argsort(D[:, 0])[1:6]
    return [items.loc[to_select[0], 'itemID'], items.loc[to_select[1], 'itemID'], items.loc[to_select[2], 'itemID'], items.loc[to_select[3], 'itemID'], items.loc[to_select[4], 'itemID']]
def top_5_after_transaction(book, book_to_recommend, items, similarity_measure):
    """
    This function extracts the top-five similar books for a given book, books from
    transaction history, items and a similarity measure. This function takes the
    following arguments:

        book: the book for which five recommendations need to be provided.
        book_to_recommend: list of book from historical transactions.
        items: data-frame that contains all the books with their corresponding
               engineered features.
        similarity_measure: possible values Euclidean, Cosine or Manhattan
    """
    ## Selecting books based on transactions
    items_temp = items.loc[np.isin(items['itemID'], book_to_recommend)]
    ## Selecting books based on the same language and topic
    temp = items[items['itemID'] == book]
    temp_title = items.loc[items['itemID'] == book, 'title']
    items_temp = items_temp[~np.isin(items_temp['title'], temp_title)]
    # Row 0 of `items_temp` is now the query book itself.
    items_temp = pd.concat([temp, items_temp]).reset_index(drop = True)
    ## Selecting books based on language
    items_temp = items_temp[np.isin(items_temp['language'], temp['language'])].reset_index(drop = True)
    ## Selecting variables of interest
    # Drop identifier/metadata columns so distances use only engineered features.
    to_remove = ['itemID', 'title', 'author', 'publisher', 'subtopics', 'general_topic', 'general_topic_2', 'general_topic_3', 'language', 'main topic']
    variables_of_interest = items.columns[~np.isin(items.columns, to_remove)]
    items_temp_1 = items_temp[variables_of_interest]
    ## Sanity check
    # Need the query plus at least 5 candidates; otherwise fall back to
    # the pure content-based recommender.
    if (items_temp.shape[0] >= 6):
        ## Selecting top 5 similar books
        # Column 0 holds distances/similarities to the query; [1:6] skips it.
        if (similarity_measure == 'Euclidean'):
            D = euclidean_distances(items_temp_1)
            to_select = np.argsort(D[:, 0])[1:6]
        elif (similarity_measure == 'Cosine'):
            D = cosine_similarity(items_temp_1)
            to_select = np.argsort(-D[:, 0])[1:6]
        elif (similarity_measure == 'Manhattan'):
            D = manhattan_distances(items_temp_1)
            to_select = np.argsort(D[:, 0])[1:6]
        return [items_temp.loc[to_select[0], 'itemID'], items_temp.loc[to_select[1], 'itemID'], items_temp.loc[to_select[2], 'itemID'], items_temp.loc[to_select[3], 'itemID'], items_temp.loc[to_select[4], 'itemID']]
    else:
        # Too few transaction-based candidates: defer to the kNN over all items.
        knn_top_5 = top_5(book, items, similarity_measure)
        return knn_top_5
| 3.40625 | 3 |
demos/python/sdk_wireless_camera_control/open_gopro/util.py | hypoxic/OpenGoPro | 1 | 12770161 | # util.py/Open GoPro, Version 1.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Tue May 18 22:08:50 UTC 2021
"""Miscellaneous utilities for the GoPro package."""
import sys
import queue
import logging
import subprocess
from pathlib import Path
from typing import Dict, Type, Any, List, Optional, Union
logger = logging.getLogger(__name__)
def launch_vlc(location: Optional[Path]) -> None:
    """Launch VLC listening on udp://@:8554.

    Args:
        location (Optional[Path]): path to VLC. If None, a handful of
            well-known install locations for the current platform are tried.
    """
    # Best-effort discovery: candidate install paths and the launch command
    # template depend on the host platform.
    candidates: List[Union[Path, str]] = []
    launch_template = "echo Invalid Platform"
    platform = sys.platform.lower()
    if "linux" in platform:
        candidates = [r'"/snap/bin/vlc"']
        launch_template = 'su $(id -un 1000) -c "{} udp://@:8554 > /dev/null 2>&1 &"'
    elif "darwin" in platform:
        candidates = [r'"/Applications/VLC.app/Contents/MacOS/VLC"']
        launch_template = "{} udp://@:8554 > /dev/null 2>&1 &"
    elif "win" in platform:
        candidates = [
            r'"/c/Program Files/VideoLAN/VLC/vlc.exe"',
            r'"/c/Program Files (x86)/VideoLAN/VLC/vlc.exe"',
            r'"C:\Program Files (x86)\VideoLAN\VLC\vlc.exe"',
            r'"C:\Program Files\VideoLAN\VLC\vlc.exe"',
        ]
        launch_template = "{} udp://@:8554 &"
    # An explicit location overrides discovery entirely.
    if location is not None:
        candidates = [location]
    # Heuristic failure detection on the shell output (e.g. "not found").
    failure_markers = (" not ", " no ", " cannot ", " unexpected ")
    for candidate in candidates:
        output = cmd(launch_template.format(candidate)).lower()
        if not any(marker in output for marker in failure_markers):
            logger.info("VLC launched")
            return
    logger.error("Failed to find VLC")
def scrub(obj: Any, bad_key: str) -> None:
    """Recursively remove *bad_key* from a dict or list, in place.

    Args:
        obj (Any): dict or list to operate on; any other type is ignored.
        bad_key (str): key (in dicts) or value (in lists) to remove.
    """
    if isinstance(obj, dict):
        # Snapshot the keys so deletion during iteration is safe.
        for candidate in list(obj.keys()):
            if candidate == bad_key:
                del obj[candidate]
            else:
                scrub(obj[candidate], bad_key)
    elif isinstance(obj, list):
        # Walk backwards so indices stay valid while deleting.
        for index in range(len(obj) - 1, -1, -1):
            if obj[index] == bad_key:
                del obj[index]
            else:
                scrub(obj[index], bad_key)
    # Scalars and other types: nothing to scrub.
def cmd(command: str) -> str:
    """Run *command* in a shell and return its combined stdout/stderr.

    Args:
        command (str): command to send

    Returns:
        str: response returned from shell
    """
    logger.debug(f"Send cmd --> {command}")
    # Note: Ignoring unicode characters in SSIDs to prevent intermittent
    # UnicodeDecodeErrors while connecting to an SSID when *any* nearby AP
    # has unicode characters in its name.
    process = subprocess.Popen(
        command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
    )  # type: ignore
    response = process.stdout.read().decode(errors="ignore")
    logger.debug(f"Receive response --> {response}")
    return response
class Singleton:
    """Base class that turns every subclass into a singleton.

    The first instantiation of a subclass creates and caches the instance;
    every later call returns that same cached object.
    """

    # Map of subclass -> its unique instance. Fix: the value annotation was
    # Type["Singleton"], but the dict stores instances, not classes.
    _instances: Dict[Type["Singleton"], "Singleton"] = {}

    # pylint: disable=missing-return-doc, missing-return-type-doc
    def __new__(cls, *args, **kwargs):  # type: ignore
        """Return the cached instance for *cls*, creating it on first use."""
        if cls not in cls._instances:
            cls._instances[cls] = object.__new__(cls, *args, **kwargs)
        return cls._instances[cls]
class SnapshotQueue(queue.Queue):
    """A ``queue.Queue`` that can report its contents without dequeuing them.

    Lets callers inspect the queued elements in a thread-safe manner.
    """

    def snapshot(self) -> List[Any]:
        """Return a copy of the queue's elements, oldest first.

        Returns:
            List[Any]: List of queue elements
        """
        # Hold the queue's own mutex so producers/consumers can't mutate
        # the underlying deque while we copy it.
        with self.mutex:
            elements = list(self.queue)
        return elements
| 2.5 | 2 |
tests/unit_tests.py | tylerscave/OS_Simulator | 0 | 12770162 | <filename>tests/unit_tests.py
import sys
import os
def main():
    """Run every unit-test module in sequence, streaming output to stdout."""
    interpreter = 'python3'
    # List of tests to run
    test_modules = [
        'test_random_generator.py',
        'test_process_generator.py'
    ]
    # Each module runs in its own interpreter process; output goes to stdout.
    for test_file in test_modules:
        os.system(interpreter + ' ' + test_file)


################################################################################
if __name__ == '__main__':
    main()
| 2.859375 | 3 |
def isPalindrome(str):
    """Return True if *str* reads the same forwards and backwards.

    Note: the parameter name shadows the builtin ``str``; it is kept
    unchanged for backward compatibility with existing callers.
    """
    # Direct boolean return replaces the original flag-variable pattern.
    return str == str[::-1]
print("Please enter a string: ")
x = input()
flag = isPalindrome(x)
if flag:
print(x, "is a Palindrome")
else:
print(x, "is NOT a Palindrome")
| 4.25 | 4 |
# Toggle between the minimal "simple core" (early chapters) and the full core.
_IS_SIMPLE_CORE = False

if _IS_SIMPLE_CORE:
    from dezero.core_simple import Variable
    from dezero.core_simple import Function
    from dezero.core_simple import using_config
    from dezero.core_simple import no_grad
    from dezero.core_simple import as_array
    from dezero.core_simple import as_variable
    from dezero.core_simple import setup_variable
else:
    # Full core additionally provides Parameter (used by layers/models).
    from dezero.core import Variable
    from dezero.core import Parameter
    from dezero.core import Function
    from dezero.core import using_config
    from dezero.core import no_grad
    from dezero.core import as_array
    from dezero.core import as_variable
    from dezero.core import setup_variable

from dezero.layers import Layer
from dezero.models import Model

# Install operator overloads (e.g. +, *) on Variable at import time.
setup_variable()
| 1.421875 | 1 |
src/python/WMCore/WMBS/Oracle/Jobs/GetCouchID.py | hufnagel/WMCore | 21 | 12770165 | #!/usr/bin/env python
"""
_GetCouchID_
Oracle implementation of Jobs.GetCouchID
"""
from WMCore.WMBS.MySQL.Jobs.GetCouchID import GetCouchID as MySQLGetCouchID
class GetCouchID(MySQLGetCouchID):
    """
    Oracle implementation of Jobs.GetCouchID.

    Identical to the MySQL version, so everything is inherited unchanged.
    """
    pass
| 1.78125 | 2 |
lab6_named_entity_recognition/named_entity_recognition/tag_with_wikidata_sparql.py | hckr/semantic-data-labs | 0 | 12770166 | import time
import urllib
from typing import List, Tuple
from SPARQLWrapper import JSON, SPARQLWrapper
from named_entity_recognition.utils import (join_with_newlines, load_list,
save_text)
# Maximum number of names to process; 0 means "no limit".
LIMIT = 0


def main():
    """Tag each extracted name with its Wikidata classes and write an HTML report."""
    names = load_list('output/nltk.txt')
    sparql = SPARQLWrapper("https://query.wikidata.org/sparql")
    names_with_classes = []
    for i, name in enumerate(names):
        if LIMIT > 0 and i > LIMIT:
            break
        retries = 0
        while True:
            try:
                # \r keeps the progress on one line; the padding clears leftovers.
                print(f"({i + 1} / {len(names)}) {name}{' '*100}", end='\r')
                names_with_classes.append(
                    (name, fetch_classes_from_wikidata(sparql, name)))
                break
            # NOTE(review): `urllib.error` is only importable here because a
            # dependency loads the submodule as a side effect; a plain
            # `import urllib` does not -- confirm, or import urllib.error directly.
            except urllib.error.HTTPError as exc:
                # Retry transient HTTP failures up to 2 times, then give up.
                retries += 1
                if retries > 2:
                    print()
                    raise exc
            finally:
                # Throttle requests to respect Wikidata's rate limits.
                time.sleep(2)
    print(f"Saving HTML...{' '*100}")
    html = generate_html(names_with_classes)
    save_text('output/tagged_with_wikidata.html', html)
def fetch_classes_from_wikidata(sparql: SPARQLWrapper, name: str) -> str:
    """Return a comma-separated list of Wikidata classes (P31) for *name*.

    Matches items whose English label equals *name* exactly and concatenates
    the English labels of their "instance of" classes.

    NOTE(review): *name* is interpolated into the SPARQL string unescaped;
    names containing '"' would break the query.
    """
    separator = ', '
    sparql.setQuery(f'''
    SELECT (GROUP_CONCAT(DISTINCT ?instanceOfName; SEPARATOR="{separator}") AS ?instanceOfNames) {{
        ?item rdfs:label "{name}"@en .
        ?item wdt:P31 ?instanceOf .
        ?instanceOf rdfs:label ?instanceOfName FILTER(LANG(?instanceOfName) = "en") .
    }}
    ''')
    sparql.setReturnFormat(JSON)
    # GROUP_CONCAT yields a single binding row with the joined class names.
    return sparql.query().convert()['results']['bindings'][0]['instanceOfNames']['value']
def generate_html(names_with_classes: List[Tuple[str, str]]) -> str:
    """Render (name, classes) pairs as a standalone HTML document with a table.

    NOTE(review): names/classes are interpolated without HTML escaping.
    """
    css = '''
    table { border-collapse: collapse; max-width: 1000px; margin: 0 auto; }
    tr { border-bottom: 1px solid #333 }
    td { min-width: 200px; }
    '''
    return f'''
<!DOCTYPE html>
<html lang="en">
<head>
    <style>{css}</style>
</head>
<body>
    <table>
        <tr><th>Name</th><th>Classes</th></tr>
        { join_with_newlines(f'<tr><td>{name}</td><td>{classes}</td></tr>' for (name, classes) in names_with_classes) }
    </table>
</body>
</html>
'''.lstrip()


if __name__ == '__main__':
    main()
| 2.796875 | 3 |
Conteudo das Aulas/126/gabarito.py | cerberus707/lab-python | 0 | 12770167 | """
Faça uma "fabrica decoradora" que retorna um decorador que decora funções com um único argumento. A fábrica deverá receber um argumento, um
tipo, e retornar um decorador em que a função verifica se o argumento passado é do tipo correto, senão levanta um TypeError.
"""
def decorador(função, tipo):
    """Wrap a one-argument function with an exact type check.

    Returns a version of *função* that raises TypeError whenever the
    argument's exact type differs from *tipo*, and otherwise simply
    delegates to *função*.
    """
    def nova_função(arg):
        # Exact-type check (deliberately not isinstance: subclasses rejected).
        if type(arg) != tipo:
            raise TypeError
        return função(arg)
    return nova_função
| 3.890625 | 4 |
stanCode_Projects/hangman_game/similarity.py | calvin0123/sc-projects | 0 | 12770168 | <filename>stanCode_Projects/hangman_game/similarity.py
"""
File: similarity.py
Name: <NAME>
----------------------------
This program compares short dna sequence, s2,
with sub sequences of a long dna sequence, s1
The way of approaching this task is the same as
what people are doing in the bio industry.
"""
def main():
    """Prompt for a long and a short DNA sequence and print the best match.

    Input is upper-cased so matching is case-insensitive.
    """
    long_seq = input('Please give me a DNA sequence to search: ').upper()
    short_seq = input('What DNA sequence would you like to match? ').upper()
    print('The best match is ' + best_match(long_seq, short_seq))
def best_match(long_seq, short_seq):
    """Find the window of *long_seq* most similar to *short_seq*.

    Slides a window of ``len(short_seq)`` characters across *long_seq* and
    scores each window by the number of positions whose characters match.
    Ties keep the earliest window; if no window matches at all (or the
    short sequence is longer than the long one) the empty string is returned.

    :param long_seq: string, the DNA sequence to search through.
    :param short_seq: string, the DNA sequence to match.
    :return: string, the best-matching substring of *long_seq*.
    """
    window = len(short_seq)
    best_count = 0
    best_window = ''
    for start in range(len(long_seq) - window + 1):
        candidate = long_seq[start:start + window]
        # Count positions where the window agrees with the query.
        matches = sum(1 for a, b in zip(candidate, short_seq) if a == b)
        # Strict '>' keeps the first window on ties, like the original scan.
        if matches > best_count:
            best_count = matches
            best_window = candidate
    return best_window
###### DO NOT EDIT CODE BELOW THIS LINE ######
if __name__ == '__main__':
    # Run the interactive prompt only when executed as a script.
    main()
| 4.375 | 4 |
tests/builders/test_simple_bib_drone.py | munrojm/maggma | 0 | 12770169 | from datetime import datetime
from pathlib import Path
import pytest
from maggma.stores import MemoryStore
from .simple_bib_drone import SimpleBibDrone
@pytest.fixture
def init_drone(test_dir):
"""
Initialize the drone, do not initialize the connection with the database
:return:
initialized drone
"""
mongo_store = MemoryStore(collection_name="drone_test", key="record_key")
simple_path = test_dir / "simple_bib_example_data"
assert simple_path.exists(), f"{simple_path} not found"
simple_bib_drone = SimpleBibDrone(store=mongo_store, path=simple_path)
return simple_bib_drone
def test_read(init_drone: SimpleBibDrone):
"""
Test whether read function is correct
:param init_drone: un-connected simpleBibDrone instance
:return:
None
"""
list_record_id = init_drone.read(init_drone.path)
assert len(list_record_id) == 7
state_hashes = [r.state_hash for r in list_record_id]
assert len(state_hashes) == len(list_record_id) # all record_id has hash
assert len((set(state_hashes))) == len(state_hashes) # all unique hashes
num_docs = sum([len(r.documents) for r in list_record_id])
assert num_docs == 12
def test_record_id(init_drone: SimpleBibDrone):
"""
Test validity of RecordIdentifier
:param init_drone: un-connected simpleBibDrone instance
:return:
None
"""
list_record_id = init_drone.read(init_drone.path)
record0 = list_record_id[0]
assert record0.parent_directory == init_drone.path
assert record0.last_updated < datetime.now()
assert len(record0.documents) > 0
# state hash does not change when the file is not changed
assert record0.compute_state_hash() == record0.state_hash
def test_process_item(init_drone: SimpleBibDrone):
"""
Test whether data is expaneded correctly and whether meta data is added
:param init_drone: un-connected simpleBibDrone instance
:return:
None
"""
list_record_id = list(init_drone.read(init_drone.path))
text_record = next(
d for d in list_record_id if any("text" in f.name for f in d.documents)
)
data = init_drone.process_item(text_record)
assert "citations" in data
assert "text" in data
assert "record_key" in data
assert "last_updated" in data
assert "documents" in data
assert "state_hash" in data
def test_compute_record_identifier_key(init_drone: SimpleBibDrone):
list_record_id = init_drone.read(init_drone.path)
record0 = list_record_id[0]
doc0 = record0.documents[0]
assert record0.record_key == init_drone.compute_record_identifier_key(doc0)
def test_get_items(init_drone: SimpleBibDrone):
"""
This test might take a while
test whether get_items work correctly.
It should fetch from database all the files that needs to be updated
:param init_drone: un-connected simpleBibDrone instance
:return:
None
"""
init_drone.connect()
init_drone.run() # make sure the database is up-to-date
init_drone.connect()
assert sum([1 for _ in init_drone.get_items()]) == 0
init_drone.finalize()
init_drone.connect()
init_drone.store.remove_docs(criteria={}) # clears the database
assert sum([1 for _ in init_drone.get_items()]) == 7
init_drone.finalize()
def test_assimilate(init_drone: SimpleBibDrone):
"""
Test whether assimilate file is correct
:param init_drone: un-connected simpleBibDrone instance
:return:
None
"""
record_ids = init_drone.assimilate(init_drone.path)
assert len(record_ids) == 7
def test_compute_data(init_drone: SimpleBibDrone):
"""
test whether data is extracted as expected
:param init_drone: un-connected simpleBibDrone instance
:return:
None
"""
list_record_id = list(init_drone.read(init_drone.path))
text_record = next(
d for d in list_record_id if any("text" in f.name for f in d.documents)
)
data = init_drone.process_item(text_record)
assert "citations" in data
assert "text" in data
| 2.421875 | 2 |
websubsub/migrations/0011_subscription_static.py | Fak3/websubsub | 4 | 12770170 | # Generated by Django 2.0.13 on 2020-02-16 13:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('websubsub', '0010_subscription_time_last_event_received'),
]
operations = [
migrations.AddField(
model_name='subscription',
name='static',
field=models.BooleanField(default=False, editable=False),
),
]
| 1.515625 | 2 |
pybat/workflow/workflows.py | zezhong-zhang/pybat | 1 | 12770171 | # coding: utf8
# Copyright (c) <NAME>, University of Antwerp
# Distributed under the terms of the MIT License
import os
import numpy as np
from fireworks import Firework, LaunchPad, PyTask, Workflow
from pymongo.errors import ServerSelectionTimeoutError
from ruamel.yaml import YAML
from pybat.cli.commands.define import define_dimer, define_migration
from pybat.cli.commands.setup import transition
from pybat.core import Cathode, LiRichCathode, Dimer
from pybat.workflow.firetasks import VaspTask, CustodianTask, ConfigurationTask, \
EnergyConfTask
from pybat.workflow.fireworks import ScfFirework, RelaxFirework, NebFirework
"""
Package that contains all the Workflows of the pybat package.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, <NAME>, University of Antwerp"
__version__ = "alpha"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Mar 2019"
# Load the workflow configuration
CONFIG_FILE = os.path.join(os.path.expanduser("~"), ".pybat_wf_config.yaml")
if os.path.exists(CONFIG_FILE):
with open(CONFIG_FILE, 'r') as configfile:
yaml = YAML()
yaml.default_flow_style = False
CONFIG = yaml.load(configfile.read())
try:
LAUNCHPAD = LaunchPad(
host=CONFIG["SERVER"].get("host", ""),
port=int(CONFIG["SERVER"].get("port", 0)),
name=CONFIG["SERVER"].get("name", ""),
username=CONFIG["SERVER"].get("username", ""),
password=CONFIG["SERVER"].get("password", ""),
ssl=CONFIG["SERVER"].get("ssl", False),
authsource=CONFIG["SERVER"].get("authsource", None)
)
except ServerSelectionTimeoutError:
raise TimeoutError("Could not connect to server. Please make "
"sure the details of the server are correctly "
"set up.")
else:
raise FileNotFoundError("No configuration file found in user's home "
"directory. Please use pybat config "
"in order to set up the configuration for "
"the workflows.")
# TODO Extend configuration and make the whole configuration setup more user friendly
# Currently the user is not guided to the workflow setup when attempting to use
# pybat workflows, this should change and be tested. Moreover, careful additions should
# be made to make sure all user-specific configuration elements are easily configured
# and implemented in the code.
# TODO Fix the CustodianTask
# TODO Add UnitTests!
# It's really getting time to do this. Think about what unit tests you need and make a
# test suite.
def scf_workflow(structure_file, functional=("pbe", {}), directory="",
                 write_chgcar=False, in_custodian=False, number_nodes=None):
    """
    Add a self consistent field (SCF) calculation workflow to the launchpad
    of the mongoDB server defined in the configuration file.

    Args:
        structure_file (str): Path to the geometry file of the structure.
        functional (tuple): Tuple with the functional choices. The first element
            contains a string that indicates the functional used ("pbe", "hse", ...),
            whereas the second element contains a dictionary that allows the user
            to specify the various functional tags.
        directory (str): Directory in which the SCF calculation should be performed.
        write_chgcar (bool): Whether the CHGCAR file should be written.
        in_custodian (bool): Whether the calculation should be run inside a
            Custodian.
        number_nodes (int): Number of nodes that should be used for the
            calculations. Required to add the proper `_category` to the
            generated Firework, so it is picked up by the right Fireworker.

    Returns:
        None
    """
    # Derive a default calculation directory from the functional choice.
    if directory == "":
        directory = os.path.join(os.getcwd(), functional[0])
        if functional[0] == "pbeu":
            # Encode the Hubbard U values in the directory name, e.g. "Mn3.9".
            ldauu_values = functional[1]["LDAUU"]
            directory += "_" + "".join(
                element + str(u_value) for element, u_value in ldauu_values.items()
            )
        directory += "_scf"
    # Firework that sets up and runs the SCF calculation.
    scf_firework = ScfFirework(
        structure_file=structure_file, functional=functional,
        directory=directory, write_chgcar=write_chgcar,
        in_custodian=in_custodian, number_nodes=number_nodes
    )
    # Name the workflow after the reduced formula and the functional choice.
    cathode = LiRichCathode.from_file(structure_file)
    workflow_name = (str(cathode.composition.reduced_formula).replace(" ", "")
                     + str(functional))
    LAUNCHPAD.add_wf(Workflow(fireworks=[scf_firework], name=workflow_name))
def relax_workflow(structure_file, functional=("pbe", {}), directory="",
                   is_metal=False, in_custodian=False, number_nodes=None):
    """
    Add a geometry optimization workflow to the launchpad of the mongoDB
    server defined in the configuration file.

    Args:
        structure_file (str): Path to the geometry file of the structure.
        functional (tuple): Tuple with the functional choices. The first element
            contains a string that indicates the functional used ("pbe", "hse", ...),
            whereas the second element contains a dictionary that allows the user
            to specify the various functional tags.
        directory (str): Directory in which the geometry optimization should be
            performed.
        is_metal (bool): Whether the material is metallic; determines the
            smearing method used.
        in_custodian (bool): Whether the calculation should be run inside a
            Custodian.
        number_nodes (int): Number of nodes that should be used for the
            calculations. Required to add the proper `_category` to the
            generated Firework, so it is picked up by the right Fireworker.

    Returns:
        None
    """
    # Derive a default calculation directory from the functional choice.
    if directory == "":
        directory = os.path.join(os.getcwd(), functional[0])
        if functional[0] == "pbeu":
            # Encode the Hubbard U values in the directory name, e.g. "Mn3.9".
            ldauu_values = functional[1]["LDAUU"]
            directory += "_" + "".join(
                element + str(u_value) for element, u_value in ldauu_values.items()
            )
        directory += "_relax"
    # Firework that sets up and runs the geometry optimization.
    relax_firework = RelaxFirework(structure_file=structure_file,
                                   functional=functional,
                                   directory=directory,
                                   is_metal=is_metal,
                                   in_custodian=in_custodian,
                                   number_nodes=number_nodes)
    # Name the workflow after the reduced formula and the functional choice.
    cathode = LiRichCathode.from_file(structure_file)
    workflow_name = (str(cathode.composition.reduced_formula).replace(" ", "")
                     + str(functional))
    LAUNCHPAD.add_wf(Workflow(fireworks=[relax_firework], name=workflow_name))
def dimer_workflow(structure_file, dimer_indices=(0, 0), distance=0,
                   functional=("pbe", {}), is_metal=False, in_custodian=False,
                   number_nodes=None):
    """
    Set up a workflow that calculates the thermodynamics for a dimer
    formation in the current directory.
    Can later be expanded to also include kinetic barrier calculation.
    Args:
        structure_file (str): Structure file of the cathode material. Note
            that the structure file should be a json format file that is
            derived from the Cathode class, i.e. it should contain the cation
            configuration of the structure.
        dimer_indices (tuple): Indices of the oxygen sites which are to form a
            dimer. If no indices are provided, the user will be prompted.
        distance (float): Final distance between the oxygen atoms. If no
            distance is provided, the user will be prompted.
        functional (tuple): Tuple with the functional choices. The first element
            contains a string that indicates the functional used ("pbe", "hse", ...),
            whereas the second element contains a dictionary that allows the user
            to specify the various functional tags.
        is_metal (bool): Flag that indicates the material being studied is a
            metal, which changes the smearing from Gaussian to second order
            Methfessel-Paxton of 0.2 eV. Defaults to False.
        in_custodian (bool): Flag that indicates that the calculations
            should be run within a Custodian. Defaults to False.
        number_nodes (int): Number of nodes that should be used for the calculations.
            Is required to add the proper `_category` to the Firework generated, so
            it is picked up by the right Fireworker.
    """
    # TODO Change naming scheme
    # Let the user define a dimer, unless one is provided
    dimer_dir = define_dimer(structure_file=structure_file,
                             dimer_indices=dimer_indices,
                             distance=distance,
                             write_cif=True)
    # Set up the FireTask that sets up the transition calculation
    # (is_migration=False: this transition is a dimer formation, not a cation
    # migration).
    setup_transition = PyTask(
        func="pybat.cli.commands.setup.transition",
        kwargs={"directory": dimer_dir,
                "functional": functional,
                "is_metal": is_metal,
                "is_migration": False}
    )
    # Create the PyTask that runs the calculation
    if in_custodian:
        vasprun = CustodianTask(directory=os.path.join(dimer_dir, "final"))
    else:
        vasprun = VaspTask(directory=os.path.join(dimer_dir, "final"))
    # Extract the final cathode from the geometry optimization
    get_cathode = PyTask(
        func="pybat.cli.commands.get.get_cathode",
        kwargs={"directory": os.path.join(dimer_dir, "final"),
                "write_cif": True}
    )
    # Add number of nodes to spec, or "none"
    firework_spec = {"_launch_dir": os.getcwd()}
    if number_nodes is None:
        firework_spec.update({"_category": "none"})
    else:
        firework_spec.update({"_category": str(number_nodes) + "nodes"})
    # The transition Firework chains setup -> VASP run -> cathode extraction.
    transition_firework = Firework(tasks=[setup_transition, vasprun, get_cathode],
                                   name="Dimer Geometry optimization",
                                   spec=firework_spec)
    # Set up the SCF calculation directory
    scf_dir = os.path.join(dimer_dir, "scf_final")
    final_cathode = os.path.join(dimer_dir, "final", "final_cathode.json")
    # Set up the SCF calculation
    scf_firework = ScfFirework(
        structure_file=final_cathode, functional=functional,
        directory=scf_dir, write_chgcar=False, in_custodian=in_custodian,
        number_nodes=number_nodes
    )
    # The SCF Firework only runs after the transition Firework (links_dict).
    # NOTE(review): splitting on "/" assumes POSIX paths — os.path.basename
    # would be portable; confirm before changing the naming scheme.
    workflow = Workflow(fireworks=[transition_firework, scf_firework],
                        name=structure_file + dimer_dir.split("/")[-1],
                        links_dict={transition_firework: [scf_firework]})
    LAUNCHPAD.add_wf(workflow)
def migration_workflow(structure_file, migration_indices=(0, 0),
                       functional=("pbe", {}), is_metal=False,
                       in_custodian=False, number_nodes=None):
    """
    Set up a workflow that calculates the thermodynamics for a migration in
    the current directory.
    Can later be expanded to also include kinetic barrier calculation.
    Args:
        structure_file (str): Structure file of the cathode material. Note
            that the structure file should be a json format file that is
            derived from the Cathode class, i.e. it should contain the cation
            configuration of the structure.
        migration_indices (tuple): Tuple of the indices which designate the
            migrating site and the vacant site to which the cation will
            migrate. If no indices are provided, the user will be prompted.
        functional (tuple): Tuple with the functional choices. The first element
            contains a string that indicates the functional used ("pbe", "hse", ...),
            whereas the second element contains a dictionary that allows the user
            to specify the various functional tags.
        is_metal (bool): Flag that indicates the material being studied is a
            metal, which changes the smearing from Gaussian to second order
            Methfessel-Paxton of 0.2 eV. Defaults to False.
        in_custodian (bool): Flag that indicates that the calculations
            should be run within a Custodian. Defaults to False.
        number_nodes (int): Number of nodes that should be used for the calculations.
            Is required to add the proper `_category` to the Firework generated, so
            it is picked up by the right Fireworker.
    """
    # TODO Add setup steps to the workflow
    # In case adjustments need to made to the setup of certain calculations,
    # after which the calculation needs to be rerun, not adding the setup
    # steps to the workflow means that these will have to be rerun manually,
    # instead of simply relying on the fireworks commands.
    # Let the user define a migration
    migration_dir = define_migration(structure_file=structure_file,
                                     migration_indices=migration_indices,
                                     write_cif=True)
    # Set up the transition calculation
    # NOTE(review): is_migration=False in a *migration* workflow looks
    # inconsistent — confirm whether this should be True here.
    transition(directory=migration_dir,
               functional=functional,
               is_metal=is_metal,
               is_migration=False)
    # Create the PyTask that runs the calculation
    if in_custodian:
        vasprun = CustodianTask(directory=os.path.join(migration_dir, "final"))
    else:
        vasprun = VaspTask(directory=os.path.join(migration_dir, "final"))
    # Add number of nodes to spec, or "none"
    firework_spec = {"_launch_dir": os.getcwd()}
    if number_nodes is None:
        firework_spec.update({"_category": "none"})
    else:
        firework_spec.update({"_category": str(number_nodes) + "nodes"})
    # Single-Firework workflow: only the VASP run is tracked by FireWorks;
    # the setup above happened eagerly (see TODO).
    transition_firework = Firework(tasks=[vasprun],
                                   name="Migration Geometry optimization",
                                   spec=firework_spec)
    workflow = Workflow(fireworks=[transition_firework],
                        name=structure_file + migration_dir.split("/")[-1])
    LAUNCHPAD.add_wf(workflow)
def neb_workflow(directory, nimages=7, functional=("pbe", {}), is_metal=False,
                 is_migration=False, in_custodian=False,
                 number_nodes=None):
    """
    Set up a workflow that calculates the kinetic barrier between two geometries.

    # TODO
    TEMPORARY? Should NEB be integrated in other workflows? If so, should we still
    have a separate NEB workflow?

    Args:
        directory (str): Directory in which the NEB calculation should be performed.
        nimages (int): Number of images to use for the NEB calculation.
        functional (tuple): Tuple with the functional choices. The first element
            contains a string that indicates the functional used ("pbe", "hse", ...),
            whereas the second element contains a dictionary that allows the user
            to specify the various functional tags.
        is_metal (bool): Flag that indicates the material being studied is a
            metal, which changes the smearing from Gaussian to second order
            Methfessel-Paxton of 0.2 eV. Defaults to False.
        is_migration (bool): Flag that indicates that the transition is a migration
            of an atom in the structure.
        in_custodian (bool): Flag that indicates that the calculations
            should be run within a Custodian. Defaults to False.
        number_nodes (int): Number of nodes that should be used for the calculations.
            Is required to add the proper `_category` to the Firework generated, so
            it is picked up by the right Fireworker. Defaults to the number of images.
    """
    # If no number of nodes is specified, take the number of images
    if number_nodes is None:
        number_nodes = nimages
    # Create the Firework that sets up and runs the NEB
    neb_firework = NebFirework(
        directory=directory,
        nimages=nimages,
        functional=functional,
        is_metal=is_metal,
        is_migration=is_migration,
        in_custodian=in_custodian,
        number_nodes=number_nodes
    )
    # (A local `firework_spec` dict used to be built here, including an
    # unreachable `number_nodes is None` branch — number_nodes is always set
    # above — but it was never used, so the dead code was removed.)
    # Name the workflow after the cathode composition and the directory.
    cathode = Cathode.from_file(
        os.path.join(directory, "final", "initial_cathode.json")
    )
    dir_name = os.path.abspath(directory).split("/")[-1]
    workflow_name = str(cathode.composition).replace(" ", "") + " " + dir_name
    workflow = Workflow(fireworks=[neb_firework, ],
                        name=workflow_name)
    LAUNCHPAD.add_wf(workflow)
def configuration_workflow(structure_file, substitution_sites=None, element_list=None,
                           sizes=None, concentration_restrictions=None,
                           max_configurations=None, functional=("pbe", {}),
                           directory=None, in_custodian=False, number_nodes=None):
    """
    Set up a workflow for a set of atomic configurations, which includes a geometric
    optimization as well as a SCF calculation based on the final geometry.

    Args:
        structure_file (str): Structure file of the cathode material. Note
            that the structure file should be a json format file that is
            derived from the Cathode class, i.e. it should contain the cation
            configuration of the structure.
        substitution_sites (list): List of site indices or pymatgen.Sites to be
            substituted.
        element_list (list): List of string representations of the cation elements
            which have to be substituted on the substitution sites. Can also
            include "Vac" to introduce vacancy sites.
            E.g. ["Li", "Vac"]; ["Mn", "Co", "Ni"]; ...
        sizes (list): List of unit supercell sizes to be considered for the
            enumeration of the configurations.
            E.g. [1, 2]; range(1, 4); ...
        concentration_restrictions (dict): Dictionary of allowed concentration
            ranges for each element. Note that the concentration is defined
            versus the total amount of atoms in the unit cell.
            E.g. {"Li": (0.2, 0.3)}; {"Ni": (0.1, 0.2), "Mn": (0.05, 0.1)}; ...
        max_configurations (int): Maximum number of new configurations to generate.
            Note that the function detects all the cathode.json files present
            in the directory tree and ignores the corresponding configurations.
            max_configurations is the maximum number of new configurations that need
            to be generated, i.e. on top of the configurations already present in the
            directory tree in the form of cathode.json files.
        functional (tuple): Tuple with the functional choices. The first element
            contains a string that indicates the functional used ("pbe", "hse", ...),
            whereas the second element contains a dictionary that allows the user
            to specify the various functional tags.
        directory (str): Path to the directory in which the configurations and
            calculations should be set up. Defaults to the current working
            directory.
        in_custodian (bool): Flag that indicates that the calculations
            should be run within a Custodian. Defaults to False.
        number_nodes (int): Number of nodes that should be used for the calculations.
            Is required to add the proper `_category` to the Firework generated, so
            it is picked up by the right Fireworker.
    Returns:
        None
    """
    # Load the cathode from the structure file
    cathode = Cathode.from_file(structure_file)
    # Check for the required input, and request if necessary
    if not substitution_sites or not element_list or not sizes:
        print(cathode)
        print()
    if not substitution_sites:
        substitution_sites = [int(i) for i in input(
            "Please provide the substitution site indices, separated by a space: "
        ).split(" ")]
    if not element_list:
        element_list = input(
            "Please provide the substitution elements, separated by a space: "
        ).split(" ")
    if not sizes:
        sizes = [int(i) for i in input(
            "Please provide the possible unit cell sizes, separated by a space: "
        ).split(" ")]
    # Set up the directory. BUG FIX: the default is None, but the original
    # only checked `directory == ""`, so omitting the argument crashed in
    # os.path.abspath(None) with a TypeError. `not directory` covers both.
    if not directory:
        directory = os.getcwd()
    directory = os.path.abspath(directory)
    # FireTask that enumerates/sets up the configurations.
    configuration_task = ConfigurationTask(
        structure=cathode,
        directory=directory,
        substitution_sites=list(substitution_sites),
        element_list=element_list,
        sizes=list(sizes),
        concentration_restrictions=concentration_restrictions,
        max_configurations=max_configurations
    )
    # FireTask that runs the energy calculations for each configuration.
    energy_task = EnergyConfTask(
        functional=functional,
        in_custodian=in_custodian,
        number_nodes=number_nodes
    )
    # Set up a (sort of) clear name for the workflow
    workflow_name = str(cathode.composition.reduced_formula).replace(" ", "")
    workflow_name += " " + str(element_list)
    workflow_name += " " + str(functional)
    configuration_fw = Firework(tasks=[configuration_task, energy_task],
                                name="Configuration Setup",
                                spec={"_category": "none"})
    # Create the workflow
    workflow = Workflow(
        fireworks=[configuration_fw],
        name=workflow_name
    )
    LAUNCHPAD.add_wf(workflow)
def noneq_dimers_workflow(structure_file, distance, functional=("pbe", {}),
                          is_metal=False, in_custodian=False, number_nodes=None):
    """
    Run dimer calculations for all the nonequivalent dimers in a structure.
    Args:
        structure_file (str): Structure file of the cathode material. Note
            that the structure file should be a json format file that is
            derived from the Cathode class, i.e. it should contain the cation
            configuration of the structure.
        distance (float): Final distance between the oxygen atoms. If no
            distance is provided, the user will be prompted.
        functional (tuple): Tuple with the functional choices. The first element
            contains a string that indicates the functional used ("pbe", "hse", ...),
            whereas the second element contains a dictionary that allows the user
            to specify the various functional tags.
        is_metal (bool): Flag that indicates the material being studied is a
            metal, which changes the smearing from Gaussian to second order
            Methfessel-Paxton of 0.2 eV. Defaults to False.
        in_custodian (bool): Flag that indicates that the calculations
            should be run within a Custodian. Defaults to False.
        number_nodes (int): Number of nodes that should be used for the calculations.
            Is required to add the proper `_category` to the Firework generated, so
            it is picked up by the right Fireworker.
    Returns:
        None
    """
    lirich = LiRichCathode.from_file(structure_file)
    dimer_lists = lirich.list_noneq_dimers()
    for dimer_list in dimer_lists:
        # Find the dimer closest to the center of the lattice. Just for
        # visualization purposes.
        central_dimer = [(), 1e10]  # [dimer indices, smallest distance so far]
        for dimer in dimer_list:
            dimer_center = Dimer(lirich, dimer).center
            # NOTE(review): the center of the unit cell is (a + b + c) / 2;
            # dividing the summed lattice vectors by 3 yields their centroid
            # instead — confirm which is intended (visualization only, so
            # the impact is limited).
            lattice_center = np.sum(lirich.lattice.matrix, 0) / 3
            dist_to_center = np.linalg.norm(dimer_center - lattice_center)
            if dist_to_center < central_dimer[1]:
                central_dimer = [dimer, dist_to_center]
        # Launch one dimer workflow per nonequivalent dimer class.
        dimer_workflow(structure_file=structure_file,
                       dimer_indices=central_dimer[0],
                       distance=distance,
                       functional=functional,
                       is_metal=is_metal,
                       in_custodian=in_custodian,
                       number_nodes=number_nodes)
def site_dimers_workflow(structure_file, site_index, distance,
                         functional=("pbe", {}), is_metal=False,
                         in_custodian=False, number_nodes=None):
    """
    Set up dimer workflows for every nonequivalent dimer around one site.

    Args:
        structure_file (str): Structure file of the cathode material. Note
            that the structure file should be a json format file that is
            derived from the Cathode class, i.e. it should contain the cation
            configuration of the structure.
        site_index (int): Index of the site around which the dimers should
            be investigated. Corresponds to the internal Python index.
        distance (float): Final distance between the oxygen atoms. If no
            distance is provided, the user will be prompted.
        functional (tuple): Tuple with the functional choices. The first element
            contains a string that indicates the functional used ("pbe", "hse", ...),
            whereas the second element contains a dictionary that allows the user
            to specify the various functional tags.
        is_metal (bool): Flag that indicates the material being studied is a
            metal, which changes the smearing from Gaussian to second order
            Methfessel-Paxton of 0.2 eV. Defaults to False.
        in_custodian (bool): Flag that indicates that the calculations
            should be run within a Custodian. Defaults to False.
        number_nodes (int): Number of nodes that should be used for the
            calculations. Required to add the proper `_category` to the
            generated Firework, so it is picked up by the right Fireworker.

    Returns:
        None
    """
    cathode = LiRichCathode.from_file(structure_file)
    # Launch one dimer workflow for each nonequivalent dimer around the site.
    for dimer_indices in cathode.find_noneq_dimers(int(site_index)):
        dimer_workflow(structure_file=structure_file,
                       dimer_indices=dimer_indices,
                       distance=distance,
                       functional=functional,
                       is_metal=is_metal,
                       in_custodian=in_custodian,
                       number_nodes=number_nodes)
# region * Utility scripts
def find_all(name, path):
    """Return the full paths of every file called *name* below *path*."""
    return [os.path.join(root, name)
            for root, _dirs, files in os.walk(path)
            if name in files]
def find_all_cathode_hashes(path):
    """Collect the hash of every cathode.json structure found below *path*."""
    hashes = []
    for cathode_file in find_all("cathode.json", path):
        hashes.append(Cathode.from_file(cathode_file).__hash__())
    return hashes
def find_hash_dict(path):
    """Map the hash of each cathode.json below *path* to its relative directory."""
    root = os.path.abspath(path)
    hash_dict = {}
    for cathode_file in find_all("cathode.json", root):
        relative_dir = cathode_file.replace(root, "").replace("cathode.json", "")
        hash_dict[Cathode.from_file(cathode_file).__hash__()] = relative_dir
    return hash_dict
def generate_conf_dir(directory, element_list, configuration, number):
    """Return the calculation directory path for an atomic configuration.

    When "Vac" is in *element_list*, the configuration varies the working ion
    (Li/vacancy); otherwise it varies the transition metals.
    """
    if "Vac" in element_list:
        # Working-ion (Li/vacancy) configuration directory.
        return os.path.join(
            directory, "tm_conf_1",
            str(round(configuration.concentration, 3)),
            "workion_conf" + str(number), "prim"
        )
    # Transition-metal configuration directory.
    try:
        return os.path.join(
            directory, "tm_conf_" + str(number),
            str(round(configuration.concentration, 3)), "workion_conf1",
            "prim"
        )
    except ZeroDivisionError:
        # The concentration is undefined for this configuration; omit it.
        return os.path.join(
            directory, "tm_conf_" + str(number), "prim"
        )
# endregion
# region * Token workflows for testing
# endregion
| 2.046875 | 2 |
tests/test_fastapi/test_more_reallife_fastapi.py | ivangirko/ormar | 905 | 12770172 | <filename>tests/test_fastapi/test_more_reallife_fastapi.py
import asyncio
from typing import List, Optional
import databases
import pytest
import sqlalchemy
from fastapi import FastAPI
from starlette.testclient import TestClient
import ormar
from tests.settings import DATABASE_URL
# Shared application objects for the test module: the FastAPI app, the
# SQLAlchemy metadata collecting the model tables, and the database handle.
app = FastAPI()
metadata = sqlalchemy.MetaData()
# force_rollback=True keeps test transactions isolated (rolled back).
database = databases.Database(DATABASE_URL, force_rollback=True)
# Expose the database on app state so the startup/shutdown hooks can reach it.
app.state.database = database
@app.on_event("startup")
async def startup() -> None:
database_ = app.state.database
if not database_.is_connected:
await database_.connect()
@app.on_event("shutdown")
async def shutdown() -> None:
database_ = app.state.database
if database_.is_connected:
await database_.disconnect()
class Category(ormar.Model):
    """Ormar model mapped to the "categories" table."""
    class Meta:
        # Table binding: table name, SQLAlchemy metadata and database handle.
        tablename = "categories"
        metadata = metadata
        database = database
    # Auto-generated integer primary key.
    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)
class Item(ormar.Model):
    """Ormar model mapped to the "items" table, with an optional category."""
    class Meta:
        # Table binding: table name, SQLAlchemy metadata and database handle.
        tablename = "items"
        metadata = metadata
        database = database
    # Auto-generated integer primary key.
    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)
    # Nullable foreign key: an item may exist without a category.
    category: Optional[Category] = ormar.ForeignKey(Category, nullable=True)
@pytest.fixture(autouse=True, scope="module")
def create_test_database():
    """Create the model tables before the module's tests and drop them after."""
    engine = sqlalchemy.create_engine(DATABASE_URL)
    metadata.create_all(engine)
    yield
    metadata.drop_all(engine)
@app.get("/items/", response_model=List[Item])
async def get_items():
items = await Item.objects.select_related("category").all()
return items
@app.get("/items/raw/", response_model=List[Item])
async def get_raw_items():
items = await Item.objects.all()
return items
@app.post("/items/", response_model=Item)
async def create_item(item: Item):
await item.save()
return item
@app.post("/categories/", response_model=Category)
async def create_category(category: Category):
await category.save()
return category
@app.get("/items/{item_id}")
async def get_item(item_id: int):
item = await Item.objects.get(pk=item_id)
return item
@app.put("/items/{item_id}")
async def update_item(item_id: int, item: Item):
item_db = await Item.objects.get(pk=item_id)
return await item_db.update(**item.dict())
@app.delete("/items/{item_id}")
async def delete_item(item_id: int, item: Item = None):
if item:
return {"deleted_rows": await item.delete()}
item_db = await Item.objects.get(pk=item_id)
return {"deleted_rows": await item_db.delete()}
def test_all_endpoints():
    """End-to-end exercise of every endpoint through the TestClient."""
    client = TestClient(app)
    # Entering the client context triggers the startup/shutdown events.
    with client as client:
        # Create a category and an item that references it.
        response = client.post("/categories/", json={"name": "test cat"})
        category = response.json()
        response = client.post(
            "/items/", json={"name": "test", "id": 1, "category": category}
        )
        item = Item(**response.json())
        assert item.pk is not None
        # The item list should contain exactly the created item.
        response = client.get("/items/")
        items = [Item(**item) for item in response.json()]
        assert items[0] == item
        # Update the item's name and verify the change round-trips.
        item.name = "New name"
        response = client.put(f"/items/{item.pk}", json=item.dict())
        assert response.json() == item.dict()
        response = client.get("/items/")
        items = [Item(**item) for item in response.json()]
        assert items[0].name == "New name"
        # The raw endpoint skips select_related, so the category is unloaded.
        response = client.get("/items/raw/")
        items = [Item(**item) for item in response.json()]
        assert items[0].name == "New name"
        assert items[0].category.name is None
        # Fetch the item by id.
        response = client.get(f"/items/{item.pk}")
        new_item = Item(**response.json())
        assert new_item == item
        # Delete by id (no payload) and verify the list is empty.
        response = client.delete(f"/items/{item.pk}")
        assert response.json().get("deleted_rows", "__UNDEFINED__") != "__UNDEFINED__"
        response = client.get("/items/")
        items = response.json()
        assert len(items) == 0
        # Re-create an item and delete it via the payload branch.
        client.post("/items/", json={"name": "test_2", "id": 2, "category": category})
        response = client.get("/items/")
        items = response.json()
        assert len(items) == 1
        item = Item(**items[0])
        response = client.delete(f"/items/{item.pk}", json=item.dict())
        assert response.json().get("deleted_rows", "__UNDEFINED__") != "__UNDEFINED__"
        # The auto-generated docs page should be reachable.
        response = client.get("/docs/")
        assert response.status_code == 200
| 2.15625 | 2 |
client_net.py | Args-Engine/rb-distributed-helloworld | 0 | 12770173 | <filename>client_net.py
import socket
import application_params as ap
import messages
class Client:
    """TCP client that exchanges middleware messages with the server."""

    def __init__(self, addr: str, middleware):
        """Connect to the server at *addr* on the configured port, retrying
        until it succeeds.

        BUG FIX: the original loop retried connect_ex() on the same socket in
        a tight loop, spinning at 100% CPU and reusing a socket that had
        already failed to connect. Each attempt now uses a fresh socket and
        backs off for a second between tries.
        """
        import time  # local import: only needed for the retry back-off

        while True:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            if self.sock.connect_ex((addr, ap.PORT)) == 0:
                break
            self.sock.close()
            print("Connection failed, host not up!")
            time.sleep(1)
        # NOTE(review): this flag is set but never updated in this class —
        # confirm that callers are responsible for flipping it.
        self.established = False
        self.middleware = middleware

    def communicate(self) -> None:
        """Send one message emitted by the middleware and dispatch the reply.

        NOTE(review): a single recv() may return a partial message; this
        assumes each reply fits in one read of MAX_MSG_LEN_CLIENT bytes —
        confirm the framing with the protocol definition.
        """
        message = self.middleware.emit()
        self.sock.send(message.to_sendable())
        data = self.sock.recv(ap.MAX_MSG_LEN_CLIENT)
        # Dispatch the reply to the first message type that recognizes it.
        for message_key, message_type in messages.msg_types.items():
            if message_type.is_this(data=data):
                self.middleware.consume(message_key, *message_type.from_sendable(data=data))
| 2.734375 | 3 |
sdk/python/pulumi_aws/storagegateway/stored_iscsi_volume.py | sibuthomasmathew/pulumi-aws | 0 | 12770174 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['StoredIscsiVolume']
class StoredIscsiVolume(pulumi.CustomResource):
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 disk_id: Optional[pulumi.Input[str]] = None,
                 gateway_arn: Optional[pulumi.Input[str]] = None,
                 kms_encrypted: Optional[pulumi.Input[bool]] = None,
                 kms_key: Optional[pulumi.Input[str]] = None,
                 network_interface_id: Optional[pulumi.Input[str]] = None,
                 preserve_existing_data: Optional[pulumi.Input[bool]] = None,
                 snapshot_id: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 target_name: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Manages an AWS Storage Gateway stored iSCSI volume.
        > **NOTE:** The gateway must have a working storage added (e.g. via the `storagegateway.WorkingStorage` resource) before the volume is operational to clients, however the Storage Gateway API will allow volume creation without error in that case and return volume status as `WORKING STORAGE NOT CONFIGURED`.
        ## Example Usage
        ### Create Empty Stored iSCSI Volume
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.storagegateway.StoredIscsiVolume("example",
            gateway_arn=aws_storagegateway_cache["example"]["gateway_arn"],
            network_interface_id=aws_instance["example"]["private_ip"],
            target_name="example",
            preserve_existing_data=False,
            disk_id=data["aws_storagegateway_local_disk"]["test"]["id"])
        ```
        ### Create Stored iSCSI Volume From Snapshot
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.storagegateway.StoredIscsiVolume("example",
            gateway_arn=aws_storagegateway_cache["example"]["gateway_arn"],
            network_interface_id=aws_instance["example"]["private_ip"],
            snapshot_id=aws_ebs_snapshot["example"]["id"],
            target_name="example",
            preserve_existing_data=False,
            disk_id=data["aws_storagegateway_local_disk"]["test"]["id"])
        ```
        ## Import
        `aws_storagegateway_stored_iscsi_volume` can be imported by using the volume Amazon Resource Name (ARN), e.g.
        ```sh
        $ pulumi import aws:storagegateway/storedIscsiVolume:StoredIscsiVolume example arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678/volume/vol-12345678
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] disk_id: The unique identifier for the gateway local disk that is configured as a stored volume.
        :param pulumi.Input[str] gateway_arn: The Amazon Resource Name (ARN) of the gateway.
        :param pulumi.Input[bool] kms_encrypted: `true` to use Amazon S3 server side encryption with your own AWS KMS key, or `false` to use a key managed by Amazon S3. Optional.
        :param pulumi.Input[str] kms_key: The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when `kms_encrypted` is `true`.
        :param pulumi.Input[str] network_interface_id: The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted.
        :param pulumi.Input[bool] preserve_existing_data: Specify this field as `true` if you want to preserve the data on the local disk. Otherwise, specifying this field as false creates an empty volume.
        :param pulumi.Input[str] snapshot_id: The snapshot ID of the snapshot to restore as the new stored volume. e.g. `snap-1122aabb`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value mapping of resource tags
        :param pulumi.Input[str] target_name: The name of the iSCSI target used by initiators to connect to the target and as a suffix for the target ARN. The target name must be unique across all volumes of a gateway.
        """
        # Deprecated positional aliases: map legacy __name__/__opts__ onto the
        # current parameters, with a DeprecationWarning.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id is None => this is a create (not a lookup of an existing
        # resource), so validate and assemble the input property bag.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            if disk_id is None and not opts.urn:
                raise TypeError("Missing required property 'disk_id'")
            __props__['disk_id'] = disk_id
            if gateway_arn is None and not opts.urn:
                raise TypeError("Missing required property 'gateway_arn'")
            __props__['gateway_arn'] = gateway_arn
            __props__['kms_encrypted'] = kms_encrypted
            __props__['kms_key'] = kms_key
            if network_interface_id is None and not opts.urn:
                raise TypeError("Missing required property 'network_interface_id'")
            __props__['network_interface_id'] = network_interface_id
            if preserve_existing_data is None and not opts.urn:
                raise TypeError("Missing required property 'preserve_existing_data'")
            __props__['preserve_existing_data'] = preserve_existing_data
            __props__['snapshot_id'] = snapshot_id
            __props__['tags'] = tags
            if target_name is None and not opts.urn:
                raise TypeError("Missing required property 'target_name'")
            __props__['target_name'] = target_name
            # Output-only properties start as None; the engine resolves them.
            __props__['arn'] = None
            __props__['chap_enabled'] = None
            __props__['lun_number'] = None
            __props__['network_interface_port'] = None
            __props__['target_arn'] = None
            __props__['volume_attachment_status'] = None
            __props__['volume_id'] = None
            __props__['volume_size_in_bytes'] = None
            __props__['volume_status'] = None
            __props__['volume_type'] = None
        super(StoredIscsiVolume, __self__).__init__(
            'aws:storagegateway/storedIscsiVolume:StoredIscsiVolume',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
chap_enabled: Optional[pulumi.Input[bool]] = None,
disk_id: Optional[pulumi.Input[str]] = None,
gateway_arn: Optional[pulumi.Input[str]] = None,
kms_encrypted: Optional[pulumi.Input[bool]] = None,
kms_key: Optional[pulumi.Input[str]] = None,
lun_number: Optional[pulumi.Input[int]] = None,
network_interface_id: Optional[pulumi.Input[str]] = None,
network_interface_port: Optional[pulumi.Input[int]] = None,
preserve_existing_data: Optional[pulumi.Input[bool]] = None,
snapshot_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_arn: Optional[pulumi.Input[str]] = None,
target_name: Optional[pulumi.Input[str]] = None,
volume_attachment_status: Optional[pulumi.Input[str]] = None,
volume_id: Optional[pulumi.Input[str]] = None,
volume_size_in_bytes: Optional[pulumi.Input[int]] = None,
volume_status: Optional[pulumi.Input[str]] = None,
volume_type: Optional[pulumi.Input[str]] = None) -> 'StoredIscsiVolume':
"""
Get an existing StoredIscsiVolume resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: Volume Amazon Resource Name (ARN), e.g. `arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678/volume/vol-12345678`.
:param pulumi.Input[bool] chap_enabled: Whether mutual CHAP is enabled for the iSCSI target.
:param pulumi.Input[str] disk_id: The unique identifier for the gateway local disk that is configured as a stored volume.
:param pulumi.Input[str] gateway_arn: The Amazon Resource Name (ARN) of the gateway.
:param pulumi.Input[bool] kms_encrypted: `true` to use Amazon S3 server side encryption with your own AWS KMS key, or `false` to use a key managed by Amazon S3. Optional.
:param pulumi.Input[str] kms_key: The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when `kms_encrypted` is `true`.
:param pulumi.Input[int] lun_number: Logical disk number.
:param pulumi.Input[str] network_interface_id: The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted.
:param pulumi.Input[int] network_interface_port: The port used to communicate with iSCSI targets.
:param pulumi.Input[bool] preserve_existing_data: Specify this field as `true` if you want to preserve the data on the local disk. Otherwise, specifying this field as false creates an empty volume.
:param pulumi.Input[str] snapshot_id: The snapshot ID of the snapshot to restore as the new stored volume. e.g. `snap-1122aabb`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value mapping of resource tags
:param pulumi.Input[str] target_arn: Target Amazon Resource Name (ARN), e.g. `arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678/target/iqn.1997-05.com.amazon:TargetName`.
:param pulumi.Input[str] target_name: The name of the iSCSI target used by initiators to connect to the target and as a suffix for the target ARN. The target name must be unique across all volumes of a gateway.
:param pulumi.Input[str] volume_attachment_status: A value that indicates whether a storage volume is attached to, detached from, or is in the process of detaching from a gateway.
:param pulumi.Input[str] volume_id: Volume ID, e.g. `vol-12345678`.
:param pulumi.Input[int] volume_size_in_bytes: The size of the data stored on the volume in bytes.
:param pulumi.Input[str] volume_status: indicates the state of the storage volume.
:param pulumi.Input[str] volume_type: indicates the type of the volume.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["chap_enabled"] = chap_enabled
__props__["disk_id"] = disk_id
__props__["gateway_arn"] = gateway_arn
__props__["kms_encrypted"] = kms_encrypted
__props__["kms_key"] = kms_key
__props__["lun_number"] = lun_number
__props__["network_interface_id"] = network_interface_id
__props__["network_interface_port"] = network_interface_port
__props__["preserve_existing_data"] = preserve_existing_data
__props__["snapshot_id"] = snapshot_id
__props__["tags"] = tags
__props__["target_arn"] = target_arn
__props__["target_name"] = target_name
__props__["volume_attachment_status"] = volume_attachment_status
__props__["volume_id"] = volume_id
__props__["volume_size_in_bytes"] = volume_size_in_bytes
__props__["volume_status"] = volume_status
__props__["volume_type"] = volume_type
return StoredIscsiVolume(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
Volume Amazon Resource Name (ARN), e.g. `arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678/volume/vol-12345678`.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="chapEnabled")
def chap_enabled(self) -> pulumi.Output[bool]:
"""
Whether mutual CHAP is enabled for the iSCSI target.
"""
return pulumi.get(self, "chap_enabled")
@property
@pulumi.getter(name="diskId")
def disk_id(self) -> pulumi.Output[str]:
"""
The unique identifier for the gateway local disk that is configured as a stored volume.
"""
return pulumi.get(self, "disk_id")
@property
@pulumi.getter(name="gatewayArn")
def gateway_arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name (ARN) of the gateway.
"""
return pulumi.get(self, "gateway_arn")
@property
@pulumi.getter(name="kmsEncrypted")
def kms_encrypted(self) -> pulumi.Output[Optional[bool]]:
"""
`true` to use Amazon S3 server side encryption with your own AWS KMS key, or `false` to use a key managed by Amazon S3. Optional.
"""
return pulumi.get(self, "kms_encrypted")
@property
@pulumi.getter(name="kmsKey")
def kms_key(self) -> pulumi.Output[Optional[str]]:
"""
The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when `kms_encrypted` is `true`.
"""
return pulumi.get(self, "kms_key")
@property
@pulumi.getter(name="lunNumber")
def lun_number(self) -> pulumi.Output[int]:
"""
Logical disk number.
"""
return pulumi.get(self, "lun_number")
@property
@pulumi.getter(name="networkInterfaceId")
def network_interface_id(self) -> pulumi.Output[str]:
"""
The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted.
"""
return pulumi.get(self, "network_interface_id")
@property
@pulumi.getter(name="networkInterfacePort")
def network_interface_port(self) -> pulumi.Output[int]:
"""
The port used to communicate with iSCSI targets.
"""
return pulumi.get(self, "network_interface_port")
@property
@pulumi.getter(name="preserveExistingData")
def preserve_existing_data(self) -> pulumi.Output[bool]:
"""
Specify this field as `true` if you want to preserve the data on the local disk. Otherwise, specifying this field as false creates an empty volume.
"""
return pulumi.get(self, "preserve_existing_data")
@property
@pulumi.getter(name="snapshotId")
def snapshot_id(self) -> pulumi.Output[Optional[str]]:
"""
The snapshot ID of the snapshot to restore as the new stored volume. e.g. `snap-1122aabb`.
"""
return pulumi.get(self, "snapshot_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Key-value mapping of resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="targetArn")
def target_arn(self) -> pulumi.Output[str]:
"""
Target Amazon Resource Name (ARN), e.g. `arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678/target/iqn.1997-05.com.amazon:TargetName`.
"""
return pulumi.get(self, "target_arn")
@property
@pulumi.getter(name="targetName")
def target_name(self) -> pulumi.Output[str]:
"""
The name of the iSCSI target used by initiators to connect to the target and as a suffix for the target ARN. The target name must be unique across all volumes of a gateway.
"""
return pulumi.get(self, "target_name")
@property
@pulumi.getter(name="volumeAttachmentStatus")
def volume_attachment_status(self) -> pulumi.Output[str]:
"""
A value that indicates whether a storage volume is attached to, detached from, or is in the process of detaching from a gateway.
"""
return pulumi.get(self, "volume_attachment_status")
@property
@pulumi.getter(name="volumeId")
def volume_id(self) -> pulumi.Output[str]:
"""
Volume ID, e.g. `vol-12345678`.
"""
return pulumi.get(self, "volume_id")
@property
@pulumi.getter(name="volumeSizeInBytes")
def volume_size_in_bytes(self) -> pulumi.Output[int]:
"""
The size of the data stored on the volume in bytes.
"""
return pulumi.get(self, "volume_size_in_bytes")
    @property
    @pulumi.getter(name="volumeStatus")
    def volume_status(self) -> pulumi.Output[str]:
        """
        Indicates the state of the storage volume.
        """
        return pulumi.get(self, "volume_status")
    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> pulumi.Output[str]:
        """
        Indicates the type of the volume.
        """
        return pulumi.get(self, "volume_type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 1.65625 | 2 |
import sys
# Advent of Code 2021, day 1, part 2: count how many 3-measurement sliding
# window sums are larger than the previous window's sum.
# Redirect stdio so plain input()/print() read and write the puzzle files.
sys.stdout = open('output.txt', 'w')
sys.stdin = open('input.txt')
# Part Two
from collections import deque
ans = 0          # number of window sums that increased
last = None      # previous window sum (None until the first full window)
t = 2000         # assumes input.txt has exactly 2000 lines — TODO confirm
q = deque()      # current sliding window of up to 3 measurements
sm = 0           # running sum of the values in q
for _ in range(t):
    # Keep the window at 3 elements: drop the oldest before adding the next.
    if len(q) == 3:
        sm -= q.popleft()
    num = int(input())
    sm += num
    q.append(num)
    if last is None and len(q) == 3:
        # First complete window: establish the baseline, nothing to compare.
        last = sm
    elif last is None:
        # Window not yet full — skip the comparison.
        continue
    if sm > last:
        ans += 1
    last = sm
print(ans)
| 3.015625 | 3 |
class Node:
    """Singly linked list node holding a value and a reference to the next node."""

    def __init__(self, val):
        self.val = val
        self.next = None

    def add(self, val):
        """Append val at the end of the chain starting at this node."""
        if not self.next:
            self.next = Node(val)
        else:
            self.next.add(val)

    def remove(self, val):
        """Unlink the first successor holding val; no-op when val is absent.

        BUGFIX: the original dereferenced self.next.val without a None check,
        so removing a value not present in the chain raised AttributeError.
        """
        if self.next is None:
            return
        if self.next.val == val:
            self.next = self.next.next
        else:
            self.next.remove(val)

    def __str__(self):
        return str(self.val)
class LinkedList:
    """Singly linked list with a head pointer and a cached element count."""

    def __init__(self):
        self.head = None
        self.length = 0

    def push(self, val):
        """Append val at the tail of the list."""
        if not self.head:
            self.head = Node(val)
        else:
            self.head.add(val)
        self.length += 1

    def add_first(self, val):
        """Insert val at the head of the list."""
        node = Node(val)
        node.next = self.head
        self.head = node
        self.length += 1

    def get_length(self):
        """Return the number of elements currently in the list."""
        return self.length

    def remove(self, val):
        """Remove the first node holding val; no-op when val is absent.

        BUGFIX: the original read self.head.next.val without checking that
        head.next existed (AttributeError on a one-element list that did not
        match) and decremented length even when nothing was removed.
        """
        prev = None
        curr = self.head
        while curr is not None:
            if curr.val == val:
                if prev is None:
                    self.head = curr.next
                else:
                    prev.next = curr.next
                self.length -= 1
                return
            prev = curr
            curr = curr.next

    def remove_by_index(self, index):
        """Remove the node at 0-based position index; out-of-range is a no-op.

        BUGFIX: the original decremented length even when the index was out
        of range and nothing was unlinked.
        """
        if index < 0 or index >= self.length or self.head is None:
            return
        if index == 0:
            self.head = self.head.next
        else:
            prev = self.head
            for _ in range(index - 1):
                prev = prev.next
            prev.next = prev.next.next
        self.length -= 1

    def sort(self, criteria, asc=True):
        """Sort in place via a strategy object exposing sort(linked_list, asc)."""
        self.head = criteria.sort(self, asc)

    def count_elements(self):
        """Return a dict mapping each stored value to its occurrence count."""
        counts = {}
        node = self.head
        while node is not None:
            counts[node.val] = counts.get(node.val, 0) + 1
            node = node.next
        return counts

    def __str__(self):
        parts = []
        node = self.head
        while node is not None:
            parts.append(node.__str__())
            node = node.next
        return ', '.join(parts)
| 3.703125 | 4 |
# BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from settings import NICS, DRIVERS
from settings import load_global_setting, HOST_DRIVER_SETTING
from dut import Dut
from tester import Tester
class SPDKdut(Dut):
"""
SPDK project class will be called set_target function to setup
build, memory and kernel module.
"""
def __init__(self, crb, serializer):
super(SPDKdut, self).__init__(crb, serializer)
self.testpmd = None
def set_target(self, target):
self.target = target
drivername = load_global_setting(HOST_DRIVER_SETTING)
if drivername == DRIVERS['ConnectX4']:
out = self.send_expect("lsmod | grep mlx5_ib", "#")
if "mlx5_ib" not in out:
self.send_expect("modprobe mlx5_core", "#", 70)
self.send_expect("modprobe mlx5_ib", "#", 70)
if drivername == DRIVERS['ConnectX3']:
out = self.send_expect("lsmod | grep mlx4_ib", "#")
if "mlx4_ib" not in out:
self.send_expect("modprobe mlx4_en", "#", 70)
self.send_expect("modprobe mlx4_core", "#", 70)
self.send_expect("modprobe mlx4_ib", "#", 70)
if drivername == DRIVERS['chelsio_40gb']:
out = self.send_expect("lsmod | grep iw_cxgb4", "#")
if "iw_cxgb4" not in out:
self.send_expect("modprobe cxgb4", "#", 70)
self.send_expect("modprobe iw_cxgb4", "#", 70)
self.setup_modules(target)
if not self.skip_setup:
self.build_install_spdk(target)
def setup_modules(self, target):
drivername = load_global_setting(HOST_DRIVER_SETTING)
if drivername == "ConnectX4" or "ConnectX3":
out = self.send_expect("lsmod | grep ib_cm", "#")
if "ib_cm" not in out:
self.send_expect("modprobe ib_addr", "#", 70)
self.send_expect("modprobe ib_cm", "#", 70)
self.send_expect("modprobe ib_core", "#", 70)
self.send_expect("modprobe ib_mad", "#", 70)
self.send_expect("modprobe ib_sa", "#", 70)
self.send_expect("modprobe ib_ucm", "#", 70)
self.send_expect("modprobe ib_umad", "#", 70)
self.send_expect("modprobe ib_uverbs", "#", 70)
self.send_expect("modprobe iw_cm", "#", 70)
self.send_expect("modprobe rdma_cm", "#", 70)
self.send_expect("modprobe rdma_ucm", "#", 70)
print " load some kernel modules"
print " kernel modules has loaded"
def build_install_spdk(self, target, extra_options=''):
self.send_expect("make clean", "#", 20)
drivername = load_global_setting(HOST_DRIVER_SETTING)
if drivername == "ConnectX4" or "ConnectX3":
self.send_expect("./configure --with-rdma", "#", 100)
else:
self.send_expect("./configure", "#", 100)
out = self.send_expect("make -j", "# ", 100)
if("Error" in out or "No rule to make" in out):
self.logger.error("ERROR - try to compile again")
out = self.send_expect("make", "# ", 100)
assert ("Error" not in out), "Compilation error..."
assert ("No rule to make" not in out), "No rule to make error..."
self.send_expect("NRHUGE=12288 %s" % r'./scripts/setup.sh', "#", 200)
def prepare_package(self):
if not self.skip_setup:
depot = "../dep"
gitLabel = "master"
gitLabel1 = "spdk-17.05"
gitURL = r"https://github.com/spdk/spdk.git"
gitURL1 = r"https://github.com/spdk/dpdk.git"
gitPrefix = r"spdk/"
gitPrefix1 = r"dpdk/"
package = r"../dep/spdk.tar.gz"
package1 = r"../dep/dpdk.tar.gz"
if os.path.exists("%s/%s" % (depot, gitPrefix)) is True:
ret = os.system(
"cd %s/%s && git pull --force" %
(depot, gitPrefix))
else:
print "git clone %s %s/%s" % (gitURL, depot, gitPrefix)
ret = os.system(
"git clone %s %s/%s" %
(gitURL, depot, gitPrefix))
if ret is not 0:
print "Clone spdk failed!!!"
raise EnvironmentError
if os.path.exists("%s/%s" % (depot, gitPrefix1)) is True:
ret1 = os.system(
"cd %s/%s && git pull --force" %
(depot, gitPrefix1))
else:
print "git clone %s %s/%s" % (gitURL1, depot, gitPrefix1)
ret1 = os.system(
"git clone %s %s/%s" %
(gitURL1, depot, gitPrefix1))
if ret1 is not 0:
print "Clone spdk failed!!!"
raise EnvironmentError
ret = os.system(
"cd %s/%s && git archive --format=tar.gz --prefix=%s/ %s -o ../%s" %
(depot, gitPrefix, gitPrefix, gitLabel, package))
if ret is not 0:
print "Zip spdk failed!!!"
raise EnvironmentError
assert (os.path.isfile(package) is True), "Invalid spdk package"
ret1 = os.system(
"cd %s/%s && git archive --format=tar.gz --prefix=%s/ %s -o ../%s" %
(depot, gitPrefix1, gitPrefix1, gitLabel1, package1))
if ret1 is not 0:
print "Zip dpdk failed!!!"
raise EnvironmentError
assert (os.path.isfile(package1) is True), "Invalid dpdk package"
p_dir, _ = os.path.split(self.base_dir)
q_dir, _ = os.path.split(self.dpdk_dir)
dst_dir = "/tmp/"
out = self.send_expect(
"ls %s && cd %s" %
(dst_dir, p_dir), "#", verify=True)
if out == -1:
raise ValueError("Directiry %s or %s does not exist,"
"please check params -d"
% (p_dir, dst_dir))
self.session.copy_file_to(package, dst_dir)
self.session.copy_file_to(package1, dst_dir)
self.send_expect("ulimit -c unlimited", "#")
self.send_expect("rm -rf %s" % self.base_dir, "#")
out = self.send_expect("tar zxf %s%s -C %s" %
(dst_dir, package.split('/')[-1], p_dir), "# ", 20, verify=True)
if out == -1:
raise ValueError("Extract spdk package to %s failure,"
"please check params -d"
% (p_dir))
self.send_expect("rm -rf %s" % self.dpdk_dir, "#")
out1 = self.send_expect("tar zxf %s%s -C %s" %
(dst_dir, package1.split('/')[-1], q_dir), "# ", 20, verify=True)
if out1 == -1:
raise ValueError("Extract spdk package to %s failure,"
"please check params -d"
% (q_dir))
out = self.send_expect("cd %s" % self.base_dir,
"# ", 20, verify=True)
if out == -1:
raise ValueError("spdk dir %s mismatch, please check params -d"
% self.base_dir)
def prerequisites(self):
self.prepare_package()
self.dut_prerequisites()
class SPDKtester(Tester):
    """Tester-side counterpart of SPDKdut; only generic tester setup is needed."""

    def __init__(self, crb, serializer):
        # NAME is set before calling Tester.__init__ — presumably the base
        # class reads it during initialization; confirm before reordering.
        self.NAME = "tester"
        super(SPDKtester, self).__init__(crb, serializer)

    def prerequisites(self, perf_test=False):
        # perf_test is accepted for interface compatibility but unused here.
        self.tester_prerequisites()
| 1.484375 | 1 |
dashboard/views/_general/_classes.py | beta-nu-theta-chi/ox-dashboard | 0 | 12770178 | <reponame>beta-nu-theta-chi/ox-dashboard
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse, reverse_lazy
from dashboard.forms import ClassTakenForm
from dashboard.models import (
Position,
Classes,
Brother,
Grade
)
from dashboard.utils import verify_position
from dashboard.views._dashboard_generic_views import DashboardDeleteView
@login_required
def classes(request, department=None, number=None, brother=None):
    """List classes taken by brothers, with optional filters.

    URL kwargs filter by department, class number and brother pk.  POST
    either rebuilds the filter URL ('filter') or removes the current user
    from a class ('unadd_self'), deleting the class when it becomes empty.

    BUGFIX: removed the original `if not request.user.is_authenticated`
    branch — @login_required already redirects anonymous users, so that
    branch was unreachable.
    """
    if request.user.brother in Position.objects.get(title=Position.PositionChoices.SCHOLARSHIP_CHAIR).brothers.all():
        view = "scholarship"
    else:
        view = ""
    classes_taken = Classes.objects.all().order_by('department', 'number')
    if department is not None:
        classes_taken = classes_taken.filter(department=department)
    if brother is not None:
        classes_taken = classes_taken.filter(brothers=brother)
        # URL parameters arrive as strings; normalize before comparing pks.
        if isinstance(brother, str):
            brother = int(brother)
        if request.user.brother.pk == brother:
            view = "brother"
    if number is not None:
        classes_taken = classes_taken.filter(number=number)
    if request.method == 'POST':
        if 'filter' in request.POST:
            form = request.POST
            department = ('department', form.get('department'))
            brother = ('brother', form.get('brother'))
            number = ('number', form.get('class_number'))
            # Keep only the filters the user actually filled in.
            kwargs = dict((arg for arg in [department, number, brother] if arg[1] != ""))
            return HttpResponseRedirect(reverse('dashboard:classes', kwargs=kwargs))
        elif 'unadd_self' in request.POST:
            form = request.POST
            class_taken = Classes.objects.get(pk=form.get('class'))
            class_taken.brothers.remove(request.user.brother)
            # Drop classes nobody has taken any more.
            if not class_taken.brothers.exists():
                class_taken.delete()
    context = {
        'classes_taken': classes_taken,
        'departments': Classes.objects.all().values_list('department', flat=True).distinct,
        'brothers': Brother.objects.order_by('last_name', 'first_name'),
        'filter_department': department,
        'filter_number': number,
        'filter_brother': brother,
        'view': view,
    }
    return render(request, "general/classes.html", context)
@login_required
def classes_add(request):
    """Let the logged-in brother record a class he has taken, with a grade.

    BUGFIX: added @login_required — the original view accessed
    request.user.brother unguarded, so an anonymous request crashed; this
    also makes it consistent with the other views in this module.
    """
    form = ClassTakenForm(request.POST or None)
    brother = request.user.brother
    if request.method == 'POST':
        if form.is_valid():
            instance = form.save(commit=False)
            # Department codes are stored upper-case so duplicates collapse.
            instance.department = instance.department.upper()
            class_taken, _created = Classes.objects.get_or_create(department=instance.department, number=instance.number)
            class_taken.brothers.add(brother)
            brother_grades = Grade(grade=form.cleaned_data['grade'], class_taken=class_taken, brother=brother)
            brother_grades.save()
            class_taken.save()
            # NOTE(review): brother.pk as a second positional argument becomes
            # the response body; possibly reverse(..., args=[brother.pk]) was
            # intended — confirm before changing.
            return HttpResponseRedirect(reverse('dashboard:classes'), brother.pk)
    context = {
        'form': form,
        'brother': brother,
        'title': 'Add a Class',
    }
    return render(request, "model-add.html", context)
class ClassesDelete(DashboardDeleteView):
    """Confirm-and-delete view for a Classes entry, restricted to officers."""

    @verify_position([Position.PositionChoices.SCHOLARSHIP_CHAIR, Position.PositionChoices.PRESIDENT, Position.PositionChoices.ADVISER])
    def get(self, request, *args, **kwargs):
        # Only the position check is added; deletion itself is inherited.
        return super(ClassesDelete, self).get(request, *args, **kwargs)

    model = Classes
    template_name = 'generic-forms/base-confirm-delete.html'
    success_url = reverse_lazy('dashboard:classes')
| 2.140625 | 2 |
__author__ = '<NAME> <<EMAIL>>'
from unittest import TestSuite
from .testcase_api_key_authorized import ApiKeyAuthorizedTestCase
from .testcase_api_key_unauthorized import ApiKeyUnauthorizedTestCase
from .testcase_create_headers import CreateHttpHeadersTestCase
from .testcase_convert import ConvertTestCase
from .testcase_convert_dict import ConvertDictTestCase
from .testcase_get_api_base import GetApiBaseTestCase
from .testcase_bitcodinobject import BitcodinObjectBooleanTestCase
from .testcase_bitcodinobject import BitcodinObjectLengthTestCase
def get_test_suite():
    """Assemble the core test suite in its canonical execution order."""
    suite = TestSuite()
    cases = (
        ConvertTestCase,
        ConvertDictTestCase,
        GetApiBaseTestCase,
        CreateHttpHeadersTestCase,
        ApiKeyAuthorizedTestCase,
        ApiKeyUnauthorizedTestCase,
        BitcodinObjectBooleanTestCase,
        BitcodinObjectLengthTestCase,
    )
    for case in cases:
        suite.addTest(case())
    return suite
| 2.03125 | 2 |
# -*- coding: utf-8 -*-
from sqlalchemy import Column, Integer
from sqlalchemy.types import Numeric, Unicode
from sqlalchemy.dialects import postgresql
from chsdi.models import register, bases
from chsdi.models.vector import Vector, Geometry2D
Base = bases['zeitreihen']
class Zeitreihen15(Base, Vector):
    """Tooltip model for the 'zeitreihen' layer at the coarsest zoom band
    (resolution 10.05–500005, minimum scale ~1:38k)."""
    __tablename__ = 'tooltip_15'
    __table_args__ = ({'schema': 'public', 'autoload': False})
    __template__ = 'templates/htmlpopup/zeitreihen.mako'
    __bodId__ = 'ch.swisstopo.zeitreihen'
    # Resolution/scale band this table variant serves.
    __minresolution__ = 10.05
    __maxresolution__ = 500005
    __minscale__ = 37984.176
    __timeInstant__ = 'years'
    __label__ = 'release_year'
    id = Column('bgdi_id', Unicode, primary_key=True)
    kbbez = Column('kbbez', Unicode)
    produkt = Column('produkt', Unicode)
    kbnum = Column('kbnum', Unicode)
    release_year = Column('release_year', Integer)
    years = Column('years', Integer)
    bv_nummer = Column('bv_nummer', Unicode)
    bgdi_order = Column('bgdi_order', Integer)
    array_release_years = Column('array_release_years', postgresql.ARRAY(Integer))
    box2d = Column('box2d', Unicode)
    the_geom = Column(Geometry2D)
class Zeitreihen20(Base, Vector):
    """Tooltip model for the 'zeitreihen' layer, resolution band 5.05–10.05."""
    __tablename__ = 'tooltip_20'
    __table_args__ = ({'schema': 'public', 'autoload': False})
    __template__ = 'templates/htmlpopup/zeitreihen.mako'
    __bodId__ = 'ch.swisstopo.zeitreihen'
    __minresolution__ = 5.05
    __maxresolution__ = 10.05
    __minscale__ = 19086.576
    __maxscale__ = 37984.176
    __timeInstant__ = 'years'
    __label__ = 'release_year'
    id = Column('bgdi_id', Unicode, primary_key=True)
    kbbez = Column('kbbez', Unicode)
    produkt = Column('produkt', Unicode)
    kbnum = Column('kbnum', Unicode)
    release_year = Column('release_year', Integer)
    years = Column('years', Integer)
    bv_nummer = Column('bv_nummer', Unicode)
    bgdi_order = Column('bgdi_order', Integer)
    array_release_years = Column('array_release_years', postgresql.ARRAY(Integer))
    box2d = Column('box2d', Unicode)
    the_geom = Column(Geometry2D)
class Zeitreihen21(Base, Vector):
    """Tooltip model for the 'zeitreihen' layer, resolution band 2.55–5.05."""
    __tablename__ = 'tooltip_21'
    __table_args__ = ({'schema': 'public', 'autoload': False})
    __template__ = 'templates/htmlpopup/zeitreihen.mako'
    __bodId__ = 'ch.swisstopo.zeitreihen'
    __minresolution__ = 2.55
    __maxresolution__ = 5.05
    __minscale__ = 9637.776
    __maxscale__ = 19086.576
    __timeInstant__ = 'years'
    __label__ = 'release_year'
    id = Column('bgdi_id', Unicode, primary_key=True)
    kbbez = Column('kbbez', Unicode)
    produkt = Column('produkt', Unicode)
    kbnum = Column('kbnum', Unicode)
    release_year = Column('release_year', Integer)
    years = Column('years', Integer)
    bv_nummer = Column('bv_nummer', Unicode)
    bgdi_order = Column('bgdi_order', Integer)
    array_release_years = Column('array_release_years', postgresql.ARRAY(Integer))
    box2d = Column('box2d', Unicode)
    the_geom = Column(Geometry2D)
class Zeitreihen22(Base, Vector):
    """Tooltip model for the 'zeitreihen' layer at the finest zoom band
    (resolution 0–2.55)."""
    __tablename__ = 'tooltip_22'
    __table_args__ = ({'schema': 'public', 'autoload': False})
    __template__ = 'templates/htmlpopup/zeitreihen.mako'
    __bodId__ = 'ch.swisstopo.zeitreihen'
    __minresolution__ = 0
    __maxresolution__ = 2.55
    __minscale__ = 0
    __maxscale__ = 9637.776
    __timeInstant__ = 'years'
    __label__ = 'release_year'
    id = Column('bgdi_id', Unicode, primary_key=True)
    kbbez = Column('kbbez', Unicode)
    produkt = Column('produkt', Unicode)
    kbnum = Column('kbnum', Unicode)
    release_year = Column('release_year', Integer)
    years = Column('years', Integer)
    bv_nummer = Column('bv_nummer', Unicode)
    bgdi_order = Column('bgdi_order', Integer)
    array_release_years = Column('array_release_years', postgresql.ARRAY(Integer))
    box2d = Column('box2d', Unicode)
    the_geom = Column(Geometry2D)
class DufourErst(Base, Vector):
    """Tooltip model for the first-edition Dufour map sheets."""
    __tablename__ = 'view_dufour_erstausgabe'
    __table_args__ = ({'schema': 'public', 'autoload': False})
    __template__ = 'templates/htmlpopup/dufour_erst.mako'
    __bodId__ = 'ch.swisstopo.hiks-dufour'
    __label__ = 'datenstand'
    id = Column('tilenumber', Unicode, primary_key=True)
    kbbez = Column('kbbez', Unicode)
    datenstand = Column('datenstand', Integer)
    bv_nummer = Column('bv_nummer', Unicode)
    the_geom = Column(Geometry2D)
class SiegfriedErst(Base, Vector):
    """Tooltip model for the first-edition Siegfried map sheets."""
    __tablename__ = 'view_siegfried_erstausgabe'
    __table_args__ = ({'schema': 'public', 'autoload': False})
    __template__ = 'templates/htmlpopup/siegfried_erst.mako'
    __bodId__ = 'ch.swisstopo.hiks-siegfried'
    __label__ = 'datenstand'
    id = Column('tilenumber', Unicode, primary_key=True)
    kbbez = Column('kbbez', Unicode)
    # Note: datenstand is Numeric here but Integer in DufourErst.
    datenstand = Column('datenstand', Numeric)
    bv_nummer = Column('bv_nummer', Unicode)
    the_geom = Column(Geometry2D)
# Register each model under its public layer id; the four Zeitreihen
# variants all serve 'ch.swisstopo.zeitreihen' at different scale bands.
register('ch.swisstopo.hiks-siegfried', SiegfriedErst)
register('ch.swisstopo.hiks-dufour', DufourErst)
register('ch.swisstopo.zeitreihen', Zeitreihen15)
register('ch.swisstopo.zeitreihen', Zeitreihen20)
register('ch.swisstopo.zeitreihen', Zeitreihen21)
register('ch.swisstopo.zeitreihen', Zeitreihen22)
| 2.15625 | 2 |
test/smallTests/test_WithoutOGS.py | mcwimm/pyMANGA | 1 | 12770181 | <filename>test/smallTests/test_WithoutOGS.py
# This script tests pyMANGA using seven setups
# The first test only checks whether the setups can be calculated without
# errors
# The second test compares the calculated results with reference results
import sys
from os import path
import os
sys.path.append(
path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
from ProjectLib import XMLtoProject
from TimeLoopLib import TreeDynamicTimeStepping
import unittest
import glob
import os
from lxml import etree
import shutil
from pathlib import Path
import pandas as pd
manga_root_directory = path.dirname(
path.dirname(path.dirname(path.abspath(__file__))))
filepath_examplesetups = path.join(path.dirname(path.abspath(__file__)),
"testSetupsWithoutOGS/*.xml")
xml = glob.glob(filepath_examplesetups)
xml.sort()
example_setups = []
errors = []
errors_compare = []
errors_empty_comparison = []
errors_empty_results = []
testlist = []
global output_exist
output_exist = str
global seperator
seperator = "/"
# MARKER:
if xml:
for xmlfile in xml:
print("________________________________________________")
print("In the following the setup", xmlfile, "is tested.")
print("________________________________________________")
def findChild(parent, key):
child = parent.find(key)
return child
tree = etree.parse(xmlfile)
root = tree.getroot()
for tag in root.iter():
tag.text = tag.text.strip()
output = findChild(root, "tree_output")
output_type_xml_element = findChild(output, "type")
output_type = output_type_xml_element.text
if not output_type == "NONE":
output_dir_xml_element = findChild(output, "output_dir")
output_dir = path.join(manga_root_directory,
output_dir_xml_element.text)
if not os.path.exists(output_dir):
output_exist = False
os.makedirs(output_dir)
else:
output_exist = True
old_results = glob.glob(path.join(output_dir, "*.*"))
if old_results:
for result in old_results:
os.remove(result)
e, filename = os.path.split(xmlfile)
else:
errors_empty_results.append(xmlfile)
e, filename = os.path.split(xmlfile)
comparison_file_dir_in_pieces = (path.join(
path.dirname(path.abspath(__file__))), "referenceFiles", filename,
"*.*")
comparison_file_dir = seperator.join(comparison_file_dir_in_pieces)
files_comparison = glob.glob(comparison_file_dir)
example_setups.append(filename)
        class MyTest(unittest.TestCase):
            """Per-setup test case; closes over the loop variables of the
            enclosing `for xmlfile in xml` loop (xmlfile, output_dir,
            files_comparison, error lists)."""

            def test1(self):
                # Test of MANGA project file and the correct calculation of its
                try:
                    prj = XMLtoProject(xml_project_file=xmlfile)
                    time_stepper = TreeDynamicTimeStepping(prj)
                    prj.runProject(time_stepper)
                # Storing failed test for clear evaluation
                except:
                    self.fail(errors.append(xmlfile))

            def test2(self):
                # Query whether a reference file for the setup is not available
                if not files_comparison:
                    errors_empty_comparison.append(xmlfile)
                # If a reference file is available, it will be compared with the
                # calculated results
                else:
                    files_result = glob.glob(path.join(output_dir, "*"))
                    if files_result:
                        for y in range(len(files_result)):
                            # Element-wise difference of the two result tables
                            # (tree-id column dropped) must be all zero.
                            test = (
                                pd.read_csv(files_result[y],
                                            delimiter='\t').drop('tree',
                                                                 axis=1) -
                                pd.read_csv(
                                    files_comparison[y], delimiter='\t').drop(
                                        'tree', axis=1)).values.any() == 0
                            try:
                                assert test == True
                            except:
                                self.fail(errors_compare.append(xmlfile))
if __name__ == "__main__":
unittest.main(exit=False)
# remove created output
if not output_type == "NONE":
if not output_exist:
shutil.rmtree((output_dir[:-1]), ignore_errors=True)
elif output_exist:
old_results = glob.glob(path.join(output_dir, "*.*"))
for result in old_results:
os.remove(result)
print("The setup", xmlfile, "was tested.")
print("________________________________________________")
# Closing banner for the test run.
# BUG FIX: the original string literal contained a stray 'print("")' line
# (debug leftover that was printed verbatim); it has been removed from the
# banner text.
print("""
      The testing of all setups is finished.
      ________________________________________________
      ________________________________________________
      ########
      #Report#
      ########
      ________________________________________________
      ________________________________________________
      """)
# ---- Report section 1: which setups ran and the first-test outcome ----
if len(example_setups) != 1:
    print("The following sample setups have been tested:")
else:
    # fixed grammar: singular subject takes "has"
    print("The following sample setup has been tested:")
print("")
for setup in example_setups:
    print("")
    print(setup)
print("________________________________________________")
print("________________________________________________")
print("")
print("Result of the first test:")
print("")
if errors:
    # fixed typo: "occured" -> "occurred" (matches the spelling used in
    # the second report section)
    print("An error occurred while testing the following setup(s):")
    # iterate the list directly instead of indexing via range(len(...))
    for failed_setup in errors:
        print("")
        print(failed_setup)
        print("")
else:
    # fixed grammar: "were" -> "was"
    print("The first test of all setups was successful.")
print("________________________________________________")
print("________________________________________________")
print("")
print("Result of the second test:")
print("")
# ---- Report section 2: comparison of results with reference files ----
if errors_empty_comparison and errors_compare:
    # Some setups failed the comparison AND some had no reference file.
    print('An error occurred when comparing the result of the following '
          'setup:')
    for failed_setup in errors_compare:
        print("")
        print(failed_setup)
        print("")
    print('It should be noted further:')
    print('There are missing files for the comparison of the result '
          'of the following setups:')
    for missing_setup in errors_empty_comparison:
        print("")
        print(missing_setup)
        print("")
elif errors_empty_comparison:
    # Only missing reference files, no comparison failures.
    print("There is/are missing file(s) for the comparison of the result "
          "of the following setup(s):")
    print("")
    for missing_setup in errors_empty_comparison:
        print("")
        print(missing_setup)
        print("")
    print("The comparison of the result of the other setups "
          "with the comparison files was successful.")
else:
    if errors_compare:
        print("An error occurred when comparing the result(s) of the "
              "following setup(s) with the comparison file(s):")
        print("")
        for failed_setup in errors_compare:
            print("")
            print(failed_setup)
            print("")
        if errors_empty_results:
            print("Please also note that the following sample setup(s) "
                  "do not save model results and therefore could not "
                  "be checked:")
            print("")
            # BUG FIX: the original did `n = len(errors_empty_results)`
            # followed by `for x in n:` - iterating over an *integer*
            # raises TypeError.
            for skipped_setup in errors_empty_results:
                print(skipped_setup)
                print("")
    else:
        if errors_empty_results:
            # BUG FIX: the original message contained a stray '"' before
            # "checked", the loop below iterated over an integer (see
            # above), and it printed from the wrong list (errors_compare
            # instead of errors_empty_results).
            print("""The comparison of the result of the setups
                  with the comparison files was successful. Please
                  note, however, that the following sample setups do
                  not save model results and therefore could not be
                  checked:""")
            print("")
            for skipped_setup in errors_empty_results:
                print("")
                print(skipped_setup)
                print("")
        else:
            print("The comparison of the result of the setups "
                  "with the comparison files was successful.")
print("________________________________________________")
print("________________________________________________")
else:
print("Unfortunately no project-file could be found.")
| 2.390625 | 2 |
domintell/controller.py | yaccri/python-domintell | 1 | 12770182 | <gh_stars>1-10
"""
:author: <NAME> <<EMAIL>>
Port for Domintell
:author: <NAME> <<EMAIL>>
"""
import logging
import time
import json
import domintell
from domintell.utils import ModuleJSONEncoder
# Coarse grouping of Domintell module type names, used by
# Controller.get_modules() to filter the discovered-module registry.
MODULE_CATEGORIES = {
    # todo: fix modules
    'switch': ['VMB4RYLD', 'VMB4RYNO'],
    'sensor': ['VMB6IN', 'VMB7IN']
}
class DomintellConnection(object):
    """Abstract base for transports that talk to the Domintell bus.

    Concrete subclasses (e.g. UDP or RS232 connections) must override
    :meth:`send`; :meth:`set_controller` wires the connection back to the
    owning :class:`Controller`.
    """

    # Controller instance this connection reports to (set via
    # set_controller()).
    controller = None

    def set_controller(self, controller):
        """Attach the owning controller.

        :param controller: the :class:`Controller` driving this connection
        :return: None
        """
        assert isinstance(controller, Controller)
        self.controller = controller

    def send(self, message, callback=None):
        """Transmit ``message`` on the bus; subclasses must implement this.

        :return: None
        """
        raise NotImplementedError
class Controller(object):
    """
    Domintell Bus connection controller.
    Owns the transport (UDP or RS232), feeds incoming bytes to the parser,
    maintains a registry of discovered modules and fans parsed messages out
    to subscribers.
    """
    def __init__(self, port):
        self.logger = logging.getLogger('domintell')
        self.parser = domintell.DomintellParser(self)
        self.__subscribers = []
        self.__scan_callback = None
        self._modules = {}
        # "host:port" selects the UDP transport; anything else is treated
        # as a serial device name for RS232.
        if ":" in port:
            self.connection = domintell.UDPConnection(port, self)
        else:
            self.connection = domintell.RS232Connection(port, self)
    def feed_parser(self, data):
        """
        Feed parser with new data
        :param data: raw bytes received from the transport
        :return: None
        """
        assert isinstance(data, bytes)
        self.parser.feed(data)
    def subscribe(self, subscriber):
        """
        Register a callable that will be invoked with every parsed message.
        :return: None
        """
        self.__subscribers.append(subscriber)
    def parse(self, message):
        """
        Parse a single raw message.
        :return: domintell.Message or None
        """
        return self.parser.parse(message)
    def unsubscribe(self, subscriber):
        """
        Remove a previously registered subscriber.
        :return: None
        """
        self.__subscribers.remove(subscriber)
    def send(self, message, callback=None):
        """
        Send a message over the active connection.
        :return: None
        """
        self.connection.send(message, callback)
    def get_modules(self, category):
        """
        Returns a list of modules from a specific category
        (a key of MODULE_CATEGORIES, e.g. 'switch' or 'sensor').
        :return: list
        """
        result = []
        for module in self._modules.values():
            if module.get_module_name() in MODULE_CATEGORIES[category]:
                result.append(module)
        return result
    def scan(self, callback=None):
        """
        Scan the bus discovered modules will com to reader thread
        (module info arrives asynchronously via new_message()).
        :return: None
        """
        # def scan_finished():
        #     """
        #     Callback when scan is finished
        #     """
        #     time.sleep(3)
        #     logging.info('Scan finished')
        #     callback()
        message = domintell.AppInfoRequest()
        self.send(message)
    def login(self, password):
        # Authenticate against the Domintell master before other requests.
        message = domintell.LoginRequest(password)
        self.send(message)
    def new_message(self, message):
        """
        Handle one parsed message: register modules from APPINFO replies,
        dump the registry when discovery finishes, then forward the message
        to all subscribers.
        :return: None
        """
        self.logger.debug("New message: [" + str(message) + "]")
        if isinstance(message, domintell.ModuleInfoMessage):
            # do something with module data here
            self.logger.info("Domintell module info message received")
            module_type = message.moduleType
            serial_number = message.serialNumber
            # print(domintell.ModuleRegistry)
            self.add_module(module_type, serial_number)
        elif isinstance(message, domintell.ControllMessage):
            if message.moduleType == 'END APPINFO':
                # all APPINFO received
                logging.info("All APPINFO received")
                # TODO move to config
                with open('modules.js', 'w') as f:
                    m = self._modules
                    # move encoding into config , encoding='iso8859_13'
                    json.dump(m, f, cls=ModuleJSONEncoder)
        # forward message to listeners
        for subscriber in self.__subscribers:
            subscriber(message)
    def add_module(self, module_type, serial_number):
        """
        Create and add device
        Returns the (possibly pre-existing) module instance, or None when
        the module type is not supported.
        :param self:
        :param module_type:
        :param serial_number:
        """
        if module_type in domintell.ModuleRegistry:
            # we support this module
            if serial_number in self._modules:
                # serial number already registered
                pass
            else:
                module = domintell.ModuleRegistry[module_type](serial_number, self)
                self._modules[serial_number] = module
            return self._modules[serial_number]
        else:
            self.logger.warning("!! Module " + module_type + " is not yet supported. !!")
            return None
    def get_module(self, serial_number):
        """
        Get device by serial number
        Returns None when no module with that serial number is known.
        """
        if serial_number in self._modules:
            return self._modules[serial_number]
        else:
            return None
    def stop(self):
        """
        Stop domintell
        (shuts down the underlying connection).
        """
        self.connection.stop()
    def start_ping(self, ping_interval):
        """
        Start ping service
        :param self:
        :param ping_interval: seconds between keep-alive pings
        """
        self.connection.start_ping(ping_interval)
| 2.578125 | 3 |
python/quicksort.py | tushariscoolster/algorithm | 2 | 12770183 | <gh_stars>1-10
def quickSort(toSort):
    """Return a sorted list built from ``toSort`` via recursive quicksort.

    The last element serves as the pivot; elements <= pivot go into the
    left partition, the rest into the right, and both partitions are
    sorted recursively.  Lists of length <= 1 are returned unchanged.
    """
    if len(toSort) <= 1:
        return toSort
    pivot = toSort[-1]
    smaller = [item for item in toSort[:-1] if item <= pivot]
    larger = [item for item in toSort[:-1] if item > pivot]
    ordered = quickSort(smaller)
    ordered.append(pivot)
    ordered.extend(quickSort(larger))
    return ordered
def main():
    """Demo entry point: sort a fixed list and print the result.

    BUG FIX: the original used the Python-2-only statement form
    ``print sortedList`` (a SyntaxError on Python 3); ``print(...)`` with a
    single argument behaves identically on both Python 2 and 3.
    """
    sample = [1, 3, 6, 9, 200, 5678, 76, 45, 23, 44, 81, 121, 11]
    print(quickSort(sample))

if __name__ == '__main__':
    main()
# Average-case complexity: O(n log n)
svc-websocket/app/controllers/api/example_api.py | jamessaldo/knowledge-chatbot | 0 | 12770184 | <reponame>jamessaldo/knowledge-chatbot
from flask_restful import Resource
from flask_restful import reqparse
from app.helpers import rest
class ExampleAPIById(Resource):
    """Example endpoint demonstrating a GET with a URL parameter."""

    def get(self, id):
        payload = {"get": "ok"}
        return rest.response(200, data=payload, message="GET ok" + id)
class ExampleApi(Resource):
    """Example endpoint demonstrating a parameterless GET."""

    def get(self):
        payload = {"get": "ok"}
        return rest.response(200, data=payload, message="OK")
class ExampleApiDelete(Resource):
    """Example endpoint demonstrating DELETE with a URL parameter."""

    def delete(self, id):
        payload = {"delete": "ok"}
        return rest.response(200, data=payload, message="OK" + id)
class ExampleApiInsert(Resource):
    """Example endpoint demonstrating POST with request-body parsing."""

    def post(self):
        body = reqparse.RequestParser()
        body.add_argument('key', type=str, required=True)
        body.add_argument('key1', type=str, required=False)
        args = body.parse_args()
        try:
            data = {"user": args['key'], "password": args['key1']}
        except Exception as e:
            # Mirror the original behavior: any failure building the
            # payload yields a 401 with the error text.
            return rest.response(401, message=str(e))
        return rest.response(200, data=data, message="POST Ok")
class ExampleApiUpdate(Resource):
    """Example endpoint demonstrating PUT with request-body parsing."""

    def put(self, id):
        body = reqparse.RequestParser()
        body.add_argument('key', type=str, required=True)
        body.add_argument('key1', type=str, required=False)
        args = body.parse_args()
        try:
            data = {"user": args['key'], "password": args['key1']}
        except Exception as e:
            # Mirror the original behavior: any failure building the
            # payload yields a 401 with the error text.
            return rest.response(401, message=str(e))
        return rest.response(200, data=data, message="PUT Ok By" + id)
souper/main.py | spookey/souper | 0 | 12770185 | <filename>souper/main.py
from logging import getLogger
from souper.base import APP_NAME
from souper.lib.args import arguments
from souper.lib.note import keep_args, setup_logging
from souper.load import Load
from souper.page import Page
from souper.site import Site
LOG = getLogger(__name__)
def main():
    """CLI entry point for souper.

    Parses the command line, configures logging, runs the site scrape and
    returns a process exit code: 0 on success, 1 when the configured user
    is invalid.
    """
    cli_args = arguments()
    setup_logging(cli_args)
    keep_args(cli_args)
    LOG.info('%s ready', APP_NAME)
    page = Page(cli_args)
    if not page.user_valid:
        # Bail out early with a non-zero exit code on an invalid user.
        return 1
    site = Site(Load(page, cli_args), cli_args)
    site()
    LOG.info('%s done', APP_NAME)
    return 0
| 2.28125 | 2 |
server/data.py | ParkingPrediXion/ParkingPrediXion | 2 | 12770186 | import json
import numpy as np
def get_timestamps(evts):
    """Return the ``timestamp`` value of every entry in ``evts['content']``."""
    timestamps = []
    for event in evts['content']:
        timestamps.append(event['timestamp'])
    return timestamps
def get_bucket(dt):
    """Map a datetime to an hour-of-week bucket in [0, 167].

    Monday 00:00 is bucket 0; buckets advance one per hour through the
    week (Sunday 23:00 is bucket 167).
    """
    hours_per_day = 24
    return dt.hour + hours_per_day * dt.weekday()
from collections import namedtuple
# Container bundling the three datasets served by the application.
AllData = namedtuple('AllData', ['spots', 'trends', 'total'])
def load_data():
    # NOTE(review): stub - currently returns None; presumably meant to
    # build and return an AllData instance.  TODO implement or remove.
    pass
| 2.796875 | 3 |
effective_python/metaclass_property/getattr_demo.py | ftconan/python3 | 1 | 12770187 | <reponame>ftconan/python3
"""
@author: magician
@file: getattr_demo.py
@date: 2020/1/14
"""
class LazyDB(object):
    """Demo of ``__getattr__``: missing attributes are created on first use.

    ``__getattr__`` only runs when normal lookup fails, so each generated
    value is memoized with ``setattr`` and later reads hit the instance
    ``__dict__`` directly (no further ``__getattr__`` calls).
    """
    def __init__(self):
        self.exists = 5
    def __getattr__(self, name):
        generated = 'Value for {}'.format(name)
        setattr(self, name, generated)
        return generated
class LoggingLazyDB(LazyDB):
    """LazyDB variant that reports each ``__getattr__`` miss to stdout."""
    def __getattr__(self, name):
        message = 'Called __getattr__(%s)' % name
        print(message)
        return super().__getattr__(name)
class ValidateDB(object):
    """Demo of ``__getattribute__``: intercepts *every* attribute access.

    Normal lookup is attempted first; when it raises AttributeError a
    placeholder value is synthesized, memoized and returned.
    """
    def __init__(self):
        self.exists = 5
    def __getattribute__(self, name):
        print('Called __getattribute__(%s)' % name)
        try:
            return super().__getattribute__(name)
        except AttributeError:
            fallback = 'Value for %s' % name
            setattr(self, name, fallback)
            return fallback
class MissingPropertyDB(object):
    """Demo: ``__getattr__`` that rejects one specific attribute name.

    Accessing ``bad_name`` raises AttributeError; any other missing
    attribute resolves to None (the original relied on an implicit
    return).
    """
    def __getattr__(self, name):
        if name != 'bad_name':
            return None
        raise AttributeError('%s is missing' % name)
class SavingDB(object):
    """Demo base for ``__setattr__``: forwards every assignment to object.

    A real implementation would persist the assignment to a DB log here.
    """
    def __setattr__(self, name, value):
        """Store the attribute via the default object machinery."""
        super().__setattr__(name, value)
class LoggingSavingDB(SavingDB):
    """SavingDB variant that reports each attribute assignment."""
    def __setattr__(self, name, value):
        message = 'Called __setattr__(%s, %r)' % (name, value)
        print(message)
        super().__setattr__(name, value)
class BrokenDictionaryDB(object):
    """
    BrokenDictionaryDB
    Deliberately broken demo: ``__getattribute__`` reads ``self._data``,
    which itself goes through ``__getattribute__`` again, so any attribute
    access recurses until RecursionError.  DictionaryDB (below, in the
    original file) shows the fix via ``super().__getattribute__``.
    """
    def __init__(self, data):
        self._data = data
    def __getattribute__(self, name):
        print('Called __getattribute__(%s)' % name)
        # Infinite recursion: this attribute access re-enters this method.
        return self._data[name]
class DictionaryDB(object):
    """Dict-backed attribute store done correctly.

    Fetches the backing dict with ``super().__getattribute__('_data')``,
    which bypasses the overridden ``__getattribute__`` and avoids the
    infinite recursion demonstrated by BrokenDictionaryDB.
    """
    def __init__(self, data):
        self._data = data
    def __getattribute__(self, name):
        backing = super().__getattribute__('_data')
        return backing[name]
if __name__ == '__main__':
    # --- LazyDB: attribute materializes on first access ---
    data = LazyDB()
    print('Before: ', data.__dict__)
    print('foo: ', data.foo)
    print('After: ', data.__dict__)
    # --- LoggingLazyDB: __getattr__ fires only on the first miss ---
    data = LoggingLazyDB()
    print('exists: ', data.exists)
    print('foo: ', data.foo)
    print('foo: ', data.foo)
    # --- ValidateDB: __getattribute__ fires on every access ---
    data = ValidateDB()
    print('exists: ', data.exists)
    print('foo: ', data.foo)
    print('foo: ', data.foo)
    # --- MissingPropertyDB: selective AttributeError ---
    data = MissingPropertyDB()
    try:
        data.bad_name
    except Exception as e:
        print(e)
    # --- hasattr() triggers __getattr__, memoizing the attribute ---
    data = LoggingLazyDB()
    print('Before: ', data.__dict__)
    print('foo exists: ', hasattr(data, 'foo'))
    print('After: ', data.__dict__)
    print('foo exists: ', hasattr(data, 'foo'))
    data = ValidateDB()
    print('foo exists: ', hasattr(data, 'foo'))
    print('foo exists: ', hasattr(data, 'foo'))
    # --- LoggingSavingDB: __setattr__ fires on every assignment ---
    data = LoggingSavingDB()
    print('Before: ', data.__dict__)
    data.foo = 5
    print('After: ', data.__dict__)
    data.foo = 7
    print('Finally: ', data.__dict__)
    # --- BrokenDictionaryDB: demonstrates the recursion error ---
    data = BrokenDictionaryDB({'foo': 3})
    try:
        data.foo
    except Exception as e:
        print(e)
    # --- DictionaryDB: the corrected dict-backed lookup ---
    data = DictionaryDB({'foo': 3})
    print(data.foo)
| 2.96875 | 3 |
towhee/pipelines/image_embedding_pipeline.py | L-Net-1992/towhee | 0 | 12770188 | # Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Union
import yaml
from pathlib import Path
from towhee.hparam.hyperparameter import param_scope
from towhee.pipelines.alias_resolvers import get_resolver
from towhee.pipelines.base import PipelineBase
from towhee import Inject, pipeline
class ImageEmbeddingPipeline(PipelineBase):
    """
    Pipeline for image embedding tasks.
    Args:
        model: (`str` or `List[str]`)
            Specifies the model used for image embedding. The user can pass a list
            of model names to create a pipeline ensembling multiple models.
            Supported models:
                `vgg`,
                `resnet50`, `resnet101`,
                `swin-transformer`,
                `vit`,
                ...
        ensemble: (`str`)
            Specifies the type of model ensemble. This argument works iff
            multiple model names are given via `model`.
            Supported ensemble types:
                `linear`,
    """
    def __init__(self, model: Union[Any, List[Any]] = None, ensemble: str = None):
        with param_scope() as hp:
            resolver = get_resolver(hp().towhee.alias_resolver('local'))
        # Normalize `model` to a list, then resolve any string aliases to
        # concrete operator specs.
        models: List[Any] = [model] if isinstance(model, str) else model
        num_branch = len(models)
        models = [resolver.resolve(m) if isinstance(m, str) else m
                  for m in models]
        # The built-in template supports up to three parallel branches.
        operators = dict(zip([
            'embedding_model_1',
            'embedding_model_2',
            'embedding_model_3',
        ], models))
        if ensemble is not None:
            operators['ensemble_model'] = resolver.resolve(
                ensemble) if isinstance(ensemble, str) else ensemble
        injections = {name: {'function': op.function, 'init_args': op.init_args}
                      for name, op in operators.items()}
        self._pipeline = Inject(**injections).pipeline(
            'builtin/image_embedding_template_{}'.format(num_branch))
    def __call__(self, *arg, **kws):
        """Run the underlying pipeline on the given input(s)."""
        return self._pipeline(*arg, **kws)
    def save(self, name: str, path: Union[str, Path] = None):
        """Save the pipeline definition as ``<path>/<name>/<name>.yaml``.

        BUG FIX: the original declared ``path=Path.cwd()`` as the default,
        which is evaluated once at import time; the default now resolves
        the current working directory at call time.

        :param name: directory/file stem for the saved pipeline
        :param path: parent directory (defaults to the current working
                     directory at call time)
        :raises FileExistsError: if the target directory already exists
        """
        path = Path(path) if path is not None else Path.cwd()
        operator_path = path / name
        if operator_path.exists():
            raise FileExistsError(operator_path)
        operator_path.mkdir(parents=True)
        with open('{}/{}.yaml'.format(operator_path, name), 'w', encoding='utf-8') as f:
            info = yaml.safe_load(self._pipeline.pipeline.graph_repr.ir)
            info['name'] = name
            f.write(yaml.safe_dump(info))
    def push_to_hub(self, version: str = 'main'):
        # TODO: push to hub with new hub tool
        pass
def image_embedding_pipeline(model: Union[str, List[str]] = None,
                             ensemble: str = None,
                             name: str = None,
                             version: str = None):
    """Create or load a pipeline for image embedding tasks.

    An image embedding pipeline converts input images into feature vectors
    (embeddings) usable for image retrieval, classification and similar
    vision tasks.  Two modes are supported:

    1. Load a saved/shared pipeline from the towhee hub by ``name`` (and
       optional ``version``)::

           >>> pipe = image_embedding_pipeline(name='your_name/my_pipeline')

    2. Build a new pipeline from one or more backbone ``model`` names; when
       several models are given, their outputs are fused with the
       ``ensemble`` model::

           >>> pipe = image_embedding_pipeline(model='resnet101')
           >>> embedding = pipe('uri_to_image')

       The result can be saved via ``pipe.save(...)`` and published to the
       towhee hub to share it with the community.

    Args:
        model (Union[str, List[str]], optional): backbone model name(s).
        ensemble (str, optional): ensemble model fusing backbone outputs.
        name (str, optional): hub name of a saved pipeline (takes
            precedence over ``model``).
        version (str, optional): version/tag of the named pipeline.

    Returns:
        Pipeline: an image embedding pipeline, or None when neither
        ``name`` nor ``model`` is given.
    """
    if name is not None:
        return pipeline(name, tag=version)
    if model is not None:
        return ImageEmbeddingPipeline(model=model, ensemble=ensemble)
| 2.046875 | 2 |
gallery/urls.py | D-GopalKrishna/RobotixWeb2021 | 0 | 12770189 | <reponame>D-GopalKrishna/RobotixWeb2021
from django.urls import path
from . import views
# Routes for the gallery app: the HTML page and its JSON API endpoint.
urlpatterns = [
    path('',views.gallery,name='gallery'),
    path('api/',views.gallery_api,name='gallery_api')
]
| 1.742188 | 2 |
projects/rich.py | Acemyzoe/data_anasylsis | 0 | 12770190 | from rich import print
#print("Hello, [bold magenta]World[/bold magenta]!", ":vampire:", locals())
from rich.console import Console
console = Console()
console.print("Hello", "World!", style="bold red")
console.print("Hello", style="5")
console.print("Hello", style="#af00ff")
console.print("Hello", style="rgb(175,0,255)")
console.print("DANGER!", style="red on white")
console.print("Where there is a [bold cyan]Will[/bold cyan] there [u]is[/u] a [i]way[/i].")
console.print([1, 2, 3])
console.print("[blue underline]Looks like a link")
console.print(locals())
console.print("FOO", style="white on blue")
console.print("Google", style="link https://google.com")
console.log("Hello, World!")
console.input("What is [i]your[/i] [bold red]name[/]? :smiley: ")
from rich.panel import Panel
print(Panel("Hello, [red]World!"))
from rich.theme import Theme
custom_theme = Theme({
"info" : "dim cyan",
"warning": "magenta",
"danger": "bold red"
})
console = Console(theme=custom_theme)
console.print("This is information", style="info")
console.print("Something terrible happened!", style="danger")
from rich.text import Text
text = Text("Hello, World!")
text.stylize(0, 8, "bold magenta")
console.print(text)
from rich.highlighter import RegexHighlighter
class EmailHighlighter(RegexHighlighter):
    """Apply style to anything that looks like an email."""
    # Styles emitted by this highlighter are namespaced "example.*", so the
    # named group below produces the style key "example.email".
    base_style = "example."
    highlights = [r"(?P<email>[\w-]+@([\w-]+\.)+[\w-]+)"]
# Wire the email highlighter to a console via a matching theme entry.
theme = Theme({"example.email": "bold magenta"})
console = Console(highlighter=EmailHighlighter(), theme=theme)
console.print("Send funds to <EMAIL>")
# Table rendering demo.
# NOTE(review): "Ep. V111" and "Rouge One" are typos in the demo data
# ("VIII", "Rogue One") - left unchanged, as is the duplicated box-office
# figure.
from rich.table import Table
table = Table(title="Star Wars Movies")
table.add_column("Released", justify="right", style="cyan", no_wrap=True)
table.add_column("Title", style="magenta")
table.add_column("Box Office", justify="right", style="green")
table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$952,110,690")
table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
table.add_row("Dec 15, 2017", "Star Wars Ep. V111: The Last Jedi", "$1,332,539,889")
table.add_row("Dec 16, 2016", "Rouge One: A Star Wars Story", "$1,332,439,889")
console = Console()
console.print(table)
# Markdown rendering demo.
MARKDOWN = """
# This is a h1
Rich can do a pretty *decent* job of rendering markdown.
1. This is a list item
2. This is another list item
"""
from rich.markdown import Markdown
console = Console()
md = Markdown(MARKDOWN)
console.print(md)
| 3.203125 | 3 |
factorial_recursion.py | amalshehu/Python-Introduction | 0 | 12770191 | # File: factorial_recursion.py
# Purpose: Example: FActorial using recursion
# Programmer: <NAME>
# Course: Practice
# Date: Sunday 28th August 2016, 11:10 PM
# Read the value to compute the factorial of (runs at import time).
num = int(input("Enter a number")) # Convert to an int
def factorial(num):
    """Return ``num!`` computed recursively.

    BUG FIX: the original base case was a bare ``return`` (i.e. None), so
    ``num * factorial(num - 1)`` raised TypeError for every num >= 1; the
    factorial of 0 is 1.  Negative input now raises ValueError instead of
    recursing until RecursionError.
    """
    if num < 0:
        raise ValueError("factorial() not defined for negative values")
    if num == 0:
        return 1
    return num * factorial(num - 1)  # Factorial using recursion
# Compute and display the factorial of the value entered above.
result = factorial(num)
print(result)
| 4.59375 | 5 |
reqcli/source/__init__.py | shiftinv/reqcli | 0 | 12770192 | from .basesource import BaseSource
from .config import SourceConfig
from .reqdata import CertType, ReqData
from .status import StatusCheckMode
from .unloadable import UnloadableType
| 1.046875 | 1 |
tests/kernel_duration/gpt.py | drunkcoding/model-inference | 1 | 12770193 | <gh_stars>1-10
from dataclasses import dataclass, field
from functools import partial
import logging
import os
import time
from transformers import (
AutoModelWithLMHead,
GPT2Tokenizer,
HfArgumentParser,
AutoTokenizer
)
import pandas as pd
from datasets import load_dataset
from tqdm import tqdm
import numpy as np
import torch
from hfutils.logger import Logger
from hfutils.pipe.gpt import GPTLMHeadModelPipe
from hfutils.measure import get_energy_by_group, get_gpu_uuid
@dataclass
class Arguments:
    """Command-line options consumed via HfArgumentParser."""
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    batch_size: int = field(
        metadata={"help": "batch size for profiling kernel"},
    )
# Rotating file logger for the profiling run (50 MB per file, 5 backups).
logger = Logger(__file__, logging.INFO, 50000000, 5)
parser = HfArgumentParser(Arguments)
args = parser.parse_args_into_dataclasses()[0]
basename = os.path.basename(args.model_name_or_path)
# logger.info("=================================")
# logger.info("%s", args)
tokenizer = GPT2Tokenizer.from_pretrained(args.model_name_or_path)
# Tokenize the full wikitext-2 validation split as one long sequence.
val_dataset = load_dataset("wikitext", "wikitext-2-raw-v1", split="validation")
# val_dataset = val_dataset.select([x for x in range(500)])
print(val_dataset)
encodings = tokenizer("\n\n".join(val_dataset["text"]), return_tensors="pt")
encodings.input_ids = encodings.input_ids.to(torch.long)
print(encodings.input_ids.shape)
def load_encodings(encodings):
    """Yield sliding windows over the tokenized corpus.

    Produces ``(input_ids, target_ids, trg_len, end_loc)`` tuples for
    overlapping windows of ``max_length`` tokens, advancing ``stride``
    tokens per step.  Targets equal the inputs with the overlapping prefix
    masked to -100; windows shorter than ``max_length`` (at the start and
    end of the corpus) are skipped.
    """
    max_length = 512
    stride = 128
    seq_len = encodings.input_ids.size(1)
    for offset in tqdm(range(0, seq_len, stride)):
        begin_loc = max(offset + stride - max_length, 0)
        end_loc = min(offset + stride, seq_len)
        trg_len = end_loc - offset  # may be shorter than stride at the end
        input_ids = encodings.input_ids[:, begin_loc:end_loc]
        target_ids = input_ids.clone()
        target_ids[:, :-trg_len] = -100
        if input_ids.size(1) != max_length:
            # Skip partial windows so every yielded batch has equal length.
            continue
        yield input_ids, target_ids, trg_len, end_loc
# Target GPU for the profiling run (hard-coded device index).
device_id = 6
device = f"cuda:{device_id}"
uuid = get_gpu_uuid(device_id)
model = AutoModelWithLMHead.from_pretrained(args.model_name_or_path)
model = GPTLMHeadModelPipe(model)
# Larger checkpoints are split into pipeline partitions by parameter count.
if "gpt-j" in args.model_name_or_path:
    model.partition_by_parameter(0, 4)
if "gpt-xl" in args.model_name_or_path:
    model.partition_by_parameter(0, 2)
model.convert(device)
# Snapshot GPU energy before the loop so the delta can be reported later.
start_energy = get_energy_by_group()[uuid]
records_start = []
records_end = []
# Profile per-batch latency over the sliding-window encodings.
# BUG FIX: the original contained leftover debug statements
# (`print(None, outputs[1].shape)` followed by `exit()`) that aborted the
# script after the first batch, so no timings were ever recorded and the
# energy/CSV reporting below was unreachable.
for step, batch in enumerate(tqdm(load_encodings(encodings), desc=f"{args.batch_size}")):
    input_ids = batch[0].to(device)
    start_time = time.perf_counter()
    outputs = model((input_ids, None))
    torch.cuda.empty_cache()
    end_time = time.perf_counter()
    # Skip the first iterations as warm-up.
    if step > 10:
        records_start.append(start_time)
        records_end.append(end_time)
    if step > 100:
        break
# ---- Post-run reporting ----
# BUG FIX: stripped extraction garbage that was fused onto the final line.
end_energy = get_energy_by_group()[uuid]
diff = end_energy - start_energy
# Energy totals, normalized per request and per sample.
logger.info(
    "energy total %s, request %s, sample %s",
    diff,
    diff / step,
    diff / step / args.batch_size,
)
logger.info(
    "memory reserved %s, allocated %s, total %s",
    torch.cuda.memory_reserved(device_id),
    torch.cuda.memory_allocated(device_id),
    torch.cuda.get_device_properties(device_id).total_memory,
)
# Persist per-batch latencies for offline analysis.
df = pd.DataFrame({
    "model": [basename] * len(records_end),
    "batch_size": [args.batch_size] * len(records_end),
    "start_time": records_start,
    "end_time": records_end,
    "latency": np.array(records_end) - np.array(records_start),
})
df.to_csv(os.path.join("profile", f"latency_{basename}_{args.batch_size}.csv"), index=False)
logger.info("%s", df.describe())
oslo_versionedobjects/fields.py | openstack/oslo.versionedobjects | 37 | 12770194 | <filename>oslo_versionedobjects/fields.py
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from collections import abc as collections_abc
import datetime
from distutils import versionpredicate
import re
import uuid
import warnings
import copy
import iso8601
import netaddr
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_versionedobjects._i18n import _
from oslo_versionedobjects import _utils
from oslo_versionedobjects import exception
class KeyTypeError(TypeError):
    """Raised when a mapping key has a type other than the declared one."""
    def __init__(self, expected, value):
        details = {'key': repr(value),
                   'expected': expected.__name__,
                   'actual': value.__class__.__name__,
                   }
        message = _('Key %(key)s must be of type %(expected)s not %(actual)s'
                    ) % details
        super(KeyTypeError, self).__init__(message)
class ElementTypeError(TypeError):
    """Raised when a container element has a type other than the declared one."""
    def __init__(self, expected, key, value):
        details = {'key': key,
                   'val': repr(value),
                   'expected': expected,
                   'actual': value.__class__.__name__,
                   }
        message = _('Element %(key)s:%(val)s must be of type %(expected)s'
                    ' not %(actual)s'
                    ) % details
        super(ElementTypeError, self).__init__(message)
class AbstractFieldType(object, metaclass=abc.ABCMeta):
    """Abstract interface every field type implementation must provide.
    Subclasses define how values are coerced on assignment and converted
    to/from their primitive (serializable) form.
    """
    @abc.abstractmethod
    def coerce(self, obj, attr, value):
        """This is called to coerce (if possible) a value on assignment.
        This method should convert the value given into the designated type,
        or throw an exception if this is not possible.
        :param:obj: The VersionedObject on which an attribute is being set
        :param:attr: The name of the attribute being set
        :param:value: The value being set
        :returns: A properly-typed value
        """
        pass
    @abc.abstractmethod
    def from_primitive(self, obj, attr, value):
        """This is called to deserialize a value.
        This method should deserialize a value from the form given by
        to_primitive() to the designated type.
        :param:obj: The VersionedObject on which the value is to be set
        :param:attr: The name of the attribute which will hold the value
        :param:value: The serialized form of the value
        :returns: The natural form of the value
        """
        pass
    @abc.abstractmethod
    def to_primitive(self, obj, attr, value):
        """This is called to serialize a value.
        This method should serialize a value to the form expected by
        from_primitive().
        :param:obj: The VersionedObject on which the value is set
        :param:attr: The name of the attribute holding the value
        :param:value: The natural form of the value
        :returns: The serialized form of the value
        """
        pass
    @abc.abstractmethod
    def describe(self):
        """Returns a string describing the type of the field."""
        pass
    @abc.abstractmethod
    def stringify(self, value):
        """Returns a short stringified version of a value."""
        pass
class FieldType(AbstractFieldType):
    """Default concrete field type: all conversions are identity functions.
    Subclasses override only the hooks they need.
    """
    @staticmethod
    def coerce(obj, attr, value):
        return value
    @staticmethod
    def from_primitive(obj, attr, value):
        return value
    @staticmethod
    def to_primitive(obj, attr, value):
        return value
    def describe(self):
        return self.__class__.__name__
    def stringify(self, value):
        return str(value)
    def get_schema(self):
        # Subclasses that support JSON-schema generation must override this.
        raise NotImplementedError()
class UnspecifiedDefault(object):
    # Sentinel: distinguishes "no default supplied" from a default of None
    # (which is a legitimate default value for nullable fields).
    pass
class Field(object):
    """Pairs a FieldType with per-field metadata: nullability, a default
    value and a read-only flag.
    """
    def __init__(self, field_type, nullable=False,
                 default=UnspecifiedDefault, read_only=False):
        self._type = field_type
        self._nullable = nullable
        self._default = default
        self._read_only = read_only
    def __repr__(self):
        if isinstance(self._default, set):
            # TODO(stephenfin): Drop this when we switch from
            # 'inspect.getargspec' to 'inspect.getfullargspec', since our
            # hashes will have to change anyway
            # make a py27 and py35 compatible representation. See bug 1771804
            default = 'set([%s])' % ','.join(
                sorted([str(v) for v in self._default])
            )
        else:
            default = str(self._default)
        return '%s(default=%s,nullable=%s)' % (self._type.__class__.__name__,
                                               default, self._nullable)
    @property
    def nullable(self):
        return self._nullable
    @property
    def default(self):
        return self._default
    @property
    def read_only(self):
        return self._read_only
    def _null(self, obj, attr):
        # Resolve a None assignment: pass through when nullable, fall back
        # to the (deep-copied) default, otherwise reject.
        if self.nullable:
            return None
        elif self._default != UnspecifiedDefault:
            # NOTE(danms): We coerce the default value each time the field
            # is set to None as our contract states that we'll let the type
            # examine the object and attribute name at that time.
            return self._type.coerce(obj, attr, copy.deepcopy(self._default))
        else:
            raise ValueError(_("Field `%s' cannot be None") % attr)
    def coerce(self, obj, attr, value):
        """Coerce a value to a suitable type.
        This is called any time you set a value on an object, like:
        foo.myint = 1
        and is responsible for making sure that the value (1 here) is of
        the proper type, or can be sanely converted.
        This also handles the potentially nullable or defaultable
        nature of the field and calls the coerce() method on a
        FieldType to actually do the coercion.
        :param:obj: The object being acted upon
        :param:attr: The name of the attribute/field being set
        :param:value: The value being set
        :returns: The properly-typed value
        """
        if value is None:
            return self._null(obj, attr)
        else:
            return self._type.coerce(obj, attr, value)
    def from_primitive(self, obj, attr, value):
        """Deserialize a value from primitive form.
        This is responsible for deserializing a value from primitive
        into regular form. It calls the from_primitive() method on a
        FieldType to do the actual deserialization.
        :param:obj: The object being acted upon
        :param:attr: The name of the attribute/field being deserialized
        :param:value: The value to be deserialized
        :returns: The deserialized value
        """
        if value is None:
            return None
        else:
            return self._type.from_primitive(obj, attr, value)
    def to_primitive(self, obj, attr, value):
        """Serialize a value to primitive form.
        This is responsible for serializing a value to primitive
        form. It calls to_primitive() on a FieldType to do the actual
        serialization.
        :param:obj: The object being acted upon
        :param:attr: The name of the attribute holding the value
        :param:value: The natural form of the value
        :returns: The serialized value
        """
        if value is None:
            return None
        else:
            return self._type.to_primitive(obj, attr, value)
    def describe(self):
        """Return a short string describing the type of this field."""
        name = self._type.describe()
        prefix = self.nullable and 'Nullable' or ''
        return prefix + name
    def stringify(self, value):
        # None is rendered literally; everything else defers to the type.
        if value is None:
            return 'None'
        else:
            return self._type.stringify(value)
    def get_schema(self):
        # Base schema comes from the type; field metadata (read-only flag,
        # nullability, default) is layered on top.
        schema = self._type.get_schema()
        schema.update({'readonly': self.read_only})
        if self.nullable:
            schema['type'].append('null')
        default = self.default
        if default != UnspecifiedDefault:
            schema.update({'default': default})
        return schema
class String(FieldType):
    """Field type holding text; coerces a few scalar types to str."""
    @staticmethod
    def coerce(obj, attr, value):
        # FIXME(danms): We should really try to avoid the need to do this
        if isinstance(value, (int, float, str, datetime.datetime)):
            return str(value)
        raise ValueError(_('A string is required in field %(attr)s, '
                           'not a %(type)s') %
                         {'attr': attr, 'type': type(value).__name__})
    @staticmethod
    def stringify(value):
        return "'%s'" % value
    def get_schema(self):
        return {'type': ['string']}
class SensitiveString(String):
    """A string field type that may contain sensitive (password) information.
    Passwords in the string value are masked when stringified.
    """
    def stringify(self, value):
        # Mask password-like substrings before delegating to String.
        return super(SensitiveString, self).stringify(
            strutils.mask_password(value))
class VersionPredicate(String):
    """String field whose values must be valid version predicates."""

    @staticmethod
    def coerce(obj, attr, value):
        try:
            # Validation only; the original string is what gets stored
            versionpredicate.VersionPredicate('check (%s)' % value)
        except ValueError:
            raise ValueError(_('Version %(val)s is not a valid predicate in '
                               'field %(attr)s') %
                             {'val': value, 'attr': attr})
        else:
            return value
class Enum(String):
    """String field restricted to a fixed set of valid values."""

    def __init__(self, valid_values, **kwargs):
        if not valid_values:
            raise exception.EnumRequiresValidValuesError()
        try:
            # Every candidate must itself be coercible as a String
            for value in valid_values:
                super(Enum, self).coerce(None, 'init', value)
        except (TypeError, ValueError):
            raise exception.EnumValidValuesInvalidError()
        self._valid_values = valid_values
        super(Enum, self).__init__(**kwargs)

    @property
    def valid_values(self):
        return copy.copy(self._valid_values)

    def _check_valid(self, value):
        # Shared membership check used by coerce() and stringify()
        if value not in self._valid_values:
            raise ValueError(_("Field value %s is invalid") % value)

    def coerce(self, obj, attr, value):
        self._check_valid(value)
        return super(Enum, self).coerce(obj, attr, value)

    def stringify(self, value):
        self._check_valid(value)
        return super(Enum, self).stringify(value)

    def get_schema(self):
        schema = super(Enum, self).get_schema()
        schema['enum'] = self._valid_values
        return schema
class StringPattern(FieldType):
    """Base for string field types validated by a PATTERN regex attribute."""

    def get_schema(self):
        # Guard clause: subclasses are expected to define PATTERN
        if not hasattr(self, "PATTERN"):
            msg = _("%s has no pattern") % self.__class__.__name__
            raise AttributeError(msg)
        return {'type': ['string'], 'pattern': self.PATTERN}
class UUID(StringPattern):
    """String field for UUID values.

    NOTE(review): coercion is deliberately lenient -- any value is
    accepted and stringified; invalid UUIDs only trigger a
    FutureWarning rather than a ValueError, to avoid breaking
    existing users.
    """

    # Accepts both the canonical hyphenated form and the bare 32-hex form
    PATTERN = (r'^[a-fA-F0-9]{8}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]'
               r'{4}-?[a-fA-F0-9]{12}$')

    @staticmethod
    def coerce(obj, attr, value):
        # FIXME(danms): We should actually verify the UUIDness here
        with warnings.catch_warnings():
            # Change the warning action only if no other filter exists
            # for this warning to allow the client to define other action
            # like 'error' for this warning.
            warnings.filterwarnings(action="once", append=True)
            try:
                uuid.UUID("%s" % value)
            except Exception:
                # This is to ensure no breaking behaviour for current
                # users
                warnings.warn("%s is an invalid UUID. Using UUIDFields "
                              "with invalid UUIDs is no longer "
                              "supported, and will be removed in a future "
                              "release. Please update your "
                              "code to input valid UUIDs or accept "
                              "ValueErrors for invalid UUIDs. See "
                              "https://docs.openstack.org/oslo.versionedobjects/latest/reference/fields.html#oslo_versionedobjects.fields.UUIDField "  # noqa
                              "for further details" %
                              repr(value).encode('utf8'),
                              FutureWarning)
            return "%s" % value
class MACAddress(StringPattern):
    """String field holding a lower-cased, colon-separated MAC address."""

    PATTERN = r'^[0-9a-f]{2}(:[0-9a-f]{2}){5}$'
    _REGEX = re.compile(PATTERN)

    @staticmethod
    def coerce(obj, attr, value):
        if isinstance(value, str):
            # Normalize case and separator style before validating
            normalized = value.lower().replace('-', ':')
            if MACAddress._REGEX.match(normalized):
                return normalized
        raise ValueError(_("Malformed MAC %s") % (value,))
class PCIAddress(StringPattern):
    """String field holding a lower-cased PCI address (dddd:bb:dd.f).

    The value is lower-cased before validation; anything that does not
    match the canonical form raises ValueError.
    """

    # The function separator is a literal '.'; the previous pattern used
    # an unescaped '.', which matched any character and accepted
    # malformed addresses such as '0000:00:1fx0'.
    PATTERN = r'^[0-9a-f]{4}:[0-9a-f]{2}:[0-1][0-9a-f]\.[0-7]$'
    _REGEX = re.compile(PATTERN)

    @staticmethod
    def coerce(obj, attr, value):
        if isinstance(value, str):
            newvalue = value.lower()
            if PCIAddress._REGEX.match(newvalue):
                return newvalue
        raise ValueError(_("Malformed PCI address %s") % (value,))
class Integer(FieldType):
    """Field type coercing values with int()."""
    @staticmethod
    def coerce(obj, attr, value):
        return int(value)
    def get_schema(self):
        return {'type': ['integer']}
class NonNegativeInteger(FieldType):
    """Integer field rejecting values below zero."""

    @staticmethod
    def coerce(obj, attr, value):
        coerced = int(value)
        if coerced < 0:
            raise ValueError(_('Value must be >= 0 for field %s') % attr)
        return coerced

    def get_schema(self):
        return {'type': ['integer'], 'minimum': 0}
class Float(FieldType):
    """Field type coercing values with float()."""
    # NOTE(review): coerce() is an instance method here, unlike the
    # staticmethod used on Integer -- presumably historical; behavior
    # is equivalent for callers.
    def coerce(self, obj, attr, value):
        return float(value)
    def get_schema(self):
        return {'type': ['number']}
class NonNegativeFloat(FieldType):
    """Float field rejecting values below zero."""

    @staticmethod
    def coerce(obj, attr, value):
        coerced = float(value)
        if coerced < 0:
            raise ValueError(_('Value must be >= 0 for field %s') % attr)
        return coerced

    def get_schema(self):
        return {'type': ['number'], 'minimum': 0}
class Boolean(FieldType):
    """Field type applying strict Python truthiness via bool()."""
    @staticmethod
    def coerce(obj, attr, value):
        return bool(value)
    def get_schema(self):
        return {'type': ['boolean']}
class FlexibleBoolean(Boolean):
    """Boolean field accepting user-friendly strings ('yes', 'on', ...)."""
    @staticmethod
    def coerce(obj, attr, value):
        return strutils.bool_from_string(value)
class DateTime(FieldType):
    """Field type for datetime.datetime values.

    :param tzinfo_aware: when True (the default), tz-naive datetimes
        are assumed to be UTC and made aware on coercion; when False,
        any tzinfo is stripped so the stored value is always naive.
    """
    def __init__(self, tzinfo_aware=True, *args, **kwargs):
        self.tzinfo_aware = tzinfo_aware
        super(DateTime, self).__init__(*args, **kwargs)
    def coerce(self, obj, attr, value):
        # Accept ISO8601 strings as well as real datetime objects
        if isinstance(value, str):
            # NOTE(danms): Being tolerant of isotime strings here will help us
            # during our objects transition
            value = timeutils.parse_isotime(value)
        elif not isinstance(value, datetime.datetime):
            raise ValueError(_('A datetime.datetime is required '
                               'in field %(attr)s, not a %(type)s') %
                             {'attr': attr, 'type': type(value).__name__})
        if value.utcoffset() is None and self.tzinfo_aware:
            # NOTE(danms): Legacy objects from sqlalchemy are stored in UTC,
            # but are returned without a timezone attached.
            # As a transitional aid, assume a tz-naive object is in UTC.
            value = value.replace(tzinfo=iso8601.UTC)
        elif not self.tzinfo_aware:
            value = value.replace(tzinfo=None)
        return value
    def from_primitive(self, obj, attr, value):
        # Primitive form is an ISO8601 string; go through coerce() so
        # the tzinfo_aware policy is applied consistently.
        return self.coerce(obj, attr, timeutils.parse_isotime(value))
    def get_schema(self):
        return {'type': ['string'], 'format': 'date-time'}
    @staticmethod
    def to_primitive(obj, attr, value):
        return _utils.isotime(value)
    @staticmethod
    def stringify(value):
        return _utils.isotime(value)
class IPAddress(StringPattern):
    """Field type wrapping netaddr.IPAddress values."""

    @staticmethod
    def coerce(obj, attr, value):
        try:
            return netaddr.IPAddress(value)
        except netaddr.AddrFormatError as e:
            raise ValueError(str(e))

    def from_primitive(self, obj, attr, value):
        # The primitive form is just the string representation
        return self.coerce(obj, attr, value)

    @staticmethod
    def to_primitive(obj, attr, value):
        return str(value)
class IPV4Address(IPAddress):
    """IPAddress field restricted to IPv4 values."""

    @staticmethod
    def coerce(obj, attr, value):
        address = IPAddress.coerce(obj, attr, value)
        if address.version != 4:
            raise ValueError(_('Network "%(val)s" is not valid '
                               'in field %(attr)s') %
                             {'val': value, 'attr': attr})
        return address

    def get_schema(self):
        return {'type': ['string'], 'format': 'ipv4'}
class IPV6Address(IPAddress):
    """IPAddress field restricted to IPv6 values."""

    @staticmethod
    def coerce(obj, attr, value):
        address = IPAddress.coerce(obj, attr, value)
        if address.version != 6:
            raise ValueError(_('Network "%(val)s" is not valid '
                               'in field %(attr)s') %
                             {'val': value, 'attr': attr})
        return address

    def get_schema(self):
        return {'type': ['string'], 'format': 'ipv6'}
class IPV4AndV6Address(IPAddress):
    """IPAddress field accepting either IPv4 or IPv6 values."""

    @staticmethod
    def coerce(obj, attr, value):
        address = IPAddress.coerce(obj, attr, value)
        if address.version not in (4, 6):
            raise ValueError(_('Network "%(val)s" is not valid '
                               'in field %(attr)s') %
                             {'val': value, 'attr': attr})
        return address

    def get_schema(self):
        return {'oneOf': [IPV4Address().get_schema(),
                          IPV6Address().get_schema()]}
class IPNetwork(IPAddress):
    """Field type wrapping netaddr.IPNetwork values (address/prefix)."""
    @staticmethod
    def coerce(obj, attr, value):
        try:
            return netaddr.IPNetwork(value)
        except netaddr.AddrFormatError as e:
            raise ValueError(str(e))
class IPV4Network(IPNetwork):
    """IPNetwork field restricted to IPv4, with a JSON-schema PATTERN."""
    # Dotted-quad (0-255 per octet) followed by a mandatory /0-32 prefix
    PATTERN = (r'^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-'
               r'9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/([0-9]|[1-2]['
               r'0-9]|3[0-2]))$')
    @staticmethod
    def coerce(obj, attr, value):
        try:
            return netaddr.IPNetwork(value, version=4)
        except netaddr.AddrFormatError as e:
            raise ValueError(str(e))
class IPV6Network(IPNetwork):
    """IPNetwork field restricted to IPv6, with a JSON-schema PATTERN.

    PATTERN is assembled per instance in __init__ because it is built
    from segment sub-patterns at runtime.
    """

    def __init__(self, *args, **kwargs):
        super(IPV6Network, self).__init__(*args, **kwargs)
        self.PATTERN = self._create_pattern()

    @staticmethod
    def coerce(obj, attr, value):
        try:
            return netaddr.IPNetwork(value, version=6)
        except netaddr.AddrFormatError as e:
            raise ValueError(str(e))

    def _create_pattern(self):
        """Build a regex matching the textual IPv6 forms, incl. prefix."""
        ipv6seg = '[0-9a-fA-F]{1,4}'
        ipv4seg = '(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])'
        return (
            # Pattern based on answer to
            # http://stackoverflow.com/questions/53497/regular-expression-that-matches-valid-ipv6-addresses
            '^'
            '(' + ipv6seg + ':){7,7}' + ipv6seg + '|'
            '(' + ipv6seg + ':){1,7}:|'
            '(' + ipv6seg + ':){1,6}:' + ipv6seg + '|'
            '(' + ipv6seg + ':){1,5}(:' + ipv6seg + '){1,2}|'
            '(' + ipv6seg + ':){1,4}(:' + ipv6seg + '){1,3}|'
            '(' + ipv6seg + ':){1,3}(:' + ipv6seg + '){1,4}|'
            '(' + ipv6seg + ':){1,2}(:' + ipv6seg + '){1,5}|' +
            ipv6seg + ':((:' + ipv6seg + '){1,6})|'
            ':((:' + ipv6seg + '){1,7}|:)|'
            # link-local with zone index
            'fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|'
            # IPv4-mapped forms
            '::(ffff(:0{1,4}){0,1}:){0,1}'
            '(' + ipv4seg + r'\.){3,3}' +
            ipv4seg + '|'
            '(' + ipv6seg + ':){1,4}:'
            '(' + ipv4seg + r'\.){3,3}' +
            ipv4seg +
            # /0-128 prefix length.  Bug fix: the digits must be matched
            # with r'\d'; the previous pattern used a bare 'd', which
            # matched the literal character 'd' and rejected every
            # numeric prefix such as '/64'.
            r'(\/(\d|\d\d|1[0-1]\d|12[0-8]))$'
        )
class CompoundFieldType(FieldType):
    """Base for container field types holding elements of another type."""
    def __init__(self, element_type, **field_args):
        # The contained elements are themselves described by a full Field
        self._element_type = Field(element_type, **field_args)
class List(CompoundFieldType):
    """Container field holding an ordered list of element values."""

    def coerce(self, obj, attr, value):
        # Strings and mappings are iterable too but are not lists
        if (not isinstance(value, collections_abc.Iterable) or
                isinstance(value, (str, collections_abc.Mapping))):
            raise ValueError(_('A list is required in field %(attr)s, '
                               'not a %(type)s') %
                             {'attr': attr, 'type': type(value).__name__})
        result = CoercedList()
        result.enable_coercing(self._element_type, obj, attr)
        result.extend(value)
        return result

    def to_primitive(self, obj, attr, value):
        return [self._element_type.to_primitive(obj, attr, item)
                for item in value]

    def from_primitive(self, obj, attr, value):
        return [self._element_type.from_primitive(obj, attr, item)
                for item in value]

    def stringify(self, value):
        return '[%s]' % ','.join(
            [self._element_type.stringify(item) for item in value])

    def get_schema(self):
        return {'type': ['array'], 'items': self._element_type.get_schema()}
class Dict(CompoundFieldType):
    """Container field holding a mapping of string keys to elements."""

    def coerce(self, obj, attr, value):
        if not isinstance(value, dict):
            raise ValueError(_('A dict is required in field %(attr)s, '
                               'not a %(type)s') %
                             {'attr': attr, 'type': type(value).__name__})
        result = CoercedDict()
        result.enable_coercing(self._element_type, obj, attr)
        result.update(value)
        return result

    def to_primitive(self, obj, attr, value):
        return {key: self._element_type.to_primitive(
                    obj, '%s["%s"]' % (attr, key), element)
                for key, element in value.items()}

    def from_primitive(self, obj, attr, value):
        return {key: self._element_type.from_primitive(
                    obj, '%s["%s"]' % (attr, key), element)
                for key, element in value.items()}

    def stringify(self, value):
        return '{%s}' % ','.join(
            ['%s=%s' % (key, self._element_type.stringify(val))
             for key, val in sorted(value.items())])

    def get_schema(self):
        return {'type': ['object'],
                'additionalProperties': self._element_type.get_schema()}
class DictProxyField(object):
    """Descriptor allowing us to assign pinning data as a dict of key_types

    This allows us to have an object field that will be a dict of key_type
    keys, allowing that will convert back to string-keyed dict.

    This will take care of the conversion while the dict field will make sure
    that we store the raw json-serializable data on the object.

    key_type should return a type that unambiguously responds to str
    so that calling key_type on it yields the same thing.
    """

    def __init__(self, dict_field_name, key_type=int):
        self._fld_name = dict_field_name
        self._key_type = key_type

    def __get__(self, obj, obj_type):
        if obj is None:
            # Accessed on the class rather than an instance
            return self
        raw = getattr(obj, self._fld_name)
        if raw is None:
            return None
        return {self._key_type(k): v for k, v in raw.items()}

    def __set__(self, obj, val):
        if val is None:
            setattr(obj, self._fld_name, val)
        else:
            setattr(obj, self._fld_name,
                    {str(k): v for k, v in val.items()})
class Set(CompoundFieldType):
    """Container field holding an unordered set of element values."""

    def coerce(self, obj, attr, value):
        if not isinstance(value, set):
            raise ValueError(_('A set is required in field %(attr)s, '
                               'not a %(type)s') %
                             {'attr': attr, 'type': type(value).__name__})
        result = CoercedSet()
        result.enable_coercing(self._element_type, obj, attr)
        result.update(value)
        return result

    def to_primitive(self, obj, attr, value):
        # Serialized as a tuple, not a set, for JSON-friendliness
        return tuple(self._element_type.to_primitive(obj, attr, item)
                     for item in value)

    def from_primitive(self, obj, attr, value):
        return {self._element_type.from_primitive(obj, attr, item)
                for item in value}

    def stringify(self, value):
        return 'set([%s])' % ','.join(
            [self._element_type.stringify(item) for item in value])

    def get_schema(self):
        return {'type': ['array'], 'uniqueItems': True,
                'items': self._element_type.get_schema()}
class Object(FieldType):
    """Field type holding a VersionedObject of a particular registered name.

    :param obj_name: registry name the value's class must expose
    :param subclasses: when True, a value whose MRO contains a class
        with the expected obj_name is accepted as well
    """
    def __init__(self, obj_name, subclasses=False, **kwargs):
        self._obj_name = obj_name
        self._subclasses = subclasses
        super(Object, self).__init__(**kwargs)
    @staticmethod
    def _get_all_obj_names(obj):
        # Collect registry names for obj's class and every parent class
        obj_names = []
        for parent in obj.__class__.mro():
            # Skip mix-ins which are not versioned object subclasses
            if not hasattr(parent, "obj_name"):
                continue
            obj_names.append(parent.obj_name())
        return obj_names
    def coerce(self, obj, attr, value):
        try:
            obj_name = value.obj_name()
        except AttributeError:
            # Not a versioned object at all
            obj_name = ""
        if self._subclasses:
            obj_names = self._get_all_obj_names(value)
        else:
            obj_names = [obj_name]
        if self._obj_name not in obj_names:
            if not obj_name:
                # If we're not dealing with an object, it's probably a
                # primitive so get it's type for the message below.
                obj_name = type(value).__name__
            # Qualify both names with their modules for the error message
            obj_mod = ''
            if hasattr(obj, '__module__'):
                obj_mod = ''.join([obj.__module__, '.'])
            val_mod = ''
            if hasattr(value, '__module__'):
                val_mod = ''.join([value.__module__, '.'])
            raise ValueError(_('An object of type %(type)s is required '
                               'in field %(attr)s, not a %(valtype)s') %
                             {'type': ''.join([obj_mod, self._obj_name]),
                              'attr': attr, 'valtype': ''.join([val_mod,
                                                                obj_name])})
        return value
    @staticmethod
    def to_primitive(obj, attr, value):
        return value.obj_to_primitive()
    @staticmethod
    def from_primitive(obj, attr, value):
        # FIXME(danms): Avoid circular import from base.py
        from oslo_versionedobjects import base as obj_base
        # NOTE (ndipanov): If they already got hydrated by the serializer, just
        # pass them back unchanged
        if isinstance(value, obj_base.VersionedObject):
            return value
        return obj.obj_from_primitive(value, obj._context)
    def describe(self):
        return "Object<%s>" % self._obj_name
    def stringify(self, value):
        # Prefer a uuid/id marker for identification when available
        if 'uuid' in value.fields:
            ident = '(%s)' % (value.obj_attr_is_set('uuid') and value.uuid or
                              'UNKNOWN')
        elif 'id' in value.fields:
            ident = '(%s)' % (value.obj_attr_is_set('id') and value.id or
                              'UNKNOWN')
        else:
            ident = ''
        return '%s%s' % (value.obj_name(), ident)
    def get_schema(self):
        from oslo_versionedobjects import base as obj_base
        obj_classes = obj_base.VersionedObjectRegistry.obj_classes()
        if self._obj_name in obj_classes:
            cls = obj_classes[self._obj_name][0]
            # Keys in the primitive form are namespaced per object class
            namespace_key = cls._obj_primitive_key('namespace')
            name_key = cls._obj_primitive_key('name')
            version_key = cls._obj_primitive_key('version')
            data_key = cls._obj_primitive_key('data')
            changes_key = cls._obj_primitive_key('changes')
            field_schemas = {key: field.get_schema()
                             for key, field in cls.fields.items()}
            required_fields = [key for key, field in sorted(cls.fields.items())
                               if not field.nullable]
            schema = {
                'type': ['object'],
                'properties': {
                    namespace_key: {
                        'type': 'string'
                    },
                    name_key: {
                        'type': 'string'
                    },
                    version_key: {
                        'type': 'string'
                    },
                    changes_key: {
                        'type': 'array',
                        'items': {
                            'type': 'string'
                        }
                    },
                    data_key: {
                        'type': 'object',
                        'description': 'fields of %s' % self._obj_name,
                        'properties': field_schemas,
                    },
                },
                'required': [namespace_key, name_key, version_key, data_key]
            }
            if required_fields:
                schema['properties'][data_key]['required'] = required_fields
            return schema
        else:
            raise exception.UnsupportedObjectError(objtype=self._obj_name)
class AutoTypedField(Field):
    """Field base that supplies its own type via the AUTO_TYPE attribute."""
    AUTO_TYPE = None
    def __init__(self, **kwargs):
        super(AutoTypedField, self).__init__(self.AUTO_TYPE, **kwargs)
class StringField(AutoTypedField):
    """Field whose values are coerced to str."""
    AUTO_TYPE = String()
class SensitiveStringField(AutoTypedField):
    """Field type that masks passwords when the field is stringified."""
    AUTO_TYPE = SensitiveString()
class VersionPredicateField(AutoTypedField):
    """Field whose string values must be valid version predicates."""
    AUTO_TYPE = VersionPredicate()
class BaseEnumField(AutoTypedField):
    '''Base class for all enum field types

    This class should not be directly instantiated. Instead
    subclass it and set AUTO_TYPE to be a SomeEnum()
    where SomeEnum is a subclass of Enum.
    '''

    def __init__(self, **kwargs):
        if self.AUTO_TYPE is None:
            raise exception.EnumFieldUnset(
                fieldname=self.__class__.__name__)
        if not isinstance(self.AUTO_TYPE, Enum):
            raise exception.EnumFieldInvalid(
                typename=self.AUTO_TYPE.__class__.__name__,
                fieldname=self.__class__.__name__)
        super(BaseEnumField, self).__init__(**kwargs)

    def __repr__(self):
        args = {
            'nullable': self._nullable,
            'default': self._default,
            'valid_values': self._type.valid_values,
        }
        pairs = ['%s=%s' % (k, v) for k, v in sorted(args.items())]
        return '%s(%s)' % (self._type.__class__.__name__, ','.join(pairs))

    @property
    def valid_values(self):
        """Return the list of valid values for the field."""
        return self._type.valid_values
class EnumField(BaseEnumField):
    '''Anonymous enum field type

    This class allows for anonymous enum types to be
    declared, simply by passing in a list of valid values
    to its constructor. It is generally preferable though,
    to create an explicit named enum type by sub-classing
    the BaseEnumField type directly.
    '''
    # AUTO_TYPE is built per instance here, unlike most other
    # AutoTypedField subclasses which declare it on the class
    def __init__(self, valid_values, **kwargs):
        self.AUTO_TYPE = Enum(valid_values=valid_values)
        super(EnumField, self).__init__(**kwargs)
class StateMachine(EnumField):
    """A mixin that can be applied to an EnumField to enforce a state machine

    e.g: Setting the code below on a field will ensure an object cannot
    transition from ERROR to ACTIVE

    :example:
     .. code-block:: python

      class FakeStateMachineField(fields.EnumField, fields.StateMachine):
          ACTIVE = 'ACTIVE'
          PENDING = 'PENDING'
          ERROR = 'ERROR'
          DELETED = 'DELETED'

          ALLOWED_TRANSITIONS = {
              ACTIVE: {
                  PENDING,
                  ERROR,
                  DELETED,
              },
              PENDING: {
                  ACTIVE,
                  ERROR
              },
              ERROR: {
                  PENDING,
              },
              DELETED: {}  # This is a terminal state
          }

          _TYPES = (ACTIVE, PENDING, ERROR, DELETED)

          def __init__(self, **kwargs):
              super(FakeStateMachineField, self).__init__(
                  self._TYPES, **kwargs)
    """
    # This is dict of states, that have dicts of states an object is
    # allowed to transition to
    ALLOWED_TRANSITIONS = {}

    def _my_name(self, obj):
        # Best-effort reverse lookup of this field's attribute name
        for name, field in obj.fields.items():
            if field == self:
                return name
        return 'unknown'

    def coerce(self, obj, attr, value):
        """Validate the value and the transition from the current state."""
        super(StateMachine, self).coerce(obj, attr, value)

        if attr in obj:
            current_value = getattr(obj, attr)
        else:
            # No current state recorded yet; any initial value is fine
            return value

        allowed = self.ALLOWED_TRANSITIONS.get(current_value)
        if allowed is not None and value in allowed:
            return value

        my_name = self._my_name(obj)
        if allowed is not None:
            # Transition from a known state to a disallowed target
            msg = _(
                "%(object)s.%(name)s is not allowed to transition out of "
                "'%(current_value)s' state to '%(value)s' state, choose "
                "from %(options)r") % {
                    'object': obj.obj_name(),
                    'name': my_name,
                    'current_value': current_value,
                    'value': value,
                    'options': [x for x in allowed],
                }
        else:
            # Bug fix: a current state missing from ALLOWED_TRANSITIONS
            # previously raised KeyError while the message was being
            # built; it now raises the intended ValueError.
            msg = _("%(object)s.%(name)s is not allowed to transition out of "
                    "%(value)s state") % {
                        'object': obj.obj_name(),
                        'name': my_name,
                        'value': value,
                    }
        raise ValueError(msg)
class UUIDField(AutoTypedField):
    """UUID Field Type

    .. warning::

        This class does not actually validate UUIDs. This will happen in a
        future major version of oslo.versionedobjects

    To validate that you have valid UUIDs you need to do the following in
    your own objects/fields.py

    :Example:
        .. code-block:: python

           import oslo_versionedobjects.fields as ovo_fields

           class UUID(ovo_fields.UUID):
               def coerce(self, obj, attr, value):
                   uuid.UUID(value)
                   return str(value)


           class UUIDField(ovo_fields.AutoTypedField):
               AUTO_TYPE = UUID()

    and then in your objects use
    ``<your_projects>.object.fields.UUIDField``.

    This will become default behaviour in the future.
    """
    # NOTE(review): see the class docstring -- invalid UUIDs currently
    # only emit a FutureWarning from UUID.coerce()
    AUTO_TYPE = UUID()
class MACAddressField(AutoTypedField):
    """Field holding a normalized MAC address string."""
    AUTO_TYPE = MACAddress()
class PCIAddressField(AutoTypedField):
    """Field holding a normalized PCI address string."""
    AUTO_TYPE = PCIAddress()
class IntegerField(AutoTypedField):
    """Field whose values are coerced with int()."""
    AUTO_TYPE = Integer()
class NonNegativeIntegerField(AutoTypedField):
    """Integer field rejecting values below zero."""
    AUTO_TYPE = NonNegativeInteger()
class FloatField(AutoTypedField):
    """Field whose values are coerced with float()."""
    AUTO_TYPE = Float()
class NonNegativeFloatField(AutoTypedField):
    """Float field rejecting values below zero."""
    AUTO_TYPE = NonNegativeFloat()
# This is a strict interpretation of boolean
# values using Python's semantics for truth/falsehood
class BooleanField(AutoTypedField):
    """Boolean field using strict bool() truthiness."""
    AUTO_TYPE = Boolean()
# This is a flexible interpretation of boolean
# values using common user friendly semantics for
# truth/falsehood. ie strings like 'yes', 'no',
# 'on', 'off', 't', 'f' get mapped to values you
# would expect.
class FlexibleBooleanField(AutoTypedField):
    """Boolean field accepting strings like 'yes'/'no'/'on'/'off'."""
    AUTO_TYPE = FlexibleBoolean()
class DateTimeField(AutoTypedField):
    """Field for datetime values; tz awareness configurable per instance."""
    # AUTO_TYPE is set per instance so tzinfo_aware can be configured
    def __init__(self, tzinfo_aware=True, **kwargs):
        self.AUTO_TYPE = DateTime(tzinfo_aware=tzinfo_aware)
        super(DateTimeField, self).__init__(**kwargs)
class DictOfStringsField(AutoTypedField):
    """Field holding a dict of string values."""
    AUTO_TYPE = Dict(String())
class DictOfNullableStringsField(AutoTypedField):
    """Field holding a dict of string-or-None values."""
    AUTO_TYPE = Dict(String(), nullable=True)
class DictOfIntegersField(AutoTypedField):
    """Field holding a dict of integer values."""
    AUTO_TYPE = Dict(Integer())
class ListOfStringsField(AutoTypedField):
    """Field holding a list of string values."""
    AUTO_TYPE = List(String())
class DictOfListOfStringsField(AutoTypedField):
    """Field holding a dict mapping keys to lists of strings."""
    AUTO_TYPE = Dict(List(String()))
class ListOfEnumField(AutoTypedField):
    """Field holding a list of values drawn from a fixed enum."""

    def __init__(self, valid_values, **kwargs):
        self.AUTO_TYPE = List(Enum(valid_values))
        super(ListOfEnumField, self).__init__(**kwargs)

    def __repr__(self):
        # Mirrors BaseEnumField.__repr__, reaching through the List
        # wrapper to the underlying Enum for its valid values
        args = {
            'nullable': self._nullable,
            'default': self._default,
            'valid_values': self._type._element_type._type.valid_values,
        }
        pairs = ['%s=%s' % (k, v) for k, v in sorted(args.items())]
        return '%s(%s)' % (self._type.__class__.__name__, ','.join(pairs))
class SetOfIntegersField(AutoTypedField):
    """Field holding a set of integer values."""
    AUTO_TYPE = Set(Integer())
class ListOfSetsOfIntegersField(AutoTypedField):
    """Field holding a list of integer sets."""
    AUTO_TYPE = List(Set(Integer()))
class ListOfIntegersField(AutoTypedField):
    """Field holding a list of integer values."""
    AUTO_TYPE = List(Integer())
class ListOfDictOfNullableStringsField(AutoTypedField):
    """Field holding a list of dicts of string-or-None values."""
    AUTO_TYPE = List(Dict(String(), nullable=True))
class ObjectField(AutoTypedField):
    """Field holding a VersionedObject of the named registered type."""
    def __init__(self, objtype, subclasses=False, **kwargs):
        self.AUTO_TYPE = Object(objtype, subclasses)
        # Kept as a public attribute for introspection by consumers
        self.objname = objtype
        super(ObjectField, self).__init__(**kwargs)
class ListOfObjectsField(AutoTypedField):
    """Field holding a list of VersionedObjects of the named type."""
    def __init__(self, objtype, subclasses=False, **kwargs):
        self.AUTO_TYPE = List(Object(objtype, subclasses))
        # Kept as a public attribute for introspection by consumers
        self.objname = objtype
        super(ListOfObjectsField, self).__init__(**kwargs)
class ListOfUUIDField(AutoTypedField):
    """Field holding a list of UUID strings (see UUIDField caveats)."""
    AUTO_TYPE = List(UUID())
class IPAddressField(AutoTypedField):
    """Field holding a netaddr.IPAddress value."""
    AUTO_TYPE = IPAddress()
class IPV4AddressField(AutoTypedField):
    """Field holding an IPv4 netaddr.IPAddress value."""
    AUTO_TYPE = IPV4Address()
class IPV6AddressField(AutoTypedField):
    """Field holding an IPv6 netaddr.IPAddress value."""
    AUTO_TYPE = IPV6Address()
class IPV4AndV6AddressField(AutoTypedField):
    """Field holding an IPv4 or IPv6 netaddr.IPAddress value."""
    AUTO_TYPE = IPV4AndV6Address()
class IPNetworkField(AutoTypedField):
    """Field holding a netaddr.IPNetwork value."""
    AUTO_TYPE = IPNetwork()
class IPV4NetworkField(AutoTypedField):
    """Field holding an IPv4 netaddr.IPNetwork value."""
    AUTO_TYPE = IPV4Network()
class IPV6NetworkField(AutoTypedField):
    """Field holding an IPv6 netaddr.IPNetwork value."""
    AUTO_TYPE = IPV6Network()
class CoercedCollectionMixin(object):
    """Mixin wiring a collection up for element coercion.

    Until enable_coercing() is called the collection behaves just like
    its plain built-in counterpart.
    """

    def __init__(self, *args, **kwargs):
        self._element_type = None
        self._obj = None
        self._field = None
        super(CoercedCollectionMixin, self).__init__(*args, **kwargs)

    def enable_coercing(self, element_type, obj, field):
        self._element_type = element_type
        self._obj = obj
        self._field = field
class CoercedList(CoercedCollectionMixin, list):
    """List which coerces its elements

    List implementation which overrides all element-adding methods and
    coercing the element(s) being added to the required element type
    """

    def _coerce_item(self, index, item):
        # index is only used to label the attribute in error messages
        if hasattr(self, "_element_type") and self._element_type is not None:
            att_name = "%s[%i]" % (self._field, index)
            return self._element_type.coerce(self._obj, att_name, item)
        else:
            return item

    def __setitem__(self, i, y):
        if type(i) is slice:  # compatibility with py3 and [::] slices
            start = i.start or 0
            step = i.step or 1
            coerced_items = [self._coerce_item(start + index * step, item)
                             for index, item in enumerate(y)]
            super(CoercedList, self).__setitem__(i, coerced_items)
        else:
            super(CoercedList, self).__setitem__(i, self._coerce_item(i, y))

    def append(self, x):
        # Bug fix: the new element lands at index len(self), not
        # len(self) + 1; the old off-by-one produced a misleading
        # attribute name in coercion error messages.
        super(CoercedList, self).append(self._coerce_item(len(self), x))

    def extend(self, t):
        coerced_items = [self._coerce_item(len(self) + index, item)
                         for index, item in enumerate(t)]
        super(CoercedList, self).extend(coerced_items)

    def insert(self, i, x):
        super(CoercedList, self).insert(i, self._coerce_item(i, x))

    def __iadd__(self, y):
        coerced_items = [self._coerce_item(len(self) + index, item)
                         for index, item in enumerate(y)]
        return super(CoercedList, self).__iadd__(coerced_items)

    def __setslice__(self, i, j, y):
        # Only reachable on Python 2; kept for backward compatibility
        coerced_items = [self._coerce_item(i + index, item)
                         for index, item in enumerate(y)]
        return super(CoercedList, self).__setslice__(i, j, coerced_items)
class CoercedDict(CoercedCollectionMixin, dict):
    """Dict which coerces its values

    Dict implementation which overrides all element-adding methods and
    coercing the element(s) being added to the required element type
    """

    def _coerce_dict(self, d):
        return {key: self._coerce_item(key, element)
                for key, element in d.items()}

    def _coerce_item(self, key, item):
        # Keys must always be strings, even before coercion is enabled
        if not isinstance(key, str):
            raise KeyTypeError(str, key)
        if hasattr(self, "_element_type") and self._element_type is not None:
            att_name = "%s[%s]" % (self._field, key)
            return self._element_type.coerce(self._obj, att_name, item)
        return item

    def __setitem__(self, key, value):
        super(CoercedDict, self).__setitem__(
            key, self._coerce_item(key, value))

    def update(self, other=None, **kwargs):
        if other is not None:
            super(CoercedDict, self).update(self._coerce_dict(other),
                                            **self._coerce_dict(kwargs))
        else:
            super(CoercedDict, self).update(**self._coerce_dict(kwargs))

    def setdefault(self, key, default=None):
        return super(CoercedDict, self).setdefault(
            key, self._coerce_item(key, default))
class CoercedSet(CoercedCollectionMixin, set):
    """Set which coerces its values

    Dict implementation which overrides all element-adding methods and
    coercing the element(s) being added to the required element type
    """

    def _coerce_element(self, element):
        if hasattr(self, "_element_type") and self._element_type is not None:
            att_name = "%s[%s]" % (self._field, element)
            return self._element_type.coerce(self._obj, att_name, element)
        return element

    def _coerce_iterable(self, values):
        return {self._coerce_element(element) for element in values}

    def add(self, value):
        return super(CoercedSet, self).add(self._coerce_element(value))

    def update(self, values):
        return super(CoercedSet, self).update(self._coerce_iterable(values))

    def symmetric_difference_update(self, values):
        return super(CoercedSet, self).symmetric_difference_update(
            self._coerce_iterable(values))

    def __ior__(self, y):
        return super(CoercedSet, self).__ior__(self._coerce_iterable(y))

    def __ixor__(self, y):
        return super(CoercedSet, self).__ixor__(self._coerce_iterable(y))
| 1.875 | 2 |
scripts/normalization.py | ysy6868/STPF | 958 | 12770195 | #! /usr/bin/python
'''
Data Normalization
'''
from sklearn import preprocessing
def normalize(file_dataframe, cols):
    '''
    Scale each of the given columns to unit L2 norm, in place.

    :param file_dataframe: pandas DataFrame holding the data
    :param cols: iterable of column names to normalize
    :returns: the same DataFrame, with each listed column rescaled
    '''
    import numpy as np  # local import keeps module-level deps unchanged

    for col in cols:
        values = file_dataframe[col].to_numpy(dtype=float)
        # Bug fix: sklearn.preprocessing.normalize requires a 2-D array;
        # the previous call passed a 1-D Series with copy=False, which
        # modern scikit-learn rejects ("Expected 2D array") and whose
        # return value was discarded anyway, so the data never changed.
        # Normalizing a single column by L2 norm is simply dividing by
        # its Euclidean length.
        norm = np.linalg.norm(values)
        if norm:
            file_dataframe[col] = values / norm
    return file_dataframe
code/tests/unit/mock_data_for_tests.py | CiscoSecurity/tr-05-serverless-palo-alto-autofocus | 0 | 12770196 | from datetime import timedelta
# Raw AutoFocus tic-summary payload for an IPv4 indicator with a
# PAN_DB verdict of MALWARE.
AUTOFOCUS_IP_RESPONSE_MOCK = {
    "indicator": {
        "indicatorValue": "172.16.31.10",
        "indicatorType": "IPV4_ADDRESS",
        "summaryGenerationTs": 1607951568568,
        "firstSeenTsGlobal": None,
        "lastSeenTsGlobal": None,
        "latestPanVerdicts": {
            "PAN_DB": "MALWARE"
        },
        "seenByDataSourceIds": [],
        "wildfireRelatedSampleVerdictCounts": {}
    },
    "tags": [],
    "bucketInfo": {
        "minutePoints": 200,
        "dailyPoints": 25000,
        "minuteBucketStart": "2020-11-20 05:02:52",
        "dailyBucketStart": "2020-11-20 04:52:40",
        "minutePointsRemaining": 196,
        "dailyPointsRemaining": 24980,
        "waitInSeconds": 0
    }
}
# Expected CTIM-shaped module output for the IPv4 indicator above,
# keyed by API route.
INTEGRATION_IP_RESPONSE_MOCK = {
    '/observe/observables': {
        "data": {
            "judgements": {
                "count": 1,
                "docs": [
                    {
                        "confidence": "High",
                        "disposition": 2,
                        "disposition_name": "Malicious",
                        "observable": {
                            "type": "ip",
                            "value": "172.16.31.10"
                        },
                        "priority": 85,
                        "reason": "MALWARE in AutoFocus",
                        "schema_version": "1.0.22",
                        "severity": "High",
                        "source": "Palo Alto AutoFocus",
                        "source_uri": "https://autofocus.paloaltonetworks.com/"
                                      "#/search/indicator/ipv4_address/"
                                      "172.16.31.10",
                        "type": "judgement"
                    }
                ]
            },
            "verdicts": {
                "count": 1,
                "docs": [
                    {
                        "disposition": 2,
                        "disposition_name": "Malicious",
                        "observable": {
                            "type": "ip",
                            "value": "172.16.31.10"
                        },
                        "type": "verdict"
                    }
                ]
            }
        }
    },
    '/refer/observables': {
        'data': [
            {
                "categories": [
                    "Search",
                    "Palo Alto AutoFocus"
                ],
                "description": "Look up this IP on Palo Alto AutoFocus",
                "id": "ref-palo-alto-autofocus-search-ip-172.16.31.10",
                "title": "Search for this IP",
                "url": "https://autofocus.paloaltonetworks.com/#/search/"
                       "indicator/ipv4_address/103.110.84.196"
            }]
    }
}
# Raw AutoFocus payload for an IPv6 indicator with a BENIGN verdict.
AUTOFOCUS_IPV6_RESPONSE_MOCK = {
    "indicator": {
        "indicatorValue": "2001:db8:85a3:8d3:1319:8a2e:370:7348",
        "indicatorType": "IPV6_ADDRESS",
        "summaryGenerationTs": 1607953105326,
        "firstSeenTsGlobal": None,
        "lastSeenTsGlobal": None,
        "latestPanVerdicts": {
            "PAN_DB": "BENIGN"
        },
        "seenByDataSourceIds": [],
        "wildfireRelatedSampleVerdictCounts": {}
    },
    "tags": [],
    "bucketInfo": {
        "minutePoints": 200,
        "dailyPoints": 25000,
        "minuteBucketStart": "2020-11-20 05:02:52",
        "dailyBucketStart": "2020-11-20 04:52:40",
        "minutePointsRemaining": 196,
        "dailyPointsRemaining": 24980,
        "waitInSeconds": 0
    }
}
# Expected module output for the IPv6 indicator above.
INTEGRATION_IPV6_RESPONSE_MOCK = {
    '/observe/observables': {
        "data": {
            "judgements": {
                "count": 1,
                "docs": [
                    {
                        "confidence": "High",
                        "disposition": 1,
                        "disposition_name": "Clean",
                        "observable": {
                            "type": "ipv6",
                            "value": "2001:db8:85a3:8d3:1319:8a2e:370:7348"
                        },
                        "priority": 85,
                        "reason": "BENIGN in AutoFocus",
                        "schema_version": "1.0.22",
                        "severity": "High",
                        "source": "Palo Alto AutoFocus",
                        "source_uri": "https://autofocus.paloaltonetworks.com"
                                      "/#/search/indicator/ipv6_address/"
                                      "2001:db8:85a3:8d3:1319:8a2e:370:7348",
                        "type": "judgement"
                    }
                ]
            },
            "verdicts": {
                "count": 1,
                "docs": [
                    {
                        "disposition": 1,
                        "disposition_name": "Clean",
                        "observable": {
                            "type": "ipv6",
                            "value": "2001:db8:85a3:8d3:1319:8a2e:370:7348"
                        },
                        "type": "verdict"
                    }
                ]
            }
        }
    },
    '/refer/observables': {
        'data': [
            {
                "categories": [
                    "Search",
                    "Palo Alto AutoFocus"
                ],
                "description": "Look up this IPv6 on Palo Alto AutoFocus",
                "id": "ref-palo-alto-autofocus-search-"
                      "ipv6-2001:db8:85a3:8d3:1319:8a2e:370:7348",
                "title": "Search for this IPv6",
                "url": "https://autofocus.paloaltonetworks.com/#/search"
                       "/indicator/ipv6_address/"
                       "2001:db8:85a3:8d3:1319:8a2e:370:7348"
            }
        ]
    }
}
# Raw AutoFocus payload for a domain indicator (includes WHOIS fields).
AUTOFOCUS_DOMAIN_RESPONSE_MOCK = {
    "indicator": {
        "indicatorValue": "cisco.com",
        "indicatorType": "DOMAIN",
        "summaryGenerationTs": 1607953513675,
        "firstSeenTsGlobal": None,
        "lastSeenTsGlobal": None,
        "latestPanVerdicts": {
            "PAN_DB": "BENIGN"
        },
        "seenByDataSourceIds": [],
        "whoisAdminCountry": None,
        "whoisAdminEmail": None,
        "whoisAdminName": None,
        "whoisDomainCreationDate": "1987-05-14",
        "whoisDomainExpireDate": "2022-05-15",
        "whoisDomainUpdateDate": "2019-06-21",
        "whoisRegistrar": "MarkMonitor Inc.",
        "whoisRegistrarUrl": "http://www.markmonitor.com",
        "whoisRegistrant": None,
        "wildfireRelatedSampleVerdictCounts": {}
    },
    "tags": [],
    "bucketInfo": {
        "minutePoints": 200,
        "dailyPoints": 25000,
        "minuteBucketStart": "2020-11-20 05:02:52",
        "dailyBucketStart": "2020-11-20 04:52:40",
        "minutePointsRemaining": 196,
        "dailyPointsRemaining": 24980,
        "waitInSeconds": 0
    }
}
# Expected module output for the domain indicator above.
INTEGRATION_DOMAIN_RESPONSE_MOCK = {
    '/observe/observables': {
        "data": {
            "judgements": {
                "count": 1,
                "docs": [
                    {
                        "confidence": "High",
                        "disposition": 1,
                        "disposition_name": "Clean",
                        "observable": {
                            "type": "domain",
                            "value": "cisco.com"
                        },
                        "priority": 85,
                        "reason": "BENIGN in AutoFocus",
                        "schema_version": "1.0.22",
                        "severity": "High",
                        "source": "Palo Alto AutoFocus",
                        "source_uri": "https://autofocus.paloaltonetworks.com"
                                      "/#/search/indicator/domain/cisco.com",
                        "type": "judgement"
                    }
                ]
            },
            "verdicts": {
                "count": 1,
                "docs": [
                    {
                        "disposition": 1,
                        "disposition_name": "Clean",
                        "observable": {
                            "type": "domain",
                            "value": "cisco.com"
                        },
                        "type": "verdict"
                    }
                ]
            }
        }
    },
    '/refer/observables': {
        'data': [
            {
                "categories": [
                    "Search",
                    "Palo Alto AutoFocus"
                ],
                "description": "Look up this domain on Palo Alto AutoFocus",
                "id": "ref-palo-alto-autofocus-search-domain-cisco.com",
                "title": "Search for this domain",
                "url": "https://autofocus.paloaltonetworks.com/#/search/"
                       "indicator/domain/cisco.com"
            }
        ]
    }
}
# Raw AutoFocus payload for a URL indicator with a MALWARE verdict.
AUTOFOCUS_URL_RESPONSE_MOCK = {
    "indicator": {
        "indicatorValue": "http://0win365.com/wp-admin/sites/",
        "indicatorType": "URL",
        "summaryGenerationTs": 1607953838339,
        "firstSeenTsGlobal": None,
        "lastSeenTsGlobal": None,
        "latestPanVerdicts": {
            "PAN_DB": "MALWARE"
        },
        "seenByDataSourceIds": [],
        "wildfireRelatedSampleVerdictCounts": {}
    },
    "tags": [],
    "bucketInfo": {
        "minutePoints": 200,
        "dailyPoints": 25000,
        "minuteBucketStart": "2020-11-20 05:02:52",
        "dailyBucketStart": "2020-11-20 04:52:40",
        "minutePointsRemaining": 196,
        "dailyPointsRemaining": 24980,
        "waitInSeconds": 0
    }
}
INTEGRATION_URL_RESPONSE_MOCK = {
'/observe/observables': {
"data": {
"judgements": {
"count": 1,
"docs": [
{
"confidence": "High",
"disposition": 2,
"disposition_name": "Malicious",
"observable": {
"type": "url",
"value": "http://0win365.com/wp-admin/sites/"
},
"priority": 85,
"reason": "MALWARE in AutoFocus",
"schema_version": "1.0.22",
"severity": "High",
"source": "Palo Alto AutoFocus",
"source_uri": "https://autofocus.paloaltonetworks.com"
"/#/search/indicator/url/"
"http%3A%2F%2F0win365.com%2Fwp-admin%2"
"Fsites%2F/summary",
"type": "judgement"
}
]
},
"verdicts": {
"count": 1,
"docs": [
{
"disposition": 2,
"disposition_name": "Malicious",
"observable": {
"type": "url",
"value": "http://0win365.com/wp-admin/sites/"
},
"type": "verdict"
}
]
}
}
},
'/refer/observables': {
'data': [
{
"categories": [
"Search",
"Palo Alto AutoFocus"
],
"description": "Look up this URL on Palo Alto AutoFocus",
"id": "ref-palo-alto-autofocus-search-url-http://"
"0win365.com/wp-admin/sites/",
"title": "Search for this URL",
"url": "https://autofocus.paloaltonetworks.com/#/search/"
"indicator/url/http%3A%2F%2F0win365.com%2Fwp-"
"admin%2Fsites%2F/summary"
}
]
}
}
AUTOFOCUS_SHA256_RESPONSE_MOCK = {
"indicator": {
"indicatorValue": "7fa2c54d7dabb0503d75bdd13cc4d6a6520516a990fb7879ae0"
"52bad9520763b",
"indicatorType": "FILEHASH",
"summaryGenerationTs": 1607954098735,
"firstSeenTsGlobal": 1605847163000,
"lastSeenTsGlobal": 1605847163000,
"latestPanVerdicts": {
"WF_SAMPLE": "GRAYWARE"
},
"seenByDataSourceIds": [
"WF_SAMPLE"
]
},
"tags": [
{
"support_id": 1,
"tag_name": "RenameOnReboot",
"public_tag_name": "Unit42.RenameOnReboot",
"tag_definition_scope_id": 4,
"tag_definition_status_id": 1,
"count": 16068736,
"lasthit": "2020-12-14 03:08:59",
"description": "The PendingFileRenameOperations key stores the nam"
"es of files to be renamed when the system restarts"
". It consists of pairs of file names. The file spe"
"cified in the first item of the pair is renamed to"
" match the second item of the pair. The system add"
"s this entry to the registry when a user or progra"
"m tries to rename a file that is in use. The file "
"names are stored in the value of this entry until "
"the system is restarted and they are renamed. Whil"
"e this is often a legitimate operation, it is some"
"times used by malware to overwrite or replace legi"
"timate system binaries with malicious ones.",
"customer_name": "Palo Alto Networks Unit42",
"customer_industry": "High Tech",
"upVotes": None,
"downVotes": None,
"myVote": None,
"source": "Unit 42",
"tag_class_id": 5,
"tag_definition_id": 36580
},
{
"support_id": 1,
"tag_name": "HttpNoUserAgent",
"public_tag_name": "Unit42.HttpNoUserAgent",
"tag_definition_scope_id": 4,
"tag_definition_status_id": 1,
"count": 23313610,
"lasthit": "2020-12-14 03:39:11",
"description": "A sample creates HTTP traffic but omits or uses a "
"blank user-agent field. Typically, legitimate appl"
"ications will include a user-agent value in HTTP r"
"equests. HTTP requests without the user-agent head"
"er or with a blank user agent value are extremely "
"suspect. This tag identified such suspect applicat"
"ions.",
"customer_name": "<NAME> Networks Unit42",
"customer_industry": "High Tech",
"upVotes": 4,
"downVotes": None,
"myVote": None,
"source": "Unit 42",
"tag_class_id": 5,
"tag_definition_id": 41533
},
{
"support_id": 1,
"tag_name": "SelfExtractingExecutable",
"public_tag_name": "Unit42.SelfExtractingExecutable",
"tag_definition_scope_id": 4,
"tag_definition_status_id": 1,
"count": 3750321,
"lasthit": "2020-12-13 21:31:08",
"description": "This sample is a self-extracting executable, which"
" is often an attribute of legitimate executables b"
"ut is also commonly used by malware authors.\n\nTh"
"ese files allow attackers to compress their malici"
"ous file(s) into a single binary and launch a seri"
"es of commands in sequence. This often allows them"
" to execute a malicious binary and display a decoy"
" document in a simple fashion.",
"customer_name": "<NAME> Unit42",
"customer_industry": "High Tech",
"upVotes": 1,
"downVotes": None,
"myVote": None,
"source": "Unit 42",
"tag_class_id": 5,
"tag_definition_id": 42834
}
],
"bucketInfo": {
"minutePoints": 200,
"dailyPoints": 25000,
"minuteBucketStart": "2020-11-20 05:02:52",
"dailyBucketStart": "2020-11-20 04:52:40",
"minutePointsRemaining": 196,
"dailyPointsRemaining": 24980,
"waitInSeconds": 0
}
}
INTEGRATION_SHA256_RESPONSE_MOCK = {
'/observe/observables': {
"data": {
"judgements": {
"count": 1,
"docs": [
{
"confidence": "High",
"disposition": 3,
"disposition_name": "Suspicious",
"observable": {
"type": "sha256",
"value": "7fa2c54d7dabb0503d75bdd13cc4d6a6520516a9"
"90fb7879ae052bad9520763b"
},
"priority": 85,
"reason": "GRAYWARE in AutoFocus",
"schema_version": "1.0.22",
"severity": "High",
"source": "Palo Alto AutoFocus",
"source_uri": "https://autofocus.paloaltonetworks.com"
"/#/search/indicator/sha256/7fa2c54d7dab"
"b0503d75bdd13cc4d6a6520516a990fb7879ae"
"052bad9520763b",
"type": "judgement"
}
]
},
"verdicts": {
"count": 1,
"docs": [
{
"disposition": 3,
"disposition_name": "Suspicious",
"observable": {
"type": "sha256",
"value": "7fa2c54d7dabb0503d75bdd13cc4d6a6520516a9"
"90fb7879ae052bad9520763b"
},
"type": "verdict"
}
]
}
}
},
'/refer/observables': {
'data': [
{
"categories": [
"Search",
"Palo Alto AutoFocus"
],
"description": "Look up this SHA256 on Palo Alto AutoFocus",
"id": "ref-palo-alto-autofocus-search-sha256-"
"7fa2c54d7dabb0503d75bdd13cc4d6a6520516a990fb7879ae052ba"
"d9520763b",
"title": "Search for this SHA256",
"url": "https://autofocus.paloaltonetworks.com/#/search/"
"indicator/sha256/7fa2c54d7dabb0503d75bdd13cc4d6a"
"6520516a990fb7879ae052bad9520763b"
}
]
}
}
# Lifetime applied to cached entities in these tests (7 days) -- presumably
# mirrors the integration's default; confirm against the module under test.
ENTITY_LIFETIME_MOCK = timedelta(days=7)
EXPECTED_RESPONSE_OF_JWKS_ENDPOINT = {
'keys': [
{
'kty': 'RSA',
'n': 'tSKfSeI0fukRIX38AHlKB1YPpX8PUYN2JdvfM-XjNmLfU1M74N0V'
'mdzIX95sneQGO9kC2xMIE-AIlt52Yf_KgBZggAlS9Y0Vx8DsSL2H'
'vOjguAdXir3vYLvAyyHin_mUisJOqccFKChHKjnk0uXy_38-1r17'
'_cYTp76brKpU1I4kM20M__dbvLBWjfzyw9ehufr74aVwr-0xJfsB'
'Vr2oaQFww_XHGz69Q7yHK6DbxYO4w4q2sIfcC4pT8XTPHo4JZ2M7'
'33Ea8a7HxtZS563_mhhRZLU5aynQpwaVv2U--CL6EvGt8TlNZOke'
'Rv8wz-Rt8B70jzoRpVK36rR-pHKlXhMGT619v82LneTdsqA25Wi2'
'Ld_c0niuul24A6-aaj2u9SWbxA9LmVtFntvNbRaHXE1SLpLPoIp8'
'uppGF02Nz2v3ld8gCnTTWfq_BQ80Qy8e0coRRABECZrjIMzHEg6M'
'loRDy4na0pRQv61VogqRKDU2r3_VezFPQDb3ciYsZjWBr3HpNOkU'
'jTrvLmFyOE9Q5R_qQGmc6BYtfk5rn7iIfXlkJAZHXhBy-ElBuiBM'
'-YSkFM7dH92sSIoZ05V4MP09Xcppx7kdwsJy72Sust9Hnd9B7V35'
'YnVF6W791lVHnenhCJOziRmkH4xLLbPkaST2Ks3IHH7tVltM6NsR'
'k3jNdVM',
'e': 'AQAB',
'alg': 'RS256',
'kid': '02B1174234C29F8EFB69911438F597FF3FFEE6B7',
'use': 'sig'
}
]
}
RESPONSE_OF_JWKS_ENDPOINT_WITH_WRONG_KEY = {
'keys': [
{
'kty': 'RSA',
'n': '<KEY>'
'<KEY>'
'<KEY>vYLvAyyHin_mUisJOqccFKChHKjnk0uXy_38-1r17'
'_cYTp76brKpU1I4kM20M__dbvLBWjfzyw9ehufr74aVwr-0xJfsB'
'<KEY>XHGz69Q7yHK6DbxYO4w4q2sIfcC4pT8XTPHo4JZ2M7'
'<KEY>'
'Rv8wz-Rt8B70jzoRpVK36rR-pHKlXhMGT619v82LneTdsqA25Wi2'
'Ld_c0niuul24A6-aaj2u9SWbxA9LmVtFntvNbRaHXE1SLpLPoIp8'
'uppGF02Nz2v3ld8gCnTTWfq_BQ80Qy8e0coRRABECZrjIMzHEg6M'
'loRDy4na0pRQv61VogqRKDU2r3_VezFPQDb3ciYsZjWBr3HpNOkU'
'jTrvLmFyOE9Q5R_qQGmc6BYtfk5rn7iIfXlkJAZHXhBy-ElBuiBM'
'-YSkFM7dH92sSIoZ05V4MP09Xcppx7kdwsJy72Sust9Hnd9B7V35'
'YnVF6W791lVHnenhCJOziRmkH4xLLbPkaST2Ks3IHH7tVltM6NsR'
'k3jNdVM',
'e': 'AQAB',
'alg': 'RS256',
'kid': '02B1174234C29F8EFB69911438F597FF3FFEE6B7',
'use': 'sig'
}
]
}
PRIVATE_KEY = """-----<KEY>
-----END RSA PRIVATE KEY-----"""
| 1.875 | 2 |
scripts/plot_uc.py | wyolum/uControl_v3 | 0 | 12770197 | <reponame>wyolum/uControl_v3
from uControl import *
from pylab import *
import util
def load(fn):
    """Load a pickled recording and return its high-rate sample array.

    The pickle may be either a mapping with a 'hirate' key or the bare
    high-rate array itself; in the latter case the unpickled object is
    returned unchanged.
    """
    import pickle  # local import: the module otherwise relies on `from uControl import *`
    # Fix: binary mode is required for pickle data on Python 3, and `with`
    # guarantees the handle is closed even if unpickling fails (the original
    # leaked the file object opened in text mode).
    with open(fn, 'rb') as fh:
        data = pickle.load(fh)
    try:
        out = data['hirate']
    # KeyError: plain dict without the key.  TypeError/IndexError/ValueError:
    # the pickle holds a bare (possibly numpy) array rather than a dict --
    # the original only caught ValueError (old numpy's field-lookup error).
    except (KeyError, TypeError, IndexError, ValueError):
        out = data
    return out
def plot_hirate(hirate):
    """Plot a high-rate recording: gage pressure and flow vs. time, plus a
    high-pass view of the gage signal.

    `hirate` is a 2-D array whose columns are [time_sec, gage_mmHg, flow].
    Relies on pylab/util names brought in by `from uControl import *`.
    """
    times = hirate[:,0]
    gage = hirate[:,1]
    flow = hirate[:,2]
    # Figure 1: raw gage (top) and flow (bottom) traces over the same time axis.
    figure(1)
    ax = pylab.subplot(211)
    ylabel('Gage mmHG')
    pylab.plot(times, gage)
    pylab.subplot(212)
    xlabel('Time sec.')
    ylabel('Flow')
    pylab.plot(times, flow)
    figure(2) ## plot high pass data
    # Low-pass the baseline-shifted gage signal; subtracting it below leaves
    # the high-frequency component of the trace.
    lp_taps = util.get_lowpass_taps(.5, dt=.004, n=100)
    hpd = util.filter(gage - gage[0], lp_taps) + gage[0]
    plot(gage, gage - hpd)
if __name__ == '__main__':
    import sys
    usage = 'python plot_uc.py [ucontrol.pkl, ...]'
    if len(sys.argv) > 1:
        # Overlay every recording given on the command line, then show once.
        for fn in sys.argv[1:]:
            # fn = sys.argv[1]
            hirate = load(fn)
            plot_hirate(hirate)
        show()
    else:
        raise Exception(usage)
| 2.171875 | 2 |
LPDM-Volttron/volttron/applications/lbnl/LPDM/SupervisorAgent/supervisor/supervisor_logic.py | LBNL-ETA/LPDM-Volttron | 0 | 12770198 | <reponame>LBNL-ETA/LPDM-Volttron<filename>LPDM-Volttron/volttron/applications/lbnl/LPDM/SupervisorAgent/supervisor/supervisor_logic.py
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
from uuid import uuid4
import os
import json
# This is a hack since it is a back dependency from VOLTTRON code;
# can we resolve this with something more general that applies to everything?
# Topic template on which each agent publishes its "time until next event".
# NOTE(review): mirrors a constant in the VOLTTRON code base (see the comment
# above about the back dependency); keep the two in sync.
TOPIC_TTIE = "LPDM/time_until_next_event/{id}"


class SupervisorLogic(object):
    """Coordinates simulated time across a set of LPDM agents.

    The supervisor waits until every registered agent has reported how long
    it can run before its next event (TTIE), then advances the simulated
    clock to the earliest of those events and tells that agent to proceed.
    """

    def __init__(self, **kwargs):
        # message_id -> bookkeeping for replies still expected.  The special
        # key "any" holds topic subscriptions whose triggering message id is
        # unknown (e.g. the initial TTIE an agent publishes at startup).
        self.messages_waiting_on = {"any": []}
        # Current simulated time.
        self.time = 0
        # agent_id -> list of topics that agent subscribes to.
        self.agents_and_subscriptions = {}
        self.terminating_scenario = False
        self.scenario_end_timestamp = None
        self.end_scenario_run_msg_id = None
        # Called once when the scenario has fully wound down.
        self.finished_callback = kwargs.get("finished_callback", lambda: None)
        # agent_id -> latest TTIE report from that agent.
        self.times_until_next_event = {}
        # agent_id -> callable(message_id, timestamp) used to push new time.
        self.agent_time_functions = {}
        # Fix: add_event() appends here but the list was never initialised,
        # so the first call used to raise AttributeError.
        self.events = []

    def add_event(self, event):
        """Record a scheduled scenario event."""
        self.events.append(event)

    def set_end_timestamp(self, end_scenario_timestamp):
        """Set the simulated time at which the scenario should terminate."""
        self.scenario_end_timestamp = end_scenario_timestamp

    def add_agent(self, agent_id, agent_time_function):
        """Register an agent and start waiting for its initial TTIE."""
        # Registering twice would clobber live bookkeeping; ignore repeats.
        if agent_id in self.agents_and_subscriptions:
            return
        self.agents_and_subscriptions[agent_id] = []
        # The initial TTIE is published at agent start and responds to
        # nothing, so wait on *any* message id on the agent's TTIE topic.
        self.messages_waiting_on.setdefault("any", [])
        self.messages_waiting_on["any"].append(
            {"topic": TOPIC_TTIE.format(id=agent_id)})
        self.times_until_next_event[agent_id] = {
            "responding_to_message_id": None, "timestamp": None,
            "time_until_next_event": None}
        self.agent_time_functions[agent_id] = agent_time_function

    def on_subscription_announcement(self, agent_id, subscriptions):
        """Record the list of topics *agent_id* subscribes to."""
        self.agents_and_subscriptions[agent_id] = subscriptions

    def get_agents_to_watch_for_response(self, topic):
        """Return the ids of all agents subscribed to *topic*."""
        return [agent_id
                for agent_id, subscriptions in self.agents_and_subscriptions.items()
                if topic in subscriptions]

    def on_device_change_announcement(self, topic, message_id):
        """Start waiting for every subscriber of *topic* to acknowledge."""
        agents_to_watch = self.get_agents_to_watch_for_response(topic)
        if agents_to_watch:
            self.messages_waiting_on[message_id] = {
                "agent_ids": agents_to_watch, "topic": topic}

    def on_finished_processing_announcement(self, agent_id, responding_to, topic):
        """Mark *agent_id*'s reply to message *responding_to* as received."""
        if responding_to and responding_to in self.messages_waiting_on:
            waiting = self.messages_waiting_on[responding_to]
            try:
                waiting["agent_ids"].remove(agent_id)
            # Fix: narrowed from a bare except.  ValueError: duplicate reply
            # (agent already removed); KeyError: entry carries no agent list.
            except (ValueError, KeyError):
                pass
            if len(waiting["agent_ids"]) == 0:
                del self.messages_waiting_on[responding_to]
        # If nothing is outstanding and this reply closes the termination
        # message, all agents generated for this simulation have cleaned
        # themselves up and exited; report completion so the supervisor can
        # stop itself.
        if not self.messages_waiting_on and responding_to == self.end_scenario_run_msg_id:
            self.finished_callback()
            return
        self.check_all_agents_ready_for_next_time()

    def on_time_until_next_event(self, topic, agent_id, message_id, timestamp,
                                 responding_to, time_until_next_event):
        """Record a TTIE report and clear any matching wait entries."""
        # Fix: iterate over a snapshot -- entries are deleted inside the loop,
        # which raised RuntimeError (dict changed size) on Python 3.
        for msg_id, vals in list(self.messages_waiting_on.items()):
            if msg_id.lower() == "any":
                for i in range(len(vals)):
                    if topic == vals[i]["topic"]:
                        del self.messages_waiting_on[msg_id][i]
                        break
                if len(self.messages_waiting_on[msg_id]) == 0:
                    del self.messages_waiting_on[msg_id]
            else:
                if msg_id == responding_to and agent_id in vals["agent_ids"]:
                    vals["agent_ids"].remove(agent_id)
                    if len(vals["agent_ids"]) == 0:
                        del self.messages_waiting_on[msg_id]
        self.times_until_next_event[agent_id] = {
            "message_id": message_id,
            "responding_to_message_id": responding_to,
            "timestamp": timestamp,
            "time_until_next_event": time_until_next_event}
        self.check_all_agents_ready_for_next_time()

    def check_all_agents_ready_for_next_time(self):
        """If every agent has reported a TTIE, advance to the earliest one."""
        if self.terminating_scenario:
            self.finished_callback()
            return
        earliest_agent = None
        earliest_next_event = 1e100  # Something impossibly big to start with.
        for agent_id, values in self.times_until_next_event.items():
            ttie = values["time_until_next_event"]
            # At least one agent has not reported yet; keep waiting.  (Fix:
            # checked *before* the max() below -- an unreported agent also has
            # timestamp None, and max(int, None) is a TypeError on Python 3.)
            if ttie is None:
                return
            self.time = max(self.time, values["timestamp"])
            if ttie < earliest_next_event:
                earliest_agent = agent_id
                earliest_next_event = ttie
        # Fix: with no registered agents `earliest_agent` used to be unbound
        # here, raising NameError.
        if earliest_agent is None:
            return
        self.send_new_time(earliest_agent, earliest_next_event)

    def update_times_until_next_event(self, time_skip):
        """Shift every pending TTIE down by *time_skip* simulated seconds."""
        for agent_id, vals in self.times_until_next_event.items():
            if vals["time_until_next_event"]:
                self.times_until_next_event[agent_id]["time_until_next_event"] = \
                    vals["time_until_next_event"] - time_skip

    def end_scenario_run(self):
        """Begin scenario shutdown; remember the terminating message id."""
        self.terminating_scenario = True
        self.end_scenario_run_msg_id = str(uuid4())

    def send_new_time(self, agent_id, timestamp):
        """Advance the clock by *timestamp* and notify *agent_id*."""
        message_id = str(uuid4())
        # Although some fractional processing time is assumed when agents
        # handle messages, only whole-number times are sent to avoid problems
        # with slight misses against schedules.
        if timestamp > int(timestamp):
            timestamp = int(timestamp) + 1
        self.time += timestamp
        if self.scenario_end_timestamp and self.scenario_end_timestamp < self.time:
            self.end_scenario_run()
            return
        self.update_times_until_next_event(timestamp)
        self.times_until_next_event[agent_id] = {
            "responding_to_message_id": None, "timestamp": None,
            "time_until_next_event": None}
        self.agent_time_functions[agent_id](message_id, timestamp)
        self.messages_waiting_on[message_id] = {"agent_ids": [agent_id]}
| 1.976563 | 2 |
utils/global_settings.py | kchiang6997/toxicity | 0 | 12770199 |
UNKNOWN_WORD = "<unk>"
embedding_dimension = 50
min_count = 5
window_size = 3
sample = 1e-3
negative = 5
vocab_size = None
train_words = None
# Special parameters
MIN_SENTENCE_LENGTH = 3 | 1.773438 | 2 |
setup.py | d4mi1/python-ipx800 | 4 | 12770200 | # -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2018-05-10 17:57:07
# @Last Modified by: <NAME>
# @Last Modified time: 2018-05-28 21:50:38
from distutils.core import setup
# NOTE(review): distutils is deprecated and removed in Python 3.12; switching
# to setuptools' setup() would be a drop-in replacement.
setup(
  name = 'IPX800',
  packages = ['IPX800'],
  version = '0.1.5',
  description = 'Library for controlling GCE-Electronics IPX800',
  author = '<NAME>',
  author_email = '<EMAIL>',
  url = 'https://github.com/d4mi1/python-ipx800',
  # NOTE(review): the download tag (0.1.1) does not match version 0.1.5 -- confirm.
  download_url = 'https://github.com/d4mi1/python-ipx800/archive/0.1.1.tar.gz',
  keywords = ['GCE-Electronics', 'IPX800'],
  classifiers = [],
)
ros_ws/src/baxter_interface/src/baxter_interface/settings.py | mesneym/Baxter-Arm-PP | 0 | 12770201 | <reponame>mesneym/Baxter-Arm-PP<filename>ros_ws/src/baxter_interface/src/baxter_interface/settings.py<gh_stars>0
version https://git-lfs.github.com/spec/v1
oid sha256:ecf7c6e4073ed731198a562b6cb7ecaae49c80b9594f3e2dfeef108ea4868bc0
size 2045
| 0.96875 | 1 |
services/service-template/service_template_gae_handlers.py | google/dnae | 6 | 12770202 | # Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNA - Service template - App Engine handlers.
App Engine handler definitions for the service, including the main "launcher".
"""
import base64
import json
from dna_general_settings import GCE_MACHINE_MAP
from dna_project_settings import PROJECT_ID
from gcp_connector import GCPConnector
from service_template_settings import GBQ_DATASET
from service_template_settings import GCE_RUN_SCRIPT
from service_template_settings import GCS_BUCKET
from service_template_settings import SERVICE_NAME
def service_template_launcher():
    """Enqueue one task per configured account on the level-0 GCE queue.

    Returns the literal string 'OK' once every task has been submitted.
    """
    gcp = GCPConnector(PROJECT_ID)
    queue_name = GCE_MACHINE_MAP['l0']['queue']
    # Change the input for the initial config_data (e.g. from a Spreadsheet)
    config_data = [['account1', 'data_for_account1'],
                   ['account2', 'data_for_account2']]
    for account_id, label in config_data:
        # Params passed to the worker via the task payload.
        task_params = {
            'service': SERVICE_NAME,        # Mandatory field
            'run_script': GCE_RUN_SCRIPT,   # Mandatory field
            'account_id': account_id,
            'label': label,
            'bucket': GCS_BUCKET,
            'dataset': GBQ_DATASET,
        }
        # Add a new task to the task queue with a url-safe base64 JSON payload.
        payload = base64.urlsafe_b64encode(
            json.dumps(task_params).encode()).decode()
        gcp.gct_createtask(queue_name, payload)
    return 'OK'
| 2.140625 | 2 |
sim21/provider/nrtl.py | kpatvt/sim21 | 7 | 12770203 | <filename>sim21/provider/nrtl.py
import numpy as np
import math
from sim21.data import nrtl
from sim21.provider.ideal import IdealVapLiq
def nrtl_model_log_gamma(given_temp, liq_vol_mole, x, ip_g, ip_alpha, valid=None):
    """Return ln(activity coefficients) from the NRTL model.

    Parameters
    ----------
    given_temp : float
        Temperature; units must match ip_g with R = 1.9858775 (i.e. ip_g in
        cal/mol and given_temp in K).
    liq_vol_mole : array_like
        Unused; kept for signature compatibility with other gamma models.
    x : ndarray
        Mole fractions, length n.
    ip_g, ip_alpha : ndarray, shape (n, n)
        NRTL interaction energies and non-randomness parameters.
    valid : iterable of int, optional
        Indices of the components actually present; defaults to all n.
    """
    n = len(x)
    log_gamma = np.zeros(n)
    if valid is None:
        valid = range(n)
    # A single component is always ideal: ln(gamma) = 0.
    if len(valid) == 1:
        return log_gamma
    tau = np.zeros((n, n))
    G = np.zeros((n, n))
    RT = 1.9858775 * given_temp  # R in cal/(mol*K)
    S = np.zeros(n)
    C = np.zeros(n)
    for i in valid:
        for j in valid:
            tau[i, j] = (ip_g[i, j]) / RT
            G[i, j] = math.exp(-ip_alpha[i, j] * tau[i, j])
    for i in valid:
        for j in valid:
            S[i] += x[j] * G[j, i]
            C[i] += x[j] * G[j, i] * tau[j, i]
    for i in valid:
        log_gamma[i] = C[i] / S[i]
        # Fix: sum only over the valid components.  The original looped over
        # range(n), dividing by S[k] == 0 for absent components and turning
        # every result into NaN whenever `valid` was a proper subset.
        for k in valid:
            log_gamma[i] += x[k] * G[i, k] * (tau[i, k] - C[k] / S[k]) / S[k]
    return log_gamma
class IdealVapLiqNRTL(IdealVapLiq):
    """Ideal-vapour / NRTL-liquid property provider."""

    def __init__(self, components=None):
        super().__init__(components)
        # Fix: default the interaction-parameter matrices *before* calling
        # setup_components -- the original assigned None afterwards, which
        # clobbered the matrices setup_components had just generated.
        self._ip_g = None
        self._ip_alpha = None
        if components:
            c = [i for i in components]
            self.setup_components(c)

    def setup_components(self, components, **kwargs):
        """Set up components and load the NRTL binary interaction parameters.

        Raises NotImplementedError when any component pair is missing from
        the NRTL parameter data.
        """
        super().setup_components(components, **kwargs)
        try:
            self._ip_g, self._ip_alpha = nrtl.generate_ip([c.casn for c in components])
        except KeyError:
            raise NotImplementedError

    def log_gamma(self, temp, press, n, valid):
        """Return ln(activity coefficients) for composition *n* at *temp*.

        *press* is accepted for interface compatibility but ignored by the
        NRTL liquid model.
        """
        return nrtl_model_log_gamma(temp, np.zeros_like(n), n, self._ip_g, self._ip_alpha, valid)
| 1.9375 | 2 |
src/load.py | peferso/pegaso-collect | 0 | 12770204 | <reponame>peferso/pegaso-collect
'''
Created on 18 ene. 2022
@author: pedfernandez
'''
import logging
import time
import datetime
import os
import json
import pandas as pd
import pymysql
import subprocess
import hashlib
from utils import AWSOperations, AWSNotifications
logging.basicConfig(
format="%(asctime)-15s [%(levelname)s] %(funcName)s: %(message)s",
level=logging.INFO)
def initial_checks(data_folder):
    """Ensure *data_folder* exists, creating it if needed, and log its contents."""
    time_start = time.time()
    logging.info('Start')
    if os.path.exists(data_folder):
        logging.info('Folder \'' + data_folder + '\' exists: not creating.')
    else:
        logging.warning('Folder ' + data_folder + 'does not exist: creating...')
        os.makedirs(data_folder)
    logging.info('Folder \'' + data_folder + '\' contains the following files:')
    for count, entry in enumerate(os.listdir(data_folder), start=1):
        logging.info('File ' + str(count) + ': \'' + str(entry) + '\'')
    time_end = time.time()
    logging.info('End. Elapsed time: ' + str(time_end - time_start) + ' seconds.')
def scan_csv_files(data_folder):
    """Return the names of the CSV files found directly inside *data_folder*."""
    time_start = time.time()
    logging.info('Start')
    # A file counts as CSV when its last dot-separated piece is 'csv'
    # (same rule as the original extension check).
    csv_names = [entry for entry in os.listdir(data_folder)
                 if entry.split('.')[-1] == 'csv']
    for count, name in enumerate(csv_names, start=1):
        logging.info('File ' + str(count) + ': \'' + str(name) + '\'')
    time_end = time.time()
    logging.info('End. Elapsed time: ' + str(time_end - time_start) + ' seconds.')
    return csv_names
def get_monday_of_week_date(input_batch_date):
    """Return the ISO date (YYYY-MM-DD) of the Monday of the week that
    contains *input_batch_date* (itself a 'YYYY-MM-DD' string).
    """
    # fromisoformat replaces the original manual split/int parsing; it also
    # rejects malformed dates instead of silently mis-parsing them.
    day = datetime.date.fromisoformat(input_batch_date)
    # weekday() is 0 for Monday, so this steps back to the week's Monday.
    monday = day - datetime.timedelta(days=day.weekday())
    return monday.isoformat()
def generate_sql_inserts(file, sql_folder):
    """Translate one scraped-cars CSV into a file of SQL INSERT statements.

    The CSV path encodes the batch: '<prefix>_<date>_<time>_<page>.csv'.
    Output is written to '<sql_folder>/<date>_<time>/<page>.sql'.
    """
    time_start = time.time()
    logging.info('Start')
    # NOTE(review): the batch metadata is parsed from the *whole path*, so an
    # underscore in a directory name would break it -- confirm callers always
    # pass underscore-free folders.
    batch_date = file.split('_')[1] + '_' + file.split('_')[2]
    logical_batch_date = get_monday_of_week_date(file.split('_')[1])
    batch_page = file.split('.')[0].split('_')[-1]
    d_folder = sql_folder + '/' + batch_date
    if not os.path.exists(d_folder):
        logging.warning('Folder ' + d_folder + ' does not exist: creating...')
        os.makedirs(d_folder)
    else:
        logging.info('Folder \'' + d_folder + '\' exists: not creating.')
    df = pd.read_csv(file)
    # Fix: the output file is now opened via a context manager so the handle
    # is closed (and data flushed) even if a row raises.  The original also
    # passed a meaningless sep="','" to a single-argument print.
    with open(d_folder + '/' + batch_page + '.sql', 'w+') as f:
        print('USE pegaso_db;', file=f)
        for index, row in df.iterrows():
            # Hash of the descriptive columns (whitespace stripped,
            # upper-cased), stored alongside the row.
            text = str(row['brand']).replace(' ', '').upper() + \
                str(row['model']).replace(' ', '').upper() + \
                str(row['price_c']).replace(' ', '').upper() + \
                str(row['price_f']).replace(' ', '').upper() + \
                str(row['kilometers']).replace(' ', '').upper() + \
                str(row['power']).replace(' ', '').upper() + \
                str(row['doors']).replace(' ', '').upper() + \
                str(row['profesional_vendor']).replace(' ', '').upper() + \
                str(row['automatic_gearbox']).replace(' ', '').upper() + \
                str(row['year']).replace(' ', '').upper()
            hashed_cols = hashlib.sha3_256(text.encode()).hexdigest()
            # NOTE(review): values are spliced into the statement by string
            # concatenation.  Tolerable only because the CSVs come from our
            # own scraper; parameterized INSERTs would be safer.
            query = 'INSERT INTO raw_data VALUES (\'' + \
                str(row['id']) + '\',\'' + \
                str(row['brand']) + '\',\'' + \
                str(row['model']) + '\',\'' + \
                str(row['price_c']) + '\',\'' + \
                str(row['price_f']) + '\',\'' + \
                str(row['kilometers']) + '\',\'' + \
                str(row['power']) + '\',\'' + \
                str(row['doors']) + '\',\'' + \
                str(row['profesional_vendor']) + '\',\'' + \
                str(row['automatic_gearbox']) + '\',\'' + \
                str(row['year']) + '\',\'' + \
                str(row['source']) + '\',\'' + \
                str(hashed_cols) + '\',\'' + \
                str(logical_batch_date) + '\');'
            # pandas renders missing values as 'nan'; store them as SQL NULL.
            query = query.replace("'nan'", "NULL")
            print(query, file=f)
    # id,brand,model,price_c,price_f,kilometers,power,doors,profesional_vendor,automatic_gearbox,year,source
    time_end = time.time()
    logging.info('End. Elapsed time: ' + str(time_end - time_start) + ' seconds.')
def check_db_disk_usage():
    """Return (used_MB, available_MB, available_pct) for the DB root volume.

    Shells out over ssh and parses `df -k`; a value that cannot be obtained
    is reported as the string '-1'.
    """
    time_start = time.time()
    logging.info('Start')
    aws.start_database_ec2_if_stopped()
    ip = aws.get_database_public_ip()
    SSH_KEYS_DIR = os.environ['SSH_KEYS_DIR']
    SSH_KEY_APPLCTN = os.environ['SSH_KEY_APPLCTN']
    # NOTE(review): the command is built by concatenation; ip and the env vars
    # are trusted here, but subprocess.run with an argument list would be more
    # robust than os.popen.
    ssh = 'ssh -i ' + str(SSH_KEYS_DIR) + '/' + str(SSH_KEY_APPLCTN) + \
          ' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ec2-user@' + str(ip)
    cmd_1 = ' "df -k" 2>/dev/null | grep /dev/xvda1 | awk -F " " \'{ print $3 }\' '
    cmd_2 = ' "df -k" 2>/dev/null | grep /dev/xvda1 | awk -F " " \'{ print $4 }\' '
    stdout_l = []
    for cmd in [cmd_1, cmd_2]:
        logging.info('Trying ' + ssh + cmd)
        try:
            # df reports KB; round to MB with two decimals.
            stdout = str(round(int(str(os.popen(ssh + cmd).read()).replace('\n', '')) / 1000, 2))
        except ValueError as msg:
            logging.warning('Could not run command ' + cmd + '.')
            # Fix: keep the sentinel a *string* -- the integer -1 used to make
            # the log concatenations below raise TypeError.
            stdout_l.append('-1')
        else:
            stdout_l.append(stdout)
    root_fs_disk_used = stdout_l[0]
    root_fs_disk_avmb = stdout_l[1]
    # Fix: the original divided available by (available + available), which
    # always yielded ~50%.  Percentage free = available / (used + available).
    root_fs_disk_avpc = str(round(float(root_fs_disk_avmb) /
                                  (float(root_fs_disk_used) + float(root_fs_disk_avmb) + 0.0000001) * 100, 2))
    logging.info('Amount of root db disk used (MB) ' + root_fs_disk_used)
    logging.info('Amount of root db disk available (MB) ' + root_fs_disk_avmb)
    logging.info('Amount of root db disk available (%) ' + root_fs_disk_avpc)
    time_end = time.time()
    logging.info('End. Elapsed time: ' + str(time_end - time_start) + ' seconds.')
    return root_fs_disk_used, root_fs_disk_avmb, root_fs_disk_avpc
def connect_to_database(aws):
    """Return an open pymysql connection to pegaso_db, retrying until ready.

    Starts the database EC2 instance if needed, then loops with a 120 s
    back-off until (1) the TCP connection succeeds and (2) a test SELECT
    runs.  Credentials come from the DBUSER/DBPASS environment variables.
    Loops forever if the database never comes up.
    """
    time_start = time.time()
    logging.info('Start')
    aws.start_database_ec2_if_stopped()
    ip = aws.get_database_public_ip()
    ready = False
    # Phase 1: wait until the server accepts connections at all.
    while not ready:
        try:
            logging.info('Trying to connect...')
            connection = pymysql.connect(host=ip,
                                         user=os.environ['DBUSER'],
                                         passwd=os.environ['DBPASS'],
                                         db="pegaso_db",
                                         charset='utf8')
        except pymysql.err.OperationalError as msg:
            logging.warning("Unable to connect" + str(msg))
            logging.warning("Waiting 120 seconds for graceful start.")
            time.sleep(120)
            ready = False
        else:
            logging.info("The database is up. Proceeding")
            ready = True
    ready = False
    # Phase 2: the server may accept connections before it can serve queries;
    # wait until a real SELECT succeeds.
    while not ready:
        try:
            query='select * from global_statistics;'
            logging.info('Trying to run a select query: \'' + query + '\'')
            connection.cursor().execute(query)
            connection.commit()
        except pymysql.err.OperationalError as msg:
            logging.warning("Unable to make select" + str(msg))
            logging.warning("Waiting 120 seconds for graceful startup.")
            time.sleep(120)
            ready = False
        else:
            logging.info("The database is ready. Proceeding")
            ready = True
    time_end = time.time()
    logging.info('End. Elapsed time: ' + str(time_end - time_start) + ' seconds.')
    return connection
# Variables
THIS_SCRIPT_PATH = os.environ['PEGASO_COLLT_DIR']  # repo root; the script chdirs here
execution_timestamp = datetime.datetime.now()
csv_data_folder = 'processed-data'  # input: scraped CSV batches
sql_data_folder = 'sql-data'        # output: generated INSERT scripts
mode = 'GENERATE_SQL_FILES'
# Main
os.chdir(THIS_SCRIPT_PATH)
initial_checks(sql_data_folder)
csv_files = scan_csv_files(csv_data_folder)
# Translate every scraped CSV into a per-batch .sql file of INSERT statements.
if mode == 'GENERATE_SQL_FILES':
    for csv_f in csv_files:
        generate_sql_inserts(csv_data_folder + '/' + csv_f, sql_data_folder)
logging.info('Retrieving initial database state...')
aws = AWSOperations()
ec2_info_dict = aws.retrieve_aws_ec2_info()
INIT_DB_STATE = ec2_info_dict['insSt']  # presumably 'running'/'stopped' -- confirm
d_used, d_avmb, d_avpc = check_db_disk_usage()
SCRIPT = 'load.py'
aws_n = AWSNotifications()
# Emit a start event carrying the DB state and disk headroom for monitoring.
aws_n.generate_json_event(SCRIPT, 'Start', 'Preparing to inject sql queries. ' +
                          'The database initial state is ' + str(INIT_DB_STATE) + '. ' +
                          'Disk usage info in root volume -> MB used: ' + str(d_used) + '.' +
                          ' MB available: ' + str(d_avmb) + ' (' + str(d_avpc) + ').')
logging.info('Initial database state is: ' + INIT_DB_STATE)
connection = connect_to_database(aws)
# Counters: batch index, successful queries, failed (other), failed (primary key).
ib = 0
nq_s = 0
nq_f_o = 0
nq_f_pk = 0
# Walk every batch folder under sql-data and execute each .sql file inside,
# counting successes and the two failure classes.
for batch_date in os.listdir(sql_data_folder):
    ib += 1
    logging.info('Iterating batch \'' + batch_date + '\' (' + str(ib) + ' of ' + str(len(os.listdir(sql_data_folder))) + ').')
    iq = 0
    for query_file in os.listdir(sql_data_folder + '/' + batch_date):
        iq += 1
        time_start_q = time.time()
        logging.info(' + Executing query file \'' + query_file + '\' (' + str(iq) + ' of ' + str(len(os.listdir(sql_data_folder + '/' + batch_date))) + ').' + ' [batch \'' + batch_date + '\' (' + str(ib) + ' of ' + str(len(os.listdir(sql_data_folder))) + ')]')
        file = open(sql_data_folder + '/' + batch_date + '/' + query_file, 'r')
        sql_file = file.read()
        file.close()
        # Statements were written one per line, terminated by ';'.
        sql_commands = sql_file.split(';\n')
        con_cursor = connection.cursor()
        for command in sql_commands:
            # This will skip and report errors
            # For example, if the tables do not yet exist, this will skip over
            # the DROP TABLE commands
            if command != '':
                try:
                    con_cursor.execute(command)
                    connection.commit()
                except pymysql.err.OperationalError as msg:
                    logging.warning(" ++ Command skipped: " + str(msg))
                    print(command)
                    nq_f_o += 1
                except pymysql.err.IntegrityError as msg:
                    # Primary-key collisions are expected on re-runs (dedup).
                    logging.warning(" ++ Command skipped: " + str(msg))
                    print(command)
                    nq_f_pk += 1
                # Fix: the original wrote 'except pymysql.err', which names a
                # *module*, not an exception class (TypeError at catch time);
                # catch the library's base exception instead.
                except pymysql.Error as msg:
                    # Fix: logging.Error does not exist (AttributeError);
                    # use logging.error.
                    logging.error(" ++ Command skipped: " + str(msg))
                    print(command)
                    nq_f_o += 1
                else:
                    logging.info(" ++ Command executed: " + str(command))
                    nq_s += 1
        con_cursor.close()
        time_end_q = time.time()
        logging.info(' + Finished query file \'' + query_file + '\' (' + str(iq) + ' of ' + str(len(os.listdir(sql_data_folder + '/' + batch_date))) + ').' + ' ' + str(time_end_q - time_start_q) + ' seconds elapsed.' + ' [batch \'' + batch_date + '\' (' + str(ib) + ' of ' + str(len(os.listdir(sql_data_folder))) + ')]')
connection.close()
# Report the final state, query counters and disk headroom to monitoring.
ec2_info_dict = aws.retrieve_aws_ec2_info()
d_used, d_avmb, d_avpc = check_db_disk_usage()
aws_n.generate_json_event(SCRIPT, 'End', 'The data load has finished. The database final state is ' + str(ec2_info_dict['insSt']) + '.' +
                          'A total of ' + str(nq_s + nq_f_o + nq_f_pk) + ' were executed, ' +
                          str(nq_s) + ' were OK, ' + str(nq_f_pk) + ' failed due to primary key, ' + str(nq_f_o) + ' for other reasons. ' +
                          'The queries can be tracked by batch date(s): ' +
                          str(os.listdir(sql_data_folder)).replace('[', '').replace(']', '').replace('\'', '').replace('\"', '') +
                          '. Disk usage info in root volume -> MB used: ' + str(d_used) + '.' +
                          ' MB available: ' + str(d_avmb) + ' (' + str(d_avpc) + ').')
# Leave the EC2 instance as we found it: stop it again only if it started stopped.
if INIT_DB_STATE.lower() == 'stopped':
    logging.warning("Stopping database instance to leave it in initial state...")
    aws.stop_database_ec2_if_running()
    logging.warning("Stopped.")
credit_calculator.py | ChameleonTartu/NorwegianLoanCalculator | 0 | 12770205 | #!/usr/bin/python
# DNB loan terms: nominal yearly rate, derived monthly rate, per-month fee and
# one-off initial payment (amounts presumably in NOK -- confirm).
DNB_YEARLY_PERCENTAGE = 2.10 / 100
DNB_MONTHLY_PERCENTAGE = DNB_YEARLY_PERCENTAGE / 12
DNB_FEE = 50
DNB_INITIAL_PAYMENT = 10000
# Nordea loan terms, same structure as the DNB constants above.
NORDEA_YEARLY_PERCENTAGE = 2.15 / 100
NORDEA_MONTHLY_PERCENTAGE = NORDEA_YEARLY_PERCENTAGE / 12
NORDEA_FEE = 65
NORDEA_INITIAL_PAYMENT = 0
def months_until_paid_out(credit_sum, monthly_payment, initial_payment, monthly_percentage, monthly_fee):
    """Simulate monthly repayments and report when the loan is paid off.

    Interest accrues monthly on the outstanding balance; the fee is deducted
    from every payment and the one-off *initial_payment* from the first.
    Prints the outcome and returns the number of months needed, or None if
    the loan is not repaid within 30 years.
    """
    MAX_YEARS = 30
    MAX_MONTHS = MAX_YEARS * 12
    # Fix: the upper bound is MAX_MONTHS + 1 so the full 360th month is
    # simulated (the original range(1, MAX_MONTHS) stopped one month short).
    for month in range(1, MAX_MONTHS + 1):
        effective_payment = monthly_payment - monthly_fee
        if month == 1:
            effective_payment -= initial_payment
        credit_sum = credit_sum * (1 + monthly_percentage) - effective_payment
        # Fix: '<= 0' so a payment that clears the balance exactly counts as
        # paid out in that month rather than one month later.
        if credit_sum <= 0:
            print("You will pay out credit in approximately {} months. Sum left {}".format(month, credit_sum))
            return month
    print("You have to pay way much bigger monthly payment than {}".format(monthly_payment))
    return None
if __name__ == "__main__":
credit_sum = 3000000
monthly_payment = 12500
print("DNB: ")
months_until_paid_out(credit_sum, monthly_payment, DNB_INITIAL_PAYMENT, DNB_MONTHLY_PERCENTAGE, DNB_FEE)
print("NORDEA: ")
months_until_paid_out(credit_sum, monthly_payment, NORDEA_INITIAL_PAYMENT, NORDEA_MONTHLY_PERCENTAGE, NORDEA_FEE)
| 3.90625 | 4 |
users/views.py | Yttrium40/quiz_site | 0 | 12770206 | <reponame>Yttrium40/quiz_site<filename>users/views.py
import django.contrib.auth as django_auth
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from quizzes.models import Quiz
from .models import Profile
from .utils import get_current_user, validate_new_user
def login(request):
    """Render the login form."""
    context = {'current_user': get_current_user(request)}
    return render(request, 'users/login.html', context)
def logging_in(request):
    """Authenticate the submitted credentials and start a session.

    On success, redirect to the user's profile page; otherwise re-render
    the login form with an error message.
    """
    username = request.POST['username_input']
    password = request.POST['password_input']
    user = django_auth.authenticate(request, username=username, password=password)
    if user is None:
        context = {
            'current_user': get_current_user(request),
            'error_messages': ['User not found.'],
        }
        return render(request, 'users/login.html', context)
    django_auth.login(request, user)
    return HttpResponseRedirect(reverse('users:profile', args=(username,)))
@login_required
def logout(request):
    """End the authenticated session and redirect.

    NOTE(review): redirecting to '' sends the browser back to the current
    URL path; confirm this is the intended post-logout destination.
    """
    django_auth.logout(request)
    return HttpResponseRedirect('')
def register(request):
    """Render the account-registration form."""
    context = {'current_user': get_current_user(request)}
    return render(request, 'users/register.html', context)
def registering(request):
    """Validate and create a new user account, then log them in.

    Re-renders the registration form with error messages when validation
    fails; otherwise creates the User, authenticates the session and
    redirects to the new profile page.
    """
    new_username = request.POST['username_input']
    new_password = request.POST['password_input']
    confirm_password = request.POST['password_confirm']
    error_messages = validate_new_user(new_username, new_password, confirm_password)
    if error_messages != []:
        return render(request, 'users/register.html', {
            'current_user': get_current_user(request),
            'error_messages': error_messages
        })
    # NOTE(review): first/last name and email are placeholder values — the
    # email literal in particular looks like a redacted placeholder; confirm.
    new_user = User(username=new_username, first_name='NONE', last_name='NONE', email='<EMAIL>')
    # BUG FIX: the argument here had been replaced by a broken placeholder;
    # set_password must receive the raw password so Django hashes it.
    new_user.set_password(new_password)
    new_user.save()
    django_auth.login(request, new_user)
    return HttpResponseRedirect(reverse('users:profile', args=(new_username,)))
def profile(request, username):
    """Display the profile page for *username* (404 if unknown)."""
    viewed_user = get_object_or_404(User, username=username)
    context = {
        'current_user': get_current_user(request),
        'user': viewed_user,
    }
    return render(request, 'users/profile.html', context)
def ajax_profile_update(request):
    """Persist an updated profile description for the logged-in user (AJAX).

    Returns an empty 200 response on success.
    """
    new_description = request.POST['description']
    current_user = get_current_user(request)
    current_user.profile.description = new_description
    # BUG FIX: the description lives on the related Profile row, so that is
    # the object that must be saved; saving only the User (as before)
    # silently dropped the change.
    current_user.profile.save()
    return HttpResponse()
def own_quizzes(request, username):
    """List the quizzes authored by *username*, newest first (404 if unknown)."""
    author = get_object_or_404(User, username=username)
    # Slice-reversal evaluates the queryset and yields a newest-first list.
    authored = author.authored_set.all()[::-1]
    context = {
        'current_user': get_current_user(request),
        'user': author,
        'quizzes': authored,
    }
    return render(request, 'users/quizzes.html', context)
def taken_quizzes(request, username):
    """List the quizzes taken by *username*, newest first (404 if unknown)."""
    taker = get_object_or_404(User, username=username)
    # Slice-reversal evaluates the queryset and yields a newest-first list.
    taken = taker.taken_set.all()[::-1]
    context = {
        'current_user': get_current_user(request),
        'user': taker,
        'quizzes': taken,
    }
    return render(request, 'users/quizzes.html', context)
| 2.375 | 2 |
lino_book/projects/dumps/settings/memory.py | lino-framework/lino_book | 3 | 12770207 | <filename>lino_book/projects/dumps/settings/memory.py
from .a import *
# Tag the site name so it is obvious this variant runs on an in-memory DB.
SITE.verbose_name = SITE.verbose_name + " (:memory:)"
# ':memory:' is SQLite's in-memory database name — replaces whatever
# file-based NAME the base settings module (.a) configured.
DATABASES['default']['NAME'] = ':memory:'
| 1.507813 | 2 |
veles/loader/pickles.py | AkshayJainG/veles | 1,007 | 12770208 | # -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Jan 25, 2015
Loaders which get data from pickles
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
import pickle
import numpy
import six
from zope.interface import implementer
from veles import error
from veles.compat import from_none
from veles.external.progressbar import ProgressBar
from veles.memory import interleave
from veles.loader.base import CLASS_NAME, Loader
from veles.loader.image import IImageLoader, COLOR_CHANNELS_MAP
from veles.loader.fullbatch import FullBatchLoader, IFullBatchLoader
from veles.loader.fullbatch_image import FullBatchImageLoader
@implementer(IFullBatchLoader)
class PicklesLoader(FullBatchLoader):
    """
    Loads samples from pickles for data set.

    Pickle file paths are supplied via the "test_pickles",
    "validation_pickles" and "train_pickles" keyword arguments; each file
    must unpickle to a dict with "data" (numpy array of samples) and
    "labels" keys.  All samples are concatenated into one full batch in
    test/validation/train order.
    """
    def __init__(self, workflow, **kwargs):
        super(PicklesLoader, self).__init__(workflow, **kwargs)
        self._test_pickles = list(kwargs.get("test_pickles", []))
        self._validation_pickles = list(kwargs.get("validation_pickles", []))
        self._train_pickles = list(kwargs.get("train_pickles", []))
        # Tuple index matches the data set index convention used throughout
        # (0=test, 1=validation, 2=train — see CLASS_NAME/class_lengths).
        self._pickles = (self.test_pickles, self.validation_pickles,
                         self.train_pickles)

    @property
    def test_pickles(self):
        return self._test_pickles

    @property
    def validation_pickles(self):
        return self._validation_pickles

    @property
    def train_pickles(self):
        return self._train_pickles

    def reshape(self, shape):
        """Hook for subclasses: map the on-disk sample shape to the
        in-memory one (identity here)."""
        return shape

    def transform_data(self, data):
        """Hook for subclasses: transform a freshly loaded data array
        (identity here)."""
        return data

    def load_data(self):
        """Read all pickles, validate sample shapes and fill the arrays."""
        pbar = ProgressBar(maxval=sum(len(p) for p in self._pickles),
                           term_width=40)
        self.info("Loading %d pickles...", pbar.maxval)
        pbar.start()
        loaded = [self.load_pickles(i, self._pickles[i], pbar)
                  for i in range(3)]
        pbar.finish()
        self.info("Initializing the arrays...")
        # The reference sample shape is taken from the train set; test and
        # validation sets (when non-empty) must match it.
        shape = loaded[2][1][0].shape[1:]
        for i in range(2):
            if loaded[i][0] > 0:
                shi = loaded[i][1][0].shape[1:]
                if shape != shi:
                    raise error.BadFormatError(
                        "TRAIN and %s sets have the different sample shape "
                        "(%s vs %s)" % (CLASS_NAME[i], shape, shi))
        self.create_originals(self.reshape(shape))
        # offsets[0] tracks the fill position in original_data,
        # offsets[1] the one in original_labels.
        offsets = [0, 0]
        for ds in range(3):
            if loaded[ds][0] == 0:
                continue
            for arr in loaded[ds][1]:
                self.original_data[offsets[0]:(offsets[0] + arr.shape[0])] = \
                    self.transform_data(arr)
                offsets[0] += arr.shape[0]
            for arr in loaded[ds][2]:
                self.original_labels[offsets[1]:(offsets[1] + arr.shape[0])] =\
                    arr
                offsets[1] += arr.shape[0]

    def load_pickles(self, index, pickles, pbar):
        """Unpickle one data set.

        Args:
            index: data set index (0=test, 1=validation, 2=train); used for
                error reporting and to record class_lengths.
            pickles: list of pickle file paths.
            pbar: progress bar, advanced once per file.

        Returns:
            (length, data_arrays, label_arrays) tuple.
        """
        unpickled = []
        for pick in pickles:
            try:
                with open(pick, "rb") as fin:
                    self.debug("Loading %s...", pick)
                    if six.PY3:
                        # Pickles written by Python 2 carry 8-bit strings;
                        # charmap decodes them losslessly 1:1.
                        loaded = pickle.load(fin, encoding='charmap')
                    else:
                        loaded = pickle.load(fin)
                unpickled.append(loaded)
                pbar.inc()
            except Exception as e:
                self.warning(
                    "Failed to load %s (part of %s set)" %
                    (pick, CLASS_NAME[index]))
                raise from_none(e)
        data = []
        labels = []
        for obj, pick in zip(unpickled, pickles):
            if not isinstance(obj, dict):
                raise TypeError(
                    "%s has the wrong format (part of %s set)" %
                    (pick, CLASS_NAME[index]))
            try:
                data.append(obj["data"])
                labels.append(
                    numpy.array(obj["labels"], dtype=Loader.LABEL_DTYPE))
            except KeyError as e:
                self.error("%s has the wrong format (part of %s set)",
                           pick, CLASS_NAME[index])
                raise from_none(e)
        # lengths[0] counts data samples, lengths[1] label samples; they
        # must agree and every array must share the first array's
        # per-sample shape.
        lengths = [0, sum(len(l) for l in labels)]
        for arr in data:
            lengths[0] += arr.shape[0]
            if arr.shape[1:] != data[0].shape[1:]:
                raise error.BadFormatError(
                    "Array has a different shape: expected %s, got %s"
                    "(%s set)" % (data[0].shape[1:],
                                  arr.shape[1:], CLASS_NAME[index]))
        if lengths[0] != lengths[1]:
            # BUG FIX: "%" applied to a *list* raises TypeError, so the
            # intended BadFormatError message was never built; the format
            # operator needs a tuple of the two counts.
            raise error.BadFormatError(
                "Data and labels has the different number of samples (data %d,"
                " labels %d)" % tuple(lengths))
        length = lengths[0]
        self.class_lengths[index] = length
        return length, data, labels
@implementer(IImageLoader)
class PicklesImageFullBatchLoader(PicklesLoader, FullBatchImageLoader):
    """Full-batch image loader whose raw samples come from pickles.

    PicklesLoader reads the raw arrays; FullBatchImageLoader then
    re-imports them as images through the get_image_* accessors below.
    """
    MAPPING = "full_batch_pickles_image"
    def __init__(self, workflow, **kwargs):
        super(PicklesImageFullBatchLoader, self).__init__(workflow, **kwargs)
        # Since we can not extract the color space information from pickles
        # set it explicitly without any default value
        self.color_space = kwargs["color_space"]
    def get_image_label(self, key):
        # key is the global sample index produced by get_keys()
        return int(self.image_labels[key])
    def get_image_info(self, key):
        # First two dims of the sample (presumably height, width — TODO
        # confirm layout) plus the configured color space.
        return self.image_data[key].shape[:2], self.color_space
    def get_image_data(self, key):
        return self.image_data[key]
    def get_keys(self, index):
        # Keys are contiguous global sample indices; offsets delimit the
        # test / validation / train regions of the full batch.
        offsets = [0, self.class_lengths[0],
                   self.class_lengths[0] + self.class_lengths[1],
                   self.total_samples]
        self.original_shape = self.image_data.shape[1:-1]
        return range(offsets[index], offsets[index + 1])
    def reshape(self, shape):
        # Convert a channels-first on-disk sample shape to channels-last.
        if shape[0] == COLOR_CHANNELS_MAP[self.color_space]:
            return shape[1:] + (shape[0],)
        return shape
    def transform_data(self, data):
        # Interleave channels-first batches so they match the
        # channels-last shape produced by reshape().
        if data.shape[1] == COLOR_CHANNELS_MAP[self.color_space]:
            return interleave(data)
        return data
    def load_data(self):
        # Load raw arrays via PicklesLoader, temporarily move them into
        # image_data/image_labels, then let FullBatchImageLoader re-read
        # them via the accessors above.  Statement order matters here.
        PicklesLoader.load_data(self)
        self.original_class_lengths = self.class_lengths
        self.image_data = self.original_data.mem
        self.original_data.mem = None
        self.image_labels = self.original_labels[:]
        del self.original_labels[:]
        FullBatchImageLoader.load_data(self)
        assert self.original_class_lengths == self.class_lengths
        # The temporary raw copy is no longer needed after re-import.
        del self.image_data
    def initialize(self, device, **kwargs):
        super(PicklesImageFullBatchLoader, self).initialize(
            device=device, **kwargs)
        # Labels were copied into the loader's own arrays by now.
        del self.image_labels
| 1.6875 | 2 |
checkov/version.py | meghaddn/checkov | 0 | 12770209 | version = '2.0.396'
| 1.085938 | 1 |
tests/datastructures/test_queue.py | TristenSeth/campy | 5 | 12770210 | <filename>tests/datastructures/test_queue.py
"""Tests for the :mod:`campy.datastructures.queue` module."""
| 1.09375 | 1 |
creation/lib/cvWParams.py | bbockelm/glideinWMS | 0 | 12770211 | <reponame>bbockelm/glideinWMS
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# This module contains the create_frontend params class
#
# Author:
# <NAME>
#
import os
import copy
import re
import imp
import os.path
import imp
import string
import socket
from glideinwms.lib import xmlParse
import cWParams
from matchPolicy import MatchPolicy
import pprint
class VOFrontendSubParams(cWParams.CommonSubParams):
    """Sub-parameters helper for the VO frontend configuration."""
    # return attribute value in the proper python format
    # (delegates to the module-level extract_attr_val helper)
    def extract_attr_val(self,attr_obj):
        return extract_attr_val(attr_obj)
######################################################
# Params used by create_glideins and recreate_glideins
class VOFrontendParams(cWParams.CommonParams):
    # populate self.defaults
    def init_defaults(self):
        """Build the default parameter tree for the frontend configuration.

        Populates self.defaults (and self.group_defaults) with
        commentedOrderedDict entries of the form
        (default_value, type_label, description, sub_defaults) that mirror
        the frontend XML schema: work/stage/monitor dirs, log retention,
        collectors, CCBs, security/credentials, match policies, per-group
        config knobs and the high-availability section.
        """
        self.init_support_defaults()
        # VO scripts should start after the factory has been set completely
        # but there could be exceptions
        # Files/Validation/Custom scripts settings for frontend
        self.file_defaults["after_entry"]=("True",'Bool','Should this file be loaded after the factory entry ones?',None)
        # Publishing attr specific to frontend
        self.attr_defaults["type"]=["string","string|int|expr","What kind on data is value. (if expr, a python expression with access to frontend and glidein dictionaries)",None]
        # Config section exclusive to frontend group
        group_config_defaults=cWParams.commentedOrderedDict()
        group_config_running_defaults=cWParams.commentedOrderedDict()
        group_config_running_defaults["max"]=['10000',"nr_jobs","What is the max number of running glideins I want to get to",None]
        group_config_running_defaults["relative_to_queue"]=['1.15',"fraction","Max relative to number of matching jobs in the queue.",None]
        group_config_defaults['running_glideins_per_entry']=group_config_running_defaults
        common_config_running_total_defaults=cWParams.commentedOrderedDict()
        common_config_running_total_defaults["max"]=['100000',"nr_jobs","What is the max number of running glideins I want to get to - globally",None]
        common_config_running_total_defaults["curb"]=['90000',"nr_jobs","When should I start curbing glidein submission",None]
        group_config_defaults['running_glideins_total']=common_config_running_total_defaults
        group_config_idle_defaults=cWParams.commentedOrderedDict()
        group_config_idle_defaults["max"]=['100',"nr_jobs","How much pressure should I apply to the entry points",None]
        group_config_idle_defaults["reserve"]=['5',"nr_jobs","How much to overcommit.",None]
        group_config_defaults['idle_glideins_per_entry']=group_config_idle_defaults
        group_config_vms_defaults=cWParams.commentedOrderedDict()
        group_config_vms_defaults["max"]=['100',"nr_vms","How many idle VMs should I tollerate, before stopping submitting glideins",None]
        group_config_vms_defaults["curb"]=['5',"nr_vms","How many idle VMs should I tollerate, before starting to curb submissions.",None]
        group_config_defaults['idle_vms_per_entry']=group_config_vms_defaults
        # Global config section
        common_config_vms_total_defaults=cWParams.commentedOrderedDict()
        common_config_vms_total_defaults["max"]=['1000',"nr_jobs","How many total idle VMs should I tollerate, before stopping submitting glideins",None]
        common_config_vms_total_defaults["curb"]=['200',"nr_jobs","How many total idle VMs should I tollerate, before starting to curb submissions.",None]
        group_config_defaults['idle_vms_total']=common_config_vms_total_defaults
        group_config_proc_work_defaults=cWParams.commentedOrderedDict()
        group_config_proc_work_defaults["matchmakers"]=['3',"NR","Max number of worker processes that will be doing the matchmaking",None]
        group_config_defaults['processing_workers']=group_config_proc_work_defaults
        # not exported and order does not matter, can stay a regular dictionary
        sub_defaults={'attrs':(xmlParse.OrderedDict(),'Dictionary of attributes',"Each attribute group contains",self.attr_defaults),
                      'files':([],'List of files',"Each file group contains",self.file_defaults)}
        # User Pool collectors
        collector_defaults=cWParams.commentedOrderedDict()
        collector_defaults["node"]=(None,"nodename","Factory collector node name (for example, fg2.my.org:9999)",None)
        collector_defaults["DN"]=(None,"dn","Factory collector distinguised name (subject) (for example, /DC=org/DC=myca/OU=Services/CN=fg2.my.org)",None)
        collector_defaults["factory_identity"]=("<EMAIL>","authenticated_identity","What is the AuthenticatedIdentity of the factory at the WMS collector",None)
        collector_defaults["my_identity"]=("<EMAIL>","authenticated_identity","What is the AuthenticatedIdentity of my proxy at the WMS collector",None)
        # User schedulers
        schedd_defaults=cWParams.commentedOrderedDict()
        schedd_defaults["fullname"]=(None,"name","User schedd name (for example, <EMAIL>)",None)
        schedd_defaults["DN"]=(None,"dn","User schedd distinguised name (subject) (for example, /DC=org/DC=myca/OU=Services/CN=sb1.my.org)",None)
        # match_attr for factory and job query_expr
        query_attrs_defaults=cWParams.commentedOrderedDict()
        query_attrs_defaults['type']=('string','string|int|real|bool','Attribute type',None)
        # Factory and job query_expr
        fj_match_defaults=cWParams.commentedOrderedDict()
        fj_match_defaults["query_expr"]=['True','CondorExpr','Expression for selecting user jobs',None]
        fj_match_defaults["match_attrs"]=(xmlParse.OrderedDict(),"Dictionary of ClassAd attributes","Each attribute contains",query_attrs_defaults)
        # Factory match settings
        factory_match_defaults=copy.deepcopy(fj_match_defaults)
        factory_match_defaults["collectors"]=([],"List of factory collectors","Each collector contains",collector_defaults)
        # Job match settings
        job_match_defaults=copy.deepcopy(fj_match_defaults)
        job_match_defaults["schedds"]=([],"List of user schedds","Each schedd contains",schedd_defaults)
        # Match section. Aka VO policies.
        match_defaults=cWParams.commentedOrderedDict()
        match_defaults["factory"]=factory_match_defaults
        match_defaults["job"]=job_match_defaults
        match_defaults["match_expr"]=('True','PythonExpr', 'Python expression for matching jobs to factory entries with access to job and glidein dictionaries',None)
        match_defaults["start_expr"]=('True','CondorExpr', 'Condor expression for matching jobs to glideins at runtime',None)
        match_defaults["policy_file"]=(None, 'PolicyFile', 'External policy file where match_expr, query_expr, start_expr and match_attr are defined',None)
        # Credential settings
        proxy_defaults=cWParams.commentedOrderedDict()
        proxy_defaults["absfname"]=(None,"fname","x509 proxy file name (see also pool_idx_list)",None)
        proxy_defaults["keyabsfname"]=(None,"fname","for key files, file name of the key pair",None)
        proxy_defaults["pilotabsfname"]=(None,"fname","to specify a different pilot proxy instead of using submit proxy",None)
        proxy_defaults["type"]=("grid_proxy","proxy_type","Type of credential: grid_proxy,cert_pair,key_pair,username_password",None)
        proxy_defaults["trust_domain"]=("OSG","grid_type","Trust Domain",None)
        proxy_defaults["creation_script"]=(None,"command","Script to re-create credential",None)
        proxy_defaults["update_frequency"]=(None,"int","Update proxy when there is this much time left",None)
        proxy_defaults["vm_id"]=(None,"vm_id","VM Id",None)
        proxy_defaults["vm_type"]=(None,"vm_type","VM Type",None)
        proxy_defaults["pool_idx_len"]=(None,"boolean","Adds leading zeros to the suffix so all filenames the same length",None)
        proxy_defaults["pool_idx_list"]=(None,"string","List of indices, can include ranges of indices",None)
        proxy_defaults["security_class"]=(None,"id","Proxies in the same security class can potentially access each other (Default: proxy_nr)",None)
        proxy_defaults["vm_id_fname"]=(None,"fname","to specify a vm id without reconfig",None)
        proxy_defaults["vm_type_fname"]=(None,"fname","to specify a vm type without reconfig",None)
        security_defaults=cWParams.commentedOrderedDict()
        security_defaults["proxy_selection_plugin"]=(None,"proxy_name","Which proxy selection plugin should I use (ProxyAll if None)",None)
        security_defaults["credentials"]=([],'List of credentials',"Each proxy element contains",proxy_defaults)
        security_defaults["security_name"]=(None,"frontend_name","What name will we advertize for security purposes?",None)
        # Per-group defaults: a group can override match/config/attrs/files
        # and carries its own (deep-copied) security section.
        self.group_defaults=cWParams.commentedOrderedDict()
        self.group_defaults["match"]=match_defaults
        self.group_defaults["enabled"]=("True","Bool","Is this group enabled?",None)
        self.group_defaults["config"]=group_config_defaults
        self.group_defaults["attrs"]=sub_defaults['attrs']
        self.group_defaults["files"]=sub_defaults['files']
        self.group_defaults["security"]=copy.deepcopy(security_defaults)
        ###############################
        # Start defining the defaults
        self.defaults["frontend_name"]=(socket.gethostname(),'ID', 'VO Frontend name',None)
        self.defaults['frontend_versioning'] = ('True', 'Bool', 'Should we create versioned subdirectories of the type frontend_$frontend_name?', None)
        self.defaults['frontend_monitor_index_page'] = ('True', 'Bool', 'Should we create an index.html in the monitoring web directory?',None)
        work_defaults=cWParams.commentedOrderedDict()
        work_defaults["base_dir"]=("%s/frontstage"%os.environ["HOME"],"base_dir","Frontend base dir",None)
        work_defaults["base_log_dir"]=("%s/frontlogs"%os.environ["HOME"],"log_dir","Frontend base log dir",None)
        self.defaults["work"]=work_defaults
        process_log_defaults=cWParams.commentedOrderedDict()
        process_log_defaults["min_days"] = ["3.0","days","Min number of days the logs must be preserved (even if they use too much space)",None]
        process_log_defaults["max_days"] = ["7.0","days","Max number of days the logs should be preserved",None]
        process_log_defaults["max_mbytes"] = ["100.0","Mbytes","Max number of Mbytes the logs can use",None]
        process_log_defaults['extension'] = ["all", "string", "name of the log extention", None]
        process_log_defaults['msg_types'] = ["INFO, WARN, ERR", "string", "types of log messages", None]
        process_log_defaults['backup_count'] = ["5", "string", "Number of backup logs to keep", None]
        process_log_defaults['compression'] = ["", "string", "Compression for backup log files", None]
        log_retention_defaults = cWParams.commentedOrderedDict()
        log_retention_defaults["process_logs"] = ([], 'Dictionary of log types', "Each log corresponds to a log file", copy.deepcopy(process_log_defaults))
        self.defaults["log_retention"] = log_retention_defaults
        monitor_footer_defaults=cWParams.commentedOrderedDict()
        monitor_footer_defaults["display_txt"] = ["", "string", "what will be displayed at the bottom of the monitoring page", None]
        monitor_footer_defaults["href_link"] = ["", "string", "where to link to", None]
        self.defaults["monitor_footer"] = monitor_footer_defaults
        self.defaults['loop_delay']=('60','seconds', 'Number of seconds between iterations',None)
        self.defaults['advertise_delay']=('5','NR', 'Advertize evert NR loops',None)
        self.defaults['advertise_with_tcp']=('True','Bool', 'Should condor_advertise use TCP connections?',None)
        self.defaults['advertise_with_multiple']=('True','Bool', 'Should condor_advertise use -multiple?',None)
        self.defaults['group_parallel_workers']=('2','NR', 'Max number of parallel workers that process the group policies', None)
        self.defaults['restart_attempts']=('3','NR', 'Max allowed NR restarts every restart_interval before shutting down',None)
        self.defaults['restart_interval']=('1800','NR', 'Time interval NR sec which allow max restart attempts',None)
        stage_defaults=cWParams.commentedOrderedDict()
        stage_defaults["base_dir"]=("/var/www/html/vofrontend/stage","base_dir","Stage base dir",None)
        stage_defaults["web_base_url"]=("http://%s/vofrontend/stage"%socket.gethostname(),'base_url','Base Web server URL',None)
        stage_defaults["use_symlink"]=("True","Bool","Can I symlink stage dir from work dir?",None)
        self.defaults["stage"]=stage_defaults
        self.monitor_defaults["base_dir"]=("/var/www/html/vofrontend/monitor","base_dir","Monitoring base dir",None)
        self.monitor_defaults["web_base_url"]=(None,"web_base_url","Monitoring base dir",None)
        self.defaults["monitor"]=self.monitor_defaults
        pool_collector_defaults=cWParams.commentedOrderedDict()
        pool_collector_defaults["node"]=(None,"nodename","Pool collector node name (for example, col1.my.org:9999)",None)
        pool_collector_defaults["DN"]=(None,"dn","Pool collector distinguised name (subject) (for example, /DC=org/DC=myca/OU=Services/CN=col1.my.org)",None)
        pool_collector_defaults["secondary"]=("False","Bool","Secondary nodes will be used by glideins, if present",None)
        pool_collector_defaults["group"]=("default","string","Collector group name useful to group HA setup",None)
        self.defaults["collectors"]=([],'List of pool collectors',"Each proxy collector contains",pool_collector_defaults)
        ccb_defaults=cWParams.commentedOrderedDict()
        ccb_defaults["node"]=(None,"nodename","CCB collector node name (for example, ccb1.my.org:9999)",None)
        ccb_defaults["DN"]=(None,"dn","CCB collector distinguised name (subject) (for example, /DC=org/DC=myca/OU=Services/CN=ccb1.my.org)",None)
        ccb_defaults["group"]=("default","string","CCB collector group name useful to group HA setup",None)
        self.defaults["ccbs"]=([],'List of CCB collectors',"Each CCB contains",ccb_defaults)
        self.defaults["security"]=copy.deepcopy(security_defaults)
        self.defaults["security"]["classad_proxy"]=(None,"fname","File name of the proxy used for talking to the WMS collector",None)
        self.defaults["security"]["proxy_DN"]=(None,"dn","Distinguised name (subject) of the proxy (for example, /DC=org/DC=myca/OU=Services/CN=fe1.my.org)",None)
        self.defaults["security"]["sym_key"]=("aes_256_cbc","sym_algo","Type of symetric key system used for secure message passing",None)
        self.defaults["match"]=copy.deepcopy(match_defaults)
        # Change default match value
        # By default we want to look only for vanilla universe jobs
        # that are not monitoring jobs
        self.defaults["match"]["job"]["query_expr"][0]='(JobUniverse==5)&&(GLIDEIN_Is_Monitor =!= TRUE)&&(JOB_Is_Monitor =!= TRUE)'
        self.defaults["attrs"]=sub_defaults['attrs']
        self.defaults["files"]=copy.deepcopy(sub_defaults['files'])
        # ordering is specific to global section of factory
        self.defaults["files"][3]["after_group"]=("False",'Bool','Should this file be loaded after the group ones?',None)
        global_config_defaults=cWParams.commentedOrderedDict()
        global_config_defaults['idle_vms_total']=copy.deepcopy(common_config_vms_total_defaults)
        global_config_defaults['idle_vms_total_global']=copy.deepcopy(common_config_vms_total_defaults)
        global_config_defaults['running_glideins_total']=copy.deepcopy(common_config_running_total_defaults)
        global_config_defaults['running_glideins_total_global']=copy.deepcopy(common_config_running_total_defaults)
        self.defaults["config"]=global_config_defaults
        self.defaults["groups"]=(xmlParse.OrderedDict(),"Dictionary of groups","Each group contains",self.group_defaults)
        # Initialize the external policy modules data structure
        # (filled later by load_match_policies: one module for the frontend
        # plus one per group)
        self.match_policy_modules = {
            'frontend': None,
            'groups': {},
        }
        # High Availability Configuration settings
        haf_defaults = cWParams.commentedOrderedDict()
        haf_defaults['frontend_name'] = (None, 'frontend_name',
                                         'Name of the frontend', None)
        ha_defaults = cWParams.commentedOrderedDict()
        ha_defaults['ha_frontends'] = ([], 'List of frontends in HA mode',
                                       'Each element contains', haf_defaults)
        ha_defaults["enabled"]=('False', 'Bool', 'Enable HA?', None)
        ha_defaults["check_interval"]=('300', 'NR', 'How frequently should slav check if the master is down', None)
        #ha_defaults["activation_delay"]=('150', 'NR', 'How many sec to wait before slav activates after detecting that master is down', None)
        self.defaults['high_availability'] = ha_defaults
        return
    # return name of top element
    def get_top_element(self):
        """Name of the XML root element of the frontend configuration."""
        return "frontend"
def buildDir(self,frontendVersioning, basedir):
# return either basedir or basedir/frontend_fename
subdir = "frontend_%s" % self.frontend_name
if frontendVersioning:
return os.path.join(basedir, subdir)
else:
return basedir
# validate data and add additional attributes if needed
def derive(self):
if len(self.groups.keys())==0:
raise "No groups defined!"
self.validate_names()
frontendVersioning = False
if self.data.has_key('frontend_versioning') and \
self.data['frontend_versioning'].lower() == 'true':
frontendVersioning = True
self.stage_dir=self.buildDir(frontendVersioning, self.stage.base_dir)
self.monitor_dir=self.buildDir(frontendVersioning, self.monitor.base_dir)
self.work_dir=self.buildDir(frontendVersioning, self.work.base_dir)
self.log_dir=self.buildDir(frontendVersioning, self.work.base_log_dir)
self.web_url=self.buildDir(frontendVersioning, self.stage.web_base_url)
if hasattr(self.monitor,"web_base_url") and (self.monitor.web_base_url is not None):
self.monitoring_web_url=self.buildDir(frontendVersioning, self.monitor.web_base_url)
else:
self.monitoring_web_url=self.web_url.replace("stage","monitor")
self.derive_match_attrs()
####################
has_collector=self.attrs.has_key('GLIDEIN_Collector')
if not has_collector:
# collector not defined at global level, must be defined in every group
has_collector=True
for group_name in self.groups.keys():
has_collector&=self.groups[group_name].attrs.has_key('GLIDEIN_Collector')
if has_collector:
raise RuntimeError, "Attribute GLIDEIN_Collector cannot be defined by the user"
####################
has_ccb=self.attrs.has_key('GLIDEIN_CCB')
if not has_collector:
# collector not defined at global level, must be defined in every group
has_ccb=True
for group_name in self.groups.keys():
has_ccb&=self.groups[group_name].attrs.has_key('GLIDEIN_CCB')
if has_ccb:
raise RuntimeError, "Attribute GLIDEIN_CCB cannot be defined by the user"
####################
if self.security.proxy_DN is None:
raise RuntimeError, "security.proxy_DN not defined"
if len(self.collectors)==0:
raise RuntimeError, "At least one pool collector is needed"
####################
has_security_name=(self.security.security_name is not None)
if not has_security_name:
# security_name not defined at global level, look if defined in every group
has_security_name=True
for group_name in self.groups.keys():
has_security_name&=(self.groups[group_name].security.security_name is not None)
if not has_security_name:
# explicity define one, so it will not change if config copied
# it also makes the frontend admins aware of the name
self.data['security']['security_name']=self.frontend_name
####################
for i in range(len(self.security.credentials)):
pel=self.subparams.data['security']['credentials'][i]
if pel['security_class'] is None:
# define an explicit security, so the admin is aware of it
pel['security_class']="frontend"
group_names=self.groups.keys()
for group_name in group_names:
for i in range(len(self.groups[group_name].security.credentials)):
pel=self.subparams.data['groups'][group_name]['security']['credentials'][i]
if pel['security_class'] is None:
# define an explicit security, so the admin is aware of it
pel['security_class']="group_%s"%group_name
# verify and populate HA
if self.high_availability['enabled'].lower() == 'true':
if (len(self.high_availability['ha_frontends']) == 1):
haf = self.high_availability['ha_frontends'][0]
if not haf['frontend_name']:
raise RuntimeError, 'High availability is enabled but the configuration is missing frontend_name of the master ha_frontend.'
else:
raise RuntimeError, 'Exactly one master ha_frontend information is needed when running this frontend in high_availability slave mode.'
    # verify match data and create the attributes if needed
    def derive_match_attrs(self):
        """Validate match expressions/attrs for the frontend and each group.

        External match policy modules are loaded first; their
        factory/job match_attrs are merged on top of the XML-declared
        ones.  For every group, global and group-level match data are
        combined (match_exprs ANDed; attrs and match_attrs appended with
        group values overriding global ones) and then validated.
        """
        # Load all the match policy modules upfront since we need them
        self.load_match_policies()
        # TODO: Do we really need to validate frontend main section?
        # This gets validated any ways in the groups section
        policy_modules = []
        if self.match_policy_modules['frontend']:
            policy_modules.append(self.match_policy_modules['frontend'])
        self.validate_match('frontend', self.match.match_expr,
                            self.match.factory.match_attrs,
                            self.match.job.match_attrs, self.attrs,
                            policy_modules)
        for group_name in self.groups.keys():
            # Merge group match info and attrs from
            # global section with those sepcific to group
            # Match and query expressions are ANDed
            # attrs, job & factory match_attrs are appended with group
            # specific values overriding the global values
            # Get frontend and group specific policy modules to use
            pmodules = list(policy_modules)
            if self.match_policy_modules['groups'].get(group_name):
                pmodules.append(self.match_policy_modules['groups'][group_name])
            #if self.match_policy_modules['frontend']:
            #    policy_modules.append(self.match_policy_modules['frontend'])
            #if self.match_policy_modules['groups'].get(group_name):
            #    policy_modules.append(self.match_policy_modules['groups'][group_name])
            # Construct group specific dict of attrs in <attrs>
            # (group entries overwrite same-named global ones)
            attrs_dict={}
            for attr_name in self.attrs.keys():
                attrs_dict[attr_name]=self.attrs[attr_name]
            for attr_name in self.groups[group_name].attrs.keys():
                attrs_dict[attr_name]=self.groups[group_name].attrs[attr_name]
            # Construct group specific dict of factory_attrs in <match_attrs>
            # and those from the policy_modules
            factory_attrs={}
            for attr_name in self.match.factory.match_attrs.keys():
                factory_attrs[attr_name]=self.match.factory.match_attrs[attr_name]
            for attr_name in self.groups[group_name].match.factory.match_attrs.keys():
                factory_attrs[attr_name]=self.groups[group_name].match.factory.match_attrs[attr_name]
            for pmodule in pmodules:
                if pmodule.factoryMatchAttrs:
                    for attr_name in pmodule.factoryMatchAttrs.keys():
                        factory_attrs[attr_name] = pmodule.factoryMatchAttrs[attr_name]
            # Construct group specific dict of job_attrs in <match_attrs>
            # and those from the policy_modules
            job_attrs={}
            for attr_name in self.match.job.match_attrs.keys():
                job_attrs[attr_name]=self.match.job.match_attrs[attr_name]
            for attr_name in self.groups[group_name].match.job.match_attrs.keys():
                job_attrs[attr_name]=self.groups[group_name].match.job.match_attrs[attr_name]
            for pmodule in pmodules:
                if pmodule.jobMatchAttrs:
                    for attr_name in pmodule.jobMatchAttrs.keys():
                        job_attrs[attr_name] = pmodule.jobMatchAttrs[attr_name]
            # AND global and group specific match_expr
            # and those from the policy_modules
            match_expr = "(%s) and (%s)" % (
                self.match.match_expr, self.groups[group_name].match.match_expr)
            self.validate_match('group %s'%group_name, match_expr,
                                factory_attrs, job_attrs, attrs_dict,
                                pmodules)
        return
def get_xml_format(self):
    """Return the XML (de)serialization layout for this config.

    Maps each list-valued parameter to its per-element tag name and each
    dict-valued parameter to its per-entry tag name, as consumed by the
    generic params XML writer.
    """
    return {'lists_params':{'files':{'el_name':'file','subtypes_params':{'class':{}}},
                            'process_logs':{'el_name':'process_log','subtypes_params':{'class':{}}},
                            'collectors':{'el_name':'collector','subtypes_params':{'class':{}}},
                            'ccbs':{'el_name':'ccb','subtypes_params':{'class':{}}},
                            'schedds':{'el_name':'schedd','subtypes_params':{'class':{}}},
                            'ha_frontends':{'el_name':'ha_frontend','subtypes_params':{'class':{}}},
                            'credentials':{'el_name':'credential','subtypes_params':{'class':{}}}},
            'dicts_params':{'attrs':{'el_name':'attr','subtypes_params':{'class':{}}},
                            'groups':{'el_name':'group','subtypes_params':{'class':{}}},
                            'match_attrs':{'el_name':'match_attr','subtypes_params':{'class':{}}}}}
def validate_names(self):
    """Validate the frontend name, every group name and every attr name.

    Raises RuntimeError (Python 2 raise syntax, as in the rest of this
    file) on the first invalid name found.
    """
    # glidein name does not have a reasonable default
    if self.frontend_name is None:
        raise RuntimeError, "Missing frontend name"
    # Frontend name: no spaces, only characters cWParams allows, no dots.
    if self.frontend_name.find(' ')!=-1:
        raise RuntimeError, "Invalid frontend name '%s', contains a space."%self.frontend_name
    if not cWParams.is_valid_name(self.frontend_name):
        raise RuntimeError, "Invalid frontend name '%s', contains invalid characters."%self.frontend_name
    if self.frontend_name.find('.')!=-1:
        raise RuntimeError, "Invalid frontend name '%s', contains a point."%self.frontend_name
    # Group names: same rules, plus 'XPVO' is a reserved prefix.
    group_names=self.groups.keys()
    for group_name in group_names:
        if group_name.find(' ')!=-1:
            raise RuntimeError, "Invalid group name '%s', contains a space."%group_name
        if not cWParams.is_valid_name(group_name):
            raise RuntimeError, "Invalid group name '%s', contains invalid characters."%group_name
        if group_name[:4]=='XPVO':
            raise RuntimeError, "Invalid group name '%s', starts with reserved sequence 'XPVO'."%group_name
        if group_name.find('.')!=-1:
            raise RuntimeError, "Invalid group name '%s', contains a period '.'"%group_name
    # Global attribute names.
    attr_names=self.attrs.keys()
    for attr_name in attr_names:
        if not cWParams.is_valid_name(attr_name):
            raise RuntimeError, "Invalid global attribute name '%s'."%attr_name
    # Per-group attribute names.
    for group_name in group_names:
        attr_names=self.groups[group_name].attrs.keys()
        for attr_name in attr_names:
            if not cWParams.is_valid_name(attr_name):
                raise RuntimeError, "Invalid group '%s' attribute name '%s'."%(group_name,attr_name)
    return
def translate_match_attrs(self, loc_str, match_attrs_name, match_attrs):
    """Translate factory/job match_attrs into placeholder values.

    Each attr is mapped to a representative value of its declared type
    ('string' -> 'a', 'int' -> 1, 'bool' -> True, 'real' -> 1.0) so the
    match expression can be eval'ed during validation.

    loc_str and match_attrs_name are only used in the error message.
    Raises RuntimeError on an unknown attr type.
    """
    translations = { 'string': 'a', 'int': 1, 'bool': True, 'real': 1.0 }
    translated_attrs = {}
    for attr_name in match_attrs.keys():
        attr_type = match_attrs[attr_name]['type']
        try:
            translated_attrs[attr_name] = translations[attr_type]
        except KeyError, e:
            raise RuntimeError, "Invalid %s %s attr type '%s'" % (
                loc_str, match_attrs_name, attr_type)
    return translated_attrs
def validate_match(self, loc_str, match_str, factory_attrs,
                   job_attrs, attr_dict, policy_modules):
    """
    Validate match_expr, factory_match_attrs, job_match_attrs,
    <attrs> and their equivalents in policy_modules. This is done
    during the config load.

    @param loc_str: Section to be validated. i.e. 'frontend' or 'group x'
    @type loc_str: string
    @param match_str: match_expr to be applied to this section
    @type match_str: string
    @param factory_attrs: factory_match_attrs for this section
    @type factory_attrs: dict
    @param job_attrs: job_match_attrs for this section
    @type job_attrs: dict
    @param attr_dict: attrs for this section
    @type attr_dict: dict
    @param policy_modules: externally loaded policy modules for this section
    """
    # Globals/Locals that will be passed to the eval so that we
    # can validate the match_expr as well
    env = {'glidein':{'attrs':{}},'job':{},'attr_dict':{}}
    # Validate factory's match_attrs (placeholder values per declared type)
    env['glidein']['attrs'] = self.translate_match_attrs(loc_str, 'factory', factory_attrs)
    # Validate job's match_attrs
    env['job'] = self.translate_match_attrs(loc_str, 'job', job_attrs)
    # Validate attr: 'expr' attrs are treated like strings here
    for attr_name in attr_dict.keys():
        attr_type=attr_dict[attr_name]['type']
        if attr_type=='string':
            attr_val='a'
        elif attr_type=='int':
            attr_val=1
        elif attr_type=='expr':
            attr_val='a'
        else:
            raise RuntimeError, "Invalid %s attr type '%s'"%(loc_str,attr_type)
        env['attr_dict'][attr_name]=attr_val
    # Now that we have validated the match_attrs, compile match_obj
    # and run it against the placeholder environment.
    try:
        match_obj = compile(match_str, "<string>", "exec")
        eval(match_obj, env)
    except KeyError, e:
        raise RuntimeError, "Invalid %s match_expr '%s': Missing attribute %s"%(loc_str,match_str,e)
    except Exception, e:
        raise RuntimeError, "Invalid %s match_expr '%s': %s"%(loc_str,match_str,e)
    # Validate the match(job, glidein) from the policy modules
    try:
        for pmodule in policy_modules:
            if 'match' in dir(pmodule.pyObject):
                match_result = pmodule.pyObject.match(env['job'],
                                                      env['glidein'])
    except KeyError, e:
        raise RuntimeError, "Error in %s policy module's %s.match(job, glidein): Missing attribute %s" % (loc_str, pmodule.name, e)
    except Exception, e:
        raise RuntimeError, "Error in %s policy module's %s.match(job, glidein): %s" % (loc_str, pmodule.name, e)
    return
# return attribute value in the proper python format
def extract_attr_val(self,attr_obj):
    """Delegate to the module-level extract_attr_val() helper."""
    return extract_attr_val(attr_obj)
def get_subparams_class(self):
    """Return the class used to hold sub-parameters of this config."""
    return VOFrontendSubParams
def load_match_policies(self):
    """
    Load external match policies for frontend and groups into
    self.match_policy_modules ('frontend' key plus a 'groups' sub-dict
    keyed by group name).
    """
    # Load global frontend policy module
    if self.match.policy_file:
        self.match_policy_modules['frontend'] = MatchPolicy(self.match.policy_file)
    # Load per group policy module
    self.match_policy_modules['groups'] = {}
    for group_name in self.groups.keys():
        # Only load if the group is enabled
        policy_file = self.groups[group_name].match.policy_file
        if self.groups[group_name].enabled and policy_file:
            # NOTE(review): work_dir is computed but never used here --
            # possibly leftover from an earlier MatchPolicy signature.
            work_dir = os.path.join(self.work_dir, 'group_%s'%group_name)
            self.match_policy_modules['groups'][group_name] = \
                MatchPolicy(policy_file)
def update_match_attrs(self):
    """Override config match_attrs with those declared by loaded policy modules."""
    # Load global match_attrs from externally loaded match_policies
    if self.match_policy_modules['frontend']:
        if self.match_policy_modules['frontend'].factoryMatchAttrs:
            self.match.factory.match_attrs.data = self.match_policy_modules['frontend'].factoryMatchAttrs
        if self.match_policy_modules['frontend'].jobMatchAttrs:
            self.match.job.match_attrs.data = self.match_policy_modules['frontend'].jobMatchAttrs
    # Load group match_attrs from externally loaded match_policies
    for group_name in self.groups.keys():
        # Shorthand for easy access
        group_module = self.match_policy_modules['groups'].get(group_name)
        if group_module:
            if group_module.factoryMatchAttrs:
                self.groups[group_name].match.factory.match_attrs.data = group_module.factoryMatchAttrs
            if group_module.jobMatchAttrs:
                self.groups[group_name].match.job.match_attrs.data = group_module.jobMatchAttrs
####################################################################
# INTERNAL, do not use directly
# Use the class method instead
####################################################################
# return attribute value in the proper python format
def extract_attr_val(attr_obj):
    """Coerce attr_obj.value to the Python type implied by attr_obj.type.

    'string' and 'expr' become str; 'int' becomes int.
    Raises RuntimeError on any other type.
    """
    if (not attr_obj.type in ("string","int","expr")):
        raise RuntimeError, "Wrong attribute type '%s', must be either 'int', 'string' or 'expr'"%attr_obj.type
    if attr_obj.type in ("string","expr"):
        return str(attr_obj.value)
    else:
        return int(attr_obj.value)
| 2.125 | 2 |
tests/test_config.py | aalexanderkevin/midtrans-python-client | 0 | 12770212 | import pytest
from .context import ApiConfig
from pprint import pprint
def test_api_config_class():
    """ApiConfig keeps its constructor arguments and reports them via repr()."""
    cfg = ApiConfig(is_production=False,
                    server_key='sk-abc',
                    client_key='ck-123')
    # Stored attributes mirror the constructor arguments.
    assert cfg.is_production == False
    assert cfg.server_key == 'sk-abc'
    assert cfg.client_key == 'ck-123'
    # repr() exposes all three values in declaration order.
    assert repr(cfg) == '<ApiConfig(False,sk-abc,ck-123)>'
solutions/00_based/sol/enc.py | GiacomoFerro/crypto-challenges-2020 | 0 | 12770213 | <reponame>GiacomoFerro/crypto-challenges-2020
# Caesar-cipher encoder (shift 4) used to generate the challenge ciphertext.
text="ANCHE TU BRUTO FIGLIO MIO?";
s=4;
result="";
text=text.upper();
for i in range(len(text)):
    c = text[i];
    if(c.isupper()):
        # Shift uppercase letters within A-Z.
        result+=chr((ord(c)+s-65)%26+65);
    else:
        # NOTE(review): after upper() only non-letters (space, '?') reach this
        # branch, and the lowercase formula maps them to arbitrary letters.
        # Presumably intentional for this challenge's output -- confirm before
        # "fixing".
        result+=chr((ord(c)+s-97)%26+97);
print(result);
| 3.75 | 4 |
cincanregistry/checkers/didierstevens.py | cincanproject/cincan-registry | 0 | 12770214 | <gh_stars>0
from ._checker import NO_VERSION
from .github import GitHubChecker
import base64
class DidierStevensChecker(GitHubChecker):
    """
    Class for checking latests possible tool releases of Didier Stevens.

    Each tool is a single ``<tool>.py`` file in the suite repository; its
    version lives in a ``__version__`` assignment that this checker parses
    out of the file fetched via the GitHub contents API.
    """

    def __init__(self, tool_info: dict, **kwargs):
        super().__init__(tool_info, **kwargs)
        # Name of the assignment that carries the version inside the script.
        self.version_variable = "__version__"

    def _get_version(self, curr_ver: str = ""):
        # Only the "release" query method is supported for this provider.
        if self.method != "release":
            self.logger.error(
                f"Invalid query method for {self.provider} in tool {self.tool}."
            )
            self.version = NO_VERSION
            return
        self._by_release()

    def _by_release(self):
        """
        Method for finding latest release for single tool from DidierStevens Git repository.
        """
        url = (
            f"{self.api}/repos/{self.repository}/{self.suite}"
            f"/contents/{self.tool}.py"
        )
        response = self.session.get(url, timeout=self.timeout)
        payload = response.json()
        if response.status_code != 200:
            self._fail(response)
            return
        # Contents API returns the file base64-encoded.
        decoded = base64.b64decode(payload.get("content"))
        for raw_line in decoded.splitlines():
            line = raw_line.decode()
            if self.version_variable in line:
                self.version = line.split("=")[1].strip()
                break
        if not self.version:
            self._fail(response)
| 2.59375 | 3 |
Pipers/Telegram2VK.py | Andrew-Morozko/Funnel | 0 | 12770215 | import Piper
import html
import os
import DB
class Telegram2VK(Piper.Piper):
    """Piper that forwards Telegram text messages to the mapped VK chat."""

    def __init__(self, source, dest):
        """ Gets 2 handlers """
        super(Telegram2VK, self).__init__(source, dest)

    def converter(self, in_q, out_q):
        """Consume Telegram updates from in_q and emit VK send requests on out_q.

        Runs forever; blocks on the input queue.
        """
        while True:
            update = in_q.get(block=True)
            if 'message' not in update:
                continue  # not a new-message update
            if 'text' not in update['message']:
                continue  # ignore non-text content
            peer = DB.convert_ids('Telegram', 'VK', update['message']['chat']['id'])
            if peer is None:
                continue  # no VK chat mapped to this Telegram chat
            out_q.put({
                'api_method': 'messages.send',
                'params': {'peer_id': peer,
                           'message': update['message']['text']},
            })
| 2.640625 | 3 |
tb_gen.py | masipcat/VHDL-TestbenchGen | 6 | 12770216 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from vhdl import *
from vParser import *
print """
dP dP dP dP 888888ba dP
88 88 88 88 88 `8b 88
88 .8P 88aaaaa88a 88 88 88
88 d8' 88 88 88 88 88
88 .d8P 88 88 88 .8P 88
888888' dP dP 8888888P 88888888P
ooooooooooooooooooooooooooooooooooooooooo
d888888P 888888ba .88888.
88 88 `8b d8' `88
88 a88aaaa8P' 88 .d8888b. 88d888b.
88 88 `8b. 88 YP88 88ooood8 88' `88
88 88 .88 Y8. .88 88. ... 88 88
dP 88888888P `88888' `88888P' dP dP
oooooooooooooooooooooooooooooooooooooooooooooooo
version: 1.0.4
author: <NAME>, <NAME>
"""
def libraryTb():
    """Render the library/use clauses of the parsed VHDL for the testbench."""
    libs, uses = [], []
    for l in vhdl.getLibs():
        libs += ['library %s;' % l.getName()]
        uses += ['use %s;' % p for p in l.getPackages()]
    return "%s%s\n\n" % ("\n".join(libs), "\n".join(uses))
def entityTb():
    """Render an empty '<name>_tb' entity declaration per architecture."""
    entities = ['entity %s_tb is\nend %s_tb;' % (a.getEntity().getName(), a.getEntity().getName()) for a in vhdl.getArchitectures()]
    return "\n".join(entities) + "\n\n"
def architectureTb():
    """Render the testbench architecture body for every parsed architecture.

    Composes the component declaration, DUT signals, port map and the
    (optional, interactively requested) clock process.
    """
    result = ""
    for architecture in vhdl.getArchitectures():
        entity = architecture.getEntity()
        result += 'architecture behav of %s_tb is\n\tcomponent my_%s\n' % (entity.getName(), entity.getName())
        result += portsTb() + dutSignalsTb() + dutTb() + clockTb()
        # Catalan marker: "your processes go here".
        result += '\n\t-- Els teus process van aqui:\nend behav;'
    return result
def portsTb():
    """Render the component port() declaration from the entity's ports."""
    result = '\tport ('
    for arch in vhdl.getArchitectures():
        ent = arch.getEntity()
        ports = ['\t{0} : {1} {2};\n'.format(p.getName(), p.getPortType(), p.getType()) for p in ent.getPorts().values()]
        # Drop the trailing ';\n' of the last port before closing the list.
        result += "\t\t".join(ports)[:-2] + ');\n\tend component;'
    return result
def dutSignalsTb():
    """Render the 'for dut' binding and one t_<port> signal per entity port."""
    result = ""
    for arch in vhdl.getArchitectures():
        e = arch.getEntity()
        result += '\n\tfor dut : my_%s use entity work.%s;\n\n' % (e.getName(), e.getName())
        result += "\n".join(['\tsignal t_%s : %s;' % (p.getName(), p.getType()) for p in e.getPorts().values()])
        result += '\n\n\tbegin\n'
    return result
def dutTb():
    """Render the DUT port map wiring every port to its t_<port> signal."""
    result = ""
    for architecture in vhdl.getArchitectures():
        entity = architecture.getEntity()
        result += '\tdut: my_%s port map (\n' % entity.getName()
        for p in entity.getPorts().values():
            result += '\t\t%s => t_%s,\n' % (p.getName(), p.getName())
        # Replace the trailing ',\n' of the last mapping with ');'.
        result = result[:-2] + ");\n"
    return result
def clockTb():
    """Interactively ask (in Catalan) whether to generate a clock process.

    Returns the VHDL clock process text, or "" when the user declines.
    Python 2: uses raw_input for text and input (eval) for numbers.
    """
    while True:
        clk = raw_input('Vols generar un clock (s/n) [n]: ').lower()
        # Empty answer defaults to 'n'.
        clk = "n" if clk == "" else clk
        if clk != 's' and clk != 'n':
            print 'error: opció invàlida'
            continue
        elif clk == 's':
            # Ask for the clock frequency until a positive number is given;
            # ZeroDivisionError / ValueError are caught and reported.
            while True:
                try:
                    clk_freq = float(input("De quina freqüència (Hz): "))
                    # Half period in nanoseconds.
                    half_period = (1/clk_freq) / 2. * 10**9
                    if clk_freq > 0:
                        break
                except Exception as e:
                    print e
                print "error: freqüència invàlida"
            # Ask for the oscillation count; each oscillation is 2 toggles.
            while True:
                try:
                    n_times = int(input("Quantes oscil·lacions vols? ")) * 2
                    if n_times > 0:
                        break
                except Exception:
                    pass
                print "error: nombre d'oscil·lacions invàlid"
            return "\tclk_process: process\n\tbegin\n\t\tt_clk <= '0';\n\t\twait for %.8f ns;\n\t\tfor i in 1 to %i loop\n\t\t\tt_clk <= not t_clk;\n\t\t\twait for %.8f ns;\n\t\tend loop;\n\t\twait;\n\tend process clk_process;" % (half_period, n_times, half_period)
        else:
            return ""
if __name__ == "__main__":
    # Usage: tb_gen.py <file.vhd>  (error messages are in Catalan)
    if len(sys.argv) != 2:
        print "error: has d'especificar un fitxer .vhd"
        sys.exit(1)
    vhdl_filename = sys.argv[1].split('.')
    if vhdl_filename[-1] != 'vhd':
        print 'error: l\'extenció del fitxer ha de ser .vhd'
        sys.exit(1)
    # VHDL_tb filename: <input without extension>_tb.vhd
    vhdl_filename = ".".join(vhdl_filename[:-1]) + '_tb.vhd'
    # VHDL content
    vhd_file = read_file(sys.argv[1])
    # Creating VHDL obj and populating it from the parsed source
    vhdl = VHDL()
    [vhdl.addLibrary(l) for l in getLibs(vhd_file)]
    [vhdl.setEntity(e) for e in getEntities(vhd_file)]
    # Get each entity in 'vhdl' and adds each architecture in 'vhdl'
    for entity in vhdl.getEntities():
        arch = getArchitectureOfEntity(vhd_file, entity)
        if arch != "":
            vhdl.setArchitecture(arch)
    # Write the assembled testbench to file
    try:
        write_file(vhdl_filename, libraryTb() + entityTb() + architectureTb() + "\n\0")
        print '\nEl fitxer "%s" s\'ha creat correctament' % vhdl_filename
    except Exception as e:
        print "error: no hem pogut escriure l'arxiu '%s'" % vhdl_filename
thirdparty/RobotCarDataset-Scraper/get_datasets.py | mingu6/hfnet | 0 | 12770217 | <gh_stars>0
'''
Gets a list of available datasets from the Oxford Robotcar Dataset website.
<NAME>
Mar 2019
Oxford Robotics Institute, Oxford University.
'''
import requests
import re
from scrape_mrgdatashare import datasets_url
def main():
    """Scrape the Robotcar dataset listing and write datasets.csv.

    Each CSV row is '<dataset>,<sensor1>,<sensor2>,...'.
    """
    # open session
    session_requests = requests.session()
    # get http response from website
    result = session_requests.get(datasets_url)
    text = result.text
    # parse response text: offsets just past each occurrence of datasets_url
    text_locations = [text_location.end()
                      for text_location in re.finditer(datasets_url, text)]
    # assumes a dataset name is exactly 19 characters (timestamp-style id)
    # -- TODO confirm against the live page
    datasets = [str(text[text_location:text_location + 19])
                for text_location in text_locations]
    # ignore metadata and sort unique datasets
    datasets = datasets[2:]
    datasets = sorted(list(set(datasets)))
    # write output text file
    datasets_file = "datasets.csv"
    with open(datasets_file, "w") as file_handle:
        # iterate datasets
        for dataset in datasets:
            # url to dataset page
            dataset_url = datasets_url + dataset
            result = session_requests.get(dataset_url)
            text = result.text
            # parse text for sensor type: locate each download link
            start = [
                text_location.end() for text_location in re.finditer(
                    "download/\?filename=datasets", text)]
            sensor_types = []
            for s in start:
                # scan forward for ".tar"; the 40/41/44 offsets skip the
                # fixed '/<19-char dataset>/<19-char dataset>_' prefix of
                # the filename -- presumably tied to the page layout; verify.
                ss = s
                while text[ss + 40:ss + 44] != ".tar":
                    ss += 1
                sensor_type = text[s + 41:ss + 40]
                sensor_types.append(str(sensor_type))
            # write dataset entry
            file_handle.write(dataset + "," + ",".join(sensor_types) + "\n")


if __name__ == "__main__":
    main()
| 3.03125 | 3 |
diameterOfBinaryTree.py | passionzhan/LeetCode | 1 | 12770218 | # -*- encoding: utf-8 -*-
'''
@project : LeetCode
@File : diameterOfBinaryTree.py
@Contact : <EMAIL>
@Desc :
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
2020-03-10 zhan 1.0 None
'''
# Definition for a binary tree node.
class TreeNode:
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        # Children start empty; callers wire them up after construction.
        self.val, self.left, self.right = x, None, None
class Solution:
    """Diameter of a binary tree (LeetCode 543): the number of edges on the
    longest path between any two nodes."""

    def diameterOfBinaryTree(self, root: TreeNode) -> int:
        """Return the tree's diameter; an empty tree has diameter 0."""
        if root is None:
            return 0
        best, _depth = self.divideConquer(root)
        return best

    def divideConquer(self, root):
        """Return (best diameter inside this subtree, subtree depth)."""
        if root.left is None and root.right is None:
            return 0, 1
        # (-1, 0) for a missing child so its depth contributes nothing and
        # its "diameter" never wins the max below.
        left = self.divideConquer(root.left) if root.left else (-1, 0)
        right = self.divideConquer(root.right) if root.right else (-1, 0)
        # Best path either stays in one subtree or passes through this node.
        best = max(left[0], right[0], left[1] + right[1])
        return best, max(left[1], right[1]) + 1
if __name__ == '__main__':
    # Demo on the level-order tree [1, 2, 3, 4, 5]; expected diameter is 3.
    # (The original had the bare expression `[1, 2, 3, 4, 5]` here -- a
    # no-op statement clearly meant as this comment.)
    leftChild = TreeNode(2)
    leftChild.left = TreeNode(4)
    leftChild.right = TreeNode(5)
    root = TreeNode(1)
    root.left = leftChild
    root.right = TreeNode(3)
    ans = Solution().diameterOfBinaryTree(root)
    print(ans)
| 3.609375 | 4 |
source/import_sql.py | ykjason/MSFlames | 0 | 12770219 | import sqlite3
if __name__ == '__main__':
SQL_FILE_NAME = "main_solo_vals_flame_advantaged.sql"
DB_FILE_NAME = "solo_values_FA.db"
connection = sqlite3.connect(DB_FILE_NAME)
cursor = connection.cursor()
file = open(SQL_FILE_NAME)
read_file = file.read()
cursor.executescript(read_file)
connection.close() | 2.234375 | 2 |
wyze_sdk/service/__init__.py | RebelTat/wyze-sdk | 132 | 12770220 | from .api_service import ApiServiceClient # noqa
from .auth_service import AuthServiceClient # noqa
from .earth_service import EarthServiceClient # noqa
from .ford_service import FordServiceClient # noqa
from .general_api_service import GeneralApiServiceClient # noqa
from .platform_service import PlatformServiceClient # noqa
from .scale_service import ScaleServiceClient # noqa
from .venus_service import VenusServiceClient # noqa
from .wyze_response import WyzeResponse # noqa
| 1.09375 | 1 |
config.py | SpikeKing/stylegan2encoder-my | 4 | 12770221 | <reponame>SpikeKing/stylegan2encoder-my<gh_stars>1-10
#!/usr/bin/env python
# -- coding: utf-8 --
"""
Copyright (c) 2019. All rights reserved.
Created by <NAME> on 2020/1/9
"""
# Directory holding saved latent representations (dlatents).
dlatents_dir = 'latent_representations'
# Directory where generated images are written.
generated_dir = 'generated_images'
# Directory for result artifacts.
result_dir = 'results'
| 0.996094 | 1 |
service/urls.py | zingbretsen/diplomacy | 0 | 12770222 | from django.urls import path, include
from . import views
# Service API routes. NOTE(review): 'order' is used as the route name for
# both the create and destroy endpoints below -- reverse('order') is
# ambiguous; confirm whether one of them should be renamed.
urlpatterns = [
    path('game-filter-choices', views.GameFilterChoicesView.as_view(),
         name='game-filter-choices'),
    path('games', views.ListGames.as_view(), name='list-games'),
    path('games/create', views.CreateGameView.as_view(), name='create-game'),
    path('game/<int:pk>/join', views.ToggleJoinGame.as_view(),
         name='toggle-join-game'),
    path('game/finalize/<int:pk>', views.ToggleFinalizeOrdersView.as_view(),
         name='toggle-finalize-orders'),
    path('game/<int:pk>', views.GameStateView.as_view(), name='game-state'),
    path('game/<int:game>/order', views.CreateOrderView.as_view(),
         name='order'),
    path('game/<int:game>/orders', views.ListOrdersView.as_view(),
         name='orders'),
    path('game/<int:game>/nation-state',
         views.RetrievePrivateNationStateView.as_view(),
         name='private-nation-state'),
    path('game/<int:game>/order/<int:pk>', views.DestroyOrderView.as_view(),
         name='order'),
    path('api-auth/',
         include('rest_framework.urls', namespace='rest_framework')),
]
backend/apps/transfer/dropbox_adapter.py | kevindice/cnap-dms | 1 | 12770223 | <reponame>kevindice/cnap-dms
from allauth.socialaccount.models import SocialAccount, SocialToken
import dropbox
import os
from django.core.exceptions import PermissionDenied
class DropboxAdapter():
    """Moves files between local transient storage and a user's Dropbox.

    Remote paths have the form /<provider>/<systemId>/<path in dropbox>;
    local paths are relative to /transient/<uniqueBatchId>/.
    """

    # Dropbox upload-session chunk size (4 MiB).
    CHUNK_SIZE = 4 * 1024 * 1024

    def _get_token(self, user):
        """Return the user's stored Dropbox OAuth token.

        Raises PermissionDenied when the user has no dropbox token.
        (Extracted: this lookup was duplicated in every public method.)
        """
        token_query = user.profile.tokens.filter(app__provider='dropbox')
        if not token_query.exists():
            raise PermissionDenied('No dropbox token found.')
        return token_query.get().token

    @staticmethod
    def _remote_file_path(remotePath):
        """Strip the /<provider>/<systemId> prefix, keeping the dropbox path."""
        return '/'.join([''] + remotePath.split('/')[3:])

    def download(self, localPath, remotePath, user, uniqueBatchId):
        """Download remotePath into /transient/<uniqueBatchId>/<localPath>."""
        dbx = dropbox.Dropbox(self._get_token(user))
        remoteFilePath = self._remote_file_path(remotePath)
        fullLocalPath = '/transient/%s/' % uniqueBatchId + localPath
        # Create the whole destination directory chain in one call.
        fullLocalDir = '/'.join(fullLocalPath.split('/')[0:-1] + [''])
        if not os.path.exists(fullLocalDir):
            os.makedirs(fullLocalDir)
        dbx.files_download_to_file(fullLocalPath, remoteFilePath)

    def upload(self, localPath, remotePath, user, uniqueBatchId):
        """Upload /transient/<uniqueBatchId>/<localPath> to remotePath.

        Files larger than CHUNK_SIZE go through a chunked upload session.
        """
        dbx = dropbox.Dropbox(self._get_token(user))
        remoteFilePath = self._remote_file_path(remotePath)
        fullLocalPath = '/transient/%s/' % uniqueBatchId + localPath
        file_size = os.path.getsize(fullLocalPath)
        # 'with' guarantees the handle closes even when an API call raises
        # (the original leaked it on error).
        with open(fullLocalPath, "rb") as f:
            if file_size <= self.CHUNK_SIZE:
                dbx.files_upload(f.read(), remoteFilePath)
                return
            upload_session = dbx.files_upload_session_start(
                f.read(self.CHUNK_SIZE))
            cursor = dropbox.files.UploadSessionCursor(
                session_id=upload_session.session_id,
                offset=f.tell()
            )
            commit = dropbox.files.CommitInfo(path=remoteFilePath)
            while f.tell() < file_size:
                if (file_size - f.tell()) <= self.CHUNK_SIZE:
                    # Last chunk: finish the session and commit the file.
                    dbx.files_upload_session_finish(
                        f.read(self.CHUNK_SIZE),
                        cursor,
                        commit
                    )
                else:
                    dbx.files_upload_session_append(
                        f.read(self.CHUNK_SIZE),
                        cursor.session_id,
                        cursor.offset
                    )
                    cursor.offset = f.tell()

    def list_directory(self, path, user):
        """List a Dropbox folder.

        Returns {'directories': [...], 'files': [...]} with each entry
        re-prefixed by the /<provider>/<systemId> route prefix of `path`
        (directories get a trailing '/').
        """
        dbx = dropbox.Dropbox(self._get_token(user))
        directoryPath = self._remote_file_path(path)
        response = dbx.files_list_folder(directoryPath)
        prefix = '/'.join(path.split('/')[0:3])
        return {
            'directories': [
                prefix + d.path_display + '/'
                for d in response.entries
                if isinstance(d, dropbox.files.FolderMetadata)
            ],
            'files': [
                prefix + f.path_display
                for f in response.entries
                if isinstance(f, dropbox.files.FileMetadata)
            ]
        }

    def create_directory(self, path, user):
        """Not implemented yet."""
        pass
Tensorflow/my_tensorflow/src/activations/__init__.py | hywel1994/mac-workspace | 37 | 12770224 | <gh_stars>10-100
"""激活函数"""
from .relu import *
def linear(x):
    """Linear activation: the identity function, f(x) = x."""
    return x
def identity(x):
    """Identity activation as a TensorFlow op (tf.identity of x)."""
    return tf.identity(x)
def sigmoid(x):
    """Sigmoid activation: 1 / (1 + exp(-x))."""
    return tf.nn.sigmoid(x)
def hard_sigmoid(x):
    """Piecewise-linear approximation of the sigmoid:
        f(x) = 0.            x < -2.5
             = 1.            x > 2.5
             = 0.2 * x + 0.5 otherwise
    Implemented as an affine map followed by clipping to [0, 1].
    """
    x = (0.2 * x) + 0.5
    x = tf.clip_by_value(x, 0., 1.)
    return x
def tanh(x):
    """Hyperbolic-tangent activation."""
    return tf.nn.tanh(x)
def softplus(x):
    """Softplus activation: log(1 + exp(x))."""
    return tf.nn.softplus(x)
def softsign(x):
    """Softsign activation:
        o = x / (1 + abs(x))
    """
    return tf.nn.softsign(x)
def softmax(x, axis=-1):
    """Softmax along `axis` (defaults to the last axis).

    Equivalent manual implementation (kept for reference):
        n_dim = x.get_shape().ndims
        assert n_dim >= 2
        if n_dim == 2:
            return tf.nn.softmax(x)
        else:
            e = tf.exp(x - tf.reduce_max(x, axis=axis, keepdims=True))
            s = tf.reduce_sum(e, axis=axis, keepdims=True)
            return e / s
    """
    return tf.nn.softmax(x, axis=axis)
def elu(x):
    """Exponential Linear Unit (ELU) activation."""
    return tf.nn.elu(x)
def selu(x):
    """Scaled Exponential Linear Unit (SELU) activation.

    Uses the fixed alpha/scale constants of the SELU formulation:
    scale * x for x > 0, scale * alpha * (exp(x) - 1) otherwise.
    """
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    o = tf.nn.elu(x)
    return scale * tf.where(x > 0, o, alpha * o)
| 2.84375 | 3 |
EXAMPLES/scripts/scenes/collision.py | K9Kraken/EZpanda | 0 | 12770225 | <reponame>K9Kraken/EZpanda
# Scene graph roots and a camera pulled back along -Y to view the shapes.
render = ez.Node()
aspect2D = ez.Node()
camera = ez.Camera(parent=render)
camera.y = -20

# Create collision shapes:
# Collision Sphere:
sphere = ez.collision.shapes.Sphere(0.25, parent=render)
# NOTE(review): detach/reattach below looks like leftover re-parenting
# debugging -- confirm it is still needed.
sphere.parent = None
sphere.parent = render
# Collide from mask 2:
ez.collision.set_mask(sphere, ez.mask[2])
# Set what sphere will collide to:
ez.collision.set_from_mask(sphere, ez.mask[1])
sphere.pos = 0, 0 , 0
# Show the shape so we can see what it is doing, for debugging:
sphere.show()
sphere.name = "Sphere"

# Collision Capsule:
capsule = ez.collision.shapes.Capsule((0,0,0), (0,0,1), 0.25, parent=render)
ez.collision.set_mask(capsule, ez.mask[1])
capsule.pos = -4, 0, 0
capsule.show()
capsule.name = "Capsule"

# Collision Box:
box = ez.collision.shapes.Box((0.5, 0.25, 1), origin=(0, 0, 0.5), parent=render)
ez.collision.set_mask(box, ez.mask[1])
box.pos = 4, 0, 0
box.show()
box.name = "Box"

# Rays, they are only used for hitting other objects:
pos = -2, 0 , -1
to_pos = 0, 0, 2
segment = ez.collision.rays.Segment( pos, to_pos, parent=render)
ez.collision.set_from_mask(segment, ez.mask[2])
segment.show()
segment.name = "Segment"

from_pos = 1, 0, 1
towards = 0, 0, -1
ray = ez.collision.rays.Ray( from_pos, towards, parent=render )
ez.collision.set_from_mask(ray, ez.mask[2])
ray.show()
ray.name = "Ray"

pos = 2, 0, 0
direction = 0, 0, 1
line = ez.collision.rays.Line( pos, direction, parent=render)
ez.collision.set_from_mask(line, ez.mask[2])
line.show()
line.name = "line"

# Create a collision handler and add colliders:
handler = ez.collision.Handler()
handler.add_collider(sphere)
handler.add_collider(segment)
handler.add_collider(ray)
handler.add_collider(line)
def input(event):
    """Scene input hook: return to the menu scene on Escape release.

    (Shadows the builtin `input`; presumably the hook name is required by
    the engine -- confirm before renaming.)
    """
    device, name, state = event
    if name == 'escape' and state == 0:
        ez.set_scene(ez['menu'])
def logic(dt):
    """Per-frame scene logic: show collision hits and move the sphere with A/D."""
    hits = handler.get_collisions(render)
    if hits:
        for hit in hits:
            from_name = hit['FROM'].name
            into_name = hit['INTO'].name
            text = ez['text']
            # Rebuild the on-screen help plus a line per hit. The first
            # line matches the 'a' binding below and the text set in
            # enter(); the original said "W - move left", contradicting
            # the actual key.
            text.text = "A - move left"
            text.text += "\nD - move right\n"
            text.text += from_name+" HIT "+into_name+" AT "+str(hit['POS'])
    # Move the sphere at 2 units/sec, clamped to x in [-5, 5].
    if ez.is_button_down('a'):
        if sphere.x > -5:
            sphere.x -= 2*dt
    if ez.is_button_down('d'):
        if sphere.x < 5:
            sphere.x += 2*dt
def enter():
    """Scene-enter hook: black background and help text in the top-left."""
    ez.window.background_color = 0, 0, 0
    # Aspect-2D edges: Left, Right, Top, Bottom.
    L, R, T, B = ez.window.get_aspect2D_edges()
    text = ez['text']
    text.text = "A - move left"
    text.text += "\nD - move right"
    text.x = L+0.01
    text.y = T-0.08
    text.parent = aspect2D
def exit():
    """Scene-exit hook: nothing to tear down for this scene.

    (Shadows the builtin `exit`; hook name presumably required by the engine.)
    """
    pass
| 2.921875 | 3 |
tests/west/project/test_project.py | maciejjo/west | 0 | 12770226 | <reponame>maciejjo/west
import os
from os.path import dirname
import shlex
import shutil
import subprocess
import sys
import textwrap
import pytest
from west import config
# Absolute path of the git executable; None if git is not on PATH.
GIT = shutil.which('git')

# Assumes this file is west/tests/west/project/test_project.py, returns
# path to toplevel 'west'
THIS_WEST = os.path.abspath(dirname(dirname(dirname(dirname(__file__)))))
#
# Test fixtures
#
@pytest.fixture
def repos_tmpdir(tmpdir):
'''Fixture for tmpdir with "remote" repositories, manifest, and west.
These can then be used to bootstrap an installation and run
project-related commands on it with predictable results.
Switches directory to, and returns, the top level tmpdir -- NOT
the subdirectory containing the repositories themselves.
Initializes placeholder upstream repositories in <tmpdir>/remote-repos/
with the following contents:
repos/
├── west (branch: master)
│ └── (contains this west's worktree contents)
├── manifest (branch: master)
│ └── west.yml
├── Kconfiglib (branch: zephyr)
│ └── kconfiglib.py
├── net-tools (branch: master)
│ └── qemu-script.sh
└── zephyr (branch: master)
├── CODEOWNERS
├── west.yml
├── include
│ └── header.h
└── subsys
└── bluetooth
└── code.c
The contents of west.yml are:
west:
url: file://<tmpdir>/west
manifest:
defaults:
remote: test-local
remotes:
- name: test-local
url-base: file://<tmpdir>/remote-repos
projects:
- name: Kconfiglib
revision: zephyr
path: subdir/Kconfiglib
- name: net-tools
clone_depth: 1
west-commands: scripts/west-commands.yml
self:
path: zephyr
'''
rr = tmpdir.mkdir('repos') # "remote" repositories
rp = {} # individual repository paths under rr
# Mirror this west tree into a "remote" west repository under rr.
wdst = rr.join('west')
mirror_west_repo(wdst)
rp['west'] = str(wdst)
# Create the other repositories.
for repo in 'net-tools', 'Kconfiglib', 'zephyr':
path = str(rr.join(repo))
rp[repo] = path
create_repo(path)
# Initialize the manifest repository.
add_commit(rp['zephyr'], 'test manifest',
files={'west.yml': textwrap.dedent('''\
west:
url: file://{west}
manifest:
defaults:
remote: test-local
remotes:
- name: test-local
url-base: file://{rr}
projects:
- name: Kconfiglib
revision: zephyr
path: subdir/Kconfiglib
- name: net-tools
west-commands: scripts/west-commands.yml
self:
path: zephyr
'''.format(west=rp['west'], rr=str(rr)))})
# Initialize the Kconfiglib repository.
subprocess.check_call([GIT, 'checkout', '-b', 'zephyr'],
cwd=rp['Kconfiglib'])
add_commit(rp['Kconfiglib'], 'test kconfiglib commit',
files={'kconfiglib.py': 'print("hello world kconfiglib")\n'})
# Initialize the net-tools repository.
add_commit(rp['net-tools'], 'test net-tools commit',
files={'qemu-script.sh': 'echo hello world net-tools\n',
'scripts/west-commands.yml': textwrap.dedent('''\
west-commands:
- file: scripts/test.py
commands:
- name: test
class: Test
help: test-help
'''),
'scripts/test.py': textwrap.dedent('''\
from west.commands import WestCommand
class Test(WestCommand):
def __init__(self):
super(Test, self).__init__(
'test',
'test application',
'')
def do_add_parser(self, parser_adder):
parser = parser_adder.add_parser(self.name)
return parser
def do_run(self, args, ignored):
print('Testing test command 1')
'''),
})
# Initialize the zephyr repository.
add_commit(rp['zephyr'], 'test zephyr commit',
files={'CODEOWNERS': '',
'include/header.h': '#pragma once\n',
'subsys/bluetooth/code.c': 'void foo(void) {}\n'})
# Switch to and return the top-level temporary directory.
#
# This can be used to populate a west installation alongside.
# Switch to the top-level West installation directory
tmpdir.chdir()
return tmpdir
@pytest.fixture
def west_init_tmpdir(repos_tmpdir):
    '''Fixture for a tmpdir with 'remote' repositories and 'west init' run.

    Uses the remote repositories from the repos_tmpdir fixture to
    create a west installation using the system bootstrapper's init
    command -- and thus the test environment must install the
    bootstrapper from the current west source code tree under test.

    The contents of the west installation aren't checked at all.
    This is left up to the test cases.

    The directory that 'west init' created is returned as a
    py.path.local, with the current working directory set there.'''
    west_tmpdir = repos_tmpdir.join('west_installation')
    # Initialize from the 'zephyr' manifest repository created by repos_tmpdir.
    cmd('init -m "{}" "{}"'.format(str(repos_tmpdir.join('repos', 'zephyr')),
                                   str(west_tmpdir)))
    west_tmpdir.chdir()
    # Re-read west's configuration now that cwd is inside the installation.
    config.read_config()
    return west_tmpdir
@pytest.fixture
def west_update_tmpdir(west_init_tmpdir):
    '''Like west_init_tmpdir, but also runs west update
    (so all projects are cloned before the test starts).'''
    cmd('update', cwd=str(west_init_tmpdir))
    return west_init_tmpdir
#
# Test cases
#
def test_installation(west_update_tmpdir):
    # Basic test that west_update_tmpdir bootstrapped correctly. This
    # is a basic test of west init and west update.

    # Make sure the expected files and directories exist in the right
    # places (each project cloned, with its .git and known files).
    wct = west_update_tmpdir
    assert wct.check(dir=1)
    assert wct.join('subdir', 'Kconfiglib').check(dir=1)
    assert wct.join('subdir', 'Kconfiglib', '.git').check(dir=1)
    assert wct.join('subdir', 'Kconfiglib', 'kconfiglib.py').check(file=1)
    assert wct.join('net-tools').check(dir=1)
    assert wct.join('net-tools', '.git').check(dir=1)
    assert wct.join('net-tools', 'qemu-script.sh').check(file=1)
    assert wct.join('zephyr').check(dir=1)
    assert wct.join('zephyr', '.git').check(dir=1)
    assert wct.join('zephyr', 'CODEOWNERS').check(file=1)
    assert wct.join('zephyr', 'include', 'header.h').check(file=1)
    assert wct.join('zephyr', 'subsys', 'bluetooth', 'code.c').check(file=1)
def test_list(west_update_tmpdir):
    # Projects shall be listed in the order they appear in the manifest.
    # Check the behavior for some format arguments of interest as well.
    actual = cmd('list -f "{name} {revision} {path} {cloned} {clone_depth}"')
    # 'zephyr' is the manifest repo itself, hence revision '(not set)'.
    expected = ['zephyr (not set) zephyr (cloned) None',
                'Kconfiglib zephyr {} (cloned) None'.format(
                    os.path.join('subdir', 'Kconfiglib')),
                'net-tools master net-tools (cloned) None']
    assert actual.splitlines() == expected
def test_diff(west_init_tmpdir):
    # 'west diff' must succeed regardless of how many projects have been
    # cloned so far.
    # FIXME: Check output
    # Diff with no projects cloned shouldn't fail
    cmd('diff')
    # Neither should it fail after fetching one or both projects
    cmd('update net-tools')
    cmd('diff')
    cmd('update Kconfiglib')
    cmd('diff --cached') # Pass a custom flag too
def test_status(west_init_tmpdir):
    # 'west status' must succeed regardless of how many projects have
    # been cloned so far.
    # FIXME: Check output
    # Status with no projects cloned shouldn't fail
    cmd('status')
    # Neither should it fail after fetching one or both projects
    cmd('update net-tools')
    cmd('status')
    cmd('update Kconfiglib')
    cmd('status --long') # Pass a custom flag too
def test_forall(west_init_tmpdir):
    # 'west forall -c' must succeed regardless of how many projects have
    # been cloned so far.
    # FIXME: Check output
    # The 'echo' command is available in both 'shell' and 'batch'
    # 'forall' with no projects cloned shouldn't fail
    cmd("forall -c 'echo *'")
    # Neither should it fail after cloning one or both projects
    cmd('update net-tools')
    cmd("forall -c 'echo *'")
    cmd('update Kconfiglib')
    cmd("forall -c 'echo *'")
def test_update_west(west_init_tmpdir):
    # Test the 'west selfupdate' command. It calls through to the same backend
    # functions that are used for automatic updates and 'west init'
    # reinitialization.
    # update the net-tools repository
    cmd('update net-tools')
    west_prev = head_subject('.west/west')
    # Add commits to the local repos. We need to reconfigure
    # explicitly as these are clones, and west doesn't handle that for
    # us.
    for path in 'zephyr', '.west/west', 'net-tools':
        add_commit(path, 'test-update-local', reconfigure=True)
    # Check that resetting the west repository removes the local commit
    # from .west/west only; the project repositories keep theirs.
    cmd('selfupdate --reset-west')
    assert head_subject('zephyr') == 'test-update-local' # Unaffected
    assert head_subject('.west/west') == west_prev
    assert head_subject('net-tools') == 'test-update-local' # Unaffected
def test_update_projects(west_init_tmpdir):
    # Test the 'west update' command. It calls through to the same backend
    # functions that are used for automatic updates and 'west init'
    # reinitialization.
    # update all repositories
    cmd('update')
    # update_helper adds commits to the *remote* net-tools and
    # Kconfiglib repositories and re-runs 'west update'; both
    # manifest-rev and HEAD of each local clone must move.
    (nt_mr_0, nt_mr_1,
     nt_head_0, nt_head_1,
     kl_mr_0, kl_mr_1,
     kl_head_0, kl_head_1) = update_helper(west_init_tmpdir, 'update')
    assert nt_mr_0 != nt_mr_1, 'failed to update net-tools manifest-rev'
    assert nt_head_0 != nt_head_1, 'failed to update net-tools HEAD'
    assert kl_mr_0 != kl_mr_1, 'failed to update kconfiglib manifest-rev'
    assert kl_head_0 != kl_head_1, 'failed to update kconfiglib HEAD'
def test_update_projects_local_branch_commits(west_init_tmpdir):
    # Test the 'west update' command when working on local branch with local
    # commits and then updating project to upstream commit.
    # It calls through to the same backend functions that are used for
    # automatic updates and 'west init' reinitialization.
    # update all repositories
    cmd('update')
    # Create a local branch and add commits
    checkout_branch('net-tools', 'local_net_tools_test_branch', create=True)
    checkout_branch('subdir/Kconfiglib', 'local_kconfig_test_branch',
                    create=True)
    add_commit('net-tools', 'test local branch commit', reconfigure=True)
    add_commit('subdir/Kconfiglib', 'test local branch commit',
               reconfigure=True)
    net_tools_prev = head_subject('net-tools')
    kconfiglib_prev = head_subject('subdir/Kconfiglib')
    # Add commits to the upstream repos. We need to reconfigure
    # explicitly as these are clones, and west doesn't handle that for
    # us.
    (nt_mr_0, nt_mr_1,
     nt_head_0, nt_head_1,
     kl_mr_0, kl_mr_1,
     kl_head_0, kl_head_1) = update_helper(west_init_tmpdir, 'update')
    assert nt_mr_0 != nt_mr_1, 'failed to update net-tools manifest-rev'
    assert nt_head_0 != nt_head_1, 'failed to update net-tools HEAD'
    assert kl_mr_0 != kl_mr_1, 'failed to update kconfiglib manifest-rev'
    assert kl_head_0 != kl_head_1, 'failed to update kconfiglib HEAD'
    # Verify local branch is still present and untouched: HEAD moved to
    # the new upstream commit, but checking the local branch out again
    # restores the local commit.
    assert net_tools_prev != head_subject('net-tools')
    assert kconfiglib_prev != head_subject('subdir/Kconfiglib')
    checkout_branch('net-tools', 'local_net_tools_test_branch')
    checkout_branch('subdir/Kconfiglib', 'local_kconfig_test_branch')
    assert net_tools_prev == head_subject('net-tools')
    assert kconfiglib_prev == head_subject('subdir/Kconfiglib')
def test_init_again(west_init_tmpdir):
    # 'west init' must refuse to re-initialize an already initialized
    # tmpdir, both without and with an explicit manifest URL.
    for args in ('init', 'init -m foo'):
        with pytest.raises(subprocess.CalledProcessError):
            cmd(args)
def test_init_local_manifest_project(repos_tmpdir):
    # 'west init -l' against a pre-existing local clone of the manifest
    # repository must set up .west/west without fetching any projects;
    # a subsequent 'west update' then fetches them.
    # Do a local clone of manifest repo
    zephyr_install_dir = repos_tmpdir.join('west_installation', 'zephyr')
    clone(str(repos_tmpdir.join('repos', 'zephyr')),
          str(zephyr_install_dir))
    cmd('init -l "{}"'.format(str(zephyr_install_dir)))
    # Verify Zephyr and .west/west have been installed during init -l,
    # but projects have not.
    zid = repos_tmpdir.join('west_installation')
    assert zid.check(dir=1)
    assert zid.join('.west', 'west').check(dir=1)
    assert zid.join('subdir', 'Kconfiglib').check(dir=0)
    assert zid.join('net-tools').check(dir=0)
    assert zid.join('zephyr').check(dir=1)
    assert zid.join('zephyr', '.git').check(dir=1)
    assert zid.join('zephyr', 'CODEOWNERS').check(file=1)
    assert zid.join('zephyr', 'include', 'header.h').check(file=1)
    assert zid.join('zephyr', 'subsys', 'bluetooth', 'code.c').check(file=1)
    cmd('update', cwd=str(zid))
    # The projects should be installed now.  (The original repeated the
    # Kconfiglib and net-tools directory assertions; duplicates removed.)
    assert zid.check(dir=1)
    assert zid.join('subdir', 'Kconfiglib').check(dir=1)
    assert zid.join('subdir', 'Kconfiglib', '.git').check(dir=1)
    assert zid.join('subdir', 'Kconfiglib', 'kconfiglib.py').check(file=1)
    assert zid.join('net-tools').check(dir=1)
    assert zid.join('net-tools', '.git').check(dir=1)
    assert zid.join('net-tools', 'qemu-script.sh').check(file=1)
def test_init_local_already_initialized_failure(west_init_tmpdir):
    # Test that 'west init -l' on an initialized tmpdir errors out,
    # just like plain 'west init' does.
    with pytest.raises(subprocess.CalledProcessError):
        cmd('init -l "{}"'.format(str(west_init_tmpdir)))
def test_init_local_missing_west_yml_failure(repos_tmpdir):
    # Test that 'west init -l' on repo without a 'west.yml' fails
    # Do a local clone of manifest repo, then delete its manifest file
    # before initializing.
    zephyr_install_dir = repos_tmpdir.join('west_installation', 'zephyr')
    clone(str(repos_tmpdir.join('repos', 'zephyr')),
          str(zephyr_install_dir))
    os.remove(str(zephyr_install_dir.join('west.yml')))
    with pytest.raises(subprocess.CalledProcessError):
        cmd('init -l "{}"'.format(str(zephyr_install_dir)))
def test_extension_command_execution(west_init_tmpdir):
    # Extension commands are provided by projects, so 'west test' must
    # fail before 'west update' has fetched the providing project, and
    # succeed afterwards.
    with pytest.raises(subprocess.CalledProcessError):
        cmd('test')
    cmd('update')
    actual = cmd('test')
    assert actual == 'Testing test command 1\n'
def test_extension_command_multiproject(repos_tmpdir):
    # Test to ensure that multiple projects can define extension commands and
    # that those are correctly presented and executed.
    rr = repos_tmpdir.join('repos')
    remote_kconfiglib = str(rr.join('Kconfiglib'))
    remote_zephyr = str(rr.join('zephyr'))
    remote_west = str(rr.join('west'))
    # Update the manifest to specify extension commands in Kconfiglib.
    add_commit(remote_zephyr, 'test added extension command',
               files={'west.yml': textwrap.dedent('''\
                      west:
                        url: file://{west}
                      manifest:
                        defaults:
                          remote: test-local
                        remotes:
                          - name: test-local
                            url-base: file://{rr}
                        projects:
                          - name: Kconfiglib
                            revision: zephyr
                            path: subdir/Kconfiglib
                            west-commands: scripts/west-commands.yml
                          - name: net-tools
                            west-commands: scripts/west-commands.yml
                        self:
                          path: zephyr
                      '''.format(west=remote_west, rr=str(rr)))})
    # Add an extension command to the Kconfiglib remote.
    add_commit(remote_kconfiglib, 'add west commands',
               files={'scripts/west-commands.yml': textwrap.dedent('''\
                      west-commands:
                        - file: scripts/test.py
                          commands:
                            - name: kconfigtest
                              class: Test
                      '''),
                      'scripts/test.py': textwrap.dedent('''\
                      from west.commands import WestCommand
                      class Test(WestCommand):
                          def __init__(self):
                              super(Test, self).__init__(
                                  'kconfigtest',
                                  'Kconfig test application',
                                  '')
                          def do_add_parser(self, parser_adder):
                              parser = parser_adder.add_parser(self.name)
                              return parser
                          def do_run(self, args, ignored):
                              print('Testing kconfig test')
                      '''),
                      })
    # Bootstrap an installation from the updated manifest and fetch
    # the projects so their extension commands become available.
    west_tmpdir = repos_tmpdir.join('west_installation')
    cmd('init -m "{}" "{}"'.format(str(repos_tmpdir.join('repos', 'zephyr')),
                                   str(west_tmpdir)))
    west_tmpdir.chdir()
    config.read_config()
    cmd('update')
    # 'west -h' must list the extension commands of both projects.
    help_text = cmd('-h')
    expected = textwrap.dedent('''\
    commands from project at "subdir/Kconfiglib":
      kconfigtest: (no help provided; try "west kconfigtest -h")
    commands from project at "net-tools":
      test: test-help
    ''')
    assert expected in help_text
    # Both commands must also be runnable.
    actual = cmd('test')
    assert actual == 'Testing test command 1\n'
    actual = cmd('kconfigtest')
    assert actual == 'Testing kconfig test\n'
def test_extension_command_duplicate(repos_tmpdir):
    # Test that when two projects define an extension command with the
    # same name, west prints a warning and keeps the first definition.
    rr = repos_tmpdir.join('repos')
    remote_kconfiglib = str(rr.join('Kconfiglib'))
    remote_zephyr = str(rr.join('zephyr'))
    remote_west = str(rr.join('west'))
    # Point the manifest at west-commands in both projects.
    add_commit(remote_zephyr, 'test added extension command',
               files={'west.yml': textwrap.dedent('''\
                      west:
                        url: file://{west}
                      manifest:
                        defaults:
                          remote: test-local
                        remotes:
                          - name: test-local
                            url-base: file://{rr}
                        projects:
                          - name: Kconfiglib
                            revision: zephyr
                            path: subdir/Kconfiglib
                            west-commands: scripts/west-commands.yml
                          - name: net-tools
                            west-commands: scripts/west-commands.yml
                        self:
                          path: zephyr
                      '''.format(west=remote_west, rr=str(rr)))})
    # Add a 'test' command to the Kconfiglib remote; it clashes with the
    # 'test' command already provided by net-tools.
    add_commit(remote_kconfiglib, 'add west commands',
               files={'scripts/west-commands.yml': textwrap.dedent('''\
                      west-commands:
                        - file: scripts/test.py
                          commands:
                            - name: test
                              class: Test
                      '''),
                      'scripts/test.py': textwrap.dedent('''\
                      from west.commands import WestCommand
                      class Test(WestCommand):
                          def __init__(self):
                              super(Test, self).__init__(
                                  'test',
                                  'test application',
                                  '')
                          def do_add_parser(self, parser_adder):
                              parser = parser_adder.add_parser(self.name)
                              return parser
                          def do_run(self, args, ignored):
                              print('Testing kconfig test command')
                      '''),
                      })
    # Bootstrap the installation and fetch the projects.
    west_tmpdir = repos_tmpdir.join('west_installation')
    cmd('init -m "{}" "{}"'.format(str(repos_tmpdir.join('repos', 'zephyr')),
                                   str(west_tmpdir)))
    west_tmpdir.chdir()
    config.read_config()
    cmd('update')
    # Running 'west test' must emit the duplicate-command warning on
    # stderr and run the Kconfiglib variant of the command.
    actual = cmd('test', stderr=subprocess.STDOUT)
    warning = 'WARNING: ignoring project net-tools external command "test";'\
              ' command "test" already defined as extension command\n'
    command_out = 'Testing kconfig test command\n'
    assert actual == warning + command_out
#
# Helper functions used by the test cases and fixtures.
#
def create_repo(path):
    # Initializes a Git repository in 'path', sets up the committer
    # identity, and adds an initial (empty) commit to it.
    subprocess.check_call([GIT, 'init', path])
    config_repo(path)
    add_commit(path, 'initial')
def config_repo(path):
    # Set name and email. This avoids a "Please tell me who you are" error when
    # there's no global default.
    subprocess.check_call([GIT, 'config', 'user.name', '<NAME>'], cwd=path)
    subprocess.check_call([GIT, 'config', 'user.email',
                           '<EMAIL>'],
                          cwd=path)
def add_commit(repo, msg, files=None, reconfigure=True):
    '''Create a commit with message 'msg' in the repository at 'repo'.

    'files', if given, maps repository-relative file names to the
    contents they should have in the new commit; each file is written
    and staged before committing.  Without 'files', an empty commit is
    made.

    With 'reconfigure' (the default), user.name and user.email are
    (re)set in 'repo' via config_repo(), since fresh clones need them.'''
    repo = str(repo)
    if reconfigure:
        config_repo(repo)
    # Write out any requested file contents and stage them.
    if files:
        for fname, contents in files.items():
            target_dir = os.path.join(repo, os.path.dirname(fname))
            if not os.path.isdir(target_dir):
                # Allow any errors (like trying to create a directory
                # where a file already exists) to propagate up.
                os.makedirs(target_dir)
            target = os.path.join(target_dir, os.path.basename(fname))
            with open(target, 'w') as f:
                f.write(contents)
            subprocess.check_call([GIT, 'add', fname], cwd=repo)
    # The extra '--no-xxx' flags neutralize any global git configuration
    # (commit signing, hooks, ...) that developer workstations may have;
    # such settings could require user intervention or fail where Git
    # isn't configured.
    subprocess.check_call(
        [GIT, 'commit', '-a', '--allow-empty', '-m', msg, '--no-verify',
         '--no-gpg-sign', '--no-post-rewrite'], cwd=repo)
def clone(repo, dst):
    # Clones the repository at 'repo' into directory 'dst'.
    # (The original comment incorrectly said "Creates a new branch.")
    repo = str(repo)
    subprocess.check_call([GIT, 'clone', repo, dst])
def checkout_branch(repo, branch, create=False):
    # Checks out 'branch' in the repository at 'repo', creating it
    # first if 'create' is True.
    repo = str(repo)
    if create:
        subprocess.check_call([GIT, 'checkout', '-b', branch], cwd=repo)
    else:
        subprocess.check_call([GIT, 'checkout', branch], cwd=repo)
def check_output(*args, **kwargs):
    '''subprocess.check_output wrapper that decodes the result.

    Returns the captured output as a str in the default encoding
    instead of a bytes object.  On failure, diagnostics are printed to
    stderr before the CalledProcessError propagates.'''
    try:
        raw = subprocess.check_output(*args, **kwargs)
    except subprocess.CalledProcessError as e:
        print('*** check_output: nonzero return code', e.returncode,
              file=sys.stderr)
        print('cwd =', os.getcwd(), 'args =', args,
              'kwargs =', kwargs, file=sys.stderr)
        print('subprocess output:', file=sys.stderr)
        print(e.output.decode(), file=sys.stderr)
        raise
    else:
        return raw.decode(sys.getdefaultencoding())
def mirror_west_repo(dst):
    # Create a west repository in dst which mirrors the exact state of
    # the current tree, except ignored files.
    #
    # This is done in a simple way:
    #
    # 1. recursively copy THIS_WEST there (except .git and ignored files)
    # 2. init a new git repository there
    # 3. add the entire tree, and commit
    #
    # (We can't just clone THIS_WEST because we want to allow
    # developers to test their working trees without having to make a
    # commit -- remember, 'west init' clones the remote.)
    wut = str(dst) # "west under test"
    # Copy the west working tree, except ignored files.
    def ignore(directory, files):
        # shutil.copytree 'ignore' callback: returns the subset of
        # 'files' in 'directory' that should NOT be copied.
        # Get newline separated list of ignored files, as a string.
        try:
            ignored = check_output([GIT, 'check-ignore'] + files,
                                   cwd=directory)
        except subprocess.CalledProcessError as e:
            # From the manpage: return values 0 and 1 respectively
            # mean that some and no argument files were ignored. These
            # are both OK. Treat other return values as errors.
            if e.returncode not in (0, 1):
                raise
            else:
                ignored = e.output.decode(sys.getdefaultencoding())
        # Convert ignored to a set of file names as strings.
        ignored = set(ignored.splitlines())
        # Also ignore the .git directory itself.
        if '.git' in files:
            ignored.add('.git')
        return ignored
    shutil.copytree(THIS_WEST, wut, ignore=ignore)
    # Create a fresh .git and commit existing directory tree.
    create_repo(wut)
    subprocess.check_call([GIT, 'add', '-A'], cwd=wut)
    add_commit(wut, 'west under test')
def cmd(cmd, cwd=None, stderr=None):
    '''Run a west command and return its captured stdout.

    'cmd' is given as a single string, which is less clunky to work
    with than a list; it is split according to shell rules before being
    run.  'cwd' defaults to os.getcwd(); 'stderr' is passed through to
    the subprocess.

    This helper relies on the test environment to ensure that the
    'west' executable is a bootstrapper installed from the current west
    source code.  The command runs in a Python subprocess so that
    program-level setup and teardown happen fresh each time.'''
    argv = shlex.split('west ' + cmd)
    try:
        return check_output(argv, cwd=cwd, stderr=stderr)
    except subprocess.CalledProcessError:
        print('cmd: west:', shutil.which('west'), file=sys.stderr)
        raise
def head_subject(path):
    '''Return the subject line of the HEAD commit of the repo at 'path'.'''
    out = subprocess.check_output([GIT, 'log', '-n1', '--format=%s'],
                                  cwd=path)
    return out.decode().rstrip()
def update_helper(west_tmpdir, command):
    '''Add a commit to both the kconfiglib and net-tools project
    remotes, then run the project command 'command' in the west
    installation at 'west_tmpdir'.

    The 'manifest-rev' and HEAD SHAs of both local project repositories
    are captured before and after running the command and returned as:

    (net-tools-manifest-rev-before,
     net-tools-manifest-rev-after,
     net-tools-HEAD-before,
     net-tools-HEAD-after,
     kconfiglib-manifest-rev-before,
     kconfiglib-manifest-rev-after,
     kconfiglib-HEAD-before,
     kconfiglib-HEAD-after)'''
    def rev_parse(repo, ref):
        # SHA (as text) that 'ref' currently resolves to in 'repo'.
        return check_output([GIT, 'rev-parse', ref], cwd=repo)

    nt_remote = str(west_tmpdir.join('..', 'repos', 'net-tools'))
    nt_local = str(west_tmpdir.join('net-tools'))
    kl_remote = str(west_tmpdir.join('..', 'repos', 'Kconfiglib'))
    kl_local = str(west_tmpdir.join('subdir', 'Kconfiglib'))

    nt_mr_0 = rev_parse(nt_local, 'manifest-rev')
    kl_mr_0 = rev_parse(kl_local, 'manifest-rev')
    nt_head_0 = rev_parse(nt_local, 'HEAD')
    kl_head_0 = rev_parse(kl_local, 'HEAD')

    add_commit(nt_remote, 'another net-tools commit')
    add_commit(kl_remote, 'another kconfiglib commit')

    cmd(command)

    nt_mr_1 = rev_parse(nt_local, 'manifest-rev')
    kl_mr_1 = rev_parse(kl_local, 'manifest-rev')
    nt_head_1 = rev_parse(nt_local, 'HEAD')
    kl_head_1 = rev_parse(kl_local, 'HEAD')

    return (nt_mr_0, nt_mr_1,
            nt_head_0, nt_head_1,
            kl_mr_0, kl_mr_1,
            kl_head_0, kl_head_1)
| 2.1875 | 2 |
scripts/get_sample_dataset.py | hankehly/metabase-clickhouse-containerized-deployment | 0 | 12770227 | <gh_stars>0
try:
from pathlib import Path
from urllib.request import urlopen
except ImportError:
print("python version 3+ required")
exit(1)
try:
import pandas as pd
except ImportError:
print("sorry, please install pandas too")
exit(1)
BASE_DIR = Path(__file__).parent.parent.absolute()
# 15.45 MB as of 2021/06/05
DATASET_URI = "https://data.london.gov.uk/download/coronavirus--covid-19--cases/ae4d5fc9-5448-49a6-810f-910f7cbc9fd2/phe_vaccines_age_london_boroughs.csv"
DATASET_OUT_PATH = (
BASE_DIR / "clickhouse_user_files" / "phe_vaccines_age_london_boroughs.csv"
)
if __name__ == "__main__":
    # Make sure the destination directory exists before writing into it.
    DATASET_OUT_PATH.parent.mkdir(exist_ok=True)
    print(f"Downloading resource: {DATASET_URI}")
    # clickhouse fails to load csv as-is (perhaps this could be fixed with some
    # configuration tweaking) so parsing with pandas first..
    # Header row and index column are stripped from the output file.
    pd.read_csv(DATASET_URI).to_csv(DATASET_OUT_PATH, index=False, header=False)
    print(f"Download complete. Saved to path: {DATASET_OUT_PATH}")
| 3.015625 | 3 |
numba/tests/test_guvectorize_scalar.py | meawoppl/numba | 1 | 12770228 | <filename>numba/tests/test_guvectorize_scalar.py<gh_stars>1-10
"""
Tests for guvectorize scalar arguments
"""
from __future__ import print_function, absolute_import, division
import numpy as np
from numba import guvectorize
from numba import unittest_support as unittest
class TestGUVectorizeScalar(unittest.TestCase):
    """
    Tests guvectorize'd functions with scalar (0-d array) arguments.
    Note: nothing keeps the user from out-of-bound memory access inside
    a guvectorized kernel.
    """
    def test_scalar_output(self):
        """
        Note that scalar output is a 0-dimension array that acts as
        a pointer to the output location.
        """
        @guvectorize(['void(int32[:], int32[:])'], '(n)->()')
        def sum_row(inp, out):
            # Accumulate the row locally, then store through the 0-d
            # output "pointer".
            tmp = 0.
            for i in range(inp.shape[0]):
                tmp += inp[i]
            out[0] = tmp
        # inp is (10000, 3)
        # out is (10000)
        # The outer (leftmost) dimension must match or numpy broadcasting is performed.
        inp = np.arange(30000, dtype=np.int32).reshape(10000, 3)
        out = sum_row(inp)
        # verify result
        for i in range(inp.shape[0]):
            assert out[i] == inp[i].sum()
    def test_scalar_input(self):
        # The scalar second argument maps to signature '()' and arrives
        # in the kernel as a 0-d array, hence the n[0] indexing.
        @guvectorize(['int32[:], int32[:], int32[:]'], '(n),()->(n)')
        def foo(inp, n, out):
            for i in range(inp.shape[0]):
                out[i] = inp[i] * n[0]
        inp = np.arange(3 * 10, dtype=np.int32).reshape(10, 3)
        # out = np.empty_like(inp)
        out = foo(inp, 2)
        # verify result
        self.assertTrue(np.all(inp * 2 == out))
if __name__ == '__main__':
unittest.main()
| 2.75 | 3 |
wavelet.py | fmndantas/wavelet | 0 | 12770229 | <reponame>fmndantas/wavelet
#!/bin/python3
import numpy as np
class Wavelet:
    """Matched-coefficient wavelet (MCW) encoder/decoder.

    m -- number of coefficient rows (symbols consumed per step)
    g -- genus; each coefficient row has m * g entries
    """
    def __init__(self, m, g):
        self.m = m
        self.g = g
        self.mg = m * g
        self.A = None    # (m, m*g) coefficient matrix, lazily allocated
        self.mcw = None  # encoding matrix derived from A

    def _allocate_a_matrix(self):
        """Allocate the coefficient matrix A once, on first use."""
        if self.A is None:
            self.A = np.zeros((self.m, self.mg))

    def _get_raw_lines(self, file, value_type=float):
        """Yield the numbers in 'file' (one per line) in self.m chunks.

        'value_type' converts each raw line; it was renamed from 'type'
        to stop shadowing the builtin.
        """
        with open(file, 'r') as fh:
            flatten = [value_type(line) for line in fh.readlines()]
        size = int(np.size(flatten))
        step = int(size / self.m)
        for i in np.arange(0, size, step):
            yield flatten[i: i + step]

    def _set_a_coefficients(self, file):
        """Load A with the coefficients defined in the raw file 'file'."""
        self._allocate_a_matrix()
        lines = self._get_raw_lines(file)
        for i in range(self.m):
            self.A[i] = next(lines)

    def _allocate_mcw(self, message_length):
        """Allocate the MCW matrix for a message of 'message_length'.

        The MCW matrix is rotated during encoding, so it keeps m rows;
        its width includes mg - m extra tail columns.
        """
        if self.mcw is None:
            self.mcw = np.zeros((self.m, message_length + self.mg - self.m))

    def check_mcw_existence(self):
        """Raise ValueError if the MCW matrix has not been built yet."""
        if self.mcw is None:
            raise ValueError("MCW is not known")

    def _get_encoded_model(self):
        """Return a zeroed output vector sized for an encoded message."""
        self.check_mcw_existence()
        return np.zeros((self.mcw.shape[1],))

    def mcw_from_coefficients(self, file, message_length):
        """Build the MCW matrix from the coefficient file 'file'."""
        self._set_a_coefficients(file)
        self._allocate_mcw(message_length)
        self.mcw[:self.m, :self.mg] = self.A

    def encode(self, message):
        """Encode 'message' by accumulating m-sample blocks against a
        copy of the MCW matrix that is rolled m positions per block."""
        encoded = self._get_encoded_model()
        mcw = np.copy(self.mcw)
        for i in range(0, np.size(message), self.m):
            encoded += np.matmul(message[i: i + self.m], mcw)
            # np.roll without an axis shifts the flattened matrix,
            # sliding the coefficient window m samples forward.
            mcw = np.roll(mcw, self.m)
        return encoded

    def _get_decoded_model(self):
        """Return a zeroed output vector sized for a decoded message."""
        self.check_mcw_existence()
        return np.zeros((self.mcw.shape[1] - self.mg + self.m,))

    def decode(self, encoded):
        """Recover the message by correlating 'encoded' with the rows of
        the (progressively rolled) MCW matrix."""
        decoded = self._get_decoded_model()
        mcw = np.copy(self.mcw)
        for i in range(np.size(decoded)):
            if i and i % self.m == 0:
                mcw = np.roll(mcw, self.m)
            decoded[i] = np.dot(mcw[i % self.m], encoded)
        return decoded
| 3.21875 | 3 |
Mundo 01/daniellindo.py | lucsap/Python3 | 0 | 12770230 | a, b, c = input().split(' ')
def age (dias):
dias = dias
anos = dias // 360
meses = (((dias / 360) - (dias // 360) * 360) // 30
dyas = ((((dias / 360) - (dias // 360) * 360) / 30) - (((dias / 360) - (dias // 360) * 360) // 30)) *30
print(f'{anos} ano(s), {meses} mes(es) e {dyas} dia(s)')
age_a = age(a)
age_b = age(b)
age_c = age(c)
| 3.359375 | 3 |
migrations/versions/0e4285fb2929_add_newsletter_history.py | betagouv/ecosante | 3 | 12770231 | """Add newsletter history
Revision ID: <KEY>
Revises: 2<PASSWORD>a6ada0d
Create Date: 2020-11-03 12:01:49.481652
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '28165a6ada0d'
branch_labels = None
depends_on = None
def upgrade():
    # Create the newsletter history table: one row per newsletter sent,
    # linking a subscriber (inscription) to the recommandation included.
    op.create_table('newsletter',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('inscription_id', sa.Integer(), nullable=True),
    sa.Column('recommandation_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['inscription_id'], ['inscription.id'], ),
    sa.ForeignKeyConstraint(['recommandation_id'], ['recommandation.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
def downgrade():
    # Drop the history table created in upgrade().
    op.drop_table('newsletter')
| 1.445313 | 1 |
PythonM3/senha.py | MiguelTeixeiraUFPB/PythonM3 | 0 | 12770232 | <gh_stars>0
# Keep prompting for the password until the correct one (2) is entered.
while True:
    senha=int(input('digite sua senha : '))
    if senha==2:
        # Correct password: report access granted and stop.
        print('acesso permitido')
        break
    else:
        # Wrong password: report and prompt again.
        print('senha invalida, tente novamente')
learn-python/simple-scripts/2.py | sirius1024/qwer | 0 | 12770233 | <filename>learn-python/simple-scripts/2.py
#!/usr/bin/env python3
def myFunc(x, y):
    """Print the sum of x and y, raising TypeError for non-numeric args."""
    for name, value in (('x', x), ('y', y)):
        if not isinstance(value, (int, float)):
            raise TypeError('{} has wrong type'.format(name))
    print(x + y)


myFunc(1, 3)
| 4.15625 | 4 |
api/polls/views.py | vetordev/django-api | 0 | 12770234 | from django.shortcuts import render
from django.http import HttpResponse, HttpRequest
# Create your views here.
def index(request: HttpRequest):
    """Return a plain-text greeting for the polls index page."""
    greeting = "Hello, world."
    return HttpResponse(greeting)
| 1.8125 | 2 |
back/utils/CommonUtils.py | xiaomogui/json-databook | 0 | 12770235 | <reponame>xiaomogui/json-databook<gh_stars>0
import time
def formatTime(longtime):
# 格式化时间的函数
return time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(longtime))
def formatByte(number):
    # Return 'number' (a size in bytes) as a human readable string:
    # sizes of at least 1 KB get two decimals and a unit suffix, smaller
    # sizes are shown in bytes ("字节").
    #
    # Fixes over the original: the small-size fallback lived *inside*
    # the loop, so the MB/KB scales were unreachable; 'label' was
    # misspelled ('lable', a NameError); and operator precedence dropped
    # the "字节" suffix for whole numbers ending in ".00".
    for scale, label in [(1024 * 1024 * 1024, "GB"),
                         (1024 * 1024, "MB"),
                         (1024, "KB")]:
        if number >= scale:
            return "%.2f %s" % (number * 1.0 / scale, label)
    if number == 1:
        return "1字节"
    # Below 1 KB: show the value in bytes, trimming a trailing ".00".
    byte = "%.2f" % (number or 0)
    return (byte[:-3] if byte.endswith(".00") else byte) + "字节"
| 2.734375 | 3 |
pyscf/pbc/df/test/test_incore.py | robert-anderson/pyscf | 2 | 12770236 | <gh_stars>1-10
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import lib
from pyscf.pbc import gto as pgto
from pyscf.pbc import dft as pdft
from pyscf.pbc.df import incore
import pyscf.pbc
pyscf.pbc.DEBUG = False
def finger(a):
    # Reduce array 'a' to a single scalar fingerprint by dotting its
    # flattened values against cos(0), cos(1), ...
    flat = a.ravel()
    weights = numpy.cos(numpy.arange(flat.size))
    return numpy.dot(weights, flat)
class KnowValues(unittest.TestCase):
    def test_aux_e2(self):
        # Build a small 2-atom He cell with two s shells per atom and
        # check 3-center 1-electron integrals against reference
        # fingerprints, with and without pair symmetry and k-points.
        cell = pgto.Cell()
        cell.unit = 'B'
        cell.a = numpy.eye(3) * 3.
        cell.mesh = numpy.array([41]*3)
        cell.atom = 'He 0 1 1; He 1 1 0'
        cell.basis = { 'He': [[0, (0.8, 1.0)],
                       [0, (1.2, 1.0)]] }
        cell.verbose = 0
        cell.build(0, 0)
        auxcell = incore.format_aux_basis(cell)
        a1 = incore.aux_e2(cell, auxcell, 'int3c1e_sph')
        self.assertAlmostEqual(finger(a1), 0.1208944790152819, 9)
        # s2ij packs the (i,j) AO pair index; unpacking must reproduce a1.
        a2 = incore.aux_e2(cell, auxcell, 'int3c1e_sph', aosym='s2ij')
        self.assertTrue(numpy.allclose(a1, lib.unpack_tril(a2, axis=0).reshape(a1.shape)))
        # Same checks at a random (k, k) pair of k-points.
        numpy.random.seed(3)
        kpt = numpy.random.random(3)
        kptij_lst = numpy.array([[kpt,kpt]])
        a1 = incore.aux_e2(cell, auxcell, 'int3c1e_sph', kptij_lst=kptij_lst)
        self.assertAlmostEqual(finger(a1), -0.073719031689332651-0.054002639392614758j, 9)
        a2 = incore.aux_e2(cell, auxcell, 'int3c1e_sph', aosym='s2', kptij_lst=kptij_lst)
        self.assertTrue(numpy.allclose(a1, lib.unpack_tril(a2, 1, axis=0).reshape(a1.shape)))
        # And at a random (ki, kj) pair of distinct k-points.
        numpy.random.seed(1)
        kptij_lst = numpy.random.random((1,2,3))
        a1 = incore.aux_e2(cell, auxcell, 'int3c1e_sph', kptij_lst=kptij_lst)
        self.assertAlmostEqual(finger(a1), 0.039329191948685879-0.039836453846241987j, 9)
if __name__ == '__main__':
print("Full Tests for pbc.df.incore")
unittest.main()
| 1.96875 | 2 |
espnet2/text/symbols.py | unparalleled-ysj/espnet | 0 | 12770237 | <filename>espnet2/text/symbols.py
from espnet2.text.bilingual import thchs_phoneme, cmu_phoneme
from espnet2.text.minnan import minnan_phoneme
def get_pinyin_list(dict_path="../../../espnet2/text/bilingual/dictionary/thchs_tonebeep"):
    """Return the sorted pinyin symbols found in a THCHS tone dictionary.

    Each dictionary line starts with a pinyin token followed by a
    space-separated phoneme sequence; only the leading token is kept.

    The previously hard-coded relative path is now a defaulted
    parameter, so the list can be built from any dictionary file.
    """
    pinyin_list = []
    with open(dict_path, 'r', encoding='utf-8') as f:
        for line in f.readlines():
            pinyin_list.append(line.split(' ')[0])
    return sorted(pinyin_list)
class Symbols():
    """Phoneme symbol tables for the supported TTS language modes."""

    def __init__(self):
        self.thchs_phoneme = thchs_phoneme
        self.cmu_phoneme = cmu_phoneme
        self.minnan_phoneme = minnan_phoneme
        # Pause/punctuation symbols shared by every language mode.
        self.punc_phoneme = ["sp", "np", "lp"]

    def get_symbols(self, language="bilingual"):
        """Return the full symbol list for 'language'.

        Supported values: "bilingual" (THCHS + CMU), "minnan",
        "multilingual" (THCHS + CMU + '@'-prefixed Minnan) and
        "pinyin".  Raises RuntimeError for anything else.
        """
        if language == "bilingual":
            symbols = self.punc_phoneme + self.thchs_phoneme + self.cmu_phoneme
        elif language == "minnan":
            symbols = self.punc_phoneme + self.minnan_phoneme
        elif language == "multilingual":
            # Minnan phonemes are '@'-prefixed so they never collide
            # with the THCHS/CMU symbols.  (A direct comprehension
            # replaces the original lambda assignment.)
            minnan = ['@' + phoneme for phoneme in self.minnan_phoneme]
            symbols = self.punc_phoneme + self.thchs_phoneme + self.cmu_phoneme + minnan
        elif language == "pinyin":
            symbols = self.punc_phoneme + get_pinyin_list()
        else:
            # "surpport" typo fixed; the f-prefix was needless.
            raise RuntimeError("only support [bilingual minnan multilingual pinyin] format !")
        return symbols
| 2.9375 | 3 |
pysal/model/spreg/tests/test_error_sp_hom.py | ocefpaf/pysal | 1 | 12770238 | '''
Unittests for pysal.model.spreg.error_sp_hom module
'''
import unittest
import pysal.lib
from pysal.model.spreg import error_sp_hom as HOM
import numpy as np
from pysal.lib.common import RTOL
import pysal.model.spreg
class BaseGM_Error_Hom_Tester(unittest.TestCase):
    """Tests the low-level BaseGM_Error_Hom estimator against reference
    values on the columbus sample data."""
    def setUp(self):
        # HOVAL regressed on a constant, INC and CRIME, with a
        # row-standardized rook contiguity weights matrix.
        db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        self.X = np.array(X).T
        self.X = np.hstack((np.ones(self.y.shape),self.X))
        self.w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
        self.w.transform = 'r'
    def test_model(self):
        # Compare every estimation output against hard-coded reference
        # values (within RTOL).
        reg = HOM.BaseGM_Error_Hom(self.y, self.X, self.w.sparse, A1='hom_sc')
        np.testing.assert_allclose(reg.y[0],np.array([80.467003]),RTOL)
        x = np.array([ 1. , 19.531 , 15.72598])
        np.testing.assert_allclose(reg.x[0],x,RTOL)
        betas = np.array([[ 47.9478524 ], [ 0.70633223], [ -0.55595633], [ 0.41288558]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        np.testing.assert_allclose(reg.u[0],np.array([27.466734]),RTOL)
        np.testing.assert_allclose(reg.e_filtered[0],np.array([ 32.37298547]),RTOL)
        i_s = 'Maximum number of iterations reached.'
        np.testing.assert_string_equal(reg.iter_stop,i_s)
        np.testing.assert_allclose(reg.predy[0],np.array([ 53.000269]),RTOL)
        np.testing.assert_allclose(reg.n,49,RTOL)
        np.testing.assert_allclose(reg.k,3,RTOL)
        sig2 = 189.94459439729718
        np.testing.assert_allclose(reg.sig2,sig2)
        vm = np.array([[ 1.51340717e+02, -5.29057506e+00, -1.85654540e+00, -2.39139054e-03], [ -5.29057506e+00, 2.46669610e-01, 5.14259101e-02, 3.19241302e-04], [ -1.85654540e+00, 5.14259101e-02, 3.20510550e-02, -5.95640240e-05], [ -2.39139054e-03, 3.19241302e-04, -5.95640240e-05, 3.36690159e-02]])
        np.testing.assert_allclose(reg.vm,vm,RTOL)
        xtx = np.array([[ 4.90000000e+01, 7.04371999e+02, 1.72131237e+03], [ 7.04371999e+02, 1.16866734e+04, 2.15575320e+04], [ 1.72131237e+03, 2.15575320e+04, 7.39058986e+04]])
        np.testing.assert_allclose(reg.xtx,xtx,RTOL)
class GM_Error_Hom_Tester(unittest.TestCase):
    """Tests the user-level GM_Error_Hom estimator against reference
    values on the columbus sample data (includes diagnostics such as
    pseudo-R2, standard errors and z statistics)."""
    def setUp(self):
        # HOVAL regressed on INC and CRIME (constant added internally),
        # with a row-standardized rook contiguity weights matrix.
        db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        self.X = np.array(X).T
        self.w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
        self.w.transform = 'r'
    def test_model(self):
        # Compare every estimation output and diagnostic against
        # hard-coded reference values (within RTOL).
        reg = HOM.GM_Error_Hom(self.y, self.X, self.w, A1='hom_sc')
        np.testing.assert_allclose(reg.y[0],np.array([80.467003]),RTOL)
        x = np.array([ 1. , 19.531 , 15.72598])
        np.testing.assert_allclose(reg.x[0],x,RTOL)
        betas = np.array([[ 47.9478524 ], [ 0.70633223], [ -0.55595633], [ 0.41288558]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        np.testing.assert_allclose(reg.u[0],np.array([27.46673388]),RTOL)
        np.testing.assert_allclose(reg.e_filtered[0],np.array([ 32.37298547]),RTOL)
        np.testing.assert_allclose(reg.predy[0],np.array([ 53.00026912]),RTOL)
        np.testing.assert_allclose(reg.n,49,RTOL)
        np.testing.assert_allclose(reg.k,3,RTOL)
        vm = np.array([[ 1.51340717e+02, -5.29057506e+00, -1.85654540e+00, -2.39139054e-03], [ -5.29057506e+00, 2.46669610e-01, 5.14259101e-02, 3.19241302e-04], [ -1.85654540e+00, 5.14259101e-02, 3.20510550e-02, -5.95640240e-05], [ -2.39139054e-03, 3.19241302e-04, -5.95640240e-05, 3.36690159e-02]])
        np.testing.assert_allclose(reg.vm,vm,RTOL)
        i_s = 'Maximum number of iterations reached.'
        np.testing.assert_string_equal(reg.iter_stop,i_s)
        np.testing.assert_allclose(reg.iteration,1,RTOL)
        my = 38.436224469387746
        np.testing.assert_allclose(reg.mean_y,my)
        std_y = 18.466069465206047
        np.testing.assert_allclose(reg.std_y,std_y)
        pr2 = 0.34950977055969729
        np.testing.assert_allclose(reg.pr2,pr2)
        sig2 = 189.94459439729718
        np.testing.assert_allclose(reg.sig2,sig2)
        std_err = np.array([ 12.30206149, 0.49665844, 0.17902808, 0.18349119])
        np.testing.assert_allclose(reg.std_err,std_err,RTOL)
        z_stat = np.array([[ 3.89754616e+00, 9.71723059e-05], [ 1.42216900e+00, 1.54977196e-01], [ -3.10541409e+00, 1.90012806e-03], [ 2.25016500e+00, 2.44384731e-02]])
        np.testing.assert_allclose(reg.z_stat,z_stat,RTOL)
        xtx = np.array([[ 4.90000000e+01, 7.04371999e+02, 1.72131237e+03], [ 7.04371999e+02, 1.16866734e+04, 2.15575320e+04], [ 1.72131237e+03, 2.15575320e+04, 7.39058986e+04]])
        np.testing.assert_allclose(reg.xtx,xtx,RTOL)
class BaseGM_Endog_Error_Hom_Tester(unittest.TestCase):
    """Regression tests for HOM.BaseGM_Endog_Error_Hom on the Columbus data.

    HOVAL is the dependent variable, INC the exogenous regressor (plus an
    intercept), CRIME the endogenous regressor and DISCBD its instrument.
    """
    def setUp(self):
        db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        self.X = np.array(X).T
        self.X = np.hstack((np.ones(self.y.shape),self.X))
        yd = []
        yd.append(db.by_col("CRIME"))
        self.yd = np.array(yd).T
        q = []
        q.append(db.by_col("DISCBD"))
        self.q = np.array(q).T
        # Row-standardized rook contiguity weights built from the shapefile.
        self.w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
        self.w.transform = 'r'
    def test_model(self):
        """Check every public attribute of the fitted model against reference values."""
        reg = HOM.BaseGM_Endog_Error_Hom(self.y, self.X, self.yd, self.q, self.w.sparse, A1='hom_sc')
        np.testing.assert_allclose(reg.y[0],np.array([ 80.467003]),RTOL)
        x = np.array([ 1. , 19.531])
        np.testing.assert_allclose(reg.x[0],x,RTOL)
        z = np.array([ 1. , 19.531 , 15.72598])
        np.testing.assert_allclose(reg.z[0],z,RTOL)
        h = np.array([ 1. , 19.531, 5.03 ])
        np.testing.assert_allclose(reg.h[0],h,RTOL)
        yend = np.array([ 15.72598])
        np.testing.assert_allclose(reg.yend[0],yend,RTOL)
        q = np.array([ 5.03])
        np.testing.assert_allclose(reg.q[0],q,RTOL)
        betas = np.array([[ 55.36575166], [ 0.46432416], [ -0.66904404], [ 0.43205526]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        u = np.array([ 26.55390939])
        np.testing.assert_allclose(reg.u[0],u,RTOL)
        np.testing.assert_allclose(reg.e_filtered[0],np.array([ 31.74114306]),RTOL)
        predy = np.array([ 53.91309361])
        np.testing.assert_allclose(reg.predy[0],predy,RTOL)
        np.testing.assert_allclose(reg.n,49,RTOL)
        np.testing.assert_allclose(reg.k,3,RTOL)
        sig2 = 190.59435238060928
        np.testing.assert_allclose(reg.sig2,sig2)
        vm = np.array([[ 5.52064057e+02, -1.61264555e+01, -8.86360735e+00, 1.04251912e+00], [ -1.61264555e+01, 5.44898242e-01, 2.39518645e-01, -1.88092950e-02], [ -8.86360735e+00, 2.39518645e-01, 1.55501840e-01, -2.18638648e-02], [ 1.04251912e+00, -1.88092950e-02, -2.18638648e-02, 3.71222222e-02]])
        np.testing.assert_allclose(reg.vm,vm,RTOL)
        i_s = 'Maximum number of iterations reached.'
        np.testing.assert_string_equal(reg.iter_stop,i_s)
        its = 1
        np.testing.assert_allclose(reg.iteration,its,RTOL)
        my = 38.436224469387746
        np.testing.assert_allclose(reg.mean_y,my)
        std_y = 18.466069465206047
        np.testing.assert_allclose(reg.std_y,std_y)
        # NOTE(review): this sig2 reference is unused — the assertion below is
        # disabled and reg.sig2 was already checked above.
        sig2 = 0
        #np.testing.assert_allclose(reg.sig2,sig2)
        hth = np.array([[ 49. , 704.371999 , 139.75 ], [ 704.371999 , 11686.67338121, 2246.12800625], [ 139.75 , 2246.12800625, 498.5851]])
        np.testing.assert_allclose(reg.hth,hth,RTOL)
class GM_Endog_Error_Hom_Tester(unittest.TestCase):
    """Regression tests for the user-facing HOM.GM_Endog_Error_Hom wrapper.

    Same Columbus setup as the Base tester, but X is passed WITHOUT a constant
    (the wrapper adds the intercept itself) and the full weights object is
    passed instead of its sparse matrix.
    """
    def setUp(self):
        db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        self.X = np.array(X).T
        yd = []
        yd.append(db.by_col("CRIME"))
        self.yd = np.array(yd).T
        q = []
        q.append(db.by_col("DISCBD"))
        self.q = np.array(q).T
        # Row-standardized rook contiguity weights built from the shapefile.
        self.w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
        self.w.transform = 'r'
    def test_model(self):
        """Check every public attribute of the fitted model against reference values."""
        reg = HOM.GM_Endog_Error_Hom(self.y, self.X, self.yd, self.q, self.w, A1='hom_sc')
        np.testing.assert_allclose(reg.y[0],np.array([ 80.467003]),RTOL)
        x = np.array([ 1. , 19.531])
        np.testing.assert_allclose(reg.x[0],x,RTOL)
        z = np.array([ 1. , 19.531 , 15.72598])
        np.testing.assert_allclose(reg.z[0],z,RTOL)
        h = np.array([ 1. , 19.531, 5.03 ])
        np.testing.assert_allclose(reg.h[0],h,RTOL)
        yend = np.array([ 15.72598])
        np.testing.assert_allclose(reg.yend[0],yend,RTOL)
        q = np.array([ 5.03])
        np.testing.assert_allclose(reg.q[0],q,RTOL)
        betas = np.array([[ 55.36575166], [ 0.46432416], [ -0.66904404], [ 0.43205526]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        u = np.array([ 26.55390939])
        np.testing.assert_allclose(reg.u[0],u,RTOL)
        np.testing.assert_allclose(reg.e_filtered[0],np.array([ 31.74114306]),RTOL)
        predy = np.array([ 53.91309361])
        np.testing.assert_allclose(reg.predy[0],predy,RTOL)
        np.testing.assert_allclose(reg.n,49,RTOL)
        np.testing.assert_allclose(reg.k,3,RTOL)
        vm = np.array([[ 5.52064057e+02, -1.61264555e+01, -8.86360735e+00, 1.04251912e+00], [ -1.61264555e+01, 5.44898242e-01, 2.39518645e-01, -1.88092950e-02], [ -8.86360735e+00, 2.39518645e-01, 1.55501840e-01, -2.18638648e-02], [ 1.04251912e+00, -1.88092950e-02, -2.18638648e-02, 3.71222222e-02]])
        np.testing.assert_allclose(reg.vm,vm,RTOL)
        i_s = 'Maximum number of iterations reached.'
        np.testing.assert_string_equal(reg.iter_stop,i_s)
        its = 1
        np.testing.assert_allclose(reg.iteration,its,RTOL)
        my = 38.436224469387746
        np.testing.assert_allclose(reg.mean_y,my)
        std_y = 18.466069465206047
        np.testing.assert_allclose(reg.std_y,std_y)
        pr2 = 0.34647366525657419
        np.testing.assert_allclose(reg.pr2,pr2)
        sig2 = 190.59435238060928
        np.testing.assert_allclose(reg.sig2,sig2)
        #std_err
        std_err = np.array([ 23.49604343, 0.73817223, 0.39433722, 0.19267128])
        np.testing.assert_allclose(reg.std_err,std_err,RTOL)
        z_stat = np.array([[ 2.35638617, 0.01845372], [ 0.62901874, 0.52933679], [-1.69662923, 0.08976678], [ 2.24244556, 0.02493259]])
        np.testing.assert_allclose(reg.z_stat,z_stat,RTOL)
class BaseGM_Combo_Hom_Tester(unittest.TestCase):
    """Regression tests for HOM.BaseGM_Combo_Hom (spatial lag + error model).

    The spatially lagged dependent variable and its instruments are built
    explicitly with spreg.utils.set_endog before calling the Base class.
    """
    def setUp(self):
        db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        self.X = np.array(X).T
        # Row-standardized rook contiguity weights built from the shapefile.
        self.w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
        self.w.transform = 'r'
    def test_model(self):
        """Check every public attribute of the fitted model against reference values."""
        # Build W*y as the endogenous variable and first-order W*X instruments.
        yd2, q2 = pysal.model.spreg.utils.set_endog(self.y, self.X, self.w, None, None, 1, True)
        self.X = np.hstack((np.ones(self.y.shape),self.X))
        reg = HOM.BaseGM_Combo_Hom(self.y, self.X, yend=yd2, q=q2, w=self.w.sparse, A1='hom_sc')
        np.testing.assert_allclose(reg.y[0],np.array([80.467003]),RTOL)
        x = np.array([ 1. , 19.531])
        np.testing.assert_allclose(reg.x[0],x,RTOL)
        betas = np.array([[ 10.12541428], [ 1.56832263], [ 0.15132076], [ 0.21033397]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        np.testing.assert_allclose(reg.u[0],np.array([34.3450723]),RTOL)
        np.testing.assert_allclose(reg.e_filtered[0],np.array([ 36.6149682]),RTOL)
        np.testing.assert_allclose(reg.predy[0],np.array([ 46.1219307]),RTOL)
        np.testing.assert_allclose(reg.n,49,RTOL)
        np.testing.assert_allclose(reg.k,3,RTOL)
        vm = np.array([[ 2.33694742e+02, -6.66856869e-01, -5.58304254e+00, 4.85488380e+00], [ -6.66856869e-01, 1.94241504e-01, -5.42327138e-02, 5.37225570e-02], [ -5.58304254e+00, -5.42327138e-02, 1.63860721e-01, -1.44425498e-01], [ 4.85488380e+00, 5.37225570e-02, -1.44425498e-01, 1.78622255e-01]])
        np.testing.assert_allclose(reg.vm,vm,RTOL)
        z = np.array([ 1. , 19.531 , 35.4585005])
        np.testing.assert_allclose(reg.z[0],z,RTOL)
        h = np.array([ 1. , 19.531, 18.594])
        np.testing.assert_allclose(reg.h[0],h,RTOL)
        yend = np.array([ 35.4585005])
        np.testing.assert_allclose(reg.yend[0],yend,RTOL)
        q = np.array([ 18.594])
        np.testing.assert_allclose(reg.q[0],q,RTOL)
        i_s = 'Maximum number of iterations reached.'
        np.testing.assert_string_equal(reg.iter_stop,i_s)
        its = 1
        np.testing.assert_allclose(reg.iteration,its,RTOL)
        my = 38.436224469387746
        np.testing.assert_allclose(reg.mean_y,my)
        std_y = 18.466069465206047
        np.testing.assert_allclose(reg.std_y,std_y)
        sig2 = 232.22680651270042
        # NOTE(review): leftover duplicate of the assertion below; safe to remove.
        #np.testing.assert_allclose(reg.sig2,sig2)
        np.testing.assert_allclose(reg.sig2,sig2)
        hth = np.array([[ 49. , 704.371999 , 724.7435916 ], [ 704.371999 , 11686.67338121, 11092.519988 ], [ 724.7435916 , 11092.519988 , 11614.62257048]])
        np.testing.assert_allclose(reg.hth,hth,RTOL)
class GM_Combo_Hom_Tester(unittest.TestCase):
    """Regression tests for the user-facing HOM.GM_Combo_Hom wrapper.

    Unlike the Base tester, the wrapper builds the spatial lag and its
    instruments internally, so only y, X and w are supplied.
    """
    def setUp(self):
        db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        self.X = np.array(X).T
        # Row-standardized rook contiguity weights built from the shapefile.
        self.w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
        self.w.transform = 'r'
    def test_model(self):
        """Check every public attribute of the fitted model against reference values."""
        reg = HOM.GM_Combo_Hom(self.y, self.X, w=self.w, A1='hom_sc')
        np.testing.assert_allclose(reg.y[0],np.array([80.467003]),RTOL)
        x = np.array([ 1. , 19.531])
        np.testing.assert_allclose(reg.x[0],x,RTOL)
        betas = np.array([[ 10.12541428], [ 1.56832263], [ 0.15132076], [ 0.21033397]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        np.testing.assert_allclose(reg.u[0],np.array([34.3450723]),RTOL)
        np.testing.assert_allclose(reg.e_filtered[0],np.array([ 36.6149682]),RTOL)
        np.testing.assert_allclose(reg.e_pred[0],np.array([ 32.90372983]),RTOL)
        np.testing.assert_allclose(reg.predy[0],np.array([ 46.1219307]),RTOL)
        np.testing.assert_allclose(reg.predy_e[0],np.array([47.56327317]),RTOL)
        np.testing.assert_allclose(reg.n,49,RTOL)
        np.testing.assert_allclose(reg.k,3,RTOL)
        z = np.array([ 1. , 19.531 , 35.4585005])
        np.testing.assert_allclose(reg.z[0],z,RTOL)
        h = np.array([ 1. , 19.531, 18.594])
        np.testing.assert_allclose(reg.h[0],h,RTOL)
        yend = np.array([ 35.4585005])
        np.testing.assert_allclose(reg.yend[0],yend,RTOL)
        q = np.array([ 18.594])
        np.testing.assert_allclose(reg.q[0],q,RTOL)
        i_s = 'Maximum number of iterations reached.'
        np.testing.assert_string_equal(reg.iter_stop,i_s)
        np.testing.assert_allclose(reg.iteration,1,RTOL)
        my = 38.436224469387746
        np.testing.assert_allclose(reg.mean_y,my)
        std_y = 18.466069465206047
        np.testing.assert_allclose(reg.std_y,std_y)
        pr2 = 0.28379825632694394
        np.testing.assert_allclose(reg.pr2,pr2)
        pr2_e = 0.25082892555141506
        np.testing.assert_allclose(reg.pr2_e,pr2_e)
        sig2 = 232.22680651270042
        # NOTE(review): leftover duplicate of the assertion below; safe to remove.
        #np.testing.assert_allclose(reg.sig2, sig2)
        np.testing.assert_allclose(reg.sig2, sig2)
        std_err = np.array([ 15.28707761, 0.44072838, 0.40479714, 0.42263726])
        np.testing.assert_allclose(reg.std_err,std_err,RTOL)
        z_stat = np.array([[ 6.62351206e-01, 5.07746167e-01], [ 3.55847888e+00, 3.73008780e-04], [ 3.73818749e-01, 7.08539170e-01], [ 4.97670189e-01, 6.18716523e-01]])
        np.testing.assert_allclose(reg.z_stat,z_stat,RTOL)
        vm = np.array([[ 2.33694742e+02, -6.66856869e-01, -5.58304254e+00, 4.85488380e+00], [ -6.66856869e-01, 1.94241504e-01, -5.42327138e-02, 5.37225570e-02], [ -5.58304254e+00, -5.42327138e-02, 1.63860721e-01, -1.44425498e-01], [ 4.85488380e+00, 5.37225570e-02, -1.44425498e-01, 1.78622255e-01]])
        np.testing.assert_allclose(reg.vm,vm,RTOL)
# Assemble the full regression suite for the homoskedastic GM estimators.
test_classes = [BaseGM_Error_Hom_Tester, GM_Error_Hom_Tester,
                BaseGM_Endog_Error_Hom_Tester, GM_Endog_Error_Hom_Tester,
                BaseGM_Combo_Hom_Tester, GM_Combo_Hom_Tester]
suite = unittest.TestSuite()
_loader = unittest.TestLoader()
for test_case in test_classes:
    suite.addTest(_loader.loadTestsFromTestCase(test_case))
if __name__ == '__main__':
    unittest.TextTestRunner().run(suite)
| 2.234375 | 2 |
# Spoken-word -> character lookup used to decode voice-entered identifiers.
# NATO phonetic words plus the LAPD/"police" alphabet and common
# speech-recognizer mishearings ("marry" -> M, "neither" -> 9, ...).
phonetic_alphabet = {
    "alpha": "A", "adam": "A",                                   # A
    "boy": "B", "bravo": "B",                                    # B
    "charlie": "C",                                              # C
    "delta": "D", "david": "D",                                  # D
    "echo": "E", "edward": "E",                                  # E
    "foxtrot": "F", "frank": "F",                                # F
    "golf": "G", "george": "G",                                  # G
    "hotel": "H", "henry": "H",                                  # H
    "india": "I", "ida": "I", "aida": "I",                       # I
    "juliette": "J", "john": "J",                                # J
    "kilo": "K", "king": "K",                                    # K
    "lima": "L", "lincoln": "L",                                 # L
    "mike": "M", "mary": "M", "marry": "M",                      # M
    "november": "N", "norah": "N", "nora": "N",                  # N
    "oscar": "O", "ocean": "O",                                  # O
    "papa": "P", "paul": "P",                                    # P
    "quebec": "Q", "queen": "Q",                                 # Q
    "romeo": "R", "robert": "R",                                 # R
    "sierra": "S", "sam": "S",                                   # S
    "tango": "T", "tom": "T",                                    # T
    "uniform": "U", "union": "U",                                # U
    "victor": "V", "vector": "V",                                # V
    "whiskey": "W", "william": "W",                              # W
    "x-ray": "X",                                                # X
    "yankee": "Y", "young": "Y",                                 # Y
    "zulu": "Z", "zebra": "Z",                                   # Z
    "neiner": "9", "niner": "9", "neither": "9",                 # aviation "nine"
    "alfa": "A",                                                 # ICAO spelling of alpha
}

# Lower-case state/territory name -> USPS two-letter abbreviation.
# Includes a few mishearings ("main" -> ME) and the non-state "national" code.
states = {
    'alaska': 'AK', 'alabama': 'AL', 'arkansas': 'AR', 'american samoa': 'AS',
    'arizona': 'AZ', 'california': 'CA', 'colorado': 'CO', 'connecticut': 'CT',
    'district of columbia': 'DC', 'delaware': 'DE', 'florida': 'FL',
    'georgia': 'GA', 'guam': 'GU', 'hawaii': 'HI', 'iowa': 'IA', 'idaho': 'ID',
    'illinois': 'IL', 'indiana': 'IN', 'kansas': 'KS', 'kentucky': 'KY',
    'louisiana': 'LA', 'massachusetts': 'MA', 'maryland': 'MD', 'maine': 'ME',
    'main': 'ME', 'michigan': 'MI', 'minnesota': 'MN', 'missouri': 'MO',
    'northern mariana islands': 'MP', 'mississippi': 'MS', 'montana': 'MT',
    'national': 'NA', 'north carolina': 'NC', 'north dakota': 'ND',
    'nebraska': 'NE', 'new hampshire': 'NH', 'new jersey': 'NJ',
    'new mexico': 'NM', 'nevada': 'NV', 'new york': 'NY', 'ohio': 'OH',
    'oklahoma': 'OK', 'oregon': 'OR', 'pennsylvania': 'PA', 'puerto rico': 'PR',
    'rhode island': 'RI', 'south carolina': 'SC', 'south dakota': 'SD',
    'tennessee': 'TN', 'texas': 'TX', 'utah': 'UT', 'virginia': 'VA',
    'virgin islands': 'VI', 'vermont': 'VT', 'washington': 'WA',
    'wisconsin': 'WI', 'west virginia': 'WV', 'wyoming': 'WY',
}
| 2.359375 | 2 |
# Based on "Exercício Python 094 - Unindo dicionários e listas" from Curso em Vídeo
# Interactively collect people records (name / sex / age), then report the
# total count, the (integer) average age, all women, and everyone older than
# the average. Prompts and output remain in Portuguese by design.
dici = {}
lista = []
mulher = []
mmedi = []
m = 0
while True:
    dici['nome'] = str(input(f'Qual o nome ? '))
    dici['sexo'] = str(input(f'Qual o sexo do {dici["nome"]} ? '))
    dici['idade'] = int(input(f'Qual a idade do {dici["nome"]} ? '))
    # copy() so the next iteration's edits don't mutate the stored record
    lista.append(dici.copy())
    print(dici)
    resp = str(input('Deseja continuar ? (S/N) '))
    if resp[0] in 'Nn':
        break
print(lista)
print(f'Ao total teve {len(lista)} pessoa(s) cadastrada(s).')
# Integer (floor) average of all ages.
for c in lista:
    m += c['idade']
m = m // len(lista)
for c in lista:
    # NOTE(review): only the literal answer 'Femi' is counted as female —
    # confirm this matches the expected answers to the sex prompt.
    if c['sexo'] == 'Femi':
        mulher.append(c['nome'])
    if c['idade'] > m:
        mmedi.append(c['nome'])
print(f'A média de idade das {len(lista)} pessoa(s) é {m}')
print(f'A lista com o nome de todas as mulheres: {mulher}')
print(f'A lista com a(s) pessoa(s) com idade acima da média é: {mmedi}')
| 3.59375 | 4 |
feature_extraction/demo_utils.py | v-iashin/SpecVQGAN | 81 | 12770241 | '''
The code is partially borrowed from:
https://github.com/v-iashin/video_features/blob/861efaa4ed67/utils/utils.py
and
https://github.com/PeihaoChen/regnet/blob/199609/extract_audio_and_video.py
'''
import os
import shutil
import subprocess
from glob import glob
from pathlib import Path
from typing import Dict
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.models as models
import torchvision.transforms as transforms
import torchvision.transforms.functional as F
from omegaconf.omegaconf import OmegaConf
from sample_visualization import (load_feature_extractor,
load_model_from_config, load_vocoder)
from specvqgan.data.vggsound import CropFeats
from specvqgan.util import download, md5_hash
from specvqgan.models.cond_transformer import disabled_train
from train import instantiate_from_config
from feature_extraction.extract_mel_spectrogram import get_spectrogram
# Trim whitespace around saved matplotlib figures.
plt.rcParams['savefig.bbox'] = 'tight'
def which_ffmpeg() -> str:
    '''Locate the ffmpeg binary via the `which` shell utility.

    Returns:
        str -- absolute path to ffmpeg, or an empty string if it is not installed
    '''
    proc = subprocess.run(
        ['which', 'ffmpeg'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
    )
    return proc.stdout.decode('utf-8').replace('\n', '')
def which_ffprobe() -> str:
    '''Locate the ffprobe binary via the `which` shell utility.

    Returns:
        str -- absolute path to ffprobe, or an empty string if it is not installed
    '''
    proc = subprocess.run(
        ['which', 'ffprobe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
    )
    return proc.stdout.decode('utf-8').replace('\n', '')
def check_video_for_audio(path):
    '''Return True if the file at `path` contains an audio stream (per ffprobe).'''
    assert which_ffprobe() != '', 'Is ffmpeg installed? Check if the conda environment is activated.'
    probe_cmd = f'{which_ffprobe()} -loglevel error -show_entries stream=codec_type -of default=nw=1 {path}'
    probe = subprocess.run(probe_cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    streams_info = probe.stdout.decode('utf-8')
    print(streams_info)
    return 'codec_type=audio' in streams_info
def get_duration(path):
    '''Return the duration of the media file at `path` in seconds, probed with ffprobe.'''
    assert which_ffprobe() != '', 'Is ffmpeg installed? Check if the conda environment is activated.'
    probe_cmd = (f'{which_ffprobe()} -hide_banner -loglevel panic'
                 f' -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 {path}')
    probe = subprocess.run(probe_cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return float(probe.stdout.decode('utf-8').replace('\n', ''))
def trim_video(video_path: str, start: int, trim_duration: int = 10, tmp_path: str = './tmp'):
    '''Cut a `trim_duration`-second clip starting at `start` seconds from `video_path`.

    The clip is written into `tmp_path` (created if missing) as
    `<stem>_trim_to_<trim_duration>s.mp4` and its path is returned.
    Raises AssertionError if ffmpeg is missing or `start` exceeds the video length.
    '''
    assert which_ffmpeg() != '', 'Is ffmpeg installed? Check if the conda environment is activated.'
    if Path(video_path).suffix != '.mp4':
        print(f'File Extension is not `mp4` (it is {Path(video_path).suffix}). It will be re-encoded to mp4.')
    video_duration = get_duration(video_path)
    print('Video Duration:', video_duration)
    assert video_duration > start, f'Video Duration < Trim Start: {video_duration} < {start}'
    # create tmp dir if doesn't exist
    os.makedirs(tmp_path, exist_ok=True)
    trim_vid_path = os.path.join(tmp_path, f'{Path(video_path).stem}_trim_to_{trim_duration}s.mp4')
    cmd = f'{which_ffmpeg()} -hide_banner -loglevel panic' \
          f' -i {video_path} -ss {start} -t {trim_duration} -y {trim_vid_path}'
    subprocess.call(cmd.split())
    print('Trimmed the input video', video_path, 'and saved the output @', trim_vid_path)
    return trim_vid_path
def reencode_video_with_diff_fps(video_path: str, tmp_path: str, extraction_fps: int) -> str:
    '''Re-encode `video_path` at `extraction_fps` frames per second.

    The result is written into `tmp_path` (created if missing) as
    `<stem>_new_fps.mp4`.

    Args:
        video_path (str): original video
        tmp_path (str): the folder where tmp files are stored (will be appended with a proper filename).
        extraction_fps (int): target fps value

    Returns:
        str: The path where the tmp file is stored. To be used to load the video from
    '''
    assert which_ffmpeg() != '', 'Is ffmpeg installed? Check if the conda environment is activated.'
    os.makedirs(tmp_path, exist_ok=True)
    out_path = os.path.join(tmp_path, f'{Path(video_path).stem}_new_fps.mp4')
    reencode_cmd = (f'{which_ffmpeg()} -hide_banner -loglevel panic '
                    f'-y -i {video_path} -filter:v fps=fps={extraction_fps} {out_path}')
    subprocess.call(reencode_cmd.split())
    return out_path
def maybe_download_model(model_name: str, log_dir: str) -> str:
    '''Ensure the pre-trained model `model_name` is unpacked under `log_dir`.

    Downloads and extracts the corresponding tar.gz (verified by md5) only if
    the model directory does not already exist.

    Returns:
        str -- path to the unpacked model directory.
    '''
    # Registry of released checkpoints: human-readable info, archive md5 and URL.
    name2info = {
        '2021-06-20T16-35-20_vggsound_transformer': {
            'info': 'No Feats',
            'hash': 'b1f9bb63d831611479249031a1203371',
            'link': 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a'
                    '/specvqgan_public/models/2021-06-20T16-35-20_vggsound_transformer.tar.gz',
        },
        '2021-07-30T21-03-22_vggsound_transformer': {
            'info': '1 ResNet50 Feature',
            'hash': '27a61d4b74a72578d13579333ed056f6',
            'link': 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a'
                    '/specvqgan_public/models/2021-07-30T21-03-22_vggsound_transformer.tar.gz',
        },
        '2021-07-30T21-34-25_vggsound_transformer': {
            'info': '5 ResNet50 Features',
            'hash': 'f4d7105811589d441b69f00d7d0b8dc8',
            'link': 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a'
                    '/specvqgan_public/models/2021-07-30T21-34-25_vggsound_transformer.tar.gz',
        },
        '2021-07-30T21-34-41_vggsound_transformer': {
            'info': '212 ResNet50 Features',
            'hash': 'b222cc0e7aeb419f533d5806a08669fe',
            'link': 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a'
                    '/specvqgan_public/models/2021-07-30T21-34-41_vggsound_transformer.tar.gz',
        },
        '2021-06-03T00-43-28_vggsound_transformer': {
            'info': 'Class Label',
            'hash': '98a3788ab973f1c3cc02e2e41ad253bc',
            'link': 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a'
                    '/specvqgan_public/models/2021-06-03T00-43-28_vggsound_transformer.tar.gz',
        },
        '2021-05-19T22-16-54_vggsound_codebook': {
            'info': 'VGGSound Codebook',
            'hash': '7ea229427297b5d220fb1c80db32dbc5',
            'link': 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a'
                    '/specvqgan_public/models/2021-05-19T22-16-54_vggsound_codebook.tar.gz',
        }
    }
    print(f'Using: {model_name} ({name2info[model_name]["info"]})')
    model_dir = os.path.join(log_dir, model_name)
    if not os.path.exists(model_dir):
        tar_local_path = os.path.join(log_dir, f'{model_name}.tar.gz')
        # check if tar already exists and its md5sum
        if not os.path.exists(tar_local_path) or md5_hash(tar_local_path) != name2info[model_name]['hash']:
            down_link = name2info[model_name]['link']
            download(down_link, tar_local_path)
            print('Unpacking', tar_local_path, 'to', log_dir)
            shutil.unpack_archive(tar_local_path, log_dir)
            # clean-up space as we already have unpacked folder
            os.remove(tar_local_path)
    return model_dir
def load_config(model_dir: str):
    '''Merge a run's project and lightning YAML configs and patch dataset paths.

    The feature/spectrogram paths stored in a checkpointed config may point to
    locations on the training machine; they are rewritten to the expected local
    `./data/...` layout based on the training dataset class.
    '''
    # Load the config
    config_main = sorted(glob(os.path.join(model_dir, 'configs/*-project.yaml')))[-1]
    config_pylt = sorted(glob(os.path.join(model_dir, 'configs/*-lightning.yaml')))[-1]
    config = OmegaConf.merge(
        OmegaConf.load(config_main),
        OmegaConf.load(config_pylt),
    )
    # patch config. E.g. if the model is trained on another machine with different paths
    for a in ['spec_dir_path', 'rgb_feats_dir_path', 'flow_feats_dir_path']:
        if config.data.params[a] is not None:
            if 'vggsound.VGGSound' in config.data.params.train.target:
                base_path = './data/vggsound/'
            elif 'vas.VAS' in config.data.params.train.target:
                base_path = './data/vas/features/*/'
            else:
                raise NotImplementedError
            config.data.params[a] = os.path.join(base_path, Path(config.data.params[a]).name)
    return config
def load_model(model_name, log_dir, device):
    '''Download (if needed) and load the sampling transformer plus its aux models.

    Returns:
        (config, sampler, melgan, melception) -- merged config, the transformer
        sampler, the MelGAN vocoder and the Melception feature extractor.
    '''
    to_use_gpu = True if device.type == 'cuda' else False
    model_dir = maybe_download_model(model_name, log_dir)
    config = load_config(model_dir)

    # Sampling model
    ckpt = sorted(glob(os.path.join(model_dir, 'checkpoints/*.ckpt')))[-1]
    pl_sd = torch.load(ckpt, map_location='cpu')
    sampler = load_model_from_config(config.model, pl_sd['state_dict'], to_use_gpu)['model']
    sampler.to(device)

    # aux models (vocoder and melception)
    ckpt_melgan = config.lightning.callbacks.image_logger.params.vocoder_cfg.params.ckpt_vocoder
    melgan = load_vocoder(ckpt_melgan, eval_mode=True)['model'].to(device)
    melception = load_feature_extractor(to_use_gpu, eval_mode=True)
    return config, sampler, melgan, melception
def load_neural_audio_codec(model_name, log_dir, device):
    '''Download (if needed) and load a SpecVQGAN codebook model and its vocoder.

    Returns:
        (config, model, vocoder) -- merged config, the codebook model in eval
        mode, and the MelGAN vocoder, both on `device`.
    '''
    model_dir = maybe_download_model(model_name, log_dir)
    config = load_config(model_dir)

    config.model.params.ckpt_path = f'./logs/{model_name}/checkpoints/last.ckpt'
    print(config.model.params.ckpt_path)
    model = instantiate_from_config(config.model)
    model = model.to(device)
    model = model.eval()
    # disable .train() so downstream code cannot switch the model out of eval mode
    model.train = disabled_train
    vocoder = load_vocoder(Path('./vocoder/logs/vggsound/'), eval_mode=True)['model'].to(device)
    return config, model, vocoder
class LeftmostCropOrTile(object):
    '''Forces a [T, D] feature matrix to a fixed temporal length.

    If the sequence is too short it is tiled (repeated) along time; in either
    case only the leftmost `crop_or_tile_to` rows are kept.
    '''

    def __init__(self, crop_or_tile_to):
        self.crop_or_tile_to = crop_or_tile_to

    def __call__(self, item: Dict):
        target_len = self.crop_or_tile_to
        feats = item['feature']
        current_len = feats.shape[0]
        if current_len != target_len:
            # repeat the sequence enough times to cover target_len, then cut
            n_repeats = 1 + target_len // current_len
            item['feature'] = np.tile(feats, (n_repeats, 1))[:target_len, :]
        return item
class ExtractResNet50(torch.nn.Module):
    '''Extracts per-frame ResNet50 features from a video for conditioning.

    Frames are read with OpenCV, normalized with the standard ImageNet
    transforms, and passed through a pre-trained ResNet50 whose classifier
    head is replaced by Identity (so the 2048-d pooled features are returned).
    The raw [T, D] feature matrix is then tiled/cropped/resampled by
    `post_transforms` to match what the conditioning model expects.
    '''
    def __init__(self, extraction_fps, feat_cfg, device, batch_size=32, tmp_dir='./tmp'):
        super(ExtractResNet50, self).__init__()
        self.tmp_path = tmp_dir
        self.extraction_fps = extraction_fps
        self.batch_size = batch_size
        self.feat_cfg = feat_cfg

        # ImageNet channel statistics used by the pre-trained ResNet50.
        self.means = [0.485, 0.456, 0.406]
        self.stds = [0.229, 0.224, 0.225]
        self.transforms = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=self.means, std=self.stds)
        ])
        random_crop = False
        # Post-processing: tile/crop to feat_len, crop to feat_crop_len x feat_depth,
        # then optionally resample timesteps via the configured feature sampler.
        self.post_transforms = transforms.Compose([
            LeftmostCropOrTile(feat_cfg.feat_len),
            CropFeats([feat_cfg.feat_crop_len, feat_cfg.feat_depth], random_crop),
            (lambda x: x) if feat_cfg.feat_sampler_cfg is None else instantiate_from_config(feat_cfg.feat_sampler_cfg),
        ])
        self.device = device
        self.model = models.resnet50(pretrained=True).to(device)
        self.model.eval()
        # save the pre-trained classifier for show_preds and replace it in the net with identity
        self.model_class = self.model.fc
        self.model.fc = torch.nn.Identity()

    @torch.no_grad()
    def forward(self, video_path: str) -> Dict[str, np.ndarray]:
        '''Return ({'feature': [T, D] features}, cached display frames) for `video_path`.'''
        # "No Feats" setting: the model is conditioned on noise, not real features.
        if self.feat_cfg.replace_feats_with_random:
            T, D = self.feat_cfg.feat_sampler_cfg.params.feat_sample_size, self.feat_cfg.feat_depth
            print(f'Since we are in "No Feats" setting, returning a random feature: [{T}, {D}]')
            random_features = {'feature': torch.rand(T, D)}
            return random_features, []

        # take the video, change fps and save to the tmp folder
        if self.extraction_fps is not None:
            video_path = reencode_video_with_diff_fps(video_path, self.tmp_path, self.extraction_fps)

        # read a video
        cap = cv2.VideoCapture(video_path)
        batch_list = []
        vid_feats = []
        cached_frames = []
        # Same pipeline minus normalization — frames kept for visualization.
        transforms_for_show = transforms.Compose(self.transforms.transforms[:4])
        # sometimes when the target fps is 1 or 2, the first frame of the reencoded video is missing
        # and cap.read returns None but the rest of the frames are ok. timestep is 0.0 for the 2nd frame in
        # this case
        first_frame = True

        # iterating through the opened video frame-by-frame and occasionally run the model once a batch is
        # formed
        while cap.isOpened():
            frame_exists, rgb = cap.read()

            if first_frame and not frame_exists:
                continue
            first_frame = False

            if frame_exists:
                # prepare data and cache if needed
                rgb = cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB)
                cached_frames.append(transforms_for_show(rgb))
                rgb = self.transforms(rgb).unsqueeze(0).to(self.device)
                batch_list.append(rgb)

                # when batch is formed to inference
                if len(batch_list) == self.batch_size:
                    batch_feats = self.model(torch.cat(batch_list))
                    vid_feats.extend(batch_feats.tolist())
                    # clean up the batch list
                    batch_list = []
            else:
                # if the last batch was smaller than the batch size, we still need to process those frames
                if len(batch_list) != 0:
                    batch_feats = self.model(torch.cat(batch_list))
                    vid_feats.extend(batch_feats.tolist())
                cap.release()
                break

        vid_feats = np.array(vid_feats)
        features = {'feature': vid_feats}
        print('Raw Extracted Representation:', features['feature'].shape)

        if self.post_transforms is not None:
            features = self.post_transforms(features)
            # using 'feature' as the key to reuse the feature resampling transform
            cached_frames = self.post_transforms.transforms[-1]({'feature': torch.stack(cached_frames)})['feature']

        print('Post-processed Representation:', features['feature'].shape)

        return features, cached_frames
def extract_melspectrogram(in_path: str, sr: int, duration: int = 10, tmp_path: str = './tmp') -> np.ndarray:
    '''Extract Melspectrogram similar to RegNet.

    Extracts (or reuses) the audio track of `in_path`, resamples it to mono
    16-kbit audio at `sr` Hz, and returns the mel spectrogram rescaled to
    [-1, 1] over the first `duration` seconds (zero-padded if shorter).
    '''
    assert which_ffmpeg() != '', 'Is ffmpeg installed? Check if the conda environment is activated.'
    # assert in_path.endswith('.mp4'), 'The file does not end with .mp4. Comment this if expected'
    # create tmp dir if doesn't exist
    os.makedirs(tmp_path, exist_ok=True)

    # Extract audio from a video if needed
    if in_path.endswith('.wav'):
        audio_raw = in_path
    else:
        audio_raw = os.path.join(tmp_path, f'{Path(in_path).stem}.wav')
        cmd = f'{which_ffmpeg()} -i {in_path} -hide_banner -loglevel panic -f wav -vn -y {audio_raw}'
        subprocess.call(cmd.split())

    # Extract audio from a video
    audio_new = os.path.join(tmp_path, f'{Path(in_path).stem}_{sr}hz.wav')
    cmd = f'{which_ffmpeg()} -i {audio_raw} -hide_banner -loglevel panic -ac 1 -ab 16k -ar {sr} -y {audio_new}'
    subprocess.call(cmd.split())

    length = int(duration * sr)
    audio_zero_pad, spec = get_spectrogram(audio_new, save_dir=None, length=length, save_results=False)

    # specvqgan expects inputs to be in [-1, 1] but spectrograms are in [0, 1]
    spec = 2 * spec - 1

    return spec
def show_grid(imgs):
    '''Render a row of image tensors (C, H, W) as a matplotlib figure.

    Accepts either a single image tensor/stack or a list of them; a single
    input is wrapped into a one-element list.
    '''
    print('Rendering the Plot with Frames Used in Conditioning')
    if not isinstance(imgs, list):
        imgs = [imgs]
    # FIX: figsize was previously computed from `imgs.shape` BEFORE the
    # isinstance wrap above, which crashed for list inputs even though lists
    # are explicitly supported. Compute it from the first image instead
    # (identical result for the non-list case).
    figsize = ((imgs[0].shape[1] // 228 + 1) * 5, (imgs[0].shape[2] // 228 + 1) * 5)
    fig, axs = plt.subplots(ncols=len(imgs), squeeze=False, figsize=figsize)
    for i, img in enumerate(imgs):
        img = img.detach()
        img = F.to_pil_image(img)
        axs[0, i].imshow(np.asarray(img))
        axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
    return fig
def calculate_codebook_bitrate(duration, quant_z, codebook_size):
    '''Compute and print the effective bitrate (kbps) of the quantized bottleneck.

    Each codebook index needs ceil(log2(codebook_size)) bits; the total number
    of indices is the product of the last two dims of `quant_z`.
    '''
    bneck = quant_z.shape[-2:]
    bits_per_entry = (codebook_size - 1).bit_length()
    kbps = bits_per_entry * bneck.numel() / duration / 1024
    print(f'The input audio is {duration:.2f} seconds long.')
    print(f'Codebook size is {codebook_size} i.e. a codebook entry allocates {bits_per_entry} bits')
    print(f'SpecVQGAN bottleneck size: {list(bneck)}')
    print(f'Thus, bitrate is {kbps:.2f} kbps')
    return kbps
def get_audio_file_bitrate(file):
    '''Return the bitrate (kbps) of the first audio stream in `file`, via ffprobe.'''
    assert which_ffprobe() != '', 'Is ffmpeg installed? Check if the conda environment is activated.'
    probe_cmd = f'{which_ffprobe()} -v error -select_streams a:0'\
                f' -show_entries stream=bit_rate -of default=noprint_wrappers=1:nokey=1 {file}'
    probe = subprocess.run(probe_cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    bits_per_sec = int(probe.stdout.decode('utf-8').replace('\n', ''))
    return bits_per_sec / 1024
if __name__ == '__main__':
    # Smoke test: print the detected ffmpeg path (empty string means not found).
    print(which_ffmpeg())
| 2.1875 | 2 |
build_tools/patch_configure.py | kosyak/naclports_samsung-smart-tv | 2 | 12770242 | <reponame>kosyak/naclports_samsung-smart-tv<gh_stars>1-10
#!/usr/bin/env python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to patch a configure script in-place such that the libtool
dynamic library detection works for NaCl.
Once this patch makes it into upstream libtool it should eventually
be possible to remove this completely.
"""
import optparse
import re
import sys
# There are essentially three patches here, which will make configure do
# the right things for shared library support when used with the NaCl
# GLIBC toolchain.
CONFIGURE_PATCHS = [
# Correct result for "dynamic linker characteristics"
['(\n\*\)\n dynamic_linker=no)',
'''
nacl)
if $CC -v 2>&1 | grep -q enable-shared; then
dynamic_linker="GNU/NaCl ld.so"
version_type=linux
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
soname_spec='${libname}${release}${shared_ext}$major'
else
dynamic_linker=no
fi
;;
\\1'''],
# Correct result for "supports shared libraries"
['''(
netbsd\*\)
if echo __ELF__ \| \$CC -E - \| grep __ELF__ >/dev/null; then
archive_cmds_CXX='\$LD -Bshareable -o \$lib \$predep_objects \$libobjs \$deplibs \$postdep_objects \$linker_flags')
''',
'''
nacl)
if $CC -v 2>&1 | grep -q enable-shared; then
ld_shlibs_CXX=yes
else
ld_shlibs_CXX=no
fi
;;
\\1
'''],
# Correct result for "how to recognize dependent libraries"
['''
(.*linux.*)\)
lt_cv_deplibs_check_method=pass_all''',
'''
\\1 | nacl*)
lt_cv_deplibs_check_method=pass_all''']
]
def main(args):
    '''Patch the configure script named in `args` in place; return 0.'''
    usage = "usage: %prog [options] <configure_script>"
    parser = optparse.OptionParser(usage=usage)
    positional = parser.parse_args(args)[1]
    if not positional:
        parser.error("no configure script specified")
    configure = positional[0]

    # Read the whole script into memory.
    with open(configure) as input_file:
        filedata = input_file.read()

    # Apply each [pattern, replacement] pair; warn (but keep going) when a
    # pattern no longer matches this configure script.
    for i, (pattern, replacement) in enumerate(CONFIGURE_PATCHS):
        if not re.search(pattern, filedata):
            sys.stderr.write("Failed to find patch %s location in configure "
                             "script: %s\n" % (i, configure))
            continue
        filedata = re.sub(pattern, replacement, filedata)

    # Overwrite the input file with the patched contents.
    with open(configure, 'w') as output_file:
        output_file.write(filedata)

    return 0
if __name__ == '__main__':
    # Propagate main's return code so build scripts can detect failure.
    sys.exit(main(sys.argv[1:]))
| 1.828125 | 2 |
runai/profiler/tf/hooks/__init__.py | bamps53/runai | 86 | 12770243 | <reponame>bamps53/runai
from .session_run import session_run
| 0.988281 | 1 |
pyquil/experiment/tests/test_group.py | Tommy-Moffat/pyquil | 1 | 12770244 | <filename>pyquil/experiment/tests/test_group.py
import itertools
import pytest
from pyquil.experiment._main import TomographyExperiment
from pyquil.experiment._result import ExperimentResult
from pyquil.experiment._setting import (
ExperimentSetting,
plusX,
minusX,
plusZ,
minusZ,
plusY,
TensorProductState,
zeros_state,
_pauli_to_product_state,
)
from pyquil.experiment._group import (
get_results_by_qubit_groups,
merge_disjoint_experiments,
group_settings,
_max_weight_operator,
_max_weight_state,
_max_tpb_overlap,
)
from pyquil.gates import X, Z
from pyquil.paulis import sZ, sX, sY, sI, PauliTerm
from pyquil import Program
from pyquil.gates import H
def test_results_by_qubit_groups():
    """get_results_by_qubit_groups maps each qubit group to the results whose
    observables act only on qubits inside that group."""
    er1 = ExperimentResult(
        setting=ExperimentSetting(plusX(0), sZ(0)), expectation=0.0, std_err=0.0, total_counts=1,
    )
    er2 = ExperimentResult(
        setting=ExperimentSetting(plusX(0), sZ(1)), expectation=0.0, std_err=0.0, total_counts=1,
    )
    er3 = ExperimentResult(
        setting=ExperimentSetting(plusX(0), sX(0) * sZ(1)),
        expectation=0.0,
        std_err=0.0,
        total_counts=1,
    )
    er4 = ExperimentResult(
        setting=ExperimentSetting(plusX(0), sX(0) * sZ(2)),
        expectation=0.0,
        std_err=0.0,
        total_counts=1,
    )
    groups = [(0,), (1,), (2, 0)]
    res_by_group = get_results_by_qubit_groups([er1, er2, er3, er4], groups)
    # er3 acts on qubits {0, 1}, which fits no listed group, so it is absent;
    # er1 (qubit 0 only) also belongs to the (2, 0) group.
    assert res_by_group == {(0,): [er1], (1,): [er2], (0, 2): [er1, er4]}
def test_merge_disjoint_experiments():
    """Merging these three experiments yields a single experiment whose five
    settings collapse into two groups."""
    sett1 = ExperimentSetting(TensorProductState(), sX(0) * sY(1))
    sett2 = ExperimentSetting(plusZ(1), sY(1))
    sett3 = ExperimentSetting(plusZ(0), sX(0))
    sett4 = ExperimentSetting(minusX(1), sY(1))
    sett5 = ExperimentSetting(TensorProductState(), sZ(2))
    expt1 = TomographyExperiment(settings=[sett1, sett2], program=Program(X(1)))
    expt2 = TomographyExperiment(settings=[sett3, sett4], program=Program(Z(0)))
    expt3 = TomographyExperiment(settings=[sett5], program=Program())
    merged_expt = merge_disjoint_experiments([expt1, expt2, expt3])
    assert len(merged_expt) == 2
@pytest.fixture(params=["clique-removal", "greedy"])
def grouping_method(request):
    """Parametrized fixture yielding each supported settings-grouping method."""
    return request.param
def test_expt_settings_share_ntpb():
    """Every pair of settings inside each group below shares a tensor product
    basis: both a max-weight state and a max-weight operator exist."""
    expts = [
        [
            ExperimentSetting(zeros_state([0, 1]), sX(0) * sI(1)),
            ExperimentSetting(zeros_state([0, 1]), sI(0) * sX(1)),
        ],
        [
            ExperimentSetting(zeros_state([0, 1]), sZ(0) * sI(1)),
            ExperimentSetting(zeros_state([0, 1]), sI(0) * sZ(1)),
        ],
    ]
    for group in expts:
        for e1, e2 in itertools.combinations(group, 2):
            assert _max_weight_state([e1.in_state, e2.in_state]) is not None
            assert _max_weight_operator([e1.out_operator, e2.out_operator]) is not None
def test_group_experiments(grouping_method):
    """Grouping four single-qubit settings packs them into two groups,
    whichever grouping method is used."""
    expts = [  # cf above, I removed the inner nesting. Still grouped visually
        ExperimentSetting(TensorProductState(), sX(0) * sI(1)),
        ExperimentSetting(TensorProductState(), sI(0) * sX(1)),
        ExperimentSetting(TensorProductState(), sZ(0) * sI(1)),
        ExperimentSetting(TensorProductState(), sI(0) * sZ(1)),
    ]
    suite = TomographyExperiment(expts, Program())
    grouped_suite = group_settings(suite, method=grouping_method)
    assert len(suite) == 4
    assert len(grouped_suite) == 2
def test_max_weight_operator_1():
    """Pairwise-compatible terms combine into one max-weight operator."""
    pauli_terms = [sZ(0), sX(1) * sZ(0), sY(2) * sX(1)]
    assert _max_weight_operator(pauli_terms) == sY(2) * sX(1) * sZ(0)
def test_max_weight_operator_2():
    """Identity factors (sI) do not block combination into a max-weight term."""
    pauli_terms = [sZ(0), sX(1) * sZ(0), sY(2) * sX(1), sZ(5) * sI(3)]
    assert _max_weight_operator(pauli_terms) == sZ(5) * sY(2) * sX(1) * sZ(0)
def test_max_weight_operator_3():
    """Conflicting bases on qubit 5 (X vs Z) mean no max-weight operator."""
    pauli_terms = [sZ(0) * sX(5), sX(1) * sZ(0), sY(2) * sX(1), sZ(5) * sI(3)]
    assert _max_weight_operator(pauli_terms) is None
def test_max_weight_operator_misc():
    """Assorted compatibility checks for _max_weight_operator, including
    multi-term sums whose parts all diagonalize in one tensor product basis."""
    assert _max_weight_operator([sZ(0), sZ(0) * sZ(1)]) is not None
    assert _max_weight_operator([sX(5), sZ(4)]) is not None
    assert _max_weight_operator([sX(0), sY(0) * sZ(2)]) is None
    x_term = sX(0) * sX(1)
    z1_term = sZ(1)
    z0_term = sZ(0)
    z0z1_term = sZ(0) * sZ(1)
    # X-basis terms conflict with Z-basis terms on shared qubits.
    assert _max_weight_operator([x_term, z1_term]) is None
    assert _max_weight_operator([z0z1_term, x_term]) is None
    assert _max_weight_operator([z1_term, z0_term]) is not None
    assert _max_weight_operator([z0z1_term, z0_term]) is not None
    assert _max_weight_operator([z0z1_term, z1_term]) is not None
    assert _max_weight_operator([z0z1_term, sI(1)]) is not None
    assert _max_weight_operator([z0z1_term, sI(2)]) is not None
    assert _max_weight_operator([z0z1_term, sX(5) * sZ(7)]) is not None
    xxxx_terms = (
        sX(1) * sX(2)
        + sX(2)
        + sX(3) * sX(4)
        + sX(4)
        + sX(1) * sX(3) * sX(4)
        + sX(1) * sX(4)
        + sX(1) * sX(2) * sX(3)
    )
    true_term = sX(1) * sX(2) * sX(3) * sX(4)
    assert _max_weight_operator(xxxx_terms.terms) == true_term
    zzzz_terms = sZ(1) * sZ(2) + sZ(3) * sZ(4) + sZ(1) * sZ(3) + sZ(1) * sZ(3) * sZ(4)
    assert _max_weight_operator(zzzz_terms.terms) == sZ(1) * sZ(2) * sZ(3) * sZ(4)
    pauli_terms = [sZ(0), sX(1) * sZ(0), sY(2) * sX(1), sZ(5) * sI(3)]
    assert _max_weight_operator(pauli_terms) == sZ(5) * sY(2) * sX(1) * sZ(0)
def test_max_weight_operator_4():
    """Commuting operators need not share a tensor product basis."""
    # this last example illustrates that a pair of commuting operators
    # need not be diagonal in the same tpb
    assert _max_weight_operator([sX(1) * sZ(0), sZ(1) * sX(0)]) is None
def test_max_weight_state_1():
    """The product state plusX(0)*plusZ(1) dominates its single-qubit factors."""
    states = [plusX(0) * plusZ(1), plusX(0), plusZ(1)]
    assert _max_weight_state(states) == states[0]
def test_max_weight_state_2():
    """Conflicting bases on qubits 0 and 1 mean no max-weight state exists."""
    states = [plusX(1) * plusZ(0), plusX(0), plusZ(1)]
    assert _max_weight_state(states) is None
def test_max_weight_state_3():
    """A minus eigenstate factor still combines into the max-weight state."""
    states = [plusX(0) * minusZ(1), plusX(0), minusZ(1)]
    assert _max_weight_state(states) == states[0]
def test_max_weight_state_4():
    """Swapping which qubit carries which basis creates a conflict -> None."""
    states = [plusX(1) * minusZ(0), plusX(0), minusZ(1)]
    assert _max_weight_state(states) is None
def test_max_tpb_overlap_1():
    """Two compatible settings collapse under one joint max-weight setting."""
    tomo_expt_settings = [
        ExperimentSetting(plusZ(1) * plusX(0), sY(2) * sY(1)),
        ExperimentSetting(plusX(2) * plusZ(1), sY(2) * sZ(0)),
    ]
    tomo_expt_program = Program(H(0), H(1), H(2))
    tomo_expt = TomographyExperiment(tomo_expt_settings, tomo_expt_program)
    expected_dict = {
        ExperimentSetting(plusX(0) * plusZ(1) * plusX(2), sZ(0) * sY(1) * sY(2)): [
            ExperimentSetting(plusZ(1) * plusX(0), sY(2) * sY(1)),
            ExperimentSetting(plusX(2) * plusZ(1), sY(2) * sZ(0)),
        ]
    }
    assert expected_dict == _max_tpb_overlap(tomo_expt)
def test_max_tpb_overlap_2():
    """A single setting maps to itself as its own group."""
    expt_setting = ExperimentSetting(
        _pauli_to_product_state(PauliTerm.from_compact_str("(1+0j)*Z7Y8Z1Y4Z2Y5Y0X6")),
        PauliTerm.from_compact_str("(1+0j)*Z4X8Y5X3Y7Y1"),
    )
    p = Program(H(0), H(1), H(2))
    tomo_expt = TomographyExperiment([expt_setting], p)
    expected_dict = {expt_setting: [expt_setting]}
    assert expected_dict == _max_tpb_overlap(tomo_expt)
def test_max_tpb_overlap_3():
    """A second, compatible setting joins the first setting's group."""
    # add another ExperimentSetting to the above
    expt_setting = ExperimentSetting(
        _pauli_to_product_state(PauliTerm.from_compact_str("(1+0j)*Z7Y8Z1Y4Z2Y5Y0X6")),
        PauliTerm.from_compact_str("(1+0j)*Z4X8Y5X3Y7Y1"),
    )
    expt_setting2 = ExperimentSetting(plusZ(7), sY(1))
    p = Program(H(0), H(1), H(2))
    tomo_expt2 = TomographyExperiment([expt_setting, expt_setting2], p)
    expected_dict2 = {expt_setting: [expt_setting, expt_setting2]}
    assert expected_dict2 == _max_tpb_overlap(tomo_expt2)
def test_group_experiments_greedy():
    """The greedy method merges the two separate settings into one group."""
    ungrouped_tomo_expt = TomographyExperiment(
        [
            [
                ExperimentSetting(
                    _pauli_to_product_state(PauliTerm.from_compact_str("(1+0j)*Z7Y8Z1Y4Z2Y5Y0X6")),
                    PauliTerm.from_compact_str("(1+0j)*Z4X8Y5X3Y7Y1"),
                )
            ],
            [ExperimentSetting(plusZ(7), sY(1))],
        ],
        program=Program(H(0), H(1), H(2)),
    )
    grouped_tomo_expt = group_settings(ungrouped_tomo_expt, method="greedy")
    expected_grouped_tomo_expt = TomographyExperiment(
        [
            [
                ExperimentSetting(
                    TensorProductState.from_str(
                        "Z0_7 * Y0_8 * Z0_1 * Y0_4 * Z0_2 * Y0_5 * Y0_0 * X0_6"
                    ),
                    PauliTerm.from_compact_str("(1+0j)*Z4X8Y5X3Y7Y1"),
                ),
                ExperimentSetting(plusZ(7), sY(1)),
            ]
        ],
        program=Program(H(0), H(1), H(2)),
    )
    assert grouped_tomo_expt == expected_grouped_tomo_expt
def test_expt_settings_diagonal_in_tpb():
    """Settings are 'diagonal in the same tpb' only when both their in_states
    and their out_operators are separately compatible."""
    def _expt_settings_diagonal_in_tpb(es1: ExperimentSetting, es2: ExperimentSetting):
        """
        Extends the concept of being diagonal in the same tpb to ExperimentSettings, by
        determining if the pairs of in_states and out_operators are separately diagonal in the same
        tpb
        """
        max_weight_in = _max_weight_state([es1.in_state, es2.in_state])
        max_weight_out = _max_weight_operator([es1.out_operator, es2.out_operator])
        return max_weight_in is not None and max_weight_out is not None

    expt_setting1 = ExperimentSetting(plusZ(1) * plusX(0), sY(1) * sZ(0))
    expt_setting2 = ExperimentSetting(plusY(2) * plusZ(1), sZ(2) * sY(1))
    assert _expt_settings_diagonal_in_tpb(expt_setting1, expt_setting2)
    expt_setting3 = ExperimentSetting(plusX(2) * plusZ(1), sZ(2) * sY(1))
    expt_setting4 = ExperimentSetting(plusY(2) * plusZ(1), sX(2) * sY(1))
    assert not _expt_settings_diagonal_in_tpb(expt_setting2, expt_setting3)
    assert not _expt_settings_diagonal_in_tpb(expt_setting2, expt_setting4)
| 2.125 | 2 |
resources/_py/_req.py | gabru-md/faces | 0 | 12770245 | # PYTHON
# <NAME>
# https://github.com/gabru-md
#BEGIN
# Modules required by this script
import sys
import os
import cv2
import numpy as np
import math
import matplotlib.pyplot as plt
#END
| 1.554688 | 2 |
appengine/monorail/framework/test/framework_helpers_test.py | allaparthi/monorail | 0 | 12770246 | <reponame>allaparthi/monorail
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Unit tests for the framework_helpers module."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import mock
import unittest
import mox
import time
from businesslogic import work_env
from framework import framework_helpers
from framework import framework_views
from proto import features_pb2
from proto import project_pb2
from proto import user_pb2
from services import service_manager
from testing import fake
from testing import testing_helpers
class HelperFunctionsTest(unittest.TestCase):
  """Tests for the retry decorator and project/hotlist role-name helpers."""

  def setUp(self):
    self.mox = mox.Mox()
    self.time = self.mox.CreateMock(framework_helpers.time)
    framework_helpers.time = self.time  # Point to a mocked out time module.

  def tearDown(self):
    framework_helpers.time = time  # Point back to the time module.
    self.mox.UnsetStubs()
    self.mox.ResetAll()

  def testRetryDecorator_ExceedFailures(self):
    """After all retries fail, the exception propagates; sleeps back off."""
    class Tracker(object):
      func_called = 0
    tracker = Tracker()

    # Use a function that always fails.
    @framework_helpers.retry(2, delay=1, backoff=2)
    def testFunc(tracker):
      tracker.func_called += 1
      raise Exception('Failed')

    self.time.sleep(1).AndReturn(None)
    self.time.sleep(2).AndReturn(None)
    self.mox.ReplayAll()
    with self.assertRaises(Exception):
      testFunc(tracker)
    self.mox.VerifyAll()
    # 1 initial call + 2 retries.
    self.assertEqual(3, tracker.func_called)

  def testRetryDecorator_EventuallySucceed(self):
    """Retrying stops as soon as the wrapped function succeeds."""
    class Tracker(object):
      func_called = 0
    tracker = Tracker()

    # Use a function that succeeds on the 2nd attempt.
    @framework_helpers.retry(2, delay=1, backoff=2)
    def testFunc(tracker):
      tracker.func_called += 1
      if tracker.func_called < 2:
        raise Exception('Failed')

    self.time.sleep(1).AndReturn(None)
    self.mox.ReplayAll()
    testFunc(tracker)
    self.mox.VerifyAll()
    self.assertEqual(2, tracker.func_called)

  def testGetRoleName(self):
    """The highest role wins when a user holds several project roles."""
    proj = project_pb2.Project()
    proj.owner_ids.append(111)
    proj.committer_ids.append(222)
    proj.contributor_ids.append(333)
    self.assertEqual(None, framework_helpers.GetRoleName(set(), proj))
    self.assertEqual('Owner', framework_helpers.GetRoleName({111}, proj))
    self.assertEqual('Committer', framework_helpers.GetRoleName({222}, proj))
    self.assertEqual('Contributor', framework_helpers.GetRoleName({333}, proj))
    self.assertEqual(
        'Owner', framework_helpers.GetRoleName({111, 222, 999}, proj))
    self.assertEqual(
        'Committer', framework_helpers.GetRoleName({222, 333, 999}, proj))
    self.assertEqual(
        'Contributor', framework_helpers.GetRoleName({333, 999}, proj))

  def testGetHotlistRoleName(self):
    """Same precedence logic for hotlist roles: Owner > Editor > Follower."""
    hotlist = features_pb2.Hotlist()
    hotlist.owner_ids.append(111)
    hotlist.editor_ids.append(222)
    hotlist.follower_ids.append(333)
    self.assertEqual(None, framework_helpers.GetHotlistRoleName(set(), hotlist))
    self.assertEqual(
        'Owner', framework_helpers.GetHotlistRoleName({111}, hotlist))
    self.assertEqual(
        'Editor', framework_helpers.GetHotlistRoleName({222}, hotlist))
    self.assertEqual(
        'Follower', framework_helpers.GetHotlistRoleName({333}, hotlist))
    self.assertEqual(
        'Owner', framework_helpers.GetHotlistRoleName({111, 222, 999}, hotlist))
    self.assertEqual(
        'Editor', framework_helpers.GetHotlistRoleName(
            {222, 333, 999}, hotlist))
    self.assertEqual(
        'Follower', framework_helpers.GetHotlistRoleName({333, 999}, hotlist))
class UrlFormattingTest(unittest.TestCase):
  """Tests for URL formatting."""

  def setUp(self):
    self.services = service_manager.Services(user=fake.UserService())

  def testFormatMovedProjectURL(self):
    """Project foo has been moved to bar. User is visiting /p/foo/..."""
    mr = testing_helpers.MakeMonorailRequest()
    mr.current_page_url = '/p/foo/'
    self.assertEqual(
        '/p/bar/',
        framework_helpers.FormatMovedProjectURL(mr, 'bar'))
    mr.current_page_url = '/p/foo/issues/list'
    self.assertEqual(
        '/p/bar/issues/list',
        framework_helpers.FormatMovedProjectURL(mr, 'bar'))
    # Query strings and fragments are preserved across the rewrite.
    mr.current_page_url = '/p/foo/issues/detail?id=123'
    self.assertEqual(
        '/p/bar/issues/detail?id=123',
        framework_helpers.FormatMovedProjectURL(mr, 'bar'))
    mr.current_page_url = '/p/foo/issues/detail?id=123#c7'
    self.assertEqual(
        '/p/bar/issues/detail?id=123#c7',
        framework_helpers.FormatMovedProjectURL(mr, 'bar'))

  def testFormatURL(self):
    """With no recognized params set, the path comes back unchanged."""
    mr = testing_helpers.MakeMonorailRequest()
    path = '/dude/wheres/my/car'
    recognized_params = [(name, mr.GetParam(name)) for name in
                         framework_helpers.RECOGNIZED_PARAMS]
    url = framework_helpers.FormatURL(recognized_params, path)
    self.assertEqual(path, url)

  def testFormatURLWithRecognizedParams(self):
    """Recognized params present in the request are carried into the URL."""
    params = {}
    query = []
    for name in framework_helpers.RECOGNIZED_PARAMS:
      params[name] = name
      query.append('%s=%s' % (name, 123))
    path = '/dude/wheres/my/car'
    expected = '%s?%s' % (path, '&'.join(query))
    mr = testing_helpers.MakeMonorailRequest(path=expected)
    recognized_params = [(name, mr.GetParam(name)) for name in
                         framework_helpers.RECOGNIZED_PARAMS]
    # No added params.
    url = framework_helpers.FormatURL(recognized_params, path)
    self.assertEqual(expected, url)

  def testFormatURLWithKeywordArgs(self):
    """Keyword args fill in params that were absent from the request."""
    params = {}
    query_pairs = []
    for name in framework_helpers.RECOGNIZED_PARAMS:
      params[name] = name
      if name != 'can' and name != 'start':
        query_pairs.append('%s=%s' % (name, 123))
    path = '/dude/wheres/my/car'
    mr = testing_helpers.MakeMonorailRequest(
        path='%s?%s' % (path, '&'.join(query_pairs)))
    query_pairs.append('can=yep')
    query_pairs.append('start=486')
    query_string = '&'.join(query_pairs)
    expected = '%s?%s' % (path, query_string)
    recognized_params = [(name, mr.GetParam(name)) for name in
                         framework_helpers.RECOGNIZED_PARAMS]
    url = framework_helpers.FormatURL(
        recognized_params, path, can='yep', start=486)
    self.assertEqual(expected, url)

  def testFormatURLWithKeywordArgsAndID(self):
    """An explicit id keyword arg becomes the first query parameter."""
    params = {}
    query_pairs = []
    query_pairs.append('id=200')  # id should be the first parameter.
    for name in framework_helpers.RECOGNIZED_PARAMS:
      params[name] = name
      if name != 'can' and name != 'start':
        query_pairs.append('%s=%s' % (name, 123))
    path = '/dude/wheres/my/car'
    mr = testing_helpers.MakeMonorailRequest(
        path='%s?%s' % (path, '&'.join(query_pairs)))
    query_pairs.append('can=yep')
    query_pairs.append('start=486')
    query_string = '&'.join(query_pairs)
    expected = '%s?%s' % (path, query_string)
    recognized_params = [(name, mr.GetParam(name)) for name in
                         framework_helpers.RECOGNIZED_PARAMS]
    url = framework_helpers.FormatURL(
        recognized_params, path, can='yep', start=486, id=200)
    self.assertEqual(expected, url)

  def testFormatURLWithStrangeParams(self):
    """Unusual param values (paths, spaces) are URL-escaped as needed."""
    mr = testing_helpers.MakeMonorailRequest(path='/foo?start=0')
    recognized_params = [(name, mr.GetParam(name)) for name in
                         framework_helpers.RECOGNIZED_PARAMS]
    url = framework_helpers.FormatURL(
        recognized_params, '/foo',
        r=0, path='/foo/bar', sketchy='/foo/ bar baz ')
    self.assertEqual(
        '/foo?start=0&path=/foo/bar&r=0&sketchy=/foo/%20bar%20baz%20',
        url)

  def testFormatAbsoluteURL(self):
    _request, mr = testing_helpers.GetRequestObjects(
        path='/p/proj/some-path',
        headers={'Host': 'www.test.com'})
    self.assertEqual(
        'http://www.test.com/p/proj/some/path',
        framework_helpers.FormatAbsoluteURL(mr, '/some/path'))

  def testFormatAbsoluteURL_CommonRequestParams(self):
    """Recognized params (can) are copied; unrecognized ones (foo) are not,
    and copy_params=False suppresses them entirely."""
    _request, mr = testing_helpers.GetRequestObjects(
        path='/p/proj/some-path?foo=bar&can=1',
        headers={'Host': 'www.test.com'})
    self.assertEqual(
        'http://www.test.com/p/proj/some/path?can=1',
        framework_helpers.FormatAbsoluteURL(mr, '/some/path'))
    self.assertEqual(
        'http://www.test.com/p/proj/some/path',
        framework_helpers.FormatAbsoluteURL(
            mr, '/some/path', copy_params=False))

  def testFormatAbsoluteURL_NoProject(self):
    path = '/some/path'
    _request, mr = testing_helpers.GetRequestObjects(
        headers={'Host': 'www.test.com'}, path=path)
    url = framework_helpers.FormatAbsoluteURL(mr, path, include_project=False)
    self.assertEqual(url, 'http://www.test.com/some/path')

  def testGetHostPort_Local(self):
    """We use testing-app.appspot.com when running locally."""
    self.assertEqual('testing-app.appspot.com',
                     framework_helpers.GetHostPort())
    self.assertEqual('testing-app.appspot.com',
                     framework_helpers.GetHostPort(project_name='proj'))

  @mock.patch('settings.preferred_domains',
              {'testing-app.appspot.com': 'example.com'})
  def testGetHostPort_PreferredDomain(self):
    """A prod server can have a preferred domain."""
    self.assertEqual('example.com',
                     framework_helpers.GetHostPort())
    self.assertEqual('example.com',
                     framework_helpers.GetHostPort(project_name='proj'))

  @mock.patch('settings.branded_domains',
              {'proj': 'branded.com', '*': 'unbranded.com'})
  @mock.patch('settings.preferred_domains',
              {'testing-app.appspot.com': 'example.com'})
  def testGetHostPort_BrandedDomain(self):
    """A prod server can have a preferred domain."""
    self.assertEqual('example.com',
                     framework_helpers.GetHostPort())
    self.assertEqual('branded.com',
                     framework_helpers.GetHostPort(project_name='proj'))
    self.assertEqual('unbranded.com',
                     framework_helpers.GetHostPort(project_name='other-proj'))

  def testIssueCommentURL(self):
    """Comment URLs include the issue id, plus #c<seq> when seq_num is given."""
    hostport = 'port.someplex.com'
    proj = project_pb2.Project()
    proj.project_name = 'proj'
    url = 'https://port.someplex.com/p/proj/issues/detail?id=2'
    actual_url = framework_helpers.IssueCommentURL(
        hostport, proj, 2)
    self.assertEqual(actual_url, url)
    url = 'https://port.someplex.com/p/proj/issues/detail?id=2#c2'
    actual_url = framework_helpers.IssueCommentURL(
        hostport, proj, 2, seq_num=2)
    self.assertEqual(actual_url, url)
class WordWrapSuperLongLinesTest(unittest.TestCase):
  """Tests for WordWrapSuperLongLines: short lines pass through untouched,
  overly long lines are wrapped at word boundaries."""

  def testEmptyLogMessage(self):
    msg = ''
    wrapped_msg = framework_helpers.WordWrapSuperLongLines(msg)
    self.assertEqual(wrapped_msg, '')

  def testShortLines(self):
    msg = 'one\ntwo\nthree\n'
    wrapped_msg = framework_helpers.WordWrapSuperLongLines(msg)
    expected = 'one\ntwo\nthree\n'
    self.assertEqual(wrapped_msg, expected)

  def testOneLongLine(self):
    msg = ('This is a super long line that just goes on and on '
           'and it seems like it will never stop because it is '
           'super long and it was entered by a user who had no '
           'familiarity with the return key.')
    wrapped_msg = framework_helpers.WordWrapSuperLongLines(msg)
    expected = ('This is a super long line that just goes on and on and it '
                'seems like it will never stop because it\n'
                'is super long and it was entered by a user who had no '
                'familiarity with the return key.')
    self.assertEqual(wrapped_msg, expected)

    msg2 = ('This is a super long line that just goes on and on '
            'and it seems like it will never stop because it is '
            'super long and it was entered by a user who had no '
            'familiarity with the return key. '
            'This is a super long line that just goes on and on '
            'and it seems like it will never stop because it is '
            'super long and it was entered by a user who had no '
            'familiarity with the return key.')
    wrapped_msg2 = framework_helpers.WordWrapSuperLongLines(msg2)
    expected2 = ('This is a super long line that just goes on and on and it '
                 'seems like it will never stop because it\n'
                 'is super long and it was entered by a user who had no '
                 'familiarity with the return key. This is a\n'
                 'super long line that just goes on and on and it seems like '
                 'it will never stop because it is super\n'
                 'long and it was entered by a user who had no familiarity '
                 'with the return key.')
    self.assertEqual(wrapped_msg2, expected2)

  def testMixOfShortAndLong(self):
    msg = ('[Author: mpcomplete]\n'
           '\n'
           # Description on one long line
           'Fix a memory leak in JsArray and JsObject for the IE and NPAPI '
           'ports. Each time you call GetElement* or GetProperty* to '
           'retrieve string or object token, the token would be leaked. '
           'I added a JsScopedToken to ensure that the right thing is '
           'done when the object leaves scope, depending on the platform.\n'
           '\n'
           'R=zork\n'
           'CC=<EMAIL>\n'
           'DELTA=108 (52 added, 36 deleted, 20 changed)\n'
           'OCL=5932446\n'
           'SCL=5933728\n')
    wrapped_msg = framework_helpers.WordWrapSuperLongLines(msg)
    expected = (
        '[Author: mpcomplete]\n'
        '\n'
        'Fix a memory leak in JsArray and JsObject for the IE and NPAPI '
        'ports. Each time you call\n'
        'GetElement* or GetProperty* to retrieve string or object token, the '
        'token would be leaked. I added\n'
        'a JsScopedToken to ensure that the right thing is done when the '
        'object leaves scope, depending on\n'
        'the platform.\n'
        '\n'
        'R=zork\n'
        'CC=<EMAIL>\n'
        'DELTA=108 (52 added, 36 deleted, 20 changed)\n'
        'OCL=5932446\n'
        'SCL=5933728\n')
    self.assertEqual(wrapped_msg, expected)
class ComputeListDeltasTest(unittest.TestCase):
  """Tests for ComputeListDeltas, which diffs old vs. new membership lists."""

  def DoOne(self, old=None, new=None, added=None, removed=None):
    """Run one call to the target method and check expected results."""
    actual_added, actual_removed = framework_helpers.ComputeListDeltas(
        old, new)
    self.assertItemsEqual(added, actual_added)
    self.assertItemsEqual(removed, actual_removed)

  def testEmptyLists(self):
    self.DoOne(old=[], new=[], added=[], removed=[])
    self.DoOne(old=[1, 2], new=[], added=[], removed=[1, 2])
    self.DoOne(old=[], new=[1, 2], added=[1, 2], removed=[])

  def testUnchanged(self):
    # Order must not matter: [1, 2] vs [2, 1] produces no deltas.
    self.DoOne(old=[1], new=[1], added=[], removed=[])
    self.DoOne(old=[1, 2], new=[1, 2], added=[], removed=[])
    self.DoOne(old=[1, 2], new=[2, 1], added=[], removed=[])

  def testCompleteChange(self):
    self.DoOne(old=[1, 2], new=[3, 4], added=[3, 4], removed=[1, 2])

  def testGeneralChange(self):
    self.DoOne(old=[1, 2], new=[2], added=[], removed=[1])
    self.DoOne(old=[1], new=[1, 2], added=[2], removed=[])
    self.DoOne(old=[1, 2], new=[2, 3], added=[3], removed=[1])
class UserSettingsTest(unittest.TestCase):
  """Tests for the UserSettings helper: page data gathering, ban form, and
  settings form processing (both User-PB and UserPrefs-PB style prefs)."""

  def setUp(self):
    self.mr = testing_helpers.MakeMonorailRequest()
    self.cnxn = 'cnxn'
    self.services = service_manager.Services(
        user=fake.UserService(),
        usergroup=fake.UserGroupService())

  def testGatherUnifiedSettingsPageData(self):
    mr = self.mr
    mr.auth.user_view = framework_views.StuffUserView(100, 'user@invalid', True)
    mr.auth.user_view.profile_url = '/u/profile/url'
    userprefs = user_pb2.UserPrefs(
        prefs=[user_pb2.UserPrefValue(name='public_issue_notice', value='true')])
    page_data = framework_helpers.UserSettings.GatherUnifiedSettingsPageData(
        mr.auth.user_id, mr.auth.user_view, mr.auth.user_pb, userprefs)
    expected_keys = [
        'settings_user',
        'settings_user_pb',
        'settings_user_is_banned',
        'self',
        'profile_url_fragment',
        'preview_on_hover',
        'settings_user_prefs',
        ]
    self.assertItemsEqual(expected_keys, list(page_data.keys()))
    # The leading '/u/' is stripped from the profile URL fragment.
    self.assertEqual('profile/url', page_data['profile_url_fragment'])
    self.assertTrue(page_data['settings_user_prefs'].public_issue_notice)
    self.assertFalse(page_data['settings_user_prefs'].restrict_new_issues)

  def testGatherUnifiedSettingsPageData_NoUserPrefs(self):
    """If UserPrefs were not loaded, consider them all false."""
    mr = self.mr
    mr.auth.user_view = framework_views.StuffUserView(100, 'user@invalid', True)
    userprefs = None
    page_data = framework_helpers.UserSettings.GatherUnifiedSettingsPageData(
        mr.auth.user_id, mr.auth.user_view, mr.auth.user_pb, userprefs)
    self.assertFalse(page_data['settings_user_prefs'].public_issue_notice)
    self.assertFalse(page_data['settings_user_prefs'].restrict_new_issues)

  def testProcessBanForm(self):
    """We can ban and unban users."""
    user = self.services.user.TestAddUser('<EMAIL>', 111)
    post_data = {'banned': 1, 'banned_reason': 'rude'}
    framework_helpers.UserSettings.ProcessBanForm(
        self.cnxn, self.services.user, post_data, 111, user)
    self.assertEqual('rude', user.banned)
    post_data = {}  # not banned
    framework_helpers.UserSettings.ProcessBanForm(
        self.cnxn, self.services.user, post_data, 111, user)
    self.assertEqual('', user.banned)

  def testProcessSettingsForm_OldStylePrefs(self):
    """We can set prefs that are stored in the User PB."""
    user = self.services.user.TestAddUser('<EMAIL>', 111)
    post_data = {'obscure_email': 1, 'notify': 1}
    with work_env.WorkEnv(self.mr, self.services) as we:
      framework_helpers.UserSettings.ProcessSettingsForm(
          we, post_data, user)
    self.assertTrue(user.obscure_email)
    self.assertTrue(user.notify_issue_change)
    self.assertFalse(user.notify_starred_ping)

  def testProcessSettingsForm_NewStylePrefs(self):
    """We can set prefs that are stored in the UserPrefs PB."""
    user = self.services.user.TestAddUser('<EMAIL>', 111)
    post_data = {'restrict_new_issues': 1}
    with work_env.WorkEnv(self.mr, self.services) as we:
      framework_helpers.UserSettings.ProcessSettingsForm(
          we, post_data, user)
      userprefs = we.GetUserPrefs(111)
    actual = {upv.name: upv.value
              for upv in userprefs.prefs}
    expected = {
        'restrict_new_issues': 'true',
        'public_issue_notice': 'false',
        }
    self.assertEqual(expected, actual)
class MurmurHash3Test(unittest.TestCase):
  """Golden-value tests for the MurmurHash3 x86 32-bit implementation."""

  def testMurmurHash(self):
    # (input, expected hash) pairs with the default seed.
    test_data = [
        ('', 0),
        ('<EMAIL>', 4092810879),
        (u'<EMAIL>', 904770043),
        ('<EMAIL>', 1301269279),
        ('<EMAIL>', 4186878788),
        ('jparent+foo@', 2923900874),
        ('<EMAIL>', 3043483168),
        ]
    hashes = [framework_helpers.MurmurHash3_x86_32(x)
              for (x, _) in test_data]
    self.assertListEqual(hashes, [e for (_, e) in test_data])

  def testMurmurHashWithSeed(self):
    # (input, seed, expected hash) triples: the seed changes the output.
    test_data = [
        ('', 1113155926, 2270882445),
        ('<EMAIL>', 772936925, 3995066671),
        (u'<EMAIL>', 1519359761, 1273489513),
        ('<EMAIL>', 49913829, 1202521153),
        ('<EMAIL>', 314860298, 3636123309),
        ('jparent+foo@', 195791379, 332453977),
        ('<EMAIL>', 521490555, 257496459),
        ]
    hashes = [framework_helpers.MurmurHash3_x86_32(x, s)
              for (x, s, _) in test_data]
    self.assertListEqual(hashes, [e for (_, _, e) in test_data])
class MakeRandomKeyTest(unittest.TestCase):
  """Tests for MakeRandomKey: default length, custom length, and char set."""

  def testMakeRandomKey_Normal(self):
    # Two keys default to 128 chars and should (overwhelmingly) differ.
    key1 = framework_helpers.MakeRandomKey()
    key2 = framework_helpers.MakeRandomKey()
    self.assertEqual(128, len(key1))
    self.assertEqual(128, len(key2))
    self.assertNotEqual(key1, key2)

  def testMakeRandomKey_Length(self):
    key = framework_helpers.MakeRandomKey()
    self.assertEqual(128, len(key))
    key16 = framework_helpers.MakeRandomKey(length=16)
    self.assertEqual(16, len(key16))

  def testMakeRandomKey_Chars(self):
    # With a single-character alphabet the key is fully determined.
    key = framework_helpers.MakeRandomKey(chars='a', length=4)
    self.assertEqual('aaaa', key)
class IsServiceAccountTest(unittest.TestCase):
  """Tests for IsServiceAccount, with and without an explicit allowlist."""

  def testIsServiceAccount(self):
    appspot = '<EMAIL>'
    developer = '@developer.gserviceaccount.com'
    bugdroid = '<EMAIL>'
    user = '<EMAIL>'
    # Without an allowlist, classification is based on the address itself.
    self.assertTrue(framework_helpers.IsServiceAccount(appspot))
    self.assertTrue(framework_helpers.IsServiceAccount(developer))
    self.assertTrue(framework_helpers.IsServiceAccount(bugdroid))
    self.assertFalse(framework_helpers.IsServiceAccount(user))
    # With client_emails provided, membership in that set also counts.
    client_emails = set([appspot, developer, bugdroid])
    self.assertTrue(framework_helpers.IsServiceAccount(
        appspot, client_emails=client_emails))
    self.assertTrue(framework_helpers.IsServiceAccount(
        developer, client_emails=client_emails))
    self.assertTrue(framework_helpers.IsServiceAccount(
        bugdroid, client_emails=client_emails))
    self.assertFalse(framework_helpers.IsServiceAccount(
        user, client_emails=client_emails))
| 1.742188 | 2 |
src/enstore_log_file_search_cgi.py | moibenko/enstore | 4 | 12770247 | <reponame>moibenko/enstore
#!/usr/bin/env python
######################################################################
# src/$RCSfile$ $Revision$
#
import cgi
import string
import os
import posixpath
import sys
import tempfile
import re
import getpass
import enstore_utils_cgi
def go():
    """CGI entry point: grep Enstore log files for a user-supplied string.

    Reads the 'search' (required) and 'logfile' (optional; defaults to
    "all") fields from the submitted form, asks the Enstore log server
    for the matching log file names, and writes the grep results to
    stdout as an HTML page.  The closing HTML tags are always emitted,
    even on early exit via SystemExit.
    """
    # first print the two lines for the header
    print "Content-type: text/html"
    print
    # now start the real html
    print "<HTML><HEAD><TITLE>Enstore Command Output</TITLE></HEAD><BODY>"
    try:
        # get the data from the form
        form = cgi.FieldStorage()
        keys = form.keys()
        an_argv = []
        if form.has_key("search"):
            search_string = form["search"].value
        else:
            # the user did not enter a search string
            print "ERROR: Please enter a search string."
            raise SystemExit
        # we need to find the location of enstore so we can import
        (config_host, config_port) = enstore_utils_cgi.find_enstore()
        config_port = int(config_port)
        # Deferred imports: these modules only resolve once find_enstore()
        # has located the enstore installation.
        import log_client
        import log_server
        if form.has_key("logfile"):
            logfile = form["logfile"].value
            # as a convenience to the user, we will check if the user forgot to add
            # the LOG- prefix onto the log file name, and add it ourselves.
            for lkey in log_client.VALID_PERIODS.keys():
                if logfile == lkey:
                    # we found a match so we will not be adding the generic log
                    # file prefix to the name of the entered logfile
                    break
            else:
                # assume that if the first character of the log file is a number, then
                # we need to add the file prefix.
                if logfile[0] in string.digits:
                    logfile = "%s%s"%(log_server.FILE_PREFIX, logfile)
        else:
            # the user did not enter a logfile name assume all
            logfile = "all"
        # get a list of the log files we need
        logc = log_client.LoggerClient((config_host, config_port))
        ticket = logc.get_logfiles(logfile, enstore_utils_cgi.TIMEOUT,
                                   enstore_utils_cgi.RETRIES)
        logfile_names = ticket['logfiles']
        if logfile_names == []:
            # there were no matches
            print "<BR><P>"
            print "There were no log files that matched the entered description."
        else:
            # put the files in alphabetical order
            logfile_names.sort()
            # if the period was yesterday, we do not need today
            if logfile == log_client.YESTERDAY:
                logfile_names = [logfile_names[0],]
            # for each name, search the file using the search string
            enstore_utils_cgi.pgrep_html(search_string, logfile_names, 0)
    finally:
        print "</BODY></HTML>"
# CGI scripts are executed directly; emit the results page when run as a script.
if __name__ == "__main__":
    go()
| 2.84375 | 3 |
brats_competition/model_training/common/datasets/__init__.py | andriiaprysiazhnyk/brats_competition | 2 | 12770248 | from .brats_2d import *
from .brats_3d import * | 1.195313 | 1 |
Caesar cipher/Decrypt.py | ChetanSP8698/Cryptography | 0 | 12770249 | <reponame>ChetanSP8698/Cryptography<gh_stars>0
def decrypt(ciphertext, s):
    """Decrypt a Caesar cipher by shifting each letter back by ``s`` positions.

    Parameters:
        ciphertext: the encrypted message.
        s: the shift amount that was used during encryption.

    Returns:
        The decrypted plaintext string.  Uppercase and lowercase letters are
        shifted within their own alphabets (wrapping around via mod 26);
        all other characters pass through unchanged.
    """
    pltext = ""
    for char in ciphertext:
        if char.isupper():
            # Shift back within A-Z (ord('A') == 65), wrapping with mod 26.
            pltext += chr((ord(char) - s - 65) % 26 + 65)
        elif char.islower():
            # Shift back within a-z (ord('a') == 97), wrapping with mod 26.
            pltext += chr((ord(char) - s - 97) % 26 + 97)
        else:
            # Bug fix: digits, spaces, and punctuation were previously run
            # through the lowercase formula and corrupted; a Caesar cipher
            # leaves non-letters untouched, so pass them through as-is.
            pltext += char
    return pltext
# Demo: decrypt a sample message that was Caesar-encrypted with a shift of 4.
ciphertext = "EXXEGOEXSRGI"
s = 4
print("Cipher : " + ciphertext)
print("Shift : " + str(s))
print("Plain text: " + decrypt(ciphertext,s))
aiida/backends/sqlalchemy/migrations/versions/162b99bca4a2_drop_dbcalcstate.py | azadoks/aiida-core | 180 | 12770250 | <reponame>azadoks/aiida-core<filename>aiida/backends/sqlalchemy/migrations/versions/162b99bca4a2_drop_dbcalcstate.py
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name,no-member
"""Drop the DbCalcState table
Revision ID: <KEY>
Revises: a603da2cc809
Create Date: 2018-11-14 08:37:13.719646
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = 'a603da2cc809'
branch_labels = None
depends_on = None
def upgrade():
    """Forward migration: drop the obsolete db_dbcalcstate table."""
    op.drop_table('db_dbcalcstate')
def downgrade():
op.create_table(
'db_dbcalcstate', sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('dbnode_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('state', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('time', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['dbnode_id'], ['db_dbnode.id'],
name='db_dbcalcstate_dbnode_id_fkey',
ondelete='CASCADE',
initially='DEFERRED',
deferrable=True), sa.PrimaryKeyConstraint('id', name='db_dbcalcstate_pkey'),
sa.UniqueConstraint('dbnode_id', 'state', name='db_dbcalcstate_dbnode_id_state_key')
)
| 1.523438 | 2 |