| Column | Type | Range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–281 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 6–116 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k–668M, nullable |
| star_events_count | int64 | 0–102k |
| fork_events_count | int64 | 0–38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.02M |
| extension | string | 78 classes |
| content | string | length 2–6.02M |
| authors | list | length 1 |
| author | string | length 0–175 |
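Each record below fills these columns in order, with fields separated by `|`. As a minimal sketch of how a corpus with this schema could be consumed, assuming it is published on the Hugging Face Hub (the dataset identifier used here is a placeholder, not the real name):

```python
from datasets import load_dataset

# "org/python-source-corpus" is a placeholder identifier, not the actual Hub name.
ds = load_dataset("org/python-source-corpus", split="train", streaming=True)

# Keep only permissively licensed files that are neither vendored nor generated.
kept = (
    row for row in ds
    if row["license_type"] == "permissive"
    and not row["is_vendor"]
    and not row["is_generated"]
)

for row in kept:
    print(row["repo_name"], row["path"], row["length_bytes"])
    break  # inspect just the first matching record
```

Streaming avoids downloading the full corpus just to inspect a few records; the keys used above are the column names from the schema table.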
30e8a20521a6bc8d67539d32f44ca040fae0ad73
|
292c19b382dd882a60a31cbe291ca036f39ebb7f
|
/args.py
|
3dba48b23e9683711ac9610852aeb74c3c154d93
|
[] |
no_license
|
shailzajolly/EaSe
|
687e947319f008240db352119b0b9363e2be8d5a
|
53518e8273b7ee01d5c005f052671c2e96ab29a4
|
refs/heads/main
| 2023-04-05T04:13:55.864936
| 2021-04-06T13:22:41
| 2021-04-06T13:22:41
| 355,194,977
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,440
|
py
|
import argparse


def get_args():
    parser = argparse.ArgumentParser(description="data parameters")
    parser.add_argument("--annotation_path_tr",
                        help="Path to file that contains annotations for training data.",
                        type=str,
                        default='v2_mscoco_train2014_annotations.json')
    parser.add_argument("--question_path_tr",
                        help="Path to file that contains questions for training data.",
                        type=str,
                        default='v2_OpenEnded_mscoco_train2014_questions.json')
    parser.add_argument("--annotation_path_val",
                        help="Path to file that contains annotations for validation data.",
                        type=str,
                        default='v2_mscoco_val2014_annotations.json')
    parser.add_argument("--question_path_val",
                        help="Path to file that contains questions for validation data.",
                        type=str,
                        default='v2_OpenEnded_mscoco_val2014_questions.json')
    parser.add_argument("--data_split",
                        help="Split of the dataset whose scores are to be computed. Possible options: train/val",
                        type=str,
                        default='train')
    parser.add_argument("--word2vec",
                        help="Path for word2vec model",
                        type=str,
                        default='fastText/wiki-news-300d-1M-subword.vec')
    parser.add_argument("--data_name",
                        help="Dataset name used in the experiment. Possible Types: VQA/VizWiz",
                        type=str,
                        default='VQA')
    parser.add_argument("--pred_file",
                        help="Prediction file generated by the model.",
                        type=str,
                        default='LXMERTpreds/VQ_pred_id2scr_TrHE')
    parser.add_argument("--pred_model",
                        help="Model used to create prediction file.",
                        type=str,
                        default='lxmert')
    parser.add_argument("--id_dir",
                        help="Directory where question ids computed by SeS scores are present.",
                        type=str,
                        default='VQA2.0_ids/entropy/E_')

    args, unparsed = parser.parse_known_args()
    print(args)
    return args
|
[
"shailzajolly@gmail.com"
] |
shailzajolly@gmail.com
|
b6d70d3cd0bcef780e9d1bf21d1470f79ecdd2e7
|
2fc197681ac9cdd0346fe9ab56d9aa4d59b6f1d0
|
/polyaxon/db/migrations/0001_initial.py
|
27bab9b4b5e9c713404ae0d918e70a6b313ea7ff
|
[
"MIT"
] |
permissive
|
dtaniwaki/polyaxon
|
32e0fcfc4cd4b46d1d502ae26cd285dc9c11d55a
|
04e3c9c9a732a2128233e8d1db1bdc1647fe7c55
|
refs/heads/master
| 2020-03-20T08:16:33.334881
| 2018-06-13T22:40:17
| 2018-06-13T22:40:17
| 137,303,634
| 0
| 0
| null | 2018-06-14T03:53:13
| 2018-06-14T03:53:13
| null |
UTF-8
|
Python
| false
| false
| 52,402
|
py
|
# Generated by Django 2.0.3 on 2018-06-12 13:31
import db.models.abstract_jobs
import db.models.repos
import db.models.utils
from django.conf import settings
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import libs.blacklist
import libs.resource_validation
import libs.spec_validation
import re
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='ActivityLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('event_type', models.CharField(max_length=128)),
('context', django.contrib.postgres.fields.jsonb.JSONField(help_text='Extra context information.')),
('created_at', models.DateTimeField()),
('object_id', models.PositiveIntegerField()),
('actor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
options={
'verbose_name': 'activity log',
'verbose_name_plural': 'activities logs',
},
),
migrations.CreateModel(
name='BuildJob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this job.')),
('definition', django.contrib.postgres.fields.jsonb.JSONField(default={}, help_text='The specific values/manifest for this job.')),
('config', django.contrib.postgres.fields.jsonb.JSONField(help_text='The compiled polyaxonfile for the build job.', validators=[libs.spec_validation.validate_build_spec_config])),
('dockerfile', models.TextField(blank=True, help_text='The dockerfile used to create the image with this job.', null=True)),
],
bases=(models.Model, db.models.utils.LastStatusMixin, db.models.abstract_jobs.JobMixin),
),
migrations.CreateModel(
name='BuildJobStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Building', 'Building'), ('Scheduled', 'Scheduled'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped'), ('UNKNOWN', 'UNKNOWN')], default='Created', max_length=64, null=True)),
('details', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}, null=True)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.BuildJob')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Build Job Statuses',
'abstract': False,
},
),
migrations.CreateModel(
name='ChartVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('version', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='CliVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('latest_version', models.CharField(max_length=16)),
('min_version', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='Cluster',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('version_api', django.contrib.postgres.fields.jsonb.JSONField(help_text='The cluster version api info')),
],
),
migrations.CreateModel(
name='ClusterEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('data', django.contrib.postgres.fields.jsonb.JSONField()),
('meta', django.contrib.postgres.fields.jsonb.JSONField()),
('level', models.CharField(max_length=16)),
('cluster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='db.Cluster')),
],
),
migrations.CreateModel(
name='ClusterNode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this node within the cluser.')),
('name', models.CharField(help_text='Name of the node', max_length=256)),
('hostname', models.CharField(blank=True, max_length=256, null=True)),
('role', models.CharField(choices=[('master', 'master'), ('agent', 'agent')], help_text='The role of the node', max_length=16)),
('docker_version', models.CharField(blank=True, max_length=128, null=True)),
('kubelet_version', models.CharField(max_length=64)),
('os_image', models.CharField(max_length=128)),
('kernel_version', models.CharField(max_length=128)),
('schedulable_taints', models.BooleanField(default=False)),
('schedulable_state', models.BooleanField(default=False)),
('memory', models.BigIntegerField()),
('cpu', models.FloatField()),
('n_gpus', models.PositiveSmallIntegerField()),
('status', models.CharField(choices=[('UNKNOWN', 'UNKNOWN'), ('Ready', 'Ready'), ('NotReady', 'NotReady'), ('Deleted', 'Deleted')], default='UNKNOWN', max_length=24)),
('is_current', models.BooleanField(default=True)),
('cluster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='nodes', to='db.Cluster')),
],
options={
'ordering': ['sequence'],
},
),
migrations.CreateModel(
name='CodeReference',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('commit', models.CharField(blank=True, max_length=40, null=True)),
],
),
migrations.CreateModel(
name='Experiment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this experiment within the project.')),
('declarations', django.contrib.postgres.fields.jsonb.JSONField(blank=True, help_text='The parameters used for this experiment.', null=True)),
('config', django.contrib.postgres.fields.jsonb.JSONField(blank=True, help_text='The compiled polyaxon with specific values for this experiment.', null=True, validators=[libs.spec_validation.validate_experiment_spec_config])),
('cloning_strategy', models.CharField(blank=True, choices=[('copy', 'copy'), ('restart', 'restart'), ('resume', 'resume')], default='restart', max_length=16, null=True)),
('build_job', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.BuildJob')),
('code_reference', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.CodeReference')),
],
options={
'ordering': ['sequence'],
},
bases=(models.Model, db.models.utils.LastStatusMixin),
),
migrations.CreateModel(
name='ExperimentGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this group within the project.')),
('content', models.TextField(blank=True, help_text='The yaml content of the polyaxonfile/specification.', null=True, validators=[libs.spec_validation.validate_group_spec_content])),
('hptuning', django.contrib.postgres.fields.jsonb.JSONField(blank=True, help_text='The experiment group hptuning params config.', null=True, validators=[libs.spec_validation.validate_group_hptuning_config])),
('code_reference', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.CodeReference')),
],
options={
'ordering': ['sequence'],
},
bases=(models.Model, db.models.utils.LastStatusMixin),
),
migrations.CreateModel(
name='ExperimentGroupIteration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('data', django.contrib.postgres.fields.jsonb.JSONField(help_text='The experiment group iteration meta data.')),
('experiment_group', models.ForeignKey(help_text='The experiment group.', on_delete=django.db.models.deletion.CASCADE, related_name='iterations', to='db.ExperimentGroup')),
],
options={
'ordering': ['created_at'],
},
),
migrations.CreateModel(
name='ExperimentGroupStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped')], default='Created', max_length=64, null=True)),
('experiment_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.ExperimentGroup')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Experiment group Statuses',
},
),
migrations.CreateModel(
name='ExperimentJob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this job.')),
('definition', django.contrib.postgres.fields.jsonb.JSONField(default={}, help_text='The specific values/manifest for this job.')),
('role', models.CharField(default='master', max_length=64)),
('experiment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='jobs', to='db.Experiment')),
],
options={
'ordering': ['sequence'],
},
bases=(models.Model, db.models.utils.LastStatusMixin),
),
migrations.CreateModel(
name='ExperimentJobStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Building', 'Building'), ('Scheduled', 'Scheduled'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped'), ('UNKNOWN', 'UNKNOWN')], default='Created', max_length=64, null=True)),
('details', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}, null=True)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.ExperimentJob')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Experiment Job Statuses',
'abstract': False,
},
),
migrations.CreateModel(
name='ExperimentMetric',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('values', django.contrib.postgres.fields.jsonb.JSONField()),
('experiment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='metrics', to='db.Experiment')),
],
options={
'ordering': ['created_at'],
},
),
migrations.CreateModel(
name='ExperimentStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Resuming', 'Resuming'), ('Building', 'Building'), ('Scheduled', 'Scheduled'), ('Starting', 'Starting'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped'), ('UNKNOWN', 'UNKNOWN')], default='Created', max_length=64, null=True)),
('experiment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.Experiment')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Experiment Statuses',
},
),
migrations.CreateModel(
name='ExternalRepo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('git_url', models.URLField()),
('is_public', models.BooleanField(default=True, help_text='If repo is public or private.')),
],
bases=(models.Model, db.models.repos.RepoMixin),
),
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this job.')),
('definition', django.contrib.postgres.fields.jsonb.JSONField(default={}, help_text='The specific values/manifest for this job.')),
('config', django.contrib.postgres.fields.jsonb.JSONField(help_text='The compiled polyaxonfile for the run job.', validators=[libs.spec_validation.validate_job_spec_config])),
('cloning_strategy', models.CharField(blank=True, choices=[('copy', 'copy'), ('restart', 'restart'), ('resume', 'resume')], default='restart', max_length=16, null=True)),
('build_job', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.BuildJob')),
('code_reference', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.CodeReference')),
('original_job', models.ForeignKey(blank=True, help_text='The original job that was cloned from.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='clones', to='db.Job')),
],
bases=(models.Model, db.models.utils.LastStatusMixin, db.models.abstract_jobs.JobMixin),
),
migrations.CreateModel(
name='JobResources',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cpu', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True, validators=[libs.resource_validation.validate_resource])),
('memory', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True, validators=[libs.resource_validation.validate_resource])),
('gpu', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True, validators=[libs.resource_validation.validate_resource])),
],
options={
'verbose_name': 'job resources',
'verbose_name_plural': 'jobs resources',
},
),
migrations.CreateModel(
name='JobStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Building', 'Building'), ('Scheduled', 'Scheduled'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped'), ('UNKNOWN', 'UNKNOWN')], default='Created', max_length=64, null=True)),
('details', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}, null=True)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.Job')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Run Job Statuses',
'abstract': False,
},
),
migrations.CreateModel(
name='LibVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('latest_version', models.CharField(max_length=16)),
('min_version', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='NodeGPU',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('index', models.PositiveSmallIntegerField()),
('serial', models.CharField(max_length=256)),
('name', models.CharField(max_length=256)),
('memory', models.BigIntegerField()),
('cluster_node', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='gpus', to='db.ClusterNode')),
],
options={
'ordering': ['index'],
},
),
migrations.CreateModel(
name='NotebookJob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this job.')),
('definition', django.contrib.postgres.fields.jsonb.JSONField(default={}, help_text='The specific values/manifest for this job.')),
('config', django.contrib.postgres.fields.jsonb.JSONField(help_text='The compiled polyaxonfile for the notebook job.', validators=[libs.spec_validation.validate_notebook_spec_config])),
('build_job', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.BuildJob')),
('code_reference', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.CodeReference')),
],
bases=(models.Model, db.models.utils.LastStatusMixin, db.models.abstract_jobs.JobMixin),
),
migrations.CreateModel(
name='NotebookJobStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Building', 'Building'), ('Scheduled', 'Scheduled'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped'), ('UNKNOWN', 'UNKNOWN')], default='Created', max_length=64, null=True)),
('details', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}, null=True)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.NotebookJob')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Notebook Job Statuses',
'abstract': False,
},
),
migrations.CreateModel(
name='Operation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('execute_at', models.DateTimeField(blank=True, help_text='When this instance should be executed. default None which translate to now', null=True)),
('timeout', models.PositiveIntegerField(blank=True, help_text='specify how long this instance should be up before timing out in seconds.', null=True)),
('trigger_policy', models.CharField(blank=True, choices=[('all_succeeded', 'all_succeeded'), ('all_failed', 'all_failed'), ('all_done', 'all_done'), ('one_succeeded', 'one_succeeded'), ('one_failed', 'one_failed'), ('one_done', 'one_done')], default='all_succeeded', help_text='defines the rule by which dependencies are applied, default is `all_success`.', max_length=16, null=True)),
('max_retries', models.PositiveSmallIntegerField(blank=True, help_text='the number of retries that should be performed before failing the operation.', null=True)),
('retry_delay', models.PositiveIntegerField(blank=True, default=60, help_text='The delay between retries.', null=True)),
('retry_exponential_backoff', models.BooleanField(default=False, help_text='allow progressive longer waits between retries by using exponential backoff algorithm on retry delay.')),
('max_retry_delay', models.PositiveIntegerField(blank=True, default=3600, help_text='maximum delay interval between retries.', null=True)),
('concurrency', models.PositiveSmallIntegerField(blank=True, help_text='When set, an operation will be able to limit the concurrent runs across execution_dates', null=True)),
('run_as_user', models.CharField(blank=True, help_text='unix username to impersonate while running the operation.', max_length=64, null=True)),
('config', models.TextField(blank=True, null=True)),
('celery_task', models.CharField(help_text='The celery task name to execute.', max_length=128)),
('celery_queue', models.CharField(blank=True, help_text='The celery queue name to use for the executing this task. If provided, it will override the queue provided in CELERY_TASK_ROUTES.', max_length=128, null=True)),
],
),
migrations.CreateModel(
name='OperationRun',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('celery_task_context', django.contrib.postgres.fields.jsonb.JSONField(blank=True, help_text='The kwargs required to execute the celery task.', null=True)),
('celery_task_id', models.CharField(blank=True, max_length=36)),
('operation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runs', to='db.Operation')),
],
bases=(models.Model, db.models.utils.LastStatusMixin),
),
migrations.CreateModel(
name='OperationRunStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('created', 'created'), ('scheduled', 'scheduled'), ('running', 'running'), ('finished', 'finished'), ('stopped', 'stopped'), ('skipped', 'skipped')], default='created', max_length=64, null=True)),
('operation_run', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.OperationRun')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Operation Run Statuses',
},
),
migrations.CreateModel(
name='Pipeline',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('execute_at', models.DateTimeField(blank=True, help_text='When this instance should be executed. default None which translate to now', null=True)),
('timeout', models.PositiveIntegerField(blank=True, help_text='specify how long this instance should be up before timing out in seconds.', null=True)),
('name', models.CharField(max_length=256, validators=[django.core.validators.RegexValidator(re.compile('^[-a-zA-Z0-9_]+\\Z'), "Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.", 'invalid'), libs.blacklist.validate_blacklist_name])),
('concurrency', models.PositiveSmallIntegerField(blank=True, help_text='If set, it determines the number of operation instances allowed to run concurrently.', null=True)),
],
),
migrations.CreateModel(
name='PipelineRun',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('pipeline', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runs', to='db.Pipeline')),
],
bases=(models.Model, db.models.utils.LastStatusMixin),
),
migrations.CreateModel(
name='PipelineRunStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('created', 'created'), ('scheduled', 'scheduled'), ('running', 'running'), ('finished', 'finished'), ('stopped', 'stopped'), ('skipped', 'skipped')], default='created', max_length=64, null=True)),
('pipeline_run', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.PipelineRun')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Pipeline Run Statuses',
},
),
migrations.CreateModel(
name='PlatformVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('latest_version', models.CharField(max_length=16)),
('min_version', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('name', models.CharField(max_length=256, validators=[django.core.validators.RegexValidator(re.compile('^[-a-zA-Z0-9_]+\\Z'), "Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.", 'invalid'), libs.blacklist.validate_blacklist_name])),
('is_public', models.BooleanField(default=True, help_text='If project is public or private.')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='projects', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Repo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('is_public', models.BooleanField(default=True, help_text='If repo is public or private.')),
('project', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='repo', to='db.Project')),
],
bases=(models.Model, db.models.repos.RepoMixin),
),
migrations.CreateModel(
name='Schedule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('frequency', models.CharField(blank=True, help_text="Defines how often to run, this timedelta object gets added to your latest operation instance's execution_date to figure out the next schedule", max_length=64, null=True)),
('start_at', models.DateTimeField(blank=True, help_text='When this instance should run, default is None which translate to now.', null=True)),
('end_at', models.DateTimeField(blank=True, help_text='When this instance should stop running, default is None which translate to open ended.', null=True)),
('depends_on_past', models.BooleanField(default=False, help_text="when set to true, the instances will run sequentially while relying on the previous instances' schedule to succeed.")),
],
),
migrations.CreateModel(
name='SSOIdentity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('provider', models.CharField(choices=[('github', 'github'), ('bitbucket', 'bitbucket'), ('gitlab', 'gitlab')], max_length=32)),
('external_id', models.CharField(max_length=64, null=True)),
('valid', models.BooleanField(default=False)),
('last_verified', models.DateTimeField(default=django.utils.timezone.now)),
('last_synced', models.DateTimeField(default=django.utils.timezone.now)),
('scopes', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=200), blank=True, null=True, size=None)),
('data', django.contrib.postgres.fields.jsonb.JSONField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='identities', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'SSO identity',
'verbose_name_plural': 'SSO identities',
},
),
migrations.CreateModel(
name='TensorboardJob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this job.')),
('definition', django.contrib.postgres.fields.jsonb.JSONField(default={}, help_text='The specific values/manifest for this job.')),
('config', django.contrib.postgres.fields.jsonb.JSONField(help_text='The compiled polyaxonfile for the tensorboard job.', validators=[libs.spec_validation.validate_tensorboard_spec_config])),
('build_job', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.BuildJob')),
('code_reference', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.CodeReference')),
('experiment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tensorboard_jobs', to='db.Experiment')),
('experiment_group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tensorboard_jobs', to='db.ExperimentGroup')),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tensorboard_jobs', to='db.Project')),
],
bases=(models.Model, db.models.utils.LastStatusMixin, db.models.abstract_jobs.JobMixin),
),
migrations.CreateModel(
name='TensorboardJobStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Building', 'Building'), ('Scheduled', 'Scheduled'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped'), ('UNKNOWN', 'UNKNOWN')], default='Created', max_length=64, null=True)),
('details', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}, null=True)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.TensorboardJob')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Tensorboard Job Statuses',
'abstract': False,
},
),
migrations.AddField(
model_name='tensorboardjob',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.TensorboardJobStatus'),
),
migrations.AddField(
model_name='tensorboardjob',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='pipelinerun',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.PipelineRunStatus'),
),
migrations.AddField(
model_name='pipeline',
name='project',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='pipelines', to='db.Project'),
),
migrations.AddField(
model_name='pipeline',
name='schedule',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='db.Schedule'),
),
migrations.AddField(
model_name='pipeline',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='pipelines', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='operationrun',
name='pipeline_run',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='operation_runs', to='db.PipelineRun'),
),
migrations.AddField(
model_name='operationrun',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.OperationRunStatus'),
),
migrations.AddField(
model_name='operationrun',
name='upstream_runs',
field=models.ManyToManyField(blank=True, related_name='downstream_runs', to='db.OperationRun'),
),
migrations.AddField(
model_name='operation',
name='pipeline',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='operations', to='db.Pipeline'),
),
migrations.AddField(
model_name='operation',
name='schedule',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='db.Schedule'),
),
migrations.AddField(
model_name='operation',
name='upstream_operations',
field=models.ManyToManyField(blank=True, related_name='downstream_operations', to='db.Operation'),
),
migrations.AddField(
model_name='notebookjob',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notebook_jobs', to='db.Project'),
),
migrations.AddField(
model_name='notebookjob',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.NotebookJobStatus'),
),
migrations.AddField(
model_name='notebookjob',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='job',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='jobs', to='db.Project'),
),
migrations.AddField(
model_name='job',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.JobStatus'),
),
migrations.AddField(
model_name='job',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='externalrepo',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='external_repos', to='db.Project'),
),
migrations.AddField(
model_name='experimentjob',
name='resources',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.JobResources'),
),
migrations.AddField(
model_name='experimentjob',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.ExperimentJobStatus'),
),
migrations.AddField(
model_name='experimentgroup',
name='project',
field=models.ForeignKey(help_text='The project this polyaxonfile belongs to.', on_delete=django.db.models.deletion.CASCADE, related_name='experiment_groups', to='db.Project'),
),
migrations.AddField(
model_name='experimentgroup',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.ExperimentGroupStatus'),
),
migrations.AddField(
model_name='experimentgroup',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experiment_groups', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='experiment',
name='experiment_group',
field=models.ForeignKey(blank=True, help_text='The experiment group that generate this experiment.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='experiments', to='db.ExperimentGroup'),
),
migrations.AddField(
model_name='experiment',
name='metric',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.ExperimentMetric'),
),
migrations.AddField(
model_name='experiment',
name='original_experiment',
field=models.ForeignKey(blank=True, help_text='The original experiment that was cloned from.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='clones', to='db.Experiment'),
),
migrations.AddField(
model_name='experiment',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experiments', to='db.Project'),
),
migrations.AddField(
model_name='experiment',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.ExperimentStatus'),
),
migrations.AddField(
model_name='experiment',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experiments', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='codereference',
name='external_repo',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='references', to='db.ExternalRepo'),
),
migrations.AddField(
model_name='codereference',
name='repo',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='references', to='db.Repo'),
),
migrations.AddField(
model_name='buildjob',
name='code_reference',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.CodeReference'),
),
migrations.AddField(
model_name='buildjob',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='build_jobs', to='db.Project'),
),
migrations.AddField(
model_name='buildjob',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.BuildJobStatus'),
),
migrations.AddField(
model_name='buildjob',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='ssoidentity',
unique_together={('provider', 'user'), ('provider', 'external_id')},
),
migrations.AlterUniqueTogether(
name='project',
unique_together={('user', 'name')},
),
migrations.AlterUniqueTogether(
name='nodegpu',
unique_together={('cluster_node', 'index')},
),
migrations.AlterUniqueTogether(
name='externalrepo',
unique_together={('project', 'git_url')},
),
migrations.AlterUniqueTogether(
name='experimentjob',
unique_together={('experiment', 'sequence')},
),
migrations.AlterUniqueTogether(
name='experimentgroup',
unique_together={('project', 'sequence')},
),
migrations.AlterUniqueTogether(
name='experiment',
unique_together={('project', 'sequence')},
),
migrations.AlterUniqueTogether(
name='clusternode',
unique_together={('cluster', 'sequence')},
),
]
|
[
"mouradmourafiq@gmail.com"
] |
mouradmourafiq@gmail.com
|
ff0d63e5404e77e306860887c8d0ff6eb8b09f4a
|
426e3b7138107d4ef6fb048a0f7b1c4d72617ee6
|
/MRI_ProstateCancer_Classification/test.py
|
e2fca76eb8b994975e620311388537eb98390f09
|
[] |
no_license
|
andrewmlu/MRI-prostate
|
5d8d16987ab197e79dd231cad61ce65c48cd50d1
|
760552cfea0a3056d21ba1ac81261f5e2af1fe46
|
refs/heads/master
| 2023-01-23T10:29:46.507468
| 2019-09-09T15:20:31
| 2019-09-09T15:20:31
| 198,726,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
#%%
import keras
import numpy as np
import matplotlib.pyplot as plt

#%%
X = np.load('./CoRegistration/img_valid_data_3d_t2_tse_tra.npy')

#%%
print(X.shape)
for i in range(16):
    plt.imshow(X[0, i, :, :, 0])
    plt.show()

#%%
print(X.shape)
plt.imshow(X[4500, 3, :, :, 0])
plt.show()
|
[
"lu.andrew.m@gmail.com"
] |
lu.andrew.m@gmail.com
|
141edf402032a4bbe9c3349258944e9dcfa2c803
|
fb7efe44f4d9f30d623f880d0eb620f3a81f0fbd
|
/chrome/browser/android/digital_asset_links/DEPS
|
7023254e344e39b9b94c5db81d7a70a7df505240
|
[
"BSD-3-Clause"
] |
permissive
|
wzyy2/chromium-browser
|
2644b0daf58f8b3caee8a6c09a2b448b2dfe059c
|
eb905f00a0f7e141e8d6c89be8fb26192a88c4b7
|
refs/heads/master
| 2022-11-23T20:25:08.120045
| 2018-01-16T06:41:26
| 2018-01-16T06:41:26
| 117,618,467
| 3
| 2
|
BSD-3-Clause
| 2022-11-20T22:03:57
| 2018-01-16T02:09:10
| null |
UTF-8
|
Python
| false
| false
| 296
|
# It is likely that this code will eventually be shared across platforms, so
# excluding dependencies that would make this being a component impossible.
include_rules = [
  "-content",
  "-chrome",
  "+base",
  "+content/public/test",
  "+chrome/browser/android/digital_asset_links",
  "+net",
]
|
[
"jacob-chen@iotwrt.com"
] |
jacob-chen@iotwrt.com
|
|
9242c6b500fb9f1bf7ec63e07416b8c4618c046e
|
1ea4892f4a8f3ef4c1aad0d69ad6800dfbb74661
|
/actions/send_email.py
|
7be87c16bf8585d3c6a48a22d77aeed5972ae858
|
[] |
no_license
|
qube-ai/rule_vm
|
55c32715d3828bbb4d97438c80758631857b89e8
|
4e7cd890535077eb79bb8647fd5ea2542fd5c34d
|
refs/heads/main
| 2023-05-08T15:55:04.247590
| 2021-05-31T22:36:26
| 2021-05-31T22:36:26
| 304,846,945
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,716
|
py
|
import os
import trio
from loguru import logger
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail, Email, To
from .base import BaseAction
from .base import ActionConstant
from typing import Dict


class SendEmailAction(BaseAction):
    action_type = ActionConstant.SEND_EMAIL

    schema = {
        "$schema": "http://json-schema.org/draft-07/schema",
        "type": "object",
        "properties": {
            "type": {"type": "string", "enum": ["send_email"]},
            "subject": {"type": "string"},
            "body": {"type": "string"},
            "to": {"type": "array"},
        },
        "required": ["type", "subject", "body", "to"],
    }

    def __init__(self, action_data: Dict):
        super(SendEmailAction, self).__init__(action_data)
        self.subject = action_data["subject"]
        self.body = action_data["body"]
        self.to = action_data["to"]

    async def perform(self):
        from_email = Email("automated@thepodnet.com", name="Podnet")
        to_email = list(map(lambda x: To(x), self.to))
        message = Mail(
            from_email=from_email,
            to_emails=to_email,
            subject=self.subject,
            html_content=self.body,
        )

        def f():
            sg = SendGridAPIClient(os.getenv("SENDGRID_API_KEY"))
            response = sg.send(message)
            logger.info(
                f"Email sent. Status Code: {response.status_code}, Body: {response.body}"
            )

        try:
            await trio.to_thread.run_sync(f)
        except Exception as e:
            logger.error(f"Unable to send the email due to some error. Error: {e}")
            logger.error(f"Error body: {e.body}")
|
[
"apoorvasingh157@gmail.com"
] |
apoorvasingh157@gmail.com
|
0b11b102eba28b070f0010ddf12a2dc8dfa87b8c
|
a0396d4db961291938f04bf36d4305cc332414ce
|
/Lect/GUI/02.qpushbutton.py
|
89ffab1dee2ebe1f7cb6d1e02d1f0575aa5a69e5
|
[] |
no_license
|
taeyoung02/python
|
06697750565cce1602f472c09562273465227360
|
b0edc8ee30c32d1861460a91694ade94cc7633bb
|
refs/heads/master
| 2023-03-13T01:15:28.628185
| 2021-03-03T03:03:19
| 2021-03-03T03:03:19
| 219,281,272
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 811
|
py
|
from PyQt5 import QtWidgets


class MyWindows(QtWidgets.QWidget):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("파이썬 GUI")

        button = QtWidgets.QPushButton(self)
        button.setText("일반버튼")

        disableButton = QtWidgets.QPushButton(self)
        disableButton.setText("비활성버튼")
        disableButton.setEnabled(False)

        checkButton = QtWidgets.QPushButton(self)
        checkButton.setText("체크버튼")
        checkButton.setCheckable(True)
        checkButton.toggle()

        layout = QtWidgets.QHBoxLayout()
        layout.addWidget(button)
        layout.addWidget(disableButton)
        layout.addWidget(checkButton)
        self.setLayout(layout)
        self.show()


app = QtWidgets.QApplication([])
win = MyWindows()
app.exec_()
|
[
"49622935+taeyoung02@users.noreply.github.com"
] |
49622935+taeyoung02@users.noreply.github.com
|
9c1b67405acfc447e0bcde61a0b406ab29189c33
|
f4713830c8519daca9d75ec692a6937ee03c74d4
|
/Problems/Algorithms/953. Verifying an Alien Dictionary/alien_dictionary.py
|
af8a014ae986a3a0467e9a3207355cbfdb4b4240
|
[
"MIT"
] |
permissive
|
xuedong/leet-code
|
a0dd38cb884292de9d947718bb00160eff2b0f00
|
285d49cd7061ec43368d63b7c7c56763be520570
|
refs/heads/master
| 2023-09-03T02:38:55.932182
| 2023-09-02T18:35:42
| 2023-09-02T18:35:42
| 189,745,542
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
#!/usr/bin/env python3

from typing import List


class Solution:
    def isAlienSorted(self, words: List[str], order: str) -> bool:
        map = {ch: idx for idx, ch in enumerate(order)}
        words = [[map[ch] for ch in word] for word in words]
        return all(word1 <= word2 for word1, word2 in zip(words[:-1], words[1:]))
|
[
"shang.xuedong@yahoo.fr"
] |
shang.xuedong@yahoo.fr
|
3342dbd03130abc2b867b2e3e093a75c7f00aafa
|
1e177ebdcb470f738c058606ac0f86a36085f661
|
/Python3/Tkinter/tkinter020.py
|
23c5a22cc77a5d384d51239477848c13a696f07a
|
[] |
no_license
|
robingreig/raspi-git
|
5cbdd295c1048a0571aa2c2f8576438269439f07
|
7373bf94557d7a88c8f343362ba64f9cd19c8ce7
|
refs/heads/master
| 2023-08-31T03:16:17.286700
| 2023-08-26T11:54:23
| 2023-08-26T11:54:23
| 16,873,881
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 127
|
py
|
#!/usr/bin/python3
from tkinter import *
root = Tk()
myContainer1 = Frame(root)
myContainer1.pack()
root.mainloop()
|
[
"robin.greig@calalta.com"
] |
robin.greig@calalta.com
|
97bd664e8680888b299bcb523f490f03791bb935
|
d81f9d100f3b57c75f98a0501794cfb954414f0e
|
/exercises/ex_254.py
|
938d8c0e3d4490f99c76f6a535358391b56b4540
|
[
"Apache-2.0"
] |
permissive
|
edoriggio/algorithms-and-data-structures
|
6e45031453c6be7b7138034382137c2c9be5d5eb
|
d6f3ac520bb3021400bf47770de692ab0c305b75
|
refs/heads/master
| 2023-05-31T01:02:22.516132
| 2021-06-18T12:28:04
| 2021-06-18T12:28:04
| 255,593,633
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,096
|
py
|
# Copyright 2021 Edoardo Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Complexity: O(n^3)
# Given an array of points, find if there are at least 3 points that
# joined together form a right angle.


def algo_x(P):
    # Complexity: O(n)
    for i in range(len(P)):
        # Complexity: O(n)
        for j in range(len(P)):
            if j != i:
                ax = P[j][0] - P[i][0]
                ay = P[j][1] - P[i][1]
                # Complexity: O(n)
                for k in range(j+1, len(P)):
                    if k != i:
                        bx = P[k][0] - P[i][0]
                        by = P[k][1] - P[i][1]
                        # Right angle at P[i] when the dot product is zero.
                        if (ax * bx) + (ay * by) == 0:
                            return True
    return False


# Complexity: O(n^2)
def better_algo_x(P):
    x = [0] * (len(P)+1)
    DP = []

    # Complexity: O(n)
    for _ in range(len(P)+1):
        DP.append(x[:])

    # Complexity: O(n)
    for i in range(1, len(P)+1):
        # Complexity: O(n)
        for j in range(1, len(x)):
            if i == j:
                continue
            if DP[i-1][j] == 1:
                ax = P[j-1][0] - P[i-2][0]
                ay = P[j-1][1] - P[i-2][1]
                bx = P[i-1][0] - P[i-2][0]
                by = P[i-1][1] - P[i-2][1]
                if (ax * bx) + (ay * by) == 0:
                    return True
            else:
                if P[i-1][0] == P[j-1][0] \
                        or P[i-1][1] == P[j-1][1]:
                    DP[i][j] = 1

    return False


array = [(1, 5), (1, 2), (6, 2)]
print(better_algo_x(array))
|
[
"edo.riggio19@gmail.com"
] |
edo.riggio19@gmail.com
|
f205239515d090b06d7334b477d110d97c8c5e8c
|
33b97572f76c7f6ebfac4407ba2bca467edf0745
|
/batting_order_back_test.py
|
37253b7d77e4920adf5e9e0e14ce386e307c059f
|
[] |
no_license
|
fidler-analyst/Batting-Order-Optimization
|
0e83247cf4a4df226a6130a45e4c2421ff9a143e
|
88a7eddaa43f245cc2cd7030f3ac5d3527d76967
|
refs/heads/main
| 2023-08-24T06:14:41.964184
| 2021-10-16T22:24:22
| 2021-10-16T22:24:22
| 389,487,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,712
|
py
|
#batting_order_back_test
import pandas as pd
import numpy as np
df3 = pd.read_csv(r'C:\Users\maxfi\Desktop\Python\baseball\2021-mlb-season-pbp-feed.csv', usecols \
= ['GAME ID', 'BATTER', 'PLAYTYPE', 'ROADSCORE', 'HOMESCORE', 'INNING'])
df3 = df3.dropna(axis = 0)
HOME = np.where(df3['INNING'] == 'B', 1, 0)
HOME_TEAM = df3['GAME ID'].str[10:13]
ROAD_TEAM = df3['GAME ID'].str[6:9]
df3.insert(2, 'HM_TEAM', HOME_TEAM)
df3.insert(3, 'RD_TEAM', ROAD_TEAM)
df3.INNING = df3['INNING'].str[-1]
#df3 = df3.replace({'PLAYTYPE': {'SINGLE':1,'DOUBLE':2,'TRIPLE':3, 'HOME RUN':4, \
#'STRIKEOUT':0, 'FLYOUT':0, 'LINEOUT':0, 'POP OUT':0, 'GROUNDOUT':0, 'GROUNDED INTO DP':0, \
#'FORCEOUT':0, 'FIELD ERROR':1, 'FIELDERS CHOICE':0, 'DOUBLE PLAY':0, \
#'WALK':1, 'SAC BUNT':0, 'SAC FLY':0, 'HIT BY PITCH':1, 'CATCHER INTERFERENCE':1, 'INTENT WALK':1}})
home_games = df3[df3['HM_TEAM'] == 'COL']
home_games = home_games[home_games['INNING'] == 'B']
road_games = df3[df3['RD_TEAM'] == 'COL']
road_games = road_games[road_games['INNING'] == 'T']
#based off of weighted obp
best_lup = ['Yonathan Daza','C.J. Cron','Ryan McMahon','Joshua Fuentes', \
'Raimel Tapia','Charlie Blackmon','Garrett Hampson', 'Dom Nunez']
all_rocky_games = pd.concat([home_games, road_games])
list_of_games = pd.Series(all_rocky_games['GAME ID']).drop_duplicates().to_list()
games_with_the_boys = []
for game in list_of_games:
single_game = all_rocky_games[all_rocky_games['GAME ID'] == game]
players_in_game = pd.Series(single_game['BATTER']).head(n=8)
players_in_game.to_list()
players_in_game_check = sorted(players_in_game)
best_lup_check = sorted(best_lup)
if players_in_game_check == best_lup_check:
games_with_the_boys.append(game)
score_comparison = pd.DataFrame(index = games_with_the_boys, columns = ['Actual Score', 'My Model'])
for game in games_with_the_boys:
game_with_my_order = all_rocky_games[all_rocky_games['GAME ID']==game]
my_order_df = pd.DataFrame(index = best_lup, columns = ['1st', '2nd', '3rd', '4th'])
for player in best_lup:
player_df = game_with_my_order[game_with_my_order['BATTER']==player]
player_df = player_df.reset_index(drop = True)
my_order_df.loc[player, '1st'] = player_df.PLAYTYPE[0]
my_order_df.loc[player, '2nd'] = player_df.PLAYTYPE[1]
my_order_df.loc[player, '3rd'] = player_df.PLAYTYPE[2]
my_order_df.loc[player, '4th'] = player_df.PLAYTYPE[3]
my_order_df.loc[len(my_order_df)] = ['STRIKEOUT', 'STRIKEOUT', 'STRIKEOUT', 'STRIKEOUT']
my_order_df = my_order_df.rename(index={8:'Pitcher'})
my_event_list = pd.Series()
my_event_list = my_order_df['1st']
my_event_list = my_event_list.append(my_order_df['2nd'])
my_event_list = my_event_list.append(my_order_df['3rd'])
my_event_list = my_event_list.append(my_order_df['4th'])
inning = 1
runs = 0
outs = 0
r1, r2, r3 = 0, 0, 0;
total_outs = 0
for play in my_event_list:
if play == 'SINGLE':
if r1 == 0:
if r2 == 0:
if r3 == 0:
r1 = 1
else:
r1, r3 = 1, 0
runs += 1
else:
if r3 == 0:
r1, r2 = 1, 0
runs +=1
else:
r1, r2, r3 = 1, 0, 0
runs += 2
else:
if r2 == 0:
if r3 == 0:
r2 = 1
elif r3 == 1:
r2, r3 = 1, 0
runs+=1
elif r2 == 1:
if r3 == 0:
runs += 1
elif r3 == 1:
r3 = 0
runs += 2
elif play == 'DOUBLE':
runs += r2 + r3
if r1 == 1:
r1, r2, r3 = 0, 1, 1
else:
r1, r2, r3 = 0, 1, 0
elif play == 'TRIPLE':
runs += r1 + r2 + r3
r1, r2, r3 = 0, 0, 1
elif play == 'HOME RUN':
runs += r1 + r2 + r3 + 1
r1, r2, r3 = 0, 0, 0
elif play == 'FLYOUT':
outs += 1
elif play == 'POP OUT':
outs += 1
elif play == 'LINEOUT':
outs += 1
elif play == 'WALK':
if r1 == 0:
if r2 == 0:
if r3 == 0:
r1 = 1
else:
r1, r3 = 1, 1
else:
if r3 == 0:
r1, r2 = 1, 1
else:
r1, r2, r3 = 1, 1, 1
else:
if r2 == 0:
if r3 == 0:
r2 = 1
elif r3 == 1:
r2, r3 = 1, 1
elif r2 == 1:
if r3 == 0:
r3 = 1
elif r3 == 1:
runs += 1
elif play == "INTENT WALK":
if r1 == 0:
if r2 == 0:
if r3 == 0:
r1 = 1
else:
r1, r3 = 1, 1
else:
if r3 == 0:
r1, r2 = 1, 1
else:
r1, r2, r3 = 1, 1, 1
else:
if r2 == 0:
if r3 == 0:
r2 = 1
elif r3 == 1:
r2, r3 = 1, 1
elif r2 == 1:
if r3 == 0:
r3 = 1
elif r3 == 1:
runs += 1
elif play == 'HIT BY PITCH':
if r1 == 0:
if r2 == 0:
if r3 == 0:
r1 = 1
else:
r1, r3 = 1, 1
else:
if r3 == 0:
r1, r2 = 1, 1
else:
r1, r2, r3 = 1, 1, 1
else:
if r2 == 0:
if r3 == 0:
r2 = 1
elif r3 == 1:
r2, r3 = 1, 1
elif r2 == 1:
if r3 == 0:
r3 = 1
elif r3 == 1:
runs += 1
elif play == 'SAC BUNT':
#runs += 1
outs += 1
r3 = 0
elif play == 'SAC FLY':
runs += 1
outs += 1
r3 = 0
elif play == 'FIELDERS CHOICE':
outs += 1
elif play == 'FORCEOUT':
outs += 1
elif play == 'STRIKEOUT':
outs += 1
elif play == 'GROUNDOUT':
outs += 1
elif play == 'FIELD ERROR':
if r1 == 0:
r1 = 1
elif r1 == 1:
if r2 == 0:
r2 = 1
elif r2 == 1:
if r3 == 0:
r3 = 1
elif r3 == 1:
runs +=1
elif play == 'DOUBLE PLAY':
outs += 2
r1, r2, r3 = 0, 0, 0
elif play == 'GROUNDED INTO DP':
outs += 2
r1, r2, r3 = 0, 0, 0
if outs == 3:
inning += 1
outs = 0
r1, r2, r3 = 0, 0, 0;
    # use the current game's final score for the Rockies (HOMESCORE when COL is home, ROADSCORE on the road)
    last_play = game_with_my_order.iloc[-1]
    actual_score = last_play['HOMESCORE'] if last_play['HM_TEAM'] == 'COL' else last_play['ROADSCORE']
    score_comparison.loc[game, 'Actual Score'] = actual_score
score_comparison.loc[game, 'My Model'] = runs
score_comparison.to_excel(r'C:\Users\maxfi\Desktop\Python\baseball\Batting Order Optimizer\batting_order_back_test_results.xlsx')
|
[
"noreply@github.com"
] |
noreply@github.com
|
3e2d840dab7fafdba0367c9dcc831eaf950482c9
|
7554c1309bb2409618ced1747b0c6052fe7b963c
|
/audio-streaming-client-python/client_demo_simple.py
|
92cdbe562c3a1d069688c4370cc2515b5a7b9926
|
[] |
no_license
|
TheCarryKing/pie
|
ec464e1a459bfc602cdfeabac4f07318273df485
|
e18301e26c94004e84be73e2d31489537c2989ef
|
refs/heads/master
| 2022-04-24T22:49:53.692507
| 2020-04-26T12:00:05
| 2020-04-26T12:00:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,816
|
py
|
# -*-coding:utf-8-*-
from baidu_acu_asr.asr_client import AsrClient
from baidu_acu_asr.asr_product import AsrProduct
import time
import logging
import baidu_acu_asr.audio_streaming_pb2
def run():
"""
    Adds retry on failure (retransmission of the audio).
:return:
"""
for i in range(5):
        client = AsrClient(url, port, product_id, enable_flush_data,
log_level=log_level,
user_name=user_name,
password=password)
responses = client.get_result("testaudio/xeq16k.wav")
try:
for response in responses:
if response.type == baidu_acu_asr.audio_streaming_pb2.FRAGMENT_DATA:
logging.info("%s\t%s\t%s\t%s",
response.audio_fragment.start_time,
response.audio_fragment.end_time,
response.audio_fragment.result,
response.audio_fragment.serial_num)
else:
                    logging.warning("type is: %d, error code: %d, error message: %s",
response.type, response.error_code, response.error_message)
break
except Exception as ex:
            # If an exception occurs, retry the current audio here
logging.error("encounter an error: %s, will create a new channel and retry audio! times : %d",
ex.message, i + 1)
time.sleep(0.5)
if __name__ == '__main__':
logging.basicConfig(filename="asr_result.log")
log_level = 0
url = "127.0.0.1"
port = "8050"
# product_id = AsrProduct.INPUT_METHOD
product_id = "888"
sample_rate = 16000
enable_flush_data = True
user_name = "abc"
password = "123"
run()
|
[
"xiashuai01@baidu.com"
] |
xiashuai01@baidu.com
|
38c22d8b506d8af3eee94ee00588bd24a6db4a55
|
afd2743914263d62074da07303fd6a1e5d9b2fdc
|
/movie_recommender/src/scoring_service.py
|
82274b15c7c673b3fc1ae4961f326f23b8179b55
|
[] |
no_license
|
vybhavk/cloud_native_analytics
|
5676004dc4046a856d212c059875e354ae5d341c
|
e792a697723611fbf9267ed1a677951daca42c7d
|
refs/heads/master
| 2020-12-02T06:17:06.601908
| 2017-07-06T09:59:05
| 2017-07-06T09:59:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,225
|
py
|
import logging
import sys
import flask
from flask import Flask, request
from flask_cors import CORS
from movie_recommender.src.model import MovieRecommender
from movie_recommender.src import config
# Python2.x: Make default encoding as UTF-8
from util.data_store.local_filesystem import LocalFileSystem
if sys.version_info.major == 2:
reload(sys)
sys.setdefaultencoding('UTF8')
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
app = Flask(__name__)
CORS(app)
global movie_reco_model
@app.before_first_request
def load_model():
model_data_store = LocalFileSystem(src_dir="/movie_recommender/data")
assert (model_data_store is not None)
app.movie_reco_model = MovieRecommender.load_from_data_store(data_store=model_data_store)
assert app.movie_reco_model is not None
app.logger.info("movie recommendation model got loaded successfully!")
@app.route('/')
def heart_beat():
return flask.jsonify({"status": "ok"})
@app.route('/api/v1/recommend_movies')
def find_user_category():
userid = request.args.get('userid')
response = app.movie_reco_model.recommend_movies(user_id=int(userid))
return flask.jsonify(response)
if __name__ == "__main__":
app.run()
|
[
"hmistry@redhat.com"
] |
hmistry@redhat.com
|
b6cb0d98aa20ba4ad45816517fb090f55425bab8
|
493fb070c7f60137ae5f0f92e64f416445e2d126
|
/managers/swap.py
|
ac5abed4965adfdbb709030be04f58be42461a64
|
[] |
no_license
|
nyko27/algorithms_lab1
|
a93365c4481c6a86c719e3d3127ca45b8004df83
|
a52871b620ab343de7af0e58eabb35fb82e50228
|
refs/heads/main
| 2023-01-05T16:25:13.652155
| 2020-10-27T14:43:34
| 2020-10-27T14:43:34
| 303,790,346
| 0
| 0
| null | 2020-10-27T14:43:35
| 2020-10-13T18:09:54
| null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
def swap_elements(array, element_index, second_element_index):
array[element_index], array[second_element_index] = array[second_element_index], array[element_index]
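# Example (illustrative): given a = [1, 2, 3], calling swap_elements(a, 0, 2)
# swaps the items in place, leaving a == [3, 2, 1].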
|
[
"yuranykolysak@gmail.com"
] |
yuranykolysak@gmail.com
|
958e3c40bcd396ccc9057e1589dd0d12791f5c09
|
b4e8b85f5154bba569b83227d5d1a57acc9c10f7
|
/Script/createdata.py
|
e0ccdc861d699f22b344f5489274d6c1994f61be
|
[] |
no_license
|
Lazerbeam50/Roguelike-TBS
|
1380bd2ce34705e9c3ff8bd77cf2c8ce8a5e3aac
|
711e92439e91d0bf95cef6137d20653e0e504088
|
refs/heads/main
| 2023-03-06T04:19:42.842811
| 2021-02-20T21:28:27
| 2021-02-20T21:28:27
| 340,178,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,428
|
py
|
"""
Used to set up the empty database and tables.
"""
import sqlite3
def set_up_empty_database():
db = sqlite3.connect('Game data/game_data')
cursor = db.cursor()
#Ambiguous names
cursor.execute('''
CREATE TABLE IF NOT EXISTS AmbiguousNames
(
id INTEGER PRIMARY KEY,
name TEXT
)
''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS Classes
(
id INTEGER PRIMARY KEY,
name TEXT,
usesSwords INTEGER,
usesAxes INTEGER,
usesLances INTEGER,
usesBows INTEGER,
usesDaggers INTEGER,
usesDestruction INTEGER,
usesRestoration INTEGER,
catForm INTEGER,
bearForm INTEGER,
breathWeapon INTEGER,
usesTime INTEGER,
usesEnchanting INTEGER,
usesWitchcraft INTEGER,
hpBase INTEGER,
strengthBase INTEGER,
magicBase INTEGER,
dexterityBase INTEGER,
speedBase INTEGER,
defenceBase INTEGER,
resistanceBase INTEGER,
luckBase INTEGER,
hpBaseSD INTEGER,
strengthBaseSD INTEGER,
magicBaseSD INTEGER,
dexterityBaseSD INTEGER,
speedBaseSD INTEGER,
defenceBaseSD INTEGER,
resistanceBaseSD INTEGER,
luckBaseSD INTEGER,
hpGrowth INTEGER,
strengthGrowth INTEGER,
magicGrowth INTEGER,
dexterityGrowth INTEGER,
speedGrowth INTEGER,
defenceGrowth INTEGER,
resistanceGrowth INTEGER,
luckGrowth INTEGER,
hpGrowthSD INTEGER,
strengthGrowthSD INTEGER,
magicGrowthSD INTEGER,
dexterityGrowthSD INTEGER,
speedGrowthSD INTEGER,
defenceGrowthSD INTEGER,
resistanceGrowthSD INTEGER,
luckGrowthSD INTEGER,
movement INTEGER,
flying INTEGER,
mounted INTEGER,
critBonus INTEGER,
sprite TEXT
)
''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS WeaponsSpells
(
id INTEGER PRIMARY KEY,
name TEXT,
magic INTEGER,
itemType INTEGER,
rank INTEGER,
uses INTEGER,
power INTEGER,
hit INTEGER,
crit INTEGER,
scalesWith INTEGER,
minRange INTEGER,
maxRange INTEGER,
weight INTEGER,
exp INTEGER,
value INTEGER,
effect INTEGER,
bonus_vs1 INTEGER,
bonus_vs2 INTEGER
)
''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS FemaleNames
(
id INTEGER PRIMARY KEY,
name TEXT
)
''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS LastNames
(
id INTEGER PRIMARY KEY,
name TEXT
)
''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS MaleNames
(
id INTEGER PRIMARY KEY,
name TEXT
)
''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS AmbiguousNames
(
id INTEGER PRIMARY KEY,
name TEXT
)
''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS Maps
(
id INTEGER PRIMARY KEY,
name TEXT,
tileset INTEGER
)
''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS MapTiles
(
id INTEGER,
x TEXT,
y INTEGER
)
''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS MapDeployment
(
id INTEGER,
force INTEGER,
boss INTEGER,
x INTEGER,
y INTEGER
)
''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS Tiles
(
id INTEGER PRIMARY KEY,
name TEXT,
def INTEGER,
avoid INTEGER,
heal INTEGER,
moveCost INTEGER,
sprite INTEGER
)
''')
db.commit()
cursor.close()
db.close()
set_up_empty_database()
|
[
"FSolofin@gmail.com"
] |
FSolofin@gmail.com
|
0ba9aca97b1c1f59da1afb823752e4f46a680b96
|
feae88b4a8bc0aba388dcc2eeb7debb49d736809
|
/apps/second_app/urls.py
|
fb99d9914ffc2c2fedcdee10fd14c61afe4e550b
|
[] |
no_license
|
john-gore/belt3_retry
|
ec8a5582382fc00f0bcb3cf973fe9cd073ed571c
|
03aa6d7ff9988615a96d2c882282107d389b1c52
|
refs/heads/master
| 2021-07-21T11:11:42.972344
| 2017-10-29T21:34:09
| 2017-10-29T21:34:09
| 108,772,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
from django.conf.urls import url
from django.contrib import admin
from . import views
from ..first_app.models import User # This line is new!
urlpatterns = [
url(r'^$', views.index, name='index') # This line has changed!
]
|
[
"johngore@Johns-MBP.localdomain"
] |
johngore@Johns-MBP.localdomain
|
4d934a9777103d54358d8bcbe71f4c6a9b733b2b
|
329ddb4590887a0dfa3a8b18c6d93441e9e2eed0
|
/virtual/lib/python2.7/site-packages/sphinx/ext/autosummary/__init__.py
|
030fec3019b19069472e009b02f989406f723ff8
|
[] |
no_license
|
dj-neza/MoneyManager
|
d9b257cda5eb0a4b6a1e7edb0648d5c3a767cfa1
|
ba033672424f9833ca0f411779eb89fc28cb4b6c
|
refs/heads/master
| 2021-01-11T06:04:34.139883
| 2017-01-15T21:18:42
| 2017-01-15T21:18:42
| 71,716,793
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,388
|
py
|
# -*- coding: utf-8 -*-
"""
sphinx.ext.autosummary
~~~~~~~~~~~~~~~~~~~~~~
Sphinx extension that adds an autosummary:: directive, which can be
used to generate function/method/attribute/etc. summary lists, similar
to those output eg. by Epydoc and other API doc generation tools.
An :autolink: role is also provided.
autosummary directive
---------------------
The autosummary directive has the form::
.. autosummary::
:nosignatures:
:toctree: generated/
module.function_1
module.function_2
...
and it generates an output table (containing signatures, optionally)
======================== =============================================
module.function_1(args) Summary line from the docstring of function_1
module.function_2(args) Summary line from the docstring
...
======================== =============================================
If the :toctree: option is specified, files matching the function names
are inserted to the toctree with the given prefix:
generated/module.function_1
generated/module.function_2
...
Note: The file names contain the module:: or currentmodule:: prefixes.
.. seealso:: autosummary_generate.py
autolink role
-------------
The autolink role functions as ``:obj:`` when the name referred can be
resolved to a Python object, and otherwise it becomes simple emphasis.
This can be used as the default role to make links 'smart'.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
import inspect
import posixpath
from six import string_types
from types import ModuleType
from six import text_type
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from docutils import nodes
import sphinx
from sphinx import addnodes
from sphinx.util import import_object, rst
from sphinx.util.compat import Directive
from sphinx.pycode import ModuleAnalyzer, PycodeError
from sphinx.ext.autodoc import Options
# -- autosummary_toc node ------------------------------------------------------
class autosummary_toc(nodes.comment):
pass
def process_autosummary_toc(app, doctree):
"""Insert items described in autosummary:: to the TOC tree, but do
not generate the toctree:: list.
"""
env = app.builder.env
crawled = {}
def crawl_toc(node, depth=1):
crawled[node] = True
for j, subnode in enumerate(node):
try:
if (isinstance(subnode, autosummary_toc) and
isinstance(subnode[0], addnodes.toctree)):
env.note_toctree(env.docname, subnode[0])
continue
except IndexError:
continue
if not isinstance(subnode, nodes.section):
continue
if subnode not in crawled:
crawl_toc(subnode, depth+1)
crawl_toc(doctree)
def autosummary_toc_visit_html(self, node):
"""Hide autosummary toctree list in HTML output."""
raise nodes.SkipNode
def autosummary_noop(self, node):
pass
# -- autosummary_table node ----------------------------------------------------
class autosummary_table(nodes.comment):
pass
def autosummary_table_visit_html(self, node):
"""Make the first column of the table non-breaking."""
try:
tbody = node[0][0][-1]
for row in tbody:
col1_entry = row[0]
par = col1_entry[0]
for j, subnode in enumerate(list(par)):
if isinstance(subnode, nodes.Text):
new_text = text_type(subnode.astext())
new_text = new_text.replace(u" ", u"\u00a0")
par[j] = nodes.Text(new_text)
except IndexError:
pass
# -- autodoc integration -------------------------------------------------------
class FakeDirective(object):
env = {}
genopt = Options()
def get_documenter(obj, parent):
"""Get an autodoc.Documenter class suitable for documenting the given
object.
*obj* is the Python object to be documented, and *parent* is an
another Python object (e.g. a module or a class) to which *obj*
belongs to.
"""
from sphinx.ext.autodoc import AutoDirective, DataDocumenter, \
ModuleDocumenter
if inspect.ismodule(obj):
# ModuleDocumenter.can_document_member always returns False
return ModuleDocumenter
# Construct a fake documenter for *parent*
if parent is not None:
parent_doc_cls = get_documenter(parent, None)
else:
parent_doc_cls = ModuleDocumenter
if hasattr(parent, '__name__'):
parent_doc = parent_doc_cls(FakeDirective(), parent.__name__)
else:
parent_doc = parent_doc_cls(FakeDirective(), "")
    # Get the correct documenter class for *obj*
classes = [cls for cls in AutoDirective._registry.values()
if cls.can_document_member(obj, '', False, parent_doc)]
if classes:
classes.sort(key=lambda cls: cls.priority)
return classes[-1]
else:
return DataDocumenter
# -- .. autosummary:: ----------------------------------------------------------
class Autosummary(Directive):
"""
Pretty table containing short signatures and summaries of functions etc.
autosummary can also optionally generate a hidden toctree:: node.
"""
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
has_content = True
option_spec = {
'toctree': directives.unchanged,
'nosignatures': directives.flag,
'template': directives.unchanged,
}
def warn(self, msg):
self.warnings.append(self.state.document.reporter.warning(
msg, line=self.lineno))
def run(self):
self.env = env = self.state.document.settings.env
self.genopt = Options()
self.warnings = []
self.result = ViewList()
names = [x.strip().split()[0] for x in self.content
if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
items = self.get_items(names)
nodes = self.get_table(items)
if 'toctree' in self.options:
dirname = posixpath.dirname(env.docname)
tree_prefix = self.options['toctree'].strip()
docnames = []
for name, sig, summary, real_name in items:
docname = posixpath.join(tree_prefix, real_name)
docname = posixpath.normpath(posixpath.join(dirname, docname))
if docname not in env.found_docs:
self.warn('toctree references unknown document %r'
% docname)
docnames.append(docname)
tocnode = addnodes.toctree()
tocnode['includefiles'] = docnames
tocnode['entries'] = [(None, docn) for docn in docnames]
tocnode['maxdepth'] = -1
tocnode['glob'] = None
tocnode = autosummary_toc('', '', tocnode)
nodes.append(tocnode)
return self.warnings + nodes
def get_items(self, names):
"""Try to import the given names, and return a list of
``[(name, signature, summary_string, real_name), ...]``.
"""
env = self.state.document.settings.env
prefixes = get_import_prefixes_from_env(env)
items = []
max_item_chars = 50
for name in names:
display_name = name
if name.startswith('~'):
name = name[1:]
display_name = name.split('.')[-1]
try:
real_name, obj, parent, modname = import_by_name(name, prefixes=prefixes)
except ImportError:
self.warn('failed to import %s' % name)
items.append((name, '', '', name))
continue
self.result = ViewList() # initialize for each documenter
full_name = real_name
if not isinstance(obj, ModuleType):
# give explicitly separated module name, so that members
# of inner classes can be documented
full_name = modname + '::' + full_name[len(modname)+1:]
# NB. using full_name here is important, since Documenters
# handle module prefixes slightly differently
documenter = get_documenter(obj, parent)(self, full_name)
if not documenter.parse_name():
self.warn('failed to parse name %s' % real_name)
items.append((display_name, '', '', real_name))
continue
if not documenter.import_object():
self.warn('failed to import object %s' % real_name)
items.append((display_name, '', '', real_name))
continue
if documenter.options.members and not documenter.check_module():
continue
# try to also get a source code analyzer for attribute docs
try:
documenter.analyzer = ModuleAnalyzer.for_module(
documenter.get_real_modname())
# parse right now, to get PycodeErrors on parsing (results will
# be cached anyway)
documenter.analyzer.find_attr_docs()
except PycodeError as err:
documenter.env.app.debug(
'[autodoc] module analyzer failed: %s', err)
# no source file -- e.g. for builtin and C modules
documenter.analyzer = None
# -- Grab the signature
sig = documenter.format_signature()
if not sig:
sig = ''
else:
max_chars = max(10, max_item_chars - len(display_name))
sig = mangle_signature(sig, max_chars=max_chars)
sig = sig.replace('*', r'\*')
# -- Grab the summary
documenter.add_content(None)
doc = list(documenter.process_doc([self.result.data]))
while doc and not doc[0].strip():
doc.pop(0)
# If there's a blank line, then we can assume the first sentence /
# paragraph has ended, so anything after shouldn't be part of the
# summary
for i, piece in enumerate(doc):
if not piece.strip():
doc = doc[:i]
break
# Try to find the "first sentence", which may span multiple lines
m = re.search(r"^([A-Z].*?\.)(?:\s|$)", " ".join(doc).strip())
if m:
summary = m.group(1).strip()
elif doc:
summary = doc[0].strip()
else:
summary = ''
items.append((display_name, sig, summary, real_name))
return items
def get_table(self, items):
"""Generate a proper list of table nodes for autosummary:: directive.
*items* is a list produced by :meth:`get_items`.
"""
table_spec = addnodes.tabular_col_spec()
table_spec['spec'] = 'p{0.5\linewidth}p{0.5\linewidth}'
table = autosummary_table('')
real_table = nodes.table('', classes=['longtable'])
table.append(real_table)
group = nodes.tgroup('', cols=2)
real_table.append(group)
group.append(nodes.colspec('', colwidth=10))
group.append(nodes.colspec('', colwidth=90))
body = nodes.tbody('')
group.append(body)
def append_row(*column_texts):
row = nodes.row('')
for text in column_texts:
node = nodes.paragraph('')
vl = ViewList()
vl.append(text, '<autosummary>')
self.state.nested_parse(vl, 0, node)
try:
if isinstance(node[0], nodes.paragraph):
node = node[0]
except IndexError:
pass
row.append(nodes.entry('', node))
body.append(row)
for name, sig, summary, real_name in items:
qualifier = 'obj'
if 'nosignatures' not in self.options:
col1 = ':%s:`%s <%s>`\ %s' % (qualifier, name, real_name, rst.escape(sig))
else:
col1 = ':%s:`%s <%s>`' % (qualifier, name, real_name)
col2 = summary
append_row(col1, col2)
return [table_spec, table]
def mangle_signature(sig, max_chars=30):
"""Reformat a function signature to a more compact form."""
s = re.sub(r"^\((.*)\)$", r"\1", sig).strip()
# Strip strings (which can contain things that confuse the code below)
s = re.sub(r"\\\\", "", s)
s = re.sub(r"\\'", "", s)
s = re.sub(r"'[^']*'", "", s)
# Parse the signature to arguments + options
args = []
opts = []
opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)=")
while s:
m = opt_re.search(s)
if not m:
# The rest are arguments
args = s.split(', ')
break
opts.insert(0, m.group(2))
s = m.group(1)[:-2]
# Produce a more compact signature
sig = limited_join(", ", args, max_chars=max_chars-2)
if opts:
if not sig:
sig = "[%s]" % limited_join(", ", opts, max_chars=max_chars-4)
elif len(sig) < max_chars - 4 - 2 - 3:
sig += "[, %s]" % limited_join(", ", opts,
max_chars=max_chars-len(sig)-4-2)
return u"(%s)" % sig
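# Example (illustrative): with the default max_chars, mangle_signature("(a, b, c='x', d=1)")
# collapses the keyword arguments into an optional group and returns "(a, b[, c, d])".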
def limited_join(sep, items, max_chars=30, overflow_marker="..."):
"""Join a number of strings to one, limiting the length to *max_chars*.
If the string overflows this limit, replace the last fitting item by
*overflow_marker*.
Returns: joined_string
"""
full_str = sep.join(items)
if len(full_str) < max_chars:
return full_str
n_chars = 0
n_items = 0
for j, item in enumerate(items):
n_chars += len(item) + len(sep)
if n_chars < max_chars - len(overflow_marker):
n_items += 1
else:
break
return sep.join(list(items[:n_items]) + [overflow_marker])
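# Example (illustrative): limited_join(", ", ["alpha", "beta", "gamma"], max_chars=12)
# keeps only the items that fit and returns "alpha, ...".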
# -- Importing items -----------------------------------------------------------
def get_import_prefixes_from_env(env):
"""
Obtain current Python import prefixes (for `import_by_name`)
from ``document.env``
"""
prefixes = [None]
currmodule = env.ref_context.get('py:module')
if currmodule:
prefixes.insert(0, currmodule)
currclass = env.ref_context.get('py:class')
if currclass:
if currmodule:
prefixes.insert(0, currmodule + "." + currclass)
else:
prefixes.insert(0, currclass)
return prefixes
def import_by_name(name, prefixes=[None]):
"""Import a Python object that has the given *name*, under one of the
*prefixes*. The first name that succeeds is used.
"""
tried = []
for prefix in prefixes:
try:
if prefix:
prefixed_name = '.'.join([prefix, name])
else:
prefixed_name = name
obj, parent, modname = _import_by_name(prefixed_name)
return prefixed_name, obj, parent, modname
except ImportError:
tried.append(prefixed_name)
raise ImportError('no module named %s' % ' or '.join(tried))
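# Example (illustrative): import_by_name('os.path.join') resolves to roughly
# ('os.path.join', <function join>, <module 'posixpath'>, 'os.path').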
def _import_by_name(name):
"""Import a Python object given its full name."""
try:
name_parts = name.split('.')
# try first interpret `name` as MODNAME.OBJ
modname = '.'.join(name_parts[:-1])
if modname:
try:
__import__(modname)
mod = sys.modules[modname]
return getattr(mod, name_parts[-1]), mod, modname
except (ImportError, IndexError, AttributeError):
pass
# ... then as MODNAME, MODNAME.OBJ1, MODNAME.OBJ1.OBJ2, ...
last_j = 0
modname = None
for j in reversed(range(1, len(name_parts)+1)):
last_j = j
modname = '.'.join(name_parts[:j])
try:
__import__(modname)
except ImportError:
continue
if modname in sys.modules:
break
if last_j < len(name_parts):
parent = None
obj = sys.modules[modname]
for obj_name in name_parts[last_j:]:
parent = obj
obj = getattr(obj, obj_name)
return obj, parent, modname
else:
return sys.modules[modname], None, modname
except (ValueError, ImportError, AttributeError, KeyError) as e:
raise ImportError(*e.args)
# -- :autolink: (smart default role) -------------------------------------------
def autolink_role(typ, rawtext, etext, lineno, inliner,
options={}, content=[]):
"""Smart linking role.
Expands to ':obj:`text`' if `text` is an object that can be imported;
otherwise expands to '*text*'.
"""
env = inliner.document.settings.env
r = env.get_domain('py').role('obj')(
'obj', rawtext, etext, lineno, inliner, options, content)
pnode = r[0][0]
prefixes = get_import_prefixes_from_env(env)
try:
name, obj, parent, modname = import_by_name(pnode['reftarget'], prefixes)
except ImportError:
content = pnode[0]
r[0][0] = nodes.emphasis(rawtext, content[0].astext(),
classes=content['classes'])
return r
def get_rst_suffix(app):
def get_supported_format(suffix):
parser_class = app.config.source_parsers.get(suffix)
if parser_class is None:
return ('restructuredtext',)
if isinstance(parser_class, string_types):
parser_class = import_object(parser_class, 'source parser')
return parser_class.supported
for suffix in app.config.source_suffix:
if 'restructuredtext' in get_supported_format(suffix):
return suffix
return None
def process_generate_options(app):
genfiles = app.config.autosummary_generate
if genfiles and not hasattr(genfiles, '__len__'):
env = app.builder.env
genfiles = [env.doc2path(x, base=None) for x in env.found_docs
if os.path.isfile(env.doc2path(x))]
if not genfiles:
return
from sphinx.ext.autosummary.generate import generate_autosummary_docs
ext = app.config.source_suffix
genfiles = [genfile + (not genfile.endswith(tuple(ext)) and ext[0] or '')
for genfile in genfiles]
suffix = get_rst_suffix(app)
if suffix is None:
        app.warn('autosummary generates .rst files internally. '
'But your source_suffix does not contain .rst. Skipped.')
return
generate_autosummary_docs(genfiles, builder=app.builder,
warn=app.warn, info=app.info, suffix=suffix,
base_path=app.srcdir)
def setup(app):
# I need autodoc
app.setup_extension('sphinx.ext.autodoc')
app.add_node(autosummary_toc,
html=(autosummary_toc_visit_html, autosummary_noop),
latex=(autosummary_noop, autosummary_noop),
text=(autosummary_noop, autosummary_noop),
man=(autosummary_noop, autosummary_noop),
texinfo=(autosummary_noop, autosummary_noop))
app.add_node(autosummary_table,
html=(autosummary_table_visit_html, autosummary_noop),
latex=(autosummary_noop, autosummary_noop),
text=(autosummary_noop, autosummary_noop),
man=(autosummary_noop, autosummary_noop),
texinfo=(autosummary_noop, autosummary_noop))
app.add_directive('autosummary', Autosummary)
app.add_role('autolink', autolink_role)
app.connect('doctree-read', process_autosummary_toc)
app.connect('builder-inited', process_generate_options)
app.add_config_value('autosummary_generate', [], True, [bool])
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
|
[
"neza@Nezas-MacBook-Pro.local"
] |
neza@Nezas-MacBook-Pro.local
|
bea2ef34103c1be80a214a122e6c43d572c7b693
|
f4f09a1a04ff6f3029b5bd4c7f16a7315387cdd3
|
/avatar_pose_controls/handctrl_gestures/__i.py
|
308487f520d39e61e5666a7e30a22a54ac1c8d1e
|
[
"MIT"
] |
permissive
|
YetzabethGC/chilean_sign_language_speller
|
469d268c90c8d1b379a7ae95fe655cfe0c7ea23b
|
c7fa69321ef4a6073b88979432eca98f27f7bf47
|
refs/heads/master
| 2023-02-02T12:52:21.211099
| 2020-12-21T22:30:05
| 2020-12-21T22:30:05
| 284,134,122
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 853
|
py
|
import bpy
import sys
sys.path.append("/home/vpoblete/yetzabethg/New Folder/") # DIRECTORY CONTAINING THE CONTROL SCRIPTS
import handctrl as hc
##### HAND CONTROL #####
# Left hand
hc.hLH(0) # HEIGHT (0=REST, 1=STOMACH, 2=CHEST, 3=NECK, 4=FACE, 5=HEAD)
hc.dLH(0) # DISTANCE FROM THE BODY (0=CENTER, 1=AWAY)
hc.lhF(0,0,0,0,0) # FINGER CONTROL (1=THUMB, 2=INDEX, 3=MIDDLE, 4=RING, 5=PINKY), VALUES FROM 0 (OPEN) TO 6 (CLOSED)
##### HAND CONTROL #####
# Right hand
hc.hRH(3) # HEIGHT (0=REST, 1=STOMACH, 2=CHEST, 3=NECK, 4=FACE, 5=HEAD)
hc.dRH(0) # DISTANCE FROM THE BODY (0=CENTER, 1=AWAY, 2=OPPOSITE SIDE)
hc.rhF(6,6,6,6,0) # FINGER CONTROL (1=THUMB, 2=INDEX, 3=MIDDLE, 4=RING, 5=PINKY), VALUES FROM 0 (OPEN) TO 6 (CLOSED)
hc.detRF(2,0,0)
hc.detRF(1,0,0)
hc.rotRH(0,0)
|
[
"yetzabeth.gonzalez@hotmail.com"
] |
yetzabeth.gonzalez@hotmail.com
|
13c365ba493e7a5703589a2025cbc4b9bf7f895a
|
0de1ffce4bc996e44561907e31fa3e25d93be03b
|
/lab 2/main.py
|
131d7cde3f017742bfbef0849a641a6752639361
|
[] |
no_license
|
moxxiq/db-2y-labs
|
59b891ac3c6b9e224957fc73de7336c251d2244c
|
966c277471d4f1afabadf4ad882d05887018d46c
|
refs/heads/master
| 2023-03-23T10:37:05.771818
| 2021-02-02T22:48:10
| 2021-02-02T22:48:10
| 257,173,634
| 1
| 0
| null | 2021-03-19T12:36:26
| 2020-04-20T04:44:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,430
|
py
|
import cx_Oracle
import cred
conn = cx_Oracle.connect(cred.name, cred.passw, "localhost/XE")
cur = conn.cursor()
cur.execute("""
SELECT *
FROM (SELECT ARTIST_ID, ARTIST_NAME, COUNT(RAA.ARTWORK_ARTWORK_ID) ARTWORKS_COUNT
FROM ARTIST
LEFT JOIN RELATION_ARTWORK_ARTIST RAA on ARTIST.ARTIST_ID = RAA.ARTIST_ARTIST_ID
GROUP BY ARTIST_ID, ARTIST_NAME
ORDER BY ARTWORKS_COUNT DESC)
WHERE ROWNUM <= 10
""")
query1 = cur.fetchmany(10)
print('\nЗапит 1')
print(*list(i[0] for i in cur.description))
for row in query1:
print(*row)
cur.execute("""
SELECT PROC_OFFICER.PROC_OFFICER_NAME, round(COUNT(RELATION_AO.ARTWORK_ARTWORK_ID)*100/all_count,2) ARTWORKS_CREDITED_COUNT
FROM PROC_OFFICER
JOIN RELATION_AO on PROC_OFFICER.PROC_OFFICER_NAME = RELATION_AO.PROC_OFFICER_NAME
, (select count(ARTWORK_ID) as all_count from ARTWORK)
GROUP BY PROC_OFFICER.PROC_OFFICER_NAME, all_count
ORDER BY ARTWORKS_CREDITED_COUNT DESC
""")
print('\nЗапит 2')
query2 = cur.fetchmany(10)
print(*list(i[0] for i in cur.description))
for row in query2:
print(*row)
cur.execute("""
SELECT ARTWORK_CREATION_YEAR, COUNT(ARTWORK_ID) NUMBER_OF_ARTWORKS
FROM ARTWORK
GROUP BY ARTWORK_CREATION_YEAR
ORDER BY ARTWORK_CREATION_YEAR
""")
print('\nЗапит 3')
query3 = cur.fetchmany(10)
print(*list(i[0] for i in cur.description))
for row in query3:
print(*row)
cur.close()
conn.close()
|
[
"dimanavsisto@gmail.com"
] |
dimanavsisto@gmail.com
|
50f0a61a6acffc49e6f9907a711fbfbce9e14a7c
|
9fa6547def43d4d56892eafa3f0a7f7b54a50227
|
/src/backend/app/tasks/config.py
|
3d9fb115ecd781a3ffd1f1a581be539c4afd6933
|
[
"Apache-2.0"
] |
permissive
|
code4security/LuWu
|
f66c23fe17605840c2781e38d2378f5ede03c6bd
|
47ae6e1c5a177b1c5e4253f4158db5a50c04d1dc
|
refs/heads/master
| 2022-11-26T08:40:29.565173
| 2020-07-08T01:14:01
| 2020-07-08T01:14:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,813
|
py
|
from typing import List
from pydantic import parse_obj_as
from app.crud.crud_config import crud_isp
from app.db.session import session_manager
from app.schemas.config import IspItem
from app.tasks.base import BaseTask
from utils.redis import RedisPool
class ReloadVpsIspConfigTask(BaseTask):
name = 'reload_vps_isp_config'
def run(self, *args, **kwargs) -> dict:
task_result = {
'handled_vps_isp_list': []
}
with session_manager() as db_session:
vps_isp_obj_list = crud_isp.get_vps_isp_list(db_session).all()
vps_isp_data_list = parse_obj_as(List[IspItem], vps_isp_obj_list)
for vps_isp_data in vps_isp_data_list:
isp_provider_name = vps_isp_data.provider_name
if isp_provider_name in task_result['handled_vps_isp_list']:
continue
reload_result = self.reload_vps_isp_config(vps_isp_data.id)
if reload_result:
task_result['handled_vps_isp_list'].append(isp_provider_name)
return self.set_result(task_result)
def reload_vps_isp_config(self, vps_isp_id: int) -> bool:
with session_manager() as db_session:
rp = RedisPool()
try:
vps_raw_spec_data = rp.get_vps_spec_data(
db_session=db_session, isp_id=vps_isp_id, reload=True
)
            except Exception:
vps_raw_spec_data = None
return bool(vps_raw_spec_data)
class CreateVpsIspSshKey(BaseTask):
name = 'create_vps_isp_ssh_key'
def run(self, vps_isp_id: int):
create_result = {}
# with session_manager() as db_session:
# vps_isp_obj = crud_isp.get(db_session=db_session, id=vps_isp_id)
# if vps_isp_obj and vps_isp_obj.isp_instance.is_valid_account:
# ssh_key_data, ssh_key_created = crud_vps_ssh_key.get_or_create_ssh_key_data(
# db_session, vps_isp_id
# )
# if ssh_key_created:
# unix_timestamp = int(self.now_time.utcnow().timestamp())
# isp_ssh_key_id = vps_isp_obj.isp_instance.create_ssh_key(
# name=f"{ssh_key_data['name']}-{unix_timestamp}",
# public_key_content=ssh_key_data['public_key']
# )
# crud_vps_ssh_key.filter_by(
# db_session=db_session,
# id=ssh_key_data['id']
# ).update({'ssh_key_id': isp_ssh_key_id})
# create_result.update({
# 'ssh_key_data': ssh_key_data,
# 'ssh_key_created': ssh_key_created
# })
return self.set_result(create_result)
|
[
"root@lazymaple.pw"
] |
root@lazymaple.pw
|
704af671c34b28fe54897638a551ae17e85e2de8
|
77f880463d3dc6a7d5e314c1001641370b00e49b
|
/tests/test_client.py
|
d47c69a88a4435a499209cb2f66606431a49934a
|
[
"MIT"
] |
permissive
|
IngoKl/simple-xapi-client
|
81e615698a33f01bd76d4e5b2dee9000b1397d58
|
23258d2af5d461f95d431265f8af6674f7f05b32
|
refs/heads/master
| 2022-12-08T08:01:48.160952
| 2020-08-24T10:57:25
| 2020-08-24T10:57:25
| 289,899,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,812
|
py
|
import uuid
import configparser
from simple_xapi_client import LRS, XapiStatement, XapiActor, XapiVerb, XapiObject
config = configparser.ConfigParser()
config.read('settings.conf')
test_cfg = config['test']
def test_config():
assert(config.sections() == ['test'])
def test_simple_put_statement():
statement = {
'id': str(uuid.uuid1()),
'actor': {'name': 'TestUser', 'mbox': 'mailto:test@simple-xapi-client.com'},
'verb': {'id': 'http://activitystrea.ms/schema/1.0/accept'},
'object': {'id': 'http://simple-xapi-client.com/test'}
}
client = LRS(test_cfg['xapi_endpoint'], test_cfg['xapi_basica_username'], test_cfg['xapi_basica_password'])
assert(client.put_statement(statement) in [200, 204])
def test_statement():
actor = XapiActor('Tester', 'tester@simple-xapi-client.com')
obj = XapiObject('http://simple-xapi-client.com/essay', 'Essay')
verb = XapiVerb('created')
statement = XapiStatement(actor, verb, obj)
client = LRS(test_cfg['xapi_endpoint'], test_cfg['xapi_basica_username'], test_cfg['xapi_basica_password'])
assert(client.put_statement(statement) in [200, 204])
def test_statement_custom_object():
actor = XapiActor('Tester', 'tester@simple-xapi-client.com')
custom_object_definition = {
'type': 'http://adlnet.gov/expapi/activities/course',
'name': {'en-US': 'Python Test'},
'description': {'en-US': 'A simple test'}
}
obj = XapiObject('http://simple-xapi-client.com/custom', custom_object_definition)
verb = XapiVerb('created')
statement = XapiStatement(actor, verb, obj)
client = LRS(test_cfg['xapi_endpoint'], test_cfg['xapi_basica_username'], test_cfg['xapi_basica_password'])
assert(client.put_statement(statement) in [200, 204])
def test_statement_context():
actor = XapiActor('Tester', 'tester@simple-xapi-client.com')
obj = XapiObject('http://simple-xapi-client.com/essay', 'Essay')
verb = XapiVerb('created')
context = {'instructor': {'name': 'Tester', 'mbox': 'mailto:tester@simple-xapi-client.com'}}
statement = XapiStatement(actor, verb, obj, context=context)
client = LRS(test_cfg['xapi_endpoint'], test_cfg['xapi_basica_username'], test_cfg['xapi_basica_password'])
assert(client.put_statement(statement) in [200, 204])
def test_statement_result():
actor = XapiActor('Tester', 'tester@simple-xapi-client.com')
obj = XapiObject('http://simple-xapi-client.com/essay', 'Essay')
verb = XapiVerb('created')
result = {'completion': True, 'success': True}
statement = XapiStatement(actor, verb, obj, result=result)
client = LRS(test_cfg['xapi_endpoint'], test_cfg['xapi_basica_username'], test_cfg['xapi_basica_password'])
assert(client.put_statement(statement) in [200, 204])
|
[
"ikleiber@gmail.com"
] |
ikleiber@gmail.com
|
7c84eefc810d6353c7ea038bea797d19876b7ea6
|
ded95a944d97f8892f0041242181d035ef120644
|
/sigmaz_nonadiabatic/nlevels=20/non_adiabtic.py
|
90125498bab58acdd70f4858a82f18aebbc30adf
|
[] |
no_license
|
Mulliken/dataAnalysis
|
ed844fbe7366a19ac3b47754efe5edb137ec8eb4
|
0f5a891587e791ae607fb7f6fb033f6088ae39d0
|
refs/heads/main
| 2023-08-19T09:50:20.621929
| 2021-10-08T15:57:52
| 2021-10-08T15:57:52
| 391,789,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,231
|
py
|
import numpy as np
from scipy.optimize import curve_fit
# from fishbonett.starSpinBoson import SpinBoson, SpinBoson1D
from fishbonett.backwardSpinBoson import SpinBoson, SpinBoson1D, calc_U
from fishbonett.stuff import sigma_x, sigma_z, temp_factor, sd_zero_temp, drude1, lemmer, drude, _num, sigma_1
from scipy.linalg import expm
from time import time
import sys
ene = int(sys.argv[1])
temp = int(sys.argv[2])
gam=int(sys.argv[3])
coupling = int(sys.argv[4])
bath_length =int(200*5*1.5)
phys_dim = 20
bond_dim = 1000
a = [np.ceil(phys_dim - N*(phys_dim -2)/ bath_length) for N in range(bath_length)]
a = [int(x) for x in a]
a = [phys_dim]*bath_length
print(a)
pd = a[::-1] + [2]
eth = SpinBoson(pd)
etn = SpinBoson1D(pd)
# set the initial state of the system. It's in the high-energy state |0>:
etn.B[-1][0, 1, 0] = 0.
etn.B[-1][0, 0, 0] = 1.
# spectral density parameters
g = 2000
eth.domain = [-g, g]
j = lambda w: drude(w, lam=3952.11670, gam=gam)* temp_factor(temp,w)
eth.sd = j
eth.he_dy = np.diag([-1, 1])/2
eth.h1e = np.diag([ene, 0]) + coupling*sigma_x
eth.build(g=1., ncap=50000)
print(eth.w_list)
print(eth.k_list)
# 0.5 ps ~ 0.1T
p = []
threshold = 1e-3
dt = 0.005/10
num_steps = 100*5 # Due to 2nd order Trotter, actual time is dt*2*num_steps
t = 0.
tt0=time()
for tn in range(num_steps):
U1, U2 = eth.get_u(2*tn*dt, 2*dt, factor=2)
t0 = time()
etn.U = U1
for j in range(bath_length-1,0,-1):
print("j==", j, tn)
etn.update_bond(j, bond_dim, threshold, swap=1)
etn.update_bond(0, bond_dim, threshold, swap=0)
etn.update_bond(0, bond_dim, threshold, swap=0)
t1 = time()
t = t + t1 - t0
t0 = time()
etn.U = U2
for j in range(1, bath_length):
print("j==", j, tn)
etn.update_bond(j, bond_dim, threshold,swap=1)
dim = [len(s) for s in etn.S]
theta = etn.get_theta1(bath_length) # c.shape vL i vR
rho = np.einsum('LiR,LjR->ij', theta, theta.conj())
pop = np.abs(rho[0,0])
p = p + [pop]
t1 = time()
t = t + t1 - t0
tt1 = time()
print(tt1-tt0)
pop = [x.real for x in p]
print("population", pop)
pop = np.array(pop)
pop.astype('float32').tofile(f'./output/pop_sigmaz_{coupling}_{temp}_{ene}_{gam}.dat')
|
[
"kavat.lux@gmail.com"
] |
kavat.lux@gmail.com
|
96871bfb4da9eeaef49b953463556f6f8f8b9173
|
254db35021faf41430b3ffc98f1deae17c881ea5
|
/wine_recipes/urls.py
|
2b49eee6357796ac7b3ded5a70da46ad6d1e6e2b
|
[] |
no_license
|
smiledt/IS411-Winesitters
|
4e4f71f9eaf8f9468d2c05413f97ac5bad4bdeaa
|
2e6946a6b7f558e3ec441d0a77505587fda48c37
|
refs/heads/master
| 2023-07-31T03:52:56.209082
| 2020-05-12T11:17:19
| 2020-05-12T11:17:19
| 238,153,903
| 1
| 0
| null | 2021-09-22T18:59:20
| 2020-02-04T08:05:11
|
HTML
|
UTF-8
|
Python
| false
| false
| 350
|
py
|
from django.urls import path
from wine_recipes import views
app_name = 'wine_recipes'
urlpatterns = [
# Index pattern
path('', views.index, name='index'),
# /wine_recipes pattern, this one will be the final
path('wine_recipes/', views.recipes, name='wine_recipes'),
path('new_recipe/', views.new_recipe, name='new_recipe')
]
|
[
"dereksmilees@gmail.com"
] |
dereksmilees@gmail.com
|
3e33a95740de2a609722833a58f612500ad751a6
|
0bcb70eceea00ed1ed3627eee0be0b72e3112dd8
|
/seok/views/vote_views.py
|
7b9ea427e2020168ece3bb569c5f0557716045b2
|
[] |
no_license
|
odongs/dong
|
d6afad4e7fe58d8e1e71cd265a280b5228643320
|
17ae5f8eafefe13939d8bc2a6573a99312256b9c
|
refs/heads/master
| 2023-03-19T11:46:35.394134
| 2021-03-16T06:40:48
| 2021-03-16T06:40:48
| 328,822,109
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, redirect
from ..models import Question, Answer
@login_required(login_url='common:login')
def vote_question(request, question_id):
"""
    seok: register a recommendation (upvote) for a question
"""
question = get_object_or_404(Question, pk=question_id)
if request.user == question.author:
messages.error(request, '본인이 작성한 글은 추천할수 없습니다')
else:
question.voter.add(request.user)
return redirect('seok:detail', question_id=question.id)
@login_required(login_url='common:login')
def vote_answer(request, answer_id):
"""
    seok: register a recommendation (upvote) for an answer
"""
answer = get_object_or_404(Answer, pk=answer_id)
if request.user == answer.author:
messages.error(request, '본인이 작성한 글은 추천할수 없습니다')
else:
answer.voter.add(request.user)
return redirect('seok:detail', question_id=answer.question.id)
|
[
"dhehdtjr007@naver.com"
] |
dhehdtjr007@naver.com
|
2735e4cab031de78116192c8f4f40ac4584083ce
|
2dd7c708b76d6affba1323ff7b57b768200e863b
|
/2015/Day_18/day18_1.py
|
84ebf3f30809064e6995eb1362784a4a635bd467
|
[] |
no_license
|
Daniel-Fernandez-951/AdventOfCode
|
c8cccf9742e3996ef1235ac7f9ab0391ecc10157
|
d708d089fc1170a76e54c8b25332063e55fa8329
|
refs/heads/main
| 2023-02-01T04:27:17.216222
| 2020-12-18T08:04:10
| 2020-12-18T08:04:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,303
|
py
|
from copy import deepcopy
lights = [[True if x == '#' else False for x in y] for y in open('AOC_2015\day18.txt').read().split('\n')]
def get_num_neighbors(x, y, lights):
num_neighbors = 0
nums = [-1, 0, 1]
for a in nums:
for b in nums:
if x+a < 0 or b+y < 0:
continue
elif a == b == 0:
continue
try:
if lights[x+a][y+b]:
num_neighbors += 1
except IndexError:
pass
return num_neighbors
for step in range(100):
new_lights = []
for x in range(len(lights)):
row = []
for y in range(len(lights[x])):
a = get_num_neighbors(x, y, lights)
if lights[x][y]:
if a == 3 or a == 2:
row.append(True)
else:
row.append(False)
else:
if a == 3:
row.append(True)
else:
row.append(False)
new_lights.append(row)
lights = deepcopy(new_lights)
# print(*(''.join(['#' if r else '.' for r in l]) for l in lights), sep='\n')
# print()
total = 0
for x in lights:
total += x.count(True)
print(total)
|
[
"Max.Techniche@gmail.com"
] |
Max.Techniche@gmail.com
|
458f132534c8a8adb8eae398e8594983f35991ad
|
4501dc41b82969b665f9d68ffc8729fabbd50b60
|
/1-flask/venv/bin/flask
|
961e2559b9e1cecd0dcd703ed36f1016a11f90f3
|
[] |
no_license
|
iancmason/100daysOfPython
|
f12ad1498a7f1d6a635fc73cfb0aaf09c59fcb68
|
a93ba6ed1b4ab009734c0454f4f132114a16abb9
|
refs/heads/master
| 2021-06-21T07:18:42.825660
| 2021-01-22T19:35:05
| 2021-01-22T19:35:05
| 201,953,731
| 0
| 0
| null | 2021-03-30T02:44:51
| 2019-08-12T15:06:21
|
Python
|
UTF-8
|
Python
| false
| false
| 260
|
#!/Users/imason/code/Python/100daysOfCode/1-flask/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"ian.christopher.mason@gmail.com"
] |
ian.christopher.mason@gmail.com
|
|
add8e9fd2525edaad3e388569632dcd7a8a6e0c0
|
abc4991a47998d5a6946c2609cc9b6926b95c59e
|
/get_by_tor.py
|
e0fb5200e94e646706f21f1cd2f52066f1600114
|
[
"MIT"
] |
permissive
|
Guiraud/Formation-Scraping-1
|
09ad6651d3d210f41300fd126008d7bfcc6f685f
|
2194a06da9fc727abe160157295f7e984161c0a3
|
refs/heads/master
| 2022-12-12T21:02:11.477275
| 2019-08-07T21:22:52
| 2019-08-07T21:22:52
| 201,124,297
| 0
| 0
|
MIT
| 2022-12-08T05:59:20
| 2019-08-07T20:37:08
|
Python
|
UTF-8
|
Python
| false
| false
| 872
|
py
|
from selenium import webdriver
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
import os
profile = FirefoxProfile(r'/Users/mguiraud/Library/Application Support/TorBrowser-Data/Browser/o45k8lgc.default')
profile.set_preference('network.proxy.type', 1)
profile.set_preference('network.proxy.socks', '127.0.0.1')
profile.set_preference('network.proxy.socks_port', 9050)
profile.set_preference("network.proxy.socks_remote_dns", False)
profile.update_preferences()
#driver = webdriver.Firefox()
driver = webdriver.Firefox(firefox_profile=profile)
driver.get("http://check.torproject.org")
print("done !")
print(driver.find_elements_by_css_selector(".small")[0].text)
ok_tor = ".on"
no_tor = ".off"
if driver.find_elements_by_css_selector(ok_tor):
print("ça marche :) ")
if driver.find_elements_by_css_selector(no_tor):
print("ça marche pô :(")
|
[
"mehdi.guiraud@gmail.com"
] |
mehdi.guiraud@gmail.com
|
62755ebfabc1c13420961bb6af14666e8286ca85
|
a477e529b162fe45ef4ba0a73c847096e73dae57
|
/source_view/urls.py
|
0bc818995892077897730d447d324f0a53e6211e
|
[] |
no_license
|
ubhisat/slack_source_view
|
13ff8f0869a1957f7c13bdeaba4e32a4e5bbc42f
|
f939fc7672a0808702d5becbc74ba580ae6ae002
|
refs/heads/master
| 2021-01-01T18:37:08.772781
| 2014-11-12T08:15:16
| 2014-11-12T08:15:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
from django.conf.urls import include, url
from .views import IndexView
urlpatterns = [
url(r'^$', IndexView.as_view(),
name='index'),
]
|
[
"satmeet@nclouds.com"
] |
satmeet@nclouds.com
|
12efbe3435603eb9e953e20a0996af85885357c7
|
dc931801b35d0e2a05209378e9e1a148c73e2a98
|
/crawlers/smackjeeves_crawler.py
|
a034849493110476ff37ebfe00da7a379ff6901b
|
[] |
no_license
|
regeto/comics-crawler
|
102dfe217634c3dd8cf7c14f3c11af7564e1562a
|
6f86621acf031bdfd05761278fb29bcbd07b12fa
|
refs/heads/master
| 2016-09-06T07:09:20.825799
| 2015-01-02T21:02:51
| 2015-01-02T21:02:51
| 28,471,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 892
|
py
|
import re
from crawlers import Crawler
class SmackjeevesCrawler(Crawler):
site = "Smackjeeves"
url = "smackjeeves.com"
search = "http://www.smackjeeves.com/search.php?submit=submit&comic_title=%s"
def get_chapters(self, series_url):
prefix = series_url.split(".")[0] + "."
html = self.get_html(series_url)
regex = "<option.*?value=\"(/comics/.*?)\".*?>(.*?)<\/option>"
reg = re.compile(regex)
r = reg.findall(html)
ret = [None] * len(r)
pos = 0
for x in r:
ret[pos] = dict(url=prefix + self.url + x[0], name=x[1])
pos += 1
return ret
def get_pages(self, chapter_url):
html = self.get_html(chapter_url)
regex = "src=\"(.*?)\".*?\"comic_image\""
reg = re.compile(regex)
r = reg.findall(html)
ret = [dict(url=r[0])]
return ret
|
[
"regendokira@gmail.com"
] |
regendokira@gmail.com
|
e0e6bb89daac937ec046ab4dbb02ff8fd1a54917
|
59e3082ce3a2f9e873b908c669009acbcbf79d78
|
/speech.py
|
1d53100f60e3e8a122f23199e4b459480580971e
|
[] |
no_license
|
PROTO204/chef
|
0e5529f8f3b582677fe6ea98521ad113596be7a0
|
1df3b34cfd5596f06776b764d5284b539dc44904
|
refs/heads/master
| 2020-05-19T19:22:12.098925
| 2019-05-02T05:36:14
| 2019-05-02T05:36:14
| 185,178,035
| 0
| 0
| null | 2019-05-06T10:50:57
| 2019-05-06T10:50:57
| null |
UTF-8
|
Python
| false
| false
| 341
|
py
|
import speech_recognition as sr
r = sr.Recognizer()
with sr.Microphone() as source:
print("Say something")
audio = r.listen(source)
try:
print("You said:" + r.recognize_google(audio))
except sr.UnknownValueError:
print("could not understand audio")
except sr.RequestError as e:
print(format(e))
|
[
"siba@nyu.edu"
] |
siba@nyu.edu
|
19b4e9540fda721864be49fb8fc6b663fcd29f9f
|
e500f23b931008b33e8e8f46e4ebc46bd5e35cd6
|
/data_layer.py
|
a0caf0643ec1b8071963348f249c279580a31e4c
|
[] |
no_license
|
spathak99/Covid-19-Visualization
|
5e30123b059f48348bd6fb415483811814509f1a
|
3d80c2aae77c5025644a367168b2dafab3817136
|
refs/heads/master
| 2022-05-20T20:55:58.905677
| 2020-04-23T06:44:19
| 2020-04-23T06:44:19
| 258,126,435
| 1
| 0
| null | 2020-04-23T07:24:50
| 2020-04-23T07:24:49
| null |
UTF-8
|
Python
| false
| false
| 4,513
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 20 05:24:53 2020
@author: lukas
"""
#imports
import pandas as pd
import global_code
#Data columns as a variable struct
class Columns:
SNO = 'SNo'
OBSERVATION_DATE = 'ObservationDate'
PROVINCE = 'Province/State'
COUNTRY = 'Country/Region'
LAST_UPDATE = 'Last Update'
CONFIRMED = 'Confirmed'
DEATHS = 'Deaths'
RECOVERED = 'Recovered'
ALL = [SNO, OBSERVATION_DATE, PROVINCE, COUNTRY, LAST_UPDATE, CONFIRMED, DEATHS, RECOVERED]
logger = global_code.getLogger()
#start code
class DataLayer:
'''
Handles all data access
## documentation about all fields stored: ##
-dataframe
contains basically all data as table
-all_countries
        a list of all countries where data is available
-province_mapping
a list for each country of its subregion/regions
e.g. province_mapping = ['USA':['Massach.', 'Ohio', ..], 'Spain':[..], ..]
-total_days_recorded
        for each day recorded, 1 entry in format 'MM/DD/YYYY'
-accum_deaths_for_day
        a mapping of accumulated deaths per day [03/27/2020:151, 03/28/2020:162,..]
-accum_recovery_for_day
        a mapping of accumulated recovery per day [03/27/2020:151, 03/28/2020:162,..]
-accum_confirms_for_day
        a mapping of accumulated confirmed cases per day [03/27/2020:151, 03/28/2020:162,..]
'''
def __init__(self, datafile):
#load the data given
        #store it in a more specific data format
        #answer queries from VierController
self.__process_input_data(datafile)
    #--- private util methods ----------------------------------------------------
def __process_input_data(self, datafile):
logger.log('Preprocessing data structures..')
dframe = pd.read_csv(datafile)
self.dataframe = dframe
#get all countries
countries = dframe[Columns.COUNTRY].unique()
self.all_countries = countries
#add all provinces to country
self.province_mapping = {}
for country in self.all_countries:
country_indices = dframe[Columns.COUNTRY] == country
provinces = dframe[country_indices][Columns.PROVINCE].unique()
self.province_mapping[country] = list(provinces)
#get all dates available
self.total_days_recorded = dframe[Columns.OBSERVATION_DATE].unique()
#for each day, record how many people died, recovered or infected
self.accum_deaths_for_day = {}
self.accum_recovery_for_day = {}
self.accum_confirms_for_day = {}
for day in self.total_days_recorded:
current_day_indices = dframe[Columns.OBSERVATION_DATE] == day
total_day_deaths = dframe[current_day_indices][Columns.DEATHS].sum()
total_day_recovers = dframe[current_day_indices][Columns.RECOVERED].sum()
total_day_confirmed = dframe[current_day_indices][Columns.CONFIRMED].sum()
self.accum_deaths_for_day[day] = total_day_deaths
self.accum_recovery_for_day[day] = total_day_recovers
self.accum_confirms_for_day[day] = total_day_confirmed
#example filter
# datafilter1 = dframe[Columns.DEATHS] > 100
# datafilter2 = dframe[Columns.PROVINCE] == 'Hubei'
# print(dframe[datafilter1 & datafilter2].head(5))
logger.log('Preprocessing geomap')
self.geo_map_dataframe = self.dataframe.rename(columns={'Country/Region':'Country'}) #copy & rename
self.geo_map_dataframe.rename(columns={'ObservationDate':'Date'}, inplace=True) #only rename
logger.log('Preprocessing finished')
    #--- public methods ----------------------------------------------------------
def get_map_dataframe(self):
final_df = self.geo_map_dataframe[self.geo_map_dataframe['Confirmed']>0]
return final_df.groupby(['Date','Country']).sum().reset_index()
def get_as_of_date(self):
no_days = len(self.total_days_recorded)
return self.total_days_recorded[no_days - 1]
def get_all_countries(self):
return self.all_countries
#Extend methods here as needed
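# Minimal usage sketch (the CSV name below is an assumption; any file following the
# Kaggle covid_19_data schema listed in Columns above works):
#   dl = DataLayer('covid_19_data.csv')
#   print(dl.get_as_of_date())
#   print(dl.get_all_countries()[:5])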
|
[
"checker-tracker@web.de"
] |
checker-tracker@web.de
|
26520cf0e4d572626cca7f3ae58470069e37fd63
|
5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5
|
/blimgui/dist/OpenGL/raw/GLES2/NV/read_buffer.py
|
638349916933fad25c3ba754755ffda4f1e717dc
|
[
"MIT"
] |
permissive
|
juso40/bl2sdk_Mods
|
8422a37ca9c2c2bbf231a2399cbcb84379b7e848
|
29f79c41cfb49ea5b1dd1bec559795727e868558
|
refs/heads/master
| 2023-08-15T02:28:38.142874
| 2023-07-22T21:48:01
| 2023-07-22T21:48:01
| 188,486,371
| 42
| 110
|
MIT
| 2022-11-20T09:47:56
| 2019-05-24T20:55:10
|
Python
|
UTF-8
|
Python
| false
| false
| 617
|
py
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_NV_read_buffer'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_NV_read_buffer',error_checker=_errors._error_checker)
GL_READ_BUFFER_NV=_C('GL_READ_BUFFER_NV',0x0C02)
@_f
@_p.types(None,_cs.GLenum)
def glReadBufferNV(mode):pass
|
[
"justin.sostmann@googlemail.com"
] |
justin.sostmann@googlemail.com
|
3f127a05979e2337238db15b7faefd643873ce8f
|
4bdd0fa82a314f7d005400fe1bb8bb7c42e9fa85
|
/manage.py
|
aaa1f9e04c23e569b10fefac82ec289aeca70ba1
|
[] |
no_license
|
PavloKuptsov/TestTask2021
|
05bf9d9fd1141eac7e029519b86a398422ea5fb7
|
19e66ea74065da1609e3abbb7046e5e90164049f
|
refs/heads/main
| 2023-04-24T13:34:00.921427
| 2021-05-12T18:54:36
| 2021-05-12T18:54:36
| 366,821,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 670
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'socialTestTask.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"pavlo.kupstov@sigma.software"
] |
pavlo.kupstov@sigma.software
|
a66a276129d5106408fd6ed8913421cd6fca40fd
|
2c09147aa1622d749d7356eb941ef53ec4793184
|
/phase_1.py
|
3502d36f447ec37638e4d2b3e2c4e0bfdce4ef38
|
[] |
no_license
|
TheBroMoe/Berkeley-DB-Kijiji-Query
|
7812ee9daa9c6e538643a9a8d65c26b5cd23074c
|
dea68293824889157c3336579c009614ca1f13e4
|
refs/heads/master
| 2020-04-07T18:40:42.484991
| 2019-07-22T00:24:41
| 2019-07-22T00:24:41
| 158,619,521
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,607
|
py
|
import re
'''
This file includes terms extracted from ad titles and descriptions; for our purpose, suppose a term is a consecutive sequence of alphanumeric,
underscore '_' and dash '-' characters, i.e. [0-9a-zA-Z_-]. The format of the file is as follows: for every term T in the title or the
description of an ad with id a, there is a row in this file of the form t:a where t is the lowercase form of T. Ignore all special characters
coded as &#number; (numeric character references, e.g. the one that encodes 産) as well as &apos;, &quot; and &amp; which respectively represent ', " and &. Also ignore terms
of length 2 or less. Convert the terms to all lowercase before writing them out.
'''
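# Worked example (hypothetical ad, illustrative only): an ad with id 123 whose title is
# "Mountain Bike &amp; Helmet" contributes the rows "mountain:123", "bike:123" and
# "helmet:123"; the &amp; entity is replaced by a space and terms of length 2 or less are dropped.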
def write_terms(path):
# Read file
with open(path + '-terms.txt', 'w') as fout:
with open(path + '.txt', 'r') as fin:
for line in fin:
if re.match("<ad>.*</ad>", line):
a = re.search("(<aid>)(.*)(</aid>)", line).group(2)
term_ti = re.search("(<ti>)(.*)(</ti>)", line).group(2).lower()
                    term_ti = re.sub("&amp;", " ", term_ti)
term_ti = re.sub("&.*?;", "", term_ti)
term_ti = re.sub("[^0-9a-zA-Z-_]", " ", term_ti)
ti_terms = term_ti.split()
for term in ti_terms:
result = re.search("([0-9a-zA-Z-_]+)", term).group(1)
if result is not None and len(result) > 2:
fout.write("{}:{}\n".format(result,a))
term_desc = re.search("(<desc>)(.*)(</desc>)", line).group(2).lower()
term_desc = re.sub("(")|(&apos)|(&);", " ", term_desc)
term_desc = re.sub("&.*?;", "", term_desc)
term_desc = re.sub("[^0-9a-zA-Z-_]", " ", term_desc)
desc_terms = term_desc.split()
for term in desc_terms:
result = re.search("([0-9a-zA-Z-_]+)", term).group(1)
if result is not None and len(result) > 2:
fout.write("{}:{}\n".format(result,a))
print("written to " + path + '-terms.txt')
'''
This file includes one line for each ad that has a non-empty price field in the form of p:a,c,l
where p is a number indicating the price and a, c, and l are respectively the ad id, category and location of the ad.
'''
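# Illustrative example row (made-up values): an ad 1234 in category "cars" and location
# "Edmonton" with price 2500 would produce the line "        2500:1234,cars,Edmonton"
# (the price is left-padded to a width of 12 characters by the code below).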
def write_prices(path):
# Read file
with open(path + '-prices.txt', 'w') as fout:
with open(path + '.txt', 'r') as fin:
for line in fin:
if re.match("<ad>.*</ad>", line):
p = re.search("(<price>)(.*)(</price>)", line).group(2)
p = ' ' * (12 - len(p)) + p
a = re.search("(<aid>)(.*)(</aid>)", line).group(2)
c = re.search("(<cat>)(.*)(</cat>)", line).group(2)
l = re.search("(<loc>)(.*)(</loc>)", line).group(2)
fout.write("{}:{},{},{}\n".format(p,a,c,l))
print("written to " + path + '-prices.txt')
'''
d:a,c,l where d is a non-empty date at which the ad is posted and a, c, and l are respectively the ad id, category and location of the ad.
'''
def write_pdates(path):
# Read file
with open(path + '-pdates.txt', 'w') as fout:
with open(path + '.txt', 'r') as fin:
for line in fin:
if re.match("<ad>.*</ad>", line):
d = re.search("(<date>)(.*)(</date>)", line).group(2)
a = re.search("(<aid>)(.*)(</aid>)", line).group(2)
c = re.search("(<cat>)(.*)(</cat>)", line).group(2)
l = re.search("(<loc>)(.*)(</loc>)", line).group(2)
fout.write("{}:{},{},{}\n".format(d,a,c,l))
print("written to " + path + '-pdates.txt')
'''
This file includes one line for each ad in the form of a:rec where a is the ad id and rec is the full ad record in xml.
'''
def write_ads(path):
# Read file
with open(path + '-ads.txt', 'w') as fout:
with open(path + '.txt', 'r') as fin:
for line in fin:
if re.match("<ad>.*</ad>", line):
a = re.search("(<aid>)(.*)(</aid>)", line).group(2)
fout.write("{}:{}".format(a,line))
print("written to " + path + '-pdates.txt')
def main():
print("================")
path = input("enter xml file: ")
write_terms(path)
write_pdates(path)
write_prices(path)
write_ads(path)
print("================")
if __name__ =='__main__':
main()
|
[
"wespeterson2@gmail.com"
] |
wespeterson2@gmail.com
|
8735f5b0e9167684495efe5852cebc7defa664f7
|
930309163b930559929323647b8d82238724f392
|
/abc155_c.py
|
6b0f7c6960bceb99ef3c1e6274c2f06a7b5baa8f
|
[] |
no_license
|
GINK03/atcoder-solvers
|
874251dffc9f23b187faa77c439b445e53f8dfe1
|
b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7
|
refs/heads/master
| 2021-11-07T14:16:52.138894
| 2021-09-12T13:32:29
| 2021-09-12T13:32:29
| 11,724,396
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
import collections
N=int(input())
S=[input() for i in range(N)]
S=collections.Counter(S)
max_v = max(S.values())
for k,v in sorted(list(filter(lambda x:x[1]==max_v, S.items()))):
print(k)
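# Illustrative example: with N=4 and the strings ["beet", "vet", "beet", "bed"],
# the highest count is 2, so only "beet" is printed.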
|
[
"gim.kobayashi@gmail.com"
] |
gim.kobayashi@gmail.com
|
fbc05970539a311c1532e03d1461d962abe1cae2
|
5b4312ddc24f29538dce0444b7be81e17191c005
|
/autoware.ai/1.12.0/devel/.private/vector_map_msgs/lib/python2.7/dist-packages/vector_map_msgs/msg/_PointArray.py
|
302c83b9f7d628767effb2ae4bd898435e6dc65f
|
[
"MIT"
] |
permissive
|
muyangren907/autoware
|
b842f1aeb2bfe7913fb2be002ea4fc426b4e9be2
|
5ae70f0cdaf5fc70b91cd727cf5b5f90bc399d38
|
refs/heads/master
| 2020-09-22T13:08:14.237380
| 2019-12-03T07:12:49
| 2019-12-03T07:12:49
| 225,167,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,546
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from vector_map_msgs/PointArray.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import vector_map_msgs.msg
import std_msgs.msg
class PointArray(genpy.Message):
_md5sum = "6d79425254a86e33112d6737776efb2b"
_type = "vector_map_msgs/PointArray"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
Point[] data
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: vector_map_msgs/Point
# Ver 1.00
int32 pid
float64 b
float64 l
float64 h
float64 bx
float64 ly
int32 ref
int32 mcode1
int32 mcode2
int32 mcode3
"""
__slots__ = ['header','data']
_slot_types = ['std_msgs/Header','vector_map_msgs/Point[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,data
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(PointArray, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = []
else:
self.header = std_msgs.msg.Header()
self.data = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.data)
buff.write(_struct_I.pack(length))
for val1 in self.data:
_x = val1
buff.write(_get_struct_i5d4i().pack(_x.pid, _x.b, _x.l, _x.h, _x.bx, _x.ly, _x.ref, _x.mcode1, _x.mcode2, _x.mcode3))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.data = []
for i in range(0, length):
val1 = vector_map_msgs.msg.Point()
_x = val1
start = end
end += 60
(_x.pid, _x.b, _x.l, _x.h, _x.bx, _x.ly, _x.ref, _x.mcode1, _x.mcode2, _x.mcode3,) = _get_struct_i5d4i().unpack(str[start:end])
self.data.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.data)
buff.write(_struct_I.pack(length))
for val1 in self.data:
_x = val1
buff.write(_get_struct_i5d4i().pack(_x.pid, _x.b, _x.l, _x.h, _x.bx, _x.ly, _x.ref, _x.mcode1, _x.mcode2, _x.mcode3))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.data = []
for i in range(0, length):
val1 = vector_map_msgs.msg.Point()
_x = val1
start = end
end += 60
(_x.pid, _x.b, _x.l, _x.h, _x.bx, _x.ly, _x.ref, _x.mcode1, _x.mcode2, _x.mcode3,) = _get_struct_i5d4i().unpack(str[start:end])
self.data.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_i5d4i = None
def _get_struct_i5d4i():
global _struct_i5d4i
if _struct_i5d4i is None:
_struct_i5d4i = struct.Struct("<i5d4i")
return _struct_i5d4i
|
[
"907097904@qq.com"
] |
907097904@qq.com
|
232daecfb8fa2a362ef8328b7c89287ecd2fb7aa
|
ec44c3fff2eb9179b3c5e3386467563492b66387
|
/globbing .py
|
b53b658c5247417a7a115ffd0676cdf952981737
|
[] |
no_license
|
SonNinh/terminal
|
41a3ecb61a6a067af4d308ce247f41dd73f86db9
|
61ac6605235f22248733870a0b7cb253fb153359
|
refs/heads/master
| 2021-07-07T20:46:16.262103
| 2020-08-25T15:28:10
| 2020-08-25T15:28:10
| 179,713,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 958
|
py
|
#!/usr/bin/env python3
from re import findall, match, split
from glob import glob
def get_token_list(input_from_command_line):
"""
get input string from command and split it up into tokens.
    A token is a non-space string if it begins with a word character.
    Otherwise, if it begins with a [backquote, single quote, double quote], it will
    end with the next [backquote, single quote, double quote].
    @param: string
@return: list
"""
token = "[^ ]*[`\'\"][^ ].*?[`\'\"][^ ]*|[^ ]*"
token_list = findall(token, input_from_command_line)
while '' in token_list:
token_list.remove('')
return token_list
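# Illustrative example (not part of the original tests):
#   get_token_list('echo "hello world" foo') -> ['echo', '"hello world"', 'foo']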
def get_possible_name(path_name_list):
path_name_dictionary = {}
for path_name in path_name_list:
path_name_dictionary[path_name] = '\n'.join(glob(path_name))
return path_name_dictionary
def main():
args = "echo \"asddasd\""
print(get_token_list(args))
if __name__ == '__main__':
main()
|
[
"sonninhngocba@gmail.com"
] |
sonninhngocba@gmail.com
|
2326a5cd67d0e36dfc987657a3b77f64b1108019
|
5de646fb3ecf10ecb45e05018a23b6345fb9ca53
|
/codejam/2020 Qualification Round/d.py
|
e358bdc477498577b9dcea874b2bbacb4f08905f
|
[] |
no_license
|
PPinto22/LeetCode
|
5590d6ca87efcd29f9acd2eaed1bcf6805135e29
|
494a35542b61357c98c621202274d774e650a27c
|
refs/heads/master
| 2022-04-29T20:37:31.085120
| 2022-04-02T12:02:30
| 2022-04-02T12:02:30
| 201,478,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,118
|
py
|
from typing import Union, List, Tuple, Optional
def solve(B):
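    # Hedged reading of the interactive protocol, inferred from the logic below (not
    # stated in this file): every 10 queries the hidden bit array may be complemented,
    # reversed, both, or neither; one "equal" mirror pair and one "complementary" mirror
    # pair are re-queried as controls to detect which transformation happened.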
def set(index, value):
nonlocal control_equal, control_complement, known
# Fix to prevent unpaired bits right before a fluctuation
if (not control_complement or not control_equal) \
and (query % 10 == 0) \
and (known % 2 == 0):
return
solution[index] = value
known += 1
pair = get_pair(index)
if not control_equal and value == pair[1]:
control_equal = pair
elif not control_complement \
and pair[1] is not None \
and value != pair[1]:
control_complement = pair
def get_pair(index):
pair_index = B - 1 - index
return [pair_index, solution[pair_index]]
def determine_fluctuation():
nonlocal control_complement, control_equal
possibilities = ['complement', 'reverse', 'both', 'none']
if control_equal:
index, old = control_equal
new = ask(index)
if old == new:
possibilities = [p for p in possibilities if p in {'reverse', 'none'}]
else:
possibilities = [p for p in possibilities if p in {'complement', 'both'}]
control_equal = index, new
if control_complement:
index, old = control_complement
new = ask(index)
if old == new:
possibilities = [p for p in possibilities if p in {'both', 'none'}]
else:
possibilities = [p for p in possibilities if p in {'complement', 'reverse'}]
control_complement = index, new
return possibilities[0]
def apply_fluctuation(fluctuation):
def complement():
for i in range(B):
if solution[i] is not None:
solution[i] = not solution[i]
if fluctuation == 'complement':
complement()
elif fluctuation == 'reverse':
solution.reverse()
elif fluctuation == 'both':
complement()
solution.reverse()
def ask(i):
nonlocal query
query += 1
print(i + 1, flush=True)
response = input()
return True if response == '1' else False
def next_index():
return (known // 2) if (known % 2 == 0) else (B - (known // 2) - 1)
solution: List[Union[bool, None]] = [None] * B
control_equal: Optional[Tuple[int, bool]] = None
control_complement: Optional[Tuple[int, bool]] = None
query = 0
known = 0
while known < B and query < 150:
if query > 0 and query % 10 == 0:
fluctuation = determine_fluctuation()
apply_fluctuation(fluctuation)
else:
index = next_index()
set(index, ask(index))
return ''.join(map(lambda x: '1' if x else '0', solution))
if __name__ == '__main__':
T, B = map(int, input().split())
for Ti in range(1, T + 1):
solution = solve(B)
print(solution, flush=True)
if input() == 'N':
break
|
[
"pedropinto24@hotmail.com"
] |
pedropinto24@hotmail.com
|
8aadc45ac8ed74363bbca2826d0e6709ba10a478
|
fbf19ea11edf7e862a10ba5e1ac2d45a97b65c5a
|
/authentication/middleware.py
|
2daa243dc6edf7743dbd7d8ddfdf4d766a0cfbe7
|
[] |
no_license
|
AlexandruGhergut/epic-online-judge
|
c4bed0e69a98d369a9f95c00b9529a4967db9818
|
1af79e9343e722f352d974f445ad641c79016667
|
refs/heads/master
| 2021-03-22T03:22:45.447253
| 2017-06-29T06:19:48
| 2017-06-29T06:19:48
| 86,570,515
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
from django.urls import reverse
from django.utils.deprecation import MiddlewareMixin
from django.http import HttpResponseRedirect
class SetUsernameRedirectMiddleware(MiddlewareMixin):
def process_request(self, request):
user = request.user
if user.is_authenticated:
redirect_url = reverse('authentication:set_username',
kwargs={'pk': user.pk})
if (request.path != redirect_url and user.username_set is False):
request.session['set_username'] = True
return HttpResponseRedirect(redirect_url)
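# For this middleware to run, it must be listed in settings.MIDDLEWARE; an assumed
# (illustrative) entry, based on this file's path, would be:
#   MIDDLEWARE = [
#       ...,
#       'authentication.middleware.SetUsernameRedirectMiddleware',
#   ]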
|
[
"alexandru.ghergut94@gmail.com"
] |
alexandru.ghergut94@gmail.com
|
ec987b94aa935fcb73e88d0b7806f58298647634
|
62bf4ccfe2693157a838fb9f5759f912e79c669f
|
/workshop/lab_price_deviation_corr.py
|
124bcab22c249fa55db0707f686462ce489bff0d
|
[] |
no_license
|
rafaelxiao/athena
|
249ecbe56816f9ec9e7f6d4e1d0075a0b5f10e44
|
7501d7b0991e29a35c574ede4f3a4338e26649c5
|
refs/heads/master
| 2021-01-13T13:14:41.247613
| 2018-01-29T14:13:24
| 2018-01-29T14:13:24
| 72,708,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
import messenger as ms
import analyst as al
import numpy as np
import assistant as at
import matplotlib.pyplot as plt
h = al.PriceDeviation()
h.price_diff_list_save('002415', date='2017-06-28', duration=600, smooth=3)
origin_data = h.price_diff_list_load('002415', date='2017-06-28', duration=600, smooth=3)
origin_data = origin_data[-200:]
result = []
date_list = []
for i in range(30, len(origin_data)):
date = origin_data[i]['date']
sub_list = origin_data[i-30:i]
diff_list = [i['smoothed difference'] for i in sub_list]
theo_list = [i['smoothed theoretical'] for i in sub_list]
corr = np.correlate(diff_list, theo_list)
print(date, corr)
result.append(corr)
date_list.append(at.date_encoding(date))
plt.plot(date_list, result)
plt.show()
|
[
"rafaelxiao@hotmail.com"
] |
rafaelxiao@hotmail.com
|
ddeb97e4d5e9190c47dcbecb8b9a7dbe7ca9dfe4
|
bfd33836d69dd05d6e7720216cef6d341c35147b
|
/User/DanHD/class/Main.py
|
c45cc8fe6017a72125ec2f8a7222b820242a7acb
|
[] |
no_license
|
ducdan/PYTHON-COURSE
|
5371b65685769e88c2e14a38b9c86a2bdfc82b79
|
cc5a26d0d8124087f711ee0ca354065df6ea4fcf
|
refs/heads/master
| 2020-05-25T15:00:00.465760
| 2017-06-15T11:49:54
| 2017-06-15T11:49:54
| 84,941,845
| 1
| 2
| null | 2017-06-15T11:44:32
| 2017-03-14T11:34:28
|
HTML
|
UTF-8
|
Python
| false
| false
| 612
|
py
|
# inheritance (thua ke)
# class Chu(Ong):
# def print_ho(self):
# print(self.ho)
class Ong:
def __init__(self):
self.ho='Tran'
self.__ten='Hung'
def __tai_san(self):
print("100 cay vang")
class Me:
def __init__(self):
self.tai_san='200 cay vang'
class Cha(Ong):
def __init__(self):
self.ho='Nguyen'
def print_name(self):
pass
# def tai_san(self):
# print("10 cay vang")
class Con(Cha,Me):
    def __init__(self):
        Cha.__init__(self)
        Me.__init__(self)
    def print_ho(self):
        print(self.ho)
        print(self.tai_san)
nam=Con()
nam.print_ho()  # prints the ho inherited from Cha and the tai_san set by Me
# chu=Chu()
# chu.print_ho()
|
[
"51200482@hcmut.edu.vn"
] |
51200482@hcmut.edu.vn
|
e79db74e458b1f23bf9c7d355f33c7457e7e49b8
|
45272da6d64161a586b1dd41df63b8f701f38e39
|
/Easy Problems/1-10/1easy.py
|
075277c849e0a410bcde57f4d5bf459e7c1e8fad
|
[] |
no_license
|
Lucas-Guimaraes/Reddit-Daily-Programmer
|
559f813d2ee1a06e80a2b260bcb43718ae50b8bf
|
45d554d0e0f8bc67e2111bede3a45f77f5512d7b
|
refs/heads/main
| 2023-07-31T18:36:48.774791
| 2021-09-13T04:08:09
| 2021-09-13T04:08:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
#https://www.reddit.com/r/dailyprogrammer/comments/pih8x/easy_challenge_1/
user_name = raw_input("Put in your name: ")
user_age = raw_input("Whhat's your age?: ")
user_screenname = raw_input("How about a username?: ")
print("Your name is " + user_name + " your are " + user_age + " years old, and your username is " + user_screenname)
raw_input()
|
[
"noreply@github.com"
] |
noreply@github.com
|
d27e3a0a5682a13390fb54adcdbba787c16b8c52
|
3c81565e599b4db30c58c5eac871becd7739231a
|
/Backend/DjangoProject_1/users/views.py
|
d95ba18d24db3904668e2a510d32eb2fe6f91831
|
[] |
no_license
|
alifelham/Bus_Management_System
|
8517be21d8715eb4fd56117c880eb7cffa460247
|
f8ef3ffebccc6f560fc2624af53d040911cd8f31
|
refs/heads/main
| 2023-08-01T07:17:28.215221
| 2021-09-19T18:01:42
| 2021-09-19T18:01:42
| 388,483,823
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 939
|
py
|
from django.shortcuts import render, redirect
# importing django forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
# defining a function for registration
def register(request):
# the form is only given if a post request is made else it is a blank return
if request.method == 'POST':
form = UserCreationForm(request.POST)
#checks if form data is valid. If true then gets the username
if form.is_valid():
username = form.cleaned_data.get('username')
#gives a flash msg when an account is created
messages.success(request, f'Account created for {username}!')
return redirect('blog-home')
else:
form = UserCreationForm()
return render(request, 'users/register.html', {'form': form})
# the {'form': form} part is used for accessing form content from within the reg template
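# A minimal, assumed URLconf sketch (not part of this file) showing how the GET/POST
# branches above become reachable:
#   from django.urls import path
#   from . import views
#   urlpatterns = [path('register/', views.register, name='register')]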
|
[
"noreply@github.com"
] |
noreply@github.com
|
410559c8f26e95c96374a7fea4724d3d00169ba7
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/number-of-ways-to-earn-points.py
|
6707c76b184e8c02c07e41ef08fcbd9b81e9220e
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
# Time: O(n * t * c)
# Space: O(t)
# knapsack dp
class Solution(object):
def waysToReachTarget(self, target, types):
"""
:type target: int
:type types: List[List[int]]
:rtype: int
"""
MOD = 10**9+7
dp = [0]*(target+1)
dp[0] = 1
for c, m in types:
for i in reversed(xrange(1, target+1)):
for j in xrange(1, min(i//m, c)+1):
dp[i] = (dp[i]+dp[i-j*m])%MOD
return dp[-1]
# Time: O(n * t * c)
# Space: O(t)
# knapsack dp
class Solution2(object):
def waysToReachTarget(self, target, types):
"""
:type target: int
:type types: List[List[int]]
:rtype: int
"""
MOD = 10**9+7
dp = [0]*(target+1)
dp[0] = 1
for c, m in types:
new_dp = [0]*(target+1)
for i in xrange(target+1):
for j in xrange(min((target-i)//m, c)+1):
new_dp[i+j*m] = (new_dp[i+j*m]+dp[i])%MOD
dp = new_dp
return dp[-1]
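# Illustrative check (counts worked out by hand): with target = 6 and
# types = [[6, 1], [3, 2], [2, 3]] (six 1-point, three 2-point and two 3-point questions),
# both classes return 7: six 1s; four 1s + one 2; two 1s + two 2s; three 2s;
# three 1s + one 3; one 1 + one 2 + one 3; two 3s.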
|
[
"noreply@github.com"
] |
noreply@github.com
|
d5c5909ea6644335136f2d82bcda8a30fa14ccab
|
48477a15ad96505def8097a6c098826b1e5cfe1a
|
/2_basic_algorithms/2_sorting_algorithms/14_pair_sum.py
|
9e1422278c00aead20f8116beaac4b3230077a6d
|
[] |
no_license
|
450703035/Data-Structures-Algorithms
|
02cd5bbb92ce25019fce4955af38b0317b4f4cac
|
dde33560fcb3e3ff41cf8bd37a454f8c13b15138
|
refs/heads/master
| 2021-05-22T02:25:03.554870
| 2020-06-27T14:23:24
| 2020-06-27T14:23:24
| 252,927,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,832
|
py
|
# Pair Sum
'''
Problem Statement
Given an input array and a target value (integer), find two values
in the array whose sum is equal to the target value.
Solve the problem without using extra space.
You can assume the array has unique values and will never have
more than one solution.
'''
def pair_sum(arr, target):
"""
:param: arr - input array
:param: target - target value
TODO: complete this method to find two numbers such that their sum is equal to the target
Return the two numbers in the form of a sorted list
"""
# Sort the list
arr.sort()
# Initialize two pointers - one from the start of the array and
    # the other from the end.
front_index = 0
back_index = len(arr) - 1
# Shift the pointers
while front_index < back_index:
front = arr[front_index]
back = arr[back_index]
if front + back == target:
return [front, back]
# Sum < target --> shift front pointer forwards
elif front + back < target:
front_index += 1
# Sum > target --> Shift back pointer backwards
else:
back_index -= 1
return [None, None]
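# Illustrative trace: arr = [2, 7, 11, 15], target = 9
#   front=2, back=15 -> 17 > 9 -> move the back pointer left
#   front=2, back=11 -> 13 > 9 -> move the back pointer left
#   front=2, back=7  ->  9 == 9 -> return [2, 7]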
# Test of pair/sum function.
def test_function(test_case):
input_list = test_case[0]
target =test_case[1]
solution = test_case[2]
output = pair_sum(input_list, target)
if output == solution:
print("Pass")
else:
print("False")
input_list = [2, 7, 11, 15]
target = 9
solution = [2, 7]
test_case = [input_list, target, solution]
test_function(test_case)
input_list = [0, 8, 5, 7, 9]
target = 9
solution = [0, 9]
test_case = [input_list, target, solution]
test_function(test_case)
input_list = [110, 9, 89]
target = 9
solution = [None, None]
test_case = [input_list, target, solution]
test_function(test_case)
|
[
"danny.wangle@gmail.com"
] |
danny.wangle@gmail.com
|
76d2a881bf9e6396738d1fd20530828d0772d4dc
|
3b298a922bea8249f4459f4252605c0774cc00f9
|
/test/test_cliffworld.py
|
ac44640d760d7d32e0a3332e227f04cc5b3eba52
|
[
"MIT",
"Python-2.0"
] |
permissive
|
bouali-meriem-estin/tabular-methods
|
536fd07ec05ccfb5fc3109cdc59e7bd2e79590a5
|
05ee6488feffc64d3bb7335f26b2e9688d90a57b
|
refs/heads/master
| 2023-06-09T14:01:01.687901
| 2020-07-03T15:23:03
| 2020-07-03T15:23:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,474
|
py
|
import sys
sys.path.append("..")
import numpy as np
from env.grid_world import GridWorld
from scipy.io import loadmat
def test_cliffworld():
# load the test data
grid_world = loadmat('../data/test_data/cliffworld.mat')['model']
# specify world parameters
num_rows = 5
num_cols = 10
restart_states = np.array([[4, 1], [4, 2], [4, 3], [4, 4], [4, 5], [4, 6], [4, 7]])
obstructed_states = np.array([[0, 9], [1, 9], [2, 9], [3, 9], [4, 9]])
start_state = np.array([[4, 0]])
goal_states = np.array([[4, 8]])
# create model
gw = GridWorld(num_rows=num_rows,
num_cols=num_cols,
start_state=start_state,
goal_states=goal_states)
gw.add_obstructions(obstructed_states=obstructed_states,
restart_states=restart_states)
gw.add_rewards(step_reward=-1,
goal_reward=10,
restart_state_reward=-100)
gw.add_transition_probability(p_good_transition=1,
bias=0)
gw.add_discount(discount=0.9)
model = gw.create_gridworld()
# run tests
assert np.all(model.R == grid_world['R'][0][0][:,0].reshape(-1,1))
assert np.all(model.P[:,:,0] == grid_world['P'][0][0][:,:,0])
assert np.all(model.P[:,:,1] == grid_world['P'][0][0][:,:,1])
assert np.all(model.P[:,:,2] == grid_world['P'][0][0][:,:,2])
assert np.all(model.P[:,:,3] == grid_world['P'][0][0][:,:,3])
|
[
"david.lines.dl@gmail.com"
] |
david.lines.dl@gmail.com
|
526abb44076323b13492031101bc312d813868d2
|
40796d49a6d50237900ac1a1a20648b546613d18
|
/python/applications/mobdat/common/graph/LayoutNodes.py
|
333b5941c57af701d1ba71064bdf91f907c25351
|
[] |
no_license
|
Mondego/spacetime-apps
|
c32abca98134d80f5bff965c8d74550c8109821d
|
c2d3a714cc2819f4a72d2d0b1b8c129d69c4de7c
|
refs/heads/master
| 2021-01-23T03:43:08.197768
| 2019-07-27T22:08:58
| 2019-07-27T22:08:58
| 86,112,423
| 3
| 3
| null | 2019-07-27T22:08:59
| 2017-03-24T21:34:10
|
Python
|
UTF-8
|
Python
| false
| false
| 10,237
|
py
|
#!/usr/bin/env python
"""
Copyright (c) 2014, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file LayoutNodes.py
@author Mic Bowman
@date 2013-12-03
This file defines routines used to build features of a mobdat traffic
network such as building a grid of roads.
"""
import os, sys
import logging
# we need to import python modules from the $SUMO_HOME/tools directory
sys.path.append(os.path.join(os.environ.get("OPENSIM","/share/opensim"),"lib","python"))
sys.path.append(os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..")))
sys.path.append(os.path.realpath(os.path.join(os.path.dirname(__file__), "..")))
sys.path.append(os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "lib")))
import Node, LayoutDecoration
logger = logging.getLogger(__name__)
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class IntersectionType(Node.Node) :
"""
The IntersectionType class is used to specify parameters for rendering
intersections in Sumo and OpenSim.
"""
# -----------------------------------------------------------------
def __init__(self, name, itype, render) :
"""
Args:
name -- string
itype -- string, indicates the stop light type for the intersection
render -- boolean, flag to indicate that opensim should render the object
"""
Node.Node.__init__(self, name = name)
self.AddDecoration(LayoutDecoration.IntersectionTypeDecoration(name, itype, render))
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class Intersection(Node.Node) :
# -----------------------------------------------------------------
def __init__(self, name, itype, x, y) :
"""
Args:
name -- string
itype -- object of type Layout.IntersectionType
x, y -- integer coordinates
"""
Node.Node.__init__(self, name = name)
self.AddDecoration(LayoutDecoration.CoordDecoration(x, y))
self.AddDecoration(LayoutDecoration.EdgeMapDecoration())
itype.AddMember(self)
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
##class EndPoint(Node.Node) :
class EndPoint(Intersection) :
"""
EndPoint
This graph node class (a subset of intersections) is the destination
for a trip.
Members: None
Decorations:
EndPointDecoration
Edges: None
"""
# -----------------------------------------------------------------
def __init__(self, name, itype, x, y) :
"""
Args:
name -- string
itype -- object of type Layout.IntersectionType
x, y -- integer coordinates
"""
Intersection.__init__(self, name, itype, x, y)
self.AddDecoration(LayoutDecoration.EndPointDecoration())
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class LocationCapsule(Node.Node) :
"""
LocationCapsule
This graph node class manages a collection of EndPoint nodes.
Members: EndPoints, typically one endpoint for a residential
location and multiple endpoints for a business location
Decorations:
CapsuleDecoration
Edges: None
"""
# -----------------------------------------------------------------
def __init__(self, name) :
"""
Args:
name -- string
"""
Node.Node.__init__(self, name = name)
self.AddDecoration(LayoutDecoration.CapsuleDecoration())
# -----------------------------------------------------------------
def AddEndPointToCapsule(self, endpoint) :
"""
Args:
endpoint -- object of type LayoutNodes.EndPoint
"""
self.AddMember(endpoint)
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class BusinessLocation(Node.Node) :
"""
BusinessLocation
This graph node class manages a business neighborhood consisting of
a collection of LocationCapsule objects
    Members: Typically one LocationCapsule node that contains multiple
        EndPoint nodes
MemberOf:
BusinessLocationProfile
Decorations:
BusinessLocationDecoration
Edges: None
"""
# -----------------------------------------------------------------
def __init__(self, name, profile) :
"""
Args:
name -- string
profile -- object of type BusinessLocationProfile
"""
Node.Node.__init__(self, name = name)
self.AddDecoration(LayoutDecoration.BusinessLocationDecoration())
profile.AddMember(self)
# -----------------------------------------------------------------
def AddCapsuleToLocation(self, capsule) :
"""
Args:
capsule -- object of type LayoutNodes.LocationCapsule
"""
self.AddMember(capsule)
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class ResidentialLocation(Node.Node) :
"""
ResidentialLocation
This graph node class manages a residential neighborhood consisting of
a collection of LocationCapsule objects
Members: Typically several LocationCapsule nodes that each contain
a single EndPoint node
MemberOf:
ResidentialLocationProfile
Decorations:
ResidentialLocationDecoration
Edges: None
"""
# -----------------------------------------------------------------
def __init__(self, name, profile) :
"""
Args:
name -- string
profile -- object of type ResidentialLocationProfile
"""
Node.Node.__init__(self, name = name)
self.AddDecoration(LayoutDecoration.ResidentialLocationDecoration())
profile.AddMember(self)
# -----------------------------------------------------------------
def AddCapsuleToLocation(self, capsule) :
"""
Args:
capsule -- object of type LayoutNodes.LocationCapsule
"""
self.AddMember(capsule)
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class BusinessLocationProfile(Node.Node) :
# -----------------------------------------------------------------
def __init__(self, name, employees, customers, types) :
"""
Args:
name -- string
employees -- integer, max number of employees per node
customers -- integer, max number of customers per node
types -- dict mapping Business.BusinessTypes to count
"""
Node.Node.__init__(self, name = name)
self.AddDecoration(LayoutDecoration.BusinessLocationProfileDecoration(employees, customers, types))
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class ResidentialLocationProfile(Node.Node) :
# -----------------------------------------------------------------
def __init__(self, name, residents) :
"""
Args:
residents -- integer, max number of residents per node
"""
Node.Node.__init__(self, name = name)
self.AddDecoration(LayoutDecoration.ResidentialLocationProfileDecoration(residents))
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class RoadType(Node.Node) :
"""
The RoadType class is used to specify parameters for rendering roads
in Sumo and OpenSim.
"""
# -----------------------------------------------------------------
def __init__(self, name, lanes, pri, speed, wid, sig, render, center) :
"""
Args:
name -- string
lanes -- integer, number of lanes in the road
pri -- integer, priority for stop lights
speed -- float, maximum speed allowed on the road
sig -- string, signature
render -- boolean, flag to indicate whether opensim should render
center -- boolean, flag to indicate the coordinate origin
"""
Node.Node.__init__(self, name = name)
self.AddDecoration(LayoutDecoration.RoadTypeDecoration(name, lanes, pri, speed, wid, sig, render, center))
|
[
"ra.rohan@gmail.com"
] |
ra.rohan@gmail.com
|
3ed100785341bbd1cd924b27b04e790838d9c78d
|
bbe169a02aaec4e6744350530d835af17587cf09
|
/models/vgg19_localization.py
|
d743a694819efc1af8a7865e8222a5d65f3eab69
|
[] |
no_license
|
atwang16/sp19-6s897-colon
|
af4fca0b472ca9e9d4039794b4e33fa9640d1e73
|
e414708d1af6953d89ea7cdf88ceeabd04a56136
|
refs/heads/master
| 2021-08-06T20:42:29.518055
| 2020-06-01T06:27:34
| 2020-06-01T06:27:34
| 182,820,179
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,190
|
py
|
#!/usr/bin/env python3
# 64c-64c-p-128c-128c-p-(256c)4-p-(512c)4-p-(512c)4-p-1560fc-1560fc
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import MaxPool2D, Input, Flatten, GlobalAveragePooling2D
from keras.models import Model
from keras.applications.vgg19 import VGG19
def convs(input, num_filters, kernel_size, stride, num_layers=1):
x = input
for i in range(num_layers):
x = Conv2D(num_filters,
kernel_size=kernel_size,
strides=stride,
padding="same",
kernel_initializer="he_normal")(x)
x = MaxPool2D(pool_size=2)(x)
return x
def vgg19(input_shape, pretrained_weights=None, use_sigmoid=False):
# inputs = Input(shape=input_shape)
#
# # convolutional layers
# x = convs(inputs, num_filters=64, kernel_size=3, stride=1, num_layers=2)
# x = convs(x, num_filters=128, kernel_size=3, stride=1, num_layers=2)
# x = convs(x, num_filters=256, kernel_size=3, stride=1, num_layers=4)
# x = convs(x, num_filters=512, kernel_size=3, stride=1, num_layers=4)
# x = convs(x, num_filters=512, kernel_size=3, stride=1, num_layers=4)
base_model = VGG19(weights='imagenet', include_top=False, input_shape=input_shape)
inputs = base_model.inputs
x = base_model.output
# fully connected layers
x = GlobalAveragePooling2D()(x)
# x = Flatten(name="flatten")(x)
x = Dense(1024, kernel_initializer="he_normal")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
# x = Dense(1560, activation="relu")(x)
if use_sigmoid:
assert input_shape[0] == input_shape[1], "Currently only support equal width and height"
outputs = Dense(4, activation="sigmoid")(x)
outputs = keras.layers.Lambda(lambda x: x * input_shape[0])(outputs)
else:
outputs = Dense(4, activation="linear")(x)
# create model
model = Model(inputs=inputs, outputs=outputs)
if pretrained_weights is not None:
model.load_weights(pretrained_weights, by_name=True)
for i in range(len(base_model.layers)):
base_model.layers[i].trainable = False
return model
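# Assumed usage sketch (the input size and loss are illustrative, not fixed by this file):
#   model = vgg19(input_shape=(224, 224, 3))
#   model.compile(optimizer="adam", loss="mse")  # 4 linear outputs -> box regression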
|
[
"austinw@mit.edu"
] |
austinw@mit.edu
|
25cc35183020aef1120dbf3d18e7b3ab3b4fbcd8
|
202539c64a109ea1d59b2ea91fd4eaf99b9ef407
|
/world/settings.py
|
e42be1f327bc5c3336f3948fec10939a49b737a6
|
[] |
no_license
|
Ahmansee/newman
|
95a3ee006248c93d7fc8a2ecf30e2168784d587b
|
3f6d982b3701b88b2d53af6938441c8b98728853
|
refs/heads/main
| 2022-12-24T01:47:31.839599
| 2020-10-02T11:20:25
| 2020-10-02T11:20:25
| 300,582,816
| 0
| 0
| null | 2020-10-02T11:20:26
| 2020-10-02T10:42:51
| null |
UTF-8
|
Python
| false
| false
| 3,249
|
py
|
"""
Django settings for world project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
TEMPLATE_DIR = os.path.join(BASE_DIR,"template")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jb4mhnnbfl(1razgjgw#3a+()pn7pe=q(-tjxb3ys+!cb8d8&_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'world.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'world.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR,'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'world/static')
]
|
[
"samyisotop@gmail.com"
] |
samyisotop@gmail.com
|
06b5353e742f734e8a4918ac8cd03995c46fc663
|
aed424076253479f698cb13acecb1800c9647092
|
/Qlearn_proto/manual_control_data.py
|
0e01b7793e33078e25cfe05dca7d8188e7c0739c
|
[] |
no_license
|
starjh94/Drone-Hover
|
c2e93578ccf47c0e72b38f49441a0a393272ee4d
|
60e3f7f56ed5c884bb860f2c1630b03d419b67c2
|
refs/heads/master
| 2021-01-22T17:42:54.250271
| 2017-12-23T00:20:04
| 2017-12-23T00:20:04
| 85,031,328
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,165
|
py
|
import subprocess
subprocess.Popen(["python","degree_process.py"])
import sys, time
import navio.rcinput
import navio.util
import navio.pwm
import degree_gyro
import numpy as np
import Servo
import threading
period1 = 0
period3 = 0
start_time = 0
## Data numpy value initialize ##
np_gyro_degree = np.array([[0, 0]])
np_acc_degree = np.array([[0, 0]])
np_acc_gyro = np.array([[0, 0]])
np_left_motor = np.array([[0, 0]])
np_right_motor = np.array([[0, 0]])
np_ML_data = np.array([[0, 0, 0, 0, 0, 0]])
class Manual_control(threading.Thread):
def run(self):
global period1
global period3
rcin = navio.rcinput.RCInput()
while(True):
period1 = rcin.read(0)
period3 = rcin.read(2)
def main():
manual = Manual_control(name='recv_rc')
global period1
global period3
global np_gyro_degree
global np_acc_degree
global np_acc_gyro
global np_left_motor
global np_right_motor
global np_ML_data
global start_time
pwm_1 = 1.1
pwm_2 = 1.1
print "start"
a = Servo.servo()
b = degree_gyro.acc()
timecheck_list = []
pitch_aver = acc_gyro_pitch = gyro_pitch_degree = b.pitch()
## matplotlib data initialization ##
"""
np_gyro_degree = np.array([[0, gyro_pitch_degree]])
np_acc_degree = np.array([[0, b.pitch()]])
np_acc_gyro = np.array([[0, acc_gyro_pitch]])
np_left_motor = np.array([[0, pwm_1]])
np_right_motor = np.array([[0, pwm_2]])
"""
np_ML_data = np.array([[0, acc_gyro_pitch, b.pitch(), gyro_pitch_degree, pwm_1, pwm_2]])
manual.daemon = True
manual.start()
start_time = time.time()
timecheck_list.append(start_time)
while True:
start_action = raw_input("\nI'm ready\nAre you ready?(Y / N): ")
if start_action.upper() == "Y":
print "\nGame will be started! "
break
else:
print "\nOK! let me do it again ~"
while(True):
timecheck_list.append(time.time())
loop_time = timecheck_list[1] - timecheck_list[0]
timecheck_list.pop(0)
acc_pitch_degree = b.pitch()
gyro_pitch_degree = b.gyro_pitch(loop_time, gyro_pitch_degree)
get_gyro_degree = b.gyro_pitch(loop_time, acc_gyro_pitch)
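        # The next line is a complementary filter: blend ~97% of the gyro-integrated pitch
        # with ~3% of the accelerometer pitch to limit gyro drift (descriptive note only).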
acc_gyro_pitch = np.sign(get_gyro_degree) * ((0.97 * abs(get_gyro_degree)) + (0.03 * abs(acc_pitch_degree)))
## servo part ##
servo_pwm1 = pwm_1 + (int(period3) - 982) * 0.00049
servo_pwm2 = pwm_2 + (int(period1) - 982) * 0.00049
a.servo_1(servo_pwm1)
a.servo_2(servo_pwm2)
## for matplotlib ##
data_time = time.time() - start_time
"""
np_gyro_degree = np.append(np_gyro_degree, [[data_time, gyro_pitch_degree]], axis=0)
np_acc_degree = np.append(np_acc_degree, [[data_time, acc_pitch_degree]], axis=0)
np_acc_gyro = np.append(np_acc_gyro, [[data_time, acc_gyro_pitch]], axis=0)
np_left_motor = np.append(np_left_motor, [[data_time, servo_pwm1]], axis=0)
np_right_motor = np.append(np_right_motor, [[data_time, servo_pwm2]], axis=0)
"""
np_ML_data = np.append(np_ML_data, [[data_time, acc_gyro_pitch, acc_pitch_degree, gyro_pitch_degree, servo_pwm1, servo_pwm2]], axis=0)
print "<time: %.16s> : degree= %.16s \tpwm_1= %.5s pwm2= %.5s" % (data_time, acc_gyro_pitch, servo_pwm1, servo_pwm2)
#print "pwm_v1 = %s pwm_v2 = %s degree = C: %s\t<-\tG: %s vs A: %s" % (servo_pwm1, servo_pwm2, acc_gyro_pitch, gyro_pitch_degree, acc_pitch_degree)
time.sleep(0.01)
if __name__ == '__main__':
try :
main()
except :
print("finish")
"""
np.save('gyro_degree_Data', np_gyro_degree)
np.save('acc_degree_Data', np_acc_degree)
np.save('accGyro_degree_Data', np_acc_gyro)
np.save('left_motor_Data', np_left_motor)
np.save('right_motor_Data', np_right_motor)
"""
np.save('M_L_Data', np_ML_data)
print "time: %s, number of numpy data: %s" % (time.time() - start_time, len(np_ML_data))
|
[
"lbaksa21@gmail.com"
] |
lbaksa21@gmail.com
|
4530a5673d72ef9a8eac58ce77431858da59fecf
|
b47853b6dcf8ab14f1dafeb2d613212a71329206
|
/war/prefabs/warrior/mage.py
|
8cc1d91efe572268825c7516d0560767f4180e6b
|
[] |
no_license
|
hittun/pygamel
|
b75f5b340825a3d99a0087eb6d396dc1a5f26802
|
adc294f076d7659dd6357a3365053c62a1868876
|
refs/heads/master
| 2021-05-16T20:46:53.776772
| 2020-03-30T15:48:08
| 2020-03-30T15:48:08
| 250,463,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Mage (法师):
    Mages, commonly known as "AP" or "APC", are heroes with powerful magic-damage skills
    but relatively weak defense and mobility. Some mages deal huge damage in a short
    burst, while others rely mainly on sustained damage over time; the line between
    burst mages and assassins is blurry.
# @File : mage.py
# @Time : 2020/3/28 4:46
# @GitHub: https://github.com/hittun/pygamel
"""
from .warrior import Warrior
class Mage(Warrior):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class DaJi(Mage):
"""妲己"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
|
[
"hittun@163.com"
] |
hittun@163.com
|
fe096733995179810a8f5a1cd1a2c0c8aa89bd5d
|
5fe7929f129584688b75706334921a02bfa93a20
|
/aide_design/unit_process_design/prefab/lfom_prefab_functional.py
|
470d5e65aea0ede5f5d25e09810f58fc0555ea2f
|
[] |
no_license
|
FelixYuHengYang/moops
|
ff1b1c416151d873a13d91109b17683d6656fbed
|
82f018b2f39b58cf08c990cc986d21f67ac265f9
|
refs/heads/master
| 2021-04-30T06:13:07.663966
| 2019-05-05T18:38:11
| 2019-05-05T18:38:11
| 121,437,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,872
|
py
|
# -*- coding: utf-8 -*-
"""
Edited on September 1, 2017
@author: Monroe Weber-Shirk
Created on Wed Jun 21 17:16:46 2017
@author: cc2467
"""
#Here we import packages that we will need for this notebook. You can find out about these packages in the Help menu.
# although math is "built in" it needs to be imported so it's functions can be used.
import math
#see numpy cheat sheet https://www.dataquest.io/blog/images/cheat-sheets/numpy-cheat-sheet.pdf
#The numpy import is needed because it is renamed here as np.
import numpy as np
# add imports for AguaClara code that will be needed
# physchem has functions related to hydraulics, fractal flocs, flocculation, sedimentation, etc.
from aide_design import physchem as pc
# pipedatabase has functions related to pipe diameters
from aide_design import pipedatabase as pipe
# units allows us to include units in all of our calculations
from aide_design.units import unit_registry as u
# utility has the significant digit display function
from aide_design import utility as ut
# import export inputs and define the VC coefficient
from aide_design import expert_inputs as exp
ratio_VC_orifice= exp.RATIO_VC_ORIFICE
# The following constants need to go into the constants file
Pi_LFOM_safety = 1.2
# pipe schedule for LFOM
#SDR_LFOM = 26
#FLOW = 10*u.L/u.s
#HL_LFOM = 20*u.cm
#primary outputs from this file are
#Nominal diameter nom_diam_lfom_pipe(FLOW,HL_LFOM,Pi_LFOM_safety,SDR_LFOM)
#number of rows n_lfom_rows(FLOW,HL_LFOM)
#orifice diameter orifice_diameter(FLOW,HL_LFOM,drill_series_uom)
#number of orifices in each row n_lfom_orifices(FLOW,HL_LFOM,drill_series_uom,SDR_LFOM)
#height of the center of each row height_lfom_orifices(FLOW,HL_LFOM,drill_series_uom)
# output is width per flow rate.
@u.wraps(u.s/(u.m**2), [u.m,u.m], False)
def width_stout(HL_LFOM,z):
return (2/((2*pc.gravity*z)**(1/2)*ratio_VC_orifice*np.pi*HL_LFOM)).magnitude
@u.wraps(None, [u.m**3/u.s,u.m], False)
def n_lfom_rows(FLOW,HL_LFOM):
"""This equation states that the open area corresponding to one row can be
set equal to two orifices of diameter=row height. If there are more than
two orifices per row at the top of the LFOM then there are more orifices
than are convenient to drill and more than necessary for good accuracy.
Thus this relationship can be used to increase the spacing between the
rows and thus increase the diameter of the orifices. This spacing function
also sets the lower depth on the high flow rate LFOM with no accurate
flows below a depth equal to the first row height.
    But it might be better to always set the number of rows to 10.
The challenge is to figure out a reasonable system of constraints that
reliably returns a valid solution.
"""
N_estimated = (HL_LFOM*np.pi/(2*width_stout(HL_LFOM,HL_LFOM)*FLOW))
#variablerow=min(10,max(4,math.trunc(N_estimated.magnitude)))
return 10
def dist_center_lfom_rows(FLOW,HL_LFOM):
return HL_LFOM/n_lfom_rows(FLOW,HL_LFOM)
def vel_lfom_pipe_critical(HL_LFOM):
"""The average vertical velocity of the water inside the LFOM pipe
at the very bottom of the bottom row of orifices
The speed of falling water is 0.841 m/s for all linear flow orifice meters
of height 20 cm, independent of total plant flow rate."""
return (4/(3*math.pi)*(2*u.g_0*HL_LFOM)**(1/2)).to(u.m/u.s)
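# Quick arithmetic check of the 0.841 m/s figure quoted above (illustrative):
# 4/(3*pi) * sqrt(2 * 9.81 m/s^2 * 0.20 m) ≈ 0.424 * 1.981 m/s ≈ 0.841 m/s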
def area_lfom_pipe_min(FLOW,HL_LFOM,Pi_LFOM_safety):
return (Pi_LFOM_safety*FLOW/vel_lfom_pipe_critical(HL_LFOM)).to(u.m**2)
def nom_diam_lfom_pipe(FLOW,HL_LFOM,Pi_LFOM_safety,SDR_LFOM):
ID=pc.diam_circle(area_lfom_pipe_min(FLOW,HL_LFOM,Pi_LFOM_safety))
return pipe.ND_SDR_available(ID,SDR_LFOM)
def area_lfom_orifices_max(FLOW,HL_LFOM):
"""Estimate the orifice area corresponding to the top row of orifices.
Another solution method is to use integration to solve this problem.
Here we use the width of the stout weir in the center of the top row
to estimate the area of the top orifice
"""
return ((FLOW*width_stout(HL_LFOM,HL_LFOM-0.5*dist_center_lfom_rows(FLOW,HL_LFOM))*dist_center_lfom_rows(FLOW,HL_LFOM))).to(u.m**2)
def d_lfom_orifices_max(FLOW,HL_LFOM):
return (pc.diam_circle(area_lfom_orifices_max(FLOW,HL_LFOM)))
def orifice_diameter(FLOW,HL_LFOM,drill_bits):
maxdrill = (min((dist_center_lfom_rows(FLOW,HL_LFOM)).to(u.m).magnitude,(d_lfom_orifices_max(FLOW,HL_LFOM)).to(u.m).magnitude))*u.m
return ut.floor_nearest(maxdrill,drill_bits)
def drillbit_area(FLOW,HL_LFOM,drill_bits):
return pc.area_circle(orifice_diameter(FLOW,HL_LFOM,drill_bits))
def n_lfom_orifices_per_row_max(FLOW,HL_LFOM,drill_bits,SDR_LFOM):
"""A bound on the number of orifices allowed in each row.
The distance between consecutive orifices must be enough to retain
structural integrity of the pipe.
"""
S_lfom_orifices_Min= 3*u.mm
return math.floor(math.pi*(pipe.ID_SDR(nom_diam_lfom_pipe(FLOW,HL_LFOM,Pi_LFOM_safety,SDR_LFOM),SDR_LFOM))/(orifice_diameter(FLOW,HL_LFOM,drill_bits)+S_lfom_orifices_Min))
def flow_ramp(FLOW,HL_LFOM):
n_rows = n_lfom_rows(FLOW,HL_LFOM)
return np.linspace(FLOW.magnitude/n_rows,FLOW.magnitude,n_rows)*FLOW.units
def height_lfom_orifices(FLOW,HL_LFOM,drill_bits):
"""Calculates the height of the center of each row of orifices.
The bottom of the bottom row orifices is at the zero elevation
point of the LFOM so that the flow goes to zero when the water height
is at zero.
"""
return (np.arange(((orifice_diameter(FLOW,HL_LFOM,drill_bits)*0.5).to(u.m)).magnitude,
(HL_LFOM.to(u.m)).magnitude,
((dist_center_lfom_rows(FLOW,HL_LFOM)).to(u.m)).magnitude))*u.m
#print(height_lfom_orifices(10*u.L/u.s,20*u.cm,[0.75]*u.inch))
def flow_lfom_actual(FLOW,HL_LFOM,drill_bits,Row_Index_Submerged,N_LFOM_Orifices):
"""Calculates the flow for a given number of submerged rows of orifices
"""
D_LFOM_Orifices=orifice_diameter(FLOW,HL_LFOM,drill_bits)
row_height=dist_center_lfom_rows(FLOW,HL_LFOM)
#harray is the distance from the water level to the center of the orifices when the water is at the max level
harray = (np.linspace(row_height.to(u.mm).magnitude,HL_LFOM.to(u.mm).magnitude,n_lfom_rows(FLOW,HL_LFOM)))*u.mm -0.5* D_LFOM_Orifices
FLOW_new=0*u.m**3/u.s
for i in range(Row_Index_Submerged+1):
FLOW_new = FLOW_new + (N_LFOM_Orifices[i]*(pc.flow_orifice_vert(D_LFOM_Orifices,harray[Row_Index_Submerged-i],ratio_VC_orifice)))
return FLOW_new
#Calculate number of orifices at each level given a diameter
def n_lfom_orifices(FLOW,HL_LFOM,drill_bits,SDR_LFOM):
FLOW_ramp_local = flow_ramp(FLOW,HL_LFOM)
n_orifices_max =n_lfom_orifices_per_row_max(FLOW,HL_LFOM,drill_bits,SDR_LFOM)
n_rows = (n_lfom_rows(FLOW,HL_LFOM))
D_LFOM_Orifices = orifice_diameter(FLOW,HL_LFOM,drill_bits)
# H is distance from the elevation between two rows of orifices down to the center of the orifices
H=dist_center_lfom_rows(FLOW,HL_LFOM)-D_LFOM_Orifices*0.5
n=[]
for i in range(n_rows):
#place zero in the row that we are going to calculate the required number of orifices
n=np.append(n,0)
#calculate the ideal number of orifices at the current row without constraining to an integer
n_orifices_real=((FLOW_ramp_local[i]-flow_lfom_actual(FLOW,HL_LFOM,drill_bits,i,n))/
pc.flow_orifice_vert(D_LFOM_Orifices,H,ratio_VC_orifice)).to(u.dimensionless).magnitude
#constrain number of orifices to be less than the max per row and greater or equal to 0
n[i]=min((max(0,round(n_orifices_real))),n_orifices_max)
return n
#This function calculates the error of the design based on the differences between the predicted flow rate
#and the actual flow rate through the LFOM.
def flow_lfom_error(FLOW,HL_LFOM,drill_bits,SDR_LFOM):
N_lfom_orifices=n_lfom_orifices(FLOW,HL_LFOM,drill_bits,SDR_LFOM)
FLOW_lfom_error=[]
for j in range (len(N_lfom_orifices)-1):
FLOW_lfom_error.append((flow_lfom_actual(FLOW,HL_LFOM,drill_bits,j,N_lfom_orifices)-flow_ramp(FLOW,HL_LFOM)[j])/FLOW)
return FLOW_lfom_error
def flow_lfom_ideal(FLOW,HL_LFOM,H):
flow_lfom_ideal=(FLOW*H)/HL_LFOM
return flow_lfom_ideal
def flow_lfom(FLOW,HL_LFOM,drill_bits,SDR_LFOM,H):
D_lfom_orifices=orifice_diameter(FLOW,HL_LFOM,drill_bits)
H_submerged=np.arange(H-0.5*D_lfom_orifices,HL_LFOM,H-dist_center_lfom_rows(FLOW,HL_LFOM),dtype=object)
N_lfom_orifices=n_lfom_orifices(FLOW,HL_LFOM,drill_bits,SDR_LFOM)
flow=[]
for i in range (len(H_submerged)):
flow.append(pc.flow_orifice_vert(D_lfom_orifices,H_submerged[i],ratio_VC_orifice)*N_lfom_orifices[i])
return sum (flow)
|
[
"31310824+skittlesburst@users.noreply.github.com"
] |
31310824+skittlesburst@users.noreply.github.com
|
c23dd561e8a88cfa7422931a62513d59e6b6cbf6
|
6effd74388d87b4fbad1e2b858571189f575a850
|
/5.py
|
2e463301ee81a67ecde78622424e6cd2500244b5
|
[] |
no_license
|
wiesson/tdd-project-euler
|
b1521d521f1e2304b6180337c592716b68e3b055
|
189b772172ed723658924d6832f3c403bcc3f56e
|
refs/heads/master
| 2021-01-17T20:16:40.359816
| 2015-06-23T16:27:40
| 2015-06-23T16:27:40
| 37,330,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,844
|
py
|
#! /usr/bin/env python
from functools import reduce
from math import ceil, sqrt
import unittest
def is_prime(prime):
# http://stackoverflow.com/a/4117879/2050629
    # the range must include int(sqrt(prime)), otherwise e.g. 9 or 25 would be reported as prime
    return all(prime % i for i in range(2, int(sqrt(prime)) + 1))
def prime_factors(n):
primes_list = []
z = n
while z > 1:
i = 2
prime_found = False
while i * i <= n and not prime_found:
if z % i == 0:
prime_found = True
p = i
else:
i = i + 1
if not prime_found:
p = z
primes_list.append(p)
z = z // p
return primes_list
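# Illustrative example: prime_factors(12) -> [2, 2, 3]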
def smallest_multiple_slow(n_range):
n = 1
while True:
for each in n_range:
if n % each:
break
            if each == n_range[-1]:
print("> " + str(n))
return True
n = n + 1
return False
def smallest_multiple_new(n_range):
primes = []
prime_factors_list = []
for each in n_range:
if is_prime(each):
primes.append(each)
prime_factors_list.append(prime_factors(each))
primes_per_range_item = {}
for prime in primes:
primes_per_range_item[prime] = 0
for each in prime_factors_list:
if primes_per_range_item.get(prime) < each.count(prime):
primes_per_range_item[prime] = each.count(prime)
num = 1
for each in [pow(k, v) for (k, v) in primes_per_range_item.items()]:
num *= each
print(num)
return True
class TestCase(unittest.TestCase):
# @unittest.skip
def test_smallest_multiple(self):
self.assertEqual(smallest_multiple_new(range(2, 11)), True) # [2, 13]
self.assertEqual(smallest_multiple_new(range(2, 21)), True) # [2, 13]
unittest.main()
|
[
"wiese@sipgate.de"
] |
wiese@sipgate.de
|
9e36dee4daba7a090054dbc2c153be60056ec165
|
99f9b73530de837297f095689b250b8ebda8f836
|
/project euler/prob1.py
|
b3fba06d1275ea9ae970cd68d930db97fe063ef9
|
[] |
no_license
|
ManimaranN/Competitive_solutions
|
18cfcf648d85ac4c253690348c180836257cb6d4
|
34f49f9b05b674796335125f7087c4393c05d886
|
refs/heads/master
| 2020-03-25T05:18:32.165026
| 2018-08-07T16:24:59
| 2018-08-07T16:24:59
| 143,440,103
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
# Project Euler problem 1: sum of all multiples of 3 or 5 below 1000
multiples = []
for i in range(3, 1000):
    if i % 3 == 0 or i % 5 == 0:
        multiples.append(i)
add = sum(multiples)
print("sum :", add)
|
[
"manimaran.n36@gmail.com"
] |
manimaran.n36@gmail.com
|
a65dddcf69a9be612371a4aa3fd4b4d9a448c0ba
|
dea03a31e322a06d743b48d714389724591dd45d
|
/testProject/app/migrations/0002_auto_20191017_2144.py
|
d424e86a3efcdb901d835ff701542334e57e3659
|
[] |
no_license
|
alanpolimentes/test
|
c5d4f50148649e047c2e9bdca502cb83ad8ff36b
|
d3c0d40d3292cefd6241e2f9ffe930a701ceaa33
|
refs/heads/master
| 2020-08-17T00:56:20.518223
| 2019-10-18T22:55:58
| 2019-10-18T22:55:58
| 215,583,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,624
|
py
|
# Generated by Django 2.2.6 on 2019-10-17 21:44
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='categoria',
name='id',
field=models.UUIDField(default=uuid.UUID('0837b07f-e94a-4380-abfb-39ea475d3ecd'), editable=False, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='marca',
name='id',
field=models.UUIDField(default=uuid.UUID('1e18fdd1-ac3b-4222-8ee0-1f4922c7da29'), editable=False, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='producto',
name='id',
field=models.UUIDField(default=uuid.UUID('6c4089c2-f7f9-4544-a3af-748b3730e3db'), editable=False, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='solicitante',
name='id',
field=models.UUIDField(default=uuid.UUID('0340a3e8-c4df-4839-8550-1a13238a6a81'), editable=False, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='solicitud',
name='cantidad',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='subcategoria',
name='id',
field=models.UUIDField(default=uuid.UUID('43c349e0-b97e-4c2f-a968-c27c91c1845e'), editable=False, primary_key=True, serialize=False),
),
]
|
[
"alan.garcia@polimentes.mx"
] |
alan.garcia@polimentes.mx
|
c606b6292b29221a89c5a63f93c792418b45da08
|
d58ab1038a1fb412a45c7e471ec3bcbde1ce66a2
|
/theZoo/pipelines.py
|
7133a4d2045de932e6fb358a6c45da9dfb1a3251
|
[
"MIT"
] |
permissive
|
webclinic017/Tarantula
|
af316134bead2694a4501149def06aa2a8876420
|
1eb10c7d588493fabee0f8cf5269c737389dd193
|
refs/heads/main
| 2023-08-24T05:06:22.976241
| 2021-10-27T18:35:27
| 2021-10-27T18:35:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,582
|
py
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
# from itemadapter import ItemAdapter
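# Example settings.py entry enabling this pipeline (the priority value 300 is an assumption;
# any integer ordering the project's pipelines works):
# ITEM_PIPELINES = {"theZoo.pipelines.ThezooPipeline": 300}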
from sqlalchemy.orm import sessionmaker
from theZoo.models import Items, create_items_table, db_connect
from scrapy.exporters import JsonItemExporter
class ThezooPipeline:
def __init__(self):
"""
Initializes database connection and sessionmaker.
Creates items table.
"""
engine = db_connect()
create_items_table(engine)
self.Session = sessionmaker(bind=engine)
file = None
def open_spider(self, spider):
self.file = open('item.json', 'wb')
self.exporter = JsonItemExporter(self.file)
self.exporter.start_exporting()
def close_spider(self, spider):
self.exporter.finish_exporting()
self.file.close()
def process_item(self, item, spider):
"""
        Here we process the item and store it in the database
"""
self.exporter.export_item(item)
session = self.Session()
# instance = session.query(Items).filter_by(**item).one_or_none()
# if instance:
# return instance
scrape_item = Items(**item)
try:
session.add(scrape_item)
session.commit()
except:
session.rollback()
raise
finally:
session.close()
return item
|
[
"georgereyes103@gmail.com"
] |
georgereyes103@gmail.com
|
b38a5417f4cf586733ab782b41c420ea59c10d53
|
6a084a2df2869ce3ad565610cbf92eccf00a233e
|
/states/postgres_user.py
|
c65e8d42284fe1a2a9ce2c6f70f436a95ff64235
|
[] |
no_license
|
ltxin/saltstack
|
95b5356715cc918afec378e2926d9f9a1c7a85d5
|
30a493ef5e46bd7629c8ba400e559dab023c1431
|
refs/heads/master
| 2021-01-16T17:52:56.939714
| 2017-08-11T10:13:41
| 2017-08-11T10:13:41
| 100,019,324
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,166
|
py
|
# -*- coding: utf-8 -*-
'''
Management of PostgreSQL users (roles)
======================================
The postgres_users module is used to create and manage Postgres users.

.. code-block:: yaml

    frank:
      postgres_user.present
'''
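# A fuller state sketch (illustrative values only; the parameter names below are the ones
# documented in `present`):
#
#   frank:
#     postgres_user.present:
#       - login: True
#       - createdb: True
#       - password: changeme
#       - groups: admins,devs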
from __future__ import absolute_import
# Import Python libs
# Import salt libs
import logging
# Salt imports
from salt.modules import postgres
import salt.ext.six as six
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if the postgres module is present
'''
if 'postgres.user_exists' not in __salt__:
return (False, 'Unable to load postgres module. Make sure `postgres.bins_dir` is set.')
return True
def present(name,
createdb=None,
createroles=None,
createuser=None,
encrypted=None,
superuser=None,
replication=None,
inherit=None,
login=None,
password=None,
default_password=None,
refresh_password=None,
groups=None,
user=None,
maintenance_db=None,
db_password=None,
db_host=None,
db_port=None,
db_user=None):
'''
Ensure that the named user is present with the specified privileges
Please note that the user/group notion in postgresql is just abstract, we
    have roles, where users can be seen as roles with the LOGIN privilege
    and groups as the others.
name
The name of the system user to manage.
createdb
Is the user allowed to create databases?
createroles
Is the user allowed to create other users?
createuser
Alias to create roles
encrypted
Should the password be encrypted in the system catalog?
login
Should the group have login perm
inherit
Should the group inherit permissions
superuser
Should the new user be a "superuser"
replication
Should the new user be allowed to initiate streaming replication
password
The system user's password. It can be either a plain string or a
md5 postgresql hashed password::
            'md5{MD5OF({password}{role})}'
If encrypted is None or True, the password will be automatically
encrypted to the previous
format if it is not already done.
    default_password
The password used only when creating the user, unless password is set.
.. versionadded:: 2016.3.0
refresh_password
Password refresh flag
        Boolean attribute to specify whether the password comparison check
should be performed.
If refresh_password is ``True``, the password will be automatically
updated without extra password change check.
This behaviour makes it possible to execute in environments without
superuser access available, e.g. Amazon RDS for PostgreSQL
groups
A string of comma separated groups the user should be in
user
System user all operations should be performed on behalf of
.. versionadded:: 0.17.0
db_user
        Postgres database username, if different from config or default.
db_password
Postgres user's password, if any password, for a specified db_user.
db_host
Postgres database host, if different from config or default.
db_port
Postgres database port, if different from config or default.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'User {0} is already present'.format(name)}
if createuser:
createroles = True
# default to encrypted passwords
if encrypted is not False:
encrypted = postgres._DEFAULT_PASSWORDS_ENCRYPTION
    # encrypt the password if necessary and not already encrypted
password = postgres._maybe_encrypt_password(name,
password,
encrypted=encrypted)
if default_password is not None:
default_password = postgres._maybe_encrypt_password(name,
default_password,
encrypted=encrypted)
db_args = {
'maintenance_db': maintenance_db,
'runas': user,
'host': db_host,
'user': db_user,
'port': db_port,
'password': db_password,
}
# check if user exists
mode = 'create'
user_attr = __salt__['postgres.role_get'](
name, return_password=not refresh_password, **db_args)
if user_attr is not None:
mode = 'update'
# The user is not present, make it!
cret = None
update = {}
if mode == 'update':
user_groups = user_attr.get('groups', [])
if (
createdb is not None
and user_attr['can create databases'] != createdb
):
update['createdb'] = createdb
if (
inherit is not None
and user_attr['inherits privileges'] != inherit
):
update['inherit'] = inherit
if login is not None and user_attr['can login'] != login:
update['login'] = login
if (
createroles is not None
and user_attr['can create roles'] != createroles
):
update['createroles'] = createroles
if (
replication is not None
and user_attr['replication'] != replication
):
update['replication'] = replication
if superuser is not None and user_attr['superuser'] != superuser:
update['superuser'] = superuser
if password is not None and (refresh_password or user_attr['password'] != password):
update['password'] = True
if groups is not None:
lgroups = groups
if isinstance(groups, (six.string_types, six.text_type)):
lgroups = lgroups.split(',')
if isinstance(lgroups, list):
missing_groups = [a for a in lgroups if a not in user_groups]
if missing_groups:
update['groups'] = missing_groups
if mode == 'create' and password is None:
password = default_password
if mode == 'create' or (mode == 'update' and update):
if __opts__['test']:
if update:
ret['changes'][name] = update
ret['result'] = None
ret['comment'] = 'User {0} is set to be {1}d'.format(name, mode)
return ret
cret = __salt__['postgres.user_{0}'.format(mode)](
username=name,
createdb=createdb,
createroles=createroles,
encrypted=encrypted,
superuser=superuser,
login=login,
inherit=inherit,
replication=replication,
rolepassword=password,
groups=groups,
**db_args)
else:
cret = None
if cret:
ret['comment'] = 'The user {0} has been {1}d'.format(name, mode)
if update:
ret['changes'][name] = update
else:
ret['changes'][name] = 'Present'
elif cret is not None:
ret['comment'] = 'Failed to create user {0}'.format(name)
ret['result'] = False
else:
ret['result'] = True
return ret
def absent(name,
user=None,
maintenance_db=None,
db_password=None,
db_host=None,
db_port=None,
db_user=None):
'''
Ensure that the named user is absent
name
The username of the user to remove
user
System user all operations should be performed on behalf of
.. versionadded:: 0.17.0
db_user
database username if different from config or default
db_password
user password if any password for a specified user
db_host
Database host if different from config or default
db_port
Database port if different from config or default
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
db_args = {
'maintenance_db': maintenance_db,
'runas': user,
'host': db_host,
'user': db_user,
'port': db_port,
'password': db_password,
}
# check if user exists and remove it
if __salt__['postgres.user_exists'](name, **db_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'User {0} is set to be removed'.format(name)
return ret
if __salt__['postgres.user_remove'](name, **db_args):
ret['comment'] = 'User {0} has been removed'.format(name)
ret['changes'][name] = 'Absent'
return ret
else:
ret['result'] = False
ret['comment'] = 'User {0} failed to be removed'.format(name)
return ret
else:
ret['comment'] = 'User {0} is not present, so it cannot ' \
'be removed'.format(name)
return ret
|
[
"it_ltxin@163.com"
] |
it_ltxin@163.com
|
fd88c9382e2933697e301ad7e1c6fb31402f8d9c
|
9f91d88b96ce10aadd0759132330a6b353b5cbe2
|
/LeetCode/RE_add-two-numbers.py
|
dcd4373bac4d9c40521795a20dd62e5776694ff5
|
[] |
no_license
|
shivanishimpi/CP
|
b1c9bbace29892fa79183a941655ba216a79d1da
|
f74197fde545c90a8339f162adae532478ff451c
|
refs/heads/main
| 2023-09-01T16:02:39.116495
| 2020-12-31T12:30:39
| 2020-12-31T12:30:39
| 308,958,764
| 2
| 0
| null | 2021-10-09T18:49:44
| 2020-10-31T19:39:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,906
|
py
|
"""
You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order, and each of their nodes contains a single digit. Add the two numbers and return the sum as a linked list.
You may assume the two numbers do not contain any leading zero, except the number 0 itself.
"""
#RUNTIME ERROR
#CORRECT OUTPUTS
class Node:
# Singly linked node
def __init__(self, data=None):
self.data = data
self.next = None
class singly_linked_list:
def __init__(self):
        # Create an empty list
self.tail = None
self.head = None
self.count = 0
def iterate_item(self):
# Iterate the list.
current_item = self.tail
while current_item:
val = current_item.data
current_item = current_item.next
yield val
def append_item(self, data):
#Append items on the list
node = Node(data)
if self.head:
self.head.next = node
self.head = node
else:
self.tail = node
self.head = node
self.count += 1
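# Hedged sketch, independent of the script below: the usual way to add the two numbers is to
# walk both little-endian digit lists in parallel while propagating a carry, instead of
# rebuilding whole integers first.
def add_digit_lists(a, b):
    """Add two reversed digit lists, e.g. [2, 4, 3] + [5, 6, 4] -> [7, 0, 8] (342 + 465 = 807)."""
    result, carry, i = [], 0, 0
    while i < len(a) or i < len(b) or carry:
        total = carry
        if i < len(a):
            total += a[i]
        if i < len(b):
            total += b[i]
        result.append(total % 10)
        carry = total // 10
        i += 1
    return result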
for _ in range(int(input())):
l1 = list(map(int, input().split(' ')))
l2 = list(map(int, input().split(' ')))
l1 = l1[::-1]
l2 = l2[::-1]
#if l1==l2:
#addList = [int(l1[i])+int(l2[i]) for i in range(len(l2)-1)]
#else:
# addList = [int(l1[i])+int(l2[i]) for i in range(len(l1)-1)]
#print(int(l1), int(l2))
#print(l1)
#print(l2)
    # Join the reversed digit lists into integers; str.join handles the whole list, no loop needed
    val1 = ''.join(map(str, l1))
    val2 = ''.join(map(str, l2))
addedNums = int(val1)+int(val2)
lis = list(str(addedNums)[::-1])
ints = [int(i) for i in lis]
print(ints)
items= singly_linked_list()
for i in ints:
items.append_item(i)
for val in items.iterate_item():
print(val)
|
[
"shivanishimpi9@gmail.com"
] |
shivanishimpi9@gmail.com
|
c19420c7182d6d7cac399f8ef28bbe96ae2b4058
|
e00870366e2db04dfe739a5b1b17e9682de6a129
|
/Geometric/Graphics/cube_model.py
|
59859edf4ef6724693cc29e8fae753ddb936a2cc
|
[] |
no_license
|
jwatson-CO-edu/py_toybox
|
06b0243626fbe0a136b1fa4f9a74783321ee1601
|
7f3b2aaeb24e41002e9dee2f2af669006e1cbd5c
|
refs/heads/master
| 2022-03-25T05:11:55.724434
| 2022-01-27T21:07:26
| 2022-01-27T21:07:26
| 230,351,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,535
|
py
|
#! /usr/bin/env python
#
# <one line to give the program's name and a brief idea of what it does.>
# Copyright (C) 2001 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""Loads a simple model format and draws it with opengl.
allows rotations and whatnot based on simple cursor input"""
__version__ = '0.05'
__date__ = '2002/05/23'
__author__ = 'Michael Urman (mu on irc.openprojects.net)'
import sys, math
try:
import pygame
from pygame.locals import *
from OpenGL.GL import *
except:
print 'model requires pygame and pyopengl'
raise SystemExit
try:
from OpenGL.GLU import *
GLU = 1
except:
print "Warning: OpenGL.GLU did not import correctly."
GLU = None
if len(sys.argv) < 2:
print 'Usage: model.py model.mf'
raise SystemExit
import Numeric as N
import types
def _cross(a, b):
return N.array((\
a[1]*b[2]-a[2]*b[1], \
a[2]*b[0]-a[0]*b[2], \
a[0]*b[1]-a[1]*b[0]))
class Quaternion:
def __init__(q, quat=None):
"""Quaternion(q, quat=None) -> Quaternion
Creates a new "pure" quaternion (of no rotation). If a quaternion
is passed in, it is copied. If a tuple of the format (degrees, (x,
y, z)) or (x, y, z, w) is passed in, it is turned into a
quaternion."""
if quat:
if isinstance(quat, Quaternion):
q.r = quat.r
q.v = quat.v[:]
elif isinstance(quat, types.TupleType):
if len(quat) == 4:
q.v = N.array(quat[0:3])
q.r = quat[3]
elif len(quat) == 2 and isinstance(quat[1], types.TupleType)\
and len(quat[1]) == 3:
angle = quat[0] * math.pi/360
q.r = math.cos(angle)
sin_a = math.sin(angle)
q.v = sin_a * N.array(quat[1])
else:
raise TypeError("Invalid tuple for argument 2")
else:
raise TypeError("Argument 2 must be a tuple")
else:
q.v = N.array((1,0,0))
q.r = 0
def rotate(self, angle, axis):
"""rotate(self, angle, axis) -> self
Rotate Quaternion self by angle (degrees) around axis (x, y, z) and
        return self for easy chaining"""
q = Quaternion((angle,axis)) * self
self.r = q.r
        self.v = q.v
        return self
def __mul__(q, o):
"""Quaternion *= Quaternion -> Quaternion
Quaternion *= float -> Quaternion
Multiplies a Quaternion by a constant or by another Quaternion.
Remember to do (b * a) for a Quaternion representation of a rotation
by a then by b. Not Commutative!"""
if isinstance(o, Quaternion):
r = q.r; v = q.v
s = o.r; w = o.v
angle = r*s - N.dot(v,w)
vec = r*w + s*v + _cross(v,w)
n = math.sqrt(angle*angle + N.dot(vec,vec))
return Quaternion((vec[0]/n, vec[1]/n, vec[2]/n, angle/n))
else:
return Quaternion((q.v[0]*o, q.v[1]*o, q.v[2]*o, q.r*o))
def __imul__(q, o):
"""Quaternion *= Quaternion -> None
Quaternion *= float -> None
Multiplies a Quaternion in place by a constant or by another
Quaternion. Remember to do (b * a) for a Quaternion representation
of a rotation by a then by b. Not Commutative!"""
if isinstance(o, Quaternion):
r = q.r; v = q.v
s = o.r; w = o.v
q.r = r*s - N.dot(v,w)
q.v = r*w + s*v + _cross(v,w)
q.normalize()
else:
q.r *= o
q.v *= o
return q
def __abs__(q):
"""abs(Quaternion) -> float
        Returns the magnitude of the Quaternion = sqrt(x*x+y*y+z*z+w*w)"""
return math.sqrt(q.r*q.r + N.dot(q.v,q.v))
def normalize(q):
"""normalize(q) -> q
        Normalizes the Quaternion such that the magnitude is 1.0"""
n = abs(q)
if n: q *= (1.0/n)
return q
def angle(q):
"""angle(q) -> float
Return the angle of rotation (in degrees) represented by q"""
return math.acos(q.r)*360/math.pi
def axis(q):
"""axis(q) -> [x,y,z]
Returns a (Numeric) array of the axis of rotation represented by q.
Normalizes the vector to have magnitude of 1.0"""
n = math.sqrt(N.dot(q.v,q.v))
if not n: n = 1.0
return q.v / n
def __repr__(q):
"""repr(Quaternion) -> string
Return a string of the format '<w [x y z]>' (direct Quaternion
values)"""
return '<%f %s>'%(q.r, q.v)
def __str__(q):
"""str(Quaternion) -> string
Return a string of the format '<angle (x y z)>' (angle in degrees
and normalized axis of rotation)"""
ax = q.axis()
return '<%0.2f (%0.2f %0.2f %0.2f)>'%(q.angle(), ax[0], ax[1], ax[2])
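# Composition reminder for the class above: a rotation by `a` followed by `b` is written b * a,
# e.g. Quaternion((90, (0, 1, 0))) * Quaternion((90, (1, 0, 0))) applies the X-axis turn first.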
class ModelNode:
"""Node base class for modeling system"""
def __init__(self, *children):
if children: self._children = children
else: self._children = []
def __repr__(self):
return '<ModelNode id %s>'%id(self)
def draw(self, *args, **kvargs):
for c in self._children: c.draw(*args, **kvargs)
def add(self, *children):
"""add(self, children) -> None
Add all children under the current Node"""
self._children.extend(children)
class Transform(ModelNode):
"""Transformation nodes for hierarchical models"""
def __init__(self, translate=None, rotate=None, scale=None):
ModelNode.__init__(self)
self._x_translate = translate or [0,0,0]
self._x_rotate = rotate or Quaternion((0, (1,0,0)))
self._x_scale = scale or [1,1,1]
def draw(self, *args, **kvargs):
"""draw(self, *args, **kvargs) -> None
Applies transformations and calls children's draw() routine"""
glPushMatrix()
glTranslate(*self._x_translate)
glRotatef(self._x_rotate.angle(), *self._x_rotate.axis())
glScale(*self._x_scale)
ModelNode.draw(self, *args, **kvargs)
glPopMatrix()
def rotate(self, angle, axis):
"""rotate(self, angle, axis) -> None
Rotate model by angle around the axis (x,y,z)"""
# normalize axis
al = 1/math.sqrt(axis[0]*axis[0]+axis[1]*axis[1]+axis[2]*axis[2])
axis = axis[0] * al, axis[1]*al, axis[2]*al
self._x_rotate.rotate(angle, axis)
return self._x_rotate
def translate(self, delta):
"""translate(self, delta) -> None
Translate model by delta (x,y,z)"""
self._x_translate[0] += delta[0]
self._x_translate[1] += delta[1]
self._x_translate[2] += delta[2]
return self._x_translate[:]
def scale(self, factor):
"""scale(self, factor) -> None
Scale model by factor (x,y,z)"""
if factor: self._x_scale = [self._x_scale[i]*factor[i] for i in range(3)]
return self._x_scale[:]
class SMF(ModelNode):
"""Handles loading and drawing of a simple model format"""
def __init__(self, filename=None, calcnormals=None):
"""SMF([filename]) optionally loads model stored in passed filename"""
ModelNode.__init__(self)
self.vertices = [] # list of vertices
self.colors = [] # corresponding colors
self.faces = [] # list of references to vertices
self.normals = [] # normals correspond to each face
self.texture = None
self.texturecoords = [] # S,T corresponds to each vertex
self.usedrawlist = None
self.drawlist = None
if filename:
f = open(filename)
linecount = 0
for line in f.xreadlines():
linecount += 1
items = line.split()
# v X Y Z defines a vertex at (x, y, z)
if len(items) == 4 and items[0] == 'v':
self.vertices.append(map(lambda x:float(x), items[1:4]))
# f A B C defines a face using vertices A, B, C
elif len(items) == 4 and items[0] == 'f':
self.faces.append(map(lambda x:int(x)-1, items[1:4]))
# c R G B defines a color for corresponding vertex
elif len(items) == 4 and items[0] == 'c':
self.colors.append(map(lambda x:float(x), items[1:4]))
# t S T defines a texture coordinate for corresponding vertex
elif len(items) == 3 and items[0] == 't':
self.texturecoords.append(map(lambda x:float(x), items[1:3]))
# t filename defines a texture for the model
# should be 2**k x 2**k pixels
elif len(items) == 2 and items[0] == 't':
if self.texture:
raise RuntimeError("Can't handle multiple textures")
self.texture = items[1]
elif line[0] == '#' or len(items) == 0:
pass
else:
raise RuntimeError("Invalid syntax on line %d '%s'"%(linecount, line))
if self.texture:
if not GLU:
raise NotImplementedError("textures require mipmaps require OpenGL.GLU")
# load and prepare texture image for opengl
img = pygame.image.load(self.texture)
w, h = img.get_width(), img.get_height()
rgb = pygame.image.tostring(img, "RGB", 0)
#assign a texture
self.textureid = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, self.textureid)
#glPixelStorei(GL_UNPACK_ALIGNMENT,1)
#build MIPMAP levels
gluBuild2DMipmaps(GL_TEXTURE_2D, GL_RGB, w, h, GL_RGB, GL_UNSIGNED_BYTE, rgb)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
def draw(self, *args, **kvargs):
"""Draw the model to the waiting screen"""
if kvargs.has_key('wireframe') and kvargs['wireframe']:
for face in self.faces:
glBegin(GL_LINE_LOOP)
for vert in face:
glColor3fv(self.colors[vert])
glVertex3fv(self.vertices[vert])
glEnd()
elif self.usedrawlist:
glCallList(self.drawlist)
else:
if self.texture:
glPushAttrib(GL_ENABLE_BIT) # save old enables
glColor4f(1,1,1,1)
glEnable(GL_TEXTURE_2D)
glBindTexture(GL_TEXTURE_2D, self.textureid)
glBegin(GL_TRIANGLES)
for face in self.faces:
for vert in face:
glTexCoord2fv(self.texturecoords[vert])
glVertex3fv(self.vertices[vert])
glEnd()
glPopAttrib()
else:
glBegin(GL_TRIANGLES)
for face in self.faces:
for vert in face:
glColor3fv(self.colors[vert])
glVertex3fv(self.vertices[vert])
glEnd()
def build_display_list(self):
"""Try to optimize the draw routine by using a display list"""
self.drawlist = glGenLists(1)
glNewList(self.drawlist, GL_COMPILE)
self.draw()
glEndList()
self.usedrawlist = 1
class OGLSprite:
"""Implement the ugly details of "blitting" to OpenGL"""
def __init__(self, surf, rect=None, mipmap=None):
"""OGLSprite(self, surf, rect=None) -> OGLSprite
Create a drawable texture out of a given surface."""
if not rect: rect = surf.get_rect()
w, h = surf.get_width(), surf.get_height()
w2, h2 = 1, 1
while w2 < w: w2 <<= 1
while h2 < h: h2 <<= 1
#surfr = pygame.surfarray.pixels3d(surf)
#surfa = pygame.surfarray.alpha(surf)
img = pygame.Surface((w2, h2), SRCALPHA, surf)
#imgr = pygame.surfarray.pixels3d(img)
#imga = pygame.surfarray.pixels_alpha(img)
#putmask(imgr,
#putmask(imga,
img.blit(surf, (0,h2-h), rect)
rgba = pygame.image.tostring(img, "RGBA", 0)
# prove that blitting sucks?
#print "0:",surf.get_at((0,0))
#print "1:",img.get_at((0,0))
#assign a texture
texid = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, texid)
#glPixelStorei(GL_UNPACK_ALIGNMENT,1)
if mipmap:
if not GLU:
raise NotImplementedError("OGLSprite mipmaps require OpenGL.GLU")
            #build MIPMAP levels. This is another slow bit
gluBuild2DMipmaps(GL_TEXTURE_2D, GL_RGBA, w2, h2, GL_RGBA, GL_UNSIGNED_BYTE, rgba)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
else:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w2, h2, 0, GL_RGBA, GL_UNSIGNED_BYTE, rgba)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
self.mipmap = mipmap
self.srcsize = w, h
self.texsize = w2, h2
self.coords = float(w)/w2, float(h)/h2
self.texid = texid
#print "TEX", self.srcsize, self.texsize, self.coords
def update(self, surf, rect=None):
"""update(self, surf, rect=None) -> None
"""
if self.mipmap:
raise TypeError("Cannot update a mipmap enabled OGLSprite")
if not rect: rect = surf.get_rect()
w, h = surf.get_width(), surf.get_height()
w2, h2 = 1, 1
while w2 < w: w2 <<= 1
while h2 < h: h2 <<= 1
img = pygame.Surface((w2, h2), SRCALPHA, surf)
img.blit(surf, (0,h2-h), rect)
rgba = pygame.image.tostring(img, "RGBA", 0)
glBindTexture(GL_TEXTURE_2D, self.texid)
if 'glTexSubImage2D' in dir() \
and w2 <= self.texsize[0] and h2 <= self.texsize[1]:
# untested; i suspect it doesn't work
w2, h2 = self.texsize
glTexSubImage2D(GL_TEXTURE_RECTANGLE_EXT, 0,
0, 0, w2, h2, GL_RGBA, GL_UNSIGNED_BYTE, rgba);
if (w, h) != self.srcsize:
self.coords = float(w)/w2, float(h)/h2
else:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA,
w2, h2, 0, GL_RGBA, GL_UNSIGNED_BYTE, rgba)
self.coords = float(w)/w2, float(h)/h2
self.texsize = w2, h2
self.srcsize = w, h
#print "TEX", self.srcsize, self.texsize, self.coords
def blit_at(self, *rects):
"""blit_at(self, *rects) -> self
        Draw the texture at the supplied position(s). If width and height are
        not given in the rect tuple, the original size is used (just like you'd
expect). Returns self so ogs.enter().blit().exit() works"""
for rect in rects:
x0, y0 = rect[0:2]
try:
x1, y1 = x0 + rect[2], y0 + rect[3]
except IndexError:
x1, y1 = x0 + self.srcsize[0] - 1, y0 + self.srcsize[1] - 1
glBindTexture(GL_TEXTURE_2D, self.texid)
glBegin(GL_TRIANGLE_STRIP)
glTexCoord2f(0, 0); glVertex2f(x0, y0)
glTexCoord2f(self.coords[0], 0); glVertex2f(x1, y0)
glTexCoord2f(0, self.coords[1]); glVertex2f(x0, y1)
glTexCoord2f(self.coords[0], self.coords[1]); glVertex2f(x1, y1)
glEnd()
return self
def enter(self):
"""enter(self) -> self
Set up OpenGL for drawing textures; do this once per batch of
textures. Returns self so ogs.enter().blit().exit() works"""
glPushAttrib(GL_ENABLE_BIT) # save old enables
glDisable(GL_DEPTH_TEST)
glDisable(GL_CULL_FACE)
glColor4f(1,1,1,1)
glEnable(GL_TEXTURE_2D)
# XXX: in pre pygame1.5, there is no proper alpha, so this makes
# the entire texture transparent. in 1.5 and forward, it works.
if pygame.version.ver >= '1.4.9':
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
#glEnable(GL_ALPHA_TEST)
#glAlphaFunc(GL_GREATER, 0.5)
glMatrixMode(GL_PROJECTION)
glPushMatrix()
glLoadIdentity()
glOrtho(0.0, 640.0, 480.0, 0.0, 0.0, 1.0)
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
glLoadIdentity()
#glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)
return self
def exit(self):
"""exit(self) -> None
Return OpenGL to previous settings; do this once per batch."""
glMatrixMode(GL_PROJECTION)
glPopMatrix()
glMatrixMode(GL_MODELVIEW)
glPopMatrix()
glPopAttrib()
def get_width(self):
"""get_width(self) -> int"""
return self.srcsize[0]
def get_height(self):
"""get_height(self) -> int"""
return self.srcsize[1]
def main():
"""loads and runs the model display"""
pygame.display.init()
pygame.font.init()
screen = pygame.display.set_mode((640,480), OPENGL|DOUBLEBUF)
glEnable(GL_DEPTH_TEST) # use zbuffer
# boring camera setup
glMatrixMode(GL_PROJECTION)
if GLU:
gluPerspective(45.0, 640/480.0, 0.1, 100.0)
else:
f = 1.3 / (math.tan(45.0/2))
glMultMatrix((f*480/640, 0, 0, 0,
0, f, 0, 0,
0, 0, 100.1/-99.9, -1,
0, 0, 2*100*0.1/-99.9, 0))
glTranslatef(0.0, 0.0, -3.0)
glRotatef(25, 1,0,0)
cube = SMF(sys.argv[1])
model = Transform()
model.add(cube)
font = pygame.font.Font(None, 48)
text = OGLSprite(font.render('Pygame', 1, (255, 0, 0)))
update = 1
do_wireframe = 0
quit = 0
hide = 0
while 1:
events = [pygame.event.wait()]
if pygame.event.peek():
events.extend(pygame.event.get())
for event in events:
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
quit = 1
elif event.type == KEYDOWN and event.key == K_RETURN:
pygame.display.toggle_fullscreen()
elif event.type == KEYDOWN and event.key == K_t:
text.update(font.render('Pygame', 1, (255, 0, 0)))
update = 1
elif event.type == KEYDOWN and event.key == K_w:
do_wireframe = not do_wireframe
update = 1
elif event.type == VIDEOEXPOSE:
update = 1
elif event.type == MOUSEBUTTONDOWN:
if event.button in (1,3):
hide += 1
pygame.mouse.set_visible(0)
pygame.event.set_grab(1)
elif event.button == 2:
print 'building display list'
cube.build_display_list()
elif event.type == MOUSEBUTTONUP:
if event.button in (1,3):
hide -= 1
if not hide:
pygame.mouse.set_visible(1)
pygame.event.set_grab(0)
if event.type == MOUSEMOTION and not update:
if event.buttons[0]:
dx, dy = event.rel
dist = math.sqrt(dx*dx+dy*dy)
q = model.rotate(dist, (event.rel[1], event.rel[0], 0))
text.update(font.render(str(q), 1, (255, 127, 127)))
update = 1
if event.buttons[2]:
if pygame.key.get_mods() & KMOD_SHIFT:
s = 100 + event.rel[0]+event.rel[1]
s *= 0.01
x, y, z = model.scale((s,s,s))
text.update(font.render('<%0.2f %0.2f %0.2f>'%(x,y,z), 1, (126, 127, 255)))
else:
x, y, z = model.translate((event.rel[0]*0.02, -event.rel[1]*0.02, 0))
text.update(font.render('<%0.2f %0.2f %0.2f>'%(x,y,z), 1, (255, 127, 127)))
update = 1
if quit:
break
if update:
update = 0
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
model.draw(wireframe=do_wireframe)
text.enter()
text.blit_at(
(screen.get_width()-text.get_width()+1,
screen.get_height()-text.get_height()+1))
text.exit()
pygame.display.flip()
if __name__ == '__main__': main()
|
[
"james.watson-2@colorado.edu"
] |
james.watson-2@colorado.edu
|
76efc587e9870c67ce18c75d2a19291da2b320b2
|
d051b7c8e9375beea6e4ee1a3375d120fde178ac
|
/moveit_experimental/moveit_jog_arm/test/python_tests/vel_accel_limits/test_vel_accel_limits.py
|
2baf6a594a6baf04492bfe3afda8e1b276b15442
|
[
"BSD-3-Clause"
] |
permissive
|
anion0278/moveit
|
3e4a35f5c37efb1c79cf6c7e52e49c9e2c43bf94
|
7c733c576d34bada28ff8986b1f71f06712d34a6
|
refs/heads/master
| 2023-02-23T17:01:51.947763
| 2021-01-31T14:28:45
| 2021-01-31T14:28:45
| 267,310,801
| 0
| 0
|
BSD-3-Clause
| 2020-05-27T12:16:41
| 2020-05-27T12:16:40
| null |
UTF-8
|
Python
| false
| false
| 2,749
|
py
|
#!/usr/bin/env python
import time
import pytest
import rospy
from control_msgs.msg import JointJog
from trajectory_msgs.msg import JointTrajectory
# Import common Python test utilities
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import util
# Test that commands that are too fast are caught and flagged
# This can be run as part of a pytest, or like a normal ROS executable:
# rosrun moveit_jog_arm test_vel_accel_limits.py
JOINT_JOG_COMMAND_TOPIC = 'jog_server/joint_delta_jog_cmds'
COMMAND_OUT_TOPIC = 'jog_server/command'
# Check if jogger is initialized with this service
SERVICE_NAME = 'jog_server/change_drift_dimensions'
@pytest.fixture
def node():
return rospy.init_node('pytest', anonymous=True)
class JointJogCmd(object):
def __init__(self):
self._pub = rospy.Publisher(JOINT_JOG_COMMAND_TOPIC, JointJog, queue_size=1)
def send_joint_velocity_cmd(self, joint_pos):
jj = JointJog()
jj.header.stamp = rospy.Time.now()
jj.joint_names = ['joint_{}'.format(i) for i in range(len(joint_pos))]
jj.velocities = list(map(float, joint_pos))
self._pub.publish(jj)
def test_vel_limit(node):
# Test sending a joint command
assert util.wait_for_jogger_initialization(SERVICE_NAME)
received = []
sub = rospy.Subscriber(
COMMAND_OUT_TOPIC, JointTrajectory, lambda msg: received.append(msg)
)
joint_cmd = JointJogCmd()
TEST_DURATION = 1
PUBLISH_PERIOD = 0.01 # 'PUBLISH_PERIOD' from jog_arm config file
# Panda arm limit, from joint_limits.yaml
VELOCITY_LIMIT = rospy.get_param("/robot_description_planning/joint_limits/panda_joint1/max_velocity")
# Send a velocity command that exceeds the limit
velocities = [10 * VELOCITY_LIMIT]
# Send a command to start the jogger
joint_cmd.send_joint_velocity_cmd(velocities)
start_time = rospy.get_rostime()
received = []
while (rospy.get_rostime() - start_time).to_sec() < TEST_DURATION:
joint_cmd.send_joint_velocity_cmd(velocities)
time.sleep(0.1)
# Period of outgoing commands from the jogger, from yaml
JOGGER_COMMAND_PERIOD = rospy.get_param("/jog_server/publish_period")
# Should be no velocities greater than the limit
assert len(received) > 2
for msg_idx in range(1, len(received)):
velocity = \
(received[msg_idx].points[0].positions[0] - received[msg_idx - 1].points[0].positions[0]) / JOGGER_COMMAND_PERIOD
assert abs(velocity) <= VELOCITY_LIMIT
if __name__ == '__main__':
node = node()
test_vel_limit(node)
# TODO(andyz): add an acceleration limit test (the Panda joint_limits.yaml doesn't define acceleration limits)
|
[
"noreply@github.com"
] |
noreply@github.com
|
6f0cf4d61aa094e7e4958d5d2d42c7ee379e097f
|
942a82cd1e34cd8f57e1d7f3272e4086605256ee
|
/config/settings.py
|
4ab609f97c0680e52cc1f2490a6f0d441b5e6b02
|
[] |
no_license
|
hanieh-mav/SocialNetwork-with-drf
|
d451126f93e3735a8c9d6dbf714a8179785e15cc
|
d929704a3d9f26e1e0ca5d961a01ba7dd5c6bf84
|
refs/heads/main
| 2023-06-13T08:17:46.591597
| 2021-07-09T13:37:06
| 2021-07-09T13:37:06
| 353,754,846
| 2
| 0
| null | 2021-07-09T13:27:27
| 2021-04-01T16:04:26
|
Python
|
UTF-8
|
Python
| false
| false
| 4,482
|
py
|
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@81g)s8gw+7-84o%ks%*8&j$cbb+&m%(#)+e6getb5o40@vil)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'crispy_forms',
'posts.apps.PostsConfig',
'accounts.apps.AccountsConfig',
'postapi.apps.PostapiConfig',
'accountapi.apps.AccountapiConfig',
'rest_framework',
'rest_framework.authtoken',
'dj_rest_auth',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'dj_rest_auth.registration',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
#LOGIN_URL
LOGIN_URL = 'accounts:login'
LOGIN_REDIRECT_URL = 'posts:post-list'
#LOGOUT_URL
LOGOUT_REDIRECT_URL = 'posts:post-list'
STATIC_URL = '/static/'
#MEDIA
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
AUTH_USER_MODEL = 'accounts.User'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated'
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'dj_rest_auth.jwt_auth.JWTCookieAuthentication',
],
}
SITE_ID = 1
REST_USE_JWT = True
JWT_AUTH_COOKIE = 'my-app-auth'
JWT_AUTH_REFRESH_COOKIE = 'my-refresh-token'
#EMAIL SETTING
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'haniehproject.it@gmail.com'
EMAIL_HOST_PASSWORD = 'xxxxxxxxxxxxxxxxxxx'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
|
[
"h.mehdiabadi@gmail.com"
] |
h.mehdiabadi@gmail.com
|
37bf81f3ad11ff153ef7e0c65f8e73638bd8e747
|
76ae6d1194c4440b86eac56e1ed2d42f745e612c
|
/mcds_dcl2isa-pre-v1.py
|
c673f914ebc0520107f6229d628e42b73a175689
|
[] |
no_license
|
rheiland/mcds2isa
|
76a551df09233bd976268c44cf0fa7968f87c075
|
c0b1245fafd133701ff41fe12153543b73cb94e6
|
refs/heads/master
| 2021-07-21T00:11:43.103167
| 2019-08-27T17:23:19
| 2019-08-27T17:23:19
| 143,934,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,636
|
py
|
#
# mcds_dcl2isa.py - using a MultiCellDS digital cell line XML file, generate associated ISA-Tab files
#
# Input:
# a MultiCellDS digital cell line file <DCL-root-filename>.xml
# Output:
# 3 ISA files:
# i_<DCL-root-filename>.txt
# s_<DCL-root-filename>.txt
# a_<DCL-root-filename>.txt
#
# Author: Randy Heiland
# Date:
# v0.1 - May 2018
# v0.2 - Oct 2018 : add more tab sep_char in various rows
#
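# Example invocation (output names are derived from the input root filename):
#   python mcds_dcl2isa-pre-v1.py MCDS_L_0000000052.xml
#   --> i_MCDS_L_0000000052.txt, s_MCDS_L_0000000052.txt, a_MCDS_L_0000000052.txt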
import os
import sys
import re
import xml.etree.ElementTree as ET
from pathlib import Path # Python 3?
if (len(sys.argv) < 2):
print("Usage: " + sys.argv[0] + " <MultiCellDS Digital Cell Line XML file>")
sys.exit(0)
else:
xml_file = sys.argv[1]
# for testing, just set it
#xml_file = "MCDS_L_0000000052.xml"
header = '\
ONTOLOGY SOURCE REFERENCE\n\
Term Source Name "NCIT" "UO" "NCBITAXON" "EDDA"\n\
Term Source File "https://ncit.nci.nih.gov/ncitbrowser/" "https://bioportal.bioontology.org/ontologies/UO" "http://purl.obolibrary.org/obo/NCBITaxon_1" "http://bioportal.bioontology.org/ontologies/EDDA"\n\
Term Source Version "17.02d" "" "" "2.0"\n\
Term Source Description "NCI Thesaurus" "" "" "Evidence in Documents, Discovery, and Analytics (EDDA)"\
'
if not Path(xml_file).is_file():
    print(xml_file + ' does not exist!')
sys.exit(-1)
if (os.sep in xml_file):
xml_base_filename = xml_file[xml_file.rfind(os.sep)+1:]
else:
xml_base_filename = xml_file
investigation_filename = "i_" + xml_base_filename[:-4] + ".txt"
study_filename = "s_" + xml_base_filename[:-4] + ".txt"
assay_filename = "a_" + xml_base_filename[:-4] + ".txt"
#=======================================================================
fp = open(investigation_filename, 'w')
tree = ET.parse(xml_file) # TODO: relative path using env var?
xml_root = tree.getroot()
sep_char = '\t' # tab
fp.write(header + '\n')
fp.write('INVESTIGATION\n')
#print(xml_root.find(".//MultiCellDB").find(".//ID").text)
i_identifier = '"' + xml_root.find(".//metadata").find(".//ID").text + '"'
#i_title = '"' + xml_root.find(".//metadata").find(".//name").text + '"'
i_title = '"' + xml_root.find(".//metadata").find(".//name").text + ' Digital Cell Line"'
i_desc = '"' + xml_root.find(".//metadata").find(".//description").text + '"'
i_desc = re.sub('\t','',i_desc)
i_desc = re.sub('\n','',i_desc)
fp.write('Investigation Identifier' + sep_char + i_identifier + '\n')
fp.write('Investigation Title' + sep_char + i_title + '\n')
fp.write('Investigation Description' + sep_char + i_desc + '\n')
fp.write('Investigation Submission Date' + sep_char + '""\n')
fp.write('Investigation Public Release Date \t "" \n')
citation_str = '"' + re.sub('[\t\n]','',xml_root.find(".//citation").find(".//text").text) + '"' # remove all tabs and newlines
fp.write('Comment [MultiCellDS/cell_line/metadata/citation/text]' + sep_char + citation_str + '\n')
# TODO: check that "citation" exists first?
if (xml_root.find(".//citation").find(".//notes")):
fp.write('Comment [MultiCellDS/cell_line/metadata/citation/notes]' + sep_char + xml_root.find(".//citation").find(".//notes").text + '\n')
fp.write('INVESTIGATION PUBLICATIONS\n')
# Extract over all <PMID> in <data_origin> and <data_analysis>
#print('Investigation PubMed ID "21988888" "23084996" "22342935" ' )
# Extract <PMID> and <DOI> in all <data_origin> and <data_analysis>
# TODO? will we have matching # of each?
pmid = []
doi = []
url = []
uep = xml_root.find('.//data_origins') # uep = unique entry point
for elm in uep.findall('data_origin'):
# doi.append(elm.find('.//DOI').text)
doi_ptr = elm.find('.//DOI')
if (doi_ptr == None):
doi_value = ""
else:
doi_value = doi_ptr.text
doi.append(doi_value) # do we want to append "" if none??
# pmid.append(elm.find('.//PMID').text)
pmid_ptr = elm.find('.//PMID')
if (pmid_ptr == None):
pmid_value = ""
else:
pmid_value = pmid_ptr.text
pmid.append(pmid_value)
# pmid.append(pmid_value)
url_ptr = elm.find('.//URL')
if (url_ptr == None):
url_value = ""
else:
url_value = url_ptr.text
url.append(url_value)
#print("(post data_origin) pmid=",pmid)
#print("(post data_origin) url=",url)
uep = xml_root.find('.//metadata')
for elm in uep.findall('data_analysis'):
# print(' "' + el.find('.//PMID').text + '"', end='')
# doi.append(elm.find('.//DOI').text)
# pmid.append(elm.find('.//PMID').text)
doi_ptr = elm.find('.//DOI')
if (doi_ptr == None):
doi_value = ""
else:
doi_value = doi_ptr.text
doi.append(doi_value) # do we want to append "" if none??
# pmid.append(elm.find('.//PMID').text)
pmid_ptr = elm.find('.//PMID')
if (pmid_ptr == None):
pmid_value = ""
else:
pmid_value = pmid_ptr.text
pmid.append(pmid_value)
# pmid.append(pmid_value)
#print("(post data_analysis) pmid=",pmid)
sep_char_sq = sep_char + '"' # tab + single quote
pmid_str = ''
for elm in pmid:
pmid_str += sep_char + '"' + elm + '"'
fp.write('Investigation PubMed ID' + sep_char + pmid_str + '\n')
doi_str = ''
for elm in doi:
doi_str += sep_char + '"' + elm + '"'
fp.write('Investigation Publication DOI' + sep_char + doi_str + '\n')
empty_str = ''.join(sep_char + '""' for x in pmid)
fp.write('Investigation Publication Author List' + sep_char + empty_str + '\n')
fp.write('Investigation Publication Title' + sep_char + empty_str + '\n')
pub_status_str = ''.join('\t"Published"' for x in pmid)
pub_title_str = ''.join('\t""' for x in pmid)
fp.write('Investigation Publication Status' + sep_char + pub_status_str + '\n')
pub_status_TA_str = ''.join('\t"C19026"' for x in pmid)
fp.write('Investigation Publication Status Term Accession' + sep_char + pub_status_TA_str + '\n')
pub_status_TSR_str = ''.join('\t"NCIT"' for x in pmid)
fp.write('Investigation Publication Status Term Source REF' + sep_char + pub_status_TSR_str + '\n')
fp.write('INVESTIGATION CONTACTS\n')
fp.write('Investigation Person Last Name' + sep_char_sq + xml_root.find(".//current_contact").find(".//family-name").text + '"\t\n')
fp.write('Investigation Person First Name' + sep_char_sq + xml_root.find(".//current_contact").find(".//given-names").text + '"\n')
fp.write('Investigation Person Mid Initials' + sep_char + '""\n')
fp.write('Investigation Person Email' + sep_char_sq + xml_root.find(".//current_contact").find(".//email").text + '"\n')
fp.write('Investigation Person Phone' + sep_char + '""\n')
fp.write('Investigation Person Fax' + sep_char + '""\n')
fp.write('Investigation Person Address' + sep_char + '""\n')
fp.write('Investigation Person Affiliation' + sep_char_sq + xml_root.find(".//current_contact").find(".//organization-name").text +
', ' + xml_root.find(".//current_contact").find(".//department-name").text + '"\n')
fp.write('Investigation Person Roles' + sep_char + '""\n')
fp.write('Investigation Person Roles Term Accession Number' + sep_char + '""\n')
fp.write('Investigation Person Roles Term Source REF' + sep_char + '""\n')
fp.write('Comment[Investigation Person REF]' + sep_char + '""\n')
fp.write('STUDY\n')
fp.write('Study Identifier\t' + i_identifier + '\n')
fp.write('Study Title\t' + i_title + '\n')
fp.write('Study Description\t' + i_desc + '\n')
fp.write('Comment[Study Grant Number]\t""\n')
fp.write('Comment[Study Funding Agency]\t""\n')
fp.write('Study Submission Date\t""\n')
fp.write('Study Public Release Date\t""\n')
fp.write('Study File Name\t' + '"' + study_filename + '"\n')
fp.write('STUDY DESIGN DESCRIPTORS\n')
fp.write('Study Design Type\t""\n')
fp.write('Study Design Type Term Accession Number\t""\n')
fp.write('Study Design Type Term Source REF\t""\n')
# TODO? are these different than the previous pubs?
fp.write('STUDY PUBLICATIONS\n')
fp.write('Study PubMed ID' + sep_char + pmid_str + '\n')
fp.write('Study Publication DOI' + sep_char + doi_str + sep_char + '\n')
fp.write('Study Publication Author List' + sep_char + empty_str + '\n')
fp.write('Study Publication Title' + sep_char + pub_title_str + '\n')
fp.write('Study Publication Status' + sep_char + pub_status_str + sep_char + '\n')
fp.write('Study Publication Status Term Accession Number' + sep_char + pub_status_TA_str + sep_char + '\n')
fp.write('Study Publication Status Term Source REF' + sep_char + pub_status_TSR_str + '\n')
fp.write('STUDY FACTORS' + 3*sep_char + '\n')
fp.write('Study Factor Name\t"phenotype_dataset"\n')
fp.write('Study Factor Type\t""\n')
fp.write('Study Factor Type Term Accession Number\t""\n')
fp.write('Study Factor Type Term Source REF\t""\n')
#fp.write('Comment[phenotype_dataset_keywords] "viable; hypoxic; physioxia(standard); physioxia(breast); necrotic,chronic hypoxia"\n')
#fp.write('Comment[phenotype_dataset_keywords] "')
comment_str = 'Comment[phenotype_dataset_keywords]\t"'
uep = xml_root.find('.//cell_line')
for elm in uep.findall('phenotype_dataset'):
comment_str += elm.attrib['keywords'] + '; '
# print(comment_str)
fp.write(comment_str[:-2] + '"\n')
fp.write('STUDY ASSAYS\t\n')
fp.write('Study Assay Measurement Type\t""\n')
fp.write('Study Assay Measurement Type Term Accession Number\t""\n')
fp.write('Study Assay Measurement Type Term Source REF\t""\n')
fp.write('Study Assay Technology Type\t"Digital Cell Line"\n')
fp.write('Study Assay Technology Type Term Accession Number\t""\n')
fp.write('Study Assay Technology Type Term Source REF\t""\n')
fp.write('Study Assay Technology Platform\t""\n')
fp.write('Study Assay File Name\t' + '"' + assay_filename + '"\n')
fp.write('STUDY PROTOCOLS\t\n')
fp.write('Study Protocol Name\t"microenvironment.measurement"\n')
fp.write('Study Protocol Type\t""\n')
fp.write('Study Protocol Type Term Accession Number\t""\n')
fp.write('Study Protocol Type Term Source REF\t""\n')
fp.write('Study Protocol Description\t""\n')
fp.write('Study Protocol URI\t""\n')
fp.write('Study Protocol Version\t""\n')
#fp.write('Study Protocol Parameters Name "oxygen.partial_pressure; DCIS_cell_density(2D).surface_density; DCIS_cell_area_fraction.area_fraction; DCIS_cell_volume_fraction.volume_fraction"\n')
comment_str = 'Study Protocol Parameters Name\t"'
# TODO? search for all phenotype_dataset/microenvironment/domain/variables/...
uep = xml_root.find('.//variables')
if (uep):
for elm in uep.findall('variable'):
if ('type' in elm.attrib.keys()): # TODO: what's desired format if 'type' is missing?
comment_str += elm.attrib['name'] + '.' + elm.attrib['type'] + '; '
else:
comment_str += elm.attrib['name'] + '; '
# comment_str += '; '
# print(comment_str)
fp.write(comment_str[:-2] + '"\n')
semicolon_sep_empty_str = ''.join('; ' for x in pmid)
fp.write('Study Protocol Parameters Name Term Accession Number\t" ' + semicolon_sep_empty_str + ' "\n')
fp.write('Study Protocol Parameters Name Term Source REF\t" ' + semicolon_sep_empty_str + ' "\n')
fp.write('Study Protocol Components Name\t"' + semicolon_sep_empty_str + ' "\n')
fp.write('Study Protocol Components Type\t"' + semicolon_sep_empty_str + ' "\n')
fp.write('Study Protocol Components Type Term Accession Number\t"' + semicolon_sep_empty_str + ' "\n')
fp.write('Study Protocol Components Type Term Source REF\t"' + semicolon_sep_empty_str + ' "\n')
fp.write('STUDY CONTACTS\t\n')
fp.write('Study Person Last Name\t"' + xml_root.find(".//current_contact").find(".//family-name").text + '"\n')
fp.write('Study Person First Name\t"' + xml_root.find(".//current_contact").find(".//given-names").text + '"\n')
fp.write('Study Person Mid Initials\t""\n')
fp.write('Study Person Email\t"' + xml_root.find(".//current_contact").find(".//email").text + '"\n')
fp.write('Study Person Phone\t""\n')
fp.write('Study Person Fax\t""\n')
fp.write('Study Person Address\t""\n')
fp.write('Study Person Affiliation\t"' + xml_root.find(".//current_contact").find(".//organization-name").text +
', ' + xml_root.find(".//current_contact").find(".//department-name").text + '"\n')
fp.write('Study Person Roles\t""\n')
fp.write('Study Person Roles Term Accession Number\t""\n')
fp.write('Study Person Roles Term Source REF\t""\n')
fp.write('Comment[creator_orcid-id_family-name]\t"' + xml_root.find(".//creator").find(".//family-name").text + '"\n')
fp.write('Comment[creator_orcid-id_given-names]\t"' + xml_root.find(".//creator").find(".//given-names").text + '"\n')
fp.write('Comment[creator_orcid-id_email]\t"' + xml_root.find(".//creator").find(".//email").text + '"\n')
fp.write('Comment[creator_orcid-id_organization-name]\t"' + xml_root.find(".//creator").find(".//organization-name").text +
', ' + xml_root.find(".//creator").find(".//department-name").text + '"\n')
#curator_ptr = xml_root.find(".//curator").find(".//family-name").text + '"\n')
family_name = ""
given_names = ""
email = ""
org = ""
dept = ""
curator_ptr = xml_root.find(".//curator")
if (curator_ptr):
family_name_ptr = curator_ptr.find(".//family-name")
given_names_ptr = curator_ptr.find(".//given-names")
email_ptr = curator_ptr.find(".//email")
org_ptr = curator_ptr.find(".//organization-name")
dept_ptr = curator_ptr.find(".//department-name")
if (family_name_ptr):
family_name = family_name_ptr.find(".//family-name").text
if (given_names_ptr):
given_names = given_names_ptr.find(".//given-names").text
if (email_ptr):
email = email_ptr.find(".//email").text
if (org_ptr):
org = org_ptr.find(".//organization-name").text
if (dept_ptr):
dept = dept_ptr.find(".//department-name").text
#fp.write('Comment[curator_orcid-id_family-name]\t"' + xml_root.find(".//curator").find(".//family-name").text + '"\n')
fp.write('Comment[curator_orcid-id_family-name]\t"' + family_name + '"\n')
#fp.write('Comment[curator_orcid-id_given-names]\t"' + xml_root.find(".//curator").find(".//given-names").text + '"\n')
fp.write('Comment[curator_orcid-id_given-names]\t"' + given_names + '"\n')
#fp.write('Comment[curator_orcid-id_email]\t"' + xml_root.find(".//curator").find(".//email").text + '"\n')
fp.write('Comment[curator_orcid-id_email]\t"' + email + '"\n')
fp.write('Comment[curator_orcid-id_organization-name]\t"' + org + ', ' + dept + '"\n')
fp.write('Comment[last_modified_by_orcid-id_family-name]\t"' + xml_root.find(".//last_modified_by").find(".//family-name").text + '"\n')
fp.write('Comment[last_modified_by_orcid-id_given-names]\t"' + xml_root.find(".//last_modified_by").find(".//given-names").text + '"\n')
fp.write('Comment[last_modified_by_orcid-id_email]\t"' + xml_root.find(".//last_modified_by").find(".//email").text + '"\n')
fp.write('Comment[last_modified_by_orcid-id_organization-name]\t"' + xml_root.find(".//last_modified_by").find(".//organization-name").text +
', ' + xml_root.find(".//last_modified_by").find(".//department-name").text + '"\n')
fp.write('Comment[Study Person REF]' + sep_char + '""' + '\n')
fp.close()
print(' --> ' + investigation_filename)
#=======================================================================
fp = open(study_filename, 'w')
# row #1 (column titles)
fp.write('Source Name' + sep_char)
source_name = i_identifier[1:-1] + '.0'
uep = xml_root.find('.//data_origins') # uep = unique entry point
for elm in uep.findall('data_origin'):
for elm2 in elm.findall('citation'):
fp.write('Comment[citation]' + sep_char)
# TODO: why did I insert the following line?
# pmid_origin = elm.find('.//PMID').text
uep = xml_root.find('.//metadata')
for elm in uep.findall('data_analysis'):
for elm2 in elm.findall('citation'):
fp.write('Comment[citation]' + sep_char)
uep = xml_root.find('.//cell_origin')
cell_origin_characteristics = []
if (uep):
for elm in uep.getchildren():
fp.write('Characteristics[' + elm.tag + ']' + sep_char)
text_val = elm.text
text_val = ' '.join(text_val.split()) # strip out tabs and newlines
cell_origin_characteristics.append(text_val)
# print("cell_origin_characteristics----->",cell_origin_characteristics,"<-------")
fp.write('Factor Value[phenotype_dataset]' + sep_char + 'Sample Name\n')
# remaining rows
uep = xml_root.find('.//cell_line')
suffix = 0
for elm in uep.findall('phenotype_dataset'):
row_str = source_name + sep_char
# do we want a hierarchy of preferred citation types? (e.g., PMID,PMCID,DOI,URL)
if (len(pmid) > 0):
for p in pmid:
row_str += 'PMID: ' + p + sep_char
elif (len(url) > 0):
for p in url:
row_str += 'URL: ' + p + sep_char
# print("cell_origin_characteristics=",cell_origin_characteristics)
for c in cell_origin_characteristics:
row_str += c + sep_char
row_str += elm.attrib['keywords'] + sep_char + source_name + '.' + str(suffix)
suffix += 1
# print(row_str)
fp.write(row_str + '\n')
fp.close()
print(' --> ' + study_filename)
#=======================================================================
fp = open(assay_filename, 'w')
"""
Sample Name Protocol REF Parameter Value[oxygen.partial_pressure] Unit Parameter Value[DCIS_cell_density(2D).surface_density] Unit Parameter Value[DCIS_cell_area_fraction.area_fraction] Unit Parameter Value[DCIS_cell_volume_fraction.volume_fraction] Unit Data File
MCDS_L_0000000052.0.0 microenvironment.measurement 6.17 mmHg 0.00883 1/micron^2 0.8 dimensionless 0.8 dimensionless MCDS_L_0000000052.xml
MCDS_L_0000000052.0.1 microenvironment.measurement 8 mmHg MCDS_L_0000000052.xml
MCDS_L_0000000052.0.2 microenvironment.measurement 38 mmHg MCDS_L_0000000052.xml
MCDS_L_0000000052.0.3 microenvironment.measurement 52 mmHg MCDS_L_0000000052.xml
MCDS_L_0000000052.0.4 microenvironment.measurement 5 mmHg MCDS_L_0000000052.xml
"""
# We will do a two-pass approach:
# 1st pass: parse the first instance of the <variables> element to generate the header row.
# UPDATE: cannot assume the first instance of <variables> will be sufficient. The HUVEC data proves otherwise.
#
# Columns' titles
fp.write('Sample Name' + sep_char + 'Protocol REF' + sep_char )
uep = xml_root.find('.//variables') # TODO: also req: keywords="viable"?
# TODO: what to do if there are no <variables> elements at all
if (uep):
num_vars = 0
for elm in uep.findall('variable'):
if ('type' in elm.attrib.keys()): # TODO: what's desired format if 'type' is missing?
pval_str = elm.attrib['name'] + '.' + elm.attrib['type']
else:
pval_str = elm.attrib['name']
# pval_str = elm.attrib['name'] + '.' + elm.attrib['type']
fp.write('Parameter Value[' + pval_str + '] ' + sep_char + 'Unit' + sep_char)
num_vars += 1
fp.write('Data File\n')
#print('num_vars=',num_vars)
# 2nd pass: for each <phenotype_dataset>, each <variables>, and each <variable>, extract a row of relevant
# info to match the column headings.
count = 0
# TODO: am I making too many assumptions about elements - existence, ordering, etc.?
id = xml_root.find(".//metadata").find(".//ID").text
uep = xml_root.find('.//cell_line')
for elm in uep.findall('phenotype_dataset'):
vs = elm.find('.//variables')
# print("----- found <variables>, count=",count)
nvar = 0
# for ma in v.findall('material_amount'):
    if vs is not None:  # explicit None test; element truthiness is deprecated and is False for childless elements
comment_str = id + '.0.' + str(count) + '\t' + 'microenvironment.measurement'
# print(comment_str)
for v in vs.findall('variable'):
nvar += 1
# print(v.attrib['units'])
# print(v.find('.//material_amount').text)
# Need to strip out tabs here (sometimes)
text_val = v.find('.//material_amount').text
# print('------ text_val --->',text_val,'<---------')
text_val = ' '.join(text_val.split())
# print('------ text_val --->',text_val,'<---------')
if ('units' in v.attrib.keys()): # TODO: what's desired format if missing?
comment_str += sep_char + text_val + sep_char + v.attrib['units']
else:
comment_str += sep_char + text_val + sep_char + ""
# comment_str += sep_char + v.find('.//material_amount').text + sep_char + v.attrib['units']
# print(comment_str)
# print('nvar=',nvar)
fp.write(comment_str)
if (nvar == num_vars):
fp.write(sep_char)
else:
for idx in range(nvar,2*num_vars):
fp.write(sep_char)
# fp.write(comment_str + sep_char + xml_file + '\n')
# fp.write(xml_file + '\n')
# print("----- ",xml_base_filename, " + CR")
fp.write(xml_base_filename + '\n')
count += 1
else: # if no 'variables' present, just print minimal info
# comment_str = id + '.0.' + str(count) + '\t' + '' + '\t' + xml_file + '\n'
comment_str = id + '.0.' + str(count) + '\t' + '' + '\t' + xml_base_filename + '\n'
count += 1
fp.write(comment_str)
fp.close()
print(' --> ' + assay_filename)
|
[
"heiland@indiana.edu"
] |
heiland@indiana.edu
|
9619928a1ab7f5e6f1375ccdfbd3e23c15e65270
|
1296db988387cc1e6a14db76f777cf2cdfca97ba
|
/corregraphe/core.py
|
7a8795545a34fc696fd2fd741e8d584dbba8d89a
|
[
"MIT"
] |
permissive
|
theodcr/corregraphe
|
30529650a86a0722ff83aacbb2fc3556258cff11
|
fec9bf844d7ea2dc870197580a4c59a1fad258b8
|
refs/heads/master
| 2020-06-25T21:25:21.888291
| 2019-09-15T08:11:53
| 2019-09-15T08:11:53
| 199,425,868
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,976
|
py
|
from typing import Dict
import hvplot.networkx as hvnx
import networkx as nx
from holoviews import Overlay
from pandas import DataFrame
class CorrelationGraph(object):
"""
Creates a correlation graph from dataframe.
Parameters
----------
data : DataFrame
data to use to compute correlations
method : str = 'kendall' {'pearson', 'kendall', 'spearman'}
correlation method, see pandas.DataFrame.corr
Attributes
----------
correlations : DataFrame
dataframe of correlations, columns and indexes are columns of `data`
graph : Graph
NetworkX graph object representing the correlations,
each node is a column of `data`, edges are correlations
pos : Dict
positions of nodes, keys are node names, value are (x, y) positions
Usage
-----
>>> df = DataFrame({'a': [1, 2, 3, 4], 'b': [2, 4, 6, 8]})
>>> cg = CorrelationGraph(df)
>>> cg.correlations
a b
a 1.0 1.0
b 1.0 1.0
>>> fig = cg.draw()
"""
def __init__(self, data: DataFrame, method: str = "kendall") -> None:
self._data = data
self._method = method
self.correlations = self._compute_correlations(self._data, self._method)
self.graph = self._create_graph(self.correlations)
self.pos = self._compute_positions(self.graph)
def draw(self, **kwargs: Dict) -> Overlay:
"""Draws the graph and returns the hvplot object.
Parameters
----------
**kwargs : Dict[str, Any]
keyword arguments given to the hvplot.networkx.draw method
Returns
-------
Overlay
HoloViews Overlay representing the correlation graph
"""
return hvnx.draw(
self.graph,
pos=self.pos,
edge_width="weight",
node_color="cluster_corr",
labels="name",
colorbar=True,
**kwargs
)
@staticmethod
def _compute_correlations(data: DataFrame, method: str) -> DataFrame:
"""Computes correlation between columns of dataframe.
Parameters
----------
data : DataFrame
method : str
correlation method
Returns
-------
DataFrame
dataframe of correlations, columns and indexes are columns of `data`
"""
return data.corr(method=method).abs()
@staticmethod
def _create_graph(correlations: DataFrame) -> nx.Graph:
"""Creates a graph object to represent correlations.
Parameters
----------
correlations : DataFrame
square dataframe of correlations, columns and indexes must be identical
Returns
-------
Graph
NetworkX graph object representing the correlations
"""
graph = nx.complete_graph(correlations.shape[1])
graph = nx.relabel_nodes(
graph, {i: col for i, col in enumerate(correlations.columns)}
)
for edge in graph.edges:
graph.edges[edge]["weight"] = correlations[edge[0]][edge[1]]
for node in graph.nodes:
graph.nodes[node]["name"] = node
for node, coef in nx.clustering(graph, weight="weight").items():
graph.nodes[node]["cluster_corr"] = coef
return graph
@staticmethod
def _compute_positions(graph: nx.Graph) -> Dict:
"""Returns positions of nodes using a spring layout.
        Random seed is set and not changeable to make graphs always reproducible.
Parameters
----------
graph : Graph
correlation graph, each node is a column, each link is a correlation
Returns
-------
Dict
positions of nodes, keys are node names, value are (x, y) positions
"""
return nx.spring_layout(graph, seed=42)
if __name__ == "__main__":
import doctest
doctest.testmod()
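# --- Editor's note: hedged usage sketch, not part of the original module ---
# Shows one way to persist the Overlay returned by draw(); it assumes the holoviews
# package (already a dependency via the Overlay import above) and that extra keyword
# arguments are forwarded to hvplot/bokeh. Nothing here runs on import.
def _example_save(path="correlations.html"):
    import holoviews as hv
    df = DataFrame({"a": [1, 2, 3, 4], "b": [2, 4, 6, 8], "c": [4, 3, 2, 1]})
    overlay = CorrelationGraph(df, method="spearman").draw(width=400, height=400)
    hv.save(overlay, path)  # writes a standalone interactive HTML file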
|
[
"theo.delecour@gmx.com"
] |
theo.delecour@gmx.com
|
e5238361b8ddfb4b94907a15d69e41fc53033e74
|
7bc7fe046ce8393870ebba5b744d8809932f036f
|
/leetcode/344-Reverse-String.py
|
606fa4bc8a47658edd61da99b3a186270acc50dc
|
[] |
no_license
|
BrandonBlimp/Interview-Prep
|
2d6376396f8349818c6ebd58130a95acbaa0df30
|
b309883565bdc82a4a95dc441bbf0c2d1f89c3ae
|
refs/heads/master
| 2020-06-07T08:52:40.148138
| 2019-06-20T20:20:09
| 2019-06-20T20:20:09
| 192,979,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 872
|
py
|
# Write a function that reverses a string. The input string is given as an array of characters char[].
# Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.
# You may assume all the characters consist of printable ascii characters.
# Example 1:
# Input: ["h","e","l","l","o"]
# Output: ["o","l","l","e","h"]
# Example 2:
# Input: ["H","a","n","n","a","h"]
# Output: ["h","a","n","n","a","H"]
class Solution:
def reverseString(self, s):
"""
:type s: List[str]
:rtype: void Do not return anything, modify s in-place instead.
"""
length = len(s)
i = 0
while i < (length//2):
front_char = s[i]
end_char = s[length-i-1]
s[i] = end_char
s[length-i-1] = front_char
i+=1
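# --- Editor's note: minimal usage check, not part of the original solution ---
# The two-pointer swap above reverses the character list in place.
if __name__ == "__main__":
    chars = list("hello")
    Solution().reverseString(chars)
    print(chars)  # ['o', 'l', 'l', 'e', 'h']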
|
[
"brandonloong.lim@gmail.com"
] |
brandonloong.lim@gmail.com
|
13420aecf149f66ef9cb63a68a5a090dbc8a2e3c
|
6c3e475dcd95d14875a199466b8a7c744f61478b
|
/userProfile/userProfile.py
|
1395f4986a45fed5e4b88da12ed0cb114aa8c04b
|
[] |
no_license
|
webclinic017/tripleATradeBot
|
b4cce7a330e76f9f207c4d6f4d16327b1717a17a
|
40b6130f52eb969336c7b602e698f41a2d8f947b
|
refs/heads/main
| 2023-01-04T04:16:38.338563
| 2020-10-29T10:33:34
| 2020-10-29T10:33:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,432
|
py
|
from eventLogger import eventLogger as logger
from pathList import pathList
from alice_blue import *
import openpyxl
class userProfile:
userProfileWorkBook=""
profileName=""
userName = ""
password = ""
apiSecret = ""
accessToken = ""
aliceObj = ""
exchangeList = ['NSE']
def __init__(self, profileName):
self.userProfileWorkBook = openpyxl.load_workbook(pathList.userProfileFileName)
self.profileName = profileName
        # workbook[name] indexing replaces get_sheet_by_name(), which is deprecated in openpyxl
        sheet = self.userProfileWorkBook[self.profileName]
        self.userName = sheet['A1'].value
        self.password = sheet['A2'].value
        self.apiSecret = sheet['A3'].value
        # note: these logs expose credentials; keep them only for local debugging
        logger.info(self.userName)
        logger.info(self.password)
        logger.info(self.apiSecret)
def login(self):
logger.info("login")
self.accessToken = AliceBlue.login_and_get_access_token(username=self.userName, password=self.password, twoFA='a', api_secret=self.apiSecret)
self.aliceObj = AliceBlue(username=self.userName, password=self.password, access_token=self.accessToken, master_contracts_to_download=self.exchangeList)
def profileData(self):
logger.info("profileData")
print (self.aliceObj.get_profile())
print (self.aliceObj.get_balance())
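# --- Editor's note: hedged usage sketch, not part of the original module ---
# Assumes pathList.userProfileFileName points at a workbook containing a sheet named
# 'default' (hypothetical) with username/password/api_secret in cells A1..A3, and that
# valid Alice Blue credentials are available.
if __name__ == "__main__":
    profile = userProfile("default")
    profile.login()
    profile.profileData()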
|
[
"noreply@github.com"
] |
noreply@github.com
|
aaa0827ca1960e9bbf5f709391f05f25418b11bd
|
61594a19ffaca4b97f7905a82844132df6860837
|
/trio2o/tests/unit/common/scheduler/test_pod_manager.py
|
307430b4e306486df2ce0fcfff758d6dd657b1d0
|
[
"Apache-2.0"
] |
permissive
|
OpenCloudNeXt/trio2o
|
db679ab292162e564145fddaa55aa8f8c3c6c0b8
|
f4d2d5458fbba71414edebf5e9f69b98abd2d080
|
refs/heads/master
| 2020-03-14T03:26:16.805389
| 2017-10-18T07:03:20
| 2017-10-18T07:03:38
| 131,419,855
| 1
| 0
|
Apache-2.0
| 2018-04-28T15:09:21
| 2018-04-28T15:09:21
| null |
UTF-8
|
Python
| false
| false
| 5,910
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from stevedore import driver
from trio2o.common import context
from trio2o.db import api
from trio2o.db import core
from trio2o.db import models
import unittest
class PodManagerTest(unittest.TestCase):
def setUp(self):
core.initialize()
core.ModelBase.metadata.create_all(core.get_engine())
self.context = context.Context()
self.project_id = 'test_pm_project'
self.az_name_2 = 'b_az_pm_2'
self.az_name_1 = 'b_az_pm_1'
self.pod_manager = driver.DriverManager(
namespace='trio2o.common.schedulers',
name='pod_manager',
invoke_on_load=True
).driver
self.b_pod_1 = {'pod_id': 'b_pod_pm_uuid_1',
'pod_name': 'b_region_pm_1',
'az_name': self.az_name_1}
self.b_pod_2 = {'pod_id': 'b_pod_pm_uuid_2',
'pod_name': 'b_region_pm_2',
'az_name': self.az_name_2}
self.b_pod_3 = {'pod_id': 'b_pod_pm_uuid_3',
'pod_name': 'b_region_pm_3',
'az_name': self.az_name_2}
self.b_pod_4 = {'pod_id': 'b_pod_pm_uuid_4',
'pod_name': 'b_region_pm_4',
'az_name': self.az_name_2}
def test_get_current_binding_and_pod(self):
api.create_pod(self.context, self.b_pod_1)
api.create_pod_binding(
self.context, self.project_id, self.b_pod_1['pod_id'])
pod_b_1, pod_1 = self.pod_manager.get_current_binding_and_pod(
self.context, self.az_name_1, self.project_id, pod_group='')
binding_q = core.query_resource(
self.context, models.PodBinding,
[{'key': 'tenant_id',
'comparator': 'eq',
'value': self.project_id}], [])
self.assertEqual(len(binding_q), 1)
self.assertEqual(binding_q[0]['id'], pod_b_1['id'])
pod_b_2, pod_2 = self.pod_manager.get_current_binding_and_pod(
self.context, self.az_name_1, 'new_project_pm_1', pod_group='')
binding_q = core.query_resource(
self.context, models.PodBinding,
[{'key': 'tenant_id',
'comparator': 'eq',
'value': 'new_project_pm_1'}], [])
self.assertEqual(len(binding_q), 0)
self.assertEqual(pod_b_2, None)
self.assertEqual(pod_2, None)
pod_b_3, pod_3 = self.pod_manager.get_current_binding_and_pod(
self.context, 'unknown_az', self.project_id, pod_group='')
binding_q = core.query_resource(
self.context, models.PodBinding,
[{'key': 'tenant_id',
'comparator': 'eq',
'value': self.project_id}], [])
self.assertEqual(len(binding_q), 1)
self.assertEqual(pod_b_3, None)
self.assertEqual(pod_3, None)
pod_b_4, pod_4 = self.pod_manager.get_current_binding_and_pod(
self.context, self.az_name_1, self.project_id, pod_group='test')
binding_q = core.query_resource(
self.context, models.PodBinding,
[{'key': 'tenant_id',
'comparator': 'eq',
'value': self.project_id}], [])
self.assertEqual(len(binding_q), 1)
self.assertEqual(pod_b_4['id'], binding_q[0]['id'])
self.assertEqual(pod_4, None)
def test_create_binding(self):
api.create_pod(self.context, self.b_pod_2)
flag = self.pod_manager.create_binding(
self.context, 'new_project_pm_2', self.b_pod_2['pod_id'])
self.assertEqual(flag, True)
binding_q = core.query_resource(
self.context, models.PodBinding,
[{'key': 'tenant_id',
'comparator': 'eq',
'value': 'new_project_pm_2'}], [])
self.assertEqual(len(binding_q), 1)
self.assertEqual(binding_q[0]['pod_id'], self.b_pod_2['pod_id'])
self.assertEqual(binding_q[0]['tenant_id'], 'new_project_pm_2')
self.assertEqual(binding_q[0]['is_binding'], True)
def test_update_binding(self):
api.create_pod(self.context, self.b_pod_4)
api.create_pod(self.context, self.b_pod_3)
flag = self.pod_manager.create_binding(
self.context, 'new_project_pm_3', self.b_pod_3['pod_id'])
self.assertEqual(flag, True)
current_binding = core.query_resource(
self.context, models.PodBinding,
[{'key': 'tenant_id',
'comparator': 'eq',
'value': 'new_project_pm_3'}], [])
flag = self.pod_manager.update_binding(
self.context, current_binding[0], self.b_pod_4['pod_id'])
self.assertEqual(flag, True)
binding_q = core.query_resource(
self.context, models.PodBinding,
[{'key': 'tenant_id',
'comparator': 'eq',
'value': 'new_project_pm_3'}], [])
self.assertEqual(len(binding_q), 2)
self.assertEqual(binding_q[0]['pod_id'], self.b_pod_3['pod_id'])
self.assertEqual(binding_q[0]['tenant_id'], 'new_project_pm_3')
self.assertEqual(binding_q[0]['is_binding'], False)
self.assertEqual(binding_q[1]['pod_id'], self.b_pod_4['pod_id'])
self.assertEqual(binding_q[1]['tenant_id'], 'new_project_pm_3')
self.assertEqual(binding_q[1]['is_binding'], True)
|
[
"yingfeicaozhang100527@gmail.com"
] |
yingfeicaozhang100527@gmail.com
|
8723a4a6f9bb16968b5f83ec44895b30cb9da123
|
d82b879f41e906589a0a6ad5a6a09e0a0032aa3f
|
/ObservationScripts/on_off/observe_moon_spec_analyser.py
|
176f9c75c90dd4f6945052404f93c17615964d9f
|
[] |
no_license
|
SETIatHCRO/ATA-Utils
|
66718eed669882792148fe0b7a2f977cd0f6ac2e
|
59f4d21b086effaf41d5e11e338ce602c803cfd0
|
refs/heads/master
| 2023-08-16T20:41:44.233507
| 2023-08-10T20:39:13
| 2023-08-10T20:39:13
| 137,617,987
| 5
| 5
| null | 2023-08-10T20:39:14
| 2018-06-17T00:07:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 867
|
py
|
#!/home/obsuser/miniconda3/envs/ATAobs/bin/python
from ATATools import ata_control, logger_defaults
from SNAPobs import snap_dada, snap_if
import time
import atexit
import numpy as np
import sys
import argparse
import logging
import os
def main():
logger = logger_defaults.getProgramLogger("observe",
loglevel=logging.INFO)
az_offset = 20.
el_offset = 0.
ant_list = ["2b"]
source = "moon"
ata_control.reserve_antennas(ant_list)
atexit.register(ata_control.release_antennas,ant_list, False)
ata_control.create_ephems2(source, az_offset, el_offset)
ata_control.point_ants2(source, "off", ant_list)
#ata_control.autotune(ant_list)
_ = input("Press any key to switch to on source")
ata_control.point_ants2(source, "on", ant_list)
print("on source acquired")
if __name__ == "__main__":
main()
|
[
"wael.a.farah@gmail.com"
] |
wael.a.farah@gmail.com
|
3f35bb4275325e1c72d173246eef8e4413773be3
|
25b1d29a9e25629b1fcd469641cf871354b78761
|
/Exercicios Listas/listas-17.py
|
2ad5b9d2a814128ac5a02e43e1edc9a57313ce68
|
[] |
no_license
|
AndersonBatalha/Programacao1
|
0a73302e216ddd189f75231cbbae910743ab67b9
|
07ef756d6984f25d294ce4e758e8a671942581fa
|
refs/heads/master
| 2021-07-15T14:17:34.661718
| 2017-10-15T17:33:20
| 2017-10-15T17:33:20
| 106,857,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 17. In a long-jump competition each athlete is entitled to five jumps. The athlete's result is determined by the average of the five values. Write a program that reads the athlete's name and the five distances reached in their jumps, then reports the name, the jumps and the average of the jumps. The program must end when no athlete name is entered. The output must follow the example below:
# Athlete: Rodrigo Curvêllo
# First Jump: 6.5 m / Second Jump: 6.1 m / Third Jump: 6.2 m
# Fourth Jump: 5.4 m / Fifth Jump: 5.3 m
# Final result:
# Athlete: Rodrigo Curvêllo
# Jumps: 6.5 - 6.1 - 6.2 - 5.4 - 5.3
# Average of the jumps: 5.9 m
nomes = []
saltos = []  # one five-jump list per athlete
while True:
    nome = raw_input("Athlete name: ")
    if len(nome) > 0:
        saltos_atleta = []
        for i in range(1, 6):
            salto = float(raw_input("Jump %d: " % i))
            saltos_atleta.append(salto)
        nomes.append(nome)
        saltos.append(saltos_atleta)
    else:
        if len(nomes) == 0:
            print "No athletes registered."
        else:
            print "Finished"
            for i in range(len(nomes)):
                print "\nAthlete: %s" % nomes[i]
                for j in range(len(saltos[i])):
                    print "Jump %d: %.2f m" % (j + 1, saltos[i][j])
                media = sum(saltos[i]) / len(saltos[i])
                print "Average of the jumps: %.2f m" % media
        break
|
[
"andersonpbatalha@gmail.com"
] |
andersonpbatalha@gmail.com
|
7d167e1feb92203517a6bf08e8597b19369c565e
|
42ffa887ca0ac7b54f0473880613865fe523fbfc
|
/src/viztracer/__init__.py
|
38fd0acde24ec07503595c6da251f4e74a45e921
|
[
"Apache-2.0"
] |
permissive
|
tianxie1989/viztracer
|
e61090ac286a5b4ffe4c8f0265fde38bca68837b
|
39a6314b2a5a30ede71be96bd5e174b2bdaa2664
|
refs/heads/master
| 2022-12-11T08:21:25.415858
| 2020-08-21T00:21:00
| 2020-08-21T00:21:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/viztracer/blob/master/NOTICE.txt
from .viztracer import VizTracer
from .flamegraph import FlameGraph
from .decorator import ignore_function
__version__ = "0.3.0"
__all__ = [
"__version__",
"VizTracer",
"FlameGraph",
"ignore_function"
]
|
[
"gaogaotiantian@hotmail.com"
] |
gaogaotiantian@hotmail.com
|
b2df86aff826ef401a2e795e3a010d9464621cb0
|
2a0ec173d3e60ee01e07d348728b8517a07d6d0d
|
/light/select_max_region.py
|
7746c4e201022dae8d4055452a0c275874b51860
|
[] |
no_license
|
pabogdan/spinnaker_vision
|
62c22572e95cfc30a1ad60ef7473d215326b1dfa
|
b1939a432a672cb6dcd33966a175eb9f2027aca9
|
refs/heads/master
| 2021-01-18T10:33:41.928880
| 2015-10-21T14:07:45
| 2015-10-21T14:07:45
| 42,939,233
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 93
|
py
|
__author__ = 'bogdanp'
import nengo
import nengo_spinnaker
import nengo_pushbot
import numpy
|
[
"dwTheresnosugar2013"
] |
dwTheresnosugar2013
|
f65170ad67be87a0b687ba1d668c0e6b07848267
|
fa03d0932cda3030a1b8da8f6ecfe1fe7314ba72
|
/pyproject/scenario04/server/config.py
|
d4cb406de011985c9701cbfa9a0e9480aa41a2cc
|
[] |
no_license
|
mathcircle/ccircle
|
05d9310c535b5a7f613b3a8fda2eebc105f17b0c
|
dfc672a1b6a96169d179bb7292a43dd9d9510640
|
refs/heads/master
| 2020-12-03T00:43:11.324286
| 2019-06-02T05:21:45
| 2019-06-02T05:22:00
| 96,068,861
| 1
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
SERVER_HOST = '130.39.91.40'
SERVER_HOST_LOCAL = '127.0.0.1'
SERVER_PORT = 50382
SOCKET_BUFFER_SIZE = 4096
MAX_MESSAGE_SIZE = 2048
MIN_REQUEST_WAIT = 1.0 / 10.0
STATUS_GOOD = 'good'
|
[
"josh@ltheory.com"
] |
josh@ltheory.com
|
62abf1b5cf573596ca943d290748c41b37bd2e49
|
4dfb1731e42654d2694b9ea109b0da26f0e6215c
|
/qbittorrent_mod.py
|
d6a8fc5bab59ef956ce8f458554ba67a2d766cb4
|
[
"MIT"
] |
permissive
|
y2038558528/flexget_qbittorrent_mod
|
3e89e13c8814e21de51e101f3430ce660b4cfcb5
|
a49dacf0b4bf20217cb43df0ad94112b7dc67364
|
refs/heads/master
| 2023-03-22T22:00:04.330858
| 2021-03-15T13:45:02
| 2021-03-15T13:45:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30,029
|
py
|
import math
import os
import re
from datetime import datetime
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from loguru import logger
from .ptsites.client.qbittorrent_client import QBittorrentClientFactory
class QBittorrentModBase:
def __init__(self):
self.client = None
def prepare_config(self, config):
if isinstance(config, bool):
config = {'enabled': config}
config.setdefault('enabled', True)
config.setdefault('host', 'localhost')
config.setdefault('port', 8080)
config.setdefault('use_ssl', False)
config.setdefault('verify_cert', True)
return config
def create_client(self, config):
client = QBittorrentClientFactory().get_client(config)
return client
def on_task_start(self, task, config):
self.client = None
config = self.prepare_config(config)
if config['enabled']:
if task.options.test:
logger.info('Trying to connect to qBittorrent...')
self.client = self.create_client(config)
if self.client:
logger.info('Successfully connected to qBittorrent.')
else:
logger.error('It looks like there was a problem connecting to qBittorrent.')
class PluginQBittorrentModInput(QBittorrentModBase):
schema = {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'use_ssl': {'type': 'boolean'},
'port': {'type': 'integer'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'verify_cert': {'type': 'boolean'},
'server_state': {'oneOf': [{'type': 'boolean'}, {'type': 'string'}]},
'force_update': {'type': 'boolean'},
'enabled': {'type': 'boolean'},
},
'additionalProperties': False
}
def prepare_config(self, config):
config = QBittorrentModBase.prepare_config(self, config)
return config
def on_task_input(self, task, config):
config = self.prepare_config(config)
if not config['enabled']:
return
server_state = config.get('server_state')
if server_state:
entry = Entry(
title='qBittorrent Server State' if isinstance(server_state, bool) else server_state,
url=config.get('host')
)
entry['time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
entry['server_state'] = {}
try:
self.client = self.create_client(config)
entry['server_state'] = self.client.get_main_data_snapshot(id(task)).get('server_state')
entry['server_state']['flexget_connected'] = True
except plugin.PluginError:
entry['server_state']['flexget_connected'] = False
return [entry]
else:
self.client = self.create_client(config)
force_update = config.get('force_update', False)
return list(
self.client.get_main_data_snapshot(id(task), force_update=force_update).get('entry_dict').values())
class PluginQBittorrentMod(QBittorrentModBase):
schema = {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'use_ssl': {'type': 'boolean'},
'port': {'type': 'integer'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'verify_cert': {'type': 'boolean'},
'action': {
'type': 'object',
'properties': {
'add': {
'type': 'object',
'properties': {
'savepath': {'type': 'string'},
'cookie': {'type': 'string'},
'category': {'type': 'string'},
'skip_checking': {'type': 'boolean'},
'paused': {'type': 'string'},
'root_folder': {'type': 'string'},
'rename': {'type': 'string'},
'upLimit': {'type': 'integer'},
'dlLimit': {'type': 'integer'},
'autoTMM': {'type': 'boolean'},
'sequentialDownload': {'type': 'string'},
'firstLastPiecePrio': {'type': 'string'},
'reject_on': {
'type': 'object',
'properties': {
'bandwidth_limit': {'type': 'integer'},
'dl_speed': {
'oneOf': [
{'type': 'boolean'},
{'type': 'integer'},
{'type': 'number', 'minimum': 0.1, 'maximum': 0.9},
]
},
'dl_limit': {'oneOf': [{'type': 'boolean'}, {'type': 'integer'}]}
}
}
}
},
'remove': {
'type': 'object',
'properties': {
'keeper': {
'type': 'object',
'properties': {
'keep_disk_space': {'type': 'integer'},
'check_reseed': {
'oneOf': [{'type': 'boolean'}, {'type': 'array', 'items': {'type': 'string'}}]},
'delete_files': {'type': 'boolean'},
'dl_limit_on_succeeded': {'type': 'integer'},
'alt_dl_limit_on_succeeded': {'type': 'integer'},
'dl_limit_interval': {'type': 'integer'}
},
},
'cleaner': {
'type': 'object',
'properties': {
'delete_files': {'type': 'boolean'}
}
}
},
"minProperties": 1,
"maxProperties": 1,
},
'resume': {
'type': 'object',
'properties': {
'recheck_torrents': {'type': 'boolean'}
}
},
'pause': {
'type': 'boolean'
},
'modify': {
'type': 'object',
'properties': {
'tag_by_tracker': {'type': 'boolean'},
'replace_trackers': {
'type': 'object',
'properties': {
}
}
}
},
'manage_conn': {
'type': 'object',
'properties': {
'min': {'type': 'integer'},
'max': {'type': 'integer'}
}
},
'limit_upload_by_tracker': {
'type': 'object',
'properties': {
'working': {'type': 'integer'},
'not_working': {'type': 'integer'}
}
}
},
"minProperties": 1,
"maxProperties": 1,
},
'fail_html': {'type': 'boolean'},
},
'additionalProperties': False
}
def prepare_config(self, config):
config = super().prepare_config(config)
config.setdefault('fail_html', True)
return config
@plugin.priority(120)
def on_task_download(self, task, config):
config = self.prepare_config(config)
add_options = config.get('action').get('add')
if not add_options or not task.accepted:
return
if not self.client:
self.client = self.create_client(config)
if self.client:
logger.debug('Successfully connected to qBittorrent.')
else:
raise plugin.PluginError("Couldn't connect to qBittorrent.")
main_data_snapshot = self.client.get_main_data_snapshot(id(task))
server_state = main_data_snapshot.get('server_state')
        reject_on = add_options.get('reject_on') or {}  # 'reject_on' is optional in the schema; guard against None
        bandwidth_limit = reject_on.get('bandwidth_limit')
reject_on_dl_speed = reject_on.get('dl_speed')
reject_on_dl_limit = reject_on.get('dl_limit')
reject_reason = ''
dl_rate_limit = server_state.get('dl_rate_limit')
if reject_on_dl_limit:
if dl_rate_limit and dl_rate_limit < reject_on_dl_limit:
reject_reason = 'dl_limit: {:.2F} MiB < reject_on_dl_limit: {:.2F} MiB'.format(
dl_rate_limit / (1024 * 1024), reject_on_dl_limit / (1024 * 1024))
if reject_on_dl_speed:
if isinstance(reject_on_dl_speed, float):
dl_rate_limit = dl_rate_limit if dl_rate_limit else bandwidth_limit
reject_on_dl_speed = int(dl_rate_limit * reject_on_dl_speed)
dl_info_speed = server_state.get('dl_info_speed')
if dl_info_speed and dl_info_speed > reject_on_dl_speed:
reject_reason = 'dl_speed: {:.2F} MiB > reject_on_dl_speed: {:.2F} MiB'.format(
dl_info_speed / (1024 * 1024), reject_on_dl_speed / (1024 * 1024))
for entry in task.accepted:
if reject_reason:
entry.reject(reason=reject_reason, remember=True)
site_name = self._get_site_name(entry.get('url'))
logger.info('reject {}, because: {}, site: {}', entry['title'], reject_reason, site_name)
continue
if 'download' not in task.config:
download = plugin.get('download', self)
download.get_temp_files(task, handle_magnets=True, fail_html=config['fail_html'])
@plugin.priority(135)
def on_task_output(self, task, config):
config = self.prepare_config(config)
action_config = config.get('action')
if len(action_config) != 1:
raise plugin.PluginError('There must be and only one action')
# don't add when learning
if task.options.learn:
return
if not task.accepted and not action_config.get('remove'):
return
if not self.client:
self.client = self.create_client(config)
if self.client:
logger.debug('Successfully connected to qBittorrent.')
else:
raise plugin.PluginError("Couldn't connect to qBittorrent.")
(action_name, option), = action_config.items()
action = getattr(self, action_name + '_entries', None)
if action:
action(task, option)
else:
raise plugin.PluginError('Unknown action.')
def add_entries(self, task, add_options):
options = {}
for entry in task.accepted:
for attr_str in ['savepath',
'cookie',
'category',
'skip_checking',
'paused',
'root_folder',
'rename',
'upLimit',
'dlLimit',
'autoTMM',
'sequentialDownload',
'firstLastPiecePrio']:
attr = entry.get(attr_str, add_options.get(attr_str))
if attr:
options[attr_str] = attr
if options.get('autoTMM') and options.get('category') and options.get('savepath'):
del options['savepath']
is_magnet = entry['url'].startswith('magnet:')
if not is_magnet:
if 'file' not in entry:
entry.fail('File missing?')
return
if not os.path.exists(entry['file']):
tmp_path = os.path.join(task.manager.config_base, 'temp')
logger.debug('entry: {}', entry)
logger.debug('temp: {}', ', '.join(os.listdir(tmp_path)))
entry.fail("Downloaded temp file '%s' doesn't exist!?" % entry['file'])
return
self.client.add_torrent_file(entry['file'], options)
else:
self.client.add_torrent_url(entry['url'], options)
def remove_entries(self, task, remove_options):
(mode_name, option), = remove_options.items()
mode = getattr(self, 'remove_entries_' + mode_name, None)
if mode:
mode(task, option)
else:
raise plugin.PluginError('Unknown mode.')
def remove_entries_keeper(self, task, keeper_options):
delete_files = keeper_options.get('delete_files')
check_reseed = keeper_options.get('check_reseed')
keep_disk_space = keeper_options.get('keep_disk_space')
dl_limit_interval = keeper_options.get('dl_limit_interval', 24 * 60 * 60)
main_data_snapshot = self.client.get_main_data_snapshot(id(task))
server_state = main_data_snapshot.get('server_state')
dl_rate_limit = server_state.get('dl_rate_limit')
use_alt_speed_limits = server_state.get('use_alt_speed_limits')
free_space_on_disk = server_state.get('free_space_on_disk')
dl_limit_mode = 'dl_limit'
dl_limit_on_succeeded = keeper_options.get('dl_limit_on_succeeded', 0)
alt_dl_limit_on_succeeded = keeper_options.get('alt_dl_limit_on_succeeded', 0)
if use_alt_speed_limits:
dl_limit_mode = 'alt_dl_limit'
dl_limit_on_succeeded = alt_dl_limit_on_succeeded
keep_disk_space = keep_disk_space * 1024 * 1024 * 1024
if keep_disk_space < free_space_on_disk:
if dl_limit_on_succeeded is not None:
dl_limit = math.floor(dl_limit_on_succeeded / 1024) * 1024
if dl_limit != dl_rate_limit:
self.client.set_application_preferences('{{"{}": {}}}'.format(dl_limit_mode, dl_limit))
logger.info("set {} to {} KiB/s", dl_limit_mode, dl_limit / 1024)
return
accepted_entry_hashes = []
delete_hashes = []
delete_size = 0
if not task.accepted:
self.calc_and_set_dl_limit(keep_disk_space, free_space_on_disk, delete_size, dl_limit_interval,
dl_limit_on_succeeded, dl_rate_limit, dl_limit_mode)
return
entry_dict = main_data_snapshot.get('entry_dict')
reseed_dict = main_data_snapshot.get('reseed_dict')
for entry in task.accepted:
accepted_entry_hashes.append(entry['torrent_info_hash'])
for entry_hash in accepted_entry_hashes:
if entry_hash in delete_hashes:
continue
server_entry = entry_dict.get(entry_hash)
            if not server_entry:
                self.client.reset_rid()
                continue  # without this, the next line dereferences None (matches the cleaner-mode handling below)
save_path_with_name = server_entry.get('qbittorrent_save_path_with_name')
reseed_entry_list = reseed_dict.get(save_path_with_name)
check_hashes = []
torrent_hashes = []
torrent_size = 0
for reseed_entry in reseed_entry_list:
if reseed_entry['qbittorrent_completed'] != 0:
torrent_size = reseed_entry['qbittorrent_completed']
if isinstance(check_reseed, list):
trackers = reseed_entry['qbittorrent_trackers']
site_names = []
for tracker in trackers:
site_names.append(self._get_site_name(tracker.get('url')))
if len(set(check_reseed) & set(site_names)) > 0:
check_hashes.append(reseed_entry['torrent_info_hash'])
else:
check_hashes.append(reseed_entry['torrent_info_hash'])
torrent_hashes.append(reseed_entry['torrent_info_hash'])
if check_reseed and not set(accepted_entry_hashes) >= set(check_hashes):
for torrent_hash in torrent_hashes:
entry_dict.get(torrent_hash).reject(
reason='torrents with the same save path are not all tested')
continue
else:
if keep_disk_space > free_space_on_disk + delete_size:
delete_size += torrent_size
self._build_delete_hashes(delete_hashes, torrent_hashes, entry_dict, keep_disk_space,
free_space_on_disk, delete_size)
if keep_disk_space < free_space_on_disk + delete_size:
break
self.calc_and_set_dl_limit(keep_disk_space, free_space_on_disk, delete_size, dl_limit_interval,
dl_limit_on_succeeded, dl_rate_limit, dl_limit_mode)
if len(delete_hashes) > 0:
self.client.delete_torrents(str.join('|', delete_hashes), delete_files)
def calc_and_set_dl_limit(self, keep_disk_space, free_space_on_disk, delete_size, dl_limit_interval,
dl_limit_on_succeeded, dl_rate_limit, dl_limit_mode):
if keep_disk_space > free_space_on_disk + delete_size:
dl_limit = (free_space_on_disk + delete_size) / dl_limit_interval
if dl_limit_on_succeeded and dl_limit > dl_limit_on_succeeded:
dl_limit = dl_limit_on_succeeded
dl_limit = math.floor(dl_limit / 1024) * 1024
if dl_limit != dl_rate_limit:
self.client.set_application_preferences('{{"{}": {}}}'.format(dl_limit_mode, dl_limit))
logger.warning("not enough disk space, set {} to {} KiB/s", dl_limit_mode, dl_limit / 1024)
def _build_delete_hashes(self, delete_hashes, torrent_hashes, all_entry_map, keep_disk_space, free_space_on_disk,
delete_size):
delete_hashes.extend(torrent_hashes)
logger.info('keep_disk_space: {:.2F} GiB, free_space_on_disk: {:.2f} GiB, delete_size: {:.2f} GiB',
keep_disk_space / (1024 * 1024 * 1024), free_space_on_disk / (1024 * 1024 * 1024),
delete_size / (1024 * 1024 * 1024))
entries = []
for torrent_hash in torrent_hashes:
entry = all_entry_map.get(torrent_hash)
entry.accept(reason='torrent with the same save path are all pass tested')
entries.append(entry)
entries.sort(key=lambda e: e['qbittorrent_last_activity'], reverse=True)
for entry in entries:
logger.info(
'{}, size: {:.2f} GiB, seeding_time: {:.2f} h, share_ratio: {:.2f}, last_activity: {}, site: {}',
entry['title'],
entry['qbittorrent_completed'] / (1024 * 1024 * 1024),
entry['qbittorrent_seeding_time'] / (60 * 60),
entry['qbittorrent_share_ratio'],
entry['qbittorrent_last_activity'],
entry['qbittorrent_tags'])
def remove_entries_cleaner(self, task, cleaner_options):
delete_files = cleaner_options.get('delete_files')
delete_hashes = []
delete_files_hashes = []
accepted_entry_hashes = []
main_data_snapshot = self.client.get_main_data_snapshot(id(task))
entry_dict = main_data_snapshot.get('entry_dict')
reseed_dict = main_data_snapshot.get('reseed_dict')
for entry in task.accepted:
accepted_entry_hashes.append(entry['torrent_info_hash'])
for entry_hash in accepted_entry_hashes:
if entry_hash in delete_hashes or entry_hash in delete_files_hashes:
continue
server_entry = entry_dict.get(entry_hash)
if not server_entry:
self.client.reset_rid()
continue
save_path_with_name = server_entry.get('qbittorrent_save_path_with_name')
reseed_entry_list = reseed_dict.get(save_path_with_name)
torrent_hashes = []
for reseed_entry in reseed_entry_list:
torrent_hashes.append(reseed_entry['torrent_info_hash'])
if not set(accepted_entry_hashes) >= set(torrent_hashes):
delete_hashes.extend(set(accepted_entry_hashes) & set(torrent_hashes))
else:
delete_files_hashes.extend(torrent_hashes)
if len(delete_hashes) > 0:
self.client.delete_torrents(str.join('|', delete_hashes), False)
self.print_clean_log(entry_dict, delete_hashes, False)
if len(delete_files_hashes) > 0:
self.client.delete_torrents(str.join('|', delete_files_hashes), delete_files)
self.print_clean_log(entry_dict, delete_files_hashes, delete_files)
def print_clean_log(self, entry_dict, hashes, delete_files):
for torrent_hash in hashes:
entry = entry_dict.get(torrent_hash)
logger.info(
'{}, size: {:.2f} GiB, seeding_time: {:.2f} h, share_ratio: {:.2f}, last_activity: {}, tracker_msg: {}, site: {}, delete_files: {}',
entry['title'],
entry['qbittorrent_completed'] / (1024 * 1024 * 1024),
entry['qbittorrent_seeding_time'] / (60 * 60),
entry['qbittorrent_share_ratio'],
entry['qbittorrent_last_activity'],
entry['qbittorrent_tracker_msg'],
entry['qbittorrent_tags'],
delete_files
)
def resume_entries(self, task, resume_options):
recheck_torrents = resume_options.get('recheck_torrents')
main_data_snapshot = self.client.get_main_data_snapshot(id(task))
reseed_dict = main_data_snapshot.get('reseed_dict')
hashes = []
recheck_hashes = []
for entry in task.accepted:
save_path_with_name = entry['qbittorrent_save_path_with_name']
reseed_entry_list = reseed_dict.get(save_path_with_name)
resume = False
for reseed_entry in reseed_entry_list:
seeding = 'up' in reseed_entry['qbittorrent_state'].lower() and reseed_entry[
'qbittorrent_state'] != 'pausedUP'
if seeding:
hashes.append(entry['torrent_info_hash'])
logger.info('{}', entry['title'])
resume = True
break
if not resume and entry['qbittorrent_state'] != 'checkingUP':
entry.reject(reason='can not find seeding torrent in same save path')
recheck_hashes.append(entry['torrent_info_hash'])
if recheck_torrents and len(recheck_hashes) > 0:
logger.info('recheck {}', recheck_hashes)
self.client.recheck_torrents(str.join('|', recheck_hashes))
self.client.resume_torrents(str.join('|', hashes))
def pause_entries(self, task, pause_options):
if not pause_options:
return
hashes = []
for entry in task.accepted:
hashes.append(entry['torrent_info_hash'])
logger.info('pause: {}', entry['title'])
self.client.pause_torrents(str.join('|', hashes))
def modify_entries(self, task, modify_options):
tag_by_tracker = modify_options.get('tag_by_tracker')
replace_trackers = modify_options.get('replace_trackers')
for entry in task.accepted:
tags = entry.get('qbittorrent_tags')
torrent_trackers = entry.get('qbittorrent_trackers')
for tracker in torrent_trackers:
if tag_by_tracker:
site_name = self._get_site_name(tracker.get('url'))
if site_name and site_name not in tags:
self.client.add_torrent_tags(entry['torrent_info_hash'], site_name)
tags += ', {}'.format(site_name)
logger.info('{} add tag {}', entry.get('title'), site_name)
if replace_trackers:
for orig_url, new_url in replace_trackers.items():
if tracker.get('url') == orig_url:
if new_url:
logger.info('{} update tracker {}', entry.get('title'), new_url)
self.client.edit_trackers(entry.get('torrent_info_hash'), orig_url, new_url)
else:
logger.info('{} remove tracker {}', entry.get('title'), orig_url)
self.client.remove_trackers(entry.get('torrent_info_hash'), orig_url)
def manage_conn_entries(self, task, manage_conn_options):
min_conn = manage_conn_options.get('min')
max_conn = manage_conn_options.get('max')
for entry in task.accepted:
step = entry.get('step')
if not step:
return
server_state = entry.get('server_state')
server_queued_io_jobs = server_state.get('queued_io_jobs')
server_total_peer_connections = server_state.get('total_peer_connections')
application_preferences = self.client.get_application_preferences()
max_connect = application_preferences.get('max_connec')
if max_connect == -1:
max_connect = float('inf')
if (step > 0 and max_connect <= server_total_peer_connections) or step < 0:
max_connect_changed = server_total_peer_connections + step
if max_connect_changed < min_conn:
max_connect_changed = min_conn
elif max_connect_changed > max_conn:
max_connect_changed = max_conn
self.client.set_application_preferences('{{"max_connec": {}}}'.format(max_connect_changed))
logger.info('queued_io_jobs: {} , total_peer_connections: {}, set max_connec to {}',
server_queued_io_jobs, server_total_peer_connections, max_connect_changed)
def limit_upload_by_tracker_entries(self, task, limit_when_not_working_options):
working_speed = limit_when_not_working_options.get('working')
not_working_speed = limit_when_not_working_options.get('not_working')
working_hashes = []
not_working_hashes = []
for entry in task.accepted:
torrent_trackers = entry.get('qbittorrent_trackers')
is_working = False
updating = False
for tracker in torrent_trackers:
status = tracker.get('status')
if status == 2:
is_working = True
elif status == 3:
updating = True
if updating:
continue
up_limit = 0 if entry['qbittorrent_up_limit'] == -1 else entry['qbittorrent_up_limit']
if is_working:
entry_working = entry.get('working') if entry.get('working') else working_speed
if up_limit != entry_working:
if entry.get('working'):
self.client.set_torrent_upload_limit(entry['torrent_info_hash'], entry_working)
else:
working_hashes.append(entry['torrent_info_hash'])
logger.debug(
f'{entry["title"]} site: {entry["qbittorrent_tags"]} tracker is working, set torrent upload limit to {entry_working} B/s')
else:
if up_limit != not_working_speed:
not_working_hashes.append(entry['torrent_info_hash'])
logger.debug(
f'{entry["title"]} site: {entry["qbittorrent_tags"]} tracker is not working, set torrent upload limit to {not_working_speed} B/s')
if working_hashes:
self.client.set_torrent_upload_limit(str.join('|', working_hashes), working_speed)
if not_working_hashes:
self.client.set_torrent_upload_limit(str.join('|', not_working_hashes), not_working_speed)
def _get_site_name(self, tracker_url):
re_object = re.search('(?<=//).*?(?=/)', tracker_url)
if re_object:
domain = re_object.group().split('.')
if len(domain) > 1:
site_name = domain[len(domain) - 2]
if site_name == 'edu':
site_name = domain[len(domain) - 3]
return site_name
def on_task_learn(self, task, config):
""" Make sure all temp files are cleaned up when entries are learned """
# If download plugin is enabled, it will handle cleanup.
if 'download' not in task.config:
download = plugin.get('download', self)
download.cleanup_temp_files(task)
on_task_abort = on_task_learn
@event('plugin.register')
def register_plugin():
plugin.register(PluginQBittorrentMod, 'qbittorrent_mod', api_ver=2)
plugin.register(PluginQBittorrentModInput, 'from_qbittorrent_mod', api_ver=2)
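# --- Editor's note: hedged configuration sketch, not part of the plugin ---
# A config dict that satisfies the schema above (in a FlexGet task YAML this would sit
# under the 'qbittorrent_mod' key). Host, credentials and paths are placeholders.
_EXAMPLE_CONFIG = {
    'host': 'localhost',
    'port': 8080,
    'use_ssl': False,
    'username': 'admin',
    'password': 'changeme',
    'action': {
        'add': {
            'category': 'flexget',
            'savepath': '/downloads',
            'autoTMM': False,
        }
    },
    'fail_html': True,
}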
|
[
"12468675@qq.com"
] |
12468675@qq.com
|
cada27b71e62290be03d07aabfae414035418c63
|
465422bf294af104fe6963900f93d89280c211ec
|
/generator.py
|
5ec266646aac7b586f740b247cc3c0a034133759
|
[] |
no_license
|
hevervie/Python
|
9272215d31ba8cd83741beb4db80b9fe810be94d
|
eda55dd49d4b405cf434bdb96357bdf4c4856107
|
refs/heads/master
| 2021-06-04T10:49:04.122122
| 2016-10-18T13:22:05
| 2016-10-18T13:22:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
#!/usr/bin/env python
#coding=utf-8
g=(x*x for x in range(1,11))
for i in g:
print(i)
# Generate the Fibonacci sequence
def fib(max):
n,a,b=0,0,1
while n<max:
print(b)
a,b=b,a+b
n=n+1
return 'done'
fib(6)
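# --- Editor's note: hedged variant, not in the original file ---
# fib() above prints inside the loop; rewritten with `yield` it can be iterated
# exactly like the generator expression g at the top of the file.
def fib_gen(max):
    n, a, b = 0, 0, 1
    while n < max:
        yield b
        a, b = b, a + b
        n = n + 1
for x in fib_gen(6):
    print(x)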
|
[
"zhoupans_mail@163.com"
] |
zhoupans_mail@163.com
|
567c4f1b87268b45b3e5955082e71554b4e4551e
|
e3abb55ba514fb102ce01601ab0e9ebc15f5d26f
|
/code/l010_await.py
|
1c1c6228bf6292b72ebae15c80d040f4c8a0b5a4
|
[] |
no_license
|
joshmarshall/coroutine-presentation
|
1d8dec7a6c31a0ee5e8875883a326ea801300e93
|
a6d07e70bdff286f45785f4127d854ea701a6a08
|
refs/heads/master
| 2023-09-03T04:23:20.422823
| 2018-01-03T10:19:50
| 2018-01-03T10:19:50
| 64,452,999
| 1
| 0
| null | 2017-11-19T21:17:58
| 2016-07-29T05:29:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,248
|
py
|
import asyncio
class Session(object):
@classmethod
def connect(cls):
return Session()
async def __aenter__(self):
print("Creating session...")
await asyncio.sleep(1)
return self
async def __aexit__(self, exc_typ, exc, tb):
# can also handle exceptions as necessary
await asyncio.sleep(1)
print("Disconnected.")
    def __aiter__(self):
        # On Python 3.7+ __aiter__ must return the async iterator directly (not an
        # awaitable), so this is a plain method rather than a coroutine.
        self.records = [Record(), Record()]
        return self
async def __anext__(self):
print("Finding record...")
await asyncio.sleep(1)
if not self.records:
raise StopAsyncIteration()
return self.records.pop(0)
def find(self):
return self
class Record(object):
async def update(self, **kwargs):
await asyncio.sleep(1)
print("Updating record: {0}".format(kwargs))
async def wait():
async with Session.connect() as session:
i = 0
async for record in session.find():
i += 1
await record.update(foo=i)
def main():
loop = asyncio.get_event_loop()
print("Starting...")
loop.run_until_complete(wait())
print("Finishing...")
loop.close()
if __name__ == "__main__":
main()
|
[
"catchjosh@gmail.com"
] |
catchjosh@gmail.com
|
ee80ca6b31092e5fc1369e74a7618bf280402a55
|
cf0ee22c5e880eae8098b09a5476e293cdd5c15e
|
/mod1.py
|
b0e32edc899d1fb3d8140d4222272f99e2b40b70
|
[] |
no_license
|
90075sourab/daydictionary
|
94124c52fe38b654b8a3025506fb1393c2e3d1fd
|
43c37ca1f11fd0a93db6fec0d3768b649c8ea4fb
|
refs/heads/main
| 2023-01-28T22:05:51.152902
| 2020-12-07T17:45:24
| 2020-12-07T17:45:24
| 319,397,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
import math
#l=[1,2,None,None,None,8,10,9,None,None,8]
def fillNone(num_list):
#this function find where None value start and where end
#and pass start and end index,None count to setMean function with list
start_none=False
count=0
for i in range(0,len(num_list)):
if num_list[i]==None and start_none==False:
start_index=i
start_none=True
            count=1  # start this run's count at 1; using += here carried the previous run's count forward (bug fix)
elif(num_list[i]==None and start_none==True):
count+=1
continue
elif(num_list[i]!=None and start_none==True):
end_index=i-1
start_none=False
setMean(num_list,start_index,end_index,count)
#return start_index,end_index
def setMean(num_list,start_index,end_index,count):
'''this function take first and last
None value index and put mean to all of them
'''
f_num=num_list[start_index-1]
l_num=num_list[end_index+1]
diff=math.floor((l_num-f_num)/(count+1)) if l_num>f_num else -math.floor((f_num-l_num)/(count+1))
#print(f_num,l_num,diff)
for i in range(start_index,end_index+1):
num_list[i]=num_list[i-1]+diff
#print(setNone(l))
#print(l)
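# --- Editor's note: minimal usage check, not part of the original module ---
# Re-uses the sample list from the comment at the top; fillNone mutates the list in
# place, replacing each run of None with evenly spaced, floor-rounded integer steps.
# Expected result: [1, 2, 3, 4, 5, 8, 10, 9, 9, 9, 8]
if __name__ == "__main__":
    sample = [1, 2, None, None, None, 8, 10, 9, None, None, 8]
    fillNone(sample)
    print(sample)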
|
[
"sourabmajh@gmail.com"
] |
sourabmajh@gmail.com
|
dfe222014934e58ee6918b968a783bb1b48102ec
|
10475b80244955f380820898b0197de8b82cf41e
|
/user_mailbox/models/res_users.py
|
d1ef7eb440898de1151113aef51d2830182be539
|
[] |
no_license
|
marionumza/base
|
ec92de4ee50d319b4e9b95309059c99b766c5b11
|
044a5f5da659957d31e1c063375c5e83fc5d5134
|
refs/heads/master
| 2020-12-28T06:39:40.489441
| 2019-10-28T06:18:39
| 2019-10-28T06:18:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
# -*- coding: utf-8 -*-
# See LICENSE file for full copyright and licensing details.
"""Res Users Model."""
from odoo import models, fields
class ResUsers(models.Model):
"""Added list of incoming and outgoing mail server for user."""
_inherit = 'res.users'
incoming_mail_server_ids = fields.One2many("fetchmail.server", "user_id",
string="Incoming Mail Servers")
outgoing_mail_server_ids = fields.One2many("ir.mail_server", "user_id",
string="Outgoing Mail Servers")
|
[
"promitgt@gmail.com"
] |
promitgt@gmail.com
|
7f6f59e39a0cf95c2270f55d00248d46fc8634da
|
b1abf1f549b9e3029f3fb56bfcf3bf91a4258f4e
|
/Class_Weights_Mutual_Cold/MLP_Cold_hyper_KHI_500.py
|
eae1a01308f40d1e39b5393411f0135f15c7927f
|
[] |
no_license
|
tundik/coldcompare2017
|
d223954b62bc004781884a8f103bcae3c1c48c96
|
0d69828cf22198e31cc441a1c750d3c0f4fb3888
|
refs/heads/master
| 2021-01-18T04:10:53.003705
| 2017-04-01T19:29:17
| 2017-04-01T19:29:17
| 85,758,213
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,893
|
py
|
# coding: utf-8
from __future__ import print_function
import os
import numpy as np
from numpy import newaxis
np.random.seed(1337)
import keras
from keras.utils.np_utils import to_categorical
from keras.layers import Dense, Input, Flatten
from keras.layers import Convolution1D, MaxPooling1D, Embedding, Dense,Dropout , Activation
from keras.models import Model,Sequential
import sys
from keras.optimizers import SGD
from sklearn.metrics import classification_report,recall_score,accuracy_score,confusion_matrix,roc_curve,roc_auc_score
import pandas as pd
import random
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
from keras.optimizers import RMSprop
from keras.datasets import mnist
from keras.utils import np_utils
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import mutual_info_classif
from myfunc import encode
from myfunc import create_class_weight
from myfunc import delete_column
def data():
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import mutual_info_classif
print("Data mfcc read started...")
data = pd.read_csv("ComParE2017_Cold.ComParE.train.arff",delimiter=',',skiprows=range(0, 6379))
    data = data.values  # .as_matrix() was removed in recent pandas; .values works across versions
print ("Data mfcc read finished.")
data=data[:,1:6375]
Y_train=[x[6373] for x in data]
Y_train[300]
x_train=data[:,0:6373]
labels = ['C','NC']
label2ind = {label: (index + 1) for index, label in enumerate(labels)}
ind2label = {(index + 1): label for index, label in enumerate(labels)}
print (label2ind)
max_label = max(label2ind.values())+1
y_enc = [[label2ind[ey] for ey in Y_train]]
y_enc = [[encode(c, max_label) for c in ey] for ey in y_enc]
y_enci =delete_column(y_enc)
y_train=y_enci[0]
print("Data mfcc read started...")
data2 = pd.read_csv("ComParE2017_Cold.ComParE.devel.arff",delimiter=',',skiprows=range(0, 6379))
data2=data2.as_matrix()
print ("Data mfcc read finished.")
data2=data2[:,1:6375]
Y_val=[x[6373] for x in data2]
x_val=data2[:,0:6373]
y_enc = [[label2ind[ey] for ey in Y_val]]
y_enc = [[encode(c, max_label) for c in ey] for ey in y_enc]
y_enci =delete_column(y_enc)
y_val=y_enci[0]
scaler = preprocessing.StandardScaler().fit(x_train)
train_data_input = scaler.transform(x_train)
valid_data_input = scaler.transform(x_val)
test_data_input = scaler.transform(x_val)
train_output = y_train
validation_output = y_val
test_output = y_val
print(train_data_input.shape)
print(test_data_input.shape)
print(valid_data_input.shape)
print(train_output.shape)
print(test_output.shape)
print(validation_output.shape)
kbest = SelectKBest(score_func=mutual_info_classif, k=500).fit(train_data_input, train_output[:,0])
train_input = kbest.transform(train_data_input)
validation_input = kbest.transform(valid_data_input)
test_input = kbest.transform(test_data_input)
import pickle
pickle.dump( kbest, open( "best_mut500.pickle", "wb" ) )
label_count={}
for i in range(train_output.shape[-1]):
label_count.update({int(i):len(train_output[train_output[:,int(i)]==1])})
cweights=create_class_weight(label_count)
return train_input, train_output, validation_input, validation_output, test_input, test_output, cweights
def model(train_input, train_output, validation_input, validation_output, test_input, test_output, cweights):
earlyStopping=keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
model = Sequential()
model.add(Dense({{choice([150,300,500,750,1000])}}, input_shape=(500,), init={{choice(['glorot_normal','glorot_uniform'])}}))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense({{choice([150,300,500,750,1000])}}, activation='relu', init={{choice(['glorot_normal','glorot_uniform'])}}))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense({{choice([150,300,500,750,1000])}}, activation='relu', init={{choice(['glorot_normal','glorot_uniform'])}}))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense(2, activation='softmax'))
epochs = 100
model.compile(loss='binary_crossentropy',optimizer={{choice(['rmsprop','adam'])}},metrics=['acc'])
model.fit(train_input, train_output, nb_epoch=epochs,batch_size={{choice([50,100,150,200,250,300])}}, callbacks=[earlyStopping], shuffle=True, validation_data = (validation_input, validation_output), class_weight=cweights)
score = model.evaluate(test_input, test_output)
accuracy = score[1]
loss = score[0]
print("Accuracy: ", accuracy, " Loss: ", loss)
pr = model.predict_classes(test_input)
yh = test_output.argmax(1)
print("\n")
print (recall_score(yh, pr, average="macro"))
uar=recall_score(yh, pr, average="macro")
print (uar)
return {'loss': -uar, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
train_input, train_output, validation_input, validation_output, test_input, test_output, cweights = data()
best_run, best_model = optim.minimize(model=model,
data=data,
algo=tpe.suggest,
max_evals=70,
trials=Trials())
print("Evalutation of best performing model:")
print(best_model.evaluate(test_input, test_output))
print (best_run)
pr = best_model.predict_classes(test_input)
yh = test_output.argmax(1)
print (recall_score(yh, pr, average="macro"))
print (classification_report(yh,pr))
best_model.save('best_model500.h5')
|
[
"czbalint14@gmail.com"
] |
czbalint14@gmail.com
|
7ab917ac2d5b6dbd613df8ad73eaa04c6fd703b9
|
e042a2437aa60fdc966c4bb97d87f27fb6378c9c
|
/vae-mnist/utils.py
|
cbc53886b453559793ea1d4b8a743196b76eca8f
|
[] |
no_license
|
JZDBB/OOC-for-research
|
a8653f69a01fe9edd024411234ca422e220a437f
|
265fbd1732460acbe2a36f4273635485abf0eb0c
|
refs/heads/master
| 2020-07-04T04:08:51.130198
| 2019-08-21T13:00:38
| 2019-08-21T13:00:38
| 202,150,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
import numpy as np
def merge(images, size):
h, w = images.shape[1], images.shape[2]
img = np.zeros((h * size[0], w * size[1]))
for idx, image in enumerate(images):
i = idx % size[1]
        j = idx // size[1]  # integer division: a float index breaks numpy slicing on Python 3
img[j*h:j*h+h, i*w:i*w+w] = image
return img
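# --- Editor's note: minimal usage check, not part of the original module ---
# Tiles 16 random 28x28 "images" into a 4x4 grid (relies on integer division in merge
# when run under Python 3).
if __name__ == "__main__":
    batch = np.random.rand(16, 28, 28)
    mosaic = merge(batch, (4, 4))
    print(mosaic.shape)  # (112, 112)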
|
[
"oxuyining@gmail.com"
] |
oxuyining@gmail.com
|
208e6516ab0e2ac5658d992587c4ae1606eb7ff5
|
c473ecff4600ade1ad126e483999a89677bfd635
|
/lightcone_FRB_decreasingz_xlos_forHaloFinder.py
|
8e9aa60945fd1516774db04bbf6f17c6fec64a77
|
[] |
no_license
|
pagano-michael/FRB
|
1f34b5adac49ddfa09cd781e37a71f3ad173e5b9
|
33c17ed854ea2e37b44e350dbbdc6dba4ac16975
|
refs/heads/master
| 2023-02-06T07:54:51.310251
| 2020-12-27T15:18:26
| 2020-12-27T15:18:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,268
|
py
|
#!/usr/bin/env python
# coding: utf-8
#This code takes in an arbitrary set of boxes, whose directory and redshift ranges you must provide, and makes a lightcone
import numpy as np
import matplotlib.pyplot as pl
import sys
import os
import astropy
from astropy.cosmology import Planck15 as p15
from astropy.cosmology import z_at_value
from astropy.cosmology import WMAP9 as cosmo
def lightcone(**kwargs ):
#set defaults:
mode = 'xH'
marker = 'xH_'
N = 500
DIM = 200
Box_length = 300
z_start = 10
z_end = 6
nboxes = 21
directory = '/Users/michael/Documents/MSI-C/21cmFAST/Boxes/z6-10/OriginalTemperatureBoxesNoGaussianities/'
slice = DIM - 1
#sort arguments
if 'marker' in kwargs:
marker = kwargs.get('marker')
if 'DIM' in kwargs:
DIM = kwargs.get('DIM')
if 'z_range_of_boxes' in kwargs:
z_range_of_boxes = kwargs.get('z_range_of_boxes')
nboxes = len(z_range_of_boxes)
z_start = np.max(z_range_of_boxes)
z_end = np.min(z_range_of_boxes)
print(z_start,z_end, nboxes)
if 'N' in kwargs:
N = kwargs.get('N')
else:
N = 50*nboxes
if 'Box_length' in kwargs:
Box_length = kwargs.get('Box_length')
if 'box_slice' in kwargs:
slice = kwargs.get('box_slice')
if 'return_redshifts' in kwargs:
return_redshifts = kwargs.get('return_redshifts')
else:
return_redshifts = False
if 'sharp_cutoff' in kwargs:
sharp_cutoff = kwargs.get('sharp_cutoff')
else:
sharp_cutoff = np.inf
if 'halo_boxes_z' in kwargs:
halo_boxes_z = kwargs.get('halo_boxes_z')
#21cmFAST boxes have different naming tags, if it is an ionization box the redshifts info will be found
#at different parts of the filename as compared to a density box
if 'smoothed' in marker:
#this is a density box box
s,e=25,31
else:
if 'xH' in marker:
s,e = 10, 20
else:
if 'halos' in marker:
s , e = 5, 10
else:
print('We can not identify what box this is')
return -1
#the total range of redshifts that this lightcone will span
z_range_of_boxes = np.linspace(z_start,z_end,nboxes)
####################################################
# useful functions
####################################################
#this function determines which boxes a given redshift lies between
def find_sandwiched_bins(z_range, z):
z_floor = np.max(z_range)
z_ceil = z_range[1]
binn = 1
while(z_ceil >= np.min(z_range)):
if ((z <= z_floor) and (z > z_ceil)):
return ( z_ceil, z_floor)
z_floor = z_ceil
if z_ceil == np.max(z_range):
print('looking for ' , z_range, z)
break
z_ceil = z_range[binn+1]
binn += 1
#safety net
if binn > 1000:
print('breaking')
break
#function which converts a comoving distance to a pixel location within the box
def comoving2pixel(DIM, Box_length, comoving_distance):
return int(float(comoving_distance * DIM)/float(Box_length))
#function which determines whether we have exceeded the maximum allowable redshift for a box
def didweswitchbox(historyofzminus, z_plus, ctr):
if z_plus < historyofzminus[ctr - 1 ]:
return True
else:
return False
####################################################
# initialize all relevant arrays
####################################################
lightcone = np.zeros((N, DIM, DIM))
lightcone_halo = np.zeros((N))
z_range = np.linspace(z_start,z_end,N)
zs = []
z = z_range[0]
ctr = 0
comoving_distance_z0_zstart = cosmo.comoving_distance(z_range[0]).value
prev_pix_loc = 0
pixel_addition = 0
pixel_origin = 0
pixel_location_relative_to_origin = 0
historyofzminus = []
####################################################
# loop through redshifts
####################################################
box_path_redshifts = z_range_of_boxes
#scroll through all the redshifts and pick out the slice of the box that corresponds to that z
while(z > np.min(z_range)):
#this redshift is sandwiched between the following z
        z_sandwich = find_sandwiched_bins(box_path_redshifts, z)
        z_minus = z_sandwich[0]
        z_plus = z_sandwich[1]
historyofzminus.append(z_plus)
#these are the boxes that z is sandwiched between
xH_minus = halo_boxes_z[list(box_path_redshifts).index(z_minus)]
xH_plus = halo_boxes_z[list(box_path_redshifts).index(z_plus)]
#convert that redshift to a comoving distance
comoving_distance_z = cosmo.comoving_distance(z).value
comoving_distance_z0_to_z = comoving_distance_z0_zstart - comoving_distance_z
comoving_distance_from_last_switch = cosmo.comoving_distance(z_plus).value
if ctr == 0:
pixel_addition = comoving2pixel(DIM,Box_length, comoving_distance_z0_to_z)
prev_pix_loc = -pixel_addition + slice
pixel_origin = slice
#save this redshift
zs.append(z)
lightcone[ctr,:,:] = (xH_plus[:,:,slice] - xH_minus[:,:,slice])*((z - z_minus)/(z_plus - z_minus)) + xH_minus[:,:,slice]
#increment counter and redshift
ctr += 1
z = z_range[ctr]
#skip to the next step
continue
else:
if didweswitchbox(historyofzminus, z_plus, ctr):
pixel_origin = prev_pix_loc
pixel_location_relative_to_origin = -comoving2pixel(DIM,Box_length, comoving_distance_from_last_switch - comoving_distance_z)
pixel_addition = (pixel_location_relative_to_origin + pixel_origin)%DIM
prev_pix_loc = pixel_addition
#save this redshift
zs.append(z)
#save the box information for this particular lightcone slice
lightcone[ctr,:,:] = (xH_plus[pixel_addition,:,:] - xH_minus[pixel_addition,:,:])*((z - z_minus)/(z_plus - z_minus)) + xH_minus[pixel_addition,:,:]
ctr += 1
z = z_range[ctr]
#pl.savefig(str(ctr)+'.png')
#safety net
if ctr > N:
break
#does the user want us to stop the z scroll after a particular value?
if ctr >= sharp_cutoff:
if return_redshifts:
return lightcone[0:sharp_cutoff,:,] , np.array(zs[0:])
else:
return lightcone[0:sharp_cutoff,:,]
#return the lightcone history as the redshift log (should the user specify that)
if return_redshifts:
return lightcone[0:int(N-1),:,] , np.array(zs)
else:
return lightcone[0:int(N-1),:,]
#lightconepng = lightcone(N = 500 )
#directory = '/Users/michael/Research/LAE_Clustering/Boxes_w_HaloFinder/'
#pl.imshow(np.swapaxes(lightconepng,0,2)[100])
#pl.savefig('Lightcone.png')
#pl.ylabel('Box slice at x = 0')
#pl.xlabel('Redshift')
#pl.show()
#pl.close()
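#--------------------------------------------------------------------------
#Minimal usage sketch (not part of the original pipeline; the variable names
#and the box container below are illustrative assumptions). Each lightcone
#slice filled above is a linear interpolation between the two coeval boxes
#that bracket the current redshift:
#    interp = (box_hi - box_lo)*((z - z_lo)/(z_hi - z_lo)) + box_lo
#where box_lo/box_hi are cubes at redshifts z_lo < z < z_hi.
#Assuming 'boxes' is a list of DIM^3 numpy arrays ordered to match
#z_range_of_boxes, a call might look like:
#    cone, zs = lightcone(halo_boxes_z=boxes,
#                         z_range_of_boxes=np.linspace(10, 6, 21),
#                         DIM=200, Box_length=300, return_redshifts=True)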
|
[
"michael.pagano@mail.mcgill.ca"
] |
michael.pagano@mail.mcgill.ca
|
0c0dd439cd2989daf5ab75b214422e48e706d813
|
023c5085b5aa58b1ee07e602ac2afdb17fc11ec5
|
/Decision_Tree_Classifier/ReadData.py
|
2463bb556dcc60857eb27d7fdcd7fe4e0579912e
|
[] |
no_license
|
AnubhavGupta3377/Machine-Learning-Algorithms
|
b87b7d6c5934ca4f1c6cce6bcf6988156518faa3
|
c454f88387f4e6a8cb5357826d793f0582df1efc
|
refs/heads/master
| 2021-05-06T06:29:01.262205
| 2017-12-11T13:47:28
| 2017-12-11T13:47:28
| 113,857,275
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,242
|
py
|
from collections import defaultdict
class ReadData:
'''
1. labels : list of all the labels of all the examples
2. features : list of list of feature values of all the examples
3. values : dictionary that maps every feature to the set of
possible values it can take
4. numExamples : number of (training) examples in the dataset
5. n : #features + 1(label)
6. names : maps every number to associated name of the attribute
7. nums : Reverse of names
8. outputLabel : Label of predicted output
'''
def __init__(self, labels=[], features=[], values=defaultdict(set), numExamples=0, n=0):
self.labels = labels[:]
self.features = features[:]
self.values = values
self.numExamples = numExamples
self.n = n
#self.m = m
self.names = defaultdict(str)
self.nums = defaultdict(int)
self.outputLabel = None
'''
This function reads the data from the file named filename and
stores the information in the form that is easier to process
for the decision tree.
'''
def readData(self, filename):
dataFile = open(filename, 'r')
linesRead = 0
for line in dataFile:
linesRead += 1
if linesRead == 1:
self.n, self.m = map(int, line.split())
elif linesRead == 2:
attrs = line.split()
self.outputLabel = attrs[0]
for i in range(1,self.n):
self.names[i-1] = attrs[i]
self.nums[attrs[i]] = i-1
elif linesRead == 3:
labelType = line[0]
types = [0 for i in range(self.n)]
for i in range(1,self.n+1):
types[i-1] = line[i]
if labelType != 'b':
print 'Only binary classification is allowed'
return 2
else:
line = line.split()
self.labels.append(int(line[0]))
self.features.append(line[1:])
for i in range(1,self.n):
self.values[i-1].add(line[i])
self.numExamples = linesRead - 3
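# Minimal usage sketch (assumption: a whitespace-separated data file named
# 'train.dat' laid out as parsed above -- line 1: two integers (n and m),
# line 2: the output label followed by the attribute names, line 3: the type
# characters starting with 'b', and every later line: a label followed by the
# feature values):
#
# data = ReadData()
# data.readData('train.dat')
# print data.numExamples, data.names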
|
[
"anubhagu@adobe.com"
] |
anubhagu@adobe.com
|
21ac72b7d43b4b2cd092a87758265844e747314f
|
7be17509048a46e8d33d051495f6c01d97aa1b14
|
/plot_price.py
|
f735c59fce937df3e662928fdf125d59f865db16
|
[
"Apache-2.0"
] |
permissive
|
javierarilos/market_session
|
7aca2d1fc0aaa37825d44fa6d4e15af67a2b93f6
|
35f3020acfdc771cb6a7bfddabb7ca481c792aa5
|
refs/heads/master
| 2021-01-10T08:56:45.200902
| 2016-03-17T14:13:40
| 2016-03-17T14:13:40
| 52,529,884
| 0
| 0
| null | 2016-03-17T14:13:40
| 2016-02-25T14:14:33
|
Python
|
UTF-8
|
Python
| false
| false
| 327
|
py
|
""" Load data from pickle session file, with only one instrument.
Plot prices after removing zeroes
"""
import matplotlib.pyplot as plt
import preprocess_data
session_file = 'f_mupssan20140901.F:FESXU4.pkl'
mkt = preprocess_data.load_session(session_file)
ts = mkt[:, 1]
last = mkt[:, 4]
plt.plot(ts, last)
plt.show()
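# Sketch only (not part of the original script): the docstring mentions
# removing zeroes, so if load_session does not already filter them, zero
# prices could be dropped before plotting, assuming mkt is a numpy array:
# nonzero = last != 0
# plt.plot(ts[nonzero], last[nonzero])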
|
[
"javier.arilos@gmail.com"
] |
javier.arilos@gmail.com
|
44cea4fe3778010257338bd771d9d18ac7df6092
|
125c9f3d0180e2f11dce03c8edbd5f5924c26165
|
/api/envProjet/Scripts/pasteurize-script.py
|
a41b70ca648548d889ad5856e91983d007be18b7
|
[] |
no_license
|
pasterp/WhatBrandIsThat
|
816bbfd4918ad60e5f74952b1d50ec60b799d7cc
|
3e70b7578d6029a3367630863db93064d2a6185a
|
refs/heads/master
| 2023-01-23T10:26:02.119102
| 2020-05-26T13:26:26
| 2020-05-26T13:26:26
| 163,280,347
| 0
| 0
| null | 2022-12-09T18:26:23
| 2018-12-27T10:20:52
|
Objective-C
|
UTF-8
|
Python
| false
| false
| 443
|
py
|
#!C:\Users\Vbourdon\PycharmProjects\testRest\envProjet\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.17.1','console_scripts','pasteurize'
__requires__ = 'future==0.17.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('future==0.17.1', 'console_scripts', 'pasteurize')()
)
|
[
"32929884+ValentinBou@users.noreply.github.com"
] |
32929884+ValentinBou@users.noreply.github.com
|
9967bfbb48682fff74e8fa93da453b918a2d908b
|
43715a10381ec37c275850c2e4f5302cde18de8c
|
/rooms/models.py
|
8544758b5c7d49ad504a4a43c4f38656f611174b
|
[] |
no_license
|
dongdong-e/airbnb-clone
|
443f290baca4ea5c8f22f6c573383d11de4140f4
|
32c083c4e7f562d968639099d8439f26a666b175
|
refs/heads/master
| 2023-05-02T22:08:32.232594
| 2019-11-25T12:13:13
| 2019-11-25T12:13:13
| 219,305,006
| 0
| 0
| null | 2023-04-21T20:42:00
| 2019-11-03T13:27:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,842
|
py
|
from django.db import models
from django.urls import reverse
from django_countries.fields import CountryField
from core import models as core_models
class AbstractItem(core_models.TimeStampedModel):
""" Abstract Item """
name = models.CharField(max_length=80)
class Meta:
abstract = True
def __str__(self):
return self.name
class RoomType(AbstractItem):
""" RoomType Model Definition """
class Meta:
verbose_name = "Room Type"
ordering = ["name"]
class Amenity(AbstractItem):
""" Amenity Model Definition """
class Meta:
verbose_name_plural = "Amenities"
class Facility(AbstractItem):
""" Facility Model Definition """
class Meta:
verbose_name_plural = "Facilities"
class HouseRule(AbstractItem):
""" HouseRule Model Definition """
class Meta:
verbose_name = "House Rule"
class Photo(core_models.TimeStampedModel):
""" Photo Model Definition """
caption = models.CharField(max_length=80)
file = models.ImageField(upload_to="room_photos")
room = models.ForeignKey("Room", related_name="photos", on_delete=models.CASCADE)
def __str__(self):
return self.caption
class Room(core_models.TimeStampedModel):
""" Room Model Definition """
name = models.CharField(max_length=140)
description = models.TextField()
country = CountryField()
city = models.CharField(max_length=80)
price = models.IntegerField()
address = models.CharField(max_length=140)
guests = models.IntegerField()
beds = models.IntegerField()
bedrooms = models.IntegerField()
baths = models.IntegerField()
check_in = models.TimeField()
check_out = models.TimeField()
instant_book = models.BooleanField(default=False)
host = models.ForeignKey(
"users.User", related_name="rooms", on_delete=models.CASCADE
)
room_type = models.ForeignKey(
"RoomType", related_name="rooms", on_delete=models.SET_NULL, null=True
)
amenities = models.ManyToManyField("Amenity", related_name="rooms", blank=True)
facilities = models.ManyToManyField("Facility", related_name="rooms", blank=True)
house_rules = models.ManyToManyField("HouseRule", related_name="rooms", blank=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.city = str.capitalize(self.city)
super().save(*args, **kwargs)
def get_absolute_url(self):
return reverse("rooms:detail", kwargs={"pk": self.pk})
def total_rating(self):
all_reviews = self.reviews.all()
all_ratings = 0
if len(all_reviews) > 0:
for review in all_reviews:
all_ratings += review.rating_average()
return round(all_ratings / len(all_reviews), 2)
return 0
|
[
"youngdong9800@gmail.com"
] |
youngdong9800@gmail.com
|
4ed59eebd6c684deb8e7f456b283309a733233df
|
0942f23ffad253850099d7b994415ba3ab50d896
|
/pyinstaller/PyInstaller/build.py
|
44ace10c67ccba3daa0f291034dfae8d36ea8776
|
[] |
no_license
|
fabiomdiniz/Frey
|
d5fa09c67c82201d8f6a6df61e23f24b1e71c923
|
6d1f133b33afb4e810737e1690f89e1faf9ae0ee
|
refs/heads/master
| 2020-05-17T15:48:33.106859
| 2013-11-05T14:25:02
| 2013-11-05T14:25:02
| 2,523,746
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 63,105
|
py
|
#
# Copyright (C) 2005, Giovanni Bajo
# Based on previous work under copyright (c) 1999, 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# Build packages using spec files.
import sys
import os
import shutil
import pprint
import py_compile
import imp
import tempfile
import UserList
import bindepend
from PyInstaller.loader import pyi_archive, pyi_carchive
import PyInstaller.depend.imptracker
import PyInstaller.depend.modules
from PyInstaller import HOMEPATH, CONFIGDIR, PLATFORM
from PyInstaller.compat import is_win, is_unix, is_aix, is_darwin, is_cygwin
import PyInstaller.compat as compat
from PyInstaller.compat import hashlib
from PyInstaller.depend import dylib
from PyInstaller.utils import misc
import PyInstaller.log as logging
if is_win:
from PyInstaller.utils import winmanifest
logger = logging.getLogger(__name__)
STRINGTYPE = type('')
TUPLETYPE = type((None,))
UNCOMPRESSED, COMPRESSED = range(2)
DEFAULT_BUILDPATH = os.path.join('SPECPATH', 'build',
'pyi.TARGET_PLATFORM', 'SPECNAME')
SPEC = None
SPECPATH = None
BUILDPATH = None
WARNFILE = None
NOCONFIRM = None
# Some modules are included if they are detected at build-time or
# if a command-line argument is specified. (e.g. --ascii)
HIDDENIMPORTS = []
rthooks = {}
def _save_data(filename, data):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
outf = open(filename, 'w')
pprint.pprint(data, outf)
outf.close()
def _load_data(filename):
return eval(open(filename, 'rU').read())
def setupUPXFlags():
f = compat.getenv("UPX", "")
if is_win:
# Binaries built with Visual Studio 7.1 require --strip-loadconf
# or they won't compress. Configure.py makes sure that UPX is new
# enough to support --strip-loadconf.
f = "--strip-loadconf " + f
# Do not compress any icon, so that additional icons in the executable
# can still be externally bound
f = "--compress-icons=0 " + f
f = "--best " + f
compat.setenv("UPX", f)
def mtime(fnm):
try:
return os.stat(fnm)[8]
except:
return 0
def absnormpath(apath):
return os.path.abspath(os.path.normpath(apath))
def compile_pycos(toc):
"""Given a TOC or equivalent list of tuples, generates all the required
pyc/pyo files, writing in a local directory if required, and returns the
list of tuples with the updated pathnames.
"""
global BUILDPATH
# For those modules that need to be rebuilt, use the build directory
# PyInstaller creates during the build process.
basepath = os.path.join(BUILDPATH, "localpycos")
new_toc = []
for (nm, fnm, typ) in toc:
if typ != 'PYMODULE':
new_toc.append((nm, fnm, typ))
continue
# Trim the terminal "c" or "o"
source_fnm = fnm[:-1]
# We need to perform a build ourselves if the source is newer
# than the compiled, or the compiled doesn't exist, or if it
# has been written by a different Python version.
needs_compile = (mtime(source_fnm) > mtime(fnm)
or
open(fnm, 'rb').read()[:4] != imp.get_magic())
if needs_compile:
try:
py_compile.compile(source_fnm, fnm)
logger.debug("compiled %s", source_fnm)
except IOError:
# If we're compiling on a system directory, probably we don't
# have write permissions; thus we compile to a local directory
# and change the TOC entry accordingly.
ext = os.path.splitext(fnm)[1]
if "__init__" not in fnm:
# If it's a normal module, use last part of the qualified
# name as module name and the first as leading path
leading, mod_name = nm.split(".")[:-1], nm.split(".")[-1]
else:
# In case of a __init__ module, use all the qualified name
# as leading path and use "__init__" as the module name
leading, mod_name = nm.split("."), "__init__"
leading = os.path.join(basepath, *leading)
if not os.path.exists(leading):
os.makedirs(leading)
fnm = os.path.join(leading, mod_name + ext)
needs_compile = (mtime(source_fnm) > mtime(fnm)
or
open(fnm, 'rb').read()[:4] != imp.get_magic())
if needs_compile:
py_compile.compile(source_fnm, fnm)
logger.debug("compiled %s", source_fnm)
new_toc.append((nm, fnm, typ))
return new_toc
def addSuffixToExtensions(toc):
"""
Returns a new TOC with proper library suffix for EXTENSION items.
"""
new_toc = TOC()
for inm, fnm, typ in toc:
if typ in ('EXTENSION', 'DEPENDENCY'):
binext = os.path.splitext(fnm)[1]
if not os.path.splitext(inm)[1] == binext:
inm = inm + binext
new_toc.append((inm, fnm, typ))
return new_toc
#--- functions for checking guts ---
def _check_guts_eq(attr, old, new, last_build):
"""
rebuild is required if values differ
"""
if old != new:
logger.info("building because %s changed", attr)
return True
return False
def _check_guts_toc_mtime(attr, old, toc, last_build, pyc=0):
"""
    rebuild is required if mtimes of files listed in old toc are newer
    than last_build
if pyc=1, check for .py files, too
"""
for (nm, fnm, typ) in old:
if mtime(fnm) > last_build:
logger.info("building because %s changed", fnm)
return True
elif pyc and mtime(fnm[:-1]) > last_build:
logger.info("building because %s changed", fnm[:-1])
return True
return False
def _check_guts_toc(attr, old, toc, last_build, pyc=0):
"""
    rebuild is required if either the toc content changed or mtimes of
    files listed in old toc are newer than last_build
if pyc=1, check for .py files, too
"""
return (_check_guts_eq(attr, old, toc, last_build)
or _check_guts_toc_mtime(attr, old, toc, last_build, pyc=pyc))
def _check_path_overlap(path):
"""
Check that path does not overlap with BUILDPATH or SPECPATH (i.e.
BUILDPATH and SPECPATH may not start with path, which could be
caused by a faulty hand-edited specfile)
Raise SystemExit if there is overlap, return True otherwise
"""
specerr = 0
if BUILDPATH.startswith(path):
logger.error('Specfile error: The output path "%s" contains '
'BUILDPATH (%s)', path, BUILDPATH)
specerr += 1
if SPECPATH.startswith(path):
logger.error('Specfile error: The output path "%s" contains '
'SPECPATH (%s)', path, SPECPATH)
specerr += 1
if specerr:
raise SystemExit('Error: Please edit/recreate the specfile (%s) '
'and set a different output name (e.g. "dist").'
% SPEC)
return True
def _rmtree(path):
"""
Remove directory and all its contents, but only after user confirmation,
or if the -y option is set
"""
if NOCONFIRM:
choice = 'y'
elif sys.stdout.isatty():
choice = raw_input('WARNING: The output directory "%s" and ALL ITS '
'CONTENTS will be REMOVED! Continue? (y/n)' % path)
else:
raise SystemExit('Error: The output directory "%s" is not empty. '
'Please remove all its contents or use the '
'-y option (remove output directory without '
'confirmation).' % path)
if choice.strip().lower() == 'y':
logger.info('Removing dir %s', path)
shutil.rmtree(path)
else:
raise SystemExit('User aborted')
def check_egg(pth):
"""Check if path points to a file inside a python egg file (or to an egg
directly)."""
if os.path.altsep:
pth = pth.replace(os.path.altsep, os.path.sep)
components = pth.split(os.path.sep)
sep = os.path.sep
for i, name in zip(range(0, len(components)), components):
if name.lower().endswith(".egg"):
eggpth = sep.join(components[:i + 1])
if os.path.isfile(eggpth):
# eggs can also be directories!
return True
return False
#--
class Target:
invcnum = 0
def __init__(self):
# Get a (per class) unique number to avoid conflicts between
# toc objects
self.invcnum = self.__class__.invcnum
self.__class__.invcnum += 1
self.out = os.path.join(BUILDPATH, 'out%02d-%s.toc' %
(self.invcnum, self.__class__.__name__))
self.outnm = os.path.basename(self.out)
self.dependencies = TOC()
def __postinit__(self):
logger.info("checking %s", self.__class__.__name__)
if self.check_guts(mtime(self.out)):
self.assemble()
GUTS = []
def check_guts(self, last_build):
pass
def get_guts(self, last_build, missing='missing or bad'):
"""
returns None if guts have changed
"""
try:
data = _load_data(self.out)
except:
logger.info("building because %s %s", os.path.basename(self.out), missing)
return None
if len(data) != len(self.GUTS):
logger.info("building because %s is bad", self.outnm)
return None
for i, (attr, func) in enumerate(self.GUTS):
if func is None:
# no check for this value
continue
if func(attr, data[i], getattr(self, attr), last_build):
return None
return data
class Analysis(Target):
_old_scripts = set((
absnormpath(os.path.join(HOMEPATH, "support", "_mountzlib.py")),
absnormpath(os.path.join(CONFIGDIR, "support", "useUnicode.py")),
absnormpath(os.path.join(CONFIGDIR, "support", "useTK.py")),
absnormpath(os.path.join(HOMEPATH, "support", "useUnicode.py")),
absnormpath(os.path.join(HOMEPATH, "support", "useTK.py")),
absnormpath(os.path.join(HOMEPATH, "support", "unpackTK.py")),
absnormpath(os.path.join(HOMEPATH, "support", "removeTK.py")),
))
def __init__(self, scripts=None, pathex=None, hiddenimports=None,
hookspath=None, excludes=None):
Target.__init__(self)
# Include initialization Python code in PyInstaller analysis.
_init_code_path = os.path.join(HOMEPATH, 'PyInstaller', 'loader')
self.inputs = [
os.path.join(HOMEPATH, "support", "_pyi_bootstrap.py"),
os.path.join(_init_code_path, 'pyi_archive.py'),
os.path.join(_init_code_path, 'pyi_carchive.py'),
os.path.join(_init_code_path, 'pyi_iu.py'),
]
for script in scripts:
if absnormpath(script) in self._old_scripts:
logger.warn('Ignoring obsolete auto-added script %s', script)
continue
if not os.path.exists(script):
raise ValueError("script '%s' not found" % script)
self.inputs.append(script)
self.pathex = []
if pathex:
self.pathex = [absnormpath(path) for path in pathex]
self.hiddenimports = hiddenimports or []
# Include modules detected at build time. Like 'codecs' and encodings.
self.hiddenimports.extend(HIDDENIMPORTS)
self.hookspath = hookspath
self.excludes = excludes
self.scripts = TOC()
self.pure = TOC()
self.binaries = TOC()
self.zipfiles = TOC()
self.datas = TOC()
self.dependencies = TOC()
self.__postinit__()
GUTS = (('inputs', _check_guts_eq),
('pathex', _check_guts_eq),
('hookspath', _check_guts_eq),
('excludes', _check_guts_eq),
('scripts', _check_guts_toc_mtime),
('pure', lambda *args: apply(_check_guts_toc_mtime,
args, {'pyc': 1})),
('binaries', _check_guts_toc_mtime),
('zipfiles', _check_guts_toc_mtime),
('datas', _check_guts_toc_mtime),
('hiddenimports', _check_guts_eq),
)
def check_guts(self, last_build):
if last_build == 0:
logger.info("building %s because %s non existent", self.__class__.__name__, self.outnm)
return True
for fnm in self.inputs:
if mtime(fnm) > last_build:
logger.info("building because %s changed", fnm)
return True
data = Target.get_guts(self, last_build)
if not data:
return True
scripts, pure, binaries, zipfiles, datas, hiddenimports = data[-6:]
self.scripts = TOC(scripts)
self.pure = TOC(pure)
self.binaries = TOC(binaries)
self.zipfiles = TOC(zipfiles)
self.datas = TOC(datas)
self.hiddenimports = hiddenimports
return False
def assemble(self):
logger.info("running Analysis %s", os.path.basename(self.out))
# Reset seen variable to correctly discover dependencies
# if there are multiple Analysis in a single specfile.
bindepend.seen = {}
python = sys.executable
if not is_win:
while os.path.islink(python):
python = os.path.join(os.path.dirname(python), os.readlink(python))
depmanifest = None
else:
depmanifest = winmanifest.Manifest(type_="win32", name=specnm,
processorArchitecture=winmanifest.processor_architecture(),
version=(1, 0, 0, 0))
depmanifest.filename = os.path.join(BUILDPATH,
specnm + ".exe.manifest")
binaries = [] # binaries to bundle
# Always add Python's dependencies first
        # This ensures that its assembly dependencies under Windows get pulled in
# first, so that .pyd files analyzed later which may not have their own
# manifest and may depend on DLLs which are part of an assembly
# referenced by Python's manifest, don't cause 'lib not found' messages
binaries.extend(bindepend.Dependencies([('', python, '')],
manifest=depmanifest)[1:])
###################################################
# Scan inputs and prepare:
dirs = {} # input directories
pynms = [] # python filenames with no extension
for script in self.inputs:
if not os.path.exists(script):
raise SystemExit("Error: Analysis: script %s not found!" % script)
d, base = os.path.split(script)
if not d:
d = os.getcwd()
d = absnormpath(d)
pynm, ext = os.path.splitext(base)
dirs[d] = 1
pynms.append(pynm)
###################################################
# Initialize importTracker and analyze scripts
importTracker = PyInstaller.depend.imptracker.ImportTracker(
dirs.keys() + self.pathex, self.hookspath, self.excludes)
PyInstaller.__pathex__ = self.pathex[:]
scripts = [] # will contain scripts to bundle
for i, script in enumerate(self.inputs):
logger.info("Analyzing %s", script)
importTracker.analyze_script(script)
scripts.append((pynms[i], script, 'PYSOURCE'))
PyInstaller.__pathex__ = []
# analyze the script's hidden imports
for modnm in self.hiddenimports:
if modnm in importTracker.modules:
logger.info("Hidden import %r has been found otherwise", modnm)
continue
logger.info("Analyzing hidden import %r", modnm)
importTracker.analyze_one(modnm)
if not modnm in importTracker.modules:
logger.error("Hidden import %r not found", modnm)
###################################################
        # Fills pure, binaries and rthooks lists to TOC
pure = [] # pure python modules
zipfiles = [] # zipfiles to bundle
datas = [] # datafiles to bundle
rthooks = [] # rthooks if needed
# Find rthooks.
logger.info("Looking for run-time hooks")
for modnm, mod in importTracker.modules.items():
rthooks.extend(_findRTHook(modnm))
        # Analyze rthooks. Runtime hooks have to be analyzed as well,
        # otherwise some of their dependencies could be missing.
# Data structure in format:
# ('rt_hook_mod_name', '/rt/hook/file/name.py', 'PYSOURCE')
for hook_mod, hook_file, mod_type in rthooks:
logger.info("Analyzing rthook %s", hook_file)
importTracker.analyze_script(hook_file)
for modnm, mod in importTracker.modules.items():
# FIXME: why can we have a mod == None here?
if mod is None:
continue
datas.extend(mod.datas)
if isinstance(mod, PyInstaller.depend.modules.BuiltinModule):
pass
elif isinstance(mod, PyInstaller.depend.modules.ExtensionModule):
binaries.append((mod.__name__, mod.__file__, 'EXTENSION'))
# allows hooks to specify additional dependency
# on other shared libraries loaded at runtime (by dlopen)
binaries.extend(mod.binaries)
elif isinstance(mod, (PyInstaller.depend.modules.PkgInZipModule, PyInstaller.depend.modules.PyInZipModule)):
zipfiles.append(("eggs/" + os.path.basename(str(mod.owner)),
str(mod.owner), 'ZIPFILE'))
else:
# mf.PyModule instances expose a list of binary
# dependencies, most probably shared libraries accessed
# via ctypes. Add them to the overall required binaries.
binaries.extend(mod.binaries)
if modnm != '__main__':
pure.append((modnm, mod.__file__, 'PYMODULE'))
# Add remaining binary dependencies
binaries.extend(bindepend.Dependencies(binaries,
manifest=depmanifest))
if is_win:
depmanifest.writeprettyxml()
self._check_python_library(binaries)
if zipfiles:
scripts.insert(-1, ("_pyi_egg_install.py", os.path.join(HOMEPATH, "support/_pyi_egg_install.py"), 'PYSOURCE'))
# Add realtime hooks just before the last script (which is
# the entrypoint of the application).
scripts[-1:-1] = rthooks
self.scripts = TOC(scripts)
self.pure = TOC(pure)
self.binaries = TOC(binaries)
self.zipfiles = TOC(zipfiles)
self.datas = TOC(datas)
try: # read .toc
oldstuff = _load_data(self.out)
except:
oldstuff = None
self.pure = TOC(compile_pycos(self.pure))
newstuff = tuple([getattr(self, g[0]) for g in self.GUTS])
if oldstuff != newstuff:
_save_data(self.out, newstuff)
wf = open(WARNFILE, 'w')
for ln in importTracker.getwarnings():
wf.write(ln + '\n')
wf.close()
logger.info("Warnings written to %s", WARNFILE)
return 1
logger.info("%s no change!", self.out)
return 0
def _check_python_library(self, binaries):
"""
Verify presence of the Python dynamic library. If missing
from the binaries try to find the Python library. Set
the library name for the bootloader.
        Some linux distributions (e.g. debian-based) statically link the
        Python executable against libpython, so bindepend doesn't include
        it in its output.
        Darwin custom builds could possibly also have non-framework style libraries,
        so this method checks for that variant as well.
"""
pyver = sys.version_info[:2]
if is_win:
names = ('python%d%d.dll' % pyver,)
elif is_cygwin:
names = ('libpython%d%d.dll' % pyver,)
elif is_darwin:
names = ('Python', '.Python', 'libpython%d.%d.dylib' % pyver)
elif is_aix:
# Shared libs on AIX are archives with shared object members, thus the ".a" suffix.
names = ('libpython%d.%d.a' % pyver,)
elif is_unix:
# Other *nix platforms.
names = ('libpython%d.%d.so.1.0' % pyver,)
else:
raise SystemExit('Your platform is not yet supported.')
for (nm, fnm, typ) in binaries:
for name in names:
if typ == 'BINARY' and fnm.endswith(name):
# Python library found.
# FIXME Find a different way how to pass python libname to CArchive.
os.environ['PYI_PYTHON_LIBRARY_NAME'] = name
                    return # Stop function.
# Resume search using the first item in names.
name = names[0]
logger.info('Looking for Python library %s', name)
if is_unix:
lib = bindepend.findLibrary(name)
if lib is None:
raise IOError("Python library not found!")
elif is_darwin:
# On MacPython, Analysis.assemble is able to find the libpython with
# no additional help, asking for sys.executable dependencies.
# However, this fails on system python, because the shared library
# is not listed as a dependency of the binary (most probably it's
# opened at runtime using some dlopen trickery).
# This happens on Mac OS X when Python is compiled as Framework.
# Python compiled as Framework contains same values in sys.prefix
# and exec_prefix. That's why we can use just sys.prefix.
# In virtualenv PyInstaller is not able to find Python library.
# We need special care for this case.
if compat.is_virtualenv:
py_prefix = sys.real_prefix
else:
py_prefix = sys.prefix
logger.info('Looking for Python library in %s', py_prefix)
lib = os.path.join(py_prefix, name)
if not os.path.exists(lib):
raise IOError("Python library not found!")
# Python library found.
# FIXME Find a different way how to pass python libname to CArchive.
os.environ['PYI_PYTHON_LIBRARY_NAME'] = name
# Include Python library as binary dependency.
binaries.append((os.path.basename(lib), lib, 'BINARY'))
def _findRTHook(modnm):
rslt = []
for script in rthooks.get(modnm) or []:
nm = os.path.basename(script)
nm = os.path.splitext(nm)[0]
if os.path.isabs(script):
path = script
else:
path = os.path.join(HOMEPATH, script)
rslt.append((nm, path, 'PYSOURCE'))
return rslt
class PYZ(Target):
typ = 'PYZ'
def __init__(self, toc, name=None, level=9, crypt=None):
Target.__init__(self)
self.toc = toc
self.name = name
if name is None:
self.name = self.out[:-3] + 'pyz'
# Level of zlib compression.
self.level = level
if config['useCrypt'] and crypt is not None:
self.crypt = pyi_archive.Keyfile(crypt).key
else:
self.crypt = None
self.dependencies = compile_pycos(config['PYZ_dependencies'])
self.__postinit__()
GUTS = (('name', _check_guts_eq),
('level', _check_guts_eq),
('crypt', _check_guts_eq),
('toc', _check_guts_toc), # todo: pyc=1
)
def check_guts(self, last_build):
if not os.path.exists(self.name):
logger.info("rebuilding %s because %s is missing",
self.outnm, os.path.basename(self.name))
return True
data = Target.get_guts(self, last_build)
if not data:
return True
return False
def assemble(self):
logger.info("building PYZ %s", os.path.basename(self.out))
pyz = pyi_archive.ZlibArchive(level=self.level, crypt=self.crypt)
toc = self.toc - config['PYZ_dependencies']
pyz.build(self.name, toc)
_save_data(self.out, (self.name, self.level, self.crypt, self.toc))
return 1
def cacheDigest(fnm):
data = open(fnm, "rb").read()
digest = hashlib.md5(data).digest()
return digest
def checkCache(fnm, strip=0, upx=0, dist_nm=None):
"""
Cache prevents preprocessing binary files again and again.
'dist_nm' Filename relative to dist directory. We need it on Mac
to determine level of paths for @loader_path like
'@loader_path/../../' for qt4 plugins.
"""
    # On darwin a cache is required anyway to keep the libraries
    # with relative install names. Caching on darwin does not work
    # since we need to modify binary headers to use relative paths
    # to dll dependencies starting with '@loader_path'.
if ((not strip and not upx and not is_darwin and not is_win)
or fnm.lower().endswith(".manifest")):
return fnm
if strip:
strip = 1
else:
strip = 0
if upx:
upx = 1
else:
upx = 0
# Load cache index
# Make cachedir per Python major/minor version.
# This allows parallel building of executables with different
# Python versions as one user.
pyver = ('py%d%s') % (sys.version_info[0], sys.version_info[1])
cachedir = os.path.join(CONFIGDIR, 'bincache%d%d_%s' % (strip, upx, pyver))
if not os.path.exists(cachedir):
os.makedirs(cachedir)
cacheindexfn = os.path.join(cachedir, "index.dat")
if os.path.exists(cacheindexfn):
cache_index = _load_data(cacheindexfn)
else:
cache_index = {}
# Verify if the file we're looking for is present in the cache.
basenm = os.path.normcase(os.path.basename(fnm))
digest = cacheDigest(fnm)
cachedfile = os.path.join(cachedir, basenm)
cmd = None
if basenm in cache_index:
if digest != cache_index[basenm]:
os.remove(cachedfile)
else:
# On Mac OS X we need relative paths to dll dependencies
# starting with @executable_path
if is_darwin:
dylib.mac_set_relative_dylib_deps(cachedfile, dist_nm)
return cachedfile
if upx:
if strip:
fnm = checkCache(fnm, 1, 0)
bestopt = "--best"
# FIXME: Linux builds of UPX do not seem to contain LZMA (they assert out)
# A better configure-time check is due.
if config["hasUPX"] >= (3,) and os.name == "nt":
bestopt = "--lzma"
upx_executable = "upx"
if config.get('upx_dir'):
upx_executable = os.path.join(config['upx_dir'], upx_executable)
cmd = [upx_executable, bestopt, "-q", cachedfile]
else:
if strip:
# -S = strip only debug symbols.
# The default strip behaviour breaks some shared libraries
# under Mac OSX
cmd = ["strip", "-S", cachedfile]
shutil.copy2(fnm, cachedfile)
os.chmod(cachedfile, 0755)
if pyasm and fnm.lower().endswith(".pyd"):
# If python.exe has dependent assemblies, check for embedded manifest
# of cached pyd file because we may need to 'fix it' for pyinstaller
try:
res = winmanifest.GetManifestResources(os.path.abspath(cachedfile))
except winresource.pywintypes.error, e:
if e.args[0] == winresource.ERROR_BAD_EXE_FORMAT:
# Not a win32 PE file
pass
else:
logger.error(os.path.abspath(cachedfile))
raise
else:
if winmanifest.RT_MANIFEST in res and len(res[winmanifest.RT_MANIFEST]):
for name in res[winmanifest.RT_MANIFEST]:
for language in res[winmanifest.RT_MANIFEST][name]:
try:
manifest = winmanifest.Manifest()
manifest.filename = ":".join([cachedfile,
str(winmanifest.RT_MANIFEST),
str(name),
str(language)])
manifest.parse_string(res[winmanifest.RT_MANIFEST][name][language],
False)
except Exception, exc:
logger.error("Cannot parse manifest resource %s, "
"%s from", name, language)
logger.error(cachedfile)
logger.exception(exc)
else:
# Fix the embedded manifest (if any):
# Extension modules built with Python 2.6.5 have
# an empty <dependency> element, we need to add
# dependentAssemblies from python.exe for
# pyinstaller
olen = len(manifest.dependentAssemblies)
_depNames = set([dep.name for dep in
manifest.dependentAssemblies])
for pydep in pyasm:
if not pydep.name in _depNames:
logger.info("Adding %r to dependent "
"assemblies of %r",
pydep.name, cachedfile)
manifest.dependentAssemblies.append(pydep)
_depNames.update(pydep.name)
if len(manifest.dependentAssemblies) > olen:
try:
manifest.update_resources(os.path.abspath(cachedfile),
[name],
[language])
except Exception, e:
logger.error(os.path.abspath(cachedfile))
raise
if cmd:
try:
compat.exec_command(*cmd)
except OSError, e:
raise SystemExit("Execution failed: %s" % e)
# update cache index
cache_index[basenm] = digest
_save_data(cacheindexfn, cache_index)
# On Mac OS X we need relative paths to dll dependencies
# starting with @executable_path
if is_darwin:
dylib.mac_set_relative_dylib_deps(cachedfile, dist_nm)
return cachedfile
UNCOMPRESSED, COMPRESSED, ENCRYPTED = range(3)
class PKG(Target):
typ = 'PKG'
xformdict = {'PYMODULE': 'm',
'PYSOURCE': 's',
'EXTENSION': 'b',
'PYZ': 'z',
'PKG': 'a',
'DATA': 'x',
'BINARY': 'b',
'ZIPFILE': 'Z',
'EXECUTABLE': 'b',
'DEPENDENCY': 'd'}
def __init__(self, toc, name=None, cdict=None, exclude_binaries=0,
strip_binaries=0, upx_binaries=0, crypt=0):
Target.__init__(self)
self.toc = toc
self.cdict = cdict
self.name = name
self.exclude_binaries = exclude_binaries
self.strip_binaries = strip_binaries
self.upx_binaries = upx_binaries
self.crypt = crypt
if name is None:
self.name = self.out[:-3] + 'pkg'
if self.cdict is None:
self.cdict = {'EXTENSION': COMPRESSED,
'DATA': COMPRESSED,
'BINARY': COMPRESSED,
'EXECUTABLE': COMPRESSED,
'PYSOURCE': COMPRESSED,
'PYMODULE': COMPRESSED}
if self.crypt:
self.cdict['PYSOURCE'] = ENCRYPTED
self.cdict['PYMODULE'] = ENCRYPTED
self.__postinit__()
GUTS = (('name', _check_guts_eq),
('cdict', _check_guts_eq),
('toc', _check_guts_toc_mtime),
('exclude_binaries', _check_guts_eq),
('strip_binaries', _check_guts_eq),
('upx_binaries', _check_guts_eq),
('crypt', _check_guts_eq),
)
def check_guts(self, last_build):
if not os.path.exists(self.name):
logger.info("rebuilding %s because %s is missing",
self.outnm, os.path.basename(self.name))
return 1
data = Target.get_guts(self, last_build)
if not data:
return True
# todo: toc equal
return False
def assemble(self):
logger.info("building PKG %s", os.path.basename(self.name))
trash = []
mytoc = []
seen = {}
toc = addSuffixToExtensions(self.toc)
for inm, fnm, typ in toc:
if not os.path.isfile(fnm) and check_egg(fnm):
# file is contained within python egg, it is added with the egg
continue
if typ in ('BINARY', 'EXTENSION', 'DEPENDENCY'):
if self.exclude_binaries and typ != 'DEPENDENCY':
self.dependencies.append((inm, fnm, typ))
else:
fnm = checkCache(fnm, self.strip_binaries,
self.upx_binaries and (is_win or is_cygwin)
and config['hasUPX'], dist_nm=inm)
# Avoid importing the same binary extension twice. This might
# happen if they come from different sources (eg. once from
# binary dependence, and once from direct import).
if typ == 'BINARY' and fnm in seen:
continue
seen[fnm] = 1
mytoc.append((inm, fnm, self.cdict.get(typ, 0),
self.xformdict.get(typ, 'b')))
elif typ == 'OPTION':
mytoc.append((inm, '', 0, 'o'))
else:
mytoc.append((inm, fnm, self.cdict.get(typ, 0), self.xformdict.get(typ, 'b')))
# Bootloader has to know the name of Python library.
# FIXME Find a different way how to pass python libname to CArchive.
archive = pyi_carchive.CArchive(
pylib_name=os.environ['PYI_PYTHON_LIBRARY_NAME'])
archive.build(self.name, mytoc)
_save_data(self.out,
(self.name, self.cdict, self.toc, self.exclude_binaries,
self.strip_binaries, self.upx_binaries, self.crypt))
for item in trash:
os.remove(item)
return 1
class EXE(Target):
typ = 'EXECUTABLE'
exclude_binaries = 0
append_pkg = 1
def __init__(self, *args, **kws):
Target.__init__(self)
self.console = kws.get('console', 1)
self.debug = kws.get('debug', 0)
self.name = kws.get('name', None)
self.icon = kws.get('icon', None)
self.versrsrc = kws.get('version', None)
self.manifest = kws.get('manifest', None)
self.resources = kws.get('resources', [])
self.strip = kws.get('strip', None)
self.upx = kws.get('upx', None)
self.crypt = kws.get('crypt', 0)
self.exclude_binaries = kws.get('exclude_binaries', 0)
self.append_pkg = kws.get('append_pkg', self.append_pkg)
if self.name is None:
self.name = self.out[:-3] + 'exe'
if not os.path.isabs(self.name):
self.name = os.path.join(SPECPATH, self.name)
if is_win or is_cygwin:
self.pkgname = self.name[:-3] + 'pkg'
else:
self.pkgname = self.name + '.pkg'
self.toc = TOC()
for arg in args:
if isinstance(arg, TOC):
self.toc.extend(arg)
elif isinstance(arg, Target):
self.toc.append((os.path.basename(arg.name), arg.name, arg.typ))
self.toc.extend(arg.dependencies)
else:
self.toc.extend(arg)
if is_win:
filename = os.path.join(BUILDPATH, specnm + ".exe.manifest")
self.manifest = winmanifest.create_manifest(filename, self.manifest,
self.console)
self.toc.append((os.path.basename(self.name) + ".manifest", filename,
'BINARY'))
self.pkg = PKG(self.toc, cdict=kws.get('cdict', None),
exclude_binaries=self.exclude_binaries,
strip_binaries=self.strip, upx_binaries=self.upx,
crypt=self.crypt)
self.dependencies = self.pkg.dependencies
self.__postinit__()
GUTS = (('name', _check_guts_eq),
('console', _check_guts_eq),
('debug', _check_guts_eq),
('icon', _check_guts_eq),
('versrsrc', _check_guts_eq),
('resources', _check_guts_eq),
('strip', _check_guts_eq),
('upx', _check_guts_eq),
('crypt', _check_guts_eq),
            ('mtm', None,), # checked below
)
def check_guts(self, last_build):
if not os.path.exists(self.name):
logger.info("rebuilding %s because %s missing",
self.outnm, os.path.basename(self.name))
return 1
if not self.append_pkg and not os.path.exists(self.pkgname):
logger.info("rebuilding because %s missing",
os.path.basename(self.pkgname))
return 1
data = Target.get_guts(self, last_build)
if not data:
return True
icon, versrsrc, resources = data[3:6]
if (icon or versrsrc or resources) and not config['hasRsrcUpdate']:
# todo: really ignore :-)
logger.info("ignoring icon, version, manifest and resources = platform not capable")
mtm = data[-1]
crypt = data[-2]
if crypt != self.crypt:
logger.info("rebuilding %s because crypt option changed", self.outnm)
return 1
if mtm != mtime(self.name):
logger.info("rebuilding %s because mtimes don't match", self.outnm)
return True
if mtm < mtime(self.pkg.out):
logger.info("rebuilding %s because pkg is more recent", self.outnm)
return True
return False
def _bootloader_file(self, exe):
if not self.console:
exe = exe + 'w'
if self.debug:
exe = exe + '_d'
return os.path.join("support", "loader", PLATFORM, exe)
def assemble(self):
logger.info("building EXE from %s", os.path.basename(self.out))
trash = []
if not os.path.exists(os.path.dirname(self.name)):
os.makedirs(os.path.dirname(self.name))
outf = open(self.name, 'wb')
exe = self._bootloader_file('run')
exe = os.path.join(HOMEPATH, exe)
if is_win or is_cygwin:
exe = exe + '.exe'
if config['hasRsrcUpdate'] and (self.icon or self.versrsrc or
self.resources):
tmpnm = tempfile.mktemp()
shutil.copy2(exe, tmpnm)
os.chmod(tmpnm, 0755)
if self.icon:
icon.CopyIcons(tmpnm, self.icon)
if self.versrsrc:
versioninfo.SetVersion(tmpnm, self.versrsrc)
for res in self.resources:
res = res.split(",")
for i in range(1, len(res)):
try:
res[i] = int(res[i])
except ValueError:
pass
resfile = res[0]
restype = resname = reslang = None
if len(res) > 1:
restype = res[1]
if len(res) > 2:
resname = res[2]
if len(res) > 3:
reslang = res[3]
try:
winresource.UpdateResourcesFromResFile(tmpnm, resfile,
[restype or "*"],
[resname or "*"],
[reslang or "*"])
except winresource.pywintypes.error, exc:
if exc.args[0] != winresource.ERROR_BAD_EXE_FORMAT:
logger.exception(exc)
continue
if not restype or not resname:
logger.error("resource type and/or name not specified")
continue
if "*" in (restype, resname):
logger.error("no wildcards allowed for resource type "
"and name when source file does not "
"contain resources")
continue
try:
winresource.UpdateResourcesFromDataFile(tmpnm,
resfile,
restype,
[resname],
[reslang or 0])
except winresource.pywintypes.error, exc:
logger.exception(exc)
trash.append(tmpnm)
exe = tmpnm
exe = checkCache(exe, self.strip, self.upx and config['hasUPX'])
self.copy(exe, outf)
if self.append_pkg:
logger.info("Appending archive to EXE %s", self.name)
self.copy(self.pkg.name, outf)
else:
logger.info("Copying archive to %s", self.pkgname)
shutil.copy2(self.pkg.name, self.pkgname)
outf.close()
os.chmod(self.name, 0755)
guts = (self.name, self.console, self.debug, self.icon,
self.versrsrc, self.resources, self.strip, self.upx,
self.crypt, mtime(self.name))
assert len(guts) == len(self.GUTS)
_save_data(self.out, guts)
for item in trash:
os.remove(item)
return 1
def copy(self, fnm, outf):
inf = open(fnm, 'rb')
while 1:
data = inf.read(64 * 1024)
if not data:
break
outf.write(data)
class DLL(EXE):
def assemble(self):
logger.info("building DLL %s", os.path.basename(self.out))
outf = open(self.name, 'wb')
dll = self._bootloader_file('inprocsrvr')
dll = os.path.join(HOMEPATH, dll) + '.dll'
self.copy(dll, outf)
self.copy(self.pkg.name, outf)
outf.close()
os.chmod(self.name, 0755)
_save_data(self.out,
(self.name, self.console, self.debug, self.icon,
self.versrsrc, self.manifest, self.resources, self.strip, self.upx, mtime(self.name)))
return 1
class COLLECT(Target):
def __init__(self, *args, **kws):
Target.__init__(self)
self.name = kws.get('name', None)
if self.name is None:
self.name = 'dist_' + self.out[:-4]
self.strip_binaries = kws.get('strip', 0)
self.upx_binaries = kws.get('upx', 0)
if not os.path.isabs(self.name):
self.name = os.path.join(SPECPATH, self.name)
self.toc = TOC()
for arg in args:
if isinstance(arg, TOC):
self.toc.extend(arg)
elif isinstance(arg, Target):
self.toc.append((os.path.basename(arg.name), arg.name, arg.typ))
if isinstance(arg, EXE):
for tocnm, fnm, typ in arg.toc:
if tocnm == os.path.basename(arg.name) + ".manifest":
self.toc.append((tocnm, fnm, typ))
if not arg.append_pkg:
self.toc.append((os.path.basename(arg.pkgname), arg.pkgname, 'PKG'))
self.toc.extend(arg.dependencies)
else:
self.toc.extend(arg)
self.__postinit__()
GUTS = (('name', _check_guts_eq),
('strip_binaries', _check_guts_eq),
('upx_binaries', _check_guts_eq),
('toc', _check_guts_eq), # additional check below
)
def check_guts(self, last_build):
# COLLECT always needs to be executed, since it will clean the output
# directory anyway to make sure there is no existing cruft accumulating
return 1
def assemble(self):
if _check_path_overlap(self.name) and os.path.isdir(self.name):
_rmtree(self.name)
logger.info("building COLLECT %s", os.path.basename(self.out))
os.makedirs(self.name)
toc = addSuffixToExtensions(self.toc)
for inm, fnm, typ in toc:
if not os.path.isfile(fnm) and check_egg(fnm):
# file is contained within python egg, it is added with the egg
continue
tofnm = os.path.join(self.name, inm)
todir = os.path.dirname(tofnm)
if not os.path.exists(todir):
os.makedirs(todir)
if typ in ('EXTENSION', 'BINARY'):
fnm = checkCache(fnm, self.strip_binaries,
self.upx_binaries and (is_win or is_cygwin)
and config['hasUPX'], dist_nm=inm)
if typ != 'DEPENDENCY':
shutil.copy2(fnm, tofnm)
if typ in ('EXTENSION', 'BINARY'):
os.chmod(tofnm, 0755)
_save_data(self.out,
(self.name, self.strip_binaries, self.upx_binaries, self.toc))
return 1
class BUNDLE(Target):
def __init__(self, *args, **kws):
        # BUNDLE only makes sense under Mac OS X; it's a no-op on other platforms
if not is_darwin:
return
# icns icon for app bundle.
self.icon = kws.get('icon', os.path.join(os.path.dirname(__file__),
'..', 'source', 'images', 'icon-windowed.icns'))
Target.__init__(self)
self.name = kws.get('name', None)
if self.name is not None:
self.appname = os.path.splitext(os.path.basename(self.name))[0]
self.version = kws.get("version", "0.0.0")
self.toc = TOC()
for arg in args:
if isinstance(arg, EXE):
self.toc.append((os.path.basename(arg.name), arg.name, arg.typ))
self.toc.extend(arg.dependencies)
elif isinstance(arg, TOC):
self.toc.extend(arg)
elif isinstance(arg, COLLECT):
self.toc.extend(arg.toc)
else:
logger.info("unsupported entry %s", arg.__class__.__name__)
# Now, find values for app filepath (name), app name (appname), and name
# of the actual executable (exename) from the first EXECUTABLE item in
# toc, which might have come from a COLLECT too (not from an EXE).
for inm, name, typ in self.toc:
if typ == "EXECUTABLE":
self.exename = name
if self.name is None:
self.appname = "Mac%s" % (os.path.splitext(inm)[0],)
self.name = os.path.join(SPECPATH, self.appname + ".app")
else:
self.name = os.path.join(SPECPATH, self.name)
break
self.__postinit__()
GUTS = (('toc', _check_guts_eq), # additional check below
)
def check_guts(self, last_build):
# BUNDLE always needs to be executed, since it will clean the output
# directory anyway to make sure there is no existing cruft accumulating
return 1
def assemble(self):
if _check_path_overlap(self.name) and os.path.isdir(self.name):
_rmtree(self.name)
logger.info("building BUNDLE %s", os.path.basename(self.out))
# Create a minimal Mac bundle structure
os.makedirs(os.path.join(self.name, "Contents", "MacOS"))
os.makedirs(os.path.join(self.name, "Contents", "Resources"))
os.makedirs(os.path.join(self.name, "Contents", "Frameworks"))
# Copy icns icon to Resources directory.
if os.path.exists(self.icon):
shutil.copy(self.icon, os.path.join(self.name, 'Contents', 'Resources'))
else:
logger.warn("icon not found %s" % self.icon)
# Key/values for a minimal Info.plist file
info_plist_dict = {"CFBundleDisplayName": self.appname,
"CFBundleName": self.appname,
# Fix for #156 - 'MacOS' must be in the name - not sure why
"CFBundleExecutable": 'MacOS/%s' % os.path.basename(self.exename),
"CFBundleIconFile": os.path.basename(self.icon),
"CFBundleInfoDictionaryVersion": "6.0",
"CFBundlePackageType": "APPL",
"CFBundleShortVersionString": self.version,
# Setting this to 1 will cause Mac OS X *not* to show
# a dock icon for the PyInstaller process which
# decompresses the real executable's contents. As a
# side effect, the main application doesn't get one
# as well, but at startup time the loader will take
# care of transforming the process type.
"LSBackgroundOnly": "1",
}
info_plist = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>"""
for k, v in info_plist_dict.items():
info_plist += "<key>%s</key>\n<string>%s</string>\n" % (k, v)
info_plist += """</dict>
</plist>"""
f = open(os.path.join(self.name, "Contents", "Info.plist"), "w")
f.write(info_plist)
f.close()
toc = addSuffixToExtensions(self.toc)
for inm, fnm, typ in toc:
            # Copy files from cache. This ensures that the copied files use relative
            # paths to their dynamic library dependencies (@executable_path).
if typ in ('EXTENSION', 'BINARY'):
fnm = checkCache(fnm, dist_nm=inm)
tofnm = os.path.join(self.name, "Contents", "MacOS", inm)
todir = os.path.dirname(tofnm)
if not os.path.exists(todir):
os.makedirs(todir)
shutil.copy2(fnm, tofnm)
## For some hooks copy resource to ./Contents/Resources dir.
# PyQt4 hook: On Mac Qt requires resources 'qt_menu.nib'.
# It is copied from dist directory.
qt_menu_dir = os.path.join(self.name, 'Contents', 'MacOS', 'qt_menu.nib')
qt_menu_dest = os.path.join(self.name, 'Contents', 'Resources', 'qt_menu.nib')
if os.path.exists(qt_menu_dir):
shutil.copytree(qt_menu_dir, qt_menu_dest)
return 1
class TOC(UserList.UserList):
def __init__(self, initlist=None):
UserList.UserList.__init__(self)
self.fltr = {}
if initlist:
for tpl in initlist:
self.append(tpl)
def append(self, tpl):
try:
fn = tpl[0]
if tpl[2] == "BINARY":
# Normalize the case for binary files only (to avoid duplicates
# for different cases under Windows). We can't do that for
# Python files because the import semantic (even at runtime)
# depends on the case.
fn = os.path.normcase(fn)
if not self.fltr.get(fn):
self.data.append(tpl)
self.fltr[fn] = 1
except TypeError:
logger.info("TOC found a %s, not a tuple", tpl)
raise
def insert(self, pos, tpl):
fn = tpl[0]
if tpl[2] == "BINARY":
fn = os.path.normcase(fn)
if not self.fltr.get(fn):
self.data.insert(pos, tpl)
self.fltr[fn] = 1
def __add__(self, other):
rslt = TOC(self.data)
rslt.extend(other)
return rslt
def __radd__(self, other):
rslt = TOC(other)
rslt.extend(self.data)
return rslt
def extend(self, other):
for tpl in other:
self.append(tpl)
def __sub__(self, other):
fd = self.fltr.copy()
# remove from fd if it's in other
for tpl in other:
if fd.get(tpl[0], 0):
del fd[tpl[0]]
rslt = TOC()
# return only those things still in fd (preserve order)
for tpl in self.data:
if fd.get(tpl[0], 0):
rslt.append(tpl)
return rslt
def __rsub__(self, other):
rslt = TOC(other)
return rslt.__sub__(self)
def intersect(self, other):
rslt = TOC()
for tpl in other:
if self.fltr.get(tpl[0], 0):
rslt.append(tpl)
return rslt
class Tree(Target, TOC):
def __init__(self, root=None, prefix=None, excludes=None):
Target.__init__(self)
TOC.__init__(self)
self.root = root
self.prefix = prefix
self.excludes = excludes
if excludes is None:
self.excludes = []
self.__postinit__()
GUTS = (('root', _check_guts_eq),
('prefix', _check_guts_eq),
('excludes', _check_guts_eq),
('toc', None),
)
def check_guts(self, last_build):
data = Target.get_guts(self, last_build)
if not data:
return True
stack = [data[0]] # root
toc = data[3] # toc
while stack:
d = stack.pop()
if mtime(d) > last_build:
logger.info("building %s because directory %s changed",
self.outnm, d)
return True
for nm in os.listdir(d):
path = os.path.join(d, nm)
if os.path.isdir(path):
stack.append(path)
self.data = toc
return False
def assemble(self):
logger.info("building Tree %s", os.path.basename(self.out))
stack = [(self.root, self.prefix)]
excludes = {}
xexcludes = {}
for nm in self.excludes:
if nm[0] == '*':
xexcludes[nm[1:]] = 1
else:
excludes[nm] = 1
rslt = []
while stack:
dir, prefix = stack.pop()
for fnm in os.listdir(dir):
if excludes.get(fnm, 0) == 0:
ext = os.path.splitext(fnm)[1]
if xexcludes.get(ext, 0) == 0:
fullfnm = os.path.join(dir, fnm)
rfnm = prefix and os.path.join(prefix, fnm) or fnm
if os.path.isdir(fullfnm):
stack.append((fullfnm, rfnm))
else:
rslt.append((rfnm, fullfnm, 'DATA'))
self.data = rslt
try:
oldstuff = _load_data(self.out)
except:
oldstuff = None
newstuff = (self.root, self.prefix, self.excludes, self.data)
if oldstuff != newstuff:
_save_data(self.out, newstuff)
return 1
logger.info("%s no change!", self.out)
return 0
class MERGE(object):
"""
    Merge repeated dependencies from other executables into the first
    executable. Data and binary files are then present only once, so some
    disk space is saved.
"""
def __init__(self, *args):
"""
Repeated dependencies are then present only once in the first
executable in the 'args' list. Other executables depend on the
first one. Other executables have to extract necessary files
from the first executable.
        args  dependencies given as a list of (Analysis, id, filename) tuples;
              replace id with the correct filename.
"""
# The first Analysis object with all dependencies.
# Any item from the first executable cannot be removed.
self._main = None
self._dependencies = {}
self._id_to_path = {}
for _, i, p in args:
self._id_to_path[i] = p
# Get the longest common path
self._common_prefix = os.path.dirname(os.path.commonprefix([os.path.abspath(a.scripts[-1][1]) for a, _, _ in args]))
if self._common_prefix[-1] != os.sep:
self._common_prefix += os.sep
logger.info("Common prefix: %s", self._common_prefix)
self._merge_dependencies(args)
def _merge_dependencies(self, args):
"""
Filter shared dependencies to be only in first executable.
"""
for analysis, _, _ in args:
path = os.path.abspath(analysis.scripts[-1][1]).replace(self._common_prefix, "", 1)
path = os.path.splitext(path)[0]
if path in self._id_to_path:
path = self._id_to_path[path]
self._set_dependencies(analysis, path)
def _set_dependencies(self, analysis, path):
"""
        Synchronize the Analysis result with the needed dependencies.
"""
for toc in (analysis.binaries, analysis.datas):
for i, tpl in enumerate(toc):
if not tpl[1] in self._dependencies.keys():
logger.debug("Adding dependency %s located in %s" % (tpl[1], path))
self._dependencies[tpl[1]] = path
else:
dep_path = self._get_relative_path(path, self._dependencies[tpl[1]])
logger.debug("Referencing %s to be a dependecy for %s, located in %s" % (tpl[1], path, dep_path))
analysis.dependencies.append((":".join((dep_path, tpl[0])), tpl[1], "DEPENDENCY"))
toc[i] = (None, None, None)
# Clean the list
toc[:] = [tpl for tpl in toc if tpl != (None, None, None)]
    # TODO move this function to PyInstaller.compat module (probably improve
    # function compat.relpath())
def _get_relative_path(self, startpath, topath):
start = startpath.split(os.sep)[:-1]
start = ['..'] * len(start)
if start:
start.append(topath)
return os.sep.join(start)
else:
return topath
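# Minimal usage sketch (illustrative only; the analysis names and script paths
# below are assumptions, not values from this repository). In a multipackage
# spec file, MERGE is given one (Analysis, id, filename) tuple per executable:
#
# a1 = Analysis(['foo.py'])
# a2 = Analysis(['bar.py'])
# MERGE((a1, 'foo', 'foo'), (a2, 'bar', 'bar'))
#
# Afterwards, binaries/datas shared by both analyses remain only in a1, while
# a2 carries DEPENDENCY entries that point back at the first executable.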
def TkTree():
raise SystemExit('TkTree has been removed in PyInstaller 2.0. '
'Please update your spec-file. See '
'http://www.pyinstaller.org/wiki/MigrateTo2.0 for details')
def TkPKG():
raise SystemExit('TkPKG has been removed in PyInstaller 2.0. '
'Please update your spec-file. See '
'http://www.pyinstaller.org/wiki/MigrateTo2.0 for details')
def build(spec, buildpath):
global SPECPATH, BUILDPATH, WARNFILE, rthooks, SPEC, specnm
rthooks = _load_data(os.path.join(HOMEPATH, 'support', 'rthooks.dat'))
SPEC = spec
SPECPATH, specnm = os.path.split(spec)
specnm = os.path.splitext(specnm)[0]
if SPECPATH == '':
SPECPATH = os.getcwd()
BUILDPATH = os.path.join(SPECPATH, 'build',
"pyi." + sys.platform, specnm)
# Check and adjustment for build path
if buildpath != DEFAULT_BUILDPATH:
bpath = buildpath
if os.path.isabs(bpath):
BUILDPATH = bpath
else:
BUILDPATH = os.path.join(SPECPATH, bpath)
WARNFILE = os.path.join(BUILDPATH, 'warn%s.txt' % specnm)
if not os.path.exists(BUILDPATH):
os.makedirs(BUILDPATH)
# Executing the specfile (it's a valid python file)
execfile(spec)
def __add_options(parser):
parser.add_option('--buildpath', default=DEFAULT_BUILDPATH,
help='Buildpath (default: %default)')
parser.add_option('-y', '--noconfirm',
action="store_true", default=False,
help='Remove output directory (default: %s) without '
'confirmation' % os.path.join('SPECPATH', 'dist', 'SPECNAME'))
parser.add_option('--upx-dir', default=None,
help='Directory containing UPX (default: search in path)')
parser.add_option("-a", "--ascii", action="store_true",
help="do NOT include unicode encodings "
"(default: included if available)")
def main(specfile, buildpath, noconfirm, ascii=False, **kw):
global config
global icon, versioninfo, winresource, winmanifest, pyasm
global HIDDENIMPORTS, NOCONFIRM
NOCONFIRM = noconfirm
# Test unicode support.
if not ascii:
HIDDENIMPORTS.extend(misc.get_unicode_modules())
# FIXME: this should be a global import, but can't due to recursive imports
import PyInstaller.configure as configure
config = configure.get_config(kw.get('upx_dir'))
if config['hasRsrcUpdate']:
from PyInstaller.utils import icon, versioninfo, winresource
pyasm = bindepend.getAssemblies(sys.executable)
else:
pyasm = None
if config['hasUPX']:
setupUPXFlags()
if not config['useELFEXE']:
EXE.append_pkg = 0
build(specfile, buildpath)
|
[
"fabiomachadodiniz@gmail.com"
] |
fabiomachadodiniz@gmail.com
|
84bb2ebc15bbae9433e6730fe93e4c27375addca
|
f19937a6e18fe1b3e53d082e00bb00e931c9f4bb
|
/element/app/main/__init__.py
|
234e186929b64d10542c905c3489a16baafd17bb
|
[] |
no_license
|
239103/hello-world
|
3ac9a2bb5d61ed22baa30a209f20a8beaf0219fd
|
ad6434b6d95c8186df387254670f02c735339346
|
refs/heads/master
| 2021-01-13T00:37:13.662641
| 2016-05-10T06:32:26
| 2016-05-10T06:32:26
| 47,692,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 107
|
py
|
from flask import Blueprint
main = Blueprint('main', __name__)
from . import views
views.init_apis(main)
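# Hedged usage sketch (commented out; create_app and the import path are illustrative
# assumptions, not part of this file): a blueprint like `main` is typically registered
# on the Flask application elsewhere, e.g.
#
#   from flask import Flask
#   from app.main import main
#
#   def create_app():
#       app = Flask(__name__)
#       app.register_blueprint(main)
#       return app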
|
[
"cw0319@qq.com"
] |
cw0319@qq.com
|
c97349deca021fa02bd1829b3ff9ee1936879849
|
e943b5a6580cac653272c5cf85d4d46867c419fb
|
/3/3_dht.py
|
358d35357e9ceca52e82169fdd97479ecad20227
|
[] |
no_license
|
afrizaloky/Praktikum-IoT
|
a505b078136d4e1c5961a3f34c997ee5a39b5856
|
b719020bc2e6cff6adf298b24973fc23e93552ff
|
refs/heads/master
| 2022-11-30T01:21:58.045866
| 2020-08-18T14:17:49
| 2020-08-18T14:17:49
| 287,882,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,707
|
py
|
# Libraries
import os
import sys
import Adafruit_DHT as dht  # assumed sensor library providing the read_retry/DHT22 API used below
import paho.mqtt.client as mqtt
import json
import RPi.GPIO as GPIO
import time
# GPIO Mode (BOARD / BCM)
GPIO.setmode(GPIO.BCM) # set GPIO Pins
GPIO_DATA = 4
# set GPIO direction (IN / OUT)
GPIO.setup(GPIO_DATA, GPIO.IN)
THINGSBOARD_HOST = 'demo.thingsboard.io'
ACCESS_TOKEN = 'LD3XPkG3Q4hgCaVqXrBs'
# Data capture and upload interval in seconds. Less interval will eventually hang the DHT22.
INTERVAL = 1
sensor_data = {'temperature': 0, 'humidity': 0}
next_reading = time.time()
client = mqtt.Client()
# Set access token
client.username_pw_set(ACCESS_TOKEN)
# Connect to ThingsBoard using default MQTT port and 60 seconds keepalive interval
client.connect(THINGSBOARD_HOST, 1883, 60)
client.loop_start()
if __name__ == "__main__":
try:
while True:
humidity,temperature = dht.read_retry(dht.DHT22, 4)
humidity = round(humidity, 2)
temperature = round(temperature, 2)
print(u"Temperature: {:g}\u00b0C, Humidity: {:g}%".format(temperature, humidity))
sensor_data['temperature'] = temperature
sensor_data['humidity'] = humidity
# Sending humidity and temperature data to ThingsBoard
client.publish('v1/devices/me/telemetry', json.dumps(sensor_data), 1)
next_reading += INTERVAL
sleep_time = next_reading-time.time()
if sleep_time > 0:
time.sleep(sleep_time)
# Reset by pressing CTRL + C
except KeyboardInterrupt:
print("Measurement stopped by User")
GPIO.cleanup()
client.loop_stop()
client.disconnect()
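# Hedged usage note: run on a Raspberry Pi with the DHT22 data pin wired to BCM GPIO 4
# (matching GPIO_DATA above); requires the paho-mqtt and Adafruit_DHT packages and a
# valid ThingsBoard device access token.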
|
[
"afrizaloky@outlook.com"
] |
afrizaloky@outlook.com
|
d2a67d571a6ae128e18235f827a76b271bc6e6e8
|
cbd2eee46663fad5b5375b13c8c21b1b06eb4c6b
|
/ecloud/code/src/main/python/manor/streamlet/create_nodes.py
|
159486c27b7fd7132e26361dfada9a5c35673aba
|
[] |
no_license
|
1026237416/Python
|
ef474ee40d7efcd6dabb6fb0ecba81b4dcfc7e14
|
ffa8f9ffb8bfec114b0ca46295db05c4213c4c30
|
refs/heads/master
| 2021-07-05T00:57:00.456886
| 2019-04-26T10:13:46
| 2019-04-26T10:13:46
| 114,510,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,090
|
py
|
# coding=utf-8
import yaml
from tornado import gen
from manor.screwdriver import stack_util
from manor.screwdriver.vendor_ecloud import list_app_resources
from manor.streamlet import StreamletBase,get_stack_resources
from manor.streamlet import download_path
from manor.util import generals
from manor.util import redis_tool
SUCCESS_FLAG='CREATE_COMPLETE'
CREATING_FLAG='CREATE_IN_PROGRESS'
def get_instance(params,node_id,serial):
return CreateNodes(params,serial,node_id)
class CreateNodes(StreamletBase):
def __init__(self,params,serial,node_id):
super(CreateNodes,self).__init__(node_id,params,serial)
self.result=None
self.created_resources=[]
self.stack_status=''
self.ips=[]
@gen.coroutine
def execute(self):
if not self.executed:
self.executed=True
# todo: check input parameters...
self.log.debug('params:')
self.log.debug(self.params)
data_module={
'name':'create node',
'resources':{},
'group_name':self.get_resource('group_name')
}
self.log.debug('calculate data module ..')
try:
if self.get_resource('group_name')=='':
raise Exception('group name is empty.')
if self.get_resource('max')!='':
_max=int(self.get_resource('max'))
group_name=self.get_resource('group_name')
rs=yield list_app_resources(self.serial)
rs=[_ for _ in rs if _['group_name']==group_name]
if len(rs)>=_max:
raise Exception('manor.create.node.upper.limited')
os_name=yield download_path(self.get_resource('image'))
data_module['resources'][self.get_resource('group_name')]={
"count":self.get_resource('amount'),
"group_name":self.get_resource('group_name'),
"image":self.get_resource('image'),
'flavor':self.get_resource('flavors'),
"memory":self.get_resource('memory'),
"cores":self.get_resource('cores'),
'tenant':self.get_resource('tenant'),
'size':self.get_resource('disk_capacity'),
"os":os_name,
"network":[
{
"network":self.get_resource('network'),
"subnet":self.get_resource('subnet')
}
]
}
self.log.debug(data_module)
self.stack_id=yield stack_util.create_action(data_module,
self.serial)
except Exception as e:
self.log.error(generals.trace())
raise e
@gen.coroutine
def calculate_created_resources(self):
resources=yield get_stack_resources(self.stack_id)
self.log.debug('calculate created:\n %s'%yaml.safe_dump(resources))
self.created_resources=resources
@gen.coroutine
def get_stack_status(self):
future=yield stack_util.get_stack(self.stack_id)
self.stack_status=future.to_dict()['stack_status']
def get_resource(self,key):
if key in self.params:
return self.params[key]
else:
return ''
def __ips_not_in_road_map(self,ips):
return [_ for _ in ips if _ not in self.__get_road_map()]
def __get_road_map(self):
r=redis_tool.get_it()
road_map=r.keys('mapup*')
return [_.split('_$_')[3] for _ in road_map]
def check_finish(self):
"""
Note: this method runs in a separate thread and is executed once per second.
"""
try:
self.log.debug('create_nodes step. check finish. stack_id %s'%
self.stack_id)
if self.stack_id is None:
return False
if self.stack_status!=CREATING_FLAG:
if self.stack_status==SUCCESS_FLAG:
if len(self.created_resources)==0:
self.calculate_created_resources()
if len(self.ips)==0:
self.ips=[_['ip'] for _ in self.created_resources]
checked=[_ for _ in self.ips if _ in self.__get_road_map()]
self.log.debug('%s - %s'%(self.ips,checked))
if len(self.ips)>0 and self.ips==checked:
return True
else:
return False
else:
self.get_stack_status()
else:
self.log.debug('the stack stack_status is %s'%self.stack_status)
self.get_stack_status()
return False
except:
self.log.error(generals.trace())
raise Exception('error.manor.stream.check.create.node.finish')
|
[
"1026237416@qq.com"
] |
1026237416@qq.com
|
3890719b1de619a46527dd653f3b42ca89a5dcb1
|
430cfece27c54180baf29b3199a67f79fe7d155c
|
/pygmt/tests/test_grdimage.py
|
5ad3913c5bef4b786a5d1de55c30b647ec31619e
|
[
"BSD-3-Clause"
] |
permissive
|
JamieJQuinn/pygmt
|
139f25a3f4280b2d2d43c3fa63179437a9227d31
|
9269fbcb2fc7fca2d5c412acdb794be375c260ab
|
refs/heads/main
| 2023-08-24T16:19:27.673739
| 2021-10-29T09:51:44
| 2021-10-29T09:51:44
| 384,119,354
| 0
| 0
|
BSD-3-Clause
| 2021-07-08T12:37:21
| 2021-07-08T12:37:21
| null |
UTF-8
|
Python
| false
| false
| 7,213
|
py
|
"""
Test Figure.grdimage.
"""
import numpy as np
import pytest
import xarray as xr
from pygmt import Figure
from pygmt.datasets import load_earth_relief
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers.testing import check_figures_equal
@pytest.fixture(scope="module", name="grid")
def fixture_grid():
"""
Load the grid data from the sample earth_relief file.
"""
return load_earth_relief(registration="gridline")
@pytest.fixture(scope="module", name="grid_360")
def fixture_grid_360(grid):
"""
Earth relief grid with longitude range from 0 to 360 (instead of -180 to
180).
"""
_grid = grid.copy() # get a copy of original earth_relief grid
_grid.encoding.pop("source") # unlink earth_relief NetCDF source
_grid["lon"] = np.arange(0, 361, 1) # convert longitude from -180:180 to 0:360
return _grid
@pytest.fixture(scope="module", name="xrgrid")
def fixture_xrgrid():
"""
Create a sample xarray.DataArray grid for testing.
"""
longitude = np.arange(0, 360, 1)
latitude = np.arange(-89, 90, 1)
x = np.sin(np.deg2rad(longitude))
y = np.linspace(start=0, stop=1, num=179)
data = y[:, np.newaxis] * x
return xr.DataArray(
data,
coords=[
("latitude", latitude, {"units": "degrees_north"}),
("longitude", longitude, {"units": "degrees_east"}),
],
attrs={"actual_range": [-1, 1]},
)
@pytest.mark.mpl_image_compare
def test_grdimage(grid):
"""
Plot an image using an xarray grid.
"""
fig = Figure()
fig.grdimage(grid, cmap="earth", projection="W0/6i")
return fig
@pytest.mark.mpl_image_compare
def test_grdimage_slice(grid):
"""
Plot an image using an xarray grid that has been sliced.
"""
grid_ = grid.sel(lat=slice(-30, 30))
fig = Figure()
fig.grdimage(grid_, cmap="earth", projection="M6i")
return fig
@pytest.mark.mpl_image_compare
def test_grdimage_file():
"""
Plot an image using file input.
"""
fig = Figure()
fig.grdimage(
"@earth_relief_01d_g",
cmap="ocean",
region=[-180, 180, -70, 70],
projection="W0/10i",
shading=True,
)
return fig
@check_figures_equal()
@pytest.mark.parametrize(
"shading",
[True, 0.5, "+a30+nt0.8", "@earth_relief_01d_g+d", "@earth_relief_01d_g+a60+nt0.8"],
)
def test_grdimage_shading_xarray(grid, shading):
"""
Test that shading works well for xarray.
The ``shading`` can be True, a constant intensity, some modifiers, or
a grid with modifiers.
See https://github.com/GenericMappingTools/pygmt/issues/364 and
https://github.com/GenericMappingTools/pygmt/issues/618.
"""
fig_ref, fig_test = Figure(), Figure()
kwargs = dict(
region=[-180, 180, -90, 90],
frame=True,
projection="Cyl_stere/6i",
cmap="geo",
shading=shading,
)
fig_ref.grdimage("@earth_relief_01d_g", **kwargs)
fig_test.grdimage(grid, **kwargs)
return fig_ref, fig_test
@pytest.mark.xfail(
reason="Incorrect scaling of geo CPT on xarray.DataArray grdimage plot."
"See https://github.com/GenericMappingTools/gmt/issues/5294",
)
@check_figures_equal()
def test_grdimage_grid_and_shading_with_xarray(grid, xrgrid):
"""
Test that shading works well when xarray.DataArray is input to both the
``grid`` and ``shading`` arguments.
"""
fig_ref, fig_test = Figure(), Figure()
fig_ref.grdimage(
grid="@earth_relief_01d_g", region="GL", cmap="geo", shading=xrgrid, verbose="i"
)
fig_ref.colorbar()
fig_test.grdimage(grid=grid, region="GL", cmap="geo", shading=xrgrid, verbose="i")
fig_test.colorbar()
return fig_ref, fig_test
def test_grdimage_fails():
"""
Should fail for unrecognized input.
"""
fig = Figure()
with pytest.raises(GMTInvalidInput):
fig.grdimage(np.arange(20).reshape((4, 5)))
@pytest.mark.mpl_image_compare
def test_grdimage_over_dateline(xrgrid):
"""
Ensure no gaps are plotted over the 180 degree international dateline.
Specifically checking that `xrgrid.gmt.gtype = 1` sets `GMT_GRID_IS_GEO`,
and that `xrgrid.gmt.registration = 0` sets `GMT_GRID_NODE_REG`. Note that
there would be a gap over the dateline if a pixel registered grid is used.
See also https://github.com/GenericMappingTools/pygmt/issues/375.
"""
fig = Figure()
assert xrgrid.gmt.registration == 0 # gridline registration
xrgrid.gmt.gtype = 1 # geographic coordinate system
fig.grdimage(grid=xrgrid, region="g", projection="A0/0/1c")
return fig
@pytest.mark.mpl_image_compare
def test_grdimage_global_subset(grid_360):
"""
Ensure subsets of grids are plotted correctly on a global map.
Specifically checking that xarray.DataArray grids can wrap around the left
and right sides on a Mollweide projection (W) plot correctly. Note that a
Cartesian grid is used here instead of a Geographic grid (i.e.
GMT_GRID_IS_CARTESIAN). This is a regression test for
https://github.com/GenericMappingTools/pygmt/issues/732.
"""
# Get a slice of South America and Africa only (lat=-90:31, lon=-180:41)
sliced_grid = grid_360[0:121, 0:221]
assert sliced_grid.gmt.registration == 0 # gridline registration
assert sliced_grid.gmt.gtype == 0 # Cartesian coordinate system
fig = Figure()
fig.grdimage(
grid=sliced_grid, cmap="vik", region="g", projection="W0/3.5c", frame=True
)
return fig
@check_figures_equal()
@pytest.mark.parametrize("lon0", [0, 123, 180])
@pytest.mark.parametrize("proj_type", ["H", "W"])
def test_grdimage_central_meridians(grid, proj_type, lon0):
"""
Test that plotting a grid with different central meridians (lon0) using
Hammer (H) and Mollweide (W) projection systems work.
"""
fig_ref, fig_test = Figure(), Figure()
fig_ref.grdimage(
"@earth_relief_01d_g", projection=f"{proj_type}{lon0}/15c", cmap="geo"
)
fig_test.grdimage(grid, projection=f"{proj_type}{lon0}/15c", cmap="geo")
return fig_ref, fig_test
# Cylindrical Equidistant (Q) projections plotted with xarray and NetCDF grids
# are still slightly different with an RMS error of 25, see issue at
# https://github.com/GenericMappingTools/pygmt/issues/390
# TO-DO remove tol=1.5 and pytest.mark.xfail once bug is solved in upstream GMT
@check_figures_equal(tol=1.5)
@pytest.mark.parametrize("lat0", [0, 30])
@pytest.mark.parametrize("lon0", [0, 123, 180])
@pytest.mark.parametrize("proj_type", [pytest.param("Q", marks=pytest.mark.xfail), "S"])
def test_grdimage_central_meridians_and_standard_parallels(grid, proj_type, lon0, lat0):
"""
Test that plotting a grid with different central meridians (lon0) and
standard_parallels (lat0) using Cylindrical Equidistant (Q) and General
Stereographic (S) projection systems work.
"""
fig_ref, fig_test = Figure(), Figure()
fig_ref.grdimage(
"@earth_relief_01d_g", projection=f"{proj_type}{lon0}/{lat0}/15c", cmap="geo"
)
fig_test.grdimage(grid, projection=f"{proj_type}{lon0}/{lat0}/15c", cmap="geo")
return fig_ref, fig_test
|
[
"noreply@github.com"
] |
noreply@github.com
|
8b10787326d6e24fc474df2d039a94863b0a3aab
|
9969ee7cfa666c3ba4ec101fd983284d31eadf35
|
/leetcode/remove-element.py
|
f14c018552b59c22efba9b82ec09b70be98fa85a
|
[] |
no_license
|
QinGeneral/Algorithm
|
d0385575fdde3aa0b535b3c15ecbadf2c3cc3ff4
|
251b2b8bab88d3cbd9463b7a3c3120587aa281a2
|
refs/heads/master
| 2023-08-18T18:45:28.958941
| 2023-08-08T14:46:53
| 2023-08-08T14:46:53
| 265,273,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 913
|
py
|
class BetterSolution:
def removeElement(self, nums, val: int) -> int:
length_range = range(len(nums))
length = 0
for i in length_range:
if nums[i] != val:
nums[length] = nums[i]
length += 1
return length
class Solution:
def removeElement(self, nums, val: int) -> int:
length_range = range(len(nums))
not_val_num = 0
for i in length_range:
if nums[i] != val:
not_val_num += 1
for j in range(0, i):
if nums[j] == val:
nums[j] = nums[i]
nums[i] = val
break
return not_val_num
Solution = Solution()
print(Solution.removeElement([0, 1, 2, 2, 3, 0, 4, 2], 2))
BetterSolution = BetterSolution()
print(BetterSolution.removeElement([0, 1, 2, 2, 3, 0, 4, 2], 2))
|
[
"qingeneral@gmail.com"
] |
qingeneral@gmail.com
|
02b6660eee24fb762865d74b05c9a9efb1bdd81b
|
0de10fdcc1ef06a33611219a464fbdd1c7c4ae8b
|
/pickupfinder/trunk/bin/pip-2.7
|
9de063f867c62edc35320e97bffd0d7c012351db
|
[] |
no_license
|
steinbachr/WebDev
|
aa3f1577aed94be4ef9dbb3170c5ddebe309df66
|
7a8c53e1816ba0155ac818a83027000212396180
|
refs/heads/master
| 2020-06-04T08:59:04.309930
| 2016-12-07T01:04:34
| 2016-12-07T01:04:34
| 7,824,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
7
|
#!/Users/Bobby/Documents/Projects/PickupFinder/trunk/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==1.1','console_scripts','pip-2.7'
__requires__ = 'pip==1.1'
import sys
from pkg_resources import load_entry_point
sys.exit(
load_entry_point('pip==1.1', 'console_scripts', 'pip-2.7')()
)
|
[
"steinbach.r@husky.neu.edu"
] |
steinbach.r@husky.neu.edu
|
95c245281551b533a9da487b67a62881b1c8df20
|
3d3995ffd1844e4d19b15d9da48696d087ddbe6d
|
/qa/rpc-tests/wallet-dump.py
|
04d12ad4bcdd630086cf56f23e3882b430645ee3
|
[
"MIT"
] |
permissive
|
MassGrid/MassGrid
|
2f6f0cb8638f97aca3bb3d500af846becddd3103
|
5634dda1b9997c3d230678b1b19b6af17b590a3d
|
refs/heads/master
| 2021-06-12T01:11:59.651512
| 2019-07-15T03:03:12
| 2019-07-15T03:03:12
| 118,768,672
| 18
| 14
|
MIT
| 2019-04-30T07:28:27
| 2018-01-24T13:25:02
|
C++
|
UTF-8
|
Python
| false
| false
| 5,212
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import MassGridTestFramework
from test_framework.util import *
def read_dump(file_name, addrs, hd_master_addr_old):
"""
Read the given dump, count the addrs that match, count change and reserve.
Also check that the old hd_master is inactive
"""
with open(file_name, encoding='utf8') as inputfile:
found_addr = 0
found_addr_chg = 0
found_addr_rsv = 0
hd_master_addr_ret = None
for line in inputfile:
# only read non comment lines
if line[0] != "#" and len(line) > 10:
# split out some data
key_label, comment = line.split("#")
# key = key_label.split(" ")[0]
keytype = key_label.split(" ")[2]
if len(comment) > 1:
addr_keypath = comment.split(" addr=")[1]
addr = addr_keypath.split(" ")[0]
keypath = None
if keytype == "inactivehdmaster=1":
# ensure the old master is still available
assert(hd_master_addr_old == addr)
elif keytype == "hdmaster=1":
# ensure we have generated a new hd master key
assert(hd_master_addr_old != addr)
hd_master_addr_ret = addr
else:
keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
# count key types
for addrObj in addrs:
if addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label=":
found_addr += 1
break
elif keytype == "change=1":
found_addr_chg += 1
break
elif keytype == "reserve=1":
found_addr_rsv += 1
break
return found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
class WalletDumpTest(MassGridTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
self.extra_args = [["-keypool=90", "-usehd=1"]]
def setup_chain(self):
# TODO remove this when usehd=1 becomes the default
# use our own cache and -usehd=1 as extra arg as the default cache is run with -usehd=0
initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir + "/hd", ["-usehd=1"], redirect_stderr=True)
set_cache_mocktime()
def setup_network(self, split=False):
# Use 1 minute timeout because the initial getnewaddress RPC can take
# longer than the default 30 seconds due to an expensive
# CWallet::TopUpKeyPool call, and the encryptwallet RPC made later in
# the test often takes even longer.
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args, timewait=60, redirect_stderr=True)
def run_test (self):
tmpdir = self.options.tmpdir
# generate 20 addresses to compare against the dump
test_addr_count = 20
addrs = []
for i in range(0,test_addr_count):
addr = self.nodes[0].getnewaddress()
vaddr= self.nodes[0].validateaddress(addr) #required to get hd keypath
addrs.append(vaddr)
# Should be a no-op:
self.nodes[0].keypoolrefill()
# dump unencrypted wallet
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")
found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None)
assert_equal(found_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_addr_chg, 50)  # 50 blocks were mined
assert_equal(found_addr_rsv, 180) # keypool size (external+internal)
#encrypt wallet, restart, unlock and dump
self.nodes[0].encryptwallet('test')
massgridd_processes[0].wait()
self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
self.nodes[0].walletpassphrase('test', 10)
# Should be a no-op:
self.nodes[0].keypoolrefill()
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")
found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
assert_equal(found_addr, test_addr_count)
# TODO clarify if we want the behavior that is tested below in MassGrid (only when HD seed was generated and not user-provided)
# assert_equal(found_addr_chg, 180 + 50) # old reserve keys are marked as change now
assert_equal(found_addr_rsv, 180) # keypool size
if __name__ == '__main__':
WalletDumpTest().main ()
|
[
"873040807@qq.com"
] |
873040807@qq.com
|
8dacb18cc5f4b37537297886d225802a28f1729d
|
7746611d41fb12db544bb871b270258b563fd108
|
/Instanciating and global variable.py
|
5d3723d6b589de161e2c4122ac197b1608e8b740
|
[] |
no_license
|
christianjoy/OOP
|
d1c5411e83b8c51ae4cf0898fc85e53e06ef80c2
|
a17de30c7f98dbfa48857e69d6179d7d417fe46b
|
refs/heads/master
| 2021-01-19T14:56:50.828685
| 2012-08-29T02:33:24
| 2012-08-29T02:33:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
class Score:
def __init__(self):
self.score=0
def Add (self):
self.score += 1
def show(self):
print("The score is %i" % (score))
Basketball=Score()
Volleyball=Score()
Chess=Score()
Basketball.Add()
Basketball.Add()
Basketball.Add()
Basketball.Add()
Basketball.Add()
Basketball.Add()
Basketball.Add()
Basketball.Add()
Basketball.Add()
Basketball.Add()
Volleyball.Add()
Volleyball.Add()
Volleyball.Add()
Volleyball.Add()
Volleyball.Add()
Chess.Add()
Chess.Add()
Chess.Add()
Basketball.show()
Volleyball.show()
Chess.show()
|
[
"christianjoyventura@yahoo.com"
] |
christianjoyventura@yahoo.com
|
28c30da6018c2bb5ce3deb9c731631034ca15789
|
25ec110360c3d743c33528a5dac59cf0dd997254
|
/site_KFU/registration/functions.py
|
8d875a482538a22a53b3fed38f47e186d999aeb1
|
[] |
no_license
|
Nikiouch/KFUsite
|
9130b92d6daceb4ef1ac9ded327d0d3e4ecba3e1
|
c786d31723b89710200de23086d5b338522b8258
|
refs/heads/master
| 2021-08-29T01:35:29.637617
| 2017-12-11T15:01:17
| 2017-12-11T15:01:17
| 113,872,790
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
import string, random
alphabet = {'а': 'a', 'б': 'b', 'в': 'v', 'г': 'g', 'д': 'd', 'е': 'e',
'ё': 'yo','ж': 'zh', 'з': 'z', 'и': 'i', 'й': 'y', 'к': 'k',
'л': 'l', 'м': 'm', 'н': 'n', 'о': 'o', 'п': 'p', 'р': 'r',
'с': 's', 'т': 't', 'у': 'u', 'ф': 'f', 'х': 'h', 'ц': 'c',
'ч': 'ch', 'ш': 'sh', 'щ': 'sh', 'ъ': 'y', 'ы': 'y', 'ь': "'",
'э': 'e', 'ю': 'yu', 'я': 'ya',
'А': 'A', 'Б': 'B', 'В': 'V', 'Г': 'G', 'Д': 'D', 'Е': 'E',
'Ё': 'Yo', 'Ж': 'Zh', 'З': 'Z', 'И': 'I', 'Й': 'Y', 'К': 'K',
'Л': 'L', 'М': 'M', 'Н': 'N', 'О': 'O', 'П': 'P', 'Р': 'R',
'С': 'S', 'Т': 'T', 'У': 'U', 'Ф': 'F', 'Х': 'H', 'Ц': 'Ts',
'Ч': 'Ch', 'Ш': 'Sh', 'Щ': 'Sh', 'Ъ': 'Y', 'Ы': 'Y',
'Ь': "'", 'Э': 'E', 'Ю': 'Yu', 'Я': 'Ya',}
def eng_translate(word):
for i,j in alphabet.items():
word = word.replace(i, j)
return word
def GeneratePassword(login):
password = ""
for i in range(0, len(login)):
password += str(ord(login[i]))
return password
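# Hedged usage sketch (added for illustration; the login value is made up):
if __name__ == "__main__":
    login = eng_translate("Никита")   # transliterates to "Nikita"
    print(login)
    print(GeneratePassword(login))    # concatenates the ord() code of every character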
|
[
"hanouchh@gmail.com"
] |
hanouchh@gmail.com
|
c8856f83a16833e017ba42874f29a120fe5c05d5
|
8d46b7767ee1dc8737247772fe8696263676fa09
|
/app/recipe/serializers.py
|
488f4974a99559614e326bb88b86aa0e6a8e0975
|
[] |
no_license
|
Diaga/recipe-api
|
866997d58348923d039fcf18049f0f8e5738ce13
|
e3796253c82e447566e46978e7fd67b0b01ce892
|
refs/heads/master
| 2020-06-22T18:40:32.851888
| 2019-07-26T17:59:51
| 2019-07-26T17:59:51
| 197,775,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,422
|
py
|
from rest_framework import serializers
from core.models import Tag, Ingredient, Recipe
class TagSerializer(serializers.ModelSerializer):
"""Serializer for tag model"""
class Meta:
model = Tag
fields = ('id', 'name')
read_only_fields = ('id',)
class IngredientSerializer(serializers.ModelSerializer):
"""Serializer for ingredient model"""
class Meta:
model = Ingredient
fields = ('id', 'name')
read_only_fields = ('id',)
class RecipeSerializer(serializers.ModelSerializer):
"""Serializer for recipe model"""
ingredients = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Ingredient.objects.all()
)
tags = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Tag.objects.all()
)
class Meta:
model = Recipe
fields = ('id', 'title', 'time_minutes', 'price',
'link', 'tags', 'ingredients')
read_only_fields = ('id',)
class RecipeDetailSerializer(RecipeSerializer):
"""Serializer for recipe detail"""
ingredients = IngredientSerializer(many=True, read_only=True)
tags = TagSerializer(many=True, read_only=True)
class RecipeImageSerializer(serializers.ModelSerializer):
"""Serializer for uploading images to recipes"""
class Meta:
model = Recipe
fields = ('id', 'image')
read_only_fields = ('id',)
|
[
"diagaaos@gmail.com"
] |
diagaaos@gmail.com
|
f74296653aa5f909d55be6b01db02cd11a8f0142
|
69533190b829ae8d37fe87e6990ecb9cc250bef3
|
/old/teach_pendant/switch_map.py
|
d91d5db1a81cd2eaa23f0f5cc8e4f22691e1cba2
|
[] |
no_license
|
chxb1987/idx6dof
|
a3ebd70d9901845b3a72f611e021caaba8814602
|
b6a2a1b79673cdc3d929c469116ff4eaf3f7583d
|
refs/heads/master
| 2020-08-03T21:46:51.620409
| 2017-06-14T20:50:22
| 2017-06-14T20:50:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,074
|
py
|
SWITCH_UP=1
SWITCH_DOWN=3
sw_map = (
( 4, 2, SWITCH_UP),
( 12, 2, SWITCH_DOWN),
( 16, 17, SWITCH_UP),
( 17, 16, SWITCH_UP),
( 18, 15, SWITCH_UP),
( 19, 14, SWITCH_UP),
( 20, 13, SWITCH_UP),
( 21, 12, SWITCH_UP),
( 22, 10, SWITCH_UP),
( 23, 11, SWITCH_UP),
( 24, 17, SWITCH_DOWN),
( 25, 16, SWITCH_DOWN),
( 26, 15, SWITCH_DOWN),
( 27, 14, SWITCH_DOWN),
( 28, 13, SWITCH_DOWN),
( 29, 12, SWITCH_DOWN),
( 30, 10, SWITCH_DOWN),
( 31, 11, SWITCH_DOWN),
( 32, 7, SWITCH_UP),
( 33, 6, SWITCH_UP),
( 34, 5, SWITCH_UP),
( 35, 4, SWITCH_UP),
( 36, 3, SWITCH_UP),
( 37, 8, SWITCH_UP),
( 38, 1, SWITCH_UP),
( 39, 9, SWITCH_UP),
( 40, 7, SWITCH_DOWN),
( 41, 6, SWITCH_DOWN),
( 42, 5, SWITCH_DOWN),
( 43, 4, SWITCH_DOWN),
( 44, 3, SWITCH_DOWN),
( 45, 8, SWITCH_DOWN),
( 46, 1, SWITCH_DOWN),
( 47, 9, SWITCH_DOWN),
)
for sw_code, sw_n, sw_pos in sw_map:
if sw_pos == SWITCH_UP:
vn = 'this->swbits_ups'
mn = 'SET_SW_UP'
else:
vn = 'this->swbits_downs'
mn = 'SET_SW_DOWN'
print "case {sw_code}: {mn}({sw_n}); break; ".format(**locals())
|
[
"eric@clarinova.com"
] |
eric@clarinova.com
|
84c9f0305d09c62aeebc0f058ea4b46557611c42
|
627eccf21233ec99a8fb0a770d4f11a7f887eac7
|
/Lab-7/es3.py
|
dad87720f1f3f43914e0d5c634e4f520f03ea65e
|
[] |
no_license
|
frollo/AdvancedProgramming
|
76e15cc5fd9ac21fb1db36806d8a3d2136da1252
|
f4c27a9b1067ead9720ea23d6630d5b7611d4d68
|
refs/heads/master
| 2021-01-21T04:40:35.984135
| 2016-06-14T16:46:09
| 2016-06-14T16:46:09
| 49,653,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,419
|
py
|
from functools import reduce
class Matrix(object):
def __init__(self,height, width, values):
self.height = height
self.width = width
index = 0
self.matrix = list()
line = list()
while index < len(values):
line.append(values[index])
index += 1
if index % self.width == 0:
self.matrix.append(line)
line = list()
def __eq__(self, other):
if self.height == other.height and self.width == other.width:
return reduce(lambda x,y: x and y, [x[0] == x[1] for x in zip(self.matrix, other.matrix)])
else:
return False
def __add__(self, other):
if self.height != other.height or self.width != other.width:
raise Exception("Cannot sum matrix of different dimensions!")
v1 = reduce(lambda x,y: x+y, self.matrix)
v2 = reduce(lambda x,y: x+y, other.matrix)
return Matrix(self.height, self.width, list(map(lambda x: x[0] + x[1], zip(v1,v2))))
def __mul__(self, other):
try:
if self.height == other.width:
mult_values = [sum([self.matrix[j][x] * other.matrix[x][i] for x in range(self.width)]) for j in range (self.width) for i in range (self.height)]
except Exception as e:
print(other)
mult_values = list(map(lambda x: x * other, reduce(lambda x,y: x+y, self.matrix)))
return Matrix(self.height, self.width, mult_values)
def tras(self):
new_values = [self.matrix[j][i] for i in range(self.width) for j in range(self.height)]
return Matrix(self.width, self.height, new_values)  # transposed matrix has swapped dimensions
def copy(self):
return Matrix(self.height, self.width, reduce(lambda x,y: x + y, self.matrix))
def __repr__(self):
rep = ""
for line in self.matrix:
rep += "|"
for el in line:
rep += " {0}".format(el)
rep += " |\n"
return rep
if __name__ == '__main__':
muno = Matrix(2,2,[1,2,3,4])
mdue = Matrix(2,2,[1,2,3,4])
mtre = Matrix(2,2,[5,6,7,8])
mquattro = Matrix(1,1, [0])
print(muno == mdue)
print(mtre == mdue)
print(mtre == mquattro)
mcinque = muno.copy()
print(mcinque == muno)
print (muno + mdue)
print (mquattro * mquattro)
print(muno * mdue)
mid = Matrix(2,2,[1,0,0,1])
print(mid * muno)
|
[
"rossi.lorenzo@yandex.com"
] |
rossi.lorenzo@yandex.com
|
4b6132798d6c0859d76c627890ed98d48b68a471
|
9694ed0bc0e90eed9e275ef304b6cffc6d4b38f1
|
/peasoup/config.py
|
4557bb6304385af9d3cbf8803d2bf8a445ba49dd
|
[] |
no_license
|
pkronstrom/peasoup
|
3d27a8ebc5ed4657ef711024e9e5d10d2244b2cf
|
2a53b75558d9d0d674d53de7d04d988fb64ea067
|
refs/heads/master
| 2021-01-09T02:58:54.165600
| 2020-02-22T15:09:10
| 2020-02-22T15:09:10
| 242,223,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 963
|
py
|
# Change this to a directory where the podcast files lie.
# The server currently only supports for flat directories
PODCASTS_DIRECTORY = "/path/to/samba/shared/podcasts/dir"
# The baseurl should be something that is accessible from
# the outside world (or at least from local network) in
# order to use with your podcast client.
# Probably something like http://my-ddns-name.dy.fi:9999
SERVER_BASEURL = "http://localhost:9999"
# Current list of allowed extensions.
# The rest of the filetypes are ignored.
ALLOWED_EXTENSIONS = [".mp3", ".m4a", ".m4b", "aac", ".ogg", ".wma", ".flac", ".alac"]
# Podcast specific info that is
# rendered to the XML file
PODCAST_NAME = "Peasoup Podcast"
PODCAST_DESCRIPTION = (
"This is my peasoup.\n"
"Probably needs some mustard and onion."
)
# This might need to be something accessible depending on
# the podcast client
PODCAST_WEBSITE = "http://www.my-podcast-website.com"
PODCAST_CONTAINS_EXPLICIT_CONTENT = False
|
[
"peter.kronstrom@reaktor.com"
] |
peter.kronstrom@reaktor.com
|
fa911cce9aa47bb0db4769337cacc5ab8c3bdae2
|
832117e293908ac349e96620a9b549d93f74092d
|
/Simulations/Python/EG Statistic/Generate Herfindahl Bins for Each N given a Range of Sigmas/HSimulation
|
236229f13522781df2942d9437174b346cf1aaff
|
[] |
no_license
|
seilkhanov/EconScripts
|
602a665515ff9e816476a67ad5003f10b47dc45d
|
f093bd9209db3fccbd08f57f7473623e9a6da656
|
refs/heads/master
| 2023-03-23T03:36:50.353039
| 2019-12-18T12:54:53
| 2019-12-18T12:54:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,741
|
#!/usr/bin/env python
import optparse
import os
import platform
import sys
import codecs
from hSimulation import *
import csv
from decimal import *
import numpy
from RandomIntVal import *
import pprint
def isNumeric(value):
return str(value).replace('.','').strip().isdigit()
def fileExists(value):
if os.path.isfile(os.path.expanduser(value.strip())):
return os.path.abspath(os.path.expanduser(value.strip()))
else:
print "I can't find the file " + value
sys.exit()
def isReturnFile(myfile):
if os.path.abspath(os.path.expanduser(myfile.strip())) != False:
return os.path.abspath(os.path.expanduser(myfile.strip()))
else:
print 'You can\'t save to that location'
sys.exit()
def WriteFile(filename,firms,data):
if os.path.isfile(filename) == False:
fieldList = ['Plants']
for i in range(11):
fieldList.append((i*9.5+2.5))
mf = open(filename, 'wb')
myfile = csv.writer(mf)
myfile.writerow(fieldList)
mf.close()
mergedlist = []
mergedlist.append(firms)
mergedlist.extend(data)
mf = open(filename,'ab+')
myfile = csv.writer(mf)
myfile.writerow(mergedlist)
mf.close()
print "Saving # of Firms: " + str(firms)
def RunSimulation(rState, numberoffirmsList,firmsizeList,sdevList,loopsc,destination):
for x in range(len(numberoffirmsList)):
for y in range(len(firmsizeList)):
cGS = hSimulation(rState, firmsizeList[y], sdevList, int(numberoffirmsList[x]),loopsc)
herfs = cGS.Run()
WriteFile(destination,int(numberoffirmsList[x]),herfs)
def loadFile(value):
empty_data = []
with open(value.strip(), 'rU') as f:
read_data = f.readlines()
for x in range(len(read_data)):
if isNumeric(read_data[x].strip()):
empty_data.append(float(read_data[x].strip()))
return empty_data
def main():
desc = 'Tool to simulate Herfindahl values'
p = optparse.OptionParser(description=desc)
p.add_option('--firmsize', '-f', dest="firmsize", help="File containing firm size (head count)", default='', metavar='"<File Path>"')
p.add_option('--sdev', '-s', dest="sdev", help="File containing the standard deviations to test", default='', metavar='"<File Path>"')
p.add_option('--numberoffirms', '-n', dest="numberoffirms", help="File containing the number of firms (in an industry) to test", default='', metavar='"<File Path>"')
p.add_option('--iterations', '-i', type="int", dest="iterations", help="Number of iterations to run for each simulation", default=1000)
p.add_option('--destination', '-d', dest="destination", help="Main csv file to save simulation(s) output", default='', metavar='"<File Path>"')
p.add_option("--seed", type="int", dest="seed", default=1012810, help="Seed the random generator with a specified value")
(options, arguments) = p.parse_args();
if len(options.destination)>0:
destination = isReturnFile(options.destination.strip())
else:
print 'You must specify a destination file'
sys.exit()
if int(options.iterations)<=0 or int(options.seed)<0:
print 'You must specify a positive value for both iterations and seeding the random number generator'
sys.exit()
if len(options.firmsize)>0 and len(options.numberoffirms)>0 and len(options.sdev)>0:
firmsizefile = fileExists(options.firmsize)
numberoffirmsfile = fileExists(options.numberoffirms)
sdevfile = fileExists(options.sdev)
firmsizeList = loadFile(firmsizefile)
numberoffirmsList = loadFile(numberoffirmsfile)
sdevList = loadFile(sdevfile)
rState = RandomIntVal(int(options.seed))
RunSimulation(rState, numberoffirmsList,firmsizeList,sdevList,int(options.iterations),destination)
del rState
else:
print 'You must specify files for firm size, number of firms, sigma'
sys.exit()
if __name__ == '__main__':
main()
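# Hedged usage note (file names are hypothetical):
#   python HSimulation -f firmsizes.txt -s sigmas.txt -n firmcounts.txt -i 1000 -d results.csv
# where each text file lists one numeric value per line, as expected by loadFile().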
|
[
"ben@wbpsystems.com"
] |
ben@wbpsystems.com
|
|
b6fe08f420ddbed60467a9e8b8b25ffed8c39c50
|
a34a034586ef5bff7d452ced878a75a6790ecf62
|
/src/frames/convertGtts.py
|
aaa098aa1720d4c6d57c6f6c252356a7b35ec125
|
[
"MIT"
] |
permissive
|
AR4Z/COLIBRI
|
8041ca16fba7b9be03cd90372e9d3116fafe8393
|
2edcaf8cea7813f44d9c3dea783e728eb59f9a58
|
refs/heads/master
| 2021-09-20T09:43:21.261382
| 2018-06-24T20:56:56
| 2018-06-24T20:56:56
| 124,479,791
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,195
|
py
|
import tkinter as tk
from tkinter import PhotoImage, filedialog, StringVar, Radiobutton, IntVar
from tkinter.ttk import Progressbar
import tkinter.messagebox
import threading
from utils.utils import len_file_pdf, extract_text, text_to_audio, extract_name_audio, len_audio_file
from .audioPage import AudioPage
import platform
# font family and size
LARGE_FONT = ("Verdana", 16)
class ConvertGtts(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
# share info between frames
self.controller = controller
self.duration_audio_file = ""
# button to select the file to convert
self.button_browse_file = tk.Button(self, text="EXAMINAR",
command=self.select_pdf, font=LARGE_FONT, bg="#000000", fg="#ffff00",
activebackground="#000000", activeforeground="#ffff00")
self.button_browse_file.pack()
# file label
self.label_file = tk.Label(self, text="ARCHIVO: ", font=LARGE_FONT)
self.label_file.pack()
# holds the path of the PDF selected for conversion
self.path_selected_file = tk.StringVar(None)
# field where the path of the selected PDF is displayed
self.field_path_selected_file = tk.Entry(self, width='65', textvariable=self.path_selected_file,
font=LARGE_FONT)
self.field_path_selected_file.pack()
self.option_type_conversion = StringVar()
self.label_conversion = tk.Label(self, text="TIPO DE CONVERSIÓN: ", font=LARGE_FONT)
self.label_conversion.pack()
scanned = Radiobutton(self, text="PDF ESCANEADO", value="ocr", var=self.option_type_conversion)
normal = Radiobutton(self, text="PDF NORMAL", value="pymupdf", var=self.option_type_conversion)
scanned.pack()
normal.pack()
# page numbers
self.until_number_page = tk.IntVar()
self.from_number_page = tk.IntVar()
# label and field for the first page number
self.label_from_number_page = tk.Label(self, text="DESDE: ", font=LARGE_FONT)
self.field_from_number_page = tk.Entry(self, width='5', textvariable=self.from_number_page)
self.label_from_number_page.pack()
self.field_from_number_page.pack()
# label and field for the last page number
self.label_until_number_page = tk.Label(self, text="HASTA: ", font=LARGE_FONT)
self.field_until_number_page = tk.Entry(self, width='5', textvariable=self.until_number_page)
self.label_until_number_page.pack()
self.field_until_number_page.pack()
# file name
self.name_conversion = tk.StringVar()
self.label_name = tk.Label(self, text="NOMBRE: ", font=LARGE_FONT)
self.field_name_conversion = tk.Entry(self, width='40', textvariable=self.name_conversion, font=LARGE_FONT)
self.label_name.pack()
self.field_name_conversion.pack()
# slider to set the speed
self.label_speed = tk.Label(self, text="VELOCIDAD: ", font=LARGE_FONT)
self.label_speed.pack()
self.scale_speed = tk.Scale(self, orient='horizontal', from_=1, to=2, activebackground="black", bg="#ffff00", resolution=0.1)
self.scale_speed.set(1.5)
self.scale_speed.pack()
# button to run the conversion
self.button_conversion = tk.Button(self, text="CONVERTIR", command=self.conversion, font=LARGE_FONT,
bg="#000000", fg="#ffff00", activebackground="#000000",
activeforeground="#ffff00")
self.button_conversion.pack()
# return button and icon
if platform.system() == "Windows":
height = 380
self.icon_return = PhotoImage(file="img/ic_home_black_24dp_1x.png")
else:
height = 380
self.icon_return = PhotoImage(file="../img/ic_home_black_24dp_1x.png")
self.button_return = tk.Button(self, text="ATRÁS",
command=lambda: self.controller.show_frame(self.controller.data["menu_frame"],
450, height),
image=self.icon_return)
self.button_return.pack()
# conversion progress bar
self.progress_bar = Progressbar(self, orient=tk.HORIZONTAL, mode='indeterminate', takefocus=True)
self.is_valid = False
def select_pdf(self):
"""
Opens the file explorer to choose the PDF to convert.
:return: None
"""
# get the path of the selected file
selected_file = filedialog.askopenfilename(initialdir=self.controller.data["home_user"],
title="SELECCIONAR LIBRO",
filetypes=(("archivos pdf", "*.pdf"), ("todos los archivos", "*.*")))
# fill the field with that path so it is shown in the frame
self.path_selected_file.set(selected_file)
# the first page is 0
self.from_number_page.set(0)
# the last page is the length of the PDF
self.until_number_page.set(len_file_pdf(selected_file))
def conversion(self):
"""
Starts the threads that perform the conversion
:return: None
"""
self.validate()
if self.is_valid:
# show the bar to indicate that the process is running
self.show_progress(True)
# disable buttons while the conversion runs
self.button_conversion.config(state='disabled')
self.button_browse_file.config(state='disabled')
self.button_return.config(state='disabled')
# create a thread for the conversion, executed by conversion_worker
self.thread = threading.Thread(target=self.conversion_worker)
self.thread.daemon = True
self.thread.start()
# check when the thread finishes
self.conversion_check()
else:
return
def conversion_check(self):
"""
Checks whether the thread has finished
:return: None
"""
# check whether the thread is still alive; otherwise hide the progress bar and
# show the audio playback frame
if self.thread.is_alive():
self.after(10, self.conversion_check)
else:
self.show_progress(False)
self.controller.show_frame(AudioPage, 450, 200)
def conversion_worker(self):
"""
Performs the conversion
:return: None
"""
#
# extract the text and generate a .txt file with it
extract_text(self.field_path_selected_file.get(), self.from_number_page.get(), self.until_number_page.get(),
self.option_type_conversion.get())
self.controller.data["path_file"] = text_to_audio(self.scale_speed.get(),
self.name_conversion.get(),
0, self.controller.data["path_audios"],
"gtts")
# get the audio name and file duration to store in the database
path_audio = self.controller.data["path_file"]
duration_audio_file = len_audio_file(self.controller.data["path_file"])
# insert record into db
self.controller.data["manage_db"].add_file(self.name_conversion.get(), duration_audio_file, path_audio)
self.controller.data["name_file"] = self.name_conversion.get()
def show_progress(self, start):
"""
Shows or stops the progress bar
:param start: True or False
:return: None
"""
if start:
self.progress_bar.pack()
self.progress_bar.start()
else:
self.progress_bar.stop()
def error(self, message):
tkinter.messagebox.showerror("Error", message)
def validate(self):
if self.field_path_selected_file.get() == "":
self.error("SELECCIONE UN ARCHIVO")
return
if self.option_type_conversion.get() != "pymupdf" and self.option_type_conversion.get() != "ocr":
self.error("ELIJA UN MODO DE CONVERSION")
return
if self.from_number_page.get() > self.until_number_page.get():
self.error("LA PAGINA INICIAL NO PUEDE SER MAYOR QUE LA FINAL")
return
if self.name_conversion.get() == "":
self.error("INGRESE UN NOMBRE PARA EL ARCHIVO")
return
if not self.controller.data["manage_db"].get_file(self.name_conversion.get()) is None:
self.error("EL NOMBRE YA HA SIDO USADO")
return
self.is_valid = True
|
[
"oaraz02@gmail.com"
] |
oaraz02@gmail.com
|
8f1829ee69b87b02cc106601fc364e928bd4864f
|
6275b8eee6f8f0f69c1f7d1b74a82db22329d560
|
/src/train_v4.py
|
fe583be223d6b303b0b94e9af04e688c97169fb1
|
[
"MIT"
] |
permissive
|
khodwe56/kaggle-birdsong-recognition
|
081575ea02e663f98292c5e579c14de4bcdb7e22
|
95a902c37355619cf02558968f000038e487db47
|
refs/heads/master
| 2023-01-01T21:35:20.101880
| 2020-10-27T17:03:06
| 2020-10-27T17:03:06
| 299,716,450
| 0
| 0
|
MIT
| 2020-09-29T19:21:48
| 2020-09-29T19:21:47
| null |
UTF-8
|
Python
| false
| false
| 721
|
py
|
from argparse import ArgumentParser, Namespace
from engine.main_engine_v4 import MainEngineV4
import importlib
import torch
import ignite.distributed as idist
torch.backends.cudnn.benchmark = True
def run(local_rank, config):
pe = MainEngineV4(local_rank, config)
pe.train(config.run_params)
def main(hyperparams):
with idist.Parallel(**hyperparams.dist_params) as parallel:
parallel.run(run, hyperparams)
if __name__ == '__main__':
parser = ArgumentParser(parents=[])
parser.add_argument('--config', type=str)
params = parser.parse_args()
module = importlib.import_module(params.config, package=None)
hyperparams = module.Parameters()
main(hyperparams)
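# Hedged usage note (the module path is hypothetical): --config expects a dotted,
# importable module that defines a Parameters class, e.g.
#   python train_v4.py --config configs.example_v4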
|
[
"ryancwongsa@gmail.com"
] |
ryancwongsa@gmail.com
|
657c8ecbb66d3f2b883c96e348062764079177a8
|
b833ac109361dbad4f0707c64db214b5542778d4
|
/mfow_compfin/risk/consumer_credit/model_metrics.py
|
bce7716cbb6a74924f24df11190b6cfbae7aee20
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mfow/compfin
|
d794c77edb49902ec06d2fa9dfd700faf81d7ded
|
d513ef69b0aa25a298cb2187c2211642fd080db4
|
refs/heads/master
| 2023-03-04T10:40:22.701846
| 2021-02-06T07:30:42
| 2021-02-06T07:30:42
| 335,581,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
import numpy as np
from sklearn.metrics import roc_auc_score
def get_model_metrics(**kwargs) -> dict:
prediction: np.ndarray = kwargs.get('prediction')
actual: np.ndarray = kwargs.get('actual')
assert prediction.shape == actual.shape
result = dict()
result['accuracy'] = np.sum((prediction >= 0.5) == actual) / len(prediction)
result['auroc'] = roc_auc_score(actual, prediction)
return result
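# Hedged usage sketch (synthetic data, for illustration only):
if __name__ == "__main__":
    preds = np.array([0.9, 0.2, 0.7, 0.4])
    labels = np.array([1.0, 0.0, 1.0, 1.0])
    print(get_model_metrics(prediction=preds, actual=labels))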
|
[
"mfow@outlook.com"
] |
mfow@outlook.com
|