blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a9631a6411f099359301c0f1709dc4854de83442
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02792/s333976944.py
|
822b45df844116947cf79fed9d47791edd103ea9
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
def solve():
    """Count ordered pairs (A, B) in [1, N] such that A's first digit equals
    B's last digit and A's last digit equals B's first digit.

    Reads N from stdin and prints the answer.
    """
    n = int(input())
    # pairs[d1][d2] = how many numbers in [1, n] start with digit d1 and end with d2
    pairs = [[0] * 10 for _ in range(10)]
    for value in range(1, n + 1):
        digits = str(value)
        pairs[int(digits[0])][int(digits[-1])] += 1
    # A number never starts with 0, so only digits 1..9 can contribute.
    total = sum(
        pairs[a][b] * pairs[b][a]
        for a in range(1, 10)
        for b in range(1, 10)
    )
    print(total)
if __name__ == "__main__":
    solve()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9471d90433996588d7b327c4ec36c851b54ea325
|
1699300e1225f0994fbfd5e13a7eb4436a5df14d
|
/05_Mini_Scans_Optimised/04_SbS_Tomo_Lattice_H_07/Make_SLURM_submission_script.py
|
abcabc237978907bae9272a61fb53666e9224f34
|
[
"MIT"
] |
permissive
|
HaroonRafique/PyORBIT_MD4224
|
26307a60ed79f3e170fbd655eb8cbe8cc9a0dfa9
|
6f68a80b2f8bf1cbeb9e2fc840925efe8a8b5672
|
refs/heads/master
| 2023-04-25T13:27:49.756836
| 2020-08-25T10:26:07
| 2020-08-25T10:26:07
| 215,249,664
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,315
|
py
|
#!/usr/bin/env python
# Python script to create a SLURM submission script for PyORBIT
# 21 March 2019 Haroon Rafique CERN BE-ABP-HSI
import os
#-----------------------------------------------------------------------
# SETTINGS
#-----------------------------------------------------------------------
script_name = "SLURM_submission_script.sh"
# Switches
hyperthreading = False # Enable hyperthreading
exclusive = True # Exclusive (see SLURM documentation)
autotime = True # 2 days for short queues, 2 weeks for long queues
autotask = True # Automatically set nodes to maximum tasks
clean_all = True # Clean simulation folder before running (False when resuming pickle checkpoint)
# Must be chosen
# ~ queue = 'inf-long', 'inf-short', 'batch-long', 'batch-short'
queue = 'inf-short'
n_nodes = 2
jobname = '05_04_07'
path_to_simulation = os.path.dirname(os.path.realpath(__file__)) # This directory
# Optional - have to use with correct switches
manual_time = '504:00:00' # manually set using format 'hours:minutes:seconds'
manual_tasks = 40 # manually change ntasks
# Defaults - can be changed
output_file_name = 'slurm.%N.%j.out'
error_file_name = 'slurm.%N.%j.err'
root_dir = '/hpcscratch/user/harafiqu'
simulation_file = 'pyOrbit.py'
#-----------------------------------------------------------------------
# AUTOMATICALLY FORMAT SCRIPT
#-----------------------------------------------------------------------
n_tasks = 0
if autotask:
if hyperthreading:
if 'batch' in queue: n_tasks = 32
elif 'inf' in queue: n_tasks = 40
else:
print 'queue not recognised'
exit(0)
else:
if 'batch' in queue: n_tasks = 16
elif 'inf' in queue: n_tasks = 20
else:
print 'queue not recognised'
exit(0)
else: n_tasks = manual_tasks
time = '48:00:00'
if autotime:
if queue == 'batch-short': time = '48:00:00'
elif queue == 'inf-short': time = '120:00:00'
elif queue == 'inf-long' or 'batch-long': time = '504:00:00'
else:
print 'queue not recognised'
exit(0)
else: time = manual_time
#-----------------------------------------------------------------------
# WRITE FILE
#-----------------------------------------------------------------------
if os.path.exists(script_name):
print 'SLURM submission script ' + script_name + ' already exists. Deleting'
os.remove(script_name)
print "Creating ", script_name
f= open(script_name,"w")
f.write('#!/bin/bash')
f.write('\n#SBATCH --job-name=' + str(jobname))
f.write('\n#SBATCH --output=' + str(output_file_name))
f.write('\n#SBATCH --error=' + str(error_file_name))
f.write('\n#SBATCH --nodes=' + str(n_nodes))
f.write('\n#SBATCH --ntasks-per-node=' + str(n_tasks))
f.write('\n#SBATCH --partition=' + str(queue))
f.write('\n#SBATCH --time=' + str(time))
f.write('\n#SBATCH --mem-per-cpu=3200M')
if (exclusive): f.write('\n#SBATCH --exclusive')
if not hyperthreading: f.write('\n#SBATCH --hint=nomultithread')
f.write('\n')
f.write('\nBATCH_ROOT_DIR=' + str(root_dir))
f.write('\nRUN_DIR=' + str(path_to_simulation))
f.write('\nOrigIwd=$(pwd)')
f.write('\n')
f.write('\n# Make an output folder in the root directory to hold SLURM info file')
f.write('\ncd ${BATCH_ROOT_DIR}')
f.write('\noutput_dir="output"')
f.write('\nmkdir -p $output_dir')
f.write('\n')
f.write('\n# Fill the SLURM info file')
f.write('\nsimulation_info_file="${BATCH_ROOT_DIR}/${output_dir}/simulation_info_${SLURM_JOB_ID}.${SLURM_NODEID}.${SLURM_PROCID}.txt"')
f.write('\necho "PyOrbit path: `readlink -f ${ORBIT_ROOT}`" >> ${simulation_info_file}')
f.write('\necho "Run path: `readlink -f ${RUN_DIR}`" >> ${simulation_info_file}')
f.write('\necho "Submit host: `readlink -f ${SLURM_SUBMIT_HOST}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Job name: `readlink -f ${SLURM_JOB_NAME}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Job ID: `readlink -f ${SLURM_JOB_ID}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Nodes allocated: `readlink -f ${SLURM_JOB_NUM_NODES}`" >> ${simulation_info_file}')
f.write('\necho "SLURM CPUS per Node: `readlink -f ${SLURM_CPUS_ON_NODE}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Node ID: `readlink -f ${SLURM_NODEID}`" >> ${simulation_info_file}')
f.write('\necho "SLURM total cores for job: `readlink -f ${SLURM_NTASKS}`" >> ${simulation_info_file}')
f.write('\necho "SLURM process ID: `readlink -f ${SLURM_PROCID}`" >> ${simulation_info_file}')
f.write('\necho "****************************************" >> ${simulation_info_file}')
f.write('\n')
f.write('\n# Enter job directory, clean it, and setup environment -> SLURM info file')
f.write('\ncd ${RUN_DIR}')
if clean_all:f.write('\n./clean_all.sh')
f.write('\n. setup_environment.sh >> ${simulation_info_file}')
f.write('\n')
f.write('\n# Load correct MPI')
f.write('\nmodule load mpi/mvapich2/2.3')
f.write('\n')
f.write('\ntstart=$(date +%s)')
f.write('\n')
f.write('\n# Run the job')
if hyperthreading:f.write('\nsrun ${ORBIT_ROOT}/bin/pyORBIT ${RUN_DIR}/' + str(simulation_file))
else:f.write('\nsrun --hint=nomultithread ${ORBIT_ROOT}/bin/pyORBIT ${RUN_DIR}/' + str(simulation_file))
f.write('\n')
f.write('\ntend=$(date +%s)')
f.write('\ndt=$(($tend - $tstart))')
f.write('\necho "total simulation time (s): " $dt >> ${simulation_info_file}')
f.close()
print 'SLURM submission script creation finished'
|
[
"haroon.rafique@protonmail.com"
] |
haroon.rafique@protonmail.com
|
0296081c7b5ffd91876af93bf00f5ccdda287dd6
|
292cec77b5003a2f80360d0aee77556d12d990f7
|
/src/bentoml_cli/worker/grpc_api_server.py
|
87a8e336eb31fd6ceeb1eba0e4e6b26cc1e24fd8
|
[
"Apache-2.0"
] |
permissive
|
yubozhao/BentoML
|
194a6ec804cc1c6dbe7930c49948b6707cbc3c5f
|
d4bb5cbb90f9a8ad162a417103433b9c33b39c84
|
refs/heads/master
| 2022-12-17T00:18:55.555897
| 2022-12-06T00:11:39
| 2022-12-06T00:11:39
| 178,978,385
| 3
| 0
|
Apache-2.0
| 2020-12-01T18:17:15
| 2019-04-02T01:53:53
|
Python
|
UTF-8
|
Python
| false
| false
| 4,379
|
py
|
from __future__ import annotations
import json
import typing as t
import click
@click.command()
@click.argument("bento_identifier", type=click.STRING, required=False, default=".")
@click.option("--host", type=click.STRING, required=False, default=None)
@click.option("--port", type=click.INT, required=False, default=None)
@click.option(
    "--runner-map",
    type=click.STRING,
    envvar="BENTOML_RUNNER_MAP",
    help="JSON string of runners map, default sets to envars `BENTOML_RUNNER_MAP`",
)
@click.option(
    "--working-dir",
    type=click.Path(exists=True),
    help="Working directory for the API server",
)
@click.option(
    "--prometheus-dir",
    type=click.Path(exists=True),
    help="Required by prometheus to pass the metrics in multi-process mode",
)
@click.option(
    "--worker-id",
    required=False,
    type=click.INT,
    default=None,
    help="If set, start the server as a bare worker with the given worker ID. Otherwise start a standalone server with a supervisor process.",
)
@click.option(
    "--enable-reflection",
    type=click.BOOL,
    is_flag=True,
    help="Enable reflection.",
)
@click.option(
    "--enable-channelz",
    type=click.BOOL,
    is_flag=True,
    help="Enable channelz.",
    default=False,
)
@click.option(
    "--max-concurrent-streams",
    type=click.INT,
    help="Maximum number of concurrent incoming streams to allow on a HTTP2 connection.",
    default=None,
)
@click.option(
    "--ssl-certfile",
    type=str,
    default=None,
    help="SSL certificate file",
)
@click.option(
    "--ssl-keyfile",
    type=str,
    default=None,
    help="SSL key file",
)
@click.option(
    "--ssl-ca-certs",
    type=str,
    default=None,
    help="CA certificates file",
)
def main(
    bento_identifier: str,
    host: str,
    port: int,
    prometheus_dir: str | None,
    runner_map: str | None,
    working_dir: str | None,
    worker_id: int | None,
    enable_reflection: bool,
    enable_channelz: bool,
    max_concurrent_streams: int | None,
    ssl_certfile: str | None,
    ssl_keyfile: str | None,
    ssl_ca_certs: str | None,
):
    """
    Start BentoML API server.
    \b
    This is an internal API, users should not use this directly. Instead use `bentoml serve-grpc <path> [--options]`
    """
    import bentoml
    from bentoml._internal.log import configure_server_logging
    from bentoml._internal.context import component_context
    from bentoml._internal.configuration.containers import BentoMLContainer

    # Identify this process in logs/telemetry before any server wiring happens.
    component_context.component_type = "grpc_api_server"
    component_context.component_index = worker_id
    configure_server_logging()

    if worker_id is None:
        # worker ID is not set; this server is running in standalone mode
        # and should not be concerned with the status of its runners
        BentoMLContainer.config.runner_probe.enabled.set(False)
        BentoMLContainer.development_mode.set(False)

    if prometheus_dir is not None:
        BentoMLContainer.prometheus_multiproc_dir.set(prometheus_dir)
    if runner_map is not None:
        BentoMLContainer.remote_runner_mapping.set(json.loads(runner_map))

    svc = bentoml.load(bento_identifier, working_dir=working_dir, standalone_load=True)

    # Fall back to the configured defaults when the CLI did not supply them.
    port = port or BentoMLContainer.grpc.port.get()
    host = host or BentoMLContainer.grpc.host.get()

    # setup context
    component_context.component_name = svc.name
    tag = svc.tag
    if tag is None:
        component_context.bento_name = svc.name
        component_context.bento_version = "not available"
    else:
        component_context.bento_name = tag.name
        component_context.bento_version = tag.version or "not available"

    from bentoml._internal.server import grpc

    server_options: dict[str, t.Any] = {
        "bind_address": f"{host}:{port}",
        "enable_reflection": enable_reflection,
        "enable_channelz": enable_channelz,
    }
    if max_concurrent_streams:
        server_options["max_concurrent_streams"] = int(max_concurrent_streams)
    # Forward only the SSL settings that were actually provided.
    for option, value in (
        ("ssl_certfile", ssl_certfile),
        ("ssl_keyfile", ssl_keyfile),
        ("ssl_ca_certs", ssl_ca_certs),
    ):
        if value:
            server_options[option] = value
    grpc.Server(svc.grpc_servicer, **server_options).run()
if __name__ == "__main__":
    main()  # pylint: disable=no-value-for-parameter
|
[
"noreply@github.com"
] |
yubozhao.noreply@github.com
|
7cb73ceefb5f84a42e46e124b96ab48221531a18
|
368ef102345a6575d97727b314addcd65f150e19
|
/Modules/Crawler/Test/test_urlencode.py
|
1551c2f5ebf5acfefcf1449c5331ebfdb14c4aa6
|
[] |
no_license
|
newstar123/Pythonlab_DEV
|
844b43b6a8aef977515e367bd367e658e37cfa19
|
cdb2f0c2369291d6bccd597c33354830e5606eab
|
refs/heads/master
| 2020-04-07T18:08:06.631487
| 2018-11-21T19:47:54
| 2018-11-21T19:47:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
# -*- coding: utf-8 -*-
#---------------------------------------
# Program:  test_urlencode.py
# Version:  0.1
# Author:   ctang
# Date:     2016-02-19
# Language: Python 2.7.10
# Purpose:  exercise the urllib.urlencode method
#---------------------------------------
import urllib
import urllib2
# Build a one-parameter query string ("word=python") and append it to the
# Baidu search URL.
data = {}
data['word'] = 'python'
url_value = urllib.urlencode(data)
url = "http://www.baidu.com/s?"
full_url = url + url_value
print full_url
# Fetch the search results page and decode the raw bytes as UTF-8.
# NOTE(review): assumes the server responds in UTF-8 -- confirm the actual
# charset before relying on this decode.
page = urllib2.urlopen(full_url).read()
result = page.decode('utf-8')
print result
|
[
"devilcuruso@gmail.com"
] |
devilcuruso@gmail.com
|
8cbb4e9c73c074d346ac82b353623f3c95b574d0
|
37c6021f06d5d5aca2693b12449aab483d123669
|
/backend/task_profile/migrations/0001_initial.py
|
37879e374141ed157d4e079eb19ba798a8d6f010
|
[] |
no_license
|
crowdbotics-apps/den-sc-ity-21727
|
8165971a3611a143b2ba2a5bf865375a10cb7744
|
13790db00a09483f4a23df177c544b6d07acd2c3
|
refs/heads/master
| 2022-12-31T04:32:18.963986
| 2020-10-19T21:46:40
| 2020-10-19T21:46:40
| 305,520,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,239
|
py
|
# Generated by Django 2.2.16 on 2020-10-19 21:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the task_profile app.

    Creates TaskerProfile, Notification, InviteCode and CustomerProfile,
    all referencing the project's swappable AUTH_USER_MODEL.
    NOTE(review): auto-generated migration -- schema changes belong in a new
    migration, not in edits to this one.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # One-to-one profile for a tasker (service provider); the user link
        # cascades on delete.
        migrations.CreateModel(
            name='TaskerProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mobile_number', models.CharField(max_length=20)),
                ('photo', models.URLField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('last_login', models.DateTimeField(blank=True, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('city', models.CharField(blank=True, max_length=50, null=True)),
                ('vehicle', models.CharField(blank=True, max_length=50, null=True)),
                ('closing_message', models.TextField(blank=True, null=True)),
                ('work_area_radius', models.FloatField(blank=True, null=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='taskerprofile_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Broadcast-style notification: many-to-many, so one message can
        # target multiple users.
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(max_length=20)),
                ('message', models.TextField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('user', models.ManyToManyField(related_name='notification_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Invite codes: plain foreign key, so a user may own several codes.
        migrations.CreateModel(
            name='InviteCode',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=20)),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invitecode_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # One-to-one profile for a customer; mirrors TaskerProfile's common
        # contact/audit fields without the tasker-specific ones.
        migrations.CreateModel(
            name='CustomerProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mobile_number', models.CharField(max_length=20)),
                ('photo', models.URLField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('last_login', models.DateTimeField(blank=True, null=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customerprofile_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
410d4bb5e5e781c634ccb36147315481554ca815
|
542d6c7a1303916a60bf0d7d24c8499a02f961c1
|
/lib/python3.7/site-packages/azure/mgmt/recoveryservicesbackup/models/mab_file_folder_protected_item.py
|
f04f303870c1f5e21e635c8a9ec6e22c29c9d67c
|
[] |
no_license
|
jim-minter/azhack
|
e9918a916d2b71f3adcc4f1ea208312ad9c59210
|
0e8631fd067014a9f3000101a886e7a9a94acc95
|
refs/heads/master
| 2020-05-07T19:50:43.980332
| 2019-04-11T15:37:43
| 2019-04-11T22:20:29
| 180,830,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,151
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .protected_item import ProtectedItem
class MabFileFolderProtectedItem(ProtectedItem):
    """MAB workload-specific backup item.

    NOTE(review): AutoRest-generated model -- hand edits will be lost on
    regeneration; comments here are annotations only.

    :param backup_management_type: Type of backup managemenent for the backed
     up item. Possible values include: 'Invalid', 'AzureIaasVM', 'MAB', 'DPM',
     'AzureBackupServer', 'AzureSql'
    :type backup_management_type: str or :class:`BackupManagementType
     <azure.mgmt.recoveryservicesbackup.models.BackupManagementType>`
    :param workload_type: Type of workload this item represents. Possible
     values include: 'Invalid', 'VM', 'FileFolder', 'AzureSqlDb', 'SQLDB',
     'Exchange', 'Sharepoint', 'VMwareVM', 'SystemState', 'Client',
     'GenericDataSource'
    :type workload_type: str or :class:`DataSourceType
     <azure.mgmt.recoveryservicesbackup.models.DataSourceType>`
    :param container_name: Unique name of container
    :type container_name: str
    :param source_resource_id: ARM ID of the resource to be backed up.
    :type source_resource_id: str
    :param policy_id: ID of the backup policy with which this item is backed
     up.
    :type policy_id: str
    :param last_recovery_point: Timestamp when the last (latest) backup copy
     was created for this backup item.
    :type last_recovery_point: datetime
    :param protected_item_type: Polymorphic Discriminator
    :type protected_item_type: str
    :param friendly_name: Friendly name of this backup item.
    :type friendly_name: str
    :param computer_name: Name of the computer associated with this backup
     item.
    :type computer_name: str
    :param last_backup_status: Status of last backup operation.
    :type last_backup_status: str
    :param protection_state: Protected, ProtectionStopped, IRPending or
     ProtectionError
    :type protection_state: str
    :param is_scheduled_for_deferred_delete: Specifies if the item is
     scheduled for deferred deletion.
    :type is_scheduled_for_deferred_delete: bool
    :param deferred_delete_sync_time_in_utc: Sync time for deferred deletion.
    :type deferred_delete_sync_time_in_utc: long
    :param extended_info: Additional information with this backup item.
    :type extended_info: :class:`MabFileFolderProtectedItemExtendedInfo
     <azure.mgmt.recoveryservicesbackup.models.MabFileFolderProtectedItemExtendedInfo>`
    """

    # The polymorphic discriminator must always be present on the wire.
    _validation = {
        'protected_item_type': {'required': True},
    }

    # Maps Python attribute names to JSON keys and msrest serializer types.
    _attribute_map = {
        'backup_management_type': {'key': 'backupManagementType', 'type': 'str'},
        'workload_type': {'key': 'workloadType', 'type': 'str'},
        'container_name': {'key': 'containerName', 'type': 'str'},
        'source_resource_id': {'key': 'sourceResourceId', 'type': 'str'},
        'policy_id': {'key': 'policyId', 'type': 'str'},
        'last_recovery_point': {'key': 'lastRecoveryPoint', 'type': 'iso-8601'},
        'protected_item_type': {'key': 'protectedItemType', 'type': 'str'},
        'friendly_name': {'key': 'friendlyName', 'type': 'str'},
        'computer_name': {'key': 'computerName', 'type': 'str'},
        'last_backup_status': {'key': 'lastBackupStatus', 'type': 'str'},
        'protection_state': {'key': 'protectionState', 'type': 'str'},
        'is_scheduled_for_deferred_delete': {'key': 'isScheduledForDeferredDelete', 'type': 'bool'},
        'deferred_delete_sync_time_in_utc': {'key': 'deferredDeleteSyncTimeInUTC', 'type': 'long'},
        'extended_info': {'key': 'extendedInfo', 'type': 'MabFileFolderProtectedItemExtendedInfo'},
    }

    def __init__(self, backup_management_type=None, workload_type=None, container_name=None, source_resource_id=None, policy_id=None, last_recovery_point=None, friendly_name=None, computer_name=None, last_backup_status=None, protection_state=None, is_scheduled_for_deferred_delete=None, deferred_delete_sync_time_in_utc=None, extended_info=None):
        # Common ProtectedItem fields are handled by the base class.
        super(MabFileFolderProtectedItem, self).__init__(backup_management_type=backup_management_type, workload_type=workload_type, container_name=container_name, source_resource_id=source_resource_id, policy_id=policy_id, last_recovery_point=last_recovery_point)
        self.friendly_name = friendly_name
        self.computer_name = computer_name
        self.last_backup_status = last_backup_status
        self.protection_state = protection_state
        self.is_scheduled_for_deferred_delete = is_scheduled_for_deferred_delete
        self.deferred_delete_sync_time_in_utc = deferred_delete_sync_time_in_utc
        self.extended_info = extended_info
        # Fixed discriminator value identifying this subtype on the wire.
        self.protected_item_type = 'MabFileFolderProtectedItem'
|
[
"jminter@redhat.com"
] |
jminter@redhat.com
|
90c756f512fb32649c9ec73fa43d1af2fd58ef50
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_examples/_algorithms_challenges/pybites/advanced/164/test_links.py
|
8d9d7847acae4547f25225d0f966e663dbd91d85
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,463
|
py
|
import os
from pathlib import Path
import platform
import subprocess
import pytest
# no need to import make_html_links as we call links.py from CLI!
# Directory where the script under test is placed when not running locally
# (falls back to /tmp when the TMP env var is unset).
TMP = Path(os.getenv("TMP", "/tmp"))
SCRIPT = 'links.py'
# Darwin (macOS) is treated as a local dev run: use links.py from the CWD;
# otherwise resolve it inside TMP -- presumably where the platform copies it.
IS_LOCAL = platform.system() == 'Darwin'
MY_CODE = SCRIPT if IS_LOCAL else TMP / SCRIPT
# https://docs.pytest.org/en/latest/tmpdir.html#the-tmpdir-factory-fixture
@pytest.fixture
def my_file(tmp_path):
    """Return a path for a scratch file inside pytest's per-test tmp dir."""
    return tmp_path / "some_file.txt"
def _create_and_verify_links(my_file, lines, expected_links):
    """Write *lines* to *my_file*, pipe the file through links.py on the CLI,
    and assert every expected <a> tag appears in the script's output.

    BUGFIX: the original read ``my_file.r..()``, a syntax error (dataset
    mangling); restored to ``my_file.resolve()`` so the shell command gets
    an absolute path.
    """
    my_file.write_bytes(b'\n'.join(lines))
    # Feed the file via stdin exactly as a user would from the command line.
    cmd = f'cat {my_file.resolve()} | python {MY_CODE}'
    output = subprocess.check_output(cmd, shell=True).splitlines()
    assert all(link in output for link in expected_links)
def test_make_html_links_first_data_set(my_file):
    """Mixed valid/garbage input: only well-formed 'url, title' rows should
    yield <a> tags; per the expected data, non-pybit.es links carry
    target="_blank" while pybit.es links do not."""
    lines = [b"https://www.python.org, Python Homepage",
             b"bad data,blabla,123",
             (b"https://pybit.es/generators.html , "
              b"Generators are Awesome "),
             b"more bad data"]
    expected_links = [(b'<a href="https://www.python.org" target="_blank">'
                       b'Python Homepage</a>'),
                      (b'<a href="https://pybit.es/generators.html">'
                       b'Generators are Awesome</a>')]
    _create_and_verify_links(my_file, lines, expected_links)
def test_make_html_links_second_data_set(my_file):
    """Larger data set: several valid rows interleaved with garbage; same
    target="_blank" convention for external (non-pybit.es) links."""
    lines = [b"bogus data, again",
             b"https://codechalleng.es/bites/ , Bites of Py",
             (b"https://stackoverflow.com/a/12927564,How to capture"
              b" subprocess.call stdout"),
             b"https://pybit.es/,Our labor of love",
             b"https://pybit.es/pages/about.html, About Us",
             b"https://nu.nl, Dutch news site",
             b"And some more bad data !!"]
    expected_links = [(b'<a href="https://codechalleng.es/bites/">'
                       b'Bites of Py</a>'),
                      (b'<a href="https://stackoverflow.com/a/12927564" '
                       b'target="_blank">How to capture subprocess.call '
                       b'stdout</a>'),
                      b'<a href="https://pybit.es/">Our labor of love</a>',
                      (b'<a href="https://pybit.es/pages/about.html">'
                       b'About Us</a>'),
                      (b'<a href="https://nu.nl" target="_blank">'
                       b'Dutch news site</a>')]
    _create_and_verify_links(my_file, lines, expected_links)
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
7b6a4d6df8f67ad85480b8a58255130a952e5298
|
bc441bb06b8948288f110af63feda4e798f30225
|
/monitor_sdk/model/flowable_service/bpmn_end_event_pb2.py
|
2ff0cb3c70459b361343271db62a5a44c17ff8f6
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 3,046
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: bpmn_end_event.proto
# NOTE(review): generated module -- regenerate from the .proto rather than
# editing by hand; the comments below are annotations only.
import sys
# On Python 2 bytes literals pass through unchanged; on Python 3 they are
# encoded as latin1 so the serialized descriptor bytes stay byte-exact.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from monitor_sdk.model.flowable_service import bpmn_links_pb2 as monitor__sdk_dot_model_dot_flowable__service_dot_bpmn__links__pb2
# File-level descriptor for bpmn_end_event.proto (package flowable_service).
DESCRIPTOR = _descriptor.FileDescriptor(
  name='bpmn_end_event.proto',
  package='flowable_service',
  syntax='proto3',
  serialized_options=_b('ZJgo.easyops.local/contracts/protorepo-models/easyops/model/flowable_service'),
  serialized_pb=_b('\n\x14\x62pmn_end_event.proto\x12\x10\x66lowable_service\x1a\x33monitor_sdk/model/flowable_service/bpmn_links.proto\"F\n\x0c\x42PMNEndEvent\x12\n\n\x02id\x18\x01 \x01(\t\x12*\n\x05links\x18\x02 \x01(\x0b\x32\x1b.flowable_service.BPMNLinksBLZJgo.easyops.local/contracts/protorepo-models/easyops/model/flowable_serviceb\x06proto3')
  ,
  dependencies=[monitor__sdk_dot_model_dot_flowable__service_dot_bpmn__links__pb2.DESCRIPTOR,])
# Message descriptor for BPMNEndEvent: field 1 `id` (string) and field 2
# `links` (message flowable_service.BPMNLinks).
_BPMNENDEVENT = _descriptor.Descriptor(
  name='BPMNEndEvent',
  full_name='flowable_service.BPMNEndEvent',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='flowable_service.BPMNEndEvent.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='links', full_name='flowable_service.BPMNEndEvent.links', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=95,
  serialized_end=165,
)
# Link the `links` field to the BPMNLinks message from the imported module,
# then register the descriptor with the default symbol database.
_BPMNENDEVENT.fields_by_name['links'].message_type = monitor__sdk_dot_model_dot_flowable__service_dot_bpmn__links__pb2._BPMNLINKS
DESCRIPTOR.message_types_by_name['BPMNEndEvent'] = _BPMNENDEVENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message class synthesized from the descriptor above.
BPMNEndEvent = _reflection.GeneratedProtocolMessageType('BPMNEndEvent', (_message.Message,), {
  'DESCRIPTOR' : _BPMNENDEVENT,
  '__module__' : 'bpmn_end_event_pb2'
  # @@protoc_insertion_point(class_scope:flowable_service.BPMNEndEvent)
  })
_sym_db.RegisterMessage(BPMNEndEvent)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
dbe0605dfc8836252a7be025717a2c025a583ef3
|
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
|
/sdk/python/pulumi_azure_native/iotsecurity/v20210201preview/__init__.py
|
4fdb5150fbe12dc9d3eb365657246551620919aa
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MisinformedDNA/pulumi-azure-native
|
9cbd75306e9c8f92abc25be3f73c113cb93865e9
|
de974fd984f7e98649951dbe80b4fc0603d03356
|
refs/heads/master
| 2023-03-24T22:02:03.842935
| 2021-03-08T21:16:19
| 2021-03-08T21:16:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,640
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .defender_setting import *
from .get_defender_setting import *
from .get_on_premise_sensor import *
from .get_sensor import *
from .get_site import *
from .on_premise_sensor import *
from .sensor import *
from .site import *
from . import outputs
def _register_module():
    """Register this module's resource types with the Pulumi runtime so that
    URNs read back from state can be rehydrated into typed resources.

    NOTE(review): generated by the Pulumi SDK Generator -- do not hand-edit.
    """
    import pulumi
    from ... import _utilities

    class Module(pulumi.runtime.ResourceModule):
        # SDK semver reported to the Pulumi engine.
        _version = _utilities.get_semver_version()

        def version(self):
            return Module._version

        def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
            # Map the fully-qualified type token back to its resource class.
            if typ == "azure-native:iotsecurity/v20210201preview:DefenderSetting":
                return DefenderSetting(name, pulumi.ResourceOptions(urn=urn))
            elif typ == "azure-native:iotsecurity/v20210201preview:OnPremiseSensor":
                return OnPremiseSensor(name, pulumi.ResourceOptions(urn=urn))
            elif typ == "azure-native:iotsecurity/v20210201preview:Sensor":
                return Sensor(name, pulumi.ResourceOptions(urn=urn))
            elif typ == "azure-native:iotsecurity/v20210201preview:Site":
                return Site(name, pulumi.ResourceOptions(urn=urn))
            else:
                raise Exception(f"unknown resource type {typ}")

    _module_instance = Module()
    pulumi.runtime.register_resource_module("azure-native", "iotsecurity/v20210201preview", _module_instance)

_register_module()
|
[
"noreply@github.com"
] |
MisinformedDNA.noreply@github.com
|
38208982c2d014d233c50e8817e0a80f71c021ff
|
2f963d7989749037a3ec27aaa39b31416b33cbb2
|
/ib_users/views/get_user/get_user.py
|
183ff7a74ce5cd00bbb88a71c9efbd66b445cd96
|
[] |
no_license
|
migsantos121/phd3-backend
|
3cd014908856c995de3c4473d82059bc9c1b5794
|
9d1d2bd6f55dc89719ce5a1916c5db3d573aec1e
|
refs/heads/master
| 2022-12-12T17:25:59.334509
| 2020-03-09T09:24:08
| 2020-03-09T09:24:08
| 245,991,086
| 0
| 0
| null | 2022-06-28T14:45:50
| 2020-03-09T09:17:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,711
|
py
|
def get_user(*args, **kwargs):
    """
    Return the requesting user's details via the endpoint_response helper.

    Request:
        args[0] -> the incoming request; headers are read from args[0].META
        kwargs["user"] -> request user
        kwargs["request_data"] -> request body data dict (unused here)
    Response:
        tuple(response_status_code, response_object, response_headers_object)
        as produced by django_swagger_utils' endpoint_response.
    """
    user = kwargs["user"]
    # Prefer the X-SOURCE header; fall back to the legacy SOURCE header.
    source = args[0].META.get('HTTP_X_SOURCE', '')
    if not source:
        source = args[0].META.get('HTTP_SOURCE', '')
    from ib_users.models.ib_user import IBUser
    response_object = IBUser.get_user_details(user, source)
    # FIX: removed the leftover debug `print response_object` (a Python 2
    # print statement) and the unused `request_data` local.
    from django_swagger_utils.drf_server.utils.server_gen.endpoint_response import endpoint_response
    return endpoint_response(response_object)
|
[
"migsantos121@outlook.com"
] |
migsantos121@outlook.com
|
9d8cc9f43fad40403b595b3943f0646b153de858
|
4d0f3e2d7455f80caea978e4e70621d50c6c7561
|
/MongoDB/BigData_Bulk/Update_unordered.py
|
0879aac9fe0ae108a62abacca7d7a3a7501a65e4
|
[] |
no_license
|
mhdr/PythonSamples
|
66940ee2353872d2947c459e3865be42140329c6
|
1a9dccc05962033ea02b081a39cd67c1e7b29d0c
|
refs/heads/master
| 2020-04-14T01:10:13.033940
| 2016-05-28T15:33:52
| 2016-05-28T15:33:52
| 30,691,539
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,764
|
py
|
# Benchmark script: bulk-insert ~100k documents into MongoDB, then bulk-update
# them, printing wall-clock timestamps around each phase.
# http://api.mongodb.org/python/current/examples/bulk.html
__author__ = 'Mahmood'
import time
from pymongo import MongoClient
from bson.objectid import ObjectId
client=MongoClient()
# drop last db
client.drop_database("test-db")
# database
db=client["test-db"]
# table
# Unordered bulk op: inserts may be executed in any order / in parallel.
people_bulk=db["People"].initialize_unordered_bulk_op()
people=db["People"]
print("Starting insert loop : {0}".format(time.asctime( time.localtime(time.time()) )))
# Queue 99,999 identical documents; nothing hits the server until execute().
for i in range(1,100000):
    new_person={"FirstName" : "Mahmood",
            "LastName" : "Ramzani",
            "Gender": "Male",
            "BirthDate":{"Year":1985,"Month":5,"Day":22},
            "Country":"Iran",
            "City":"Rasht",
            "email":"mahmoodramzani@gmail.com",
            "user_name":"mahmoodramzani",
            "password":"1234"}
    ids=people_bulk.insert(new_person)
print("End of insert loop : {0}".format(time.asctime( time.localtime(time.time()) )))
print("Starting execute : {0}".format(time.asctime( time.localtime(time.time()) )))
people_bulk.execute()
print("End of execute : {0}".format(time.asctime( time.localtime(time.time()) )))
matches=people.find()
# initialize again because you can run execute command once
# NOTE(review): this phase uses an *ordered* bulk op, unlike the unordered one
# above — confirm whether that asymmetry is intentional for the benchmark.
people_bulk=db["People"].initialize_ordered_bulk_op()
print("Starting update loop: {0}".format(time.asctime( time.localtime(time.time()) )))
# Queue a $set update per fetched document, keyed by its _id.
for match in matches:
    match["LastName"]="Ramzani Sesemasi"
    id=match["_id"]
    people_bulk.find({"_id":id}).update({"$set":match})
print("End of update loop: {0}".format(time.asctime( time.localtime(time.time()) )))
print("Starting execute : {0}".format(time.asctime( time.localtime(time.time()) )))
people_bulk.execute()
print("End of execute : {0}".format(time.asctime( time.localtime(time.time()) )))
|
[
"ramzani.mahmood@gmail.com"
] |
ramzani.mahmood@gmail.com
|
abb9e8d07146fd93787a23e3c00f4f035f2e6a3c
|
d767a2048c050421e7213be2ecccff09014e270e
|
/Day 24/Set Intersection(Hackerrank).py
|
86fd4076cac1c920fcd2f36da4bae154bb48429b
|
[] |
no_license
|
Benson1198/31-Days-of-CP
|
23ff16f9899d37e2ca9a1eba81a87b521233fd2f
|
0e5de1d0b4e1d4811fb096455de951f37c3d69d0
|
refs/heads/master
| 2022-09-18T22:26:53.178381
| 2020-06-03T14:20:41
| 2020-06-03T14:20:41
| 260,527,724
| 2
| 1
| null | 2020-05-04T17:36:36
| 2020-05-01T18:15:21
|
Python
|
UTF-8
|
Python
| false
| false
| 173
|
py
|
# Read two sets from stdin (each preceded by an unused size line) and print
# the size of their intersection. The size line only gates whether the
# element line is read at all, matching the original loop-with-break.
if int(input()) > 0:
    a = set(map(int, input().split()))
if int(input()) > 0:
    b = set(map(int, input().split()))
print(len(a & b))
|
[
"34964177+Benson1198@users.noreply.github.com"
] |
34964177+Benson1198@users.noreply.github.com
|
22bbe05b376afb7261d2ccc0c655fab46d2139f9
|
d42a9128898d504a9831f1afee3198c4677236c9
|
/Level_2/카펫.py
|
7590489b9346b70f2da4fc35051fa1209028eb67
|
[] |
no_license
|
ketkat001/Programmers-coding
|
6848a9c8cffd97b792cfc8856ec135b72af5d688
|
799baba8d66a9971b43233d231cecbf262b4ea27
|
refs/heads/master
| 2023-09-02T23:07:25.614820
| 2021-10-17T18:12:02
| 2021-10-17T18:12:02
| 235,016,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
def solution(brown, yellow):
    """Return [width, height] of a carpet with `brown` border tiles and
    `yellow` interior tiles (width >= height by construction)."""
    for height_in in range(yellow, 0, -1):
        if yellow % height_in:
            continue
        width_in = yellow // height_in
        # Border tile count for an interior of height_in x width_in.
        if 2 * height_in + 2 * width_in + 4 == brown:
            return [height_in + 2, width_in + 2]
    return []
print(solution(10, 2))
|
[
"ketkat001@gmail.com"
] |
ketkat001@gmail.com
|
c3d4e81a3c0d45e1f41190732262efc7428078ae
|
5f1ce3d168695f38f8fec53ab56d464042fbcbae
|
/meraki_v1/api/insight.py
|
48f25659153929344e3df744090bf80d0ca950b4
|
[
"MIT"
] |
permissive
|
fsandberg/dashboard-api-python
|
e209b927f1b5d1b29a76e36def1587b873764b0f
|
c01ff038643a39bd12660d2719375eeb05c7ba24
|
refs/heads/master
| 2022-06-23T06:25:37.599413
| 2020-05-11T16:47:31
| 2020-05-11T16:47:31
| 263,100,991
| 0
| 0
|
MIT
| 2020-05-11T16:43:14
| 2020-05-11T16:43:13
| null |
UTF-8
|
Python
| false
| false
| 4,514
|
py
|
class Insight(object):
    """Meraki Insight monitored-media-server endpoints.

    Thin wrapper: each method builds the request metadata, resource path and
    (where needed) body payload, then delegates the HTTP call to the injected
    session object.
    """

    def __init__(self, session):
        super(Insight, self).__init__()
        self._session = session

    def getOrganizationInsightMonitoredMediaServers(self, organizationId: str):
        """
        **List the monitored media servers for this organization. Only valid for organizations with Meraki Insight.**
        https://developer.cisco.com/docs/meraki-api-v1/#!get-organization-insight-monitored-media-servers

        - organizationId (string)
        """
        metadata = {
            'tags': ['insight', 'configure', 'monitoredMediaServers'],
            'operation': 'getOrganizationInsightMonitoredMediaServers',
        }
        resource = f'/organizations/{organizationId}/insight/monitoredMediaServers'
        return self._session.get(metadata, resource)

    def createOrganizationInsightMonitoredMediaServer(self, organizationId: str, name: str, address: str):
        """
        **Add a media server to be monitored for this organization. Only valid for organizations with Meraki Insight.**
        https://developer.cisco.com/docs/meraki-api-v1/#!create-organization-insight-monitored-media-server

        - organizationId (string)
        - name (string): The name of the VoIP provider
        - address (string): The IP address (IPv4 only) or hostname of the media server to monitor
        """
        metadata = {
            'tags': ['insight', 'configure', 'monitoredMediaServers'],
            'operation': 'createOrganizationInsightMonitoredMediaServer',
        }
        resource = f'/organizations/{organizationId}/insight/monitoredMediaServers'
        # Both body fields are required positional parameters, so the payload
        # can be built directly rather than filtered out of locals().
        payload = {'name': name, 'address': address}
        return self._session.post(metadata, resource, payload)

    def getOrganizationInsightMonitoredMediaServer(self, organizationId: str, monitoredMediaServerId: str):
        """
        **Return a monitored media server for this organization. Only valid for organizations with Meraki Insight.**
        https://developer.cisco.com/docs/meraki-api-v1/#!get-organization-insight-monitored-media-server

        - organizationId (string)
        - monitoredMediaServerId (string)
        """
        metadata = {
            'tags': ['insight', 'configure', 'monitoredMediaServers'],
            'operation': 'getOrganizationInsightMonitoredMediaServer',
        }
        resource = f'/organizations/{organizationId}/insight/monitoredMediaServers/{monitoredMediaServerId}'
        return self._session.get(metadata, resource)

    def updateOrganizationInsightMonitoredMediaServer(self, organizationId: str, monitoredMediaServerId: str, **kwargs):
        """
        **Update a monitored media server for this organization. Only valid for organizations with Meraki Insight.**
        https://developer.cisco.com/docs/meraki-api-v1/#!update-organization-insight-monitored-media-server

        - organizationId (string)
        - monitoredMediaServerId (string)
        - name (string): The name of the VoIP provider
        - address (string): The IP address (IPv4 only) or hostname of the media server to monitor
        """
        metadata = {
            'tags': ['insight', 'configure', 'monitoredMediaServers'],
            'operation': 'updateOrganizationInsightMonitoredMediaServer',
        }
        resource = f'/organizations/{organizationId}/insight/monitoredMediaServers/{monitoredMediaServerId}'
        # Only forward the optional body fields the caller actually supplied.
        payload = {field: kwargs[field] for field in ('name', 'address') if field in kwargs}
        return self._session.put(metadata, resource, payload)

    def deleteOrganizationInsightMonitoredMediaServer(self, organizationId: str, monitoredMediaServerId: str):
        """
        **Delete a monitored media server from this organization. Only valid for organizations with Meraki Insight.**
        https://developer.cisco.com/docs/meraki-api-v1/#!delete-organization-insight-monitored-media-server

        - organizationId (string)
        - monitoredMediaServerId (string)
        """
        metadata = {
            'tags': ['insight', 'configure', 'monitoredMediaServers'],
            'operation': 'deleteOrganizationInsightMonitoredMediaServer',
        }
        resource = f'/organizations/{organizationId}/insight/monitoredMediaServers/{monitoredMediaServerId}'
        return self._session.delete(metadata, resource)
|
[
"shiychen@cisco.com"
] |
shiychen@cisco.com
|
0096083f9246d661a3f54e1a514347e517ac5392
|
c074fb834cb4a8ac75d107146df10f9496590792
|
/shows/migrations/0013_auto_20200924_0651.py
|
48e9ac2712f64fd60afd0c97b326d8e90fc0b462
|
[
"Unlicense"
] |
permissive
|
jmhubbard/quote_of_the_day_custom_user
|
4d5ffd4183d7e6290161b84cae2aa1f7ad621a99
|
27024b2953c1c94fd2970563c3ab31ad444912b6
|
refs/heads/master
| 2023-02-19T00:59:27.372671
| 2021-01-10T02:45:56
| 2021-01-10T02:45:56
| 293,443,918
| 1
| 0
|
Unlicense
| 2020-12-03T17:59:59
| 2020-09-07T06:41:25
|
Python
|
UTF-8
|
Python
| false
| false
| 363
|
py
|
# Generated by Django 3.1.1 on 2020-09-24 06:51
from django.db import migrations
class Migration(migrations.Migration):
    """Enforce that an episode is unique per (name, season, show)."""

    dependencies = [
        ('shows', '0012_remove_episode_number'),
    ]
    operations = [
        migrations.AlterUniqueTogether(
            name='episode',
            unique_together={('name', 'season', 'show')},
        ),
    ]
|
[
"jasonhubb@gmail.com"
] |
jasonhubb@gmail.com
|
5d77eb09e4b6cf34c16b1f1dd27fa8980347f031
|
cb30d1a3a4fa6c8f7a6f89a671fbdb4a808e19e3
|
/c3/func-name.py
|
68b47cdad8ff7c3dad23e07a02f312777672a6df
|
[] |
no_license
|
systemchip/python-for-everyone
|
0b45172ca5b41c3b5fc1a835fbccf4a479c282ea
|
9fb7f751a97fb6a110079e1e3e1dd9601fb24374
|
refs/heads/master
| 2021-09-02T09:18:22.013704
| 2017-07-17T07:46:19
| 2017-07-17T07:46:19
| 115,913,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
# Function definition
def calcTime(dist, speed):
    """Return the travel time for `dist` at `speed`, rounded to 1 decimal."""
    return round(dist / speed, 1)

# Positional-argument call --- (*1)
print(calcTime(500, 100))
# Keyword-argument call --- (*2)
print(calcTime(dist=500, speed=100))
|
[
"dylee@wikibook.co.kr"
] |
dylee@wikibook.co.kr
|
1f320c3de6e2963e75238e0f5091347cc938b7a7
|
85ac9f05432a2a4299cb43969395fd7865e78267
|
/entrega1/src/dump_movies_info.py
|
b32c447efa6d015870e60b43c41c52b8932cb071
|
[] |
no_license
|
pablodanielrey/twss
|
72d8056c2f3fd2a70d465d3176802dbc019fd022
|
b533fa6e0ea86460d8ccb49ec554a6f6e7ab4352
|
refs/heads/master
| 2023-05-20T03:06:23.078921
| 2021-06-12T23:31:13
| 2021-06-12T23:31:13
| 352,428,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 916
|
py
|
"""
script utilitario para hacer un json con info plana sobre cada película.
solo para proceso de debug y verificación
procesa el archivo de merge
"""
import json
import datetime
from common import Merge, MergeInfo, Movie, Show
if __name__ == '__main__':
movies = []
with open('data/merged.json', 'r') as f:
merges = json.loads(f.read())
_movies = merges[Merge.MOVIES.value]
for m in _movies:
m['SHOWS'] = [s for s in merges[Merge.SHOWS.value] if s[Show.MOVIE.value] == m[Movie.ID.value]]
m['MERGES'] = [s[MergeInfo.MOVIES.value] for s in merges[Merge.MERGES.value] if s[MergeInfo.NEW_ID.value] == m[Movie.ID.value]]
movies.append(m)
dumped = {
'DATE': str(datetime.datetime.utcnow()),
'MOVIES': movies
}
with open('data/dumped.json', 'w') as f:
f.write(json.dumps(dumped, ensure_ascii=False))
|
[
"pablodanielrey@gmail.com"
] |
pablodanielrey@gmail.com
|
4c6392fa5873487af848e8d1467ac221f4033f0b
|
2d1769af5eee0c764e1a917fca7a0f58c8751a13
|
/cnn/faces/face_utils.py
|
73aa05568ee350ac35e6283d6bcfd48a4e504288
|
[] |
no_license
|
m-learning/tensorflow_ann_modules
|
aca785991ba9700da0a1e7a70cce9f32fc6ac6e9
|
914a9fb2c6a4400ea4b7643e3f3fd6aac5f94f8d
|
refs/heads/master
| 2020-05-22T03:59:18.650205
| 2017-08-27T14:34:16
| 2017-08-27T14:34:16
| 61,343,012
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,101
|
py
|
"""
Created on Jan 12, 2017
Utility module for FaceNet model
@author: Levan Tsinadze
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from scipy import misc
from cnn.faces import detect_face, facenet
import numpy as np
import tensorflow as tf
GRAPH_FILE = 'face_embeddings.pb'
INPUT_NAME = 'input'
INPUT_LAYER = 'input:0'
TRAIN_LAYER = 'phase_train:0'
EMBEDDINGS_LAYER = 'embeddings:0'
def load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction, _files):
  """Loads and aligns face images from files.

  Args:
    image_paths - image file paths
    image_size - output side length of each (square) aligned image
    margin - margin in pixels added around the detected face box
    gpu_memory_fraction - GPU memory fraction for parallel processing
                          (NOTE(review): currently unused in this body)
    _files - object whose model_dir points at the MTCNN model weights
  Returns:
    images - aligned, prewhitened face images stacked into one ndarray
  """
  minsize = 20  # minimum size of face
  threshold = [ 0.6, 0.7, 0.7 ]  # three steps's threshold
  factor = 0.709  # scale factor

  print('Creating networks and loading parameters')
  with tf.Graph().as_default() as g:
    sess = tf.Session(graph=g, config=tf.ConfigProto(log_device_placement=False))
    with sess.as_default():
      pnet, rnet, onet = detect_face.create_mtcnn(sess, _files.model_dir)

  nrof_samples = len(image_paths)
  img_list = [None] * nrof_samples
  # Fixed: `xrange` is Python-2-only; `range` works under both interpreters
  # and matches the __future__ imports at the top of this module.
  for i in range(nrof_samples):
    img = misc.imread(os.path.expanduser(image_paths[i]))
    img_size = np.asarray(img.shape)[0:2]
    bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
    # Use the first detected face only, expanded by `margin` and clamped to
    # the image bounds.
    det = np.squeeze(bounding_boxes[0, 0:4])
    bb = np.zeros(4, dtype=np.int32)
    bb[0] = np.maximum(det[0] - margin / 2, 0)
    bb[1] = np.maximum(det[1] - margin / 2, 0)
    bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
    bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
    cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
    aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
    prewhitened = facenet.prewhiten(aligned)
    img_list[i] = prewhitened
  images = np.stack(img_list)

  return images
|
[
"levantsinadze@gmail.com"
] |
levantsinadze@gmail.com
|
ada1387065207483a988baa6ad1f33206c2dd449
|
e4066b34668bbf7fccd2ff20deb0d53392350982
|
/project_scrapy/spiders/dulux.py
|
34f2fba74f7d2b55e236d9094a345fdac84c6e22
|
[] |
no_license
|
sushma535/WebSites
|
24a688b86e1c6571110f20421533f0e7fdf6e1a8
|
16a3bfa44e6c7e22ae230f5b336a059817871a97
|
refs/heads/master
| 2023-08-18T09:09:16.052555
| 2021-10-11T00:41:50
| 2021-10-11T00:41:50
| 415,621,279
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,539
|
py
|
import scrapy
from scrapy.crawler import CrawlerProcess
import os
import csv
from csv import reader
import re
# Shared accumulator: both start URLs contribute fields to this dict; the CSV
# row is written only after both responses have arrived.
total_data = {}
class SimilarWeb(scrapy.Spider):
    # Spider that scrapes the site title from dulux.com.au and the
    # category/description from its similarsites.com page, then appends one
    # combined row to a CSV file.
    name = 'SW'
    user_agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
    start_urls = ['https://www.dulux.com.au/', 'https://www.similarsites.com/site/dulux.com.au/']
    csv_columns = ['Category', 'Description', 'Name', 'Url']
    csv_file = 'websites1_data.csv'
    # Number of start URLs processed so far; the row is emitted at 2.
    count = 0

    def parse(self, response):
        # Callback for both start URLs; branch on which URL responded.
        data, desc, cat = '', '', ''
        print('response url:', response.url)
        if response.url == self.start_urls[0]:
            # Main site: capture the page title as the site name.
            data = response.css('title::text').get()
            if data:
                data = re.sub("\n\t\t", '', data)
                total_data['Name'] = data
                self.count += 1
        elif response.url == self.start_urls[1]:
            # similarsites.com page: category labels and site description.
            # NOTE(review): the class names below look auto-generated and may
            # break when the site redeploys — verify before relying on this.
            cat = response.css(
                'div[class="StatisticsCategoriesDistribution__CategoryTitle-fnuckk-6 jsMDeK"]::text').getall()
            desc = response.css('div[class="SiteHeader__Description-sc-1ybnx66-8 hhZNQm"]::text').get()
            if cat:
                cat = ": ".join(cat[:])
            total_data['Category'] = cat
            total_data['Description'] = desc
            total_data['Url'] = self.start_urls[0]
            self.count += 1
        if self.count == 2:
            # Both responses seen: assemble the row in csv_columns order.
            print("total data", total_data)
            new_data = [total_data['Category'], total_data['Description'], total_data['Name'],
                        total_data['Url']]
            print("new data", new_data)
            self.row_appending_to_csv_file(new_data)

    def row_appending_to_csv_file(self, data):
        # Append one row to csv_file, writing the header first when the file
        # is new or empty.
        if os.path.exists(self.csv_file):
            need_to_add_headers = False
            with open(self.csv_file, 'a+', newline='') as file:
                file.seek(0)
                csv_reader = reader(file)
                # Empty existing file still needs the header row.
                if len(list(csv_reader)) == 0:
                    need_to_add_headers = True
                csv_writer = csv.writer(file)
                if need_to_add_headers:
                    csv_writer.writerow(self.csv_columns)
                csv_writer.writerow(data)
        else:
            with open(self.csv_file, 'w', newline='') as file:
                csv_writer = csv.writer(file)
                csv_writer.writerow(self.csv_columns)  # header
                csv_writer.writerow(data)
# Run the spider in-process; start() blocks until the crawl finishes.
process = CrawlerProcess()
process.crawl(SimilarWeb)
process.start()
|
[
"sushmakusumareddy@gmail.com"
] |
sushmakusumareddy@gmail.com
|
c5b11a0dd5db5c6fdf0dfbc2ffa87ba1ab6f03e7
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/WxkFoXTLYiAq57uDq_6.py
|
ef17e6cb7e3ba1e66c2d2c4130f086ae67aa0da7
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,880
|
py
|
"""
The insurance guy calls. They were about to pay you all that fortune you've
been anxiously waiting for, but they detected further irregularities; the list
of stolen items is misformatted and appears to contain other entries that
don't belong there. Find and remove them.
You receive a dict with nested dicts with `strings` as values. Convert their
values to `number` and return a dict with entries that evaluate to type `int`.
### Examples
find_and_remove({
"workshop": {
"bedsheets": "2000",
"working": "v0g89t7t",
"pen": "370",
"movies": "wo1a3d5d",
},
}), {
"workshop": {
"bedsheets": 2000,
"pen": 370
}
}
find_and_remove({
"bedroom": {
"slippers": "10000",
"piano": "5500",
"call": "vet",
"travel": "world",
},
}), {
"bedroom": {
"slippers": 10000,
"piano": 5500,
},
}
### Notes
* This challenge was translated from Miguel Carvalho's JavaScript Burglary Series. The following are links to his Javascript series:
* If you have suggestions on how to present or further test this challenge please leave a comment.
* This series is part of a [collection that focuses on objects](https://edabit.com/collection/6NzWEMSwrSw4fnKkL). If you are interested in following the breath-taking narrative skills of yours truly or just do some object focused challenges (the challenges are ordered in ascending difficulty order), you can more easily [do that here](https://edabit.com/collection/6NzWEMSwrSw4fnKkL).
"""
def find_and_remove(dct):
    """Convert nested string values to int and drop non-integer entries.

    Mutates *dct* in place and returns it: each inner value that parses as
    an integer is replaced by that int; values that do not parse are removed
    from the inner dict.
    """
    for room, items in dct.items():
        for item, price in items.items():
            try:
                items[item] = int(price)
            except (TypeError, ValueError):
                # Not an integer entry — mark with a sentinel for removal.
                # (Narrowed from a bare except; only conversion failures
                # should be treated as "not an int".)
                items[item] = None
        # Keep every successful conversion. Using a None sentinel instead of
        # -1 with a `v > 0` filter fixes the bug where legitimate "0" and
        # negative integer entries were silently discarded.
        dct[room] = {k: v for k, v in items.items() if v is not None}
    return dct
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
800ad7f7ceb5c6d0b024288fbf63c40b291b9cf8
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p3BR/R2/benchmark/startQiskit_noisy398.py
|
211c288f57aab268aa0776e4eb56d9b981a0e392
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,510
|
py
|
# qubit number=3
# total number=78
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings character-wise; the result string
    is returned in reversed order (matching the original convention)."""
    length = len(s)
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(length)]
    return ''.join(reversed(bits))
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the phase-free oracle O_f as a circuit on n control qubits
    plus one target qubit: for every n-bit input where f is '1', flip the
    target via a multi-controlled Toffoli."""
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # X-conjugation turns the all-ones control condition of mct
            # into a condition on this particular bit pattern `rep`.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename=(kernel + '-oracle.png'))
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build a Bernstein-Vazirani circuit for oracle f on n input qubits,
    preceded by an auto-generated (mutation-testing) gate sequence.

    The long block of numbered gates below is machine-generated and must be
    preserved exactly; the `# number=K` tags identify individual mutations.
    """
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    b = f(zero)

    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)

    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin
    prog.h(input_qubit[1]) # number=1
    prog.h(input_qubit[2]) # number=38
    prog.cz(input_qubit[0],input_qubit[2]) # number=39
    prog.h(input_qubit[2]) # number=40
    prog.h(input_qubit[2]) # number=59
    prog.cz(input_qubit[0],input_qubit[2]) # number=60
    prog.h(input_qubit[2]) # number=61
    prog.h(input_qubit[2]) # number=42
    prog.cz(input_qubit[0],input_qubit[2]) # number=43
    prog.h(input_qubit[2]) # number=44
    prog.h(input_qubit[2]) # number=48
    prog.cz(input_qubit[0],input_qubit[2]) # number=49
    prog.h(input_qubit[2]) # number=50
    prog.h(input_qubit[2]) # number=71
    prog.cz(input_qubit[0],input_qubit[2]) # number=72
    prog.h(input_qubit[2]) # number=73
    prog.x(input_qubit[2]) # number=55
    prog.h(input_qubit[2]) # number=67
    prog.cz(input_qubit[0],input_qubit[2]) # number=68
    prog.h(input_qubit[2]) # number=69
    prog.h(input_qubit[2]) # number=64
    prog.cz(input_qubit[0],input_qubit[2]) # number=65
    prog.h(input_qubit[2]) # number=66
    prog.h(input_qubit[2]) # number=75
    prog.cz(input_qubit[0],input_qubit[2]) # number=76
    prog.h(input_qubit[2]) # number=77
    prog.h(input_qubit[2]) # number=51
    prog.cz(input_qubit[0],input_qubit[2]) # number=52
    prog.h(input_qubit[2]) # number=53
    prog.h(input_qubit[2]) # number=25
    prog.cz(input_qubit[0],input_qubit[2]) # number=26
    prog.h(input_qubit[2]) # number=27
    prog.h(input_qubit[1]) # number=7
    prog.cz(input_qubit[2],input_qubit[1]) # number=8
    prog.rx(0.17592918860102857,input_qubit[2]) # number=34
    prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
    prog.h(input_qubit[1]) # number=9
    prog.h(input_qubit[1]) # number=18
    prog.rx(2.3310617489636263,input_qubit[2]) # number=58
    prog.x(input_qubit[2]) # number=74
    prog.cz(input_qubit[2],input_qubit[1]) # number=19
    prog.h(input_qubit[1]) # number=20
    prog.x(input_qubit[1]) # number=62
    prog.y(input_qubit[1]) # number=14
    prog.h(input_qubit[1]) # number=22
    prog.cz(input_qubit[2],input_qubit[1]) # number=23
    prog.rx(-0.9173450548482197,input_qubit[1]) # number=57
    prog.cx(input_qubit[2],input_qubit[1]) # number=63
    prog.h(input_qubit[1]) # number=24
    prog.z(input_qubit[2]) # number=3
    prog.cx(input_qubit[2],input_qubit[1]) # number=70
    prog.z(input_qubit[1]) # number=41
    prog.x(input_qubit[1]) # number=17
    prog.y(input_qubit[2]) # number=5
    prog.x(input_qubit[2]) # number=21
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()

    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])

    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()

    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate `prog` on the statevector backend and return a mapping of
    basis-state labels ("|bits>") to complex amplitudes."""
    simulator = Aer.get_backend('statevector_simulator')
    amplitudes = execute(prog, simulator).result().get_statevector()
    n_qubits = round(log2(len(amplitudes)))
    return {
        "|" + np.binary_repr(idx, n_qubits) + ">": amplitudes[idx]
        for idx in range(2 ** n_qubits)
    }
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Run `prog` on the named Aer backend and return measurements, the ideal
    statevector, the most frequent (bit-reversed) outcome `a`, and `b`."""
    # Ideal amplitudes first (independent of the sampled backend).
    quantum_state = get_statevector(prog)

    # Sample the circuit and pick the single most common outcome, reversing
    # the bit string to little-endian order.
    simulator = Aer.get_backend(backend_str)
    counts = execute(prog, simulator, shots=shots).result().get_counts()
    a = Counter(counts).most_common(1)[0][0][::-1]

    return {
        "measurements": counts,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }
def bernstein_test_1(rep: str):
    """011 . x + 1"""
    secret, bias = "011", "1"
    return bitwise_xor(bitwise_dot(secret, rep), bias)
def bernstein_test_2(rep: str):
    """000 . x + 0"""
    secret, bias = "000", "0"
    return bitwise_xor(bitwise_dot(secret, rep), bias)
def bernstein_test_3(rep: str):
    """111 . x + 1"""
    secret, bias = "111", "1"
    return bitwise_xor(bitwise_dot(secret, rep), bias)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_noisy398.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
55a8ce672f93c1ac8c14ad9db132aa3cb25e038f
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/EYojuPCtvSzF2chkZ_24.py
|
1b12d28fa0ef8d5bd219097ad16e1cf7a8231166
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
"""
Create a function that returns the selected **filename** from a path. Include
the **extension** in your answer.
### Examples
get_filename("C:/Projects/pil_tests/ascii/edabit.txt") ➞ "edabit.txt"
get_filename("C:/Users/johnsmith/Music/Beethoven_5.mp3") ➞ "Beethoven_5.mp3"
get_filename("ffprobe.exe") ➞ "ffprobe.exe"
### Notes
* Tests will include both absolute and relative paths.
* For simplicity, all paths will include forward slashes.
"""
def get_filename(path):
    """Return the final component (filename with extension) of a
    forward-slash-separated path; a bare filename is returned unchanged."""
    return path.rpartition("/")[2]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
6289659853a3f52588b54dee16761d5fd5d92783
|
fcc88521f63a3c22c81a9242ae3b203f2ea888fd
|
/Python3/1061-Lexicographically-Smallest-Equivalent-String/soln.py
|
e5984b3c6419925571e515793f7e0abedf137ee7
|
[
"MIT"
] |
permissive
|
wyaadarsh/LeetCode-Solutions
|
b5963e3427aa547d485d3a2cb24e6cedc72804fd
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
refs/heads/master
| 2022-12-06T15:50:37.930987
| 2020-08-30T15:49:27
| 2020-08-30T15:49:27
| 291,811,790
| 0
| 1
|
MIT
| 2020-08-31T19:57:35
| 2020-08-31T19:57:34
| null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
class Solution:
    def smallestEquivalentString(self, A: str, B: str, S: str) -> str:
        """Union-find over lowercase letters: letters paired in A/B are
        equivalent; map each char of S to the smallest letter in its class.

        Fixed: the original referenced `string.ascii_lowercase` without any
        `import string` in scope (NameError); imported locally here.
        """
        import string
        parents = {ch: ch for ch in string.ascii_lowercase}

        def find(x):
            # Root lookup with path compression.
            if x != parents[x]:
                parents[x] = find(parents[x])
            return parents[x]

        def unite(x, y):
            # Always attach the lexicographically larger root under the
            # smaller one, so find() yields the smallest class member.
            rx, ry = find(x), find(y)
            if rx > ry:
                rx, ry = ry, rx
            parents[ry] = rx

        for a, b in zip(A, B):
            unite(a, b)
        return ''.join(map(find, S))
|
[
"zhang623@wisc.edu"
] |
zhang623@wisc.edu
|
71ad0515fca562d7f5067aec818dc3aa4556943e
|
561e84bcf8e81e325795a7f917eda62fa850f23e
|
/tests/Mujoco/multi_plot_loss.py
|
6629eec90d2de05b8a6341404e6897bbe30888d1
|
[] |
no_license
|
maxiaoba/SoftAC
|
e397de3c82f7c6c3c97c7a9e483bc9f669e5a07e
|
668c91511fa5b1a77676197115561eb7a8ecf5c5
|
refs/heads/master
| 2020-05-23T12:16:22.721219
| 2020-03-19T04:44:39
| 2020-03-19T04:44:39
| 186,754,090
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,744
|
py
|
import csv
import os.path
import matplotlib
matplotlib.rcParams.update({'font.size': 10})
from matplotlib import pyplot as plt
import numpy as np

# Average losses over this many iterations per plotted point.
itr_interval = 10
# Ignore CSV rows beyond this iteration index.
max_itr = 2e4
# process.csv columns to plot (one figure per field).
fields = [
    'return-average',
    # 'vf--avg',
    # 'vf1-avg',
    # 'vf2-avg',
    # 'log-pi-mean',
    # 'mean-sq-bellman-error1',
]
itr_name = 'epoch'
# Per-field clipping bounds (unbounded by default).
min_loss = [-np.inf,-np.inf,-np.inf,-np.inf,-np.inf]
max_loss = [np.inf,np.inf,np.inf,np.inf,np.inf]
# Pick one environment from the list by index.
exp_name = ["Hopper","Ant","Walker2d","HalfCheetah","Humanoid",
            "Swimmer","Reacher","SwimmerRllab","HumanoidRllab",
            "InvertedDoublePendulum"][3]
prepath = "./Data/"+exp_name
plot_path = "./Data/"+exp_name
# Algorithm/run directories to compare, one curve each.
policies = [
    "SAC_Gaussiansr5.0",
    "FlowQ7_Gaussiansr5.0cg10.0",
    "FlowQ7_Gaussiansr5.0cg100.0",
    "FlowQ7_Gaussiansr5.0cg1000.0",
]
policy_names = policies
# Matplotlib default color cycle: C0, C1, ...
colors = []
for pid in range(len(policies)):
    colors.append('C'+str(pid))
extra_name = 'FlowQsr'
pre_name = ''
post_name = ''
plot_name = extra_name
# For each field: read process.csv from up to 3 seeds per policy, average the
# losses over itr_interval-sized windows, truncate all curves to the shortest
# seed, and plot mean +/- std as a shaded band. One PDF per field.
for fid,field in enumerate(fields):
    print(field)
    fig = plt.figure(fid,figsize=(5,5))
    legends = []
    plts = []
    for (policy_index,policy) in enumerate(policies):
        policy_path = pre_name+policy+post_name
        Itrs = []
        Losses = []
        min_itr = np.inf
        for trial in range(3):
            file_path = prepath+'/'+policy_path+'/'+'seed'+str(trial)+'/process.csv'
            print(file_path)
            if os.path.exists(file_path):
                print(policy+'_'+str(trial))
                itrs = []
                losses = []
                loss = []
                with open(file_path) as csv_file:
                    # Some runs write NUL bytes into the CSV; strip them
                    # before parsing so csv.reader doesn't choke.
                    if '\0' in open(file_path).read():
                        print("you have null bytes in your input file")
                        csv_reader = csv.reader(x.replace('\0', '') for x in csv_file)
                    else:
                        csv_reader = csv.reader(csv_file, delimiter=',')
                    for (i,row) in enumerate(csv_reader):
                        if i == 0:
                            # Header row: map column name -> index.
                            entry_dict = {}
                            for index in range(len(row)):
                                entry_dict[row[index]] = index
                            # print(entry_dict)
                        else:
                            itr = i-1#int(float(row[entry_dict[itr_name]]))
                            if itr > max_itr:
                                break
                            loss.append(np.clip(float(row[entry_dict[field]]),
                                        min_loss[fid],max_loss[fid]))
                            # Emit one averaged point per itr_interval rows.
                            if itr % itr_interval == 0:
                                itrs.append(itr)
                                loss = np.mean(loss)
                                losses.append(loss)
                                loss = []
                # Track the shortest seed so curves can be aligned.
                if len(losses) < min_itr:
                    min_itr = len(losses)
                Losses.append(losses)
        Losses = [losses[:min_itr] for losses in Losses]
        itrs = itrs[:min_itr]
        Losses = np.array(Losses)
        print(Losses.shape)
        # Mean curve with a +/- one-std shaded band across seeds.
        y = np.mean(Losses,0)
        yerr = np.std(Losses,0)
        plot, = plt.plot(itrs,y,colors[policy_index])
        plt.fill_between(itrs,y+yerr,y-yerr,linewidth=0,
            facecolor=colors[policy_index],alpha=0.3)
        plts.append(plot)
        legends.append(policy_names[policy_index])
    plt.legend(plts,legends,loc='best')
    plt.xlabel('Itr')
    plt.ylabel(field)
    fig.savefig(plot_path+'/'+plot_name+'_'+"_".join(field.split('/'))+'.pdf')
    plt.close(fig)
|
[
"maxiaoba@umich.edu"
] |
maxiaoba@umich.edu
|
506ad697da20580fe7c04f475cd6a9627d12a143
|
3fb0ce33f00b96ae3808a32da44de3e887434afb
|
/.提出一覧/AtCoder/ABC152/b/main.py
|
db01e75914ac864f9d5405badbb44bdf931bfdbe
|
[] |
no_license
|
Yukikazari/kyoupuro
|
ca3d74d8db024b1988cd0ff00bf069ab739783d7
|
343de455c4344dbcfa4524b492f7f6205c9db26f
|
refs/heads/master
| 2023-02-21T01:53:52.403729
| 2021-01-27T03:55:01
| 2021-01-27T03:55:01
| 282,222,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
#!/usr/bin/env python3
# Repeat the smaller number's digit the larger number of times — that is the
# lexicographically smaller of the two possible concatenations.
x, y = map(int, input().split())
if x < y:
    print(str(x) * y)
else:
    print(str(y) * x)
|
[
"haya_nanakusa793@yahoo.co.jp"
] |
haya_nanakusa793@yahoo.co.jp
|
ddcdca81b7542c683b67f1aafaf5ef342f08229f
|
8b9897577d3278e6070bb99f5fcfcc4f49df538e
|
/l10n_be_coda_advanced/wizard/coda_helpers.py
|
d2ae4c5393cc9fd1251b93a8e8ca0967ddef06ea
|
[] |
no_license
|
ilexius/noviat-apps
|
422eb3df8ab4f6b944dcc841d3cf442b8da2aad1
|
84510bab01251d96bcb4a2ed9c14db8737495592
|
refs/heads/8.0
| 2021-01-15T12:37:56.579920
| 2015-03-16T15:39:56
| 2015-03-16T15:39:56
| 35,040,239
| 0
| 0
| null | 2015-05-04T14:42:51
| 2015-05-04T14:42:51
| null |
UTF-8
|
Python
| false
| false
| 3,482
|
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Management Solution
#
# Copyright (c) 2010-now Noviat nv/sa (www.noviat.com).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.base_iban.base_iban import _ref_iban, _format_iban
import time
def calc_iban_checksum(country, bban):
    """Return the two IBAN check digits (mod-97-10) for *country* + *bban*."""
    # Append country code and the placeholder check digits '00'.
    raw = bban.replace(' ', '').upper() + country.upper() + '00'
    # Letters map to numbers: A=10, B=11, ... (ISO 13616).
    digits = ''.join(
        ch if ch.isdigit() else str(ord(ch) - ord('A') + 10)
        for ch in raw
    )
    check = 98 - int(digits) % 97
    # Always two characters, zero-padded.
    return '%02d' % check
def check_bban(country, bban):
    """Validate a BBAN; only Belgian ('BE') accounts are actually checked.

    Returns True/False for country 'BE'.  For any other country the
    function falls through and returns None (falsy), preserving the
    original contract.
    """
    if country == 'BE':
        # A Belgian BBAN must be a 12-digit number.
        try:
            int(bban)
        except (ValueError, TypeError):
            # Narrowed from a bare `except:` so unexpected errors
            # (e.g. KeyboardInterrupt) are no longer swallowed.
            return False
        if len(bban) != 12:
            return False
        return True
def check_iban(iban):
    """
    Check the IBAN number
    Logic partially based upon base_iban module, cf. is_iban_valid method
    """
    normalized = _format_iban(iban).lower()
    country = normalized[:2]
    if country not in _ref_iban:
        return False
    # Length must match the reference IBAN layout for that country.
    if len(normalized) != len(_format_iban(_ref_iban[country])):
        return False
    # Move the leading country code + check digits to the end.
    rearranged = normalized[4:] + normalized[:4]
    # Letters become numbers (a=10, b=11, ...); valid IBANs satisfy mod 97 == 1.
    numeric = ''.join(
        str(ord(c) - 87) if c.isalpha() else c for c in rearranged
    )
    return int(numeric) % 97 == 1
def get_iban_and_bban(number):
    """
    return IBAN and BBAN numbers
    Logic partially based upon base_iban module, cf. get_bban_from_iban method
    """
    # TODO add rules for others countries
    # Per-country offset where the BBAN starts inside the compact IBAN.
    bban_offsets = {
        'be': 4,
        'fr': 14,
        'ch': 9,
        'gb': 14,
    }
    compact = number.replace(' ', '')
    prefix = compact[:2].lower()
    if prefix in bban_offsets:
        return [compact[bban_offsets[prefix]:], compact]
    # Unknown country prefix: only the (compacted) input is returned.
    return [compact]
def repl_special(s):
    """Escape single quotes by doubling them (SQL-style quoting)."""
    return s.replace("'", "''")
def str2date(date_str):
    """Convert a CODA 'ddmmyy' date string to 'YYYY-MM-DD'.

    Returns False when *date_str* cannot be parsed (original contract).
    The bare `except:` was narrowed to the errors strptime can raise.
    """
    try:
        return time.strftime('%Y-%m-%d', time.strptime(date_str, '%d%m%y'))
    except (ValueError, TypeError):
        return False
def str2time(time_str):
    """Insert a colon into an 'HHMM' string, yielding 'HH:MM'."""
    return ':'.join((time_str[:2], time_str[2:]))
def str2float(value):
    """Parse *value* as a float; return 0.0 on any parse failure.

    The parameter was renamed from ``str`` (it shadowed the builtin);
    all in-module callers pass it positionally.  The bare `except:` was
    narrowed to the errors float() can raise.
    """
    try:
        return float(value)
    except (ValueError, TypeError):
        return 0.0
def list2float(lst):
    """Interpret *lst* as a number with three implied decimal places.

    E.g. '123456' -> 123.456 (CODA amounts are stored in milli-units).
    Returns 0.0 for non-sliceable input; str2float already maps
    unparseable strings to 0.0.  The bare `except:` plus inline lambda
    was replaced by a direct slice and a narrowed TypeError handler.
    """
    try:
        return str2float(lst[:-3] + '.' + lst[-3:])
    except TypeError:
        return 0.0
def number2float(s, d):
    """Insert a decimal point *d* digits from the right of string *s*.

    E.g. number2float('12345', 2) == 123.45.  Returns False when the
    result is not a valid float (original contract).  The bare
    `except:` was narrowed to the errors slicing/float() can raise.
    """
    try:
        return float(s[:len(s) - d] + '.' + s[len(s) - d:])
    except (ValueError, TypeError):
        return False
|
[
"luc.demeyer@noviat.com"
] |
luc.demeyer@noviat.com
|
1b2fb14a7985f07866ac3a63d467662a7aec1bae
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/compose/2016/8/setup.py
|
5cb52dae4a36d541c4ac4370ce84ff326b9ec86f
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005
| 2021-01-28T19:40:51
| 2021-01-28T19:40:51
| 306,497,459
| 1
| 1
| null | 2020-11-24T20:56:18
| 2020-10-23T01:18:07
| null |
UTF-8
|
Python
| false
| false
| 2,162
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import codecs
import os
import re
import sys
from setuptools import find_packages
from setuptools import setup
def read(*parts):
    """Read a UTF-8 text file addressed relative to this file's directory."""
    here = os.path.dirname(__file__)
    with codecs.open(os.path.join(here, *parts), encoding='utf-8') as fobj:
        return fobj.read()
def find_version(*file_paths):
    """Extract the ``__version__ = '...'`` assignment from a source file.

    Raises RuntimeError when no such assignment is found.
    """
    source = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                      source, re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# Runtime dependencies; upper bounds guard against breaking API changes
# in transitive releases.
install_requires = [
    'cached-property >= 1.2.0, < 2',
    'docopt >= 0.6.1, < 0.7',
    'PyYAML >= 3.10, < 4',
    'requests >= 2.6.1, < 2.8',
    'texttable >= 0.8.1, < 0.9',
    'websocket-client >= 0.32.0, < 1.0',
    'docker-py >= 1.9.0, < 2.0',
    'dockerpty >= 0.4.1, < 0.5',
    'six >= 1.3.0, < 2',
    'jsonschema >= 2.5.1, < 3',
]


tests_require = [
    'pytest',
]


# Backports needed on Python < 3.4: mock (bundled in unittest from 3.3)
# and enum34 (PEP 435 enums).
if sys.version_info[:2] < (3, 4):
    tests_require.append('mock >= 1.0.1')
    install_requires.append('enum34 >= 1.0.4, < 2')

setup(
    name='docker-compose',
    # Single source of truth: the version is parsed out of compose/__init__.py.
    version=find_version("compose", "__init__.py"),
    description='Multi-container orchestration for Docker',
    url='https://www.docker.com/',
    author='Docker, Inc.',
    license='Apache License 2.0',
    packages=find_packages(exclude=['tests.*', 'tests']),
    include_package_data=True,
    test_suite='nose.collector',
    install_requires=install_requires,
    tests_require=tests_require,
    # Installs the `docker-compose` console script.
    entry_points="""
    [console_scripts]
    docker-compose=compose.cli.main:main
    """,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
    ],
)
|
[
"rodrigosoaresilva@gmail.com"
] |
rodrigosoaresilva@gmail.com
|
8846947bbe4c37c7b7a6464de1a539be071a0adf
|
94579f95eef969aec090f98a3c6841ea341af36f
|
/covids/views.py
|
143c9976747cf3d5c02064a6af21d8ae6e4a5d31
|
[] |
no_license
|
smilejakdu/django_covid
|
d8b10e401dcf6fa7fe65c72e3c2b03f4fe2171d5
|
1092d6798ca30270d3f6c65345063e0376dfbbb8
|
refs/heads/master
| 2022-04-18T12:13:42.451188
| 2020-04-15T19:00:40
| 2020-04-15T19:00:40
| 255,989,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,426
|
py
|
import json , requests
from .models import Covid ,KoreaCovid
from django.views import View
from django.http import HttpResponse, JsonResponse
from django.db.models import Count, Q , Sum
class CovidApiView(View):
    # Read-only JSON API over the Covid (world) and KoreaCovid tables.
    def get(self , request):
        """Return covid rows as JSON, optionally filtered by ?keyword=."""
        try:
            query = request.GET.get('keyword' , None) # optional search keyword over the covid data
            if query :
                # Case-insensitive substring match on area (and, for the
                # world table, also on country).
                world_data = Covid.objects.filter(Q(area__icontains=query) | Q(country__icontains=query)).all()
                korea_data = KoreaCovid.objects.filter(Q(area__icontains=query)).all()
                # NOTE(review): these are row counts, not patient sums --
                # inconsistent with aggregate(Sum('patient')) used in the
                # unfiltered branch below; confirm which is intended.
                world_patient_count = world_data.count()
                korea_patient_count = korea_data.count()
                data = {
                    'country_covid_count' : world_patient_count,
                    'korea_covid_count' : korea_patient_count,
                    'world_covid_data' : [{
                        'id' : world.id,
                        'area' : world.area,
                        'country' : world.country,
                        'patient' : world.patient,
                        'dead' : world.dead,
                    } for world in world_data],
                    'korea_covid_data' : [{
                        'id' : korea.id,
                        'area' : korea.area,
                        'patient' : korea.patient,
                    } for korea in korea_data]
                }
                return JsonResponse({"data" : data},status=200)
            # No keyword: dump both tables plus total patient counts.
            country_covid = Covid.objects.values()
            korea_covid = KoreaCovid.objects.values()
            korea_covid_count = KoreaCovid.objects.all().aggregate(Sum('patient'))
            country_covid_count = Covid.objects.all().aggregate(Sum('patient'))
            return JsonResponse({'data' : {
                'country_covid_count' : country_covid_count,
                'korea_covid_count' : korea_covid_count,
                'world_covid_data' : list(country_covid),
                'korea_covid_data' : list(korea_covid),
            }}, status=200)
        except Covid.DoesNotExist:
            return JsonResponse({'message': 'Not found'}, status=400)
        except TypeError:
            return JsonResponse({'message': 'error'}, status=400)
|
[
"ash982416@gmail.com"
] |
ash982416@gmail.com
|
0d5e647977a66fe9030108befd680373845aee95
|
25dda94672497e3287a7403e283fb279ad171b79
|
/boj/11054 가장 긴 바이토닉 부분수열.py
|
458adceb31b78b34b5a3924ec2c09a7a983434dd
|
[] |
no_license
|
woorud/Algorithm
|
c94b844e8c96a446c5fdee5c0abb159bfee384d7
|
f5b8e3cf0aea7fc4400e6f5bb0c1531fad93e541
|
refs/heads/master
| 2023-02-23T13:53:28.645036
| 2021-01-29T12:24:23
| 2021-01-29T12:24:23
| 230,908,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 529
|
py
|
# BOJ 11054: length of the longest bitonic subsequence
# (strictly increasing, then strictly decreasing).
n = int(input())
num_list = list(map(int, input().split()))

# dpi[i]: longest strictly increasing subsequence ending at i.
dpi = [0 for i in range(n)]
# dpd[i]: longest strictly decreasing subsequence starting at i
# (an increasing subsequence when scanned from the right).
dpd = [0 for i in range(n)]

for i in range(n):
    dpi[i] = 1
    for j in range(i):
        if num_list[j] < num_list[i]:
            dpi[i] = max(dpi[i], dpi[j] + 1)

for i in range(n - 1, -1, -1):
    dpd[i] = 1
    for j in range(n - 1, i, -1):
        if num_list[j] < num_list[i]:
            dpd[i] = max(dpd[i], dpd[j] + 1)

cnt = 0
for i in range(n):
    # The peak element i is counted in both dpi and dpd, hence the -1.
    if cnt < dpi[i] + dpd[i] - 1:
        cnt = dpi[i] + dpd[i] - 1

# Bug fix: print only the answer.  The original printed the whole DP
# tables as well (`print(dpi, dpd, cnt)`), which a judge would reject.
print(cnt)
|
[
"woorud96@gmail.com"
] |
woorud96@gmail.com
|
e77a6ff25e6a3e458ac0d9464dce70336b45709a
|
88a2f57b7d660228ca1ac922f0f582910bcacb3d
|
/algorithm/problem/AD sol/AD_3day_솔루션_김은경/주사위던지기1.py
|
d6af23c180c38cbdd427b3caf0f2b150d4c6d32b
|
[] |
no_license
|
chelseashin/TIL
|
adc5ed0bd4ba084e85b74baa9699096a7af5585e
|
376b56844985b3ff43b94fa18086a449e6deac69
|
refs/heads/master
| 2022-12-10T02:13:39.680936
| 2020-11-19T13:18:30
| 2020-11-19T13:18:30
| 162,103,813
| 2
| 0
| null | 2022-12-08T04:53:38
| 2018-12-17T09:11:23
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,380
|
py
|
import sys
# Redirect stdin so the input() call below reads from the local test file.
sys.stdin = open("in.txt")

def DFS1(no): # M=1 : permutations of the faces, repetition allowed
    # Prints every length-N sequence of die faces (6**N lines).
    if no>N:
        for i in range(1, N+1): print(rec[i], end=' ')
        print()
        return
    for i in range(1, 7): # each face value
        rec[no]=i # record the face chosen for position `no`
        DFS1(no+1)

def DFS3(no): # M=3 : permutations without repetition
    if no>N:
        for i in range(1, N+1): print(rec[i], end=' ')
        print()
        return
    for i in range(1, 7): # each face value
        if chk[i]: continue
        chk[i] =1
        rec[no]=i # record the face
        DFS3(no+1)
        chk[i]=0

def DFS2(no, start): # M=2 : combinations with repetition
    if no > N:
        for i in range(1, N + 1): print(rec[i], end=' ')
        print()
        return
    for i in range(start, 7): # each face value, non-decreasing order
        rec[no] = i # record the face
        DFS2(no + 1, i)

def DFS4(no, start): # M=4 : combinations (strictly increasing)
    if no > N:
        for i in range(1, N + 1): print(rec[i], end=' ')
        print()
        return
    for i in range(start, 7): # each face value
        rec[no] = i # record the face
        DFS4(no + 1, i+1)

#main -----------------------------
N, M = map(int, input().split())
rec =[0]*(N+1) # rec[1..N]: faces chosen so far
chk =[0]*7 # face-used flags (only needed by DFS3)
if M==1: DFS1(1) # permutations with repetition, start at die #1
elif M==3: DFS3(1) # permutations without repetition
elif M==2: DFS2(1, 1) # combinations with repetition: die #1, face 1
elif M==4: DFS4(1,1)
|
[
"chaewonshin95@gmail.com"
] |
chaewonshin95@gmail.com
|
3bc1353a645d0c71c621ab33cb88163e3daf3652
|
d3eea2056dd9798938162b07bee105751943f2ed
|
/install.py
|
ce2865f02442894e29996b04e54956d9ce809278
|
[] |
no_license
|
ExpLangcn/VulScanner
|
193ce6591a7b77774d6eab1dc5d2fc95e21f92fb
|
b249e00cacaff42d6eb99e3f4e60532dcf3416ff
|
refs/heads/main
| 2023-06-20T11:29:02.677327
| 2021-07-20T01:04:07
| 2021-07-20T01:04:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,020
|
py
|
import configparser
import os
import pymysql
from django.conf import settings

# Minimal Django settings bootstrap (no custom values are supplied).
settings.configure()

conf = configparser.ConfigParser()
# config.ini is expected in the current working directory.
conf.read(os.getcwd() + "/" + "config.ini")

mysql_config = { # for mysql and postgresql
    'host': conf.get('global', 'ip'),
    'port': int(conf.get('global', 'port')),
    'user': conf.get('global', 'uname'),
    'password': conf.get('global', 'passwd'),
    'database': conf.get('global', 'table'),
    "connect_timeout": 1
}

def exec_sql(conn, sql):
    # Unused stub; statements are executed inline in the __main__ block.
    pass

if __name__ == '__main__':
    # Replay poc.sql line by line against the configured MySQL database.
    sql_file = open("poc.sql", "rb")
    try:
        conn = pymysql.connect(**mysql_config)
        cursor = conn.cursor()
        for i in sql_file:
            result = (cursor.execute(i.strip().decode()))
            if not result == 1:
                # NOTE(review): a failing statement only breaks the loop;
                # the partial batch is still committed below.
                print("[-]execute sql fail")
                break
        conn.commit()
        conn.close()
        print("[+]install pocs success")
    except Exception as e:
        # NOTE(review): any error (not only connection failures) lands
        # here, and the bound exception `e` is never reported.
        print("[-]can't connect to mysql")
|
[
"2998430232@qq.com"
] |
2998430232@qq.com
|
68f6b2841c0268d392a9d100f17394c1db3517a7
|
5163470734c20167148271381c549dadf30dc846
|
/setup.py
|
053623d127c358077949c873db5878f419fc85ee
|
[] |
no_license
|
fabiommendes/uritool
|
2c9a590da5bafa2d268fa1990df44803d760b7a4
|
224dfc33fa65f40e90b0ac61b27a1199439e1a28
|
refs/heads/master
| 2016-08-11T06:21:39.424548
| 2015-12-03T22:15:05
| 2015-12-03T22:15:05
| 46,719,564
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,454
|
py
|
import os
import setuptools
from setuptools import setup

VERSION = '0.1.1'
AUTHOR = 'Fábio Macêdo Mendes'

# Save version in __meta__ file
# NOTE: running (or importing) this setup.py rewrites src/uritool/meta.py
# so the installed package can report its own version/author at runtime.
with open(os.path.join('src', 'uritool', 'meta.py'), 'w') as F:
    F.write(
        '# -*- coding: utf-8 -*-\n'
        '# This is an autogenerated file! Do not edit.\n'
        '__version__ = %r\n'
        '__author__ = %r\n' % (VERSION, AUTHOR)
    )

#
# Main configuration script
#
setup(
    name='uritool',
    version=VERSION,
    description='Extract data from the URI Online Judge website at https://www.urionlinejudge.com.br/',
    author='Fábio Macêdo Mendes',
    author_email='fabiomacedomendes@gmail.com',
    url='https://github.com/fabiommendes/uritool/',
    long_description=(
        r'''Extract data from URI website and grade the submissions by your
students.'''),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries',
    ],
    package_dir={'': 'src'},
    packages=setuptools.find_packages('src'),
    license='GPL',
    install_requires=['lxml', 'requests', 'pandas', 'numpy'],
    #
    # Scripts
    #
    entry_points={
        'console_scripts': ['uritool = uritool.__main__:main'],
    },
    zip_safe=False,
)
|
[
"fabiomacedomendes@gmail.com"
] |
fabiomacedomendes@gmail.com
|
0b1340655bdd9f4c9193f15b06aeb6d4345b1142
|
89cb758310a5319f4b4ce88ae4339e4a486cf3af
|
/app_controllers/infrastructure/kubernetes-deployments/services/nginx-modsecurity/02_service.py
|
25f5a05fc161c277101dd375f26110e8330e008a
|
[] |
no_license
|
cavalrytactics/securethebox-server-legacy
|
df843c4bea87dfee139cf2661f680af2f3b9af4e
|
ded8dc68bfcaceee6b626f01d2d03c606155da06
|
refs/heads/master
| 2022-04-04T13:21:28.813670
| 2020-02-08T20:56:04
| 2020-02-08T20:56:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
import sys

def writeConfig(**kwargs):
    """Render the Kubernetes Service manifest template and write it to disk.

    NOTE(review): the output path is built from sys.argv, not from the
    kwargs, so the function only works when invoked through the CLI entry
    point below (argv[1]=cluster, argv[2]=service, argv[3]=user) --
    confirm before reusing it programmatically.
    """
    template = """
apiVersion: v1
kind: Service
metadata:
  name: {serviceName}-{userName}
  annotations:
    external-dns.alpha.kubernetes.io/hostname: {serviceName}-{userName}.{clusterName}.securethebox.us
spec:
  selector:
    app: {serviceName}-{userName}
  ports:
  - name: http
    targetPort: 80
    port: 80
  - name: cloudcmd
    targetPort: 9000
    port: 9000
"""
    with open('./app_controllers/infrastructure/kubernetes-deployments/services/'+str(sys.argv[2])+'/02_service-'+str(sys.argv[1])+'-'+str(sys.argv[2])+'-'+str(sys.argv[3])+'.yml', 'w') as yfile:
        yfile.write(template.format(**kwargs))

if __name__ == "__main__":
    # CLI: python 02_service.py <cluster> <service> <user>
    writeConfig(clusterName=str(sys.argv[1]),serviceName=str(sys.argv[2]),userName=str(sys.argv[3]))
|
[
"charleschong@Charless-MacBook-Pro.local"
] |
charleschong@Charless-MacBook-Pro.local
|
2366599f0e567f608fa33005f7ca20f30c303877
|
462c56e7454c97e0541588b9be66a4e216ea20fd
|
/119.pascals-triangle-ii.py
|
7fd4a9b31ba4be874c9ab04d023f055a5b8a5048
|
[] |
no_license
|
LouisYLWang/leetcode_python
|
d5ac6289e33c5d027f248aa3e7dd66291354941c
|
2ecaeed38178819480388b5742bc2ea12009ae16
|
refs/heads/master
| 2020-05-27T08:38:48.532000
| 2019-12-28T07:08:57
| 2019-12-28T07:08:57
| 188,549,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
#
# @lc app=leetcode id=119 lang=python3
#
# [119] Pascal's Triangle II
#
class Solution:
    def getRow(self, rowIndex: int) -> list[int]:
        """Return row *rowIndex* (0-based) of Pascal's triangle.

        Rewritten iteratively (the recursive original built every prior
        row's call frame), and the annotation was changed from the
        un-imported ``List[int]`` -- which raised NameError at class
        definition time outside LeetCode's harness -- to ``list[int]``.
        """
        row = [1]
        for _ in range(rowIndex):
            # Next row: 1, pairwise sums of the current row, 1.
            row = [1] + [row[i] + row[i + 1] for i in range(len(row) - 1)] + [1]
        return row
|
[
"louis.yl.wang@outlook.com"
] |
louis.yl.wang@outlook.com
|
98b26fe08f7c83923cba3d13f8204b817a4a9ce8
|
2581f2c98d497a6adf9bbb62730b02efea08cf80
|
/stubs/scales/meter.pyi
|
cc7fd7bfd725f7e6a9b3eac07b4e7b0c4866789b
|
[] |
no_license
|
drewp/rdfdb
|
1ebbb5cf892fd86f6e3c571b94a97ecd07dd7340
|
8c71f02f989b2de1a4921640d1ca765e6d9efdb6
|
refs/heads/master
| 2021-04-27T00:31:22.493060
| 2019-08-09T06:15:15
| 2019-08-09T06:15:15
| 123,776,296
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 807
|
pyi
|
# Stubs for scales.meter (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from collections import UserDict
from greplin.scales import Stat
from typing import Any, Optional
TICKERS: Any
TICKER_THREAD: Any
# NOTE(review): auto-generated type stubs (stubgen); method semantics are
# inferred from names only -- verify against greplin.scales.meter.
class MeterStatDict(UserDict):
    def __init__(self) -> None: ...
    def __getitem__(self, item: Any): ...
    # tick() presumably advances the time-based rates; mark() records events.
    def tick(self) -> None: ...
    def mark(self, value: int = ...) -> None: ...

class MeterStat(Stat):
    def __init__(self, name: Any, _: Optional[Any] = ...) -> None: ...
    # Descriptor protocol: assignment to the stat attribute is intercepted.
    def __set__(self, instance: Any, value: Any) -> None: ...

class MeterDict(UserDict):
    parent: Any = ...
    instance: Any = ...
    def __init__(self, parent: Any, instance: Any) -> None: ...
    def __getitem__(self, item: Any): ...

class MeterDictStat(Stat): ...
|
[
"drewp@bigasterisk.com"
] |
drewp@bigasterisk.com
|
e829bbaaca6fdc6a724b41833d5f5934453f1b83
|
75e951dcf749f62f2a292774968fe95fc4a353c8
|
/boa3/neo/__init__.py
|
d2af9ae09d866dbb6fe33b73fd8e4d6db9a71ff1
|
[
"Apache-2.0"
] |
permissive
|
jplippi/neo3-boa
|
e0a199d1ed2fa39abe09ebd3c013c360ca87f544
|
052be4adebb665113715bb80067d954f7ad85ad5
|
refs/heads/development
| 2022-08-19T10:17:43.610854
| 2020-05-25T20:30:42
| 2020-05-25T20:30:42
| 265,959,419
| 0
| 0
|
Apache-2.0
| 2020-05-25T20:39:59
| 2020-05-21T21:54:24
|
Python
|
UTF-8
|
Python
| false
| false
| 727
|
py
|
def to_script_hash(data_bytes: bytes) -> bytes:
    """
    Converts a data to a script hash.

    :param data_bytes: data to hash.
    :type data_bytes: bytearray or bytes
    :return: the scripthash of the data
    :rtype: bytes
    """
    # Local import avoids a circular import between boa3.neo and
    # boa3.neo.cryptography at module load time (pattern kept as-is).
    from boa3.neo import cryptography
    # hash160 = RIPEMD-160 over SHA-256, per the cryptography helper.
    return cryptography.hash160(data_bytes)
def to_hex_str(data_bytes: bytes) -> str:
    """
    Converts bytes into its string hex representation, in reversed
    (little-endian style) byte order.

    :param data_bytes: data to represent as hex.
    :type data_bytes: bytearray or bytes
    :return: the hex representation of the data
    :rtype: str
    """
    # Bug fix: always work on a copy.  The original only copied `bytes`
    # input and reversed a caller-supplied bytearray IN PLACE, mutating
    # the argument as a side effect.
    reversed_bytes = bytearray(data_bytes)
    reversed_bytes.reverse()
    return reversed_bytes.hex()
|
[
"mirellamedeiros.09@hotmail.com"
] |
mirellamedeiros.09@hotmail.com
|
399e58269bd4f66695f421807152aa52627ff652
|
82a9077bcb5a90d88e0a8be7f8627af4f0844434
|
/google-cloud-sdk/lib/tests/unit/surface/storage/list_test.py
|
eb6c5f661193d257bacafa028af1ae6c5b203704
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
piotradamczyk5/gcloud_cli
|
1ae2553595e569fad6ce84af62b91a7ee5489017
|
384ece11040caadcd64d51da74e0b8491dd22ca3
|
refs/heads/master
| 2023-01-01T23:00:27.858583
| 2020-10-21T04:21:23
| 2020-10-21T04:21:23
| 290,238,061
| 0
| 0
| null | 2020-10-19T16:43:36
| 2020-08-25T14:31:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,158
|
py
|
# Lint as: python3
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit Tests for the gcloud storage list command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base as calliope_base
from tests.lib import test_case
from tests.lib.surface.storage.gcs_api_unit_test_base import GcsApiUnitTestBase
class ListTestAlpha(GcsApiUnitTestBase):
  """Test cases for features in Alpha.

  When a feature moves to beta, move the corresponding tests to a superclass of
  this one where self.track = calliope_base.ReleaseTrack.BETA, details here:
  go/gcloud-test-howto#how-to-test-multiple-release-tracks.
  This will ensure that tests from past releases run for the alpha release.
  """

  def PreSetUp(self):
    # Pin the command surface to the alpha release track.
    self.track = calliope_base.ReleaseTrack.ALPHA

  def SetUp(self):
    # Stub one bucket with three objects and queue the expected List
    # request/response pair on the mocked GCS client.
    self.bucket_name = 'bucket1'
    self.object_names = ['file0', 'file1', 'asdf']
    self.bucket_contents = self.messages.Objects(
        items=[self.messages.Object(name=i) for i in self.object_names])
    self.client.objects.List.Expect(
        self.messages.StorageObjectsListRequest(bucket=self.bucket_name),
        self.bucket_contents)

  def test_list_bucket(self):
    # `storage list` should yield one path per stubbed object, in any order.
    observed = self.Run('storage list gs://' + self.bucket_name)
    observed_paths = [i['path'] for i in observed]
    expected_paths = [
        'gs://%s/%s' % (self.bucket_name, i) for i in self.object_names
    ]
    self.assertCountEqual(observed_paths, expected_paths)


if __name__ == '__main__':
  test_case.main()
|
[
"code@bootstraponline.com"
] |
code@bootstraponline.com
|
802caa784aff470ce4839a56761cdd49d43d388a
|
06b2eed882d8d7034fb7c57b648d5aa37d7f617b
|
/pycharmproject/爬虫/菜鸡/gifduoxiancheng.py
|
4c717acdfc70ad2136f7dce63eb021d9e85d0c52
|
[] |
no_license
|
1974410167/python_projects
|
558e2e7a4ea66b083ebd6d2f808b725e1bd153d6
|
81a97cbf41de12bdc3dbc46edf2931075ac4f758
|
refs/heads/main
| 2023-06-22T09:41:22.038620
| 2023-06-09T15:09:44
| 2023-06-09T15:09:44
| 355,036,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,002
|
py
|
import requests
import queue
import time
from bs4 import BeautifulSoup
import threading
def g_url(q1):
    """Producer thread: scrape gallery pages and push image URLs onto *q1*.

    Walks page ids <i>_<j>.html on youquba.net, extracts every
    `p a img` src via BeautifulSoup, and stops after 10 URLs total.
    """
    number=0
    for i in range(46300,46391):
        if number==10:
            break
        for j in range(0,10):
            if number==10:
                break
            url = "https://www.youquba.net/xieedongtaitu/2017/1217/"
            url=url+str(i)+"_"+str(j)+".html"
            headers={
                "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"
            }
            r=requests.get(url,headers=headers)
            r.encoding = "utf-8"
            if r.status_code==200:
                soup=BeautifulSoup(r.text,"lxml")
                b=soup.select("p a img")
                for n in b:
                    q1.put(n.get("src"))
                    number=number+1
                    print(f"获得第{number}个URL成功!")
                    if number==10:
                        break
    print(f"获取{number}个URL完毕")
def down_gif(q1):
    """Consumer thread: drain image URLs from *q1* and save them to D://gif//.

    NOTE(review): the loop exits as soon as the queue is momentarily
    empty, so it can finish before the producer has queued anything
    (pre-existing race, behavior kept).
    """
    number2 = 0
    root = "D://gif//"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"
    }
    while not q1.empty():
        m = q1.get()
        try:
            r1 = requests.get(m, headers=headers, timeout=0.3)
        except requests.exceptions.RequestException as exc:
            # Bug fix: the original caught urllib's `error.URLError`, but
            # `error` was never imported and requests raises its own
            # exception hierarchy -- any network failure was a NameError
            # that killed the thread.
            print(exc)
        else:
            if r1.status_code == 200:
                path = root + m.split("/")[-1]
                with open(path, "wb") as f:
                    f.write(r1.content)
                number2 = number2 + 1
                print(f"写入第{number2}个文件成功!")
if __name__=="__main__":
    # Producer/consumer over a shared queue: g_url fills it while
    # down_gif drains it.
    # NOTE(review): down_gif returns as soon as the queue is momentarily
    # empty, so it may terminate before g_url has produced anything.
    q1=queue.Queue()
    t1=threading.Thread(target=g_url,args=(q1,))
    t2=threading.Thread(target=down_gif,args=(q1,))
    t1.start()
    t2.start()
    t1.join()
    t2.join()
|
[
"1974410167@qq.com"
] |
1974410167@qq.com
|
2e7a302f37e01983e5cce2dbf5631abfb32dbcac
|
e811662c890217c77b60aa2e1295dd0f5b2d4591
|
/pinduoduo.py
|
1e343affa54327a68a706b640607c00d12be0d0f
|
[] |
no_license
|
rewonderful/MLC
|
95357f892f8cf76453178875bac99316c7583f84
|
7012572eb192c29327ede821c271ca082316ff2b
|
refs/heads/master
| 2022-05-08T05:24:06.929245
| 2019-09-24T10:35:22
| 2019-09-24T10:35:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,529
|
py
|
# #!/usr/bin/env python
# # _*_ coding:utf-8 _*_
# import random
# # def partition(nums,l,r):
# # pivot = nums[l]
# # while l<r:
# # while l<r and nums[r] >= pivot:
# # r -= 1
# # nums[l] = nums[r]
# # while l < r and nums[l] < pivot:
# # l += 1
# # nums[r] = nums[l]
# # nums[l] = pivot
# # return l
#
#
# def quick_sort(nums,lo,hi):
# if lo < hi:
# l ,r = lo,hi
# pivot = nums[l]
# while l<r:
# while l<r and nums[r] >= pivot:
# r -= 1
# nums[l] = nums[r]
# while l < r and nums[l] < pivot:
# l += 1
# nums[r] = nums[l]
# nums[l] = pivot
# quick_sort(nums,lo,l-1)
# quick_sort(nums,l+1,hi)
# if __name__ == '__main__':
# n = 50
# print("BEFORE")
# nums = [ random.randrange(n) for _ in range(n//2)]
# print(nums)
# quick_sort(nums,0,len(nums)-1)
# print('AFTER')
# print(nums)
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
# def binarySearch(nums,target):
# low = 0
# high = len(nums)-1
#
# while(low <= high):
# mid = int((low + high) / 2)
#
# if target == nums[mid]:
# return mid
# if target > nums[mid]:
# low = mid+1
# else:
# high = mid - 1
# return 0
# def binarySearch(nums,target):
# lo ,hi = 0,len(nums)-1
# while lo <= hi:
# mid = (lo+hi)//2
# if mid == target:
# return mid
# elif nums[mid] < target:
# lo = mid + 1
# else:
# hi = mid - 1
# return -1
#
#
# if __name__ == '__main__':
# nums = [1,2,3,4,5,9,11,13,222,333,444,555]
# target = 5
# print(binarySearch(nums,target))
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
def merge(nums):
    """Merge sort: return a sorted copy of *nums* (the input is not mutated)."""
    if len(nums) < 2:
        # Base case: 0 or 1 elements are already sorted.
        return nums
    half = len(nums) // 2
    return mergesort(merge(nums[:half]), merge(nums[half:]))

def mergesort(left, right):
    """Merge two sorted lists into one sorted list (the 'merge' step)."""
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # At most one of these slices is non-empty; extending with an empty
    # slice is a no-op, so no guard is needed.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
if __name__ == '__main__':
    # Smoke test: sort a reversed range.
    print(merge([9,8,7,6,5,4,3,2,1,0]))
#print(mergesort([1,5,7],[2,4,6]))
|
[
"457261336@qq.com"
] |
457261336@qq.com
|
b4f8fae687ffc6edc12f388af9fc9e0d9dd49822
|
f28adfe93e04efb3d915965bc5339b25324f5d8c
|
/19_df2dict.py
|
31929424b98dc2cf9ce62980489b56e36b778983
|
[] |
no_license
|
aihill/start_Pandas
|
57059f8fec9b0fc74d39eaae2744dda0a82e8b08
|
ac1ee9a67f6fb05841258dbfcfe2c980059301c1
|
refs/heads/master
| 2022-04-20T17:03:19.167668
| 2017-08-28T03:09:06
| 2017-08-28T03:09:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,083
|
py
|
# Demonstrates DataFrame.to_dict with its two most common orientations.
import pandas as pd

# Column-oriented source data; 'song' / 2000 appears twice on purpose,
# so the frame contains a duplicate row (index 0 and 6).
raw_data = {
    'name': ['song', 'park', 'kim', 'na', 'won', 'jun', 'song'],
    'residence': ['seoul', 'seoul', 'busan', 'busan', 'incheon', 'incheon', 'seoul'],
    'univ': ['seoul', 'chungang', 'korea', 'hanyang', 'seoul', 'kaist', 'seoul'],
    'score': [90, 99, 96, 78, 70, 100, 90],
    'student_number': [2000, 2001, 2002, 2003, 2004, 2005, 2000],
    'sex': ['male', 'male', 'female', 'female', 'male', 'male', 'male'],
}

# Fix the column order explicitly when building the frame.
df = pd.DataFrame(
    raw_data,
    columns=['student_number', 'name', 'sex', 'residence', 'univ', 'score'],
)
'''
student_number name sex residence univ score
0 2000 song male seoul seoul 90
1 2001 park male seoul chungang 99
2 2002 kim female busan korea 96
3 2003 na female busan hanyang 78
4 2004 won male incheon seoul 70
5 2005 jun male incheon kaist 100
6 2000 song male seoul seoul 90
'''

# DataFrame -> dict, default orient='dict': {column -> {index -> value}}.
print(df.to_dict())
'''
{'student_number': {0: 2000, 1: 2001, 2: 2002, 3: 2003, 4: 2004, 5: 2005, 6: 2000},
'name': {0: 'song', 1: 'park', 2: 'kim', 3: 'na', 4: 'won', 5: 'jun', 6: 'song'},
'sex': {0: 'male', 1: 'male', 2: 'female', 3: 'female', 4: 'male', 5: 'male', 6: 'male'},
'residence': {0: 'seoul', 1: 'seoul', 2: 'busan', 3: 'busan', 4: 'incheon', 5: 'incheon', 6: 'seoul'},
'univ': {0: 'seoul', 1: 'chungang', 2: 'korea', 3: 'hanyang', 4: 'seoul', 5: 'kaist', 6: 'seoul'},
'score': {0: 90, 1: 99, 2: 96, 3: 78, 4: 70, 5: 100, 6: 90}}
'''

# orient='list': {column -> [values]}.
print(df.to_dict('list'))
'''
{'student_number': [2000, 2001, 2002, 2003, 2004, 2005, 2000],
'name': ['song', 'park', 'kim', 'na', 'won', 'jun', 'song'],
'sex': ['male', 'male', 'female', 'female', 'male', 'male', 'male'],
'residence': ['seoul', 'seoul', 'busan', 'busan', 'incheon', 'incheon', 'seoul'],
'univ': ['seoul', 'chungang', 'korea', 'hanyang', 'seoul', 'kaist', 'seoul'],
'score': [90, 99, 96, 78, 70, 100, 90]}
'''
|
[
"win778500@gmail.com"
] |
win778500@gmail.com
|
15ed0f8428b2d1f891f40718268c67e0d7c67e20
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_155/2278.py
|
9906776748b33be02c1af34a2e169e7c60b0f786
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
#!/usr/bin/env python3
# Code Jam 2015 Qual A "Standing Ovation": minimum number of friends
# (who always stand) to invite so the whole audience ends up standing.
#
# Bug fix: the shebang says python3 but the original used Python-2
# `print` statements, so the script was a SyntaxError under python3.
ip = open("A2.in", 'r')
no = int(ip.readline())  # number of test cases (unused beyond the header)
case_no = 0
for line in ip:
    case_no += 1
    # Each line: "<smax> <shyness-digit-string>"; shy[i] is the number of
    # audience members with shyness level i.
    smax_str, shy = line.split(' ', 1)
    smax = int(smax_str)
    minf = 0              # friends invited so far
    count = int(shy[0])   # people standing so far (shyness level 0)
    for i in range(1, smax + 1):
        x = int(shy[i])
        if count < i:
            # Not enough standers to trigger shyness level i: invite
            # just enough friends to reach i.
            extraf = i - count
            count += extraf
            minf += extraf
        count += x
    print("Case #" + str(case_no) + ": " + str(minf))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
d35d195c09580ae5b7a11b85392eb618093b5bd9
|
b5fbc01deb2060b2222f885fca0433844a9e7cd1
|
/web/lib/python3.6/site-packages/coreapi/codecs/python.py
|
d0bebf645b79b6bea9fa954a1c5cc1ec426eb4c1
|
[] |
no_license
|
Carlosdher/reposicao
|
50973b15f8a2bd3a5a6b83b06efe0050f612bb83
|
71ef93e694888e54c79e98e8568c3417ee82ec96
|
refs/heads/master
| 2020-03-18T04:13:59.493126
| 2018-08-02T13:06:55
| 2018-08-02T13:06:55
| 134,277,105
| 2
| 0
| null | 2018-07-27T19:20:36
| 2018-05-21T14:01:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,784
|
py
|
# Note that `DisplayCodec` is deliberately omitted from the documentation,
# as it is considered an implementation detail.
# It may move into a utility function in the future.
from __future__ import unicode_literals
from coreapi.codecs.base import BaseCodec
from coreapi.document import Document, Link, Array, Object, Error, Field
def _to_repr(node):
    """Recursively render a coreapi *node* as evaluable Python source.

    Dispatches on the concrete node type; plain values fall through to
    the builtin repr at the bottom.
    """
    if isinstance(node, Document):
        # Document(url=..., title=..., content={...}) with mapping content.
        content = ', '.join([
            '%s: %s' % (repr(key), _to_repr(value))
            for key, value in node.items()
        ])
        return 'Document(url=%s, title=%s, content={%s})' % (
            repr(node.url), repr(node.title), content
        )
    elif isinstance(node, Error):
        content = ', '.join([
            '%s: %s' % (repr(key), _to_repr(value))
            for key, value in node.items()
        ])
        return 'Error(title=%s, content={%s})' % (
            repr(node.title), content
        )
    elif isinstance(node, Object):
        # Mapping node: dict-literal syntax.
        return '{%s}' % ', '.join([
            '%s: %s' % (repr(key), _to_repr(value))
            for key, value in node.items()
        ])
    elif isinstance(node, Array):
        # Sequence node: list-literal syntax.
        return '[%s]' % ', '.join([
            _to_repr(value) for value in node
        ])
    elif isinstance(node, Link):
        # Link: emit only the keyword arguments that are actually set.
        args = "url=%s" % repr(node.url)
        if node.action:
            args += ", action=%s" % repr(node.action)
        if node.encoding:
            args += ", encoding=%s" % repr(node.encoding)
        if node.transform:
            args += ", transform=%s" % repr(node.transform)
        if node.description:
            args += ", description=%s" % repr(node.description)
        if node.fields:
            fields_repr = ', '.join(_to_repr(item) for item in node.fields)
            args += ", fields=[%s]" % fields_repr
        return "Link(%s)" % args
    elif isinstance(node, Field):
        # Field: shorthand (just the name) when nothing else is set.
        args = repr(node.name)
        if not node.required and not node.location:
            return args
        if node.required:
            args += ', required=True'
        if node.location:
            args += ', location=%s' % repr(node.location)
        if node.description:
            args += ', description=%s' % repr(node.description)
        return 'Field(%s)' % args
    # Plain value: defer to the builtin repr.
    return repr(node)
class PythonCodec(BaseCodec):
    """
    A Python representation of a Document, for use with '__repr__'.
    """
    media_type = 'text/python'

    def encode(self, document, **options):
        """Render *document* as Python source text (options are ignored)."""
        # Object and Array only have the class name wrapper if they
        # are the outermost element; nested ones are rendered as plain
        # dict/list literals by _to_repr.
        if isinstance(document, Object):
            return 'Object(%s)' % _to_repr(document)
        elif isinstance(document, Array):
            return 'Array(%s)' % _to_repr(document)
        return _to_repr(document)
|
[
"carlosabc436@gmail.com"
] |
carlosabc436@gmail.com
|
beddfd8cd214a7b078fb1134232bc8c211a8892c
|
1c43f97456f3cab00067932dfbd971c22e91267e
|
/rqt_yn_btn/setup.py
|
ebca86f4e2fab0109e87d3f3f2bb239780f922e3
|
[] |
no_license
|
m1a1k0o/2014-semi
|
661cc2692d70eadaed6a4d11ef85a9ac20914cb7
|
d57f088bf6b1eeed7e5d14b42034a70517281293
|
refs/heads/master
| 2021-01-15T21:15:26.915602
| 2015-03-25T09:17:36
| 2015-03-25T09:17:36
| 32,906,069
| 0
| 0
| null | 2015-03-26T03:44:15
| 2015-03-26T03:44:15
| null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['rqt_yn_btn'],
package_dir={'': 'src'},
requires=['std_msgs', 'rospy']
)
setup(**setup_args)
|
[
"www.kentaro.wada@gmail.com"
] |
www.kentaro.wada@gmail.com
|
af41e25e54049b6a7d90ced32ac25553bd20a520
|
1fbb4d511dd15490ab70bd85cc404dab06a7e37c
|
/model.py
|
0bef3b02a2b12da4e4510d201b0445ac1d872b5a
|
[] |
no_license
|
aboerzel/udacity-deep-reinforcement-learning-p2-continuous-control
|
7f32f3cd444d3ed5e4f255c4bc9642e6469209bc
|
0416434749b7758187a2306833d328eba9150668
|
refs/heads/master
| 2023-03-01T18:29:04.285342
| 2021-02-06T11:15:32
| 2021-02-06T11:15:32
| 333,823,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,116
|
py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1. / np.sqrt(fan_in)
return -lim, lim
class Actor(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=200, fc2_units=200): # fc1_units=400, fc2_units=300
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super(Actor, self).__init__()
self.seed = torch.manual_seed(seed)
self.bn1 = nn.BatchNorm1d(state_size) # BN added
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-3e-3, 3e-3)
def forward(self, state):
"""Build an actor (policy) network that maps states -> actions."""
x = self.bn1(state) # BN added
x = F.leaky_relu(self.fc1(x))
x = F.leaky_relu(self.fc2(x))
# x = F.relu(self.fc1(x))
# x = F.relu(self.fc2(x))
return torch.tanh(self.fc3(x))
class Critic(nn.Module):
"""Critic (Value) Model."""
def __init__(self, state_size, action_size, seed, fcs1_units=200, fc2_units=200): # fc1_units=400, fc2_units=300
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fcs1_units (int): Number of nodes in the first hidden layer
fc2_units (int): Number of nodes in the second hidden layer
"""
super(Critic, self).__init__()
self.seed = torch.manual_seed(seed)
self.bn1 = nn.BatchNorm1d(state_size) # BN added
self.fcs1 = nn.Linear(state_size, fcs1_units)
self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units)
self.fc3 = nn.Linear(fc2_units, 1)
self.reset_parameters()
def reset_parameters(self):
self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-3e-3, 3e-3)
def forward(self, state, action):
"""Build a critic (value) network that maps (state, action) pairs -> Q-values."""
xs = self.bn1(state) # BN added
# xs = F.relu(self.fcs1(xs))
xs = F.leaky_relu(self.fcs1(xs))
x = torch.cat((xs, action), dim=1)
# x = F.relu(self.fc2(x))
x = F.leaky_relu(self.fc2(x))
return self.fc3(x)
|
[
"andreas.boerzel@gmx.de"
] |
andreas.boerzel@gmx.de
|
292073a7cb374caacd4a351d4ed2d1ae9d8195b8
|
068d271e241d8cdb46dbf4243166e4b8ee7025b2
|
/day08/homework/core/certification.py
|
7e34aeb34cbafe6aab843acb6b2657e232b577e5
|
[] |
no_license
|
caiqinxiong/python
|
f6e226e76cb62aac970bcfbcb6c8adfc64858b60
|
9029f6c528d2cb742b600af224e803baa74cbe6a
|
refs/heads/master
| 2023-05-26T19:41:34.911885
| 2020-05-15T09:02:08
| 2020-05-15T09:02:08
| 195,261,757
| 1
| 0
| null | 2021-06-10T23:33:33
| 2019-07-04T15:01:42
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,740
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'caiqinxiong_cai'
# 2019/8/21 10:37
import pickle
import hashlib
from conf import settings as ss
from core.log import Log as log
class Certification:
'''登录认证类'''
def __init__(self):
pass
@staticmethod # 类的装饰器,静态方法 没有用到默认参数(self),不能使用类变量和实例变量,实现实例化使用 C().f(),也可以不实例化调用该方法 C.f()
def written_information(file_name, content, mode='ab'):
'''写入信息'''
with open('%s' % file_name, mode='%s' % mode) as f:
pickle.dump(content, f)
@staticmethod
def read_information(file_name):
'''读取信息'''
try:
with open('%s' % file_name, mode='rb') as f:
while True:
try:
r = pickle.load(f)
for k, v in r.__dict__.items():
yield k, v # 以迭代器方式返回,节省内存使用
except EOFError:
break
except:
return ("", "")
@staticmethod
def change_hashlib(password):
'''将明文转换成密文'''
md5 = hashlib.md5()
md5.update(password.encode('utf-8'))
ret = md5.hexdigest()
return ret
@classmethod # 类的装饰器,类方法,该函数只能访问到类的数据属性,不能获取实例的数据属性,默认传参数cls,表示当前所在的类名
def check_user(cls, user, name, kind):
'''校验账号'''
for k, v in name:
if 'username' == k and user == v.split(':')[-1].strip():
passwd = cls.change_hashlib(input('请输入密码 :').strip())
if passwd == name.__next__()[-1].split(':')[-1].strip():
return user, passwd, name.__next__()[-1], name.__next__()[-1], name.__next__()[-1], kind
else:
log.warning('密码校验失败!')
return False
@property # 类的装饰器,将一个方法伪装成类的属性来使用,注意:函数没有传入参数时使用
def login(self):
'''登录验证'''
for i in range(3): # 3次机会
user = input('请输入登录名 :').strip()
if user in ss.admin_dict: # 优先校验管理员账号,管理员账号总会比普通账号少嘛。
passwd = self.change_hashlib(input('请输入管理员密码 :').strip())
if ss.admin_dict[user] == passwd:
print("*" * 25 + '\n登录成功!%s管理员!' % user)
return user
else:
log.warning('管理员密码校验失败!')
continue
# 讲师账号校验
teacher = Certification().read_information(ss.teacher_file) # 读取讲师账号信息文件,返回迭代器
ret = self.check_user(user, teacher, 'teacher')
if ret:
print("*" * 25 + '\n登录成功!%s讲师!' % user)
return ret
# 学生账号校验
student = Certification().read_information(ss.student_file) # 读取学生账号信息文件,返回迭代器
ret = self.check_user(user, student, 'student')
if ret:
print("*" * 25 + '\n登录成功!%s同学!' % user)
return ret
else:
print('账号不存在!')
else:
log.warning('您的3次尝试机会已用完,谢谢使用!')
return False
if __name__ == '__main__':
print(Certification().login)
|
[
"13269469526@163.com"
] |
13269469526@163.com
|
7284c0b3e3f5f25dae2144460b7e2736b30e6aea
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03326/s393771722.py
|
df69bc9b8b591bef556ca44e75fdf6235600e367
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 857
|
py
|
N,M=map(int,input().split())
ans1=[]
ans2=[]
ans3=[]
ans4=[]
ans5=[]
ans6=[]
ans7=[]
ans8=[]
ANS1=0
ANS2=0
ANS3=0
ANS4=0
ANS5=0
ANS6=0
ANS7=0
ANS8=0
for i in range(N):
A,B,C=map(int,input().split())
ans1.append(A+B+C)
ans2.append(-A+B+C)
ans3.append(A-B+C)
ans4.append(A+B-C)
ans5.append(-A-B+C)
ans6.append(A-B-C)
ans7.append(-A+B-C)
ans8.append(-A-B-C)
re_ans1=sorted(ans1)
re_ans2=sorted(ans2)
re_ans3=sorted(ans3)
re_ans4=sorted(ans4)
re_ans5=sorted(ans5)
re_ans6=sorted(ans6)
re_ans7=sorted(ans7)
re_ans8=sorted(ans8)
for j in range(1,M+1):
ANS1+=re_ans1[(-1)*j]
ANS2+=re_ans2[(-1)*j]
ANS3+=re_ans3[(-1)*j]
ANS4+=re_ans4[(-1)*j]
ANS5+=re_ans5[(-1)*j]
ANS6+=re_ans6[(-1)*j]
ANS7+=re_ans7[(-1)*j]
ANS8+=re_ans8[(-1)*j]
print(max(ANS1,ANS2,ANS3,ANS4,ANS5,ANS6,ANS7,ANS8))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
caa596988026c177b5d1631eda972783f68d471b
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_fuzes.py
|
8286425583021c697aa4a9041dbe2357f3c0b7ed
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
#calss header
class _FUZES():
def __init__(self,):
self.name = "FUZES"
self.definitions = fuze
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['fuze']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
9006b74c31a0bbba91bf577dd8129129896bca4b
|
d74ccf6290b7acb0011fd9b9132cd8beac0bd9d3
|
/back/movies/views.py
|
e3db8ef93368b3d5064833a71fab135bd04f821a
|
[] |
no_license
|
gaberani/final_netflix
|
a0687c9cec9157712c9fe2a8627d3624e5fe00b6
|
637016fd6a0c589f1ff96ed5e9225deffc8f18cb
|
refs/heads/master
| 2022-11-09T10:42:22.460795
| 2020-06-21T00:30:21
| 2020-06-21T00:30:21
| 272,981,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,769
|
py
|
from django.shortcuts import render,get_object_or_404,get_list_or_404
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from .models import Movie, Comment, Genre
from django.db.models import Q,Avg,Sum
from .serializers import MovieSerializer, MovieListSerializer, CommentSerializer, GenreSerializer
# Get All Movies
@api_view(['GET'])
def index(request):
movies = Movie.objects.all()
serializer = MovieListSerializer(movies)
return Response(serializer.data)
# Get Detail Movie
@api_view(['GET'])
def detail(request, movie_pk):
movie = get_object_or_404(Movie, pk=movie_pk)
serializer=MovieSerializer(movie)
return Response(serializer.data)
# Register Comments
@api_view(['POST'])
# @permission_classes([IsAuthenticated])
def create(request,movie_pk):
serializer = CommentSerializer(data=request.data)
comments = Comment.objects.filter(movie_id=movie_pk)
users = []
for comment in comments:
if comment.user not in users:
users.append(comment.user)
if request.user not in users:
if serializer.is_valid(raise_exception=True):
serializer.save(user=request.user, movie_id=movie_pk)
return Response(serializer.data)
else:
return Response({'Messages': '이미 평점을 작성한 영화입니다.'})
# Recommend Movies
@api_view(['GET'])
def recommendMovies(request):
comments=Comment.objects.filter(user=request.user).order_by('-rating')
new_genres = {}
for comment in comments:
movie = get_object_or_404(Movie,id=comment.movie_id)
serializer=MovieSerializer(movie)
for genre in serializer.data['genres']:
if genre not in new_genres:
if comment.rating:
new_genres[genre] =comment.rating
else:
if comment.rating:
new_genres[genre] += comment.rating
new_genres=sorted(new_genres.items(), key=lambda x:x[1],reverse=True)
recommend={}
# Genres Ordered By User Ratings
for k,v in new_genres:
movies=Movie.objects.filter(genres=k)
# Movies in Genres
for i in movies:
total=0
# Summation of Comments Ratings
ratings=Comment.objects.filter(movie_id=i.id)
if ratings:
for k in ratings:
total+=ratings[0].rating
if total:
recommend[i.id]=total
recommend=sorted(recommend.items(),key=lambda x:x[1],reverse=True)
if len(recommend)<5:
movie=Movie.objects.order_by("?")[:5]
serializer=MovieSerializer(movie,many=True)
else:
result=[]
for i in recommend[:5]:
result.append(i[0])
movie=Movie.objects.filter(id__in=result)
serializer=MovieSerializer(movie,many=True)
return Response(serializer.data)
# Update and Delete Comments
@api_view(['PUT', 'DELETE'])
def comment_update_and_delete(request,movie_pk,comment_pk):
comment=get_object_or_404(Comment,pk=comment_pk)
if request.user == comment.user:
print('User OK')
if request.method=='PUT':
serializer=CommentSerializer(data=request.data,instance=comment)
print('Method PUT')
if serializer.is_valid(raise_exception=True):
print('is_valid OK')
serializer.save()
return Response(serializer.data)
else:
print('Method DELETE')
comment.delete()
return Response({'message':'삭제 완료!'})
else:
return Response({'message': '다른 사용자는 불가합니다'})
# Popular Top3
@api_view(['GET'])
def many3(request):
many3 = Movie.objects.order_by('-popularity')[:3]
serializer = MovieSerializer(many3, many=True)
return Response(serializer.data)
# Top Rated Top3
@api_view(['GET'])
def top3(request):
top3 = Movie.objects.order_by('-vote_average')[:3]
serializer = MovieSerializer(top3, many=True)
return Response(serializer.data)
# Movie Search
@api_view(['GET'])
def search(request,movie_title):
movie=Movie.objects.filter(title__contains=movie_title)
if movie.exists():
serializer=MovieSerializer(movie,many=True)
return Response(serializer.data)
else:
return Response({'message':'No Result'})
@api_view(['POST'])
def wannawatch(request, movie_pk):
movie = get_object_or_404(Movie,id=movie_pk)
if movie.like_users.filter(pk=request.user.id).exists():
movie.like_users.remove(request.user)
else:
movie.like_users.add(request.user)
serializer=MovieSerializer(movie)
return Response(serializer.data)
# return Response({'message': '필요없음'})
@api_view(['GET'])
def confirmWatch(request,movie_pk):
movie=get_object_or_404(Movie,id=movie_pk)
if request.user in movie.like_users.all():
return Response({'result':1})
else:
return Response({'result':0})
@api_view(['GET'])
# @permission_classes([IsAuthenticated])
def getwannawatch(request):
user=request.user
movies=user.like_movies.all()
serializer=MovieListSerializer(movies,many=True)
return Response(serializer.data)
# Find All Genres
@api_view(['GET'])
def findgenre(request):
genre=Genre.objects.all()
serializer=GenreSerializer(genre,many=True)
return Response(serializer.data)
# Find Specific Genres
@api_view(['GET'])
def getGenre(request,genre_id):
movies=Movie.objects.filter(genres=genre_id)
print(len(movies))
serializer=MovieSerializer(movies,many=True)
return Response(serializer.data)
|
[
"khs0783@naver.com"
] |
khs0783@naver.com
|
7001df5b1c8067eb4650e631e6538486f1968571
|
846b11ccf549aba144c1824a24292a4850860ca7
|
/3-EstruturaDeRepeticao/14.py
|
6bdc78dafd567fe611d5f74f89ef5b66eb0168d1
|
[] |
no_license
|
paulocesarcsdev/ExerciciosPython
|
6d1feff293e7efc4cd3fbc62eee0add93f76db99
|
25bfaa6dc5cb294242e478a2b253a8ca5d9c7078
|
refs/heads/master
| 2023-05-15T00:53:22.151884
| 2021-06-10T03:04:04
| 2021-06-10T03:04:04
| 337,847,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
'''
Faça um programa que peça 10 números inteiros, calcule e
mostre a quantidade de números pares e a quantidade de números impares.
'''
lista = []
contador = 0
while(contador <= 10):
numero = int(input('Entre com dez {} números inteiros :'.format(contador)))
lista.append(numero)
for i in range(len(lista)):
if(i % 2 != 0):
print(i)
contador += 1
|
[
"paulocesarcs.dev@gmail.com"
] |
paulocesarcs.dev@gmail.com
|
bbe95919f6a86c4969e52c8d4b83fac015417ecf
|
21b632797ed6257b13574c341cdd14e6534728a9
|
/ryu/tests/unit/app/test_ofctl_rest.py
|
e9c64d2ca45a8b9816adb0f84b00b81ef71a983e
|
[
"Apache-2.0"
] |
permissive
|
MrCocoaCat/ryu
|
0473f04e2a840e027e9002f8a6af81745eaf7094
|
9e9571991a73380099b7ba7c6f37e0e587080a6a
|
refs/heads/master
| 2021-06-19T18:09:52.833590
| 2020-05-12T08:17:21
| 2020-05-12T08:17:21
| 163,072,555
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,519
|
py
|
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import functools
import json
import logging
import os
import sys
import unittest
try:
import mock # Python 2
except ImportError:
from unittest import mock # Python 3
from nose.tools import eq_
from ryu.app import ofctl_rest
from ryu.app.wsgi import Request
from ryu.app.wsgi import WSGIApplication
from ryu.controller.dpset import DPSet
from ryu.ofproto import ofproto_protocol
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_4
from ryu.ofproto import ofproto_v1_5
from ryu.tests import test_lib
LOG = logging.getLogger(__name__)
class DummyDatapath(ofproto_protocol.ProtocolDesc):
def __init__(self, version):
super(DummyDatapath, self).__init__(version)
self.id = 1
_kw = {'port_no': 1, 'hw_addr': 'aa:bb:cc:dd:ee:ff',
'name': 's1-eth1', 'config': 1, 'state': 1}
# for OpenFlow 1.0
if version in [ofproto_v1_0.OFP_VERSION]:
_kw.update(
{'curr': 2112, 'advertised': 0, 'supported': 0, 'peer': 0})
port_info = self.ofproto_parser.OFPPhyPort(**_kw)
# for OpenFlow 1.2 or 1.3
elif version in [ofproto_v1_2.OFP_VERSION, ofproto_v1_3.OFP_VERSION]:
_kw.update(
{'curr': 2112, 'advertised': 0, 'supported': 0, 'peer': 0,
'curr_speed': 10000000, 'max_speed': 0})
port_info = self.ofproto_parser.OFPPort(**_kw)
# for OpenFlow 1.4+
else:
_kw.update({'properties': []})
port_info = self.ofproto_parser.OFPPort(**_kw)
self.ports = {1: port_info}
class Test_ofctl_rest(unittest.TestCase):
def _test(self, name, dp, method, path, body):
# print('processing %s ...' % name)
dpset = DPSet()
dpset._register(dp)
wsgi = WSGIApplication()
contexts = {
'dpset': dpset,
'wsgi': wsgi,
}
ofctl_rest.RestStatsApi(**contexts)
req = Request.blank(path)
req.body = json.dumps(body).encode('utf-8')
req.method = method
with mock.patch('ryu.lib.ofctl_utils.send_stats_request'),\
mock.patch('ryu.lib.ofctl_utils.send_msg'):
res = req.get_response(wsgi)
eq_(res.status, '200 OK')
def _add_tests():
_ofp_vers = {
'of10': ofproto_v1_0.OFP_VERSION,
'of12': ofproto_v1_2.OFP_VERSION,
'of13': ofproto_v1_3.OFP_VERSION,
'of14': ofproto_v1_4.OFP_VERSION,
'of15': ofproto_v1_5.OFP_VERSION,
}
this_dir = os.path.dirname(sys.modules[__name__].__file__)
ofctl_rest_json_dir = os.path.join(this_dir, 'ofctl_rest_json/')
for ofp_ver in _ofp_vers:
# read a json file
json_path = os.path.join(ofctl_rest_json_dir, ofp_ver + '.json')
if os.path.exists(json_path):
_test_cases = json.load(open(json_path))
else:
# print("Skip to load test cases for %s" % ofp_ver)
continue
# add test
for test in _test_cases:
method = test['method']
path = test['path']
body = test.get('body', {})
name = 'test_ofctl_rest_' + method + '_' + ofp_ver + '_' + path
# print('adding %s ...' % name)
f = functools.partial(
Test_ofctl_rest._test,
name=name,
dp=DummyDatapath(_ofp_vers[ofp_ver]),
method=test['method'],
path=test['path'],
body=body
)
test_lib.add_method(Test_ofctl_rest, name, f)
_add_tests()
if __name__ == "__main__":
unittest.main()
|
[
"MrCocoaCat@aliyun.com"
] |
MrCocoaCat@aliyun.com
|
0c87c480f5967e98463f781434699181e8465a21
|
429211c01057abcd51e5120d566c7daa0a8e2f33
|
/1804/二/day9/gun.py
|
708ce593b58389a50875961117c80df6abc145ac
|
[] |
no_license
|
LDZ-RGZN/b1804
|
2788c922a6d1a6dc11267920a90336d1df93a453
|
c57f8b7cf14686036cae3c30a30f07514622b5ca
|
refs/heads/master
| 2021-07-19T12:54:07.031858
| 2018-10-12T02:48:39
| 2018-10-12T02:48:39
| 133,500,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,329
|
py
|
#!/usr/bin/env python
# coding=utf-8
#老王开抢
#四大类 人类 抢类 弹夹类 子弹类
class Ren:
def __init__(self,name):
self.namd = name
self.xue = 100
self.qiang = None
def __str__(self):
return self.name + '剩余血量为:' + str(self.xue)
def anzidan(self,danjia,zidan):
danjia.baocunzidan(zidan)
def andanjia(self,qiang,danjia):
qiang.lianjiedanjia(danjia)
def naqiang(self,qiang):
self.qiang = qiang
def kaiqing(self,diren):
self.qiang.she(diren)
def diaoxue(self,shashangli):
self.xue -= shashangli
#弹夹类
class Danjia:
def __init__(self,rongliang):
self.rongliang = rongliang
self.ronglist = []
def __str__(self):
return "子弹数量为" + str(len(self.ronglist)) + "/" + str(self.rongliang)
def baocounzidan(self,zidan):
if len(self.ronglist) < self.rongliang:
self.ronglist.append(zidan)
def chuzidan(self):
if len(self.ronglist) > 0:
zidan = self.ronglist[-1]
self.ronglist.pop()
return zidan
else:
return None
#子弹类
class Zidan:
def __init__(self,shashangli):
self.shashangli = shashangli
def shanghai(self,diren):
diren.diaoxue(self.shashangli)
#枪
class Qiang:
def __init__(self):
self.danjian = None
def __str__(self):
if self.danjian:
return "枪当前有弹夹"
else:
return "枪当前没有弹夹"
def lianjiedanjia(self,danjia):
if not self.danjian:
self.danjian = danjia
def she(self,diren):
zidan = self.danjian.chuzidan()
if zidan:
zidan.shanghai(diren)
else:
print ("没有子弹了,放了空枪..")
#创建一个对象
laowang = Ren("老王")
#创建一个弹夹
danjia = Danjia(20)
print (danjia)
i = 0
while i < 5:
zidan = Zidan(5)
laowang.anzidan(danjia,zidan)
i += 1
print (danjia)
qiang = Qiang()
print (qiang)
laowanga.andanjia(qiang,danjia)
print(qiang)
diren = Ren("敌人")
print (diren)
laowang.kaiqiang(diren)
print(diren)
print(danjia)
laowang.kaiqiang(diren)
print (diren)
print (danjia)
|
[
"2654213432@qq.com"
] |
2654213432@qq.com
|
ae9104c7eda0a9bab8afe8a967157bb5b7283c3e
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part000047.py
|
db44526b51ca3eb6cfa5a431845cc6192b31309e
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,306
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher103223(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.1.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.1.2.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher103223._instance is None:
CommutativeMatcher103223._instance = CommutativeMatcher103223()
return CommutativeMatcher103223._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 103222
return
yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
68e493b2d7f735d95ac4aa98be268df361a49576
|
51b6d2fc53d5c632fcf01319842baebf13901e84
|
/atcoder.jp/arc119/arc119_b/Main.py
|
4f5348383754ef09ebd1273a7cb050def7fe502a
|
[] |
no_license
|
mono-0812/procon
|
35db3b2c21eff74fbd7b52db07f249380f6834ef
|
68a4b53880a228a0164052b23d1326363efcbc20
|
refs/heads/master
| 2023-05-30T17:02:58.935074
| 2021-06-27T12:15:10
| 2021-06-27T12:15:10
| 345,896,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,661
|
py
|
import bisect,collections,copy,heapq,itertools,math,string,sys,queue,time
from typing import Counter
input = lambda: sys.stdin.readline().rstrip()
start_time=time.time()
from decimal import Decimal
def I(): return input()
def IS(): return input().split()
def II(): return int(input())
def IIS(): return map(int,input().split())
def LIIS(): return list(map(int,input().split()))
def debug(*args): print(*args) if len(args)>0 else print(False);return
def nt(): print(time.time()-start_time);return
def comb(n, r):return math.factorial(n) // (math.factorial(n - r) * math.factorial(r))
def combinations_with_replacement_count(n, r):
return comb(n + r - 1, r)
def tr():pass
def try_run():
try:tr()
except:pass
def make_divisors(n):
lower_divisors , upper_divisors = [], []
i = 1
while i*i <= n:
if n % i == 0:
lower_divisors.append(i)
if i != n // i:
upper_divisors.append(n//i)
i += 1
return lower_divisors + upper_divisors[::-1]
def prime_factorize(n):
a = []
while n % 2 == 0:
a.append(2)
n //= 2
f = 3
while f * f <= n:
if n % f == 0:
a.append(f)
n //= f
else:
f += 2
if n != 1:
a.append(n)
return a
ragen=range
INF=10**18
MOD=998244353
##############################################################################
n=II()
s=I()
t=I()
a=[]
for i in range(n):
if s[i]=="0":
a.append(i)
b=[]
for i in range(n):
if t[i]=="0":
b.append(i)
if len(a)!=len(b):
print(-1)
exit()
ans=0
for i in range(len(a)):
if a[i]!=b[i]:
ans+=1
print(ans)
|
[
"frisk02.jar@gmail.com"
] |
frisk02.jar@gmail.com
|
c2ad53098881126cf3216483b39d65efcb8d5136
|
807a9f48de01fe9c2ae8200dbce0f590dcc6d0a7
|
/jd/api/rest/__init__.py
|
a29e7abd846b521334424ed92761313fefa929b0
|
[
"MIT"
] |
permissive
|
onsunsl/DjangoBlog
|
51cb01082253ebf6010a7d57ba6ce838f4809461
|
1df83f7ac0ef95433dd5a68cd8d00d37f6e8d6bd
|
refs/heads/master
| 2020-04-02T13:49:17.860496
| 2020-03-22T13:50:01
| 2020-03-22T13:50:01
| 154,498,765
| 0
| 0
|
MIT
| 2018-10-24T12:38:39
| 2018-10-24T12:38:39
| null |
UTF-8
|
Python
| false
| false
| 1,352
|
py
|
# 商品类目查询
from .jdUnionOpenCategoryGoodsGet import jdUnionOpenCategoryGoodsGet
# 优惠券领取情况查询接口
from .jdUnionOpenCouponQuery import jdUnionOpenCouponQuery
# 京粉精选商品查询接口
from .jdUnionOpenGoodsJingfenQuery import jdUnionOpenGoodsJingfenQuery
# 链接商品查询接口
from .jdUnionOpenGoodsLinkQuery import jdUnionOpenGoodsLinkQueryt
# 获取推广商品信息接口
from .jdUnionOpenGoodsPromotiongoodsinfoQuery import jdUnionOpenGoodsPromotiongoodsinfoQuery
# 关键词商品查询接口
from .jdUnionOpenGoodsQuery import jdUnionOpenGoodsQuery
# 秒杀商品查询接口
from .jdUnionOpenGoodsSeckillQuery import jdUnionOpenGoodsSeckillQuery
# 学生价商品查询接口
from .jdUnionOpenGoodsStupriceQuery import jdUnionOpenGoodsStupriceQuery
# 奖励订单查询接口
from .jdUnionOpenOrderBonusQuery import jdUnionOpenOrderBonusQuery
# 查询推广订单及佣金信息,会随着订单状态变化更新数据,支持按下单时间、完成时间或状态更新时间查询,
from .jdUnionOpenOrderQuery import jdUnionOpenOrderQuery
# 获取PID
from .jdUnionOpenUserPidGet import jdUnionOpenUserPidGet
# 通过unionId获取推广链接
from .jdUnionOpenPromotionByunionidGet import jdUnionOpenPromotionByunionidGet
from .jdUnionOpenoPromotionCommonGet import jdUnionOpenPromotionCommonGet
|
[
"onsunsl@foxmail.com"
] |
onsunsl@foxmail.com
|
c8b3f7877605ee22e4fb7352e90bf9d0438a9629
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/aphotomanager/testcase/firstcases/testcase5_011.py
|
c948b113972a58ee17f3e78f54b86d42dff91b4c
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864
| 2021-12-06T12:46:14
| 2021-12-06T12:46:14
| 173,225,161
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,009
|
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'de.k3b.android.androFotoFinder',
'appActivity' : 'de.k3b.android.androFotoFinder.FotoGalleryActivity',
'resetKeyboard' : True,
'androidCoverage' : 'de.k3b.android.androFotoFinder/de.k3b.android.androFotoFinder.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase011
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().resourceId(\"android:id/home\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"android:id/home\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"More options\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Delete\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).tap(element).perform()
driver.press_keycode(82)
element = getElememtBack(driver, "new UiSelector().text(\"Cancel\")", "new UiSelector().className(\"android.widget.Button\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"de.k3b.android.androFotoFinder:id/image\").className(\"android.widget.ImageView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"Mais opções\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Copiar\")", "new UiSelector().className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"OK\")", "new UiSelector().className(\"android.widget.Button\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"de.k3b.android.androFotoFinder:id/cmd_any\").className(\"android.widget.ImageButton\")")
TouchAction(driver).tap(element).perform()
swipe(driver, 0.5, 0.8, 0.5, 0.2)
swipe(driver, 0.5, 0.8, 0.5, 0.2)
element = getElememtBack(driver, "new UiSelector().text(\"OK\")", "new UiSelector().className(\"android.widget.Button\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"de.k3b.android.androFotoFinder:id/cmd_any\").className(\"android.widget.ImageButton\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememtBack(driver, "new UiSelector().text(\"OK\")", "new UiSelector().className(\"android.widget.Button\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"de.k3b.android.androFotoFinder:id/cmd_any\").className(\"android.widget.ImageButton\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"acct \")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"sbin \")", "new UiSelector().className(\"android.widget.TextView\").instance(9)")
TouchAction(driver).long_press(element).release().perform()
element = getElememtBack(driver, "new UiSelector().text(\"Delete\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"5_011\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'de.k3b.android.androFotoFinder'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
|
[
"prefest2018@gmail.com"
] |
prefest2018@gmail.com
|
67f834797b3b8e84a0342902ea5aaec610872b32
|
695803cf1ae81f7a8ad63faa80545c3c913cee02
|
/Part1/week3/chapter11/exercise/employee_test_11.3/employee.py
|
911dfb1dcd2147d814debf66f37c4af7a0925ca2
|
[] |
no_license
|
superstones/LearnPython
|
fa72a249a69323927da81887ce4b9f400552a1d0
|
5ea25f9b9922654d67c6b31475cdf02b9fe99c7e
|
refs/heads/master
| 2023-06-24T10:40:18.639288
| 2021-07-22T08:34:11
| 2021-07-22T08:34:11
| 370,679,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
class Employee():
"""一个表示雇员的类"""
def __init__(self, first_name, last_name, salary):
"""初始化雇员"""
self.first_name = first_name.title()
self.last_name = last_name.title()
self.salary = salary
def give_raise(self, amount=5000):
self.salary += amount
|
[
"292454993@qq.com"
] |
292454993@qq.com
|
181b8daf2945fae532bbd054c7f7d63f2582dbe1
|
95c3c587907ae38b11faacc4d2ebe1df8f5b3335
|
/ASSGN-FlowControl-Aug16-Q3-Jyoti.py
|
2933fbd0945cae6d8492dde2787e2ed8aee49a60
|
[] |
no_license
|
sandhyalethakula/Iprimed_16_python
|
d59cb47d2d2a63c04a658c8b302505efc8f24ff4
|
c34202ca155819747a5c5ac4a8a5511d425f41a1
|
refs/heads/main
| 2023-08-16T11:40:11.005919
| 2021-09-29T13:22:32
| 2021-09-29T13:22:32
| 411,956,547
| 0
| 0
| null | 2021-09-30T06:57:13
| 2021-09-30T06:57:12
| null |
UTF-8
|
Python
| false
| false
| 702
|
py
|
'''
1.ask the user to enter the username
2.if the username is admin then ask the user to enter the password.
3.if the password is admin123 then greet the user and end the program.
4.if the password is not admin123 then display wrong password.
3.if the username is not admin then ask the user to enter the username again.'''
while True:
UserName=input('enter the user name : ') #asks user to enter name
if UserName=='Jyoti':
password=input('enter the passsword : ') #asks user to enter pwd
if password =='jan369': #checks pwd
print('hello',UserName)
break
else:
print('wrong password')
|
[
"noreply@github.com"
] |
sandhyalethakula.noreply@github.com
|
5c5f3cdad8b7993f138f1ac54983ef6ff5b698e7
|
170026ff5b435027ce6e4eceea7fff5fd0b02973
|
/glycan_profiling/output/report/glycan_lcms/render.py
|
69e118fb688103df4b9b16b637b8f25d227bd523
|
[
"Apache-2.0"
] |
permissive
|
mstim/glycresoft
|
78f64ae8ea2896b3c4f4c185e069387824e6c9f5
|
1d305c42c7e6cba60326d8246e4a485596a53513
|
refs/heads/master
| 2022-12-24T23:44:53.957079
| 2020-09-29T13:38:20
| 2020-09-29T13:38:20
| 276,471,357
| 0
| 0
|
NOASSERTION
| 2020-07-01T20:04:43
| 2020-07-01T20:04:42
| null |
UTF-8
|
Python
| false
| false
| 6,222
|
py
|
import os
from glycan_profiling import serialize
from glycan_profiling.plotting import summaries, figax, SmoothingChromatogramArtist
from glycan_profiling.plotting.chromatogram_artist import ChargeSeparatingSmoothingChromatogramArtist
from glycan_profiling.scoring.utils import logit
from glycan_profiling.chromatogram_tree import ChromatogramFilter
from jinja2 import Markup, Template
from glycan_profiling.output.report.base import (
svguri_plot, ReportCreatorBase)
def chromatogram_figures(chroma):
figures = []
plot = SmoothingChromatogramArtist(
[chroma], colorizer=lambda *a, **k: 'green', ax=figax()).draw(
label_function=lambda *a, **k: "", legend=False).ax
plot.set_title("Aggregated\nExtracted Ion Chromatogram", fontsize=24)
chroma_svg = svguri_plot(
plot, bbox_inches='tight', height=5, width=9, svg_width="100%")
figures.append(chroma_svg)
if len(chroma.mass_shifts) > 1:
mass_shifts = list(chroma.mass_shifts)
labels = {}
rest = chroma
for mass_shift in mass_shifts:
with_mass_shift, rest = rest.bisect_mass_shift(mass_shift)
labels[mass_shift] = with_mass_shift
mass_shift_plot = SmoothingChromatogramArtist(
labels.values(),
colorizer=lambda *a, **k: 'green', ax=figax()).draw(
label_function=lambda *a, **k: tuple(a[0].mass_shifts)[0].name,
legend=False).ax
mass_shift_plot.set_title(
"mass_shift-Separated\nExtracted Ion Chromatogram", fontsize=24)
mass_shift_separation = svguri_plot(
mass_shift_plot, bbox_inches='tight', height=5, width=9, svg_width="100%")
figures.append(mass_shift_separation)
if len(chroma.charge_states) > 1:
charge_separating_plot = ChargeSeparatingSmoothingChromatogramArtist(
[chroma], ax=figax()).draw(
label_function=lambda x, *a, **kw: str(
tuple(x.charge_states)[0]), legend=False).ax
charge_separating_plot.set_title(
"Charge-Separated\nExtracted Ion Chromatogram", fontsize=24)
charge_separation = svguri_plot(
charge_separating_plot, bbox_inches='tight', height=5, width=9,
svg_width="100%")
figures.append(charge_separation)
return figures
def chromatogram_link(chromatogram):
id_string = str(chromatogram.id)
return Markup("<a href=\"#detail-{0}\">{1}</a>").format(id_string, str(chromatogram.key))
class GlycanChromatogramReportCreator(ReportCreatorBase):
def __init__(self, database_path, analysis_id, stream=None, threshold=5):
super(GlycanChromatogramReportCreator, self).__init__(
database_path, analysis_id, stream)
self.set_template_loader(os.path.dirname(__file__))
self.threshold = threshold
self.glycan_chromatograms = ChromatogramFilter([])
self.unidentified_chromatograms = ChromatogramFilter([])
def glycan_link(self, key):
match = self.glycan_chromatograms.find_key(key)
if match is not None:
return chromatogram_link(match)
match = self.unidentified_chromatograms.find_key(key)
if match is not None:
return chromatogram_link(match)
return None
def prepare_environment(self):
super(GlycanChromatogramReportCreator, self).prepare_environment()
self.env.filters["logit"] = logit
self.env.filters['chromatogram_figures'] = chromatogram_figures
self.env.filters['glycan_link'] = self.glycan_link
def make_template_stream(self):
template_obj = self.env.get_template("overview.templ")
ads = serialize.AnalysisDeserializer(
self.database_connection._original_connection,
analysis_id=self.analysis_id)
self.glycan_chromatograms = gcs = ads.load_glycan_composition_chromatograms()
# und = ads.load_unidentified_chromatograms()
self.unidentified_chromatograms = und = ChromatogramFilter(
ads.query(serialize.UnidentifiedChromatogram).filter(
serialize.UnidentifiedChromatogram.analysis_id == self.analysis_id).all())
if len(gcs) == 0:
self.log("No glycan compositions were identified. Skipping report building")
templ = Template('''
<html>
<style>
body {
font-family: sans-serif;
}
</style>
<body>
<h3>No glycan compositions were identified</h3>
</body>
</html>
''')
return templ.stream()
summary_plot = summaries.GlycanChromatographySummaryGraphBuilder(
filter(lambda x: x.score > self.threshold, gcs + und))
lcms_plot, composition_abundance_plot = summary_plot.draw(min_score=5)
try:
lcms_plot.ax.legend_.set_visible(False)
except AttributeError:
# The legend may not have been created
pass
lcms_plot.ax.set_title("Glycan Composition\nLC-MS Aggregated EICs", fontsize=24)
fig = lcms_plot.ax.figure
fig.set_figwidth(fig.get_figwidth() * 2.)
fig.set_figheight(fig.get_figheight() * 2.)
composition_abundance_plot.ax.set_title("Glycan Composition\nTotal Abundances", fontsize=24)
composition_abundance_plot.ax.set_xlabel(
composition_abundance_plot.ax.get_xlabel(), fontsize=14)
def resolve_key(key):
match = gcs.find_key(key)
if match is None:
match = und.find_key(key)
return match
template_stream = (template_obj.stream(
analysis=ads.analysis, lcms_plot=svguri_plot(
lcms_plot.ax, bbox_inches='tight', patchless=True,
svg_width="100%"),
composition_abundance_plot=svguri_plot(
composition_abundance_plot.ax, bbox_inches='tight', patchless=True,
svg_width="100%"),
glycan_chromatograms=gcs,
unidentified_chromatograms=und,
resolve_key=resolve_key
))
return template_stream
|
[
"mobiusklein@gmail.com"
] |
mobiusklein@gmail.com
|
4027b8b586e455538c009e3f9d23e2020e8e842c
|
ac9b8a7b6a84a9abc357fc0904459008a90a55b4
|
/Model/MarkMethodPixelOffset.py
|
58aaaf225b8b2ce1dd41505fe21122bc362ba78c
|
[] |
no_license
|
alex-ong/TFRevolution
|
a8a242e657cb9318d0ce8b6b013b2c2a4c911468
|
f321a182a9b08b65c22b507bbd221c5e7c8c2d58
|
refs/heads/master
| 2021-04-27T08:00:16.931824
| 2018-09-13T14:50:40
| 2018-09-13T14:50:40
| 122,644,858
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,767
|
py
|
from Model.PlayerData import PlayerData
def hexNoLeader(number):
return hex(number).replace("0x", "")
def ToHex(numbers):
return ('#' + hexNoLeader(numbers[0]).zfill(2) +
hexNoLeader(numbers[1]).zfill(2) +
hexNoLeader(numbers[2]).zfill(2))
#method for marking an image based purely on pixel offsets.
def markPlayerPreview(pixels, imgsize, startOffset, garbageOffset, gs):
markColor = (255, 255, 255)
garboColor = (0, 255, 0)
w, h = imgsize
for y in range(20):
yPix = round(y * gs + startOffset[1])
if yPix >= h:
break
for x in range(10):
xPix = round(x * gs + startOffset[0])
if xPix >= w:
break
pixels[xPix, yPix] = markColor
xPix = round(x * gs + startOffset[0] + garbageOffset)
if xPix >= w:
continue
pixels[xPix, yPix] = garboColor
def markImagePreview(fullImageMarker, image):
pixels = image.load()
startOffset = [20, 20] # magic number :(
garbageOffset = fullImageMarker.WindowSettings.garbageXOffset
PixelOffsetArgs = (pixels, image.size, startOffset, garbageOffset, fullImageMarker.WindowSettings.gridSize)
# mark player 1
markPlayerPreview(*PixelOffsetArgs)
startOffset[0] += fullImageMarker.WindowSettings.playerDistance
# mark player 2
markPlayerPreview(*PixelOffsetArgs)
# Section below is marking for output to external programs
def markImageOutput(imageMarker, image):
pixels = image.load()
garbageOffset = imageMarker.WindowSettings.garbageXOffset
startOffset = [20, 20] # magic number :(
# mark player 1
for player in imageMarker.data:
markPlayerOutput(imageMarker, player, pixels, image.size, garbageOffset, startOffset)
startOffset[0] += imageMarker.WindowSettings.playerDistance
def markPlayerOutput(imageMarker, player, pixels, imgsize, garbageOffset, startOffset):
player.resetGarbage()
gs = imageMarker.WindowSettings.gridSize
w, h = imgsize
y = 0
x = 0
for y in range(PlayerData.MATRIX_Y):
yPix = round(y * gs + startOffset[1])
if yPix >= h:
break
for x in range(PlayerData.MATRIX_X):
xPix = round(x * gs + startOffset[0])
if xPix >= w:
break
player.updateField(x, y, ToHex(pixels[xPix, yPix]))
# garbage detection
for y in range(PlayerData.MATRIX_Y - 1, -1, -1):
yPix = round(y * gs + startOffset[1])
xPix = round(x * gs + startOffset[0] + garbageOffset)
if xPix >= w or yPix >= h:
continue
player.updateGarbage(20 - y, pixels[xPix, yPix])
|
[
"the.onga@gmail.com"
] |
the.onga@gmail.com
|
e532c4a0865c915ecd68f08f632acc6da4255359
|
ad9bd58a3ec8fa08dfcc994d4101ee815a9f5bc0
|
/02_algorithm/sw_expert_academy/code_problem/D5/1256.K번째 접미어/1256.py
|
99e4ac4b61ae293181607c9a7012cb3c05371e32
|
[] |
no_license
|
wally-wally/TIL
|
93fc1d0e3bc7d030341ed54155294c68c48b4c7d
|
936783bc86f563646c0398c24e2fcaa707f0ed23
|
refs/heads/master
| 2023-04-28T08:59:48.235747
| 2023-04-12T12:06:52
| 2023-04-12T12:06:52
| 195,918,111
| 40
| 7
| null | 2020-09-29T16:20:46
| 2019-07-09T02:31:02
|
Python
|
UTF-8
|
Python
| false
| false
| 647
|
py
|
import sys
sys.stdin = open('input_1256.txt', 'r')
for test_case in range(int(input())):
alphabet = [[] for _ in range(26)]
K = int(input())
string_data = input()
for i in range(len(string_data)):
sub_data = string_data[i:]
if sub_data not in alphabet[ord(sub_data[0]) - 97]:
alphabet[ord(sub_data[0]) - 97].append(sub_data)
print(alphabet)
order = 0
for alpha in alphabet:
order += len(alpha)
if order >= K:
print('#{} {}'.format(test_case + 1, sorted(alpha, reverse=True)[order - K]))
break
else:
print('#{} none'.format(test_case + 1))
|
[
"wallys0213@gmail.com"
] |
wallys0213@gmail.com
|
7e2dbeb214ee92c378f430cf5cc247563ad2cd30
|
412a330e85ad845a79732277e291acb087d7caaa
|
/src/bs_basic/demo03.py
|
b238facde660a7b386d60d19f49a7941aed3e694
|
[] |
no_license
|
zhaopufeng/python_scrawler
|
d3f1284d0f25d6c09fb1c0c35f7c0c72e6c7602e
|
04efb6ea7646ccc9281244468d892519c5a46d2d
|
refs/heads/master
| 2022-01-08T13:38:32.020065
| 2019-05-29T00:02:38
| 2019-05-29T00:02:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 937
|
py
|
# 读取和设置节点的属性
html = '''
<html>
<head><title>index</title></head>
<body attr='test xyz' class='style1 style2'>
<a rel='ok1 ok2 ok3' class='a1 a2' href='a.html'>first page</a>
<p>
<a href='b.html'>second page</a>
<p>
<a href='c.html'>third page</a>
<p>
<x k='123' attr1='hello' attr2='world'>hello</x>
</body>
</html>
'''
from bs4 import *
soup = BeautifulSoup(html,'lxml')
print(type(soup.body.attrs))
print('body.class','=',soup.body['class'])
print('body.attr','=',soup.body['attr'])
print('a.class','=',soup.a['class'])
print('x.attr1','=',soup.x['attr1'])
soup.body['class'] = ['x','y','z']
#print(soup.body)
#soup.body['class'] = 'xyz123 uio'
#print(soup.body)
soup.body['class'].append('ok')
print(soup.body)
#soup.body['ok'] = '443'
#del soup.body['class']
#print(soup.body)
print(soup.a['rel'])
# rel,rev,accept-charset,headers,accesskey
|
[
"helloworld@126.com"
] |
helloworld@126.com
|
70ed2e78c1bdba1fb9adfa58a67b26b2e1e983e8
|
fc8137f6a4df69640657a0af5d7201de3c6eb261
|
/accepted/LRU Cache.py
|
18dd102997ba63e17d76e3d14388bc4477cb9995
|
[] |
no_license
|
hustlrr/leetcode
|
68df72b49ee3bbb9f0755028e024cc9fea2c21aa
|
56e33dff3918e371f14d6f7ef03f8951056cc273
|
refs/heads/master
| 2020-04-12T08:14:25.371761
| 2017-01-01T12:19:34
| 2017-01-01T12:19:34
| 77,119,341
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
# coding=utf-8
from collections import OrderedDict
class LRUCache(object):
def __init__(self, capacity):
"""
:type capacity: int
"""
self.capacity = capacity
self.cache = OrderedDict()
def get(self, key):
"""
:rtype: int
"""
value = self.cache.pop(key, default =None)
if value is None:
return -1
self.cache[key] = value
return value
def set(self, key, value):
"""
:type key: int
:type value: int
:rtype: nothing
"""
if self.cache.pop(key, default=None) is None and len(self.cache) == self.capacity:
self.cache.popitem(last=False) # 先进先出
self.cache[key] = value
|
[
"823729390@qq.com"
] |
823729390@qq.com
|
e19465b00dea86d31c9705849f57f03e7d300d22
|
ba602dc67ad7bb50133aeb312f3c6c54627b3dec
|
/data/3919/WA_py/508306.py
|
1daf789b3af32025e1f3a08fbb540b77f5386101
|
[] |
no_license
|
Dearyyyyy/TCG
|
0d21d89275906157372d775f33309ce337e6bc95
|
7b80de16de2d3f5d95a7c4ed95d45a9e38882e67
|
refs/heads/master
| 2020-12-27T23:19:44.845918
| 2020-02-04T01:59:23
| 2020-02-04T01:59:23
| 238,101,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
# coding=utf-8
a,b,c=input().split()
d,e,f=input().split()
a=int(a)
b=int(b)
c=int(c)
d=int(d)
e=int(e)
f=int(f)
if a+b>c and a+c>b and c+b>a:
if a==b or a==c or b==c:
print("DY")
elif a*a+b*b==c*c or a*a+c*c==b*b or b*b+c*c==a*a:
print("ZJ")
elif a==b==c:
print("DB")
else:
print("PT")
else:
print("ERROR")
while True:
if d+e>f and d+f>e and e+f>d:
if d==e or d==f or e==f:
print("DY")
elif d*d+e*e==f*f or d*d+f*f==e*e or f*f+e*e==d*d:
print("ZJ")
elif d==e==f:
print("DB")
else:
print("PT")
else:
print("ERROR")
break
|
[
"543271544@qq.com"
] |
543271544@qq.com
|
1224931d246e52a66ddfe9428645e7a3d0e3fcee
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_4335.py
|
454b8cb811aaf1aaa5ceae079d6b859bb77e86e8
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
# returning a single instance of a regex object's contents
line.replace('<a href="' + test_str + '">', '<a href="' + re_string + '">')
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
1a3a8531445dfd974cb045babbdf1ceea1b12e4d
|
084177c601eeb7ce99a343b94cbad8eb15cb7f95
|
/flask/jinja2/sample/sample.py
|
f3f84ff17d340f59d35709ea8617ea5f1d481738
|
[] |
no_license
|
CaesarLinsa/flask_learn_note
|
bb82360c1ca15a48ba136c460b6b6159a8ff4034
|
260ae68ed6494f995a75f21f16fc493d10031a2a
|
refs/heads/master
| 2022-09-28T03:38:23.626341
| 2020-02-07T08:42:52
| 2020-02-07T08:42:52
| 237,619,223
| 0
| 0
| null | 2022-09-16T18:17:46
| 2020-02-01T13:30:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
# -*- coding: utf-8 -*-
from flask import Flask, render_template, request
from flask_script import Manager
from livereload import Server
from flask_bootstrap import Bootstrap
from flask_nav import Nav
from flask_nav.elements import *
app = Flask(__name__)
manager = Manager(app)
Bootstrap(app)
nav = Nav()
nav.register_element('top', Navbar('Flask入门',
View('主页', 'index'),
View('登录', 'login')
))
nav.init_app(app)
@app.route("/")
def index():
return render_template("index.html", title="hello world")
@app.route('/login', methods=["GET", "POST"])
def login():
from forms import LoginForm
form = LoginForm()
if request.method == "POST":
form_obj = LoginForm(request.form)
if form_obj.validate():
ers = request.form.to_dict()
print(ers)
print(form_obj.data)
return "登录成功"
return render_template("login.html", form=form)
if __name__ == '__main__':
live_server = Server(app.wsgi_app)
live_server.watch('**/*.*')
live_server.serve(open_url_delay=True)
|
[
"Caesar_Linsa@163.com"
] |
Caesar_Linsa@163.com
|
c19c4b35c726c1dfed536061da95efa490d4e473
|
a110cda0dd755a0aeeccaa349de5b7c8f836f7d9
|
/005_PrintingPDFs/renameFiles.py
|
87db8921f363b65d6f0c00339428f095356404ef
|
[] |
no_license
|
ksobon/archi-lab
|
26d93ef07e4f571e73a78bc40299edd3dc84c2a6
|
9a8a57eccca899ace78a998dc7698ff7754fae6b
|
refs/heads/master
| 2021-01-15T09:37:06.045588
| 2020-06-03T15:55:46
| 2020-06-03T15:55:46
| 26,090,112
| 6
| 5
| null | 2020-02-09T04:24:41
| 2014-11-02T19:02:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,422
|
py
|
#Copyright(c) 2015, Konrad Sobon
# @arch_laboratory, http://archi-lab.net
import clr
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
# Import DocumentManager and TransactionManager
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
doc = DocumentManager.Instance.CurrentDBDocument
uiapp = DocumentManager.Instance.CurrentUIApplication
app = uiapp.Application
# Import RevitAPI
clr.AddReference("RevitAPI")
import Autodesk
from Autodesk.Revit.DB import *
from Autodesk.Revit import *
import System
import sys
pyt_path = r'C:\Program Files (x86)\IronPython 2.7\Lib'
sys.path.append(pyt_path)
import os
filePath = IN[0]
identifiers = IN[1]
newNames = IN[2]
RunIt = IN[3]
files = os.listdir(filePath)
if RunIt:
message = "Success"
for file in files:
currentFileName = filePath + "\\" + file
for i, j in zip(identifiers, newNames):
newFileName = filePath + "\\" + j
if i in file and currentFileName != newFileName:
try:
os.rename(currentFileName, newFileName)
except:
message = "Your intended file name is not a compatible file name. Make sure that you are not strings like..."
pass
else:
message = "Please set RunIt to True."
#docName = uiapp.ActiveUIDocument.Document.Title
#Assign your output to the OUT variable
OUT = message
|
[
"ksobon1986@gmail.com"
] |
ksobon1986@gmail.com
|
e9bf9486a87bbe6c79f4f9baac13b0737938295b
|
9a553930cf5fc5c9a39cbf2373f9a16b6a3461f7
|
/example03/blog/views.py
|
86dbf1b2bd3da2459a45de85d94300affe07e4de
|
[] |
no_license
|
lee-seul/django_example
|
8ad45ad277d2e69b0108b7609be7fd37de6540f2
|
bf2736e42f0a03e603f5a34eab89bae0ed43d0a3
|
refs/heads/master
| 2021-01-12T06:30:18.723640
| 2017-05-17T09:55:43
| 2017-05-17T09:55:43
| 77,370,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
from django.views.generic import ListView, DetailView
from blog.models import Post
class PostLV(ListView):
model = Post
template_name = 'blog/post_all.html'
context_object_name = 'posts'
paginate_by = 2
class PostDV(DetailView):
model = Post
|
[
"blacksangi14@naver.com"
] |
blacksangi14@naver.com
|
c546d06a29da7625de0d5f87dedab6c1c3b88244
|
90f52d0348aa0f82dc1f9013faeb7041c8f04cf8
|
/wxPython3.0 Docs and Demos/demo/DelayedResult.py
|
a8342a9a2c2f81a93a8e289ffba1eaa8a9ed8cce
|
[] |
no_license
|
resource-jason-org/python-wxPythonTool
|
93a25ad93c768ca8b69ba783543cddf7deaf396b
|
fab6ec3155e6c1ae08ea30a23310006a32d08c36
|
refs/heads/master
| 2021-06-15T10:58:35.924543
| 2017-04-14T03:39:27
| 2017-04-14T03:39:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,014
|
py
|
"""
This demonstrates a simple use of delayedresult: get/compute
something that takes a long time, without hanging the GUI while this
is taking place.
The top button runs a small GUI that uses wx.lib.delayedresult.startWorker
to wrap a long-running function into a separate thread. Just click
Get, and move the slider, and click Get and Abort a few times, and
observe that GUI responds. The key functions to look for in the code
are startWorker() and __handleResult().
The second button runs the same GUI, but without delayedresult. Click
Get: now the get/compute is taking place in main thread, so the GUI
does not respond to user actions until worker function returns, it's
not even possible to Abort.
"""
import wx
import wx.lib.delayedresult as delayedresult
class FrameSimpleDelayedBase(wx.Frame):
def __init__(self, *args, **kwds):
wx.Frame.__init__(self, *args, **kwds)
pnl = wx.Panel(self)
self.checkboxUseDelayed = wx.CheckBox(pnl, -1, "Using delayedresult")
self.buttonGet = wx.Button(pnl, -1, "Get")
self.buttonAbort = wx.Button(pnl, -1, "Abort")
self.slider = wx.Slider(pnl, -1, 0, 0, 10, size=(100,-1),
style=wx.SL_HORIZONTAL|wx.SL_AUTOTICKS)
self.textCtrlResult = wx.TextCtrl(pnl, -1, "", style=wx.TE_READONLY)
self.checkboxUseDelayed.SetValue(1)
self.checkboxUseDelayed.Enable(False)
self.buttonAbort.Enable(False)
vsizer = wx.BoxSizer(wx.VERTICAL)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
vsizer.Add(self.checkboxUseDelayed, 0, wx.ALL, 10)
hsizer.Add(self.buttonGet, 0, wx.ALL, 5)
hsizer.Add(self.buttonAbort, 0, wx.ALL, 5)
hsizer.Add(self.slider, 0, wx.ALL, 5)
hsizer.Add(self.textCtrlResult, 0, wx.ALL, 5)
vsizer.Add(hsizer, 0, wx.ALL, 5)
pnl.SetSizer(vsizer)
vsizer.SetSizeHints(self)
self.Bind(wx.EVT_BUTTON, self.handleGet, self.buttonGet)
self.Bind(wx.EVT_BUTTON, self.handleAbort, self.buttonAbort)
class FrameSimpleDelayed(FrameSimpleDelayedBase):
"""This demos simplistic use of delayedresult module."""
def __init__(self, *args, **kwargs):
FrameSimpleDelayedBase.__init__(self, *args, **kwargs)
self.jobID = 0
self.abortEvent = delayedresult.AbortEvent()
self.Bind(wx.EVT_CLOSE, self.handleClose)
def setLog(self, log):
self.log = log
def handleClose(self, event):
"""Only needed because in demo, closing the window does not kill the
app, so worker thread continues and sends result to dead frame; normally
your app would exit so this would not happen."""
if self.buttonAbort.IsEnabled():
self.log( "Exiting: Aborting job %s" % self.jobID )
self.abortEvent.set()
self.Destroy()
def handleGet(self, event):
"""Compute result in separate thread, doesn't affect GUI response."""
self.buttonGet.Enable(False)
self.buttonAbort.Enable(True)
self.abortEvent.clear()
self.jobID += 1
self.log( "Starting job %s in producer thread: GUI remains responsive"
% self.jobID )
delayedresult.startWorker(self._resultConsumer, self._resultProducer,
wargs=(self.jobID,self.abortEvent), jobID=self.jobID)
def _resultProducer(self, jobID, abortEvent):
"""Pretend to be a complex worker function or something that takes
long time to run due to network access etc. GUI will freeze if this
method is not called in separate thread."""
import time
count = 0
while not abortEvent() and count < 50:
time.sleep(0.1)
count += 1
return jobID
def handleAbort(self, event):
"""Abort the result computation."""
self.log( "Aborting result for job %s" % self.jobID )
self.buttonGet.Enable(True)
self.buttonAbort.Enable(False)
self.abortEvent.set()
def _resultConsumer(self, delayedResult):
jobID = delayedResult.getJobID()
assert jobID == self.jobID
try:
result = delayedResult.get()
except Exception, exc:
self.log( "Result for job %s raised exception: %s" % (jobID, exc) )
return
# output result
self.log( "Got result for job %s: %s" % (jobID, result) )
self.textCtrlResult.SetValue(str(result))
# get ready for next job:
self.buttonGet.Enable(True)
self.buttonAbort.Enable(False)
class FrameSimpleDirect(FrameSimpleDelayedBase):
"""This does not use delayedresult so the GUI will freeze while
the GET is taking place."""
def __init__(self, *args, **kwargs):
self.jobID = 1
FrameSimpleDelayedBase.__init__(self, *args, **kwargs)
self.checkboxUseDelayed.SetValue(False)
def setLog(self, log):
self.log = log
def handleGet(self, event):
"""Use delayedresult, this will compute result in separate
thread, and will affect GUI response because a thread is not
used."""
self.buttonGet.Enable(False)
self.buttonAbort.Enable(True)
self.log( "Doing job %s without delayedresult (same as GUI thread): GUI hangs (for a while)" % self.jobID )
result = self._resultProducer(self.jobID)
self._resultConsumer( result )
def _resultProducer(self, jobID):
"""Pretend to be a complex worker function or something that takes
long time to run due to network access etc. GUI will freeze if this
method is not called in separate thread."""
import time
time.sleep(5)
return jobID
def handleAbort(self, event):
"""can never be called"""
pass
def _resultConsumer(self, result):
# output result
self.log( "Got result for job %s: %s" % (self.jobID, result) )
self.textCtrlResult.SetValue(str(result))
# get ready for next job:
self.buttonGet.Enable(True)
self.buttonAbort.Enable(False)
self.jobID += 1
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
class TestPanel(wx.Panel):
def __init__(self, parent, log):
self.log = log
wx.Panel.__init__(self, parent, -1)
vsizer = wx.BoxSizer(wx.VERTICAL)
b = wx.Button(self, -1, "Long-running function in separate thread")
vsizer.Add(b, 0, wx.ALL, 5)
self.Bind(wx.EVT_BUTTON, self.OnButton1, b)
b = wx.Button(self, -1, "Long-running function in GUI thread")
vsizer.Add(b, 0, wx.ALL, 5)
self.Bind(wx.EVT_BUTTON, self.OnButton2, b)
bdr = wx.BoxSizer()
bdr.Add(vsizer, 0, wx.ALL, 50)
self.SetSizer(bdr)
self.Layout()
def OnButton1(self, evt):
frame = FrameSimpleDelayed(self, title="Long-running function in separate thread")
frame.setLog(self.log.WriteText)
frame.Show()
def OnButton2(self, evt):
frame = FrameSimpleDirect(self, title="Long-running function in GUI thread")
frame.setLog(self.log.WriteText)
frame.Show()
#---------------------------------------------------------------------------
def runTest(frame, nb, log):
win = TestPanel(nb, log)
return win
#---------------------------------------------------------------------------
overview = __doc__
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
|
[
"869999860@qq.com"
] |
869999860@qq.com
|
cb5613bfd8d6ec14e6bc38d726a3a675e9cbae55
|
b563023f73eec953afc43396bf1c26519d69a236
|
/web/components/commons/view_mixins.py
|
2aa435d87a2e250f12544365c4b1cb1dcd4b18ea
|
[
"MIT"
] |
permissive
|
pkdevbox/goodtables-web
|
bf3b18a9ab6e0394320ec9dfa6077e8e47d7a0c8
|
5fe41db5361b54e0a553dbea4cbb73fd55b6418c
|
refs/heads/master
| 2021-01-12T19:25:07.958490
| 2015-08-28T08:31:43
| 2015-08-28T08:31:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,356
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from werkzeug.datastructures import FileStorage
from flask import current_app as app
from goodtables.pipeline import Pipeline
from . import utilities
class RunPipelineMixin(object):
def run_pipeline(self, with_permalinks=False):
payload = utilities.clean_payload(utilities.get_runargs())
data = {}
data['sources'] = utilities.get_data_urls()
data['success'] = False
data['report'] = app.config['GOODTABLES_PIPELINE_BUILD_ERROR_RESPONSE']
if with_permalinks:
data['permalinks'] = utilities.get_report_permalinks(payload)
if isinstance(payload['data'], FileStorage):
payload['data'] = payload['data'].stream
# build and run a validation pipeline
try:
pipeline = utilities.get_pipeline(payload)
except Exception as e:
pipeline = None
data['report']['error_title'] = e.__class__.__name__
data['report']['error_message'] = e.msg
if isinstance(pipeline, Pipeline):
success, report = pipeline.run()
data.update({'success': success, 'report': report.generate()})
return data
|
[
"paulywalsh@gmail.com"
] |
paulywalsh@gmail.com
|
02fb262b75b7de58af4b869892556e16c808e01e
|
fdcd1058df2e42ce9a6c7a38b76757997f53cb2a
|
/muted/system/cmd_say.py
|
31d6ec251e2b21423c50c77712481ca4d2855a50
|
[
"MIT"
] |
permissive
|
LZJ861124/mute
|
afb12d516ae1a4106079b51999dd0aa484618b07
|
f278d9cd2e9c1a4551d5ecdffde919d22ab2f6bb
|
refs/heads/master
| 2020-05-26T09:41:57.238217
| 2019-05-23T08:19:16
| 2019-05-23T08:19:16
| 188,191,427
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 832
|
py
|
from __future__ import annotations
from typing import Type
from component.name import Name
from component.role import Role
from event.event import Event
from message.message import Message
from system.channel import Channel
from logcat.logcat import LogCat
class CmdSay:
@LogCat.log_func
def __init__(self, servant: Type[Handler]):
servant.on(Event.CMD_SAY, self._on_cmd_say)
@LogCat.log_func
def _on_cmd_say(
self, e: Event, entity: str = '', args: str = ''
) -> None:
if not args:
text = f'你想說什麼?'
Channel.toRole(entity, Message.TEXT, text)
else:
text = f'{Name.instance(entity).text}說:{" ".join(args)}'
role = Role.instance(entity)
Channel.toRoom(role.room, Message.TEXT, text)
# cmd_say.py
|
[
"ywchiao@gmail.com"
] |
ywchiao@gmail.com
|
63e71d8483e8d40290c82c2af36178fd719de51a
|
cfcd117378664e4bea080b3c1011a25a575b3d51
|
/hawc/apps/materialized/apps.py
|
c804446ee46e38f9bba133e2af6ddd04d9bf72c5
|
[
"MIT"
] |
permissive
|
shapiromatron/hawc
|
9d3a625da54d336334da4576bd5dac6915c18d4f
|
51177c6fb9354cd028f7099fc10d83b1051fd50d
|
refs/heads/main
| 2023-08-03T13:04:23.836537
| 2023-08-01T18:39:16
| 2023-08-01T18:39:16
| 25,273,569
| 25
| 15
|
NOASSERTION
| 2023-09-14T17:03:48
| 2014-10-15T21:06:33
|
Python
|
UTF-8
|
Python
| false
| false
| 214
|
py
|
from django.apps import AppConfig
class MaterializedViewsConfig(AppConfig):
name = "hawc.apps.materialized"
verbose_name = "Materialized Views"
def ready(self):
from . import signals # noqa
|
[
"noreply@github.com"
] |
shapiromatron.noreply@github.com
|
2e790eddb7e4e03c1d35f0ce13e45eca4d59f04c
|
99697559d046cdd04dd9068bd518e4da4177aaa2
|
/Empty/M797_All_Paths_From_Source_to_Target.py
|
1cf154c64c0e3ba9c99528c0ca04d71a5727b4b7
|
[] |
no_license
|
Azurisky/Leetcode
|
3e3621ef15f2774cfdfac8c3018e2e4701760c3b
|
8fa215fb0d5b2e8f6a863756c874d0bdb2cffa04
|
refs/heads/master
| 2020-03-18T22:46:35.780864
| 2018-10-07T05:45:30
| 2018-10-07T05:45:30
| 135,364,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
class Solution:
def allPathsSourceTarget(self, graph):
"""
:type graph: List[List[int]]
:rtype: List[List[int]]
"""
|
[
"andrew0704us@gmail.com"
] |
andrew0704us@gmail.com
|
048e3950baa1ef3fb53501de60e5d22aae233701
|
01c00a769156b010012ce6150c737be43a34a5a7
|
/RegressionOneApp/serializers.py
|
4176bbb946783bb0d02bdcd4b08ffaeedcef27e5
|
[] |
no_license
|
chelseatroy/RegressionOne
|
87844893f91aebe4488f23db9121498e6560ee53
|
85d2f2ff5e62f3b291adb575fb980e741223a5ee
|
refs/heads/master
| 2021-04-09T16:09:15.153337
| 2016-07-16T17:06:52
| 2016-07-16T17:06:52
| 62,819,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
from rest_framework import serializers
from RegressionOneApp.models import Thingamabob
class ThingamabobSerializer(serializers.ModelSerializer):
class Meta:
model = Thingamabob
fields = ('description', 'done', 'updated')
|
[
"chelsea.dommert@gmail.com"
] |
chelsea.dommert@gmail.com
|
e67c12e5d99b0eab27528388d44d661202817111
|
15d710d6de2033f95c9970f14c22aa0e4bab9647
|
/supervised/preprocessing/preprocessing_utils.py
|
2ee4c3efdb18ce473b28442df6650c345194372a
|
[
"MIT"
] |
permissive
|
mmejdoubi/mljar-supervised
|
3c9ea1c706e2b279502d57f68ba917c8c9de4890
|
59e7b5b1d005af98681335dbd323bb8b24a32075
|
refs/heads/master
| 2020-04-21T19:18:18.654469
| 2018-12-19T12:53:03
| 2018-12-19T12:53:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,110
|
py
|
import numpy as np
class PreprocessingUtilsException(Exception):
pass
class PreprocessingUtils(object):
CATEGORICAL = "categorical"
CONTINOUS = "continous"
DISCRETE = "discrete"
@staticmethod
def get_type(x):
if len(x.shape) > 1:
if x.shape[1] != 1:
raise PreprocessingUtilsException(
"Please select one column to get its type"
)
col_type = str(x.dtype)
data_type = PreprocessingUtils.CATEGORICAL
if col_type.startswith("float"):
data_type = PreprocessingUtils.CONTINOUS
elif col_type.startswith("int"):
data_type = PreprocessingUtils.DISCRETE
return data_type
@staticmethod
def get_most_frequent(x):
a = x.value_counts()
first = sorted(dict(a).items(), key=lambda x: -x[1])[0]
return first[0]
@staticmethod
def get_min(x):
return np.amin(np.nanmin(x))
@staticmethod
def get_mean(x):
return np.nanmean(x)
@staticmethod
def get_median(x):
return np.nanmedian(x)
|
[
"pplonski86@gmail.com"
] |
pplonski86@gmail.com
|
11bdfbe141524cc597b00398c0e83a11f79ed8d3
|
fd7863c9f2d1d3ede7a91d50419095224ab4598d
|
/torinometeo/core/templatetags/core_tags.py
|
fb03a9fc6bb21c1e2752c1953e5a3ff7b939d914
|
[] |
no_license
|
TorinoMeteo/tm-website
|
9b80344d83ef2aa7c4c820f2cea093fdaa9c77fb
|
a6becc62eaf5c96e146431631c0d081600e7c5d3
|
refs/heads/master
| 2023-08-21T14:46:35.825982
| 2023-08-09T07:25:07
| 2023-08-09T07:25:07
| 31,906,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
from django import template
from django.contrib.sites.shortcuts import get_current_site
from sorl.thumbnail.templatetags.thumbnail import ThumbnailNode
register = template.Library()
@register.filter()
def strip_img(html):
import re
TAG_RE = re.compile(r'<img.+?/>')
return TAG_RE.sub('', html)
@register.filter()
def absurl(url):
request = None
return ''.join(['http://', get_current_site(request).domain, str(url)])
@register.inclusion_tag('core/sharethis.html')
def sharethis(relative_url, title=''):
return {'url': relative_url, 'title': title}
def sorl_thumbnail(parser, token):
return ThumbnailNode(parser, token)
register.tag(sorl_thumbnail)
|
[
"abidibo@gmail.com"
] |
abidibo@gmail.com
|
6ccb93f049e1ea71254fac76ac7ef5977ace21c4
|
425b68346e1fbd20ced43a4c1f3bc284d66538f4
|
/adanet/core/evaluator.py
|
78f6fdc92e1e4b157536cf104b51b5b952588825
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
todun/adanet
|
b29981f20203660091c122c1c0c7bc684749423c
|
74106c51e0602bdd62b643f4d6c42a00142947bc
|
refs/heads/master
| 2020-06-25T00:53:51.073322
| 2019-07-26T21:49:36
| 2019-07-26T22:16:26
| 199,146,094
| 1
| 0
|
Apache-2.0
| 2019-07-27T09:46:57
| 2019-07-27T09:46:56
| null |
UTF-8
|
Python
| false
| false
| 3,261
|
py
|
"""An AdaNet evaluator implementation in Tensorflow using a single graph.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import logging
from adanet import tf_compat
import tensorflow as tf
class Evaluator(object):
"""Evaluates candidate ensemble performance.
Args:
input_fn: Input function returning a tuple of: features - Dictionary of
string feature name to `Tensor`. labels - `Tensor` of labels.
steps: Number of steps for which to evaluate the ensembles. If an
`OutOfRangeError` occurs, evaluation stops. If set to None, will iterate
the dataset until all inputs are exhausted.
Returns:
An :class:`adanet.Evaluator` instance.
"""
def __init__(self, input_fn, steps=None):
self._input_fn = input_fn
self._steps = steps
super(Evaluator, self).__init__()
@property
def input_fn(self):
"""Return the input_fn."""
return self._input_fn
@property
def steps(self):
"""Return the number of evaluation steps."""
return self._steps
def evaluate_adanet_losses(self, sess, adanet_losses):
"""Evaluates the given AdaNet objectives on the data from `input_fn`.
The candidates are fed the same batches of features and labels as
provided by `input_fn`, and their losses are computed and summed over
`steps` batches.
Args:
sess: `Session` instance with most recent variable values loaded.
adanet_losses: List of AdaNet loss `Tensors`.
Returns:
List of evaluated AdaNet losses.
"""
evals_completed = 0
if self.steps is None:
logging_frequency = 1000
elif self.steps < 10:
logging_frequency = 1
else:
logging_frequency = math.floor(self.steps / 10.)
adanet_losses = [
tf_compat.v1.metrics.mean(adanet_loss) for adanet_loss in adanet_losses
]
sess.run(tf_compat.v1.local_variables_initializer())
while True:
if self.steps is not None and evals_completed == self.steps:
break
try:
evals_completed += 1
if (evals_completed % logging_frequency == 0 or
self.steps == evals_completed):
logging.info("Ensemble evaluation [%d/%s]", evals_completed,
self.steps or "??")
sess.run(adanet_losses)
except tf.errors.OutOfRangeError:
logging.info("Encountered end of input after %d evaluations",
evals_completed)
break
# Losses are metric op tuples. Evaluating the first element is idempotent.
adanet_losses = [loss[0] for loss in adanet_losses]
return sess.run(adanet_losses)
|
[
"weill@google.com"
] |
weill@google.com
|
d2296d7676651e8e0b3c8042b94248e1dd922d48
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_138/1169.py
|
f23ae477662433aef243097ffc171b1263c142f0
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,006
|
py
|
import numpy as np
import sys
def evaluateLeadingTestCaseAndReturnAnswer():
global lines
N = int(lines.pop(0))
NaomiSetWar = sorted(map(lambda x: float(x), lines.pop(0).split(' ')))
KenSetWar = sorted(map(lambda x: float(x), lines.pop(0).split(' ')))
counterWar = 0
NaomiSetDeceitfulWar = list(NaomiSetWar)
KenSetDeceitfulWar = list(KenSetWar)
counterDeceitfulWar = 0
#Deceitful War
if N == 1:
if NaomiSetDeceitfulWar[0] < KenSetDeceitfulWar[0]:
counterDeceitfulWar = 0
else:
counterDeceitfulWar = 1
else:
for i in xrange(N):
n = NaomiSetDeceitfulWar.pop(0)
if n < KenSetDeceitfulWar[0]:
KenSetDeceitfulWar.pop(-1)
else:
KenSetDeceitfulWar.pop(0)
counterDeceitfulWar += 1
#War
if N == 1:
if NaomiSetWar[0] < KenSetWar[0]:
counterWar = 0
else:
counterWar = 1
else:
for i in xrange(N):
n = NaomiSetWar.pop(0)
if n < KenSetWar[-1]:
for j in KenSetWar:
if j > n:
KenSetWar.pop(KenSetWar.index(j))
break
else:
counterWar += len(NaomiSetWar) + 1
break
return (counterDeceitfulWar, counterWar)
def returnFormattedAnswer(caseNum, x):
g.write('Case #%d: %d %d\n' % (caseNum, x[0], x[1]))
if __name__=='__main__':
if len(sys.argv) != 3:
print 'Provide arg1: input file, arg2: output file.'
else:
f = open(sys.argv[1])
g = file(sys.argv[2], 'w')
lines = map(lambda x: x.strip('\n'), f.readlines())
numOfTestCases = int(lines.pop(0))
for i in xrange(1, numOfTestCases + 1):
returnFormattedAnswer(i, evaluateLeadingTestCaseAndReturnAnswer())
f.close()
g.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
87c425297ee0f88d7daca1b9febdf1f89e65da92
|
3529ecaa44a53172094ba13498097057c8972723
|
/Questiondir/696.count-binary-substrings/696.count-binary-substrings_123598239.py
|
74f334383e8c7d8ab23645fa15bf402b2364784a
|
[] |
no_license
|
cczhong11/Leetcode-contest-code-downloader
|
0681f0f8c9e8edd5371fd8d0a1d37dcc368566b6
|
db64a67869aae4f0e55e78b65a7e04f5bc2e671c
|
refs/heads/master
| 2021-09-07T15:36:38.892742
| 2018-02-25T04:15:17
| 2018-02-25T04:15:17
| 118,612,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
class Solution(object):
def countBinarySubstrings(self, s):
"""
:type s: str
:rtype: int
"""
if not s:
return 0
leftLength = [1]*len(s)
rightLength = [1]*len(s)
for i in range(1, len(s)):
if s[i] == s[i-1]:
leftLength[i] = leftLength[i-1] + 1
for i in reversed(range(len(s)-1)):
if s[i] == s[i+1]:
rightLength[i] = rightLength[i+1] + 1
ans = 0
for i in range(1, len(s)):
if s[i] == s[i-1]:
continue
ans += min(leftLength[i-1], rightLength[i])
return ans
|
[
"tczhong24@gmail.com"
] |
tczhong24@gmail.com
|
a4c98aaaef0c88024c048111c781ba6424cc35d7
|
2e07f6b94fc0f7a5cf55002040151b8745fd843d
|
/privious_learning_code/OS_Handling/os.tempnam() Method.py
|
d524bb204572241ac441fecbb617774578dd3cfc
|
[] |
no_license
|
LalithK90/LearningPython
|
a7e6404e900b7d66c663acc72cde3e3655d54ac7
|
ece38fdac88da66c8b76fe710b3df7d8635a3590
|
refs/heads/master
| 2023-06-09T22:32:16.674821
| 2021-06-27T18:55:00
| 2021-06-27T18:55:00
| 169,513,150
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
# Description
#
# The method tempnam() returns a unique path name that is reasonable for creating a temporary file.
# Syntax
#
# Following is the syntax for tempnam() method −
#
# os.tempnam(dir, prefix)
#
# Parameters
#
# dir − This is the dir where the temporary filename will be created.
#
# prefix − This is the prefix of the generated temporary filename.
#
# Return Value
#
# This method returns a unique path.
# Example
import os, sys
# prefix is tuts1 of the generated file
tmpfn = os.tempnam('/tmp/dir,'tut')
print("This is the unique path:")
print(tmpfn)
|
[
"asakahatapitiya@gmail.com"
] |
asakahatapitiya@gmail.com
|
16ddceaec23fd6336f6cedccdb2aebf64798ed80
|
2f330fc050de11676ab46b963b7878882e9b6614
|
/memsource_cli/models/page_dto_web_hook_dto.py
|
c508f9487b1dcee988c8b5bd9368135c2fd969fa
|
[
"Apache-2.0"
] |
permissive
|
zerodayz/memsource-cli-client
|
609f48c18a2b6daaa639d4cb8a61da43763b5143
|
c2574f1467539a49e6637c874e88d75c7ef789b3
|
refs/heads/master
| 2020-08-01T12:43:06.497982
| 2019-09-30T11:14:13
| 2019-09-30T11:14:13
| 210,999,654
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,386
|
py
|
# coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:support@memsource.com>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from memsource_cli.models.web_hook_dto import WebHookDto # noqa: F401,E501
class PageDtoWebHookDto(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'total_elements': 'int',
'total_pages': 'int',
'page_size': 'int',
'page_number': 'int',
'number_of_elements': 'int',
'content': 'list[WebHookDto]'
}
attribute_map = {
'total_elements': 'totalElements',
'total_pages': 'totalPages',
'page_size': 'pageSize',
'page_number': 'pageNumber',
'number_of_elements': 'numberOfElements',
'content': 'content'
}
def __init__(self, total_elements=None, total_pages=None, page_size=None, page_number=None, number_of_elements=None, content=None): # noqa: E501
"""PageDtoWebHookDto - a model defined in Swagger""" # noqa: E501
self._total_elements = None
self._total_pages = None
self._page_size = None
self._page_number = None
self._number_of_elements = None
self._content = None
self.discriminator = None
if total_elements is not None:
self.total_elements = total_elements
if total_pages is not None:
self.total_pages = total_pages
if page_size is not None:
self.page_size = page_size
if page_number is not None:
self.page_number = page_number
if number_of_elements is not None:
self.number_of_elements = number_of_elements
if content is not None:
self.content = content
@property
def total_elements(self):
"""Gets the total_elements of this PageDtoWebHookDto. # noqa: E501
:return: The total_elements of this PageDtoWebHookDto. # noqa: E501
:rtype: int
"""
return self._total_elements
@total_elements.setter
def total_elements(self, total_elements):
"""Sets the total_elements of this PageDtoWebHookDto.
:param total_elements: The total_elements of this PageDtoWebHookDto. # noqa: E501
:type: int
"""
self._total_elements = total_elements
@property
def total_pages(self):
"""Gets the total_pages of this PageDtoWebHookDto. # noqa: E501
:return: The total_pages of this PageDtoWebHookDto. # noqa: E501
:rtype: int
"""
return self._total_pages
@total_pages.setter
def total_pages(self, total_pages):
"""Sets the total_pages of this PageDtoWebHookDto.
:param total_pages: The total_pages of this PageDtoWebHookDto. # noqa: E501
:type: int
"""
self._total_pages = total_pages
@property
def page_size(self):
"""Gets the page_size of this PageDtoWebHookDto. # noqa: E501
:return: The page_size of this PageDtoWebHookDto. # noqa: E501
:rtype: int
"""
return self._page_size
@page_size.setter
def page_size(self, page_size):
"""Sets the page_size of this PageDtoWebHookDto.
:param page_size: The page_size of this PageDtoWebHookDto. # noqa: E501
:type: int
"""
self._page_size = page_size
@property
def page_number(self):
"""Gets the page_number of this PageDtoWebHookDto. # noqa: E501
:return: The page_number of this PageDtoWebHookDto. # noqa: E501
:rtype: int
"""
return self._page_number
@page_number.setter
def page_number(self, page_number):
"""Sets the page_number of this PageDtoWebHookDto.
:param page_number: The page_number of this PageDtoWebHookDto. # noqa: E501
:type: int
"""
self._page_number = page_number
@property
def number_of_elements(self):
"""Gets the number_of_elements of this PageDtoWebHookDto. # noqa: E501
:return: The number_of_elements of this PageDtoWebHookDto. # noqa: E501
:rtype: int
"""
return self._number_of_elements
@number_of_elements.setter
def number_of_elements(self, number_of_elements):
"""Sets the number_of_elements of this PageDtoWebHookDto.
:param number_of_elements: The number_of_elements of this PageDtoWebHookDto. # noqa: E501
:type: int
"""
self._number_of_elements = number_of_elements
@property
def content(self):
"""Gets the content of this PageDtoWebHookDto. # noqa: E501
:return: The content of this PageDtoWebHookDto. # noqa: E501
:rtype: list[WebHookDto]
"""
return self._content
@content.setter
def content(self, content):
"""Sets the content of this PageDtoWebHookDto.
:param content: The content of this PageDtoWebHookDto. # noqa: E501
:type: list[WebHookDto]
"""
self._content = content
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PageDtoWebHookDto, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PageDtoWebHookDto):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"cerninr@gmail.com"
] |
cerninr@gmail.com
|
c7bfd8a1db51a34b917578014b643d36a745a476
|
3011e024b5f31d6c747a2bd4a143bb6a0eeb1e1d
|
/chapter03/db_operation_demo/venv/Scripts/easy_install-script.py
|
0eb38f841eaf8e215cee3d2d5d44bb0c06bec793
|
[] |
no_license
|
yingkun1/python-django
|
a3084460a83682f3e0848d5b40c881f93961ecc2
|
08c9ed3771eb245ee9ff66f67cf28730d2675bbe
|
refs/heads/master
| 2022-12-11T12:33:20.788524
| 2019-06-12T09:30:59
| 2019-06-12T09:30:59
| 189,977,625
| 1
| 0
| null | 2022-11-22T02:57:01
| 2019-06-03T09:43:30
|
Python
|
UTF-8
|
Python
| false
| false
| 456
|
py
|
#!E:\python-django\chapter03\db_operation_demo\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
|
[
"925712087@qq.com"
] |
925712087@qq.com
|
6afded1246d1f7062c490a1205ece14505720572
|
a8750439f200e4efc11715df797489f30e9828c6
|
/codechef/KJCS2018_BOOK.py
|
b948dc53f60db9e60b446ba500360c8fc4f61da7
|
[] |
no_license
|
rajlath/rkl_codes
|
f657174305dc85c3fa07a6fff1c7c31cfe6e2f89
|
d4bcee3df2f501349feed7a26ef9828573aff873
|
refs/heads/master
| 2023-02-21T10:16:35.800612
| 2021-01-27T11:43:34
| 2021-01-27T11:43:34
| 110,989,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 895
|
py
|
# -*- coding: utf-8 -*-
# @Date : 2018-09-30 15:05:32
# @Author : raj lath (oorja.halt@gmail.com)
# @Link : link
# @Version : 1.0.0
from sys import stdin
from itertools import accumulate
max_val=int(10e12)
min_val=int(-10e12)
def read_int() : return int(stdin.readline())
def read_ints() : return [int(x) for x in stdin.readline().split()]
def read_str() : return input()
def read_strs() : return [x for x in stdin.readline().split()]
for _ in range(read_int()):
n, m = read_ints()
book = read_ints()
left = read_ints()
rite = read_ints()
limit = max(rite) + 1
beg = [0] * (limit)
end = [0] * (limit)
for x in book:
beg[x] += 1
end[x] += x
begs = list(accumulate(beg))
ends = list(accumulate(end))
for i in range(n):
print(begs[rite[i]] - begs[left[i] - 1], ends[rite[i]] - ends[left[i] - 1])
|
[
"raj.lath@gmail.com"
] |
raj.lath@gmail.com
|
1568a018a87eb7fb715c30958272328cd5f4e86c
|
9f2445e9a00cc34eebcf3d3f60124d0388dcb613
|
/2019-12-25-Parametersearch_Hay2011/plotcsv.py
|
4cf4ec481715b8ba3cc2c53eac9f70c8528747cc
|
[] |
no_license
|
analkumar2/Thesis-work
|
7ee916d71f04a60afbd117325df588908518b7d2
|
75905427c2a78a101b4eed2c27a955867c04465c
|
refs/heads/master
| 2022-01-02T02:33:35.864896
| 2021-12-18T03:34:04
| 2021-12-18T03:34:04
| 201,130,673
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,228
|
py
|
#exec(open('plotcsv.py').read())
import matplotlib as mpl
mpl.rcParams["savefig.directory"] = '/mnt/c/Analkumar2/Study/Biology/Neuroscience/2018 - 23 PhD Thesis/Thesis work'
import csv
import matplotlib.pyplot as plt
import numpy as np
import os
from neo.io import AxonIO
import pandas as pd
invidx = 1
Injectcurr = 300e-12
preStimTime = 0.5
foldername = os.path.basename(os.getcwd())
Pl = pd.read_csv(f'../../Output/{foldername}/Parametersdf.csv').tail(invidx).iloc[0]
Parameters = {key:Pl[key] for key in Pl.keys()}
Vtrace = list(pd.read_csv(f'../../Output/{foldername}/Vmvecdf.csv').tail(invidx).iloc[0])
ttrace = list(pd.read_csv(f'../../Output/{foldername}/tvecdf.csv').tail(invidx).iloc[0])
def exp_tracef(Injectcurr=150e-12):
global flnme
global exp_sampdur
global exp_samprate
global exp_samppoints
global exp_trace_injend
global exp_trace_injstart
stim1391 = ['Cell 3 of 181016.abf', 'cell 4 of 61016.abf', 'cell 4 of 111016.abf', 'cell 4 of 131016.abf', 'Cell 4 of 181016.abf', 'cell 5 of 61016.abf', 'Cell 5 of 181016.abf']
# flnme = 'Cell 3 of 10717.abf'
flnme = 'cell 4 of 61016.abf'
exp_tracefile = f'../../Raw_data/Deepanjali_data/WT step input cells/{flnme}'
reader = AxonIO(filename=exp_tracefile)
currno = int(Injectcurr*1e12/25+4)
seg = reader.read_block().segments[currno] # 10 means 150pA current
exp_trace = seg.analogsignals[0]
exp_samprate = float(exp_trace.sampling_rate)
exp_sampdur = float(exp_trace.t_stop) - float(exp_trace.t_start)
exp_samppoints = int(exp_samprate*exp_sampdur)
if flnme in stim1391:
exp_trace_injstart = 139.1e-3
exp_trace_injend = 639.1e-3
else:
exp_trace_injstart = 81.4e-3
exp_trace_injend = 581.4e-3
exp_trace = np.array(exp_trace).flatten()
return exp_trace
exp_trace = exp_tracef(Injectcurr=Injectcurr)
plt.plot(np.linspace(preStimTime-exp_trace_injstart,preStimTime+exp_sampdur-exp_trace_injstart,exp_samppoints), exp_trace*1e-3, label=flnme)
plt.plot(ttrace,Vtrace, label='Model')
plt.title('300pA injection')
plt.axis([0.4, 1.2, -0.100, 0.060])
plt.legend()
plt.xlabel('Time (s)')
plt.ylabel('Membrane potential (mV)')
print(Parameters)
plt.show()
|
[
"analkumar2@gmail.com"
] |
analkumar2@gmail.com
|
857d9434c9309e580b2b741fa15785895ffc5948
|
1040b320168c49e3fd784d93ff30923527582d26
|
/calm/dsl/api/vm_recovery_point.py
|
6c38001e2886ff7ea3c342f1f6e12a8c4d6bc643
|
[
"Apache-2.0"
] |
permissive
|
nutanix/calm-dsl
|
87eb8a82f202ec0c71b5c8d8fe49db29bdcf2cfc
|
56c52702cec4370f551785508d284e5cbe1a744a
|
refs/heads/master
| 2023-08-31T16:43:51.009235
| 2023-08-28T05:20:41
| 2023-08-28T05:20:41
| 227,190,868
| 41
| 59
|
Apache-2.0
| 2023-08-28T05:20:43
| 2019-12-10T18:38:58
|
Python
|
UTF-8
|
Python
| false
| false
| 195
|
py
|
from .resource import ResourceAPI
class VmRecoveryPointAPI(ResourceAPI):
def __init__(self, connection):
super().__init__(connection, resource_type="nutanix/v1/vm_recovery_points")
|
[
"abhijeet.kaurav@nutanix.com"
] |
abhijeet.kaurav@nutanix.com
|
ed009c63f5a3c7ece89552ccefba440deb74a17f
|
92b031f51f1c52c26d93987005b8209d9bb050a1
|
/mamba/pymtl/__init__.py
|
d775f90607b8d0da2431cc3660e6a9caecfb0394
|
[] |
no_license
|
cornell-brg/mamba-dac2018
|
e9353b142456768fcc0d3a9f9b3d29e162ad508e
|
01efaadc704a8abec9fa7d4b668005ee412a2353
|
refs/heads/master
| 2020-03-19T04:42:27.349346
| 2018-06-27T14:57:58
| 2018-06-27T15:00:29
| 135,856,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
from model.ConstraintTypes import U, M, RD, WR
from model.Connectable import Wire, InVPort, OutVPort, Interface
from model.RTLComponent import RTLComponent
from passes import SimRTLPass, PrintMetadataPass, EventDrivenPass
from datatypes import *
from datatypes import _bitwidths
__all__ = [
'U','M','RD','WR',
'Wire', 'InVPort', 'OutVPort', 'Interface',
'RTLComponent', 'SimRTLPass', 'PrintMetadataPass', 'EventDrivenPass',
'sext', 'zext', 'clog2', 'concat',
'mk_bits',
] + [ "Bits{}".format(x) for x in _bitwidths ]
from datatypes.bits_import import _use_pymtl_bits
if _use_pymtl_bits:
__all__ += [ 'Bits' ]
|
[
"sj634@cornell.edu"
] |
sj634@cornell.edu
|
d12f0dfc9cecdc462ead55f260a9a8185bd6b3bc
|
e573b586a921084f29a36f8e2de5afcae2c65ff8
|
/tasks/part_2/shape.py
|
2b170227c0348f5c73280085dea6db24fbffca36
|
[] |
no_license
|
HannaKulba/AdaptiveTraining_English
|
e69c8a0c444c1fa72b4783ba837cb3d9dc055d91
|
46497dc6827df37f4ebb69671912ef5b934ab6f0
|
refs/heads/master
| 2020-12-28T15:05:25.762072
| 2020-02-19T14:39:22
| 2020-02-19T14:39:22
| 238,381,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
n = int(input())
if n == 1:
print('You have chosen a square')
elif n == 2:
print('You have chosen a circle')
elif n == 3:
print('You have chosen a triangle')
elif n == 4:
print('You have chosen a rhombus')
else:
print('There is no such shape!')
# if n == 0:
# print('do not move')
# elif n == 1:
# print('move up')
# elif n == 2:
# print('move down')
# elif n == 3:
# print('move left')
# elif n == 4:
# print('move right')
# else:
# print('error!')
|
[
"anna.mirraza@gmail.com"
] |
anna.mirraza@gmail.com
|
c702956a4a2e74d48fbc44229dde4a04631056e3
|
ec84619271eac42481231218c9ee653dec99adad
|
/3. Linear Data Structure/146. Lowercase to Uppercase II.py
|
1a3326e9a6fb166e2b6f1f1534d527cf102ead1c
|
[] |
no_license
|
LingHsiLiu/Algorithm0
|
19a968fffb5466022f9856c36af0364da6472434
|
f438e828dc9dd6196ee5809eb8fac21ccb688bf2
|
refs/heads/master
| 2020-04-04T17:55:48.182172
| 2019-01-02T19:06:57
| 2019-01-02T19:06:57
| 156,142,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
# 146. Lowercase to Uppercase II
# Implement an upper method to convert all characters in a string to uppercase.
# You should ignore the characters not in alphabet.
# Example
# Given "abc", return "ABC".
# Given "aBc", return "ABC".
# Given "abC12", return "ABC12".
class Solution:
"""
@param str: A string
@return: A string
"""
def lowercaseToUppercase2(self, str):
# write your code here
|
[
"noreply@github.com"
] |
LingHsiLiu.noreply@github.com
|
319d58ed3197580898e4249bda21bfd1cec0797b
|
bad04904c1c939be61682aebabb0e37409011b3f
|
/authentication/views/admin.py
|
c64770c7f650d395b8cdb30fe504657672abe7ae
|
[] |
no_license
|
oosprey/SRPA
|
499213d39f41141dc35ab9170510a1f18332b17c
|
37017f600f61ea1b4a8c8c928ca65ab2c1f00d29
|
refs/heads/master
| 2021-01-23T22:00:31.865443
| 2017-10-12T03:16:27
| 2017-10-12T03:16:27
| 102,920,120
| 0
| 0
| null | 2017-09-09T02:17:00
| 2017-09-09T02:17:00
| null |
UTF-8
|
Python
| false
| false
| 5,429
|
py
|
#!/usr/bin/env python3
# coding: UTF-8
# Author: David
# Email: youchen.du@gmail.com
# Created: 2017-10-06 14:12
# Last modified: 2017-10-07 17:05
# Filename: admin.py
# Description:
from django.views.generic import TemplateView, CreateView, UpdateView
from django.views.generic import DetailView, ListView
from django.contrib.auth.mixins import UserPassesTestMixin
from django.contrib.auth.models import User, Group
from django.urls import reverse_lazy
from django.http import HttpResponseRedirect
from const.models import Workshop
from authentication import USER_IDENTITY_TEACHER
from authentication.forms import TeacherRegisterForm, TeacherUpdateForm
from authentication.forms import WorkshopUpdateForm
from authentication.models import TeacherInfo
from tools.utils import assign_perms
class AdminBase(UserPassesTestMixin):
raise_exception = True
def test_func(self):
user = self.request.user
return user.is_authenticated and user.is_superuser
class AdminIndex(AdminBase, TemplateView):
template_name = 'authentication/admin/index.html'
class AdminTeacherAdd(AdminBase, CreateView):
    """Create a teacher account: a User plus its TeacherInfo, with object perms."""
    template_name = 'authentication/admin/teacher_add.html'
    form_class = TeacherRegisterForm
    identity = USER_IDENTITY_TEACHER
    success_url = reverse_lazy('auth:admin:index')
    form_post_url = reverse_lazy('auth:admin:teacher:add')
    # Permission "codename prefix" passed to assign_perms below.
    info_name = 'teacherinfo'

    def form_valid(self, form):
        """Create the auth User, bind it to the TeacherInfo instance, save,
        and grant the new user update/view permissions on his own info."""
        username = form.cleaned_data['username']
        first_name = form.cleaned_data['name']
        password = form.cleaned_data['password']
        email = form.cleaned_data['email']
        user = User.objects.create_user(
            email=email,
            username=username,
            password=password,
            first_name=first_name)
        # Attach the freshly created user and the teacher identity before saving.
        form.instance.user = user
        form.instance.identity = self.identity
        self.object = form.save()
        assign_perms(self.info_name, user, self.object,
                     perms=['update', 'view'])
        return HttpResponseRedirect(self.get_success_url())

    def get_context_data(self, **kwargs):
        # Expose the POST target so the template can render the form action.
        kwargs['form_post_url'] = self.form_post_url
        return super(AdminTeacherAdd, self).get_context_data(**kwargs)
class AdminTeacherUpdate(AdminBase, UpdateView):
    """Edit a TeacherInfo (looked up by uid) and related User/UserInfo fields."""
    model = TeacherInfo
    form_class = TeacherUpdateForm
    template_name = 'authentication/admin/teacher_info_update.html'
    slug_field = 'uid'
    slug_url_kwarg = 'uid'
    success_url = reverse_lazy('auth:admin:teacher:list', args=(1,))

    def get_initial(self):
        # Pre-populate fields that live on the related User/UserInfo rows
        # rather than on TeacherInfo itself.
        kwargs = {}
        kwargs['first_name'] = self.object.user_info.user.first_name
        kwargs['phone'] = self.object.user_info.phone
        kwargs['email'] = self.object.user_info.user.email
        return kwargs

    def form_valid(self, form):
        """Save TeacherInfo, then push phone/email back onto the related rows.

        NOTE(review): first_name is shown via get_initial but is not written
        back here -- confirm whether that is intentional.
        """
        self.object = form.save()
        cleaned_data = form.cleaned_data
        user_info = self.object.user_info
        user_info.phone = cleaned_data['phone']
        user_info.save()
        user = user_info.user
        user.email = cleaned_data['email']
        user.save()
        return HttpResponseRedirect(self.get_success_url())
class AdminTeacherDetail(AdminBase, DetailView):
    """Read-only detail page for one TeacherInfo, looked up by uid."""
    model = TeacherInfo
    template_name = 'authentication/admin/teacher_info_detail.html'
    slug_field = 'uid'
    slug_url_kwarg = 'uid'
class AdminTeacherList(AdminBase, ListView):
    """Paginated list of teachers, ordered by the related user's first name."""
    model = TeacherInfo
    template_name = 'authentication/admin/teacher_info_list.html'
    paginate_by = 10
    ordering = 'user_info__user__first_name'
class AdminWorkshopAdd(AdminBase, CreateView):
    """Create a Workshop together with an auth Group named after its description."""
    model = Workshop
    template_name = 'authentication/admin/workshop_add.html'
    fields = ['desc']
    success_url = reverse_lazy('auth:admin:workshop:list', args=(1,))

    def form_valid(self, form):
        # Create (or reuse) the Group keyed by the workshop description and
        # attach it before saving the workshop itself.
        self.object = form.save(commit=False)
        group, status = Group.objects.get_or_create(name=self.object.desc)
        self.object.group = group
        self.object.save()
        return HttpResponseRedirect(self.get_success_url())
class AdminWorkshopUpdate(AdminBase, UpdateView):
    """Edit a Workshop (by uid) and replace its group's membership."""
    model = Workshop
    form_class = WorkshopUpdateForm
    template_name = 'authentication/admin/workshop_update.html'
    slug_field = 'uid'
    slug_url_kwarg = 'uid'
    success_url = reverse_lazy('auth:admin:workshop:list', args=(1,))

    def get_initial(self):
        # Pre-select the users currently in the workshop's group.
        kwargs = {}
        kwargs['group_users'] = self.object.group.user_set.all()
        return kwargs

    def form_valid(self, form):
        # Replace membership wholesale: remove every current member, then add
        # exactly the users picked in the form.
        cleaned_data = form.cleaned_data
        group = self.object.group
        old_users = User.objects.filter(groups__name=group)
        for user in old_users:
            user.groups.remove(group)
        for user in cleaned_data['group_users']:
            user.groups.add(group)
        return super(AdminWorkshopUpdate, self).form_valid(form)
class AdminWorkshopDetail(AdminBase, DetailView):
    """Detail page for one Workshop (by uid), including its group's members."""
    model = Workshop
    template_name = 'authentication/admin/workshop_detail.html'
    slug_field = 'uid'
    slug_url_kwarg = 'uid'

    def get_context_data(self, **kwargs):
        # Make the group's member list available to the template.
        context = super(AdminWorkshopDetail, self).get_context_data(**kwargs)
        context['group_users'] = self.object.group.user_set.all()
        return context
class AdminWorkshopList(AdminBase, ListView):
    """Paginated list of workshops, ordered by description."""
    model = Workshop
    template_name = 'authentication/admin/workshop_list.html'
    paginate_by = 10
    ordering = 'desc'
|
[
"youchen.du@gmail.com"
] |
youchen.du@gmail.com
|
c3b667c46fa1e0574626695cf60534a6c36e3f66
|
0ad7fa728c4a8eabb82edb7eb757a4bef3f0a19f
|
/ha/I2CInterface.py
|
9883e5e8fc196114b756733829d1bfdb8cceedfa
|
[] |
no_license
|
randyr505/ha
|
bb5d8d38441cee41a0c961a6e9d92e8cbbf89a0f
|
6771c50e3141c4162c89569fa4bce95b5f121867
|
refs/heads/master
| 2020-12-29T02:54:43.339856
| 2016-05-23T00:51:47
| 2016-05-23T00:51:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
import smbus
from ha.HAClasses import *
class I2CInterface(HAInterface):
    """HAInterface backed by the Linux SMBus (I2C) kernel driver.

    Every read method takes *addr* as a tuple -- presumably
    (device_address, register) as required by the smbus calls it unpacks
    into -- TODO confirm against callers.
    """
    def __init__(self, name, interface=None, event=None, bus=0):
        # `bus` selects the /dev/i2c-<bus> adapter number.
        HAInterface.__init__(self, name, interface=interface, event=event)
        self.bus = smbus.SMBus(bus)

    def read(self, addr):
        # Read one byte from the device/register pair.
        # NOTE(review): the bare except maps *every* failure (including
        # programming errors and KeyboardInterrupt) to 0 -- consider
        # narrowing to IOError/OSError.
        try:
            debug('debugI2C', self.name, "readByte", addr)
            return self.bus.read_byte_data(*addr)
        except:
            return 0

    def readWord(self, addr):
        # Read a 16-bit word; returns 0 on any failure (see note on read()).
        try:
            debug('debugI2C', self.name, "readWord", addr)
            return self.bus.read_word_data(*addr)
        except:
            return 0

    def readBlock(self, addr, length):
        # Read an i2c block of `length` bytes; returns 0 (not a list) on
        # failure, so callers must handle both shapes.
        try:
            debug('debugI2C', self.name, "readBlock", addr, length)
            return self.bus.read_i2c_block_data(*addr+(length,))
        except:
            return 0

    def write(self, addr, value):
        # Write one byte; unlike the reads, errors propagate to the caller.
        debug('debugI2C', self.name, "writeByte", addr, value)
        self.bus.write_byte_data(*addr+(value,))

    def writeWord(self, addr, value):
        # Write a 16-bit word.
        debug('debugI2C', self.name, "writeWord", addr, value)
        self.bus.write_word_data(*addr+(value,))

    def writeQuick(self, addr):
        # SMBus "quick" transaction (no data payload).
        debug('debugI2C', self.name, "writeQuick", addr)
        self.bus.write_quick(addr)
|
[
"joe@thebuehls.com"
] |
joe@thebuehls.com
|
62efaef4c35f83861dd5de7f6c954b1a4eb15c55
|
11ff14c118240e87c4804d0373e4656d0683d479
|
/RatToolAgent/ZapAgent.py
|
77c8328884d695c9fe760d84041ff9d8482c5aa6
|
[] |
no_license
|
wxmmavis/OS3.1
|
e3028d9c79d5a1a17449fea6380fcdda902bdec7
|
26d954344207a82d2298821c3c4f01302393dc7e
|
refs/heads/master
| 2020-03-25T20:07:11.225493
| 2018-08-13T03:20:57
| 2018-08-13T03:20:57
| 144,115,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,868
|
py
|
import subprocess as sp
import time
import threading
import pdb
import re
from RatToolAgent import _adapter_name
_zap_dict = dict(thread=None,
zap_cmd=r'zap -s%s -d%s -X%s',
pskill_cmd=r'C:\bin\win\pskill')
def send_zap_traffic(source_ip="", destination_ip="", duration="", speed="", proto="", length="", tos="", debug=False):
    """
    Send traffic through zap in a background thread.

    - source_ip: Specify the IP address of the source station.
    - destination_ip: Specify the IP address of the destination station.
    - duration: Test for specified number of seconds.
    - speed: Controls the rate in mbits/s for transmitting data (-r).
    - proto: 'tcp' switches zap to TCP mode (-t); anything else keeps the default.
    - length: payload length passed as -l.
    - tos: type-of-service value passed as -q.
    - debug: drop into pdb before building the command line.

    Returns the started LaunchZap thread; it is also stored in
    _zap_dict['thread'] so kill_zap_thread() can find it later.
    """
    if debug: pdb.set_trace()
    # Base command: zap -s<src> -d<dst> -X<duration>
    cmd = _zap_dict['zap_cmd'] % (source_ip, destination_ip, duration)
    if speed:
        cmd += ' -r%s' % speed
    if proto == 'tcp':
        cmd += ' -t'
    if length:
        cmd += " -l%s" % length
    if tos:
        cmd += " -q%s" % tos
    # "start" launches zap in its own console window (Windows shell).
    zap_thread = LaunchZap('start "zap traffic" %s' % cmd)
    _zap_dict['thread'] = zap_thread
    zap_thread.start()
    return zap_thread
def kill_zap_thread():
    """
    Stop zap by killing its process with pskill.

    Returns pskill's stdout when a zap thread was registered, otherwise None.
    """
    cmd = _zap_dict['pskill_cmd'] + " -t zap"
    if _zap_dict.get('thread') is not None:
        # BUG FIX: the original did `del(_zap_dict['thread'])`, removing the
        # key entirely, so the *next* call raised KeyError on the lookup
        # above.  Reset the slot to None instead.
        _zap_dict['thread'] = None
        output = sp.Popen(cmd, stdout=sp.PIPE).communicate()[0]
        return output
    return None
def get_sta_traffic_by_if_name(if_name='Wireless Network Connection'):
    """
    Get station traffic from cmd "netsh interface ip show interface".

    - if_name: user-friendly interface name.  NOTE: the argument is
      immediately overwritten by the module-level _adapter_name, so callers
      cannot actually choose the interface.

    Returns a tuple (In Octets, Out Octets) as strings parsed from netsh.
    Raises KeyError if no matching interface block was found.
    """
    #@author: Jane.Guo @since: 2013-11 fix bug to get adapter_name from RatToolAgent
    global _adapter_name
    if_name = _adapter_name
    result = {}
    cmd_line = "netsh interface ip show interface"
    output = sp.Popen(cmd_line, stdout=sp.PIPE).communicate()[0]
    # netsh separates interface blocks with a blank line; each block line
    # is "Key : Value".
    for if_info in re.split(r'\r\n\r\n', output):
        if re.findall(if_name, if_info):
            for line in if_info.split('\r\n'):
                k, v = re.split(r':\s+', line)
                result[k] = v
    return (result['In Octets'], result['Out Octets'])
class LaunchZap(threading.Thread):
    """Runs a zap command line in a background shell process."""
    tlock = threading.Lock()  # class-wide lock (not used in the visible code)
    id = 0                    # class-wide counter (not used in the visible code)

    def __init__(self, start_cmd):
        threading.Thread.__init__(self)
        self.command = start_cmd
        # status: 0 = not started, 1 = running, 2 = finished
        self.status = 0
        self.pid = -1

    def run(self):
        # Launch the command through the shell and capture its stdout once
        # the process has had a few seconds to start.
        self.pipe = sp.Popen(self.command, shell=True,
                             stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE)
        self.status = 1
        time.sleep(5)
        self.pid = self.pipe.pid
        # Blocks until the child closes stdout.
        self.data = self.pipe.stdout.read()
        self.status = 2

    def pid(self):
        # NOTE(review): this method is shadowed by the instance attribute
        # `self.pid` assigned in __init__/run, so it is unreachable through
        # an instance; `instance.pid` yields the int directly.
        return self.pid

    def isDone(self):
        # True once run() has finished (or never started).
        return (self.status != 1)
|
[
"1475806321@qq.com"
] |
1475806321@qq.com
|
2967013c3c0c4a820168109b8f5c6c19cdcfe04c
|
53396d12d606bebea71c149aed0150af7b17b6f5
|
/array/medium/215-kth-largest-element-in-an-array-1.py
|
af2f422c0b1f7c9ef583847bde0cd8c0f18028da
|
[] |
no_license
|
superggn/myleetcode
|
4c623bd9ad3892d826df73ad3b2c122e08aaa9e9
|
40ca33aefbf0cf746a2d0b7e7f52643ae39591be
|
refs/heads/master
| 2023-02-02T11:06:35.163570
| 2020-12-19T10:36:45
| 2020-12-19T10:36:45
| 322,821,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
"""
排序-普通版
https://leetcode-cn.com/problems/kth-largest-element-in-an-array/solution/pai-xu-by-powcai-2/
"""
import heapq
from typing import List
# 排序
class Solution:
    def findKthLargest(self, nums: List[int], k: int) -> int:
        """Return the k-th largest element of *nums* (1-indexed).

        heapq.nlargest keeps only a k-sized heap while scanning, so this
        runs in O(n log k) instead of the O(n log n) full sort of the
        original -- same result for every valid input.
        """
        return heapq.nlargest(k, nums)[-1]
|
[
"939401399@qq.com"
] |
939401399@qq.com
|
c6cb81d05d1327c650793835d04ddd9cd32a007c
|
750fa6642143723f4585a8668af701cccf053c5d
|
/barcodecop/barcodecop.py
|
d617f934941522f30ee36fabd89d40fe41fc19f7
|
[
"MIT"
] |
permissive
|
dhoogest/barcodecop
|
858c2cb9d553c4334e1edf67ed7d9723de59cfc5
|
586fb0df6889caef1bfe9d7cf2a138667465ca89
|
refs/heads/master
| 2021-01-25T06:25:19.933057
| 2017-03-13T19:24:45
| 2017-03-13T19:24:45
| 93,571,730
| 0
| 0
| null | 2017-06-06T23:08:28
| 2017-06-06T23:08:28
| null |
UTF-8
|
Python
| false
| false
| 5,274
|
py
|
#!/usr/bin/env python
"""Filter fastq files, limiting to exact barcode matches.
Input and output files may be compressed as indicated by a .bz2 or .gz
suffix.
"""
import argparse
import sys
from collections import Counter
from itertools import islice, tee, izip
import operator
import logging
from collections import namedtuple
from fastalite import fastqlite, Opener
try:
from . import __version__
except:
__version__ = ''
class VersionAction(argparse._VersionAction):
    """Write the version string to stdout and exit."""
    def __call__(self, parser, namespace, values, option_string=None):
        # Render the version through the parser's help formatter, then
        # write it explicitly to stdout before exiting with status 0.
        formatter = parser._get_formatter()
        formatter.add_text(parser.version if self.version is None else self.version)
        sys.stdout.write(formatter.format_help())
        sys.exit(0)
def filter(barcodes, seqs, bc_match, invert=False):
    """Yield records from *seqs* whose paired barcode equals *bc_match*
    (or differs from it when invert=True).

    The two streams are consumed in lockstep (Python 2 izip) and each
    pair's ids are asserted to agree.  NOTE: shadows the builtin `filter`.
    """
    compare = operator.ne if invert else operator.eq
    for bc, seq in izip(barcodes, seqs):
        assert bc.id == seq.id
        if compare(str(bc.seq), bc_match):
            yield seq
def seqdiff(s1, s2):
    """Render *s2* against reference *s1*.

    Positions where the two strings share the same alphabetic character
    become '.'; every other position (mismatch, or a match on a
    non-alphabetic character) shows the character from *s2*.  Identical
    strings are returned unchanged, and comparison stops at the shorter
    string (zip semantics).
    """
    if s1 == s2:
        return s1
    rendered = []
    for ref_char, obs_char in zip(s1, s2):
        if ref_char == obs_char and ref_char.isalpha():
            rendered.append('.')
        else:
            rendered.append(obs_char)
    return ''.join(rendered)
def as_fastq(seq):
    """Serialize a record with .description/.seq/.qual attributes as one
    four-line FASTQ entry (trailing newline included)."""
    return '@{}\n{}\n+\n{}\n'.format(seq.description, seq.seq, seq.qual)
def combine_dual_indices(file1, file2):
    """Merge two index-read fastq files into single records whose seq is
    '<seq1>+<seq2>'; the paired records' ids are asserted to agree."""
    Seq = namedtuple('Seq', ['id', 'seq'])
    for i1, i2 in izip(fastqlite(file1), fastqlite(file2)):
        assert i1.id == i2.id
        yield Seq(id=i1.id, seq=i1.seq + '+' + i2.seq)
def main(arguments=None):
    """Command-line entry point: determine the most common index barcode and
    filter the fastq given with -f/--fastq down to exact matches (or
    mismatches with --invert).

    Returns None; exits nonzero on usage errors or (with --strict) when the
    dominant barcode is below --min-pct-assignment.
    """
    parser = argparse.ArgumentParser(
        prog='barcodecop', description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        'index', nargs='+', type=Opener(), metavar='file.fastq[.bz2|.gz]',
        help='one or two files containing index reads in fastq format')
    parser.add_argument(
        '-f', '--fastq', type=Opener(), metavar='file.fastq[.bz2|.gz]',
        help='reads to filter in fastq format')
    parser.add_argument(
        '-o', '--outfile', default=sys.stdout, type=Opener('w'),
        help='output fastq')
    parser.add_argument(
        '--snifflimit', type=int, default=10000, metavar='N',
        help='read no more than N records from the index file [%(default)s]')
    parser.add_argument(
        '--head', type=int, metavar='N',
        help='limit the output file to N records')
    parser.add_argument(
        '--min-pct-assignment', type=float, default=90.0, metavar='PERCENT',
        help=("""warn (or fail with an error; see --strict) if the
        most common barcode represents less than PERCENT of the
        total [%(default)s]"""))
    parser.add_argument(
        '--strict', action='store_true', default=False,
        help=("""fail if conditions of --min-pct-assignment are not met"""))
    parser.add_argument(
        '--invert', action='store_true', default=False,
        help='include only sequences *not* matching the most common barcode')
    # parser.add_argument('--format', choices=['fasta', 'fastq'], default='fastq')
    parser.add_argument(
        '-c', '--show-counts', action='store_true', default=False,
        help='tabulate barcode counts and exit')
    parser.add_argument(
        '-q', '--quiet', action='store_true', default=False,
        help='minimize messages to stderr')
    parser.add_argument(
        '-V', '--version', action=VersionAction, version=__version__,
        help='Print the version number and exit')
    args = parser.parse_args(arguments)

    logging.basicConfig(
        format='%(message)s',
        level=logging.ERROR if args.quiet else logging.INFO)
    log = logging.getLogger(__name__)

    # Single index file is used as-is; dual indices are concatenated per read.
    if len(args.index) == 1:
        bcseqs = fastqlite(args.index[0])
    elif len(args.index) == 2:
        bcseqs = combine_dual_indices(*args.index)
    else:
        log.error('error: please specify either one or two index files')
        # BUG FIX: the original fell through with `bcseqs` undefined and
        # crashed with a NameError at the tee() call below; exit cleanly.
        sys.exit(1)

    # One copy of the stream to sniff the dominant barcode, one to filter with.
    bc1, bc2 = tee(bcseqs, 2)

    # determine the most common barcode
    barcode_counts = Counter([str(seq.seq) for seq in islice(bc1, args.snifflimit)])
    barcodes, counts = zip(*barcode_counts.most_common())
    most_common_bc = barcodes[0]
    most_common_pct = 100 * float(counts[0])/sum(counts)

    log.info('most common barcode: {} ({}/{} = {:.2f}%)'.format(
        most_common_bc, counts[0], sum(counts), most_common_pct))

    if args.show_counts:
        # Report mode: tabulate counts against the dominant barcode and stop.
        for bc, count in barcode_counts.most_common():
            print('{}\t{}\t{}'.format(bc, seqdiff(most_common_bc, bc), count))
        return None

    if most_common_pct < args.min_pct_assignment:
        msg = 'frequency of most common barcode is less than {}%'.format(
            args.min_pct_assignment)
        if args.strict:
            log.error('Error: ' + msg)
            sys.exit(1)
        else:
            log.warning('Warning: ' + msg)

    if not args.fastq:
        log.error('specify a fastq format file to filter using -f/--fastq')
        sys.exit(1)

    seqs = fastqlite(args.fastq)
    # `filter` here is the module-level generator, not the builtin.
    filtered = islice(filter(bc2, seqs, most_common_bc, args.invert), args.head)
    for seq in filtered:
        args.outfile.write(as_fastq(seq))
|
[
"noah.hoffman@gmail.com"
] |
noah.hoffman@gmail.com
|
67fd7ac430f02f90e57cf3651190c6e9dd7bb15f
|
ff6f60d02ed8d024f7b2db5c9eb4b1196ebf166b
|
/mysite/blog2/migrations/0002_article_pub_time.py
|
761ae2617117e36ba66c73bf9d7ffbcc593e45f4
|
[] |
no_license
|
cekong/learnit
|
43b707e347ff552754b6592e01dd106c98cd0cc5
|
b4111d6fee95960f7b7ca5421b7159cb6122ad2a
|
refs/heads/master
| 2020-03-25T13:53:37.848843
| 2019-08-29T06:46:48
| 2019-08-29T06:46:48
| 143,848,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
# Generated by Django 2.0.7 on 2018-10-25 06:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the auto-updating `pub_time` timestamp to blog2.Article."""

    dependencies = [
        ('blog2', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='article',
            name='pub_time',
            # auto_now=True: refreshed on every save, not just creation.
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
[
"noreply@github.com"
] |
cekong.noreply@github.com
|
4afecaed7d549f21e85ffa373e6b6bd797513efe
|
3fda2cf03821d5627d6628aca348870ac3d127c2
|
/utils.py
|
4561fe05ef59a3c5eb413fc664df80a1786c8aa1
|
[
"MIT"
] |
permissive
|
valohai/aws-pricelist-tool
|
04f0e601cb32995d37f2b207480fe4db4f094f0a
|
637ff1add70dc5bab0651abd7051822da9df671f
|
refs/heads/master
| 2020-04-25T22:24:50.348013
| 2019-10-18T16:51:00
| 2019-10-24T13:35:01
| 173,110,705
| 1
| 0
|
MIT
| 2019-10-24T13:35:03
| 2019-02-28T12:45:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,293
|
py
|
import fnmatch
import itertools
import json
import os
import time
import logging
from typing import Set
import requests
log = logging.getLogger(__name__)
REGIONS = {
'ap-northeast-1': 'Asia Pacific (Tokyo)',
'ap-northeast-2': 'Asia Pacific (Seoul)',
'ap-northeast-3': 'Asia Pacific (Osaka-Local)',
'ap-south-1': 'Asia Pacific (Mumbai)',
'ap-southeast-1': 'Asia Pacific (Singapore)',
'ap-southeast-2': 'Asia Pacific (Sydney)',
'ca-central-1': 'Canada (Central)',
'cn-north-1': 'China (Beijing)',
'cn-northwest-1': 'China (Ningxia)',
'eu-central-1': 'EU (Frankfurt)',
'eu-north-1': 'EU (Stockholm)',
'eu-west-1': 'EU (Ireland)',
'eu-west-2': 'EU (London)',
'eu-west-3': 'EU (Paris)',
'sa-east-1': 'South America (São Paulo)',
'us-east-1': 'US East (N. Virginia)',
'us-east-2': 'US East (Ohio)',
'us-gov-east-1': 'AWS GovCloud (US-East)',
'us-gov-west-1': 'AWS GovCloud (US)',
'us-west-1': 'US West (N. California)',
'us-west-2': 'US West (Oregon)',
}
def download_or_read_cached_json(url, cache_filename, max_age=86400):
    """Fetch JSON from *url*, caching the raw response body on disk.

    The cache file is reused while it is younger than *max_age* seconds
    (default one day); otherwise the URL is re-fetched, the body rewritten
    to the cache, and the parsed object returned.  Raises
    requests.HTTPError on a non-2xx response.
    """
    if os.path.isfile(cache_filename) and (time.time() - os.stat(cache_filename).st_mtime) < max_age:
        log.info(f'Using cached {cache_filename}')
        with open(cache_filename) as infp:
            return json.load(infp)
    log.info(f'Requesting {url}')
    resp = requests.get(url)
    resp.raise_for_status()
    data = resp.json()
    # Create the cache directory on demand and store the raw bytes.
    os.makedirs(os.path.dirname(os.path.realpath(cache_filename)), exist_ok=True)
    with open(cache_filename, 'wb') as outfp:
        outfp.write(resp.content)
    return data
def get_region_names() -> Set[str]:
    """Return the set of known AWS region codes (the keys of REGIONS)."""
    return {region for region in REGIONS}
def get_price_list(region, offer):
    """Return the AWS price list JSON for *offer* in *region*, using the
    shared on-disk cache (see download_or_read_cached_json)."""
    return download_or_read_cached_json(
        url=f'https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/{offer}/current/{region}/index.json',
        cache_filename=f'cache/aws-prices-{region}-{offer}.cache.json',
    )
def get_first_dict_value(d):
    """Return the value of *d*'s first key in iteration order.

    Raises StopIteration when *d* is empty, exactly like the original
    next(iter(...)) form.
    """
    values_iterator = iter(d.values())
    return next(values_iterator)
def wildcard_filter(values, patterns):
    """Yield, pattern by pattern, every value that fnmatch-es that pattern.

    A value matching several patterns is yielded once per matching pattern,
    preserving the original chain-of-generators semantics.
    """
    matches_per_pattern = (
        (value for value in values if fnmatch.fnmatch(value, pattern))
        for pattern in patterns
    )
    return itertools.chain.from_iterable(matches_per_pattern)
def wildcard_match(value, patterns):
    """Return True when *value* matches at least one pattern in *patterns*."""
    for pattern in patterns:
        if fnmatch.fnmatch(value, pattern):
            return True
    return False
|
[
"akx@iki.fi"
] |
akx@iki.fi
|
52afe2fe2d6b878609a8ca061380d4bdaf627bc6
|
e121ceeeeea5dbacbd3ea36f8d5f226ea43faa89
|
/shop_catalog/shop_catalog/urls.py
|
c162cf0f299f8c60d5e66f4a4badb4ace00cf7b0
|
[] |
no_license
|
Ranc58/training_django_shop
|
dae7b264f849502da0e54f2d70c715335a8d52f9
|
260b7c1be1c120652a791ff355afdd64c06602b0
|
refs/heads/master
| 2021-05-16T01:34:08.820340
| 2017-10-16T13:50:08
| 2017-10-16T13:50:08
| 107,131,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,148
|
py
|
"""shop_catalog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.contrib import admin
from technic_catalog.views import ProductList, ProductView
app_name = 'technic_catalog'
urlpatterns = [
url(r'^$', ProductList.as_view(), name='index_page'),
url(r'^product/(?P<pk>[0-9]+)/$', ProductView.as_view(),
name='product_detail'),
url(r'^admin/', admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"rancvova@gmail.com"
] |
rancvova@gmail.com
|
dc8862d5f210ae31a1ad69817c3da3b3f20d26f3
|
5f4e13201d4c5b7edc8dbbda289380682a187bec
|
/services/ping_service/coffeehouse_ping/__main__.py
|
54c78ce3e37e965ab5bbec124d3465090942452d
|
[] |
no_license
|
intellivoid/CoffeeHousePy
|
92f4fb344de757837c3d3da05cb5513e90408039
|
57c453625239f28da88b88ddd0ae5f1ecdd4de3c
|
refs/heads/master
| 2023-02-23T14:32:01.606630
| 2021-01-28T02:57:10
| 2021-01-28T02:57:10
| 324,419,067
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 850
|
py
|
import sys
from coffeehouse_ping import Server
def _real_main(argv=None):
    """
    The main command-line processor.

    :param argv: full argv vector; argv[1] selects the action
    :return: None
    """
    # BUG FIX: the original indexed argv[1] unconditionally and raised
    # IndexError when the program was run with no arguments (or argv=None);
    # fall back to the help menu instead.
    if argv is None or len(argv) < 2:
        _help_menu(argv)
        return
    if argv[1] == '--help':
        _help_menu(argv)
    if argv[1] == '--start-server':
        _start_server(argv)
def _start_server(argv=None):
    """
    Starts the ping server (blocking).

    :param argv: unused; kept for a uniform handler signature
    :return: None
    """
    server = Server()
    server.start()
def _help_menu(argv=None):
    """
    Displays the help menu and commandline usage, then exits the process.

    :param argv: unused; kept for a uniform handler signature
    :return: never returns (sys.exit)
    """
    print(
        "CoffeeHouse Ping CLI\n\n"
        "   --help\n"
        "   --start-server\n"
    )
    sys.exit()
if __name__ == '__main__':
try:
_real_main(sys.argv)
except KeyboardInterrupt:
print('\nInterrupted by user')
|
[
"netkas@intellivoid.info"
] |
netkas@intellivoid.info
|
33cb531d016b53d9e66de77beab3280ef808fe8e
|
61ce1dff5e61dde649e3908b6ba7c2a270a78e94
|
/users/models.py
|
fd8f6f26694565195903bf3553da2c8b17a77c6a
|
[
"Unlicense"
] |
permissive
|
xeddmc/pastebin-django
|
b9438104691b4c21af4a01e46e8c9ada7ee66403
|
5e38637e5a417ab907a353af8544f64a0ad2b127
|
refs/heads/master
| 2021-01-12T20:07:53.454983
| 2015-12-21T09:31:29
| 2015-12-21T09:31:29
| 45,421,895
| 0
| 0
|
Unlicense
| 2019-08-05T19:47:34
| 2015-11-02T21:03:40
|
Python
|
UTF-8
|
Python
| false
| false
| 6,890
|
py
|
from django.db import models, transaction
from django.core.cache import cache
from django.contrib.auth.models import User
from django_redis import get_redis_connection
from ipware.ip import get_real_ip
from sql import cursor
from pastes.models import Paste
from pastebin import settings
import datetime
class Favorite(models.Model):
    """
    Handles user's favorites (one row per user/paste pair).
    """
    paste = models.ForeignKey(Paste)
    user = models.ForeignKey(User)
    added = models.DateTimeField(auto_now_add=True)  # set once on creation

    @staticmethod
    def has_user_favorited_paste(user, paste):
        """
        Returns True or False depending on whether user has favorited the paste.

        The answer is cached per (username, char_id).  NOTE(review): no cache
        invalidation is visible here -- presumably done wherever favorites
        are added/removed; verify.
        """
        result = cache.get("paste_favorited:%s:%s" % (user.username, paste.char_id))

        if result != None:
            return result
        else:
            result = Favorite.objects.filter(user=user, paste=paste).exists()
            cache.set("paste_favorited:%s:%s" % (user.username, paste.char_id), result)
            return result
class PastebinUser(object):
    """
    Contains methods to run on a newly created or deleted user.
    """
    @staticmethod
    def create_user(user):
        """
        Create required entries for a new user (currently a SiteSettings row).
        """
        site_settings = SiteSettings(user=user)
        site_settings.save()

    @staticmethod
    def delete_user(user):
        """
        Deletes an user as well as all of his pastes and favorites;
        the User row itself is only deactivated, not removed.
        """
        with transaction.atomic():
            # Delete favorites
            Favorite.objects.filter(user=user).delete()
            Paste.objects.filter(user=user).delete()

            # Django recommends setting User's is_active property to False instead of
            # deleting it entirely, as it may break foreign keys
            user.is_active = False
            user.save()
class Limiter(object):
    """
    Throttles the amount of actions a guest/user can do.

    Counters live in the "persistent" Redis store, keyed per action per
    client IP, and expire after the period configured in settings.  A limit
    of -1 in settings disables throttling for that action.
    """
    PASTE_UPLOAD = 1
    PASTE_EDIT = 2
    COMMENT = 3

    @staticmethod
    def get_action_count(request, action):
        """
        Get the raw count of actions a certain IP address has done.

        Returns 0 when the relevant limit is disabled (-1) or when no
        counter exists yet for this IP.
        """
        authenticated = request.user.is_authenticated()

        if action == Limiter.PASTE_UPLOAD and settings.MAX_PASTE_UPLOADS_PER_USER == -1 and \
           settings.MAX_PASTE_UPLOADS_PER_GUEST == -1:
            return 0
        elif action == Limiter.PASTE_EDIT and settings.MAX_PASTE_EDITS_PER_USER == -1:
            return 0
        elif action == Limiter.COMMENT and settings.MAX_COMMENTS_PER_USER == -1:
            return 0

        count = 0
        con = get_redis_connection("persistent")
        ip = get_real_ip(request)

        if action == Limiter.PASTE_UPLOAD:
            count = con.get("paste_upload_count:%s" % ip)
        elif action == Limiter.PASTE_EDIT:
            count = con.get("paste_edit_count:%s" % ip)
        elif action == Limiter.COMMENT:
            count = con.get("comment_count:%s" % ip)

        if count is None:
            return 0
        else:
            return int(count)

    @staticmethod
    def increase_action_count(request, action, amount=1):
        """
        Increase the amount of actions by a certain amount (default=1).

        NOTE(review): `amount` is currently ignored -- every call increments
        by exactly one; confirm whether incr(key, amount) was intended.
        Returns the new counter value (0 when the limit is disabled).
        """
        authenticated = request.user.is_authenticated()

        count = 0
        con = get_redis_connection("persistent")
        ip = get_real_ip(request)

        if action == Limiter.PASTE_UPLOAD:
            if settings.MAX_PASTE_UPLOADS_PER_USER == -1 and authenticated:
                return 0
            elif settings.MAX_PASTE_UPLOADS_PER_GUEST == -1 and not authenticated:
                return 0
            else:
                count = int(con.incr("paste_upload_count:%s" % ip))
                if count == 1:
                    # BUG FIX: the original expired "comment_count:%s" here
                    # (copy-paste error), so the upload counter never expired.
                    con.expire("paste_upload_count:%s" % ip, settings.MAX_PASTE_UPLOADS_PERIOD)
        elif action == Limiter.PASTE_EDIT:
            if settings.MAX_PASTE_EDITS_PER_USER == -1:
                return 0
            else:
                count = int(con.incr("paste_edit_count:%s" % ip))
                if count == 1:
                    # BUG FIX: same copy-paste error -- expire the edit
                    # counter, not the comment counter.
                    con.expire("paste_edit_count:%s" % ip, settings.MAX_PASTE_EDITS_PERIOD)
        elif action == Limiter.COMMENT:
            if settings.MAX_COMMENTS_PER_USER == -1:
                return 0
            else:
                count = int(con.incr("comment_count:%s" % ip))
                if count == 1:
                    con.expire("comment_count:%s" % ip, settings.MAX_COMMENTS_PERIOD)

        return count

    @staticmethod
    def is_limit_reached(request, action, count=None):
        """
        Has the guest/user reached the maximum amount of the given action.

        *count* may be supplied to avoid a second Redis round-trip.
        """
        authenticated = request.user.is_authenticated()

        if action == Limiter.PASTE_UPLOAD and settings.MAX_PASTE_UPLOADS_PER_USER == -1 and \
           authenticated:
            return False
        elif action == Limiter.PASTE_UPLOAD and settings.MAX_PASTE_UPLOADS_PER_GUEST == -1 and \
             not authenticated:
            return False
        elif action == Limiter.PASTE_EDIT and settings.MAX_PASTE_EDITS_PER_USER == -1:
            return False
        elif action == Limiter.COMMENT and settings.MAX_COMMENTS_PER_USER == -1:
            return False

        if count is None:
            count = Limiter.get_action_count(request, action)

        if action == Limiter.PASTE_UPLOAD:
            if authenticated:
                return count >= settings.MAX_PASTE_UPLOADS_PER_USER
            else:
                return count >= settings.MAX_PASTE_UPLOADS_PER_GUEST
        elif action == Limiter.PASTE_EDIT:
            return count >= settings.MAX_PASTE_EDITS_PER_USER
        elif action == Limiter.COMMENT:
            return count >= settings.MAX_COMMENTS_PER_USER

    @staticmethod
    def get_action_limit(request, action):
        """
        Return the maximum amount of actions the guest/user can do.
        """
        authenticated = request.user.is_authenticated()

        if action == Limiter.PASTE_UPLOAD:
            if authenticated:
                return settings.MAX_PASTE_UPLOADS_PER_USER
            else:
                return settings.MAX_PASTE_UPLOADS_PER_GUEST
        elif action == Limiter.PASTE_EDIT:
            return settings.MAX_PASTE_EDITS_PER_USER
        elif action == Limiter.COMMENT:
            return settings.MAX_COMMENTS_PER_USER
class SiteSettings(models.Model):
    """
    User's site settings, eg. whether user wants his favorites to be public.
    """
    user = models.ForeignKey(User)
    # Whether other users may see this user's favorites list.
    public_favorites = models.BooleanField(default=True)
|
[
"jannepulk@gmail.com"
] |
jannepulk@gmail.com
|
194ea14eaf4b23e565969d666db913b837eb3d1e
|
0f16edb46a48f9b5a125abb56fc0545ede1d65aa
|
/doc/rst/conf.py
|
fee0a9988b79286c7831cf83dfe3e0e67b8e42df
|
[
"Apache-2.0"
] |
permissive
|
DataONEorg/d1_python
|
5e685f1af0c356190f2d6df45d1ac849e2f56972
|
d72a9461894d9be7d71178fb7310101b8ef9066a
|
refs/heads/master
| 2023-08-29T03:16:38.131760
| 2023-06-27T21:59:37
| 2023-06-27T21:59:37
| 60,103,877
| 15
| 12
|
Apache-2.0
| 2023-09-06T18:27:53
| 2016-05-31T16:01:00
|
Python
|
UTF-8
|
Python
| false
| false
| 6,408
|
py
|
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sphinx configuration for DataONE Python Products documentation."""
import os
import sys
import better
import django
os.environ['DJANGO_SETTINGS_MODULE'] = 'd1_gmn.settings'
django.setup()
project = "DataONE Python Products"
copyright = "2019 Participating institutions in DataONE"
import d1_gmn.app.models
source_suffix = ".rst"
master_doc = "index"
version = ""
release = ""
exclude_patterns = [
"_build",
"Thumbs.db",
".DS_Store",
"tests",
"test*.py",
"subject_info_renderer.py",
'*/generated/*',
]
pygments_style = "sphinx"
today_fmt = "%Y-%m-%d"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosectionlabel",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.graphviz",
"sphinx.ext.ifconfig",
"sphinx.ext.imgmath",
"sphinx.ext.inheritance_diagram",
"sphinx.ext.napoleon",
# 'sphinxcontrib.napoleon',
"sphinx.ext.todo",
]
# The default syntax highlighting applied code-block and :: blocks.
# Set highlighting where needed, e.g., with ".. highlight:: python".
highlight_language = "none"
html_logo = "_static/dataone_logo.png"
html_theme_path = [better.better_theme_path]
html_theme = "better"
html_short_title = "Home"
html_static_path = ["_static"]
templates_path = ["_templates"]
html_theme_options = {
# show sidebar on the right instead of on the left
"rightsidebar": False,
# inline CSS to insert into the page if you're too lazy to make a
# separate file
"inlinecss": "",
# CSS files to include after all other CSS files
# (refer to by relative path from conf.py directory, or link to a
# remote file)
"cssfiles": [],
# show a big text header with the value of html_title
"showheader": True,
# show the breadcrumbs and index|next|previous links at the top of
# the page
"showrelbartop": True,
# same for bottom of the page
"showrelbarbottom": False,
# show the self-serving link in the footer
"linktotheme": True,
# width of the sidebar. page width is determined by a CSS rule.
# I prefer to define things in rem because it scales with the
# global font size rather than pixels or the local font size.
"sidebarwidth": "15rem",
# color of all body text
"textcolor": "#000000",
# color of all headings (<h1> tags); defaults to the value of
# textcolor, which is why it's defined here at all.
"headtextcolor": "",
# color of text in the footer, including links; defaults to the
# value of textcolor
"footertextcolor": "",
# Google Analytics info
"ga_ua": "",
"ga_domain": "",
}
# html_sidebars = {
# '**': ['localtoc.html', 'sourcelink.html', 'searchbox.html'],
# }
# Formatting of NumPy and Google style docstrings
# Toggled Napoleon switches
napoleon_use_param = False
napoleon_use_ivar = True
napoleon_include_init_with_doc = True
# Napoleon settings
# napoleon_google_docstring = True
# napoleon_numpy_docstring = True
# napoleon_include_private_with_doc = False
# napoleon_include_special_with_doc = False
# napoleon_use_admonition_for_examples = False
# napoleon_use_admonition_for_notes = False
# napoleon_use_admonition_for_references = False
# napoleon_use_rtype = True
# napoleon_use_keyword = True
# napoleon_custom_sections = None
# Autodoc
autodoc_default_options = {
# 'members': None,
'member-order': 'bysource',
'special-members': ','.join(('__init__',)),
'exclude-members': ','.join((
"__weakref__", "__doc__", "__module__", "__dict__",
)),
# Members starting with underscore are excluded by default. This specifies
# exceptions, which will be included.
# Don't show the base classes for the class.
'show-inheritance': False,
# Ignore members imported by __all__().
# 'ignore-module-all': True,
# Prevent imported and inherited members from being documented as part of the
# classes they're imported/inherited in. They still get documented as separate
# classes.
'imported-members': False,
########
'inherited-members': True,
# Skip private members.
# 'private-members': False,
# Skip members without docstrings.
# 'undoc-members': False,
}
# Create unique labels for autogenerated modules by prefixing with the path of the
# module.
autosectionlabel_prefix_document = True
# Combine the doc for the class and for __init__ and render it in the
# class.
autoclass_content = 'both'
def autodoc_skip_member(app, what, name, obj, skip, options):
    """Sphinx autodoc-skip-member handler: skip members matching criteria.

    Returns True when autodoc should skip the member.
    """
    exclude_member = name in [
        'NAMES_2K',
        'UNICODE_NAMES',
        'UNICODE_TEST_STRINGS',
        'WORDS_1K',
        'models.py',
    ]
    # BUG FIX: the original wrote `obj is [...]`, an identity comparison
    # against a fresh list literal that is always False (followed by a
    # leftover debugging `sys.exit(obj)`).  Membership is what was intended.
    exclude_obj = obj in [
        d1_gmn.app.models.ResourceMap,
    ]
    exclude = exclude_member or exclude_obj
    return skip or exclude
def autodoc_process_signature(app, what, name, obj, options, signature,
                              return_annotation):
    """Placeholder autodoc-process-signature handler (currently a no-op and
    not connected in setup())."""
    # print(app, what, name, obj, options, signature, return_annotation)
    pass
def setup(app):
    """Sphinx extension entry point.

    Makes each sub-project's source tree importable by autodoc and registers
    the autodoc event handlers defined in this module.
    """
    package_roots = (
        'client_cli',
        'client_onedrive',
        'csw',
        'dev_tools',
        'gmn',
        'lib_client',
        'lib_common',
        'lib_scimeta',
        'test_utilities',
        'utilities',
    )
    for pkg_root in package_roots:
        sys.path.insert(0, os.path.abspath(f"../../{pkg_root}/src/"))
    app.connect("autodoc-skip-member", autodoc_skip_member)
    # Signature hook currently disabled:
    # app.connect("autodoc-process-signature", autodoc_process_signature)
|
[
"git@dahlsys.com"
] |
git@dahlsys.com
|
a204884acab08f712678c840bab689bf04f996c3
|
1ada3010856e39c93e2483c960aa8fc25e2b3332
|
/Binary Tree/DiameterOfBT.py
|
accf7d2fa64c600f094487d5b2cfaf5cc85c7837
|
[] |
no_license
|
Taoge123/LeetCode
|
4f9e26be05f39b37bdbb9c1e75db70afdfa1b456
|
4877e35a712f59bc7b8fffa3d8af2ffa56adb08c
|
refs/heads/master
| 2022-02-24T20:09:21.149818
| 2020-07-31T03:18:05
| 2020-07-31T03:18:05
| 142,700,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
class Node:
    """A binary tree node holding a value and optional child links."""

    def __init__(self, data):
        self.data = data
        # Children start empty; callers wire them up after construction.
        self.left = self.right = None
def height(node):
    """Return the number of nodes on the longest root-to-leaf path.

    An empty tree (``None``) has height 0.
    """
    if node is None:
        return 0
    deepest_subtree = max(height(node.left), height(node.right))
    return deepest_subtree + 1
def diameter(node):
    """Return the diameter of the binary tree rooted at ``node``.

    The diameter is the number of nodes on the longest path between any two
    nodes in the tree; an empty tree has diameter 0.

    The original implementation recomputed subtree heights inside every
    recursive call, giving O(n^2) time; this version computes the height and
    the diameter together in a single O(n) traversal while returning the
    same values.
    """
    def _walk(n):
        # Returns (height, diameter) for the subtree rooted at n.
        if n is None:
            return 0, 0
        left_height, left_diameter = _walk(n.left)
        right_height, right_diameter = _walk(n.right)
        height_here = max(left_height, right_height) + 1
        # Either the longest path passes through n, or it lies entirely
        # inside one of the subtrees.
        best_here = max(left_height + right_height + 1,
                        left_diameter, right_diameter)
        return height_here, best_here

    return _walk(node)[1]
# Build the sample tree:
#         1
#        / \
#       2   3
#      / \
#     4   5
root = Node(1)
left_child = Node(2)
right_child = Node(3)
root.left, root.right = left_child, right_child
left_child.left, left_child.right = Node(4), Node(5)
print("Diameter of given binary tree is %d" %(diameter(root)))
|
[
"taocheng984@gmail.com"
] |
taocheng984@gmail.com
|
113ffbcabf72b96ff950c9973a922243f540df6a
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_216/ch50_2020_09_23_01_25_05_690150.py
|
3c21d662ff00c1e961bde777d852f3f46d55eb93
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
def junta_nome_sobrenome(nomes, sobrenomes):
    """Return a list joining each first name with the surname at the same index.

    Indexes into ``sobrenomes`` positionally, so (like the original) a
    shorter surname list raises IndexError rather than truncating.
    """
    separador = ' '
    return [nomes[i] + separador + sobrenomes[i] for i in range(len(nomes))]
|
[
"you@example.com"
] |
you@example.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.