| hexsha (stringlengths 40-40) | size (int64 3-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2cdd5e7f00c248f854f0b16723d9100f318d8caf
| 1,624
|
py
|
Python
|
services/web/apps/inv/reportobjectsummary/views.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
services/web/apps/inv/reportobjectsummary/views.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
services/web/apps/inv/reportobjectsummary/views.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ---------------------------------------------------------------------
# inv.reportobjectsummary
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.lib.app.simplereport import SimpleReport, TableColumn
from noc.inv.models.object import Object
from noc.inv.models.objectmodel import ObjectModel
from noc.core.translation import ugettext as _
class ReportObjectSummaryApplication(SimpleReport):
title = _("Inventory Object Summary")
def get_data(self, **kwargs):
self.model_name = {} # oid -> name
data = list(
Object._get_collection().aggregate(
[{"$group": {"_id": "$model", "total": {"$sum": 1}}}]
)
)
oms = [x["_id"] for x in data if x["_id"]]
c = ObjectModel._get_collection()
om_names = {}
while oms:
chunk, oms = oms[:500], oms[500:]
om_names.update(
{
x["_id"]: x["name"]
for x in c.find({"_id": {"$in": chunk}}, {"_id": 1, "name": 1})
}
)
data = sorted(
([om_names[x["_id"]], x["total"]] for x in data if x["_id"] in om_names),
key=lambda x: -x[1],
)
return self.from_dataset(
title=self.title,
columns=["Model", TableColumn("Count", format="numeric", align="right", total="sum")],
data=data,
enumerate=True,
)
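# Illustrative sketch (not part of the application above): the report's core query is a
# single-stage aggregation counting Object documents per model id. The same grouping with
# plain pymongo, using assumed connection and collection names, looks like this:
if __name__ == "__main__":
    from pymongo import MongoClient

    client = MongoClient("mongodb://localhost:27017")  # assumed connection string
    objects = client["noc"]["noc.objects"]             # assumed database/collection names
    pipeline = [{"$group": {"_id": "$model", "total": {"$sum": 1}}}]
    for row in objects.aggregate(pipeline):
        print(row["_id"], row["total"])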
| 35.304348
| 98
| 0.463054
|
41c381c269a4d73e17af491f432575a58f845953
| 17,360
|
py
|
Python
|
Ansible-AWS-Provisioning/collections/ansible_collections/community/aws/plugins/modules/ecs_task.py
|
ginigangadharan/ansible-real-life
|
897c2fc0d05babbb540768b336b6ad399dad5bfa
|
[
"MIT"
] | 22
|
2021-07-16T08:11:22.000Z
|
2022-03-31T07:15:34.000Z
|
Ansible-AWS-Provisioning/collections/ansible_collections/community/aws/plugins/modules/ecs_task.py
|
premsagar0228/ansible-real-life
|
1a51193b833ab6ad320100472333b9ffb0da39d4
|
[
"MIT"
] | null | null | null |
Ansible-AWS-Provisioning/collections/ansible_collections/community/aws/plugins/modules/ecs_task.py
|
premsagar0228/ansible-real-life
|
1a51193b833ab6ad320100472333b9ffb0da39d4
|
[
"MIT"
] | 39
|
2021-07-05T02:31:42.000Z
|
2022-03-31T02:46:03.000Z
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_task
short_description: Run, start or stop a task in ecs
description:
- Creates or deletes instances of task definitions.
author: Mark Chance (@Java1Guy)
requirements: [ json, botocore, boto3 ]
options:
operation:
description:
- Which task operation to execute.
required: True
choices: ['run', 'start', 'stop']
type: str
cluster:
description:
- The name of the cluster to run the task on.
required: False
type: str
task_definition:
description:
- The task definition to start or run.
required: False
type: str
overrides:
description:
- A dictionary of values to pass to the new instances.
required: False
type: dict
count:
description:
- How many new instances to start.
required: False
type: int
task:
description:
- The task to stop.
required: False
type: str
container_instances:
description:
- The list of container instances on which to deploy the task.
required: False
type: list
elements: str
started_by:
description:
- A value showing who or what started the task (for informational purposes).
required: False
type: str
network_configuration:
description:
- Network configuration of the service. Only applicable for task definitions created with I(network_mode=awsvpc).
type: dict
suboptions:
subnets:
description: A list of subnet IDs to which the task is attached.
type: list
elements: str
security_groups:
description: A list of group names or group IDs for the task.
type: list
elements: str
launch_type:
description:
- The launch type on which to run your service.
required: false
choices: ["EC2", "FARGATE"]
type: str
tags:
type: dict
description:
- Tags that will be added to ecs tasks on start and run
required: false
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
'''
EXAMPLES = '''
# Simple example of run task
- name: Run task
ecs_task:
operation: run
cluster: console-sample-app-static-cluster
task_definition: console-sample-app-static-taskdef
count: 1
started_by: ansible_user
register: task_output
# Simple example of start task
- name: Start a task
ecs_task:
operation: start
cluster: console-sample-app-static-cluster
task_definition: console-sample-app-static-taskdef
task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
tags:
resourceName: a_task_for_ansible_to_run
type: long_running_task
network: internal
version: 1.4
container_instances:
- arn:aws:ecs:us-west-2:172139249013:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8
started_by: ansible_user
network_configuration:
subnets:
- subnet-abcd1234
security_groups:
- sg-aaaa1111
- my_security_group
register: task_output
- name: RUN a task on Fargate
ecs_task:
operation: run
cluster: console-sample-app-static-cluster
task_definition: console-sample-app-static-taskdef
task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
started_by: ansible_user
launch_type: FARGATE
network_configuration:
subnets:
- subnet-abcd1234
security_groups:
- sg-aaaa1111
- my_security_group
register: task_output
- name: Stop a task
ecs_task:
operation: stop
cluster: console-sample-app-static-cluster
task_definition: console-sample-app-static-taskdef
task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
'''
RETURN = '''
task:
description: details about the task that was started
returned: success
type: complex
contains:
taskArn:
description: The Amazon Resource Name (ARN) that identifies the task.
returned: always
type: str
clusterArn:
            description: The Amazon Resource Name (ARN) of the cluster that hosts the task.
returned: only when details is true
type: str
taskDefinitionArn:
description: The Amazon Resource Name (ARN) of the task definition.
returned: only when details is true
type: str
containerInstanceArn:
description: The Amazon Resource Name (ARN) of the container running the task.
returned: only when details is true
type: str
overrides:
description: The container overrides set for this task.
returned: only when details is true
type: list
elements: dict
lastStatus:
description: The last recorded status of the task.
returned: only when details is true
type: str
desiredStatus:
description: The desired status of the task.
returned: only when details is true
type: str
containers:
description: The container details.
returned: only when details is true
type: list
elements: dict
startedBy:
            description: The user who started the task.
returned: only when details is true
type: str
stoppedReason:
description: The reason why the task was stopped.
returned: only when details is true
type: str
createdAt:
description: The timestamp of when the task was created.
returned: only when details is true
type: str
startedAt:
description: The timestamp of when the task was started.
returned: only when details is true
type: str
stoppedAt:
description: The timestamp of when the task was stopped.
returned: only when details is true
type: str
launchType:
description: The launch type on which to run your task.
returned: always
type: str
'''
from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.basic import missing_required_lib
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names, ansible_dict_to_boto3_tag_list
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
class EcsExecManager:
"""Handles ECS Tasks"""
def __init__(self, module):
self.module = module
self.ecs = module.client('ecs')
self.ec2 = module.client('ec2')
def format_network_configuration(self, network_config):
result = dict()
if 'subnets' in network_config:
result['subnets'] = network_config['subnets']
else:
self.module.fail_json(msg="Network configuration must include subnets")
if 'security_groups' in network_config:
groups = network_config['security_groups']
if any(not sg.startswith('sg-') for sg in groups):
try:
vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId']
groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't look up security groups")
result['securityGroups'] = groups
return dict(awsvpcConfiguration=result)
def list_tasks(self, cluster_name, service_name, status):
response = self.ecs.list_tasks(
cluster=cluster_name,
family=service_name,
desiredStatus=status
)
if len(response['taskArns']) > 0:
for c in response['taskArns']:
if c.endswith(service_name):
return c
return None
def run_task(self, cluster, task_definition, overrides, count, startedBy, launch_type, tags):
if overrides is None:
overrides = dict()
params = dict(cluster=cluster, taskDefinition=task_definition,
overrides=overrides, count=count, startedBy=startedBy)
if self.module.params['network_configuration']:
params['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration'])
if launch_type:
params['launchType'] = launch_type
if tags:
params['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
# TODO: need to check if long arn format enabled.
try:
response = self.ecs.run_task(**params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't run task")
# include tasks and failures
return response['tasks']
def start_task(self, cluster, task_definition, overrides, container_instances, startedBy, tags):
args = dict()
if cluster:
args['cluster'] = cluster
if task_definition:
args['taskDefinition'] = task_definition
if overrides:
args['overrides'] = overrides
if container_instances:
args['containerInstances'] = container_instances
if startedBy:
args['startedBy'] = startedBy
if self.module.params['network_configuration']:
args['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration'])
if tags:
args['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
try:
response = self.ecs.start_task(**args)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't start task")
# include tasks and failures
return response['tasks']
def stop_task(self, cluster, task):
response = self.ecs.stop_task(cluster=cluster, task=task)
return response['task']
def ecs_api_handles_launch_type(self):
from distutils.version import LooseVersion
# There doesn't seem to be a nice way to inspect botocore to look
# for attributes (and networkConfiguration is not an explicit argument
# to e.g. ecs.run_task, it's just passed as a keyword argument)
return LooseVersion(botocore.__version__) >= LooseVersion('1.8.4')
def ecs_task_long_format_enabled(self):
account_support = self.ecs.list_account_settings(name='taskLongArnFormat', effectiveSettings=True)
return account_support['settings'][0]['value'] == 'enabled'
def ecs_api_handles_tags(self):
from distutils.version import LooseVersion
# There doesn't seem to be a nice way to inspect botocore to look
# for attributes (and networkConfiguration is not an explicit argument
# to e.g. ecs.run_task, it's just passed as a keyword argument)
return LooseVersion(botocore.__version__) >= LooseVersion('1.12.46')
def ecs_api_handles_network_configuration(self):
from distutils.version import LooseVersion
# There doesn't seem to be a nice way to inspect botocore to look
# for attributes (and networkConfiguration is not an explicit argument
# to e.g. ecs.run_task, it's just passed as a keyword argument)
return LooseVersion(botocore.__version__) >= LooseVersion('1.7.44')
def main():
argument_spec = dict(
operation=dict(required=True, choices=['run', 'start', 'stop']),
cluster=dict(required=False, type='str'), # R S P
task_definition=dict(required=False, type='str'), # R* S*
overrides=dict(required=False, type='dict'), # R S
count=dict(required=False, type='int'), # R
task=dict(required=False, type='str'), # P*
        container_instances=dict(required=False, type='list', elements='str'),  # S*
started_by=dict(required=False, type='str'), # R S
network_configuration=dict(required=False, type='dict'),
launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
tags=dict(required=False, type='dict')
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True,
required_if=[('launch_type', 'FARGATE', ['network_configuration'])])
# Validate Inputs
if module.params['operation'] == 'run':
        if module.params['task_definition'] is None:
module.fail_json(msg="To run a task, a task_definition must be specified")
task_to_list = module.params['task_definition']
status_type = "RUNNING"
if module.params['operation'] == 'start':
        if module.params['task_definition'] is None:
module.fail_json(msg="To start a task, a task_definition must be specified")
        if module.params['container_instances'] is None:
module.fail_json(msg="To start a task, container instances must be specified")
task_to_list = module.params['task']
status_type = "RUNNING"
if module.params['operation'] == 'stop':
        if module.params['task'] is None:
module.fail_json(msg="To stop a task, a task must be specified")
        if module.params['task_definition'] is None:
module.fail_json(msg="To stop a task, a task definition must be specified")
task_to_list = module.params['task_definition']
status_type = "STOPPED"
service_mgr = EcsExecManager(module)
if module.params['network_configuration'] and not service_mgr.ecs_api_handles_network_configuration():
module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration')
if module.params['launch_type'] and not service_mgr.ecs_api_handles_launch_type():
module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch type')
if module.params['tags']:
if not service_mgr.ecs_api_handles_tags():
module.fail_json(msg=missing_required_lib("botocore >= 1.12.46", reason="to use tags"))
if not service_mgr.ecs_task_long_format_enabled():
module.fail_json(msg="Cannot set task tags: long format task arns are required to set tags")
existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)
results = dict(changed=False)
if module.params['operation'] == 'run':
if existing:
# TBD - validate the rest of the details
results['task'] = existing
else:
if not module.check_mode:
results['task'] = service_mgr.run_task(
module.params['cluster'],
module.params['task_definition'],
module.params['overrides'],
module.params['count'],
module.params['started_by'],
module.params['launch_type'],
module.params['tags'],
)
results['changed'] = True
elif module.params['operation'] == 'start':
if existing:
# TBD - validate the rest of the details
results['task'] = existing
else:
if not module.check_mode:
results['task'] = service_mgr.start_task(
module.params['cluster'],
module.params['task_definition'],
module.params['overrides'],
module.params['container_instances'],
module.params['started_by'],
module.params['tags'],
)
results['changed'] = True
elif module.params['operation'] == 'stop':
if existing:
results['task'] = existing
else:
if not module.check_mode:
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
results['task'] = service_mgr.stop_task(
module.params['cluster'],
module.params['task']
)
results['changed'] = True
module.exit_json(**results)
if __name__ == '__main__':
main()
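# Illustrative sketch (separate from the module above): the "run" operation ultimately wraps
# the boto3 ECS client's run_task call. Region, cluster and task definition names below are
# placeholders taken from the EXAMPLES section; the function is defined but never called here.
def _run_task_directly_with_boto3():
    import boto3

    ecs = boto3.client("ecs", region_name="us-west-2")
    response = ecs.run_task(
        cluster="console-sample-app-static-cluster",
        taskDefinition="console-sample-app-static-taskdef",
        count=1,
        startedBy="ansible_user",
    )
    return response["tasks"]  # started tasks; failures appear under response["failures"]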
| 38.75
| 137
| 0.625979
|
7d15042a5e4455121ca31ed437decf879afee33b
| 1,662
|
py
|
Python
|
FlaskApp/ModStorage/mountHelper.py
|
benno16/extraFiles
|
82d681a90e199b8efc81017b777d8573e5e5a067
|
[
"MIT"
] | null | null | null |
FlaskApp/ModStorage/mountHelper.py
|
benno16/extraFiles
|
82d681a90e199b8efc81017b777d8573e5e5a067
|
[
"MIT"
] | null | null | null |
FlaskApp/ModStorage/mountHelper.py
|
benno16/extraFiles
|
82d681a90e199b8efc81017b777d8573e5e5a067
|
[
"MIT"
] | null | null | null |
import os
import os.path
from subprocess import call
import platform
def mountShare(m):
if platform.system() == 'Linux':
print("Mounting : " + m['Name'] + "\n")
        pathToMountTo = getMountPoint(m['ID'])
share = m['Path']
usernamePart = "username=\"{0}\",".format(m['UserName'])
argument = "-t cifs \"{0}\" \"{1}\" -o {2}password=\"{3}\" ".format(share,pathToMountTo,usernamePart,m['Password'])
isMounted = call("mount | grep \""+ share + "\" > /dev/null",shell=True)
if isMounted != 0:
exitCode = call("mount "+ argument,shell=True)
if exitCode == 0:
m['MountedTo'] = pathToMountTo
print("Mount completed")
return True
else:
m['LastError'] = "Failed to mount"
print("Failed to mount")
else:
print("Already mounted")
m['MountedTo'] = pathToMountTo
return True
else:
m['MountedTo'] = 'None'
m['LastError'] = "Can't use mount on " + platform.system()
print("Can't use mount on " + platform.system() + "\n")
return False
def getMountPoint(id):
    MOUNT_DEVICE_BASE_PATH = "/mnt/firebrick/devices"
combinedPath = os.path.join(MOUNT_DEVICE_BASE_PATH,id)
if not os.path.exists(combinedPath):
os.makedirs(combinedPath)
return combinedPath
def unmountShare(m):
if platform.system() == 'Linux':
print("Unmounting " + m['Name'])
share = m['Path']
call("umount \"{0}\"".format(share),shell=True)
else:
print("Can't use umount on " + platform.system() + "\n")
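# Illustrative usage sketch: the dictionary keys mirror what mountShare() reads above,
# but every value here is a placeholder.
if __name__ == '__main__':
    share = {
        'ID': 'dev01',
        'Name': 'Team share',
        'Path': '//fileserver/export',
        'UserName': 'backup',
        'Password': 'secret',
    }
    if mountShare(share):
        print("Mounted at " + share['MountedTo'])
        unmountShare(share)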
| 33.918367
| 123
| 0.555355
|
bdeee7278c000a25a10b5d22e7adb67dd0459115
| 185
|
py
|
Python
|
fastx_compressor/fastx_compressor.py
|
VincentPreikstas/fastx_blocker
|
78894b1ff49d8ff674143eae0dfe0a88653d2243
|
[
"MIT"
] | null | null | null |
fastx_compressor/fastx_compressor.py
|
VincentPreikstas/fastx_blocker
|
78894b1ff49d8ff674143eae0dfe0a88653d2243
|
[
"MIT"
] | null | null | null |
fastx_compressor/fastx_compressor.py
|
VincentPreikstas/fastx_blocker
|
78894b1ff49d8ff674143eae0dfe0a88653d2243
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Main module."""
def block_fastq_file(fastq_file, block_size=1000):
"""Yield lines from a blocked fastq file starting with the header line."""
pass
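# Illustrative sketch of what the docstring describes, kept separate from the stub above.
# It assumes standard 4-line FASTQ records and that "blocking" means yielding lists of up
# to block_size records at a time.
def _block_fastq_file_sketch(fastq_file, block_size=1000):
    with open(fastq_file) as handle:
        block, record = [], []
        for line in handle:
            record.append(line.rstrip("\n"))
            if len(record) == 4:          # @header, sequence, '+', quality
                block.append(record)
                record = []
                if len(block) == block_size:
                    yield block
                    block = []
        if block:
            yield block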
| 20.555556
| 78
| 0.664865
|
50e0251cc594ec2d0ae1119a8cc11b19a18d9d87
| 2,362
|
py
|
Python
|
qanta_web/expo/game.py
|
vivlai/qanta
|
57a162b28961a1d11a82405db4f07ff824ce3faf
|
[
"MIT"
] | null | null | null |
qanta_web/expo/game.py
|
vivlai/qanta
|
57a162b28961a1d11a82405db4f07ff824ce3faf
|
[
"MIT"
] | null | null | null |
qanta_web/expo/game.py
|
vivlai/qanta
|
57a162b28961a1d11a82405db4f07ff824ce3faf
|
[
"MIT"
] | null | null | null |
from enum import Enum
from typing import Optional, List
from qanta.datasets.quiz_bowl import Question as QBQuestion
class AnswerStatus(Enum):
UNANSWERED = 1
CORRECT = 2
WRONG = 3
class Player:
def __init__(self, player_id: int, name: str, score: int, answer_status: AnswerStatus, is_human: bool):
self.id = player_id
self.name = name
self.score = score
self.answer_status = answer_status
self.is_human = is_human
class Buzz:
def __init__(self, player_id: int, correct: bool, guess: str):
self.player_id = player_id
self.correct = correct
self.guess = guess
class Question:
def __init__(self, question: QBQuestion):
self.question = question
self.sentence = 0
self.word = 0
@property
def is_end_of_question(self):
return False
class GameState:
def __init__(self, game_id: int, players: List[Player], buzzes: List[Buzz], question: Optional[Question]):
self.game_id = game_id
self.players = players
self.buzzes = buzzes
self.question = question
class Environment:
    def __init__(self, questions: List[QBQuestion], players: List[Player]):
        self.questions = questions
        self.question_index = None
        self.game_state = GameState(0, players, [], None)
def _validate(self, action_tuple):
action, data = action_tuple
if action == Actions.BUZZ:
if not isinstance(data, str):
raise ValueError('data must be a string representing a guess')
elif action == Actions.WAIT:
if data is not None:
raise ValueError('When waiting data must be None')
else:
raise ValueError('Action must be BUZZ or WAIT')
def _buzz(self, guess):
pass
def _wait(self):
pass
    def _next_question(self):
        if self.question_index is None:
            self.question_index = 1
        qb_question = self.questions[self.question_index - 1]
        self.game_state.question = Question(qb_question)
def step(self, action_tuple):
self._validate(action_tuple)
action, data = action_tuple
if action == Actions.BUZZ:
guess = data
elif action == Actions.WAIT:
pass
class Actions(Enum):
BUZZ = 1
WAIT = 2
NEXT_QUESTION = 3
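# Illustrative sketch of the action-tuple protocol enforced by _validate(); the question
# list is left empty because QBQuestion's constructor is not shown in this file.
if __name__ == '__main__':
    players = [
        Player(1, 'human', 0, AnswerStatus.UNANSWERED, True),
        Player(2, 'guesser', 0, AnswerStatus.UNANSWERED, False),
    ]
    env = Environment(questions=[], players=players)
    env.step((Actions.WAIT, None))                  # waiting requires data to be None
    env.step((Actions.BUZZ, 'george_washington'))   # buzzing requires a string guess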
| 27.149425
| 110
| 0.624047
|
8c75fdd2aeb2d40f6c4b2b004cd9460e01914e66
| 2,653
|
py
|
Python
|
kuma_utils/common.py
|
analokmaus/kaggle-panda-challenge-public
|
ae4c79ddf0606a9f28b4e34d638f1d0d65b620c1
|
[
"MIT"
] | 36
|
2020-07-26T13:56:00.000Z
|
2022-03-10T07:02:06.000Z
|
kuma_utils/common.py
|
analokmaus/kaggle-panda-challenge-public
|
ae4c79ddf0606a9f28b4e34d638f1d0d65b620c1
|
[
"MIT"
] | 16
|
2020-11-13T19:01:11.000Z
|
2022-03-12T00:43:55.000Z
|
kuma_utils/common.py
|
analokmaus/kaggle-panda-challenge-public
|
ae4c79ddf0606a9f28b4e34d638f1d0d65b620c1
|
[
"MIT"
] | 7
|
2020-07-27T20:27:30.000Z
|
2022-02-23T04:08:08.000Z
|
import os
import sys
import time
import datetime
import re
from pathlib import Path
from copy import deepcopy, copy
import traceback
import warnings
from collections.abc import Iterable
import numpy as np
import pandas as pd
def clean_value(x):
if isinstance(x, str):
if x.isnumeric():
return float(x)
else:
return x
elif isinstance(x, int):
return float(x)
elif isinstance(x, float):
return x
else:
return x
class KumaNumpy:
'''
Enhanced numpy operation
'''
clean = np.vectorize(clean_value, otypes=[object])
@classmethod
def unique(self, x, return_counts=False):
val_cnt = pd.Series(x).value_counts()
val = val_cnt.index.values
if return_counts:
cnt = val_cnt.values
return val, cnt
else:
return val
@classmethod
def nunique(self, x):
return len(self.unique(x))
@classmethod
def to_numpy(self, x):
assert isinstance(x, (np.ndarray, pd.DataFrame, pd.Series, list))
if isinstance(x, pd.Series):
return x.values
elif isinstance(x, pd.DataFrame):
return x.values
elif isinstance(x, list):
return np.array(x)
else:
return x
@classmethod
    def to_numeric(self, x, dtypes=[np.float64], verbose=False):
if isinstance(dtypes, Iterable):
for dtype in dtypes:
# np.integer cannot handle nan
if np.issubdtype(dtype, np.integer) and np.sum(x!=x) > 0:
continue
                try:
                    return x.astype(dtype)
                except Exception:
                    if verbose:
                        print(f'failed to transform: {dtype}')
else:
if np.issubdtype(dtypes, np.integer) and np.sum(x != x) > 0:
return x
try:
return x.astype(dtypes)
            except Exception:
if verbose:
print(f'failed to transform: {dtypes}')
pass
return x
@classmethod
def fillna(self, x, val):
_x = x.copy()
_x[_x!=_x] = val
return _x
@classmethod
def dropna(self, x):
return x[x==x]
@classmethod
def isin(self, x1, x2):
return pd.Series(x1).isin(pd.Series(x2)).values
@classmethod
def replace(self, x, d):
return pd.Series(x).map(d).values
@classmethod
def mode(self, x, **kwargs):
return pd.Series(x).mode(**kwargs).values[0]
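# Illustrative usage sketch for the helpers above; the input arrays are made up.
if __name__ == '__main__':
    x = np.array(['1', '2', 'x', None], dtype=object)
    print(KumaNumpy.clean(x))                              # numeric strings become floats, the rest is unchanged
    print(KumaNumpy.nunique(np.array([1, 1, 2, 3])))       # 3
    print(KumaNumpy.fillna(np.array([1.0, np.nan]), 0.0))  # [1. 0.]
    print(KumaNumpy.mode(np.array([3, 3, 5])))             # 3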
| 23.6875
| 73
| 0.531097
|
266b8cfc50534e7fff5b0b66d5b0acec1b3a1b6a
| 736
|
py
|
Python
|
asset/migrations/0003_auto_20180517_1652.py
|
zoushiwen/icmdb
|
356aea2b970c999c0fafde12835c861b73ea42fb
|
[
"Apache-2.0"
] | null | null | null |
asset/migrations/0003_auto_20180517_1652.py
|
zoushiwen/icmdb
|
356aea2b970c999c0fafde12835c861b73ea42fb
|
[
"Apache-2.0"
] | null | null | null |
asset/migrations/0003_auto_20180517_1652.py
|
zoushiwen/icmdb
|
356aea2b970c999c0fafde12835c861b73ea42fb
|
[
"Apache-2.0"
] | 1
|
2018-08-03T07:04:11.000Z
|
2018-08-03T07:04:11.000Z
|
# Generated by Django 2.0.5 on 2018-05-17 16:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('asset', '0002_auto_20180517_1646'),
]
operations = [
migrations.AlterField(
model_name='assetinfo',
name='is_active',
field=models.CharField(choices=[('在线', '在线'), ('下线', '下线'), ('未知', '未知'), ('故障', '故障'), ('备用', '备用')], default=0, max_length=64, verbose_name='状态'),
),
migrations.AlterField(
model_name='assetloginuser',
name='private_key',
field=models.FileField(blank=True, null=True, upload_to='upload/privatekey/%Y%m%d73709', verbose_name='私钥'),
),
]
| 30.666667
| 160
| 0.580163
|
cc26684943e4d23ca95e29087c295a1e0aa87494
| 601
|
py
|
Python
|
src/quiz/migrations/0003_auto_20210122_2224.py
|
cbsBiram/xarala__ssr
|
863e1362c786daa752b942b796f7a015211d2f1b
|
[
"FSFAP"
] | null | null | null |
src/quiz/migrations/0003_auto_20210122_2224.py
|
cbsBiram/xarala__ssr
|
863e1362c786daa752b942b796f7a015211d2f1b
|
[
"FSFAP"
] | null | null | null |
src/quiz/migrations/0003_auto_20210122_2224.py
|
cbsBiram/xarala__ssr
|
863e1362c786daa752b942b796f7a015211d2f1b
|
[
"FSFAP"
] | null | null | null |
# Generated by Django 3.1.5 on 2021-01-22 22:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("course", "0023_auto_20210115_1332"),
("quiz", "0002_auto_20210120_0738"),
]
operations = [
migrations.AlterField(
model_name="quiz",
name="chapter",
field=models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="quiz",
to="course.chapter",
),
),
]
| 24.04
| 60
| 0.574043
|
310c7910198f62a1d9422d4fe71feb79076e22ae
| 15,190
|
py
|
Python
|
woke/tests/test_solidity_version_parsing.py
|
Ackee-Blockchain/woke
|
0d27de25720142beb9619a89619b7a94c3556af1
|
[
"ISC"
] | 7
|
2022-01-28T06:50:00.000Z
|
2022-02-14T11:34:32.000Z
|
woke/tests/test_solidity_version_parsing.py
|
Ackee-Blockchain/woke
|
0d27de25720142beb9619a89619b7a94c3556af1
|
[
"ISC"
] | 30
|
2022-01-26T17:54:48.000Z
|
2022-03-21T12:33:53.000Z
|
woke/tests/test_solidity_version_parsing.py
|
Ackee-Blockchain/woke
|
0d27de25720142beb9619a89619b7a94c3556af1
|
[
"ISC"
] | null | null | null |
import pytest
from woke.c_regex_parsing.solidity_version import (
SolidityVersion,
SolidityVersionRange,
SolidityVersionExpr,
)
def test_version_basic_usage():
v1 = SolidityVersion.fromstring("0.8.9-alpha.2+commit.12345678")
assert v1.major == 0
assert v1.minor == 8
assert v1.patch == 9
assert v1.prerelease == "alpha.2"
assert v1.build == "commit.12345678"
assert v1 < "0.8.13"
v2 = SolidityVersion.fromstring("0.8.7")
assert v1 > v2
v3 = SolidityVersion.fromstring("0.8.9")
assert v1 == v3
v4 = SolidityVersion.fromstring("0.8.9-abc+def")
assert v3 == v4 # prerelease and build tags are ignored
def test_version_str_and_repr():
s = "1.2.3-abc.def-012-ABC-abc+xyz-123.XYZ"
v = SolidityVersion.fromstring(s)
assert str(v) == s
assert eval(repr(v)) == v
def test_version_invalid():
with pytest.raises(ValueError):
SolidityVersion.fromstring(">0.8.1")
with pytest.raises(ValueError):
SolidityVersion.fromstring("=0.8.1")
with pytest.raises(ValueError):
SolidityVersion.fromstring("v0.8.1")
with pytest.raises(ValueError):
SolidityVersion.fromstring("x.8.1")
def test_version_range_basic():
assert SolidityVersionRange(None, None, None, None) == SolidityVersionRange(
"0.0.0", True, None, None
)
assert SolidityVersionRange("1.2.3", True, None, None) != SolidityVersionRange(
"1.2.3", False, None, None
)
assert SolidityVersionRange(None, None, "3.4.5", True) != SolidityVersionRange(
None, None, "3.4.5", False
)
r1 = SolidityVersionRange(None, None, None, None)
assert not r1.isempty()
assert r1.lower == SolidityVersion(0, 0, 0)
assert r1.lower_inclusive
assert r1.higher is None
assert r1.higher_inclusive is None
r2 = SolidityVersionRange("1.2.3", True, "3.4.5", False)
assert not r2.isempty()
assert r2.lower == SolidityVersion(1, 2, 3)
assert r2.lower_inclusive
assert r2.higher == SolidityVersion(3, 4, 5)
assert not r2.higher_inclusive
assert SolidityVersionRange("1.2.3", True, "0.9.9", False).isempty()
assert SolidityVersionRange("1.2.3", True, "1.2.3", False).isempty()
assert SolidityVersionRange("1.2.3", False, "1.2.3", True).isempty()
assert SolidityVersionRange("1.2.3", False, "1.2.3", False).isempty()
assert not SolidityVersionRange("1.2.3", True, "1.2.3", True).isempty()
def test_version_range_errors():
r1 = SolidityVersionRange(None, None, None, None)
with pytest.raises(ValueError):
x = "abcd" in r1
with pytest.raises(ValueError):
SolidityVersionRange("-1.2.3", True, None, None)
with pytest.raises(ValueError):
SolidityVersionRange("1.2.3", None, None, None)
with pytest.raises(ValueError):
SolidityVersionRange(None, True, None, None)
with pytest.raises(ValueError):
SolidityVersionRange(None, None, "1.2.3", None)
with pytest.raises(ValueError):
SolidityVersionRange(None, None, None, True)
def test_version_range_contains():
r1 = SolidityVersionRange("1.2.3", True, "2.0.0", False)
assert SolidityVersion.fromstring("1.2.3") in r1
assert "1.2.4" in r1
assert "1.2.2" not in r1
assert "2.0.0" not in r1
assert "1.9.999" in r1
r2 = SolidityVersionRange("0.8.9", False, "1.0.1", True)
assert "0.8.9" not in r2
assert "0.8.8" not in r2
assert "0.8.10" in r2
assert "1.0.1" in r2
assert "1.0.0" in r2
assert "0.9.9" in r2
r3 = SolidityVersionRange("0.8.1", False, None, None)
assert "0.8.1" not in r3
assert "0.8.2" in r3
assert "999999.999999.99999" in r3
r4 = SolidityVersionRange("1.2.3", False, "0.1.2", False)
assert r4.isempty()
assert "0.0.0" not in r4
assert "0.0.1" not in r4
assert "0.1.2" not in r4
assert "1.2.3" not in r4
assert "1.2.4" not in r4
def test_version_range_str_and_repr():
r1 = SolidityVersionRange(None, None, None, None)
assert str(r1) == ">=0.0.0"
assert eval(repr(r1)) == r1
r2 = SolidityVersionRange("1.2.3", True, "4.5.6", False)
assert str(r2) == ">=1.2.3 <4.5.6"
assert eval(repr(r2)) == r2
r3 = SolidityVersionRange("0.7.6", False, "2.0.7", True)
assert str(r3) == ">0.7.6 <=2.0.7"
assert eval(repr(r3)) == r3
r4 = SolidityVersionRange("0.1.6", False, "0.0.8", True)
assert r4.isempty()
assert str(r4) == ">0.0.0 <0.0.0"
assert eval(repr(r4)) == r4
def test_version_range_intersection():
r1 = SolidityVersionRange("1.0.0", True, "2.0.0", True)
r2 = SolidityVersionRange("1.0.1", False, None, None)
assert r1 & r2 == SolidityVersionRange("1.0.1", False, "2.0.0", True)
assert r1 & r2 == SolidityVersionRange.intersection(r1, r2)
r3 = SolidityVersionRange("1.0.0", False, "2.0.0", False)
assert r1 & r3 == r3
assert r3 & r1 == r3
assert r1 & r2 & r3 == r3 & r2 & r1
assert r2 & r3 & r1 == SolidityVersionRange.intersection(r1, r3, r2)
r4 = SolidityVersionRange("0.9.8", True, "1.9.8", False)
assert r1 & r4 == SolidityVersionRange("1.0.0", True, "1.9.8", False)
r5 = SolidityVersionRange("1.2.3", False, "2.0.1", False)
assert r1 & r5 == SolidityVersionRange("1.2.3", False, "2.0.0", True)
r6 = SolidityVersionRange(None, None, "1.0.0", False)
r7 = SolidityVersionRange("2.0.0", False, None, None)
assert (r1 & r6).isempty()
assert r1 & r7
r8 = SolidityVersionRange("0.0.0", False, "0.0.0", False)
assert (r1 & r8).isempty()
def test_version_expr_invalid():
invalid = [
"v0.8.10",
"v 0.8.10",
".1.2.3",
"1.2.3.",
"0.1.2.3.4",
"abc",
"o.8.7",
"y.8.7",
"1.2.3 - 4.5.6 - 7.8.9",
">=0.8.0 - 0.9.0",
"0.8.0 - <0.9.0",
"^1.2.3 - 4.5.6",
"7.8.9 - ~1.2.4",
"12.2.3 - x",
"x.0.1",
"x.0.x",
"1.x.2",
"x.x.2",
"0.8.10-alpha.1",
">*",
"<X",
"<=x",
"",
"^x",
"^0.0.0",
"~*",
]
for i in invalid:
with pytest.raises(ValueError):
SolidityVersionExpr(i)
def test_version_expr_comparators():
expressions = [
# expression, list of matching versions, list of nonmatching versions
("=0.8.10", ["0.8.10"], ["0.8.9", "0.8.11", "0.0.0"]),
("0.2", ["0.2.0", "0.2.1", "0.2.7", "0.2.99"], ["0.1.999", "0.3.0", "0.0.0"]),
("=2.*.X", ["2.0.0", "2.0.1", "2.1.3"], ["1.2.3", "0.0.0", "3.1.0"]),
("*", ["0.0.0", "1.2.3", "0.2.8", "0.8.10"], []),
(">=0.8.10", ["0.8.10", "0.8.11"], ["0.8.9"]),
(">=1.2", ["1.2.0", "1.2.1", "1.3.0", "2.0.7"], ["1.1.9", "0.0.0", "1.1.1"]),
(">=1.X.X", ["1.0.0", "1.2.3", "9.8.7"], ["0.9.9", "0.0.0"]),
(">=*", ["0.0.0", "1.2.3", "0.3.4", "9.9.9"], []),
(">0.8.10", ["0.8.11", "0.9.0", "1.0.1"], ["0.8.10", "0.8.9"]),
(">0.6", ["0.7.0", "0.7.1", "1.2.3"], ["0.6.0", "0.6.999", "0.0.0"]),
(">1", ["2.0.0", "2.0.1", "3.4.5"], ["1.0.0", "1.999.999"]),
(
"<0.8.10",
["0.8.9", "0.8.8", "0.7.0", "0.0.0"],
["0.8.10", "0.8.11", "0.9.0"],
),
("<1.1.x", ["1.0.9", "1.0.0", "0.8.9"], ["1.1.0", "1.1.1", "1.2.0"]),
("<1", ["0.9.9", "0.0.7", "0.9.99999"], ["1.0.0", "1.2.3", "2.0.1"]),
(
"<=0.8.10",
["0.8.10", "0.8.9", "0.5.1", "0.0.0"],
["0.8.11", "1.0.0", "0.9.0"],
),
("<=2.0", ["2.0.0", "2.0.999", "1.9.9"], ["2.1.0", "2.1.1", "3.0.1"]),
("<=1", ["1.0.0", "1.9.9", "0.8.9"], ["2.0.0", "2.8.1", "3.0.7"]),
]
for exp in expressions:
e = SolidityVersionExpr(exp[0])
for matching in exp[1]:
version = SolidityVersion.fromstring(matching)
assert (
version in e
and version in e.version_ranges
and matching in e
and matching in e.version_ranges
), f"Assertion failed: {matching} in {exp[0]}"
for nonmatching in exp[2]:
version = SolidityVersion.fromstring(nonmatching)
assert (
version not in e
and version not in e.version_ranges
and nonmatching not in e
and nonmatching not in e.version_ranges
), f"Assertion failed: {nonmatching} not in {exp[0]}"
def test_version_expr_hyphen():
expressions = [
# expression, list of matching versions, list of nonmatching versions
(
"0.7.5 - 0.8.10",
["0.7.5", "0.8.1", "0.8.10"],
["0.7.4", "0.8.11", "0.0.0", "1.2.3"],
),
("1.2 - 1.3.2", ["1.2.0", "1.2.10", "1.3.0", "1.3.2"], ["1.1.999", "1.3.3"]),
("2 - 3", ["2.0.0", "2.5.7", "3.0.0", "3.9.9"], ["4.0.0", "4.9.99"]),
(
"0.2.5 - 0.3",
["0.2.5", "0.2.99", "0.3.1", "0.3.999"],
["0.2.4", "0.4.0", "0.4.2"],
),
(
"0.6.9 - 1",
["0.6.9", "0.9.9", "1.0.0", "1.8.7"],
["0.6.8", "2.0.0", "0.0.0"],
),
]
for exp in expressions:
e = SolidityVersionExpr(exp[0])
for matching in exp[1]:
version = SolidityVersion.fromstring(matching)
assert (
version in e
and version in e.version_ranges
and matching in e
and matching in e.version_ranges
), f"Assertion failed: {matching} in {exp[0]}"
for nonmatching in exp[2]:
version = SolidityVersion.fromstring(nonmatching)
assert (
version not in e
and version not in e.version_ranges
and nonmatching not in e
and nonmatching not in e.version_ranges
), f"Assertion failed: {nonmatching} not in {exp[0]}"
def test_version_expr_tilde():
expressions = [
# expression, list of matching versions, list of nonmatching versions
("~0.8.7", ["0.8.7", "0.8.8", "0.8.999"], ["0.9.0", "0.8.6"]),
("~2.1", ["2.1.0", "2.1.1", "2.1.999"], ["2.2.0", "2.2.1", "2.0.9"]),
("~1", ["1.0.0", "1.2.3", "1.999.99"], ["2.0.0", "2.0.2", "0.9.9"]),
("~0", ["0.0.0", "0.1.2", "0.9.9"], ["1.0.0", "2.1.3"]),
]
for exp in expressions:
e = SolidityVersionExpr(exp[0])
for matching in exp[1]:
version = SolidityVersion.fromstring(matching)
assert (
version in e
and version in e.version_ranges
and matching in e
and matching in e.version_ranges
), f"Assertion failed: {matching} in {exp[0]}"
for nonmatching in exp[2]:
version = SolidityVersion.fromstring(nonmatching)
assert (
version not in e
and version not in e.version_ranges
and nonmatching not in e
and nonmatching not in e.version_ranges
), f"Assertion failed: {nonmatching} not in {exp[0]}"
def test_version_expr_caret():
expressions = [
# expression, list of matching versions, list of nonmatching versions
("^3.2.1", ["3.2.1", "3.9.8", "3.5.0"], ["3.2.0", "4.0.0"]),
("^0.4.0", ["0.4.0", "0.4.1", "0.4.99"], ["0.3.99", "0.5.0"]),
("^0.0.7", ["0.0.7"], ["0.0.6", "0.0.8", "0.0.0"]),
("^1.2", ["1.2.0", "1.9.99"], ["2.0.0", "1.1.99"]),
("^0.0.X", ["0.0.0", "0.0.99"], ["0.1.0", "1.0.0"]),
("^1", ["1.0.0", "1.9.8", "1.4.0"], ["0.0.0", "0.9.9", "2.0.0"]),
("^0.*.X", ["0.0.0", "0.9.70"], ["1.0.0", "1.2.0"]),
]
for exp in expressions:
e = SolidityVersionExpr(exp[0])
for matching in exp[1]:
version = SolidityVersion.fromstring(matching)
assert (
version in e
and version in e.version_ranges
and matching in e
and matching in e.version_ranges
), f"Assertion failed: {matching} in {exp[0]}"
for nonmatching in exp[2]:
version = SolidityVersion.fromstring(nonmatching)
assert (
version not in e
and version not in e.version_ranges
and nonmatching not in e
and nonmatching not in e.version_ranges
), f"Assertion failed: {nonmatching} not in {exp[0]}"
def test_version_expr_complex():
expressions = [
# expression, list of matching versions, list of nonmatching versions
(
"0.8.7 || 1 - 1.2.7",
["0.8.7", "1.0.0", "1.2.7"],
["0.8.6", "0.8.8", "0.9.9", "1.2.8"],
),
(
"^0.8 || 0.6.1 - 0.7.8",
["0.6.1", "0.7.0", "0.7.8", "0.8.0", "0.8.9"],
["0.6.0", "0.7.9", "0.9.0"],
),
("~0 || >=1.0.0 <1 || ^0", ["0.0.0", "0.1.2", "0.9.1"], ["1.0.0", "1.2.3"]),
]
for exp in expressions:
e = SolidityVersionExpr(exp[0])
for matching in exp[1]:
version = SolidityVersion.fromstring(matching)
assert (
version in e
and version in e.version_ranges
and matching in e
and matching in e.version_ranges
), f"Assertion failed: {matching} in {exp[0]}"
for nonmatching in exp[2]:
version = SolidityVersion.fromstring(nonmatching)
assert (
version not in e
and version not in e.version_ranges
and nonmatching not in e
and nonmatching not in e.version_ranges
), f"Assertion failed: {nonmatching} not in {exp[0]}"
def test_version_expr_whitespace():
expressions = [
# expression, list of matching versions, list of nonmatching versions
(
" 0 .\t8\n.\r7||1 - 1.2.7\n",
["0.8.7", "1.0.0", "1.2.7"],
["0.8.6", "0.8.8", "0.9.9", "1.2.8"],
),
(
"\r^0.8\t||0.6.1 - 0.7.8\r",
["0.6.1", "0.7.0", "0.7.8", "0.8.0", "0.8.9"],
["0.6.0", "0.7.9", "0.9.0"],
),
(
"~\t0\n \t|| >=\r\n1.0.0<1||^0",
["0.0.0", "0.1.2", "0.9.1"],
["1.0.0", "1.2.3"],
),
]
for exp in expressions:
e = SolidityVersionExpr(exp[0])
for matching in exp[1]:
version = SolidityVersion.fromstring(matching)
assert (
version in e
and version in e.version_ranges
and matching in e
and matching in e.version_ranges
), f"Assertion failed: {matching} in {exp[0]}"
for nonmatching in exp[2]:
version = SolidityVersion.fromstring(nonmatching)
assert (
version not in e
and version not in e.version_ranges
and nonmatching not in e
and nonmatching not in e.version_ranges
), f"Assertion failed: {nonmatching} not in {exp[0]}"
| 35.162037
| 86
| 0.504279
|
cadfd42bc79abba9e2230b86f6cd293ed3fdadde
| 29,180
|
py
|
Python
|
moto/ec2/_models/instances.py
|
viren-nadkarni/moto
|
61a5d5ca3b7f6b6a48e6b0584ca4336ca27f5bdb
|
[
"Apache-2.0"
] | null | null | null |
moto/ec2/_models/instances.py
|
viren-nadkarni/moto
|
61a5d5ca3b7f6b6a48e6b0584ca4336ca27f5bdb
|
[
"Apache-2.0"
] | null | null | null |
moto/ec2/_models/instances.py
|
viren-nadkarni/moto
|
61a5d5ca3b7f6b6a48e6b0584ca4336ca27f5bdb
|
[
"Apache-2.0"
] | null | null | null |
import copy
import warnings
from collections import OrderedDict
from datetime import datetime
from moto.core import ACCOUNT_ID
from moto.core.models import CloudFormationModel
from moto.core.utils import camelcase_to_underscores
from moto.packages.boto.ec2.blockdevicemapping import BlockDeviceMapping
from moto.packages.boto.ec2.instance import Instance as BotoInstance, Reservation
from ..exceptions import (
EC2ClientError,
InvalidInstanceIdError,
InvalidParameterValueErrorUnknownAttribute,
OperationNotPermitted4,
)
from .core import TaggedEC2Resource
from ..utils import (
random_eni_attach_id,
random_instance_id,
random_private_ip,
random_reservation_id,
filter_reservations,
utc_date_and_time,
)
class InstanceState(object):
def __init__(self, name="pending", code=0):
self.name = name
self.code = code
class StateReason(object):
def __init__(self, message="", code=""):
self.message = message
self.code = code
class Instance(TaggedEC2Resource, BotoInstance, CloudFormationModel):
VALID_ATTRIBUTES = {
"instanceType",
"kernel",
"ramdisk",
"userData",
"disableApiTermination",
"instanceInitiatedShutdownBehavior",
"rootDeviceName",
"blockDeviceMapping",
"productCodes",
"sourceDestCheck",
"groupSet",
"ebsOptimized",
"sriovNetSupport",
}
def __init__(self, ec2_backend, image_id, user_data, security_groups, **kwargs):
super().__init__()
self.ec2_backend = ec2_backend
self.id = random_instance_id()
self.owner_id = ACCOUNT_ID
self.lifecycle = kwargs.get("lifecycle")
nics = kwargs.get("nics", {})
launch_template_arg = kwargs.get("launch_template", {})
if launch_template_arg and not image_id:
# the image id from the template should be used
template_version = ec2_backend._get_template_from_args(launch_template_arg)
self.image_id = template_version.image_id
else:
self.image_id = image_id
self._state = InstanceState("running", 16)
self._reason = ""
self._state_reason = StateReason()
self.user_data = user_data
self.security_groups = security_groups
self.instance_type = kwargs.get("instance_type", "m1.small")
self.region_name = kwargs.get("region_name", "us-east-1")
placement = kwargs.get("placement", None)
self.subnet_id = kwargs.get("subnet_id")
if not self.subnet_id:
self.subnet_id = next(
(n["SubnetId"] for n in nics if "SubnetId" in n), None
)
in_ec2_classic = not bool(self.subnet_id)
self.key_name = kwargs.get("key_name")
self.ebs_optimized = kwargs.get("ebs_optimized", False)
self.source_dest_check = "true"
self.launch_time = utc_date_and_time()
self.ami_launch_index = kwargs.get("ami_launch_index", 0)
self.disable_api_termination = kwargs.get("disable_api_termination", False)
self.instance_initiated_shutdown_behavior = kwargs.get(
"instance_initiated_shutdown_behavior", "stop"
)
self.sriov_net_support = "simple"
self._spot_fleet_id = kwargs.get("spot_fleet_id", None)
self.associate_public_ip = kwargs.get("associate_public_ip", False)
if in_ec2_classic:
# If we are in EC2-Classic, autoassign a public IP
self.associate_public_ip = True
amis = self.ec2_backend.describe_images(filters={"image-id": self.image_id})
ami = amis[0] if amis else None
if ami is None:
warnings.warn(
"Could not find AMI with image-id:{0}, "
"in the near future this will "
"cause an error.\n"
"Use ec2_backend.describe_images() to "
"find suitable image for your test".format(self.image_id),
PendingDeprecationWarning,
)
self.platform = ami.platform if ami else None
self.virtualization_type = ami.virtualization_type if ami else "paravirtual"
self.architecture = ami.architecture if ami else "x86_64"
self.root_device_name = ami.root_device_name if ami else None
# handle weird bug around user_data -- something grabs the repr(), so
# it must be clean
if isinstance(self.user_data, list) and len(self.user_data) > 0:
if isinstance(self.user_data[0], bytes):
# string will have a "b" prefix -- need to get rid of it
self.user_data[0] = self.user_data[0].decode("utf-8")
if self.subnet_id:
subnet = ec2_backend.get_subnet(self.subnet_id)
self._placement.zone = subnet.availability_zone
if self.associate_public_ip is None:
                # Mapping public ip hasn't been explicitly enabled or disabled
self.associate_public_ip = subnet.map_public_ip_on_launch == "true"
elif placement:
self._placement.zone = placement
else:
self._placement.zone = ec2_backend.region_name + "a"
self.block_device_mapping = BlockDeviceMapping()
self._private_ips = set()
self.prep_nics(
nics,
private_ip=kwargs.get("private_ip"),
associate_public_ip=self.associate_public_ip,
security_groups=self.security_groups,
)
@property
def vpc_id(self):
if self.subnet_id:
subnet = self.ec2_backend.get_subnet(self.subnet_id)
return subnet.vpc_id
if self.nics and 0 in self.nics:
return self.nics[0].subnet.vpc_id
return None
def __del__(self):
try:
subnet = self.ec2_backend.get_subnet(self.subnet_id)
for ip in self._private_ips:
subnet.del_subnet_ip(ip)
except Exception:
            # It's not "super" critical we clean this up, as reset will do this
            # worst case we'll get IP address exhaustion... rarely
pass
def add_block_device(
self,
size,
device_path,
snapshot_id=None,
encrypted=False,
delete_on_termination=False,
kms_key_id=None,
volume_type=None,
):
volume = self.ec2_backend.create_volume(
size=size,
zone_name=self._placement.zone,
snapshot_id=snapshot_id,
encrypted=encrypted,
kms_key_id=kms_key_id,
volume_type=volume_type,
)
self.ec2_backend.attach_volume(
volume.id, self.id, device_path, delete_on_termination
)
def setup_defaults(self):
# Default have an instance with root volume should you not wish to
# override with attach volume cmd.
volume = self.ec2_backend.create_volume(size=8, zone_name=self._placement.zone)
self.ec2_backend.attach_volume(volume.id, self.id, "/dev/sda1", True)
def teardown_defaults(self):
for device_path in list(self.block_device_mapping.keys()):
volume = self.block_device_mapping[device_path]
volume_id = volume.volume_id
self.ec2_backend.detach_volume(volume_id, self.id, device_path)
if volume.delete_on_termination:
self.ec2_backend.delete_volume(volume_id)
@property
def get_block_device_mapping(self):
return self.block_device_mapping.items()
@property
def private_ip(self):
return self.nics[0].private_ip_address
@property
def private_dns(self):
formatted_ip = self.private_ip.replace(".", "-")
if self.region_name == "us-east-1":
return "ip-{0}.ec2.internal".format(formatted_ip)
else:
return "ip-{0}.{1}.compute.internal".format(formatted_ip, self.region_name)
@property
def public_ip(self):
return self.nics[0].public_ip
@property
def public_dns(self):
if self.public_ip:
formatted_ip = self.public_ip.replace(".", "-")
if self.region_name == "us-east-1":
return "ec2-{0}.compute-1.amazonaws.com".format(formatted_ip)
else:
return "ec2-{0}.{1}.compute.amazonaws.com".format(
formatted_ip, self.region_name
)
@staticmethod
def cloudformation_name_type():
return None
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-instance.html
return "AWS::EC2::Instance"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
from ..models import ec2_backends
properties = cloudformation_json["Properties"]
ec2_backend = ec2_backends[region_name]
security_group_ids = properties.get("SecurityGroups", [])
group_names = [
ec2_backend.get_security_group_from_id(group_id).name
for group_id in security_group_ids
]
reservation = ec2_backend.add_instances(
image_id=properties["ImageId"],
user_data=properties.get("UserData"),
count=1,
security_group_names=group_names,
instance_type=properties.get("InstanceType", "m1.small"),
subnet_id=properties.get("SubnetId"),
key_name=properties.get("KeyName"),
private_ip=properties.get("PrivateIpAddress"),
block_device_mappings=properties.get("BlockDeviceMappings", {}),
)
instance = reservation.instances[0]
for tag in properties.get("Tags", []):
instance.add_tag(tag["Key"], tag["Value"])
# Associating iam instance profile.
# TODO: Don't forget to implement replace_iam_instance_profile_association once update_from_cloudformation_json
# for ec2 instance will be implemented.
if properties.get("IamInstanceProfile"):
ec2_backend.associate_iam_instance_profile(
instance_id=instance.id,
iam_instance_profile_name=properties.get("IamInstanceProfile"),
)
return instance
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
from ..models import ec2_backends
ec2_backend = ec2_backends[region_name]
all_instances = ec2_backend.all_instances()
# the resource_name for instances is the stack name, logical id, and random suffix separated
# by hyphens. So to lookup the instances using the 'aws:cloudformation:logical-id' tag, we need to
# extract the logical-id from the resource_name
logical_id = resource_name.split("-")[1]
for instance in all_instances:
instance_tags = instance.get_tags()
for tag in instance_tags:
if (
tag["key"] == "aws:cloudformation:logical-id"
and tag["value"] == logical_id
):
instance.delete(region_name)
@property
def physical_resource_id(self):
return self.id
def start(self):
for nic in self.nics.values():
nic.start()
self._state.name = "running"
self._state.code = 16
self._reason = ""
self._state_reason = StateReason()
def stop(self):
for nic in self.nics.values():
nic.stop()
self._state.name = "stopped"
self._state.code = 80
self._reason = "User initiated ({0})".format(
datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC")
)
self._state_reason = StateReason(
"Client.UserInitiatedShutdown: User initiated shutdown",
"Client.UserInitiatedShutdown",
)
def delete(self, region): # pylint: disable=unused-argument
self.terminate()
def terminate(self):
for nic in self.nics.values():
nic.stop()
self.teardown_defaults()
if self._spot_fleet_id:
spot_fleet = self.ec2_backend.get_spot_fleet_request(self._spot_fleet_id)
for spec in spot_fleet.launch_specs:
if (
spec.instance_type == self.instance_type
and spec.subnet_id == self.subnet_id
):
break
spot_fleet.fulfilled_capacity -= spec.weighted_capacity
spot_fleet.spot_requests = [
req for req in spot_fleet.spot_requests if req.instance != self
]
self._state.name = "terminated"
self._state.code = 48
self._reason = "User initiated ({0})".format(
datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC")
)
self._state_reason = StateReason(
"Client.UserInitiatedShutdown: User initiated shutdown",
"Client.UserInitiatedShutdown",
)
# Disassociate iam instance profile if associated, otherwise iam_instance_profile_associations will
# be pointing to None.
if self.ec2_backend.iam_instance_profile_associations.get(self.id):
self.ec2_backend.disassociate_iam_instance_profile(
association_id=self.ec2_backend.iam_instance_profile_associations[
self.id
].id
)
def reboot(self):
self._state.name = "running"
self._state.code = 16
self._reason = ""
self._state_reason = StateReason()
@property
def dynamic_group_list(self):
return self.security_groups
def prep_nics(
self, nic_spec, private_ip=None, associate_public_ip=None, security_groups=None
):
self.nics = {}
if self.subnet_id:
subnet = self.ec2_backend.get_subnet(self.subnet_id)
if not private_ip:
private_ip = subnet.get_available_subnet_ip(instance=self)
else:
subnet.request_ip(private_ip, instance=self)
self._private_ips.add(private_ip)
elif private_ip is None:
# Preserve old behaviour if in EC2-Classic mode
private_ip = random_private_ip()
# Primary NIC defaults
primary_nic = {
"SubnetId": self.subnet_id,
"PrivateIpAddress": private_ip,
"AssociatePublicIpAddress": associate_public_ip,
}
primary_nic = dict((k, v) for k, v in primary_nic.items() if v)
# If empty NIC spec but primary NIC values provided, create NIC from
# them.
if primary_nic and not nic_spec:
nic_spec = [primary_nic]
nic_spec[0]["DeviceIndex"] = 0
# Flesh out data structures and associations
for nic in nic_spec:
device_index = int(nic.get("DeviceIndex"))
nic_id = nic.get("NetworkInterfaceId")
if nic_id:
# If existing NIC found, use it.
use_nic = self.ec2_backend.get_network_interface(nic_id)
use_nic.device_index = device_index
use_nic.public_ip_auto_assign = False
else:
# If primary NIC values provided, use them for the primary NIC.
if device_index == 0 and primary_nic:
nic.update(primary_nic)
if "SubnetId" in nic:
subnet = self.ec2_backend.get_subnet(nic["SubnetId"])
else:
# Get default Subnet
zone = self._placement.zone
subnet = self.ec2_backend.get_default_subnet(availability_zone=zone)
group_ids = nic.get("SecurityGroupId") or []
if security_groups:
group_ids.extend([group.id for group in security_groups])
use_nic = self.ec2_backend.create_network_interface(
subnet,
nic.get("PrivateIpAddress"),
device_index=device_index,
public_ip_auto_assign=nic.get("AssociatePublicIpAddress", False),
group_ids=group_ids,
)
self.attach_eni(use_nic, device_index)
def attach_eni(self, eni, device_index):
device_index = int(device_index)
self.nics[device_index] = eni
# This is used upon associate/disassociate public IP.
eni.instance = self
eni.attachment_id = random_eni_attach_id()
eni.attach_time = utc_date_and_time()
eni.status = "in-use"
eni.device_index = device_index
return eni.attachment_id
def detach_eni(self, eni):
self.nics.pop(eni.device_index, None)
eni.instance = None
eni.attachment_id = None
eni.attach_time = None
eni.status = "available"
eni.device_index = None
@classmethod
def has_cfn_attr(cls, attr):
return attr in [
"AvailabilityZone",
"PrivateDnsName",
"PublicDnsName",
"PrivateIp",
"PublicIp",
]
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "AvailabilityZone":
return self.placement
elif attribute_name == "PrivateDnsName":
return self.private_dns
elif attribute_name == "PublicDnsName":
return self.public_dns
elif attribute_name == "PrivateIp":
return self.private_ip
elif attribute_name == "PublicIp":
return self.public_ip
raise UnformattedGetAttTemplateException()
def applies(self, filters):
if filters:
applicable = False
for f in filters:
acceptable_values = f["values"]
if f["name"] == "instance-state-name":
if self._state.name in acceptable_values:
applicable = True
if f["name"] == "instance-state-code":
if str(self._state.code) in acceptable_values:
applicable = True
return applicable
# If there are no filters, all instances are valid
return True
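# Illustrative sketch (not part of this module): the Instance model above is what a mocked
# EC2 backend materialises when a test launches instances through boto3. The AMI id is a
# placeholder; the block only runs when executed directly.
if __name__ == '__main__':
    import boto3
    from moto import mock_ec2

    @mock_ec2
    def _demo():
        ec2 = boto3.client("ec2", region_name="us-east-1")
        ec2.run_instances(ImageId="ami-12345678", MinCount=1, MaxCount=1)
        reservations = ec2.describe_instances()["Reservations"]
        assert len(reservations[0]["Instances"]) == 1

    _demo()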
class InstanceBackend(object):
def __init__(self):
self.reservations = OrderedDict()
super().__init__()
def get_instance(self, instance_id):
for instance in self.all_instances():
if instance.id == instance_id:
return instance
raise InvalidInstanceIdError(instance_id)
def add_instances(self, image_id, count, user_data, security_group_names, **kwargs):
new_reservation = Reservation()
new_reservation.id = random_reservation_id()
security_groups = [
self.get_security_group_by_name_or_id(name) for name in security_group_names
]
for sg_id in kwargs.pop("security_group_ids", []):
if isinstance(sg_id, str):
security_groups.append(self.get_security_group_from_id(sg_id))
else:
security_groups.append(sg_id)
self.reservations[new_reservation.id] = new_reservation
tags = kwargs.pop("tags", {})
instance_tags = tags.get("instance", {})
volume_tags = tags.get("volume", {})
for index in range(count):
kwargs["ami_launch_index"] = index
new_instance = Instance(
self, image_id, user_data, security_groups, **kwargs
)
new_reservation.instances.append(new_instance)
new_instance.add_tags(instance_tags)
block_device_mappings = None
if "block_device_mappings" in kwargs:
block_device_mappings = kwargs["block_device_mappings"]
elif kwargs.get("launch_template"):
template = self._get_template_from_args(kwargs["launch_template"])
block_device_mappings = template.data.get("BlockDeviceMapping")
elif kwargs.get("launch_config"):
block_device_mappings = kwargs[
"launch_config"
].block_device_mapping_dict
if block_device_mappings:
for block_device in block_device_mappings:
device_name = block_device["DeviceName"]
volume_size = block_device["Ebs"].get("VolumeSize")
volume_type = block_device["Ebs"].get("VolumeType")
snapshot_id = block_device["Ebs"].get("SnapshotId")
encrypted = block_device["Ebs"].get("Encrypted", False)
if isinstance(encrypted, str):
encrypted = encrypted.lower() == "true"
delete_on_termination = block_device["Ebs"].get(
"DeleteOnTermination", False
)
kms_key_id = block_device["Ebs"].get("KmsKeyId")
if block_device.get("NoDevice") != "":
new_instance.add_block_device(
volume_size,
device_name,
snapshot_id,
encrypted,
delete_on_termination,
kms_key_id,
volume_type=volume_type,
)
else:
new_instance.setup_defaults()
if kwargs.get("instance_market_options"):
new_instance.lifecycle = "spot"
# Tag all created volumes.
for _, device in new_instance.get_block_device_mapping:
volumes = self.describe_volumes(volume_ids=[device.volume_id])
for volume in volumes:
volume.add_tags(volume_tags)
return new_reservation
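    # Example of the "block_device_mappings" structure handled above
    # (illustrative sketch only; the device name and values are assumptions):
    #
    #   [{"DeviceName": "/dev/sda1",
    #     "Ebs": {"VolumeSize": 50, "VolumeType": "gp2",
    #             "DeleteOnTermination": True, "Encrypted": "true"}}]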
def run_instances(self):
# Logic resides in add_instances
# Fake method here to make implementation coverage script aware that this method is implemented
pass
def start_instances(self, instance_ids):
started_instances = []
for instance in self.get_multi_instances_by_id(instance_ids):
instance.start()
started_instances.append(instance)
return started_instances
def stop_instances(self, instance_ids):
stopped_instances = []
for instance in self.get_multi_instances_by_id(instance_ids):
instance.stop()
stopped_instances.append(instance)
return stopped_instances
def terminate_instances(self, instance_ids):
terminated_instances = []
if not instance_ids:
raise EC2ClientError(
"InvalidParameterCombination", "No instances specified"
)
for instance in self.get_multi_instances_by_id(instance_ids):
if instance.disable_api_termination == "true":
raise OperationNotPermitted4(instance.id)
instance.terminate()
terminated_instances.append(instance)
return terminated_instances
def reboot_instances(self, instance_ids):
rebooted_instances = []
for instance in self.get_multi_instances_by_id(instance_ids):
instance.reboot()
rebooted_instances.append(instance)
return rebooted_instances
def modify_instance_attribute(self, instance_id, key, value):
instance = self.get_instance(instance_id)
setattr(instance, key, value)
return instance
def modify_instance_security_groups(self, instance_id, new_group_id_list):
instance = self.get_instance(instance_id)
new_group_list = []
for new_group_id in new_group_id_list:
new_group_list.append(self.get_security_group_from_id(new_group_id))
setattr(instance, "security_groups", new_group_list)
return instance
def describe_instance_attribute(self, instance_id, attribute):
if attribute not in Instance.VALID_ATTRIBUTES:
raise InvalidParameterValueErrorUnknownAttribute(attribute)
if attribute == "groupSet":
key = "security_groups"
else:
key = camelcase_to_underscores(attribute)
instance = self.get_instance(instance_id)
value = getattr(instance, key)
return instance, value
def describe_instance_credit_specifications(self, instance_ids):
queried_instances = []
for instance in self.get_multi_instances_by_id(instance_ids):
queried_instances.append(instance)
return queried_instances
def all_instances(self, filters=None):
instances = []
for reservation in self.all_reservations():
for instance in reservation.instances:
if instance.applies(filters):
instances.append(instance)
return instances
def all_running_instances(self, filters=None):
instances = []
for reservation in self.all_reservations():
for instance in reservation.instances:
if instance.state_code == 16 and instance.applies(filters):
instances.append(instance)
return instances
def get_multi_instances_by_id(self, instance_ids, filters=None):
"""
:param instance_ids: A string list with instance ids
:return: A list with instance objects
"""
result = []
for reservation in self.all_reservations():
for instance in reservation.instances:
if instance.id in instance_ids:
if instance.applies(filters):
result.append(instance)
if instance_ids and len(instance_ids) > len(result):
result_ids = [i.id for i in result]
missing_instance_ids = [i for i in instance_ids if i not in result_ids]
raise InvalidInstanceIdError(missing_instance_ids)
return result
def get_instance_by_id(self, instance_id):
for reservation in self.all_reservations():
for instance in reservation.instances:
if instance.id == instance_id:
return instance
def get_reservations_by_instance_ids(self, instance_ids, filters=None):
"""Go through all of the reservations and filter to only return those
associated with the given instance_ids.
"""
reservations = []
for reservation in self.all_reservations():
reservation_instance_ids = [
instance.id for instance in reservation.instances
]
matching_reservation = any(
instance_id in reservation_instance_ids for instance_id in instance_ids
)
if matching_reservation:
reservation.instances = [
instance
for instance in reservation.instances
if instance.id in instance_ids
]
reservations.append(reservation)
found_instance_ids = [
instance.id
for reservation in reservations
for instance in reservation.instances
]
if len(found_instance_ids) != len(instance_ids):
invalid_id = list(set(instance_ids).difference(set(found_instance_ids)))[0]
raise InvalidInstanceIdError(invalid_id)
if filters is not None:
reservations = filter_reservations(reservations, filters)
return reservations
def describe_instances(self, filters=None):
return self.all_reservations(filters)
def describe_instance_status(self, instance_ids, include_all_instances, filters):
if instance_ids:
return self.get_multi_instances_by_id(instance_ids, filters)
elif include_all_instances:
return self.all_instances(filters)
else:
return self.all_running_instances(filters)
def all_reservations(self, filters=None):
reservations = [
copy.copy(reservation) for reservation in self.reservations.copy().values()
]
if filters is not None:
reservations = filter_reservations(reservations, filters)
return reservations
def _get_template_from_args(self, launch_template_arg):
template = (
self.describe_launch_templates(
template_ids=[launch_template_arg["LaunchTemplateId"]]
)[0]
if "LaunchTemplateId" in launch_template_arg
else self.describe_launch_templates(
template_names=[launch_template_arg["LaunchTemplateName"]]
)[0]
)
version = launch_template_arg.get("Version", template.latest_version_number)
template_version = template.get_version(int(version))
return template_version
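    # Example of the "launch_template" argument shapes resolved above
    # (illustrative sketch; the template id, name and version are assumptions):
    #
    #   {"LaunchTemplateId": "lt-0abc123def4567890", "Version": "2"}
    #   {"LaunchTemplateName": "web-servers"}   # falls back to the latest version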
| 37.219388
| 119
| 0.612988
|
4d9f529fdd33c6f146f0d615d78b64b18e6a72b6
| 362
|
py
|
Python
|
userbot/plugins/thinklol.py
|
justteen/BUZZ-USERBOT
|
55651cce150e1d04d2c61efb2565ef9f46b42933
|
[
"BSL-1.0"
] | null | null | null |
userbot/plugins/thinklol.py
|
justteen/BUZZ-USERBOT
|
55651cce150e1d04d2c61efb2565ef9f46b42933
|
[
"BSL-1.0"
] | null | null | null |
userbot/plugins/thinklol.py
|
justteen/BUZZ-USERBOT
|
55651cce150e1d04d2c61efb2565ef9f46b42933
|
[
"BSL-1.0"
] | null | null | null |
# (c) @UniBorg
import asyncio
from collections import deque
from telethon import events
@borg.on(events.NewMessage(pattern=r"\.lol", outgoing=True))
async def _(event):
if event.fwd_from:
return
deq = deque(list("🤔🧐🤔🧐🤔🧐"))
for _ in range(999):
await asyncio.sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
| 20.111111
| 60
| 0.627072
|
16f0067a57cc4559930cdefa51f80edcd0989343
| 645
|
py
|
Python
|
sample/test_remote.py
|
cara/webdriver
|
c03e320cac02a4591df7ac509dd29b0ad4478943
|
[
"MIT"
] | null | null | null |
sample/test_remote.py
|
cara/webdriver
|
c03e320cac02a4591df7ac509dd29b0ad4478943
|
[
"MIT"
] | null | null | null |
sample/test_remote.py
|
cara/webdriver
|
c03e320cac02a4591df7ac509dd29b0ad4478943
|
[
"MIT"
] | null | null | null |
import os, sys
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
print "Environment Mac", os.environ
try:
os.environ["SELENIUM"]
except KeyError:
print "Please set the environment variable SELENIUM to Selenium URL"
sys.exit(1)
driver = webdriver.Remote(
command_executor=os.environ["SELENIUM"],
desired_capabilities= DesiredCapabilities.FIREFOX
)
print "Driver initialized"
print "Getting https://web.whatsapp.com"
driver.get("https://web.whatsapp.com")
driver.save_screenshot('shot.png')
print "Screenshot saved"
driver.close()
print "Driver closed"
| 28.043478
| 78
| 0.765891
|
1f1a279e6674db90dcccd09dfb30e411654187f1
| 10,217
|
py
|
Python
|
docs/source/conf.py
|
ndem0/PyGeM
|
1d6500c3f8db179d57b463b4b9b9e41c20058651
|
[
"MIT"
] | 223
|
2016-03-30T20:13:03.000Z
|
2022-03-23T02:54:45.000Z
|
docs/source/conf.py
|
ndem0/PyGeM
|
1d6500c3f8db179d57b463b4b9b9e41c20058651
|
[
"MIT"
] | 67
|
2016-03-22T14:18:31.000Z
|
2022-02-05T21:39:01.000Z
|
docs/source/conf.py
|
ndem0/PyGeM
|
1d6500c3f8db179d57b463b4b9b9e41c20058651
|
[
"MIT"
] | 103
|
2016-03-14T11:13:17.000Z
|
2022-03-08T08:38:22.000Z
|
# -*- coding: utf-8 -*-
#
# pygem documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 22 16:09:40 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx
from sphinx.errors import VersionRequirementError
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
import pygem
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.graphviz',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.ifconfig',
]
intersphinx_mapping = {'python': ('http://docs.python.org/3', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('http://matplotlib.sourceforge.net/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = pygem.__project__
copyright = pygem.__copyright__
author = pygem.__author__
# autoclass
autoclass_content = 'both'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pygem.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for viewcode extension ---------------------------------------
# Follow alias objects that are imported from another module such as functions,
# classes and attributes. As side effects, this option ... ???
# If false, ... ???.
# The default is True.
viewcode_import = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'bizstyle'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = ['_tutorials']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pygemdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pygem.tex', u'pygem Documentation',
u'Filippo Salmoiraghi, Marco Tezzele', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, pygem.__title__, u'PyGeM Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pygem', u'pygem Documentation',
author, 'pygem', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autodoc_member_order = 'bysource'
| 32.332278
| 85
| 0.711657
|
d3809b6854130e5f9a51236f8da59c0ff4ecb65c
| 4,907
|
py
|
Python
|
docs/conf.py
|
drinksober/arbitrage
|
00ba419d84b7182692b62fed3121dd486cb5c2b9
|
[
"MIT"
] | 10
|
2018-03-26T02:19:18.000Z
|
2019-12-04T12:20:28.000Z
|
docs/conf.py
|
drinks5/arbitrage
|
00ba419d84b7182692b62fed3121dd486cb5c2b9
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
drinks5/arbitrage
|
00ba419d84b7182692b62fed3121dd486cb5c2b9
|
[
"MIT"
] | 1
|
2018-05-05T04:52:23.000Z
|
2018-05-05T04:52:23.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# cycle_arbitrage documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import cycle_arbitrage
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cycle-arbitrage'
copyright = u"2018, drinksober"
author = u"drinksober"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = cycle_arbitrage.__version__
# The full version, including alpha/beta/rc tags.
release = cycle_arbitrage.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cycle_arbitragedoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cycle_arbitrage.tex',
u'cycle-arbitrage Documentation',
u'drinksober', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cycle_arbitrage',
u'cycle-arbitrage Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cycle_arbitrage',
u'cycle-arbitrage Documentation',
author,
'cycle_arbitrage',
'One line description of project.',
'Miscellaneous'),
]
| 29.920732
| 77
| 0.689219
|
c7c6d8c06b8e664af054867f225c0ab9bf22b41a
| 440
|
py
|
Python
|
packages/python/plotly/plotly/validators/layout/xaxis/_categoryarraysrc.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/layout/xaxis/_categoryarraysrc.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/layout/xaxis/_categoryarraysrc.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class CategoryarraysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="categoryarraysrc", parent_name="layout.xaxis", **kwargs
):
super(CategoryarraysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
| 31.428571
| 82
| 0.670455
|
f6f8a7de3aea70c139d6e098ceccbe3756b8fed0
| 4,039
|
py
|
Python
|
cogan/tools/caffe/classifier.py
|
sagardsaxena/CoGAN
|
6a0fbdb850c9ee78a8c83631aab8ded26195822b
|
[
"FSFAP"
] | 285
|
2016-09-22T19:48:44.000Z
|
2022-03-11T03:30:07.000Z
|
cogan/tools/caffe/classifier.py
|
IsChristina/CoGAN
|
f940c28330ace09b3471d8745bfad7d891dbf095
|
[
"FSFAP"
] | 12
|
2016-12-08T03:43:42.000Z
|
2020-06-04T05:42:52.000Z
|
cogan/tools/caffe/classifier.py
|
IsChristina/CoGAN
|
f940c28330ace09b3471d8745bfad7d891dbf095
|
[
"FSFAP"
] | 76
|
2016-10-03T21:26:24.000Z
|
2022-03-28T12:05:57.000Z
|
#!/usr/bin/env python
"""
Classifier is an image classifier specialization of Net.
"""
import numpy as np
import caffe
class Classifier(caffe.Net):
"""
Classifier extends Net for image class prediction
by scaling, center cropping, or oversampling.
Parameters
----------
image_dims : dimensions to scale input for cropping/sampling.
Default is to scale to net input size for whole-image crop.
mean, input_scale, raw_scale, channel_swap: params for
preprocessing options.
"""
def __init__(self, model_file, pretrained_file, image_dims=None,
mean=None, input_scale=None, raw_scale=None,
channel_swap=None):
caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)
# configure pre-processing
in_ = self.inputs[0]
self.transformer = caffe.io.Transformer(
{in_: self.blobs[in_].data.shape})
self.transformer.set_transpose(in_, (2, 0, 1))
if mean is not None:
self.transformer.set_mean(in_, mean)
if input_scale is not None:
self.transformer.set_input_scale(in_, input_scale)
if raw_scale is not None:
self.transformer.set_raw_scale(in_, raw_scale)
if channel_swap is not None:
self.transformer.set_channel_swap(in_, channel_swap)
self.crop_dims = np.array(self.blobs[in_].data.shape[2:])
if not image_dims:
image_dims = self.crop_dims
self.image_dims = image_dims
def predict(self, inputs, oversample=True):
"""
Predict classification probabilities of inputs.
Parameters
----------
inputs : iterable of (H x W x K) input ndarrays.
oversample : boolean
average predictions across center, corners, and mirrors
when True (default). Center-only prediction when False.
Returns
-------
predictions: (N x C) ndarray of class probabilities for N images and C
classes.
"""
# Scale to standardize input dimensions.
input_ = np.zeros((len(inputs),
self.image_dims[0],
self.image_dims[1],
inputs[0].shape[2]),
dtype=np.float32)
for ix, in_ in enumerate(inputs):
input_[ix] = caffe.io.resize_image(in_, self.image_dims)
if oversample:
# Generate center, corner, and mirrored crops.
input_ = caffe.io.oversample(input_, self.crop_dims)
else:
# Take center crop.
center = np.array(self.image_dims) / 2.0
crop = np.tile(center, (1, 2))[0] + np.concatenate([
-self.crop_dims / 2.0,
self.crop_dims / 2.0
])
crop = crop.astype(int)
input_ = input_[:, crop[0]:crop[2], crop[1]:crop[3], :]
# Classify
caffe_in = np.zeros(np.array(input_.shape)[[0, 3, 1, 2]],
dtype=np.float32)
for ix, in_ in enumerate(input_):
caffe_in[ix] = self.transformer.preprocess(self.inputs[0], in_)
out = self.forward_all(**{self.inputs[0]: caffe_in})
predictions = out[self.outputs[0]]
# For oversampling, average predictions across crops.
        if oversample:
            predictions = predictions.reshape((len(predictions) // 10, 10, -1))
            predictions = predictions.mean(1)
        return predictions
def render(self,caffe_in):
# caffe_in = np.zeros((1,1,1,1), dtype=np.float32)
out = self.forward_all(**{self.inputs[0]: caffe_in})
predictions = out[self.outputs[0]]
return predictions
def render2(self,caffe_in):
# caffe_in = np.zeros((1,1,1,1), dtype=np.float32)
out = self.forward_all(**{self.inputs[0]: caffe_in})
predictions0 = out[self.outputs[0]]
predictions1 = out[self.outputs[1]]
        return predictions0, predictions1
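# Example usage (hypothetical sketch; the model files, image path and
# preprocessing values below are assumptions, not part of this module):
#
#   net = Classifier('deploy.prototxt', 'weights.caffemodel',
#                    image_dims=(256, 256), raw_scale=255,
#                    channel_swap=(2, 1, 0))
#   probs = net.predict([caffe.io.load_image('cat.jpg')], oversample=True)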
| 36.718182
| 78
| 0.588264
|
1955925c4a528dc792c9d8cce561bde5b72ed87c
| 2,727
|
py
|
Python
|
cheritest/trunk/tests/tlb/test_tlb_invalid_store_h.py
|
tupipa/beri
|
cef1b41d52592cfa7454ddf59f9f2994e447cd66
|
[
"Apache-2.0"
] | 36
|
2015-05-29T16:47:19.000Z
|
2022-02-08T21:16:26.000Z
|
cheritest/trunk/tests/tlb/test_tlb_invalid_store_h.py
|
tupipa/beri
|
cef1b41d52592cfa7454ddf59f9f2994e447cd66
|
[
"Apache-2.0"
] | 1
|
2015-10-14T13:05:21.000Z
|
2015-10-19T20:34:03.000Z
|
cheritest/trunk/tests/tlb/test_tlb_invalid_store_h.py
|
tupipa/beri
|
cef1b41d52592cfa7454ddf59f9f2994e447cd66
|
[
"Apache-2.0"
] | 15
|
2015-06-11T07:10:58.000Z
|
2021-06-18T05:14:54.000Z
|
#-
# Copyright (c) 2011 Robert N. M. Watson
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
# a0: paddr of testdata
# a1: PFN of testdata
# a2: EntryLo0 value
# a3: EntryLo1 value
# a4: Vaddr of testdata
# a5: Result of load
# a6: Expected PC of faulting instruction
# a7: final value of testdata
# s0: BadVAddr
# s1: Context
# s2: XContext
# s3: EntryHi
# s4: Status
# s5: Cause
# s6: EPC
class test_tlb_invalid_store_h(BaseBERITestCase):
@attr('tlb')
def test_badvaddr(self):
self.assertRegisterEqual(self.MIPS.s0, self.MIPS.a4, "Wrong BadVaddr")
@attr('tlb')
def test_context(self):
self.assertRegisterEqual(self.MIPS.s1, (self.MIPS.a4 & 0xffffe000)>>9, "Wrong Context") # TODO test page table base
@attr('tlb')
def test_xcontext(self):
self.assertRegisterEqual(self.MIPS.s2, (self.MIPS.a4 & 0xffffe000)>>9, "Wrong XContext") # TODO test page table base
@attr('tlb')
def test_entryhi(self):
self.assertRegisterMaskEqual(self.MIPS.a4, 0xfffff000, self.MIPS.s3, "Wrong EntryHi")
@attr('tlb')
def test_status(self):
self.assertRegisterMaskEqual(self.MIPS.s4, 2, 2, "Wrong EXL")
@attr('tlb')
def test_cause(self):
self.assertRegisterMaskEqual(self.MIPS.s5, 0x7c, 0xc, "Wrong Exception Code")
@attr('tlb')
def test_epc(self):
'''Test EPC after TLB Invalid exception'''
self.assertRegisterEqual(self.MIPS.a6, self.MIPS.s6, "Wrong EPC")
@attr('tlb')
def test_testdata(self):
self.assertRegisterEqual(self.MIPS.a7, 0xfffffffffffffedc, "Wrong testdata")
| 33.256098
| 124
| 0.716905
|
73b7364ad4f04d728e46be1c9550f1ab111e1e03
| 3,776
|
py
|
Python
|
nlpmodels/utils/elt/gpt_dataset.py
|
will-thompson-k/deeplearning-nlp-models
|
d3afac437b1ddf563c8d2694ada950bf20ab1c34
|
[
"MIT"
] | 77
|
2020-11-01T19:22:32.000Z
|
2021-12-12T20:41:34.000Z
|
nlpmodels/utils/elt/gpt_dataset.py
|
will-thompson-k/deeplearning-nlp-models
|
d3afac437b1ddf563c8d2694ada950bf20ab1c34
|
[
"MIT"
] | 3
|
2020-11-16T02:59:22.000Z
|
2020-12-21T23:22:55.000Z
|
nlpmodels/utils/elt/gpt_dataset.py
|
will-thompson-k/deeplearning-nlp-models
|
d3afac437b1ddf563c8d2694ada950bf20ab1c34
|
[
"MIT"
] | 6
|
2020-12-10T10:10:34.000Z
|
2020-12-21T14:17:49.000Z
|
"""
This module contains the GPT Dataset and GPT Dataloaders for the GPT problem.
"""
from typing import Tuple, Any
import torch
from torch.utils.data import DataLoader
from datasets import load_dataset
from nlpmodels.utils.elt.dataset import AbstractNLPDataset
from nlpmodels.utils.tokenizer import tokenize_corpus_basic
from nlpmodels.utils.vocabulary import NLPVocabulary
class GPTDataset(AbstractNLPDataset):
"""
GPT class for transforming and storing dataset for use in GPT language model.
Uses torchtext's WikiText2 dataset.
"""
def __init__(self, data: torch.Tensor, vocab: NLPVocabulary, block_size: int):
"""
Args:
data (torch.Tensor): 1D tensor of integers to sample batches from.
vocab (NLPVocabulary): Vocabulary. Not target/source this time.
block_size (int): Size of context window.
"""
self._data = data
self._vocab = vocab
self._block_size = block_size
@property
def data(self) -> torch.Tensor:
"""
Returns:
data (torch.Tensor): 1D tensor of integers to sample batches from.
"""
return self._data
def __len__(self) -> int:
"""
Returns: size of dataset.
"""
return len(self.data)
def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
idx (int): index of dataset slice to grab.
Returns:
Tuple of tensors (source,target) for that index.
"""
# only grabbing full length tensors
        idx = min(len(self.data) - self._block_size - 1, idx)
# grab a chunk of (block_size + 1) from the data
chunk = self.data[idx:idx + self._block_size + 1]
# return 2 block_size chunks shifted by 1 index
return chunk[:-1], chunk[1:]
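    # Illustrative sketch of the shift-by-one behaviour above (values are
    # made up): with block_size=4 and data = [5, 9, 2, 7, 1, ...], index 0
    # yields source = [5, 9, 2, 7] and target = [9, 2, 7, 1], i.e. the
    # target sequence is the source shifted one token to the right.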
@classmethod
def get_training_dataloader(cls, args: Any) -> Tuple[DataLoader, NLPVocabulary]:
"""
Returns a pytorch::Dataloader object and vocabulary ready for model training.
Args:
args: Parameters for deriving training data.
Returns:
Tuple of Dataloader class, source and target dictionaries
"""
batch_size = args.batch_size
block_size = args.block_size # size of context window.
train_data, vocab = cls.get_training_data(block_size)
train_loader = DataLoader(train_data,
batch_size=batch_size,
shuffle=True,
num_workers=2,
pin_memory=True)
return train_loader, vocab
@classmethod
def get_training_data(cls, block_size: int) -> Tuple[AbstractNLPDataset, NLPVocabulary]:
"""
Returns the dataset class along with vocabulary object.
Args:
block_size (int): The size of the context window.
Returns:
Tuple of the dataset and dictionary.
"""
# download the huggingfaces::wikitext language model development
train_dataset = load_dataset("wikitext", 'wikitext-2-raw-v1')['train']
# flatten the pyarrow chunks into one string
train_dataset = [" ".join([str(x) for x in train_dataset._data[0]])]
train_dataset = tokenize_corpus_basic(train_dataset, False)
        # NOTE: the full corpus is > 1MM words; no truncation is applied here,
        # though grabbing only the first ~300k tokens would speed things up.
# build vocabulary
vocab = NLPVocabulary.build_vocabulary([train_dataset[0]])
train_dataset = torch.LongTensor([vocab.token_to_idx[x] for x in train_dataset[0]])
# we pass the dataset, vocab... Dataset will do the rest
return cls(train_dataset, vocab, block_size), vocab
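# Example usage (hypothetical sketch; the hyper-parameter values are assumptions):
#
#   from argparse import Namespace
#   args = Namespace(batch_size=32, block_size=128)
#   train_loader, vocab = GPTDataset.get_training_dataloader(args)
#   source, target = next(iter(train_loader))  # each of shape (batch_size, block_size)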
| 34.642202
| 94
| 0.62553
|
58bfb20a7d7178752fa512effe9819cfe40e68bf
| 670
|
py
|
Python
|
sceptre/cli/status.py
|
acaire/sceptre
|
bf70aec9f88e095d295c93a40d091d4d45f57b3e
|
[
"Apache-2.0"
] | null | null | null |
sceptre/cli/status.py
|
acaire/sceptre
|
bf70aec9f88e095d295c93a40d091d4d45f57b3e
|
[
"Apache-2.0"
] | null | null | null |
sceptre/cli/status.py
|
acaire/sceptre
|
bf70aec9f88e095d295c93a40d091d4d45f57b3e
|
[
"Apache-2.0"
] | 4
|
2019-09-10T13:32:18.000Z
|
2021-06-16T19:03:47.000Z
|
import click
from sceptre.cli.helpers import catch_exceptions, get_stack_or_env, write
@click.command(name="status")
@click.argument("path")
@click.pass_context
@catch_exceptions
def status_command(ctx, path):
"""
Print status of stack or environment.
Prints the stack status or the status of the stacks within a environment
for a given config PATH.
"""
output_format = ctx.obj["output_format"]
no_colour = ctx.obj["no_colour"]
stack, env = get_stack_or_env(ctx, path)
if stack:
write(stack.get_status(), no_colour=no_colour)
elif env:
write(env.describe(), output_format=output_format, no_colour=no_colour)
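# Example invocation (hypothetical sketch; the config path is an assumption):
#
#   $ sceptre status dev/vpc.yaml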
| 25.769231
| 79
| 0.716418
|
456306b534a586ffc9e521600d9121ce94468383
| 1,574
|
py
|
Python
|
jobs/name-search-script/config.py
|
sumesh-aot/namex
|
53e11aed5ea550b71b7b983f1b57b65db5a06766
|
[
"Apache-2.0"
] | 4
|
2018-10-05T23:41:05.000Z
|
2019-06-19T16:17:50.000Z
|
jobs/name-search-script/config.py
|
sumesh-aot/namex
|
53e11aed5ea550b71b7b983f1b57b65db5a06766
|
[
"Apache-2.0"
] | 635
|
2018-05-31T04:12:46.000Z
|
2022-03-31T18:45:42.000Z
|
jobs/name-search-script/config.py
|
sumesh-aot/namex
|
53e11aed5ea550b71b7b983f1b57b65db5a06766
|
[
"Apache-2.0"
] | 71
|
2018-05-14T20:47:55.000Z
|
2022-03-31T23:08:30.000Z
|
"""Config setup for inprogress updater job."""
import os
from dotenv import find_dotenv, load_dotenv
# this will load all the envars from a .env file
load_dotenv(find_dotenv())
class BaseConfig(object):
"""Base config."""
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
    MAX_ROW_LIMIT = int(os.getenv('MAX_ROW_LIMIT', 3000))
SECRET_KEY = 'a secret'
SQLALCHEMY_TRACK_MODIFICATIONS = False
# POSTGRESQL
DB_USER = os.getenv('DATABASE_USERNAME', '')
DB_PASSWORD = os.getenv('DATABASE_PASSWORD', '')
DB_NAME = os.getenv('DATABASE_NAME', '')
DB_HOST = os.getenv('DATABASE_HOST', '')
DB_PORT = os.getenv('DATABASE_PORT', '5432')
SQLALCHEMY_DATABASE_URI = 'postgresql://{user}:{password}@{host}:{port}/{name}'.format(
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=int(DB_PORT),
name=DB_NAME,
)
class Config(BaseConfig):
"""Normal config."""
DEBUG = False
TESTING = False
class TestConfig(BaseConfig):
"""Test config."""
DEBUG = True
TESTING = True
# POSTGRESQL
DB_USER = os.getenv('DATABASE_USERNAME', '')
DB_PASSWORD = os.getenv('DATABASE_PASSWORD', '')
DB_NAME = os.getenv('DATABASE_NAME_TEST', '')
DB_HOST = os.getenv('DATABASE_HOST', '')
DB_PORT = os.getenv('DATABASE_PORT', '5432')
SQLALCHEMY_DATABASE_URI = 'postgresql://{user}:{password}@{host}:{port}/{name}'.format(
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=int(DB_PORT),
name=DB_NAME,
)
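# Example (illustrative sketch; the credential values below are assumptions):
# with DATABASE_USERNAME=namex, DATABASE_PASSWORD=secret, DATABASE_HOST=localhost,
# DATABASE_PORT=5432 and DATABASE_NAME=namex_db, SQLALCHEMY_DATABASE_URI resolves to
#
#   postgresql://namex:secret@localhost:5432/namex_db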
| 25.387097
| 91
| 0.641677
|
8a26e1bcb548fe1c96a10626e20e02561315f444
| 3,475
|
py
|
Python
|
commands/fun.py
|
MicrohexHQ/Wilson-Source-Code
|
7e0f6f97125b67885c7739c0f40c725aefb8dba8
|
[
"MIT"
] | null | null | null |
commands/fun.py
|
MicrohexHQ/Wilson-Source-Code
|
7e0f6f97125b67885c7739c0f40c725aefb8dba8
|
[
"MIT"
] | null | null | null |
commands/fun.py
|
MicrohexHQ/Wilson-Source-Code
|
7e0f6f97125b67885c7739c0f40c725aefb8dba8
|
[
"MIT"
] | null | null | null |
import discord
import asyncio
import yaml
import random
from discord.ext import commands
# YAML containing reactions
bot_reactions = yaml.safe_load(open('./data/yaml/reactions.yml'))
class Fun(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def puppet(self, ctx, *, message: str):
'''Get the bot to speak a message'''
await ctx.message.delete()
await ctx.send(message)
@commands.command()
async def message(self, ctx, recipent : discord.Member, *, message):
'''Direct Message a user'''
await ctx.message.delete()
await recipent.send('**Message from {}**\n{}'.format(ctx.message.author, message))
@commands.command()
async def send(self, ctx, channel: discord.TextChannel, *, message):
'''Direct Message a channel'''
await ctx.message.delete()
await channel.send('**Message from {}**\n{}'.format(ctx.message.author, message))
@commands.command()
async def avatar(self, ctx, *, user: discord.Member = None):
'''Get a discord user's avatar'''
if user is None:
user = ctx.message.author
embed = discord.Embed(title='Avatar to **{}**'.format(user.name), colour=0x1f0000)
embed.set_image(url=user.avatar_url)
await ctx.send(embed=embed)
@commands.command()
async def emoji(self, ctx, emoji :discord.Emoji):
embed = discord.Embed(colour=0x1f0000)
embed.set_image(url=emoji.url)
await ctx.send(embed=embed)
@commands.command()
async def lenny(self, ctx):
'''Get a lenny face ( ͡° ͜ʖ ͡°)'''
await ctx.message.delete()
await ctx.send('( ͡° ͜ʖ ͡°)')
@commands.command()
async def f(self, ctx):
'''Pay respects'''
user = ctx.message.author.id
await ctx.send('<@{}> paid their respects.'.format(user))
@commands.command()
async def flip(self, ctx):
'''Flip a coin'''
coin = ['**Heads!**', '**Tails!**']
await ctx.send(random.choice(coin))
@commands.command()
async def dice(self, ctx, message: int = 6):
'''Roll a dice'''
dice_faces = message
if (dice_faces > 0):
await ctx.send('Rolling D{}'.format(dice_faces))
# await self.bot.send_typing()
dice_roll = random.randint(1, dice_faces)
await asyncio.sleep(0.3)
await ctx.send('**{}!**'.format(dice_roll))
else:
await ctx.send('Invalid dice roll')
@commands.command()
async def react(self, ctx, reaction = 'none', *, message = 'none'):
'''Reaction GIFs'''
reaction_message = message
if (message == 'none'):
reaction_message = bot_reactions['{}'.format(reaction.lower())]['subject']
if (reaction != 'none'):
try:
delete_target = ctx.message
user = ctx.message.author.id
embed = discord.Embed(title=None,
description=(bot_reactions[reaction.lower()]['message'].format(user, reaction_message)),
colour=0x1f0000)
embed.set_image(url=bot_reactions[reaction.lower()]['img'])
await delete_target.delete()
await ctx.send(embed=embed)
except:
await ctx.send('This reaction does not exist.')
def setup(bot):
bot.add_cog(Fun(bot))
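# Example of the expected ./data/yaml/reactions.yml structure
# (illustrative sketch; the reaction name, message and URL are assumptions):
#
#   hug:
#     subject: "everyone"
#     message: "<@{}> hugs {}!"
#     img: "https://example.com/hug.gif"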
| 34.405941
| 126
| 0.574964
|
2ebaf4e4a78e26f2d1a81dabe3cfe233f814cad0
| 12,518
|
py
|
Python
|
datasets/trivia_qa/trivia_qa.py
|
xiaoya-li/datasets
|
b153141355d0c3ba3a6933ab901cef379b7830e0
|
[
"Apache-2.0"
] | 2
|
2021-08-28T06:48:02.000Z
|
2021-08-28T23:18:34.000Z
|
datasets/trivia_qa/trivia_qa.py
|
xiaoya-li/datasets
|
b153141355d0c3ba3a6933ab901cef379b7830e0
|
[
"Apache-2.0"
] | null | null | null |
datasets/trivia_qa/trivia_qa.py
|
xiaoya-li/datasets
|
b153141355d0c3ba3a6933ab901cef379b7830e0
|
[
"Apache-2.0"
] | 4
|
2021-07-25T17:09:39.000Z
|
2022-02-12T03:30:08.000Z
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TriviaQA: A Reading Comprehension Dataset."""
import glob
import json
import os
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """
@article{2017arXivtriviaqa,
author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},
Daniel and {Zettlemoyer}, Luke},
title = "{triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}",
journal = {arXiv e-prints},
year = 2017,
eid = {arXiv:1705.03551},
pages = {arXiv:1705.03551},
archivePrefix = {arXiv},
eprint = {1705.03551},
}
"""
_DOWNLOAD_URL_TMPL = "http://nlp.cs.washington.edu/triviaqa/data/triviaqa-{}.tar.gz"
_TRAIN_FILE_FORMAT = "*-train.json"
_VALIDATION_FILE_FORMAT = "*-dev.json"
_TEST_FILE_FORMAT = "*test-without-answers.json"
_WEB_EVIDENCE_DIR = "evidence/web"
_WIKI_EVIDENCE_DIR = "evidence/wikipedia"
_DESCRIPTION = """\
TriviaQA is a reading comprehension dataset containing over 650K
question-answer-evidence triples. TriviaQA includes 95K question-answer
pairs authored by trivia enthusiasts and independently gathered evidence
documents, six per question on average, that provide high quality distant
supervision for answering the questions.
"""
_RC_DESCRIPTION = """\
Question-answer pairs where all documents for a given question contain the
answer string(s).
"""
_UNFILTERED_DESCRIPTION = """\
110k question-answer pairs for open domain QA where not all documents for a
given question contain the answer string(s). This makes the unfiltered dataset
more appropriate for IR-style QA.
"""
_CONTEXT_ADDENDUM = "Includes context from Wikipedia and search results."
def _web_evidence_dir(tmp_dir):
return sorted(glob.glob(os.path.join(tmp_dir, _WEB_EVIDENCE_DIR)))
def _wiki_evidence_dir(tmp_dir):
return sorted(glob.glob(os.path.join(tmp_dir, _WIKI_EVIDENCE_DIR)))
class TriviaQaConfig(datasets.BuilderConfig):
"""BuilderConfig for TriviaQA."""
def __init__(self, unfiltered=False, exclude_context=False, **kwargs):
"""BuilderConfig for TriviaQA.
Args:
unfiltered: bool, whether to use the unfiltered version of the dataset,
intended for open-domain QA.
exclude_context: bool, whether to exclude Wikipedia and search context for
reduced size.
**kwargs: keyword arguments forwarded to super.
"""
name = "unfiltered" if unfiltered else "rc"
if exclude_context:
name += ".nocontext"
description = _UNFILTERED_DESCRIPTION if unfiltered else _RC_DESCRIPTION
if not exclude_context:
description += _CONTEXT_ADDENDUM
super(TriviaQaConfig, self).__init__(
name=name, description=description, version=datasets.Version("1.1.0"), **kwargs
)
self.unfiltered = unfiltered
self.exclude_context = exclude_context
class TriviaQa(datasets.GeneratorBasedBuilder):
"""TriviaQA is a reading comprehension dataset.
    It contains over 650K question-answer-evidence triples.
"""
BUILDER_CONFIGS = [
TriviaQaConfig(unfiltered=False, exclude_context=False), # rc
TriviaQaConfig(unfiltered=False, exclude_context=True), # rc.nocontext
TriviaQaConfig(unfiltered=True, exclude_context=False), # unfiltered
TriviaQaConfig(unfiltered=True, exclude_context=True),
# unfilered.nocontext
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"question": datasets.Value("string"),
"question_id": datasets.Value("string"),
"question_source": datasets.Value("string"),
"entity_pages": datasets.features.Sequence(
{
"doc_source": datasets.Value("string"),
"filename": datasets.Value("string"),
"title": datasets.Value("string"),
"wiki_context": datasets.Value("string"),
}
),
"search_results": datasets.features.Sequence(
{
"description": datasets.Value("string"),
"filename": datasets.Value("string"),
"rank": datasets.Value("int32"),
"title": datasets.Value("string"),
"url": datasets.Value("string"),
"search_context": datasets.Value("string"),
}
),
"answer": dict(
{
"aliases": datasets.features.Sequence(datasets.Value("string")),
"normalized_aliases": datasets.features.Sequence(datasets.Value("string")),
"matched_wiki_entity_name": datasets.Value("string"),
"normalized_matched_wiki_entity_name": datasets.Value("string"),
"normalized_value": datasets.Value("string"),
"type": datasets.Value("string"),
"value": datasets.Value("string"),
}
),
}
),
supervised_keys=None,
homepage="http://nlp.cs.washington.edu/triviaqa/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
cfg = self.config
download_urls = dict()
if not (cfg.unfiltered and cfg.exclude_context):
download_urls["rc"] = _DOWNLOAD_URL_TMPL.format("rc")
if cfg.unfiltered:
download_urls["unfiltered"] = _DOWNLOAD_URL_TMPL.format("unfiltered")
file_paths = dl_manager.download_and_extract(download_urls)
qa_dir = (
os.path.join(file_paths["unfiltered"], "triviaqa-unfiltered")
if cfg.unfiltered
else os.path.join(file_paths["rc"], "qa")
)
train_files = sorted(glob.glob(os.path.join(qa_dir, _TRAIN_FILE_FORMAT)))
valid_files = sorted(glob.glob(os.path.join(qa_dir, _VALIDATION_FILE_FORMAT)))
test_files = sorted(glob.glob(os.path.join(qa_dir, _TEST_FILE_FORMAT)))
if cfg.exclude_context:
web_evidence_dir = None
wiki_evidence_dir = None
else:
web_evidence_dir = os.path.join(file_paths["rc"], _WEB_EVIDENCE_DIR)
wiki_evidence_dir = os.path.join(file_paths["rc"], _WIKI_EVIDENCE_DIR)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"files": train_files, "web_dir": web_evidence_dir, "wiki_dir": wiki_evidence_dir},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"files": valid_files, "web_dir": web_evidence_dir, "wiki_dir": wiki_evidence_dir},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"files": test_files, "web_dir": web_evidence_dir, "wiki_dir": wiki_evidence_dir},
),
]
def _generate_examples(self, files, web_dir, wiki_dir):
"""This function returns the examples."""
def parse_example(article):
"""Return a single example from an article JSON record."""
def _strip(collection):
return [item.strip() for item in collection]
if "Answer" in article:
answer = article["Answer"]
answer_dict = {
"aliases": _strip(answer["Aliases"]),
"normalized_aliases": _strip(answer["NormalizedAliases"]),
"matched_wiki_entity_name": answer.get("MatchedWikiEntryName", "").strip(),
"normalized_matched_wiki_entity_name": answer.get("NormalizedMatchedWikiEntryName", "").strip(),
"normalized_value": answer["NormalizedValue"].strip(),
"type": answer["Type"].strip(),
"value": answer["Value"].strip(),
}
else:
answer_dict = {
"aliases": [],
"normalized_aliases": [],
"matched_wiki_entity_name": "<unk>",
"normalized_matched_wiki_entity_name": "<unk>",
"normalized_value": "<unk>",
"type": "",
"value": "<unk>",
}
if self.config.exclude_context:
article["SearchResults"] = []
article["EntityPages"] = []
def _add_context(collection, context_field, file_dir):
"""Adds context from file, or skips if file does not exist."""
new_items = []
for item in collection:
if "Filename" not in item:
logger.info("Missing context 'Filename', skipping.")
continue
new_item = item.copy()
fname = item["Filename"]
try:
with open(os.path.join(file_dir, fname), encoding="utf-8") as f:
new_item[context_field] = f.read()
                    except (IOError, FileNotFoundError):
logger.info("File does not exist, skipping: %s", fname)
continue
new_items.append(new_item)
return new_items
def _strip_if_str(v):
return v.strip() if isinstance(v, str) else v
def _transpose_and_strip_dicts(dicts, field_names):
return {
datasets.naming.camelcase_to_snakecase(k): [_strip_if_str(d[k]) for d in dicts]
for k in field_names
}
search_results = _transpose_and_strip_dicts(
_add_context(article.get("SearchResults", []), "SearchContext", web_dir),
["Description", "Filename", "Rank", "Title", "Url", "SearchContext"],
)
entity_pages = _transpose_and_strip_dicts(
_add_context(article.get("EntityPages", []), "WikiContext", wiki_dir),
["DocSource", "Filename", "Title", "WikiContext"],
)
question = article["Question"].strip()
question_id = article["QuestionId"]
question_source = article["QuestionSource"].strip()
return {
"entity_pages": entity_pages,
"search_results": search_results,
"question": question,
"question_id": question_id,
"question_source": question_source,
"answer": answer_dict,
}
for filepath in files:
logger.info("generating examples from = %s", filepath)
fname = os.path.basename(filepath)
with open(filepath, encoding="utf-8") as f:
current_record = ""
for line in f:
if line == " {\n":
current_record = line
elif line.startswith(" }"): # Handles final record as well.
article = json.loads(current_record + "}")
current_record = ""
example = parse_example(article)
yield "%s_%s" % (fname, example["question_id"]), example
else:
current_record += line
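# Example usage (hypothetical sketch; assumes the `datasets` library is installed):
#
#   from datasets import load_dataset
#   ds = load_dataset("trivia_qa", "rc.nocontext", split="train")
#   print(ds[0]["question"], ds[0]["answer"]["value"])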
| 40.775244
| 116
| 0.56974
|
c78b38fa548f7774dba9a8aa9349f42dc978307b
| 8,197
|
py
|
Python
|
opencontrail_netns/network_manage.py
|
Doude/opencontrail-netns
|
48351f4dd5e5c91e3570a6024e96e4a5f19013b2
|
[
"Apache-2.0"
] | null | null | null |
opencontrail_netns/network_manage.py
|
Doude/opencontrail-netns
|
48351f4dd5e5c91e3570a6024e96e4a5f19013b2
|
[
"Apache-2.0"
] | null | null | null |
opencontrail_netns/network_manage.py
|
Doude/opencontrail-netns
|
48351f4dd5e5c91e3570a6024e96e4a5f19013b2
|
[
"Apache-2.0"
] | 1
|
2019-01-16T00:04:32.000Z
|
2019-01-16T00:04:32.000Z
|
#!/bin/env python
import argparse
import sys
from vnc_api.vnc_api import *
class NetworkManager(object):
def __init__(self, api_server, api_port, project=None):
self._client = VncApi(api_server_host=api_server,
api_server_port=api_port)
self._project = project
def _netname(self, name):
fqn = name.split(':')
if len(fqn) == 1:
return "%s:%s" % (self._project, name)
return name
# end _netname
def _add_subnet(self, vnet, subnet):
ipam = self._client.network_ipam_read(
fq_name=['default-domain',
'default-project',
'default-network-ipam'])
(prefix, plen) = subnet.split('/')
subnet = IpamSubnetType(subnet=SubnetType(prefix, int(plen)))
vnet.add_network_ipam(ipam, VnSubnetsType([subnet]))
# end _add_subnet
def create(self, name, subnet=None):
netname = self._netname(name)
fq_name = netname.split(':')
try:
vnet = self._client.virtual_network_read(fq_name=fq_name)
            print('Network %s already exists' % netname)
sys.exit(1)
except NoIdError:
pass
vnet = VirtualNetwork(fq_name[-1], parent_type='project',
fq_name=fq_name)
if subnet:
self._add_subnet(vnet, subnet)
self._client.virtual_network_create(vnet)
# end create
def delete(self, name):
netname = self._netname(name)
fq_name = netname.split(':')
try:
vnet = self._client.virtual_network_read(fq_name=fq_name)
except NoIdError:
            print('Network %s does not exist' % netname)
sys.exit(1)
self._client.virtual_network_delete(id=vnet.uuid)
# end delete
def show(self, name):
netname = self._netname(name)
fq_name = netname.split(':')
try:
vnet = self._client.virtual_network_read(fq_name=fq_name)
except NoIdError:
            print('Network %s does not exist' % netname)
            sys.exit(1)
        print('name: %s' % ':'.join(vnet.fq_name))
        print('uuid: %s' % vnet.uuid)
ipam_refs = vnet.get_network_ipam_refs()
if ipam_refs is None:
ipam_refs = []
for iref in ipam_refs:
subnets = iref['attr'].ipam_subnets
for snet in subnets:
                print(' ', snet.subnet.__dict__)
instance_list = vnet.get_routing_instances()
if len(instance_list):
rt_instance = self._client.routing_instance_read(
id=instance_list[0]['uuid'])
for rt in rt_instance.route_target_refs:
                print(' ', rt['to'][0], rt['attr'].__dict__)
# end show
def _rti_rtarget_add(self, vnet, rtarget_str, direction):
instance_list = vnet.get_routing_instances()
if len(instance_list) == 0:
            print('Routing instance not found')
sys.exit(1)
rt_instance = self._client.routing_instance_read(
id=instance_list[0]['uuid'])
for rt in rt_instance.route_target_refs:
if rt['to'][0] == rtarget_str:
sys.exit(1)
try:
rt_obj = self._client.route_target_read(fq_name=rtarget_str)
except NoIdError:
rt_obj = RouteTarget(rtarget_str)
self._client.route_target_create(rt_obj)
rt_instance.add_route_target(RouteTarget(rtarget_str),
InstanceTargetType(
import_export=direction))
self._client.routing_instance_update(rt_instance)
def _rti_rtarget_del(self, vnet, rtarget_str, direction):
instance_list = vnet.get_routing_instances()
if len(instance_list) == 0:
            print('Routing instance not found')
sys.exit(1)
rt_instance = self._client.routing_instance_read(
id=instance_list[0]['uuid'])
for rt in rt_instance.route_target_refs:
if rt['to'][0] == rtarget_str:
rt_obj = self._client.route_target_read(id=rt['uuid'])
rt_instance.del_route_target(rt_obj)
self._client.routing_instance_update(rt_instance)
# end _rti_rtarget_add
def rtarget_add(self, name, rtarget, direction=None):
netname = self._netname(name)
fq_name = netname.split(':')
try:
vnet = self._client.virtual_network_read(fq_name=fq_name)
except NoIdError:
            print('Network %s does not exist' % netname)
sys.exit(1)
rtarget_str = 'target:%s' % rtarget
if direction:
self._rti_rtarget_add(vnet, rtarget_str, direction)
return
target_list = vnet.get_route_target_list()
if target_list:
for rt in target_list:
if rt['to'][0] == rtarget_str:
sys.exit(1)
if target_list:
target_list.add_route_target(rtarget_str)
else:
target_list = RouteTargetList([rtarget_str])
vnet.set_route_target_list(target_list)
self._client.virtual_network_update(vnet)
# end rtarget_add
def rtarget_del(self, name, rtarget, direction=None):
netname = self._netname(name)
fq_name = netname.split(':')
try:
vnet = self._client.virtual_network_read(fq_name=fq_name)
except NoIdError:
            print('Network %s does not exist' % netname)
sys.exit(1)
rtarget_str = 'target:%s' % rtarget
if direction:
self._rti_rtarget_del(vnet, rtarget_str, direction)
return
# target_list = vnet.get_route_target_list()
# if target_list:
# for rt in target_list:
# if rt['to'][0] == rtarget_str:
# sys.exit(1)
# if target_list:
# target_list.add_route_target(rtarget_str)
# else:
# target_list = RouteTargetList([rtarget_str])
# vnet.set_route_target_list(target_list)
# self._client.virtual_network_update(vnet)
# end rtarget_del
def main(argv):
parser = argparse.ArgumentParser()
defaults = {
'api-server': '127.0.0.1',
'api-port': '8082',
'project': 'default-domain:default-project',
}
parser.set_defaults(**defaults)
parser.add_argument(
"-s", "--api-server", help="API server address")
parser.add_argument("-p", "--api-port", help="API server port")
parser.add_argument("--project", help="OpenStack project name")
parser.add_argument("--rtarget", help="Router target")
parser.add_argument("--import-only", action='store_true')
parser.add_argument("--export-only", action='store_true')
parser.add_argument("--subnet", help="Subnet prefix")
parser.add_argument("command", choices=['create', 'delete', 'show',
'rtarget-add', 'rtarget-del'])
parser.add_argument("network", help="Network name")
arguments = parser.parse_args(argv)
manager = NetworkManager(arguments.api_server, arguments.api_port,
project=arguments.project)
if arguments.command == "create":
manager.create(arguments.network, subnet=arguments.subnet)
elif arguments.command == "delete":
manager.delete(arguments.network)
elif arguments.command == "show":
manager.show(arguments.network)
elif arguments.command == "rtarget-add":
direction = None
if arguments.import_only:
direction = 'import'
elif arguments.export_only:
direction = 'export'
manager.rtarget_add(arguments.network, arguments.rtarget, direction)
elif arguments.command == "rtarget-del":
direction = None
if arguments.import_only:
direction = 'import'
elif arguments.export_only:
direction = 'export'
manager.rtarget_del(arguments.network, arguments.rtarget, direction)
if __name__ == '__main__':
main(sys.argv[1:])
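A minimal usage sketch for the command-line interface defined by main() above. The API server address, network name, subnet prefix, and route target below are placeholders (not from the original source), and a reachable Contrail API server is assumed; main() is called with the same arguments a shell invocation would pass.

# Hypothetical invocations of the CLI above; all values are placeholders.
main(['--api-server', '10.0.0.10', 'create', 'demo-net', '--subnet', '10.1.1.0/24'])
main(['show', 'demo-net'])
main(['rtarget-add', 'demo-net', '--rtarget', '64512:100', '--import-only'])
main(['delete', 'demo-net'])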
| 34.297071
| 76
| 0.592778
|
aa7539ae9f09163bf1a2cc9f7dfcc6fc06737ae8
| 1,820
|
py
|
Python
|
tensorflow/python/ops/nn_grad_test.py
|
qinchangping/tensorflow
|
f7f7036d1cdc5716aff976fae0ea4d1b9a931b56
|
[
"Apache-2.0"
] | 24
|
2018-02-01T15:49:22.000Z
|
2021-01-11T16:31:18.000Z
|
tensorflow/python/ops/nn_grad_test.py
|
qinchangping/tensorflow
|
f7f7036d1cdc5716aff976fae0ea4d1b9a931b56
|
[
"Apache-2.0"
] | 2
|
2018-09-09T07:29:07.000Z
|
2019-03-11T07:14:45.000Z
|
tensorflow/python/ops/nn_grad_test.py
|
qinchangping/tensorflow
|
f7f7036d1cdc5716aff976fae0ea4d1b9a931b56
|
[
"Apache-2.0"
] | 4
|
2018-10-29T18:43:22.000Z
|
2020-09-28T07:19:52.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in nn_grad.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_grad
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class Relu6OpTest(test.TestCase):
def testRelu6GradGrad(self):
inputs = constant_op.constant(
[[-2, -1, 1, 3], [5, 7, 8, 9]], dtype=dtypes.float32)
x_init_value = np.array([[-3.5, -1.5, 2, 4], [4.5, 7.5, 8.5, 11]])
r = nn_ops.relu6(inputs)
r_g = gradients_impl.gradients(r, inputs)[0]
with self.test_session():
error = gradient_checker.compute_gradient_error(
inputs,
inputs.get_shape().as_list(),
r_g,
r_g.get_shape().as_list(),
x_init_value=x_init_value)
self.assertLess(error, 1e-4)
if __name__ == "__main__":
test.main()
| 35
| 80
| 0.697802
|
7d96ab622a381dd81ef585a1c073a1ed155c90b4
| 1,143
|
py
|
Python
|
var/spack/repos/builtin/packages/py-huggingface-hub/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
var/spack/repos/builtin/packages/py-huggingface-hub/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
var/spack/repos/builtin/packages/py-huggingface-hub/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyHuggingfaceHub(PythonPackage):
"""This library allows anyone to work with the Hub
repositories: you can clone them, create them and upload
your models to them."""
homepage = "https://github.com/huggingface/huggingface_hub"
pypi = "huggingface_hub/huggingface_hub-0.0.10.tar.gz"
version('0.0.10', sha256='556765e4c7edd2d2c4c733809bae1069dca20e10ff043870ec40d53e498efae2')
version('0.0.8', sha256='be5b9a7ed36437bb10a780d500154d426798ec16803ff3406f7a61107e4ebfc2')
depends_on('python@3.6:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-filelock', type=('build', 'run'))
depends_on('py-requests', type=('build', 'run'))
depends_on('py-tqdm', type=('build', 'run'))
depends_on('py-typing-extensions', when='@0.0.10:', type=('build', 'run'))
depends_on('py-importlib-metadata', when='^python@:3.7', type=('build', 'run'))
| 42.333333
| 96
| 0.708661
|
89edd23b0ecdfa651f3d8d6e127fcb6d2bd26f5a
| 2,834
|
py
|
Python
|
pause_menu.py
|
mandaw2014/parkour
|
8b9a7f6fa47d5de82c9d22c27d8d803cc6a30b07
|
[
"MIT"
] | 4
|
2021-09-04T08:22:23.000Z
|
2022-01-20T17:35:18.000Z
|
pause_menu.py
|
mandaw2014/parkour
|
8b9a7f6fa47d5de82c9d22c27d8d803cc6a30b07
|
[
"MIT"
] | 1
|
2021-05-31T18:48:10.000Z
|
2021-05-31T18:48:10.000Z
|
pause_menu.py
|
mandaw2014/parkour
|
8b9a7f6fa47d5de82c9d22c27d8d803cc6a30b07
|
[
"MIT"
] | 2
|
2021-05-31T17:16:06.000Z
|
2021-12-09T13:33:52.000Z
|
from ursina import *
class PauseMenu(Entity):
def __init__(self):
super().__init__(parent = camera.ui, ignore_paused = True)
self.pause_menu = Entity(parent = self, enabled = True)
self.player = None
self.main_menu = None
self.level01 = None
self.level02 = None
self.level03 = None
self.level04 = None
self.level05 = None
self.level06 = None
self.level07 = None
self.level08 = None
self.level09 = None
def reset():
self.pause_menu.disable()
self.player.enable()
mouse.locked = True
self.player.time.enable()
self.player.time_running = True
if self.level01.is_enabled == True:
self.player.position = (888, 12, 18)
self.player.rotation = (0, -142, 0)
if self.level02.is_enabled == True:
self.player.position = (811, 14, 108)
self.player.rotation = (0, -267, 0)
if self.level03.is_enabled == True:
self.player.position = (809, 4, 106)
self.player.rotation = (0, 181, 0)
if self.level04.is_enabled == True:
self.player.position = (5, 10, -150)
self.player.rotation = (0, -90, 0)
if self.level05.is_enabled == True:
self.player.position = (-1, -5, 34)
self.player.rotation = (0, -180, 0)
if self.level06.is_enabled == True:
self.player.position = (130, -120, 0)
self.player.rotation = (0, -90, 0)
if self.level07.is_enabled == True:
self.player.position = (0, 10, 0)
self.player.rotation = (0, 0, 0)
            if self.level08.is_enabled == True:
self.player.position = (0, 10, 0)
self.player.rotation = (0, 0, 0)
if self.level09.is_enabled == True:
self.player.position = (0, 10, 0)
self.player.rotation = (0, 0, 0)
def resume():
self.player.enable()
mouse.locked = True
self.pause_menu.disable()
self.player.time.enable()
self.player.time_running = True
resume_button = Button(text = "R e s u m e", color = color.black, scale_y = 0.1, scale_x = 0.3, y = 0.12, parent = self.pause_menu)
reset_button = Button(text = "R e s e t", color = color.black, scale_y = 0.1, scale_x = 0.3, y = 0, parent = self.pause_menu)
quit_button = Button(text = "Q u i t", color = color.black, scale_y = 0.1, scale_x = 0.3, y = -0.12, parent = self.pause_menu)
quit_button.on_click = application.quit
reset_button.on_click = Func(reset)
resume_button.on_click = Func(resume)
| 41.072464
| 139
| 0.534227
|
48cbfb1a6e730cdda2d971851de46d2255a455bd
| 5,579
|
py
|
Python
|
bookmarks/views/generic.py
|
gradel/django-generic-bookmarks
|
98d4c2099c019a6767fccebd96ec726f35fd1414
|
[
"MIT"
] | null | null | null |
bookmarks/views/generic.py
|
gradel/django-generic-bookmarks
|
98d4c2099c019a6767fccebd96ec726f35fd1414
|
[
"MIT"
] | null | null | null |
bookmarks/views/generic.py
|
gradel/django-generic-bookmarks
|
98d4c2099c019a6767fccebd96ec726f35fd1414
|
[
"MIT"
] | null | null | null |
"""
Class based generic views.
These views are only available if you are using Django >= 1.3.
"""
from django.contrib.auth.models import User
from django.views.generic.detail import DetailView
from bookmarks.handlers import library
class BookmarksMixin(object):
"""
Mixin for bookmarks class based views.
Views subclassing this class must implement the *get_bookmarks* method.
.. py:attribute:: context_bookmarks_name
The name of context variable containing bookmarks.
Default is *'bookmarks'*.
.. py:attribute:: key
        The bookmarks key to use for retrieving bookmarks.
Default is *None*.
.. py:attribute:: reversed_order
If True, bookmarks are ordered by creation date descending.
Default is True.
"""
context_bookmarks_name = 'bookmarks'
template_name_suffix = '_bookmarks'
key = None
reversed_order = True
def get_context_bookmarks_name(self, obj):
"""
Get the variable name to use for the bookmarks.
"""
return self.context_bookmarks_name
def get_key(self, obj):
"""
        Get the key to use to retrieve bookmarks.
If the key is None, use all keys.
"""
return self.key
def order_is_reversed(self, obj):
"""
Return True to sort bookmarks by creation date descending.
"""
return self.reversed_order
def get_context_data(self, **kwargs):
context = super(BookmarksMixin, self).get_context_data(**kwargs)
context_bookmarks_name = self.get_context_bookmarks_name(self.object)
key = self.get_key(self.object)
is_reversed = self.order_is_reversed(self.object)
bookmarks = self.get_bookmarks(self.object, key, is_reversed)
context[context_bookmarks_name] = bookmarks
return context
def get_bookmarks(self, obj, key, is_reversed):
"""
Must return a bookmark queryset.
"""
raise NotImplementedError
class BookmarksForView(BookmarksMixin, DetailView):
"""
    Can be used to retrieve and display a list of bookmarks of a given object.
This class based view accepts all the parameters that can be passed
to *django.views.generic.detail.DetailView*.
For example, you can add in your *urls.py* a view displaying all
bookmarks of a single active article::
from bookmarks.views.generic import BookmarksForView
urlpatterns = patterns('',
url(r'^(?P<slug>[-\w]+)/bookmarks/$', BookmarksForView.as_view(
queryset=Article.objects.filter(is_active=True)),
name="article_bookmarks"),
)
You can also manage bookmarks order (default is by date descending) and
    bookmarks keys, in order to retrieve only bookmarks for a given key, e.g.::
from bookmarks.views.generic import BookmarksForView
urlpatterns = patterns('',
url(r'^(?P<slug>[-\w]+)/bookmarks/$', BookmarksForView.as_view(
model=Article, key='mykey', reversed_order=False),
name="article_bookmarks"),
)
Two context variables will be present in the template:
- *object*: the bookmarked article
- *bookmarks*: all the bookmarks of that article
The default template suffix is ``'_bookmarks'``, and so the template
used in our example is ``article_bookmarks.html``.
"""
def get_bookmarks(self, obj, key, is_reversed):
"""
Return a queryset of bookmarks of *obj*.
"""
lookups = {'instance': obj, 'reversed': is_reversed}
if key is not None:
lookups['key'] = key
return library.backend.filter(**lookups)
class BookmarksByView(BookmarksMixin, DetailView):
"""
    Can be used to retrieve and display a list of bookmarks saved by a
given user.
This class based view accepts all the parameters that can be passed
to *django.views.generic.detail.DetailView*, with an exception:
it is not mandatory to specify the model or queryset used to
    retrieve the user (*django.contrib.auth.models.User* model is used
by default).
For example, you can add in your *urls.py* a view displaying all
bookmarks by a single active user::
from bookmarks.views.generic import BookmarksByView
urlpatterns = patterns('',
url(r'^(?P<pk>\d+)/bookmarks/$', BookmarksByView.as_view(
queryset=User.objects.filter(is_active=True)),
name="user_bookmarks"),
)
You can also manage bookmarks order (default is by date descending) and
    bookmarks keys, in order to retrieve only bookmarks for a given key, e.g.::
from bookmarks.views.generic import BookmarksByView
urlpatterns = patterns('',
url(r'^(?P<pk>\d+)/bookmarks/$', BookmarksByView.as_view(
key='mykey', reversed_order=False),
name="user_bookmarks"),
)
Two context variables will be present in the template:
- *object*: the user
- *bookmarks*: all the bookmarks saved by that user
The default template suffix is ``'_bookmarks'``, and so the template
used in our example is ``user_bookmarks.html``.
"""
model = User
def get_bookmarks(self, obj, key, is_reversed):
"""
Return a queryset of bookmarks saved by *obj* user.
"""
lookups = {'user': obj, 'reversed': is_reversed}
if key is not None:
lookups['key'] = key
return library.backend.filter(**lookups)
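The two concrete views above cover the usual cases; a custom listing only needs to override get_bookmarks(). Below is a minimal sketch of such a subclass. The class name, the 'favourites' key, and the ten-item cap are illustrative assumptions, not part of the original package.

# Hypothetical subclass: show only the ten most recent bookmarks for a key.
class RecentBookmarksForView(BookmarksMixin, DetailView):
    key = 'favourites'

    def get_bookmarks(self, obj, key, is_reversed):
        lookups = {'instance': obj, 'reversed': is_reversed}
        if key is not None:
            lookups['key'] = key
        return library.backend.filter(**lookups)[:10]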
| 33.011834
| 79
| 0.647069
|
d17d237b844e08f46089321ff1d14b9b1fdb1098
| 12,325
|
py
|
Python
|
clients/oathkeeper/python/ory_oathkeeper_client/model/upstream.py
|
sproutfi/sdk
|
5340b37d7b3e8f3c1b8f4c0c16ede05488498620
|
[
"Apache-2.0"
] | null | null | null |
clients/oathkeeper/python/ory_oathkeeper_client/model/upstream.py
|
sproutfi/sdk
|
5340b37d7b3e8f3c1b8f4c0c16ede05488498620
|
[
"Apache-2.0"
] | null | null | null |
clients/oathkeeper/python/ory_oathkeeper_client/model/upstream.py
|
sproutfi/sdk
|
5340b37d7b3e8f3c1b8f4c0c16ede05488498620
|
[
"Apache-2.0"
] | null | null | null |
"""
ORY Oathkeeper
ORY Oathkeeper is a reverse proxy that checks the HTTP Authorization for validity against a set of rules. This service uses Hydra to validate access tokens and policies. # noqa: E501
The version of the OpenAPI document: v0.38.19-beta.1
Contact: hi@ory.am
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from ory_oathkeeper_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from ory_oathkeeper_client.exceptions import ApiAttributeError
class Upstream(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'preserve_host': (bool,), # noqa: E501
'strip_path': (str,), # noqa: E501
'url': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'preserve_host': 'preserve_host', # noqa: E501
'strip_path': 'strip_path', # noqa: E501
'url': 'url', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""Upstream - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
preserve_host (bool): PreserveHost, if false (the default), tells ORY Oathkeeper to set the upstream request's Host header to the hostname of the API's upstream's URL. Setting this flag to true instructs ORY Oathkeeper not to do so.. [optional] # noqa: E501
strip_path (str): StripPath if set, replaces the provided path prefix when forwarding the requested URL to the upstream URL.. [optional] # noqa: E501
url (str): URL is the URL the request will be proxied to.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""Upstream - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
preserve_host (bool): PreserveHost, if false (the default), tells ORY Oathkeeper to set the upstream request's Host header to the hostname of the API's upstream's URL. Setting this flag to true instructs ORY Oathkeeper not to do so.. [optional] # noqa: E501
strip_path (str): StripPath if set, replaces the provided path prefix when forwarding the requested URL to the upstream URL.. [optional] # noqa: E501
url (str): URL is the URL the request will be proxied to.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 46.509434
| 270
| 0.583286
|
2e10ec95765dc271d1ef348df89461d61f387e9e
| 14,541
|
py
|
Python
|
filetype/types/archive.py
|
fzzylogic/filetype.py
|
1937d2b22b6852655e79f40550306a5e7282e9c5
|
[
"MIT"
] | null | null | null |
filetype/types/archive.py
|
fzzylogic/filetype.py
|
1937d2b22b6852655e79f40550306a5e7282e9c5
|
[
"MIT"
] | null | null | null |
filetype/types/archive.py
|
fzzylogic/filetype.py
|
1937d2b22b6852655e79f40550306a5e7282e9c5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .base import Type
class Epub(Type):
"""
Implements the EPUB archive type matcher.
"""
MIME = 'application/epub+zip'
EXTENSION = 'epub'
def __init__(self):
super(Epub, self).__init__(
mime=Epub.MIME,
extension=Epub.EXTENSION
)
def match(self, buf):
return (len(buf) > 57 and
buf[0] == 0x50 and buf[1] == 0x4B and
buf[2] == 0x3 and buf[3] == 0x4 and
buf[30] == 0x6D and buf[31] == 0x69 and
buf[32] == 0x6D and buf[33] == 0x65 and
buf[34] == 0x74 and buf[35] == 0x79 and
buf[36] == 0x70 and buf[37] == 0x65 and
buf[38] == 0x61 and buf[39] == 0x70 and
buf[40] == 0x70 and buf[41] == 0x6C and
buf[42] == 0x69 and buf[43] == 0x63 and
buf[44] == 0x61 and buf[45] == 0x74 and
buf[46] == 0x69 and buf[47] == 0x6F and
buf[48] == 0x6E and buf[49] == 0x2F and
buf[50] == 0x65 and buf[51] == 0x70 and
buf[52] == 0x75 and buf[53] == 0x62 and
buf[54] == 0x2B and buf[55] == 0x7A and
buf[56] == 0x69 and buf[57] == 0x70)
class Zip(Type):
"""
Implements the Zip archive type matcher.
"""
MIME = 'application/zip'
EXTENSION = 'zip'
def __init__(self):
super(Zip, self).__init__(
mime=Zip.MIME,
extension=Zip.EXTENSION
)
def match(self, buf):
return (len(buf) > 3 and
buf[0] == 0x50 and buf[1] == 0x4B and
(buf[2] == 0x3 or buf[2] == 0x5 or
buf[2] == 0x7) and
(buf[3] == 0x4 or buf[3] == 0x6 or
buf[3] == 0x8))
class Tar(Type):
"""
Implements the Tar archive type matcher.
"""
MIME = 'application/x-tar'
EXTENSION = 'tar'
def __init__(self):
super(Tar, self).__init__(
mime=Tar.MIME,
extension=Tar.EXTENSION
)
def match(self, buf):
return (len(buf) > 261 and
buf[257] == 0x75 and
buf[258] == 0x73 and
buf[259] == 0x74 and
buf[260] == 0x61 and
buf[261] == 0x72)
class Rar(Type):
"""
Implements the RAR archive type matcher.
"""
MIME = 'application/x-rar-compressed'
EXTENSION = 'rar'
def __init__(self):
super(Rar, self).__init__(
mime=Rar.MIME,
extension=Rar.EXTENSION
)
def match(self, buf):
return (len(buf) > 6 and
buf[0] == 0x52 and
buf[1] == 0x61 and
buf[2] == 0x72 and
buf[3] == 0x21 and
buf[4] == 0x1A and
buf[5] == 0x7 and
(buf[6] == 0x0 or
buf[6] == 0x1))
class Gz(Type):
"""
Implements the GZ archive type matcher.
"""
MIME = 'application/gzip'
EXTENSION = 'gz'
def __init__(self):
super(Gz, self).__init__(
mime=Gz.MIME,
extension=Gz.EXTENSION
)
def match(self, buf):
return (len(buf) > 2 and
buf[0] == 0x1F and
buf[1] == 0x8B and
buf[2] == 0x8)
class Bz2(Type):
"""
Implements the BZ2 archive type matcher.
"""
MIME = 'application/x-bzip2'
EXTENSION = 'bz2'
def __init__(self):
super(Bz2, self).__init__(
mime=Bz2.MIME,
extension=Bz2.EXTENSION
)
def match(self, buf):
return (len(buf) > 2 and
buf[0] == 0x42 and
buf[1] == 0x5A and
buf[2] == 0x68)
class SevenZ(Type):
"""
Implements the SevenZ (7z) archive type matcher.
"""
MIME = 'application/x-7z-compressed'
EXTENSION = '7z'
def __init__(self):
super(SevenZ, self).__init__(
mime=SevenZ.MIME,
extension=SevenZ.EXTENSION
)
def match(self, buf):
return (len(buf) > 5 and
buf[0] == 0x37 and
buf[1] == 0x7A and
buf[2] == 0xBC and
buf[3] == 0xAF and
buf[4] == 0x27 and
buf[5] == 0x1C)
class Pdf(Type):
"""
Implements the PDF archive type matcher.
"""
MIME = 'application/pdf'
EXTENSION = 'pdf'
def __init__(self):
super(Pdf, self).__init__(
mime=Pdf.MIME,
extension=Pdf.EXTENSION
)
def match(self, buf):
return (len(buf) > 3 and
buf[0] == 0x25 and
buf[1] == 0x50 and
buf[2] == 0x44 and
buf[3] == 0x46)
class Exe(Type):
"""
Implements the EXE archive type matcher.
"""
MIME = 'application/x-msdownload'
EXTENSION = 'exe'
def __init__(self):
super(Exe, self).__init__(
mime=Exe.MIME,
extension=Exe.EXTENSION
)
def match(self, buf):
return (len(buf) > 1 and
buf[0] == 0x4D and
buf[1] == 0x5A)
class Swf(Type):
"""
Implements the SWF archive type matcher.
"""
MIME = 'application/x-shockwave-flash'
EXTENSION = 'swf'
def __init__(self):
super(Swf, self).__init__(
mime=Swf.MIME,
extension=Swf.EXTENSION
)
def match(self, buf):
return (len(buf) > 2 and
(buf[0] == 0x43 or
buf[0] == 0x46) and
buf[1] == 0x57 and
buf[2] == 0x53)
class Rtf(Type):
"""
Implements the RTF archive type matcher.
"""
MIME = 'application/rtf'
EXTENSION = 'rtf'
def __init__(self):
super(Rtf, self).__init__(
mime=Rtf.MIME,
extension=Rtf.EXTENSION
)
def match(self, buf):
return (len(buf) > 4 and
buf[0] == 0x7B and
buf[1] == 0x5C and
buf[2] == 0x72 and
buf[3] == 0x74 and
buf[4] == 0x66)
class Nes(Type):
"""
Implements the NES archive type matcher.
"""
MIME = 'application/x-nintendo-nes-rom'
EXTENSION = 'nes'
def __init__(self):
super(Nes, self).__init__(
mime=Nes.MIME,
extension=Nes.EXTENSION
)
def match(self, buf):
return (len(buf) > 3 and
buf[0] == 0x4E and
buf[1] == 0x45 and
buf[2] == 0x53 and
buf[3] == 0x1A)
class Crx(Type):
"""
Implements the CRX archive type matcher.
"""
MIME = 'application/x-google-chrome-extension'
EXTENSION = 'crx'
def __init__(self):
super(Crx, self).__init__(
mime=Crx.MIME,
extension=Crx.EXTENSION
)
def match(self, buf):
return (len(buf) > 3 and
buf[0] == 0x43 and
buf[1] == 0x72 and
buf[2] == 0x32 and
buf[3] == 0x34)
class Cab(Type):
"""
Implements the CAB archive type matcher.
"""
MIME = 'application/vnd.ms-cab-compressed'
EXTENSION = 'cab'
def __init__(self):
super(Cab, self).__init__(
mime=Cab.MIME,
extension=Cab.EXTENSION
)
def match(self, buf):
return (len(buf) > 3 and
((buf[0] == 0x4D and
buf[1] == 0x53 and
buf[2] == 0x43 and
buf[3] == 0x46) or
(buf[0] == 0x49 and
buf[1] == 0x53 and
buf[2] == 0x63 and
buf[3] == 0x28)))
class Eot(Type):
"""
Implements the EOT archive type matcher.
"""
MIME = 'application/octet-stream'
EXTENSION = 'eot'
def __init__(self):
super(Eot, self).__init__(
mime=Eot.MIME,
extension=Eot.EXTENSION
)
def match(self, buf):
return (len(buf) > 35 and
buf[34] == 0x4C and
buf[35] == 0x50 and
((buf[8] == 0x02 and
buf[9] == 0x00 and
buf[10] == 0x01) or
(buf[8] == 0x01 and
buf[9] == 0x00 and
buf[10] == 0x00) or
(buf[8] == 0x02 and
buf[9] == 0x00 and
buf[10] == 0x02)))
class Ps(Type):
"""
Implements the PS archive type matcher.
"""
MIME = 'application/postscript'
EXTENSION = 'ps'
def __init__(self):
super(Ps, self).__init__(
mime=Ps.MIME,
extension=Ps.EXTENSION
)
def match(self, buf):
return (len(buf) > 1 and
buf[0] == 0x25 and
buf[1] == 0x21)
class Xz(Type):
"""
    Implements the XZ archive type matcher.
"""
MIME = 'application/x-xz'
EXTENSION = 'xz'
def __init__(self):
super(Xz, self).__init__(
mime=Xz.MIME,
extension=Xz.EXTENSION
)
def match(self, buf):
return (len(buf) > 5 and
buf[0] == 0xFD and
buf[1] == 0x37 and
buf[2] == 0x7A and
buf[3] == 0x58 and
buf[4] == 0x5A and
buf[5] == 0x00)
class Sqlite(Type):
"""
Implements the Sqlite DB archive type matcher.
"""
MIME = 'application/x-sqlite3'
EXTENSION = 'sqlite'
def __init__(self):
super(Sqlite, self).__init__(
mime=Sqlite.MIME,
extension=Sqlite.EXTENSION
)
def match(self, buf):
return (len(buf) > 3 and
buf[0] == 0x53 and
buf[1] == 0x51 and
buf[2] == 0x4C and
buf[3] == 0x69)
class Deb(Type):
"""
Implements the DEB archive type matcher.
"""
MIME = 'application/x-deb'
EXTENSION = 'deb'
def __init__(self):
super(Deb, self).__init__(
mime=Deb.MIME,
extension=Deb.EXTENSION
)
def match(self, buf):
return (len(buf) > 20 and
buf[0] == 0x21 and
buf[1] == 0x3C and
buf[2] == 0x61 and
buf[3] == 0x72 and
buf[4] == 0x63 and
buf[5] == 0x68 and
buf[6] == 0x3E and
buf[7] == 0x0A and
buf[8] == 0x64 and
buf[9] == 0x65 and
buf[10] == 0x62 and
buf[11] == 0x69 and
buf[12] == 0x61 and
buf[13] == 0x6E and
buf[14] == 0x2D and
buf[15] == 0x62 and
buf[16] == 0x69 and
buf[17] == 0x6E and
buf[18] == 0x61 and
buf[19] == 0x72 and
buf[20] == 0x79)
class Ar(Type):
"""
Implements the AR archive type matcher.
"""
MIME = 'application/x-unix-archive'
EXTENSION = 'ar'
def __init__(self):
super(Ar, self).__init__(
mime=Ar.MIME,
extension=Ar.EXTENSION
)
def match(self, buf):
return (len(buf) > 6 and
buf[0] == 0x21 and
buf[1] == 0x3C and
buf[2] == 0x61 and
buf[3] == 0x72 and
buf[4] == 0x63 and
buf[5] == 0x68 and
buf[6] == 0x3E)
class Z(Type):
"""
Implements the Z archive type matcher.
"""
MIME = 'application/x-compress'
EXTENSION = 'Z'
def __init__(self):
super(Z, self).__init__(
mime=Z.MIME,
extension=Z.EXTENSION
)
def match(self, buf):
return (len(buf) > 1 and
((buf[0] == 0x1F and
buf[1] == 0xA0) or
(buf[0] == 0x1F and
buf[1] == 0x9D)))
class Lz(Type):
"""
Implements the Lz archive type matcher.
"""
MIME = 'application/x-lzip'
EXTENSION = 'lz'
def __init__(self):
super(Lz, self).__init__(
mime=Lz.MIME,
extension=Lz.EXTENSION
)
def match(self, buf):
return (len(buf) > 3 and
buf[0] == 0x4C and
buf[1] == 0x5A and
buf[2] == 0x49 and
buf[3] == 0x50)
class Rpm(Type):
"""
Implements the RPM archive type matcher.
"""
MIME = 'application/x-rpm'
EXTENSION = 'rpm'
def __init__(self):
super(Rpm, self).__init__(
mime=Rpm.MIME,
extension=Rpm.EXTENSION
)
def match(self, buf):
return (len(buf) > 96 and
buf[0] == 0xED and
buf[1] == 0xAB and
buf[2] == 0xEE and
buf[3] == 0xDB)
class Elf(Type):
"""
Implements the Elf archive type matcher.
"""
MIME = 'application/x-executable'
EXTENSION = 'elf'
def __init__(self):
super(Elf, self).__init__(
mime=Elf.MIME,
extension=Elf.EXTENSION
)
def match(self, buf):
return (len(buf) > 52 and
buf[0] == 0x7F and
buf[1] == 0x45 and
buf[2] == 0x4C and
buf[3] == 0x46)
class Dcm(Type):
"""
Implements the Dcm archive type matcher.
"""
MIME = 'application/dicom'
EXTENSION = 'dcm'
def __init__(self):
super(Dcm, self).__init__(
mime=Dcm.MIME,
extension=Dcm.EXTENSION
)
def match(self, buf):
return (len(buf) > 131 and
buf[128] == 0x44 and
buf[129] == 0x49 and
buf[130] == 0x43 and
buf[131] == 0x4D)
class Iso(Type):
"""
Implements the ISO archive type matcher.
"""
MIME = 'application/x-iso9660-image'
EXTENSION = 'iso'
def __init__(self):
super(Iso, self).__init__(
mime=Iso.MIME,
extension=Iso.EXTENSION
)
def match(self, buf):
return (len(buf) > 32773 and
buf[32769] == 0x43 and
buf[32770] == 0x44 and
buf[32771] == 0x30 and
buf[32772] == 0x30 and
buf[32773] == 0x31)
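Each matcher above inspects only magic bytes, so it can be exercised directly on a small hand-built buffer. A short sketch, assuming the filetype package is installed so that the module above is importable:

# Minimal check of two matchers defined above against hand-built headers.
from filetype.types.archive import Zip, Gz

zip_header = bytes([0x50, 0x4B, 0x03, 0x04]) + b'\x00' * 26  # PK\x03\x04 local file header
gz_header = bytes([0x1F, 0x8B, 0x08]) + b'\x00' * 7          # gzip magic + deflate method

print(Zip().match(zip_header))  # True
print(Gz().match(gz_header))    # True
print(Gz().match(zip_header))   # False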
| 24.194676
| 55
| 0.455883
|
db320a62c5267c2f32ac2c76aba8aa0712217394
| 7,474
|
py
|
Python
|
azurelinuxagent/common/utils/textutil.py
|
longlimsft/WALinuxAgent
|
abc9c2e396ce8be0e643de5659578a415d9884ce
|
[
"Apache-2.0"
] | 2
|
2019-05-03T21:59:45.000Z
|
2021-01-19T07:17:03.000Z
|
azurelinuxagent/common/utils/textutil.py
|
longlimsft/WALinuxAgent
|
abc9c2e396ce8be0e643de5659578a415d9884ce
|
[
"Apache-2.0"
] | null | null | null |
azurelinuxagent/common/utils/textutil.py
|
longlimsft/WALinuxAgent
|
abc9c2e396ce8be0e643de5659578a415d9884ce
|
[
"Apache-2.0"
] | null | null | null |
# Microsoft Azure Linux Agent
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
import base64
import crypt
import random
import string
import struct
import sys
import xml.dom.minidom as minidom
from distutils.version import LooseVersion as Version
def parse_doc(xml_text):
"""
Parse xml document from string
"""
# The minidom lib has some issue with unicode in python2.
# Encode the string into utf-8 first
xml_text = xml_text.encode('utf-8')
return minidom.parseString(xml_text)
def findall(root, tag, namespace=None):
"""
Get all nodes by tag and namespace under Node root.
"""
if root is None:
return []
if namespace is None:
return root.getElementsByTagName(tag)
else:
return root.getElementsByTagNameNS(namespace, tag)
def find(root, tag, namespace=None):
"""
Get first node by tag and namespace under Node root.
"""
nodes = findall(root, tag, namespace=namespace)
if nodes is not None and len(nodes) >= 1:
return nodes[0]
else:
return None
def gettext(node):
"""
Get node text
"""
if node is None:
return None
for child in node.childNodes:
if child.nodeType == child.TEXT_NODE:
return child.data
return None
def findtext(root, tag, namespace=None):
"""
Get text of node by tag and namespace under Node root.
"""
node = find(root, tag, namespace=namespace)
return gettext(node)
def getattrib(node, attr_name):
"""
Get attribute of xml node
"""
if node is not None:
return node.getAttribute(attr_name)
else:
return None
def unpack(buf, offset, indices):
    """
    Unpack bytes into python values, reading them in the given index order.
    """
    result = 0
    for i in indices:
        result = (result << 8) | str_to_ord(buf[offset + i])
    return result
def unpack_little_endian(buf, offset, length):
"""
Unpack little endian bytes into python values.
"""
return unpack(buf, offset, list(range(length - 1, -1, -1)))
def unpack_big_endian(buf, offset, length):
"""
Unpack big endian bytes into python values.
"""
return unpack(buf, offset, list(range(0, length)))
def hex_dump3(buf, offset, length):
"""
Dump range of buf in formatted hex.
"""
return ''.join(['%02X' % str_to_ord(char) for char in buf[offset:offset + length]])
def hex_dump2(buf):
"""
Dump buf in formatted hex.
"""
return hex_dump3(buf, 0, len(buf))
def is_in_range(a, low, high):
"""
    Return True if low <= a <= high.
"""
return (a >= low and a <= high)
def is_printable(ch):
"""
Return True if character is displayable.
"""
return (is_in_range(ch, str_to_ord('A'), str_to_ord('Z'))
or is_in_range(ch, str_to_ord('a'), str_to_ord('z'))
or is_in_range(ch, str_to_ord('0'), str_to_ord('9')))
def hex_dump(buffer, size):
"""
    Return a hex formatted dump of 'buffer' of length 'size'.
"""
if size < 0:
size = len(buffer)
result = ""
for i in range(0, size):
if (i % 16) == 0:
result += "%06X: " % i
byte = buffer[i]
if type(byte) == str:
byte = ord(byte.decode('latin1'))
result += "%02X " % byte
if (i & 15) == 7:
result += " "
if ((i + 1) % 16) == 0 or (i + 1) == size:
j = i
while ((j + 1) % 16) != 0:
result += " "
if (j & 7) == 7:
result += " "
j += 1
result += " "
for j in range(i - (i % 16), i + 1):
byte = buffer[j]
if type(byte) == str:
byte = str_to_ord(byte.decode('latin1'))
k = '.'
if is_printable(byte):
k = chr(byte)
result += k
if (i + 1) != size:
result += "\n"
return result
def str_to_ord(a):
"""
Allows indexing into a string or an array of integers transparently.
Generic utility function.
"""
if type(a) == type(b'') or type(a) == type(u''):
a = ord(a)
return a
def compare_bytes(a, b, start, length):
for offset in range(start, start + length):
if str_to_ord(a[offset]) != str_to_ord(b[offset]):
return False
return True
def int_to_ip4_addr(a):
"""
    Convert an integer into a dotted-quad IPv4 address string.
"""
return "%u.%u.%u.%u" % ((a >> 24) & 0xFF,
(a >> 16) & 0xFF,
(a >> 8) & 0xFF,
(a) & 0xFF)
def hexstr_to_bytearray(a):
"""
Return hex string packed into a binary struct.
"""
b = b""
for c in range(0, len(a) // 2):
b += struct.pack("B", int(a[c * 2:c * 2 + 2], 16))
return b
def set_ssh_config(config, name, val):
notfound = True
for i in range(0, len(config)):
if config[i].startswith(name):
config[i] = "{0} {1}".format(name, val)
notfound = False
elif config[i].startswith("Match"):
            # Match blocks must be placed at the end of sshd config
break
if notfound:
config.insert(i, "{0} {1}".format(name, val))
return config
def set_ini_config(config, name, val):
notfound = True
nameEqual = name + '='
length = len(config)
text = "{0}=\"{1}\"".format(name, val)
for i in reversed(range(0, length)):
if config[i].startswith(nameEqual):
config[i] = text
notfound = False
break
if notfound:
config.insert(length - 1, text)
def remove_bom(c):
if str_to_ord(c[0]) > 128 and str_to_ord(c[1]) > 128 and \
str_to_ord(c[2]) > 128:
c = c[3:]
return c
def gen_password_hash(password, crypt_id, salt_len):
collection = string.ascii_letters + string.digits
salt = ''.join(random.choice(collection) for _ in range(salt_len))
salt = "${0}${1}".format(crypt_id, salt)
return crypt.crypt(password, salt)
def get_bytes_from_pem(pem_str):
base64_bytes = ""
for line in pem_str.split('\n'):
if "----" not in line:
base64_bytes += line
return base64_bytes
def b64encode(s):
from azurelinuxagent.common.version import PY_VERSION_MAJOR
if PY_VERSION_MAJOR > 2:
return base64.b64encode(bytes(s, 'utf-8')).decode('utf-8')
return base64.b64encode(s)
def b64decode(s):
from azurelinuxagent.common.version import PY_VERSION_MAJOR
if PY_VERSION_MAJOR > 2:
return base64.b64decode(s).decode('utf-8')
return base64.b64decode(s)
def safe_shlex_split(s):
import shlex
from azurelinuxagent.common.version import PY_VERSION
if PY_VERSION[:2] == (2, 6):
return shlex.split(s.encode('utf-8'))
return shlex.split(s)
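A few of the helpers above are easiest to follow from concrete calls. The values below are purely illustrative; the import path simply mirrors the file's location in the agent source tree.

# Illustrative calls with example values only.
from azurelinuxagent.common.utils import textutil

print(textutil.int_to_ip4_addr(0xC0A80001))    # '192.168.0.1'
print(textutil.hexstr_to_bytearray('1f8b08'))  # b'\x1f\x8b\x08'

sshd_config = ['PasswordAuthentication no', 'Match User backup']
# The new option is inserted before the Match block, as sshd requires.
print(textutil.set_ssh_config(sshd_config, 'ClientAliveInterval', '180'))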
| 25.508532
| 87
| 0.576532
|
333f0470ffa504fa0d3c4aa40a2de0085a612bbc
| 1,063
|
py
|
Python
|
Base/TestEnum.py
|
InverseLina/python-practice
|
496d2020916d8096a32131cdedd25a4da7b7735e
|
[
"Apache-2.0"
] | null | null | null |
Base/TestEnum.py
|
InverseLina/python-practice
|
496d2020916d8096a32131cdedd25a4da7b7735e
|
[
"Apache-2.0"
] | null | null | null |
Base/TestEnum.py
|
InverseLina/python-practice
|
496d2020916d8096a32131cdedd25a4da7b7735e
|
[
"Apache-2.0"
] | null | null | null |
from enum import Enum
from enum import unique
# encoding=utf-8
__author__ = 'Hinsteny'
def test_enum():
Month = Enum('Month', ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))
print(Month.Jan, " = ",Month.Jan.value)
for name, member in Month.__members__.items():
print(name, '=>', member, ',', member.value)
return "Test enum success!"
@unique
class Weekday(Enum):
    Sun = 0  # the value of Sun is set to 0
Mon = 1
Tue = 2
Wed = 3
Thu = 4
Fri = 5
Sat = 6
def test_enum_two():
    day1 = Weekday.Mon
    print(day1)               # Weekday.Mon
    print(Weekday.Tue)        # Weekday.Tue
    print(Weekday['Tue'])     # Weekday.Tue
    print(Weekday.Tue.value)  # 2
print(day1 == Weekday.Mon)
print(day1 == Weekday.Tue)
print(Weekday(1))
print(day1 == Weekday(1))
print("6:", Weekday(6))
# print("7:", Weekday(7))
for name, member in Weekday.__members__.items():
print(name, '=>', member)
# Do test
if __name__ == "__main__":
print(test_enum())
test_enum_two()
| 20.442308
| 111
| 0.585136
|
23f55bed82ba83e79fcd9c864592ae76171a1799
| 987
|
py
|
Python
|
var/spack/repos/builtin/packages/py-jupyterlab/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-jupyterlab/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-jupyterlab/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyJupyterlab(PythonPackage):
"""JupyterLab is the next-generation web-based user interface
for Project Jupyter."""
homepage = "https://jupyterlab.readthedocs.io/"
pypi = "jupyterlab/jupyterlab-2.2.7.tar.gz"
version('2.2.7', sha256='a72ffd0d919cba03a5ef8422bc92c3332a957ff97b0490494209c83ad93826da')
version('2.1.0', sha256='8c239aababf5baa0b3d36e375fddeb9fd96f3a9a24a8cda098d6a414f5bbdc81')
depends_on('python@3.5:', type=('build', 'run'))
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-notebook@4.3.1:', type=('build', 'run'))
depends_on('py-tornado@:5,6.0.3:', type=('build', 'run'))
depends_on('py-jupyterlab-server@1.1.5:1.999', type=('build', 'run'))
depends_on('py-jinja2@2.10:', type=('build', 'run'))
| 42.913043
| 95
| 0.702128
|
a7138ae06ae47ff7ddaf3be859efe1bf75ac9e48
| 6,446
|
py
|
Python
|
main.py
|
jrupac/nest-wfh
|
3b65a04b1dd19d1e0c2bd0aa6a863df19468f4af
|
[
"MIT"
] | 1
|
2015-06-21T14:52:43.000Z
|
2015-06-21T14:52:43.000Z
|
main.py
|
jrupac/nest-wfh
|
3b65a04b1dd19d1e0c2bd0aa6a863df19468f4af
|
[
"MIT"
] | null | null | null |
main.py
|
jrupac/nest-wfh
|
3b65a04b1dd19d1e0c2bd0aa6a863df19468f4af
|
[
"MIT"
] | null | null | null |
"""
Nest-WFH is a project that aims to prevent your Nest thermostat from going into
auto-away mode if you are home.
The program works by querying your calendar for specific events. If matching
events are found, it manually sets all thermostats to "home". This is meant to
be run as a cron job.
"""
__author__ = 'ajay@roopakalu.com (Ajay Roopakalu)'
import re
import sys
import dateutil.parser
import prometheus_client as pc
from absl import flags
from datetime import datetime
from datetime import timedelta
from dateutil import tz
import calendar_client
import log
import nest
import keys
import weather
flags.DEFINE_boolean('set_status', True, 'Whether to modify Nest state.')
FLAGS = flags.FLAGS
registry = pc.CollectorRegistry()
ambient_temperature_metric = pc.Gauge(
'ambient_temperature', 'Current ambient temperature in Fahrenheit',
registry=registry)
target_temperature_high_metric = pc.Gauge(
'target_temperature_high', 'Target high temperature in Fahrenheit',
registry=registry)
target_temperature_low_metric = pc.Gauge(
'target_temperature_low', 'Target low temperature in Fahrenheit',
registry=registry)
external_temperature_metric = pc.Gauge(
'external_temperature', 'Current external temperature in Fahrenheit',
registry=registry)
humidity_metric = pc.Gauge(
'humidity', 'Internal humidity in percentage', registry=registry)
external_humidity_metric = pc.Gauge(
'external_humidity', 'External humidity in percentage', registry=registry)
hvac_state_metric = pc.Gauge(
'hvac_state', 'State of HVAC ("heating", "cooling", or "off")',
['state'], registry=registry)
fan_active_metric = pc.Gauge(
'fan_timer_active', 'State of fan ("on" or "off")', ['state'],
registry=registry)
user_state_metric = pc.Gauge(
'user_state', 'State of user ("home", "away", or "auto-away")',
['structure_id', 'state'], registry=registry)
logging = log.Log(__name__)
EPOCH = datetime(1970, 1, 1)
VALID_HVAC_STATES = frozenset(['heating', 'cooling', 'off'])
VALID_AWAY_STATES = frozenset(['home', 'away', 'auto-away'])
DEVICES_URL = 'https://developer-api.nest.com/devices/'
STRUCTURE_URL = 'https://developer-api.nest.com/structures/'
THERMOSTATS_URL = 'https://developer-api.nest.com/devices/thermostats/'
STATUS_HOME = '"home"'
STATUS_AWAY = '"away"'
ENTER_WORK_REGEX = re.compile('I entered work')
EXIT_WORK_REGEX = re.compile('I exited work')
WFH_REGEX = re.compile('WFH')
MIDDAY = 12 # Hour of the day to indicate noon
def RecordStats(thermostats, structures, external_weather):
  for thermostat in thermostats.values():
ambient_temperature_metric.set(thermostat['ambient_temperature_f'])
target_temperature_high_metric.set(thermostat['target_temperature_high_f'])
target_temperature_low_metric.set(thermostat['target_temperature_low_f'])
humidity_metric.set(thermostat['humidity'])
fan_active_metric.labels('on').set(int(thermostat['fan_timer_active']))
fan_active_metric.labels('off').set(int(not thermostat['fan_timer_active']))
hvac_state = thermostat['hvac_state']
if hvac_state not in VALID_HVAC_STATES:
logging.warning('Unexpected HVAC state: %s', hvac_state)
else:
for state in VALID_HVAC_STATES:
hvac_state_metric.labels(state).set(int(state == hvac_state))
for structure_id in structures:
user_state = structures[structure_id]['away']
if user_state not in VALID_AWAY_STATES:
logging.warning('Unexpected away state: %s', user_state)
else:
for state in VALID_AWAY_STATES:
user_state_metric.labels(
structure_id, state).set(int(state == user_state))
external_temperature_metric.set(external_weather['temp'])
external_humidity_metric.set(external_weather['humidity'])
def PushMetrics():
if keys.PROMETHEUS_ENDPOINT is not None:
logging.info('Pushing metrics to %s', keys.PROMETHEUS_ENDPOINT)
pc.push_to_gateway(
keys.PROMETHEUS_ENDPOINT, job='nest-wfh', registry=registry)
def Run():
now = datetime.now(tz=tz.tzlocal())
localized_now = now.astimezone(tz.gettz(keys.WORK_HOURS_CALENDAR_TZ))
today = localized_now.replace(hour=0, minute=0, second=0, microsecond=0)
tomorrow = today + timedelta(days=1)
logging.info('Retrieving known thermostats.')
thermostats = nest.GetAllThermostats(keys.NEST_ACCESS_TOKEN, THERMOSTATS_URL)
structure_ids = nest.GetStructureIds(thermostats)
logging.info('Retrieving known structures.')
structures = nest.GetStructures(
keys.NEST_ACCESS_TOKEN, STRUCTURE_URL, structure_ids)
logging.info('Retrieving external temperature.')
external_weather = weather.GetCurrentExternalWeather(
keys.OWM_API_KEY, keys.LOCATION_CITY_ID)
RecordStats(thermostats, structures, external_weather)
PushMetrics()
logging.info('Retrieving relevant calendar events.')
calendar_instance = calendar_client.Calendar()
events = calendar_instance.GetEvents(
keys.WORK_HOURS_CALENDAR_ID, today, tomorrow)
if not events:
logging.info('No events found.')
exit(0)
for event in events:
try:
# If WFH, always set status to HOME.
if WFH_REGEX.match(event.get('summary')):
logging.info(
nest.SetAwayStatus(
keys.NEST_ACCESS_TOKEN, STRUCTURE_URL, structure_ids,
status=STATUS_HOME))
startEntity = event.get('start')
# Ignore full-day events here.
if not startEntity.get('dateTime'):
continue
startTime = dateutil.parser.parse(startEntity.get('dateTime'))
if today < startTime < tomorrow:
if (localized_now.hour <= MIDDAY and
ENTER_WORK_REGEX.match(event.get('summary'))):
logging.info('User is at work..')
if FLAGS.set_status:
logging.info(
nest.SetAwayStatus(
keys.NEST_ACCESS_TOKEN, STRUCTURE_URL, structure_ids,
status=STATUS_AWAY))
if (localized_now.hour > MIDDAY and
EXIT_WORK_REGEX.match(event.get('summary'))):
logging.info('User is coming home..')
if FLAGS.set_status:
logging.info(
nest.SetAwayStatus(
keys.NEST_ACCESS_TOKEN, STRUCTURE_URL, structure_ids,
status=STATUS_HOME))
except Exception as e:
logging.exception('Error while performing operation: %s', e)
PushMetrics()
def main(argv):
FLAGS(argv)
Run()
if __name__ == '__main__':
main(sys.argv)
| 34.843243
| 80
| 0.718585
|
007cb81442d166873a05ab5c944fc7d46a963311
| 1,548
|
py
|
Python
|
pyqt/pyqt5-master/src/windows/DarkStyleSheet.py
|
Ding-zhenke/Dcount-s-notebook
|
16c29ac7d076c466e053f1b8db4a7f4e43f67a24
|
[
"MIT"
] | null | null | null |
pyqt/pyqt5-master/src/windows/DarkStyleSheet.py
|
Ding-zhenke/Dcount-s-notebook
|
16c29ac7d076c466e053f1b8db4a7f4e43f67a24
|
[
"MIT"
] | null | null | null |
pyqt/pyqt5-master/src/windows/DarkStyleSheet.py
|
Ding-zhenke/Dcount-s-notebook
|
16c29ac7d076c466e053f1b8db4a7f4e43f67a24
|
[
"MIT"
] | 2
|
2019-06-18T05:53:26.000Z
|
2019-06-19T03:26:02.000Z
|
'''
QDarkStyleSheet style example.
Install the style with: conda install qdarkstyle
'''
import logging
import sys
from PyQt5 import QtWidgets, QtCore
# make the example runnable without the need to install
from os.path import abspath, dirname
sys.path.insert(0, abspath(dirname(abspath(__file__)) + '/..'))
import qdarkstyle
import ui.example_pyqt5_ui as example_ui
def main():
"""
Application entry point
"""
logging.basicConfig(level=logging.DEBUG)
# create the application and the main window
app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QMainWindow()
# setup ui
ui = example_ui.Ui_MainWindow()
ui.setupUi(window)
ui.bt_delay_popup.addActions([
ui.actionAction,
ui.actionAction_C
])
ui.bt_instant_popup.addActions([
ui.actionAction,
ui.actionAction_C
])
ui.bt_menu_button_popup.addActions([
ui.actionAction,
ui.actionAction_C
])
item = QtWidgets.QTableWidgetItem("Test")
item.setCheckState(QtCore.Qt.Checked)
ui.tableWidget.setItem(0, 0, item)
window.setWindowTitle("QDarkStyle example")
# tabify dock widgets to show bug #6
window.tabifyDockWidget(ui.dockWidget1, ui.dockWidget2)
# setup stylesheet
print(qdarkstyle.load_stylesheet_pyqt5())
app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
# auto quit after 2s when testing on travis-ci
if "--travis" in sys.argv:
QtCore.QTimer.singleShot(2000, app.exit)
# run
window.show()
app.exec_()
if __name__ == "__main__":
main()
| 23.815385
| 63
| 0.691214
|
d338a0aaebf2d863ab3dad2863673e7a12962476
| 553
|
py
|
Python
|
nodes/networkedSingleStepper/updateFirmware.py
|
imoyer/pygestalt
|
d332df64264cce4a2bec8a73d698c386f1eaca7b
|
[
"MIT"
] | 1
|
2017-07-03T08:34:39.000Z
|
2017-07-03T08:34:39.000Z
|
nodes/networkedSingleStepper/updateFirmware.py
|
imoyer/pygestalt
|
d332df64264cce4a2bec8a73d698c386f1eaca7b
|
[
"MIT"
] | 3
|
2015-12-04T23:14:50.000Z
|
2016-11-08T16:24:32.000Z
|
nodes/networkedSingleStepper/updateFirmware.py
|
imnp/pygestalt
|
d332df64264cce4a2bec8a73d698c386f1eaca7b
|
[
"MIT"
] | 1
|
2017-09-13T00:17:39.000Z
|
2017-09-13T00:17:39.000Z
|
''' Updates the firmware on a networkedSingleStepper node.
April 14th, 2019
Ilan E. Moyer
'''
from pygestalt import nodes, config
import time, sys
# ---- SYNTHETIC MODE ----
# config.syntheticModeOn() #Un-comment this line to run in synthetic mode (i.e. test mode)
# ---- DEFINE TEST NODE ----
targetNode = nodes.networkedGestaltNode(name = "Networked Single Stepper", filename = "086-005b.py") #filename must be provided for synthetic mode
# ---- LOAD NEW FIRMWARE ----
targetNode.loadProgram('firmware/086-005b.hex')
# ---- RUN UNIT TESTS ----
| 26.333333
| 146
| 0.705244
|
697f48f9b5e7f79083dbff14bf24bb1c925484ec
| 8,849
|
py
|
Python
|
myapp/migrations/0001_initial.py
|
archerc56/classroomProject2
|
683e6098ea794207d2f3ce409663f88dcbdf082e
|
[
"MIT"
] | null | null | null |
myapp/migrations/0001_initial.py
|
archerc56/classroomProject2
|
683e6098ea794207d2f3ce409663f88dcbdf082e
|
[
"MIT"
] | null | null | null |
myapp/migrations/0001_initial.py
|
archerc56/classroomProject2
|
683e6098ea794207d2f3ce409663f88dcbdf082e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-29 16:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Assignment',
fields=[
('assignment_no', models.IntegerField(primary_key=True, serialize=False)),
('subject', models.CharField(blank=True, max_length=13, null=True)),
('name', models.CharField(blank=True, max_length=30, null=True)),
('due_date', models.DateField(blank=True, null=True)),
('total_points', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'assignment',
'managed': False,
},
),
migrations.CreateModel(
name='AuthGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80, unique=True)),
],
options={
'db_table': 'auth_group',
'managed': False,
},
),
migrations.CreateModel(
name='AuthGroupPermissions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'auth_group_permissions',
'managed': False,
},
),
migrations.CreateModel(
name='AuthPermission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('codename', models.CharField(max_length=100)),
],
options={
'db_table': 'auth_permission',
'managed': False,
},
),
migrations.CreateModel(
name='AuthUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128)),
('last_login', models.DateTimeField(blank=True, null=True)),
('is_superuser', models.IntegerField()),
('username', models.CharField(max_length=150, unique=True)),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=30)),
('email', models.CharField(max_length=254)),
('is_staff', models.IntegerField()),
('is_active', models.IntegerField()),
('date_joined', models.DateTimeField()),
],
options={
'db_table': 'auth_user',
'managed': False,
},
),
migrations.CreateModel(
name='AuthUserGroups',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'auth_user_groups',
'managed': False,
},
),
migrations.CreateModel(
name='AuthUserUserPermissions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'auth_user_user_permissions',
'managed': False,
},
),
migrations.CreateModel(
name='DisciplinaryReport',
fields=[
('report_no', models.IntegerField(primary_key=True, serialize=False)),
('incident', models.CharField(blank=True, max_length=144, null=True)),
('time', models.CharField(blank=True, max_length=5, null=True)),
('severity_index', models.IntegerField(blank=True, choices=[(1, 'Accidental'), (2, 'Minor'), (3, 'Average'), (4, 'Major')], null=True)),
],
options={
'db_table': 'disciplinary_report',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoAdminLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('action_time', models.DateTimeField()),
('object_id', models.TextField(blank=True, null=True)),
('object_repr', models.CharField(max_length=200)),
('action_flag', models.SmallIntegerField()),
('change_message', models.TextField()),
],
options={
'db_table': 'django_admin_log',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoContentType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app_label', models.CharField(max_length=100)),
('model', models.CharField(max_length=100)),
],
options={
'db_table': 'django_content_type',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoMigrations',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('applied', models.DateTimeField()),
],
options={
'db_table': 'django_migrations',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoSession',
fields=[
('session_key', models.CharField(max_length=40, primary_key=True, serialize=False)),
('session_data', models.TextField()),
('expire_date', models.DateTimeField()),
],
options={
'db_table': 'django_session',
'managed': False,
},
),
migrations.CreateModel(
name='IstationProgram',
fields=[
('program_no', models.IntegerField(primary_key=True, serialize=False)),
('month', models.CharField(blank=True, max_length=3, null=True)),
('no_of_sections', models.IntegerField(blank=True, null=True)),
('time_limit', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'istation_program',
'managed': False,
},
),
migrations.CreateModel(
name='Student',
fields=[
('student_id', models.IntegerField(primary_key=True, serialize=False)),
('fname', models.CharField(max_length=20)),
('lname', models.CharField(max_length=30)),
('date_of_birth', models.DateField(blank=True, null=True)),
('address', models.CharField(blank=True, max_length=45, null=True)),
('overall_grade', models.FloatField(blank=True, null=True)),
('emergency_contact_no', models.CharField(blank=True, max_length=10, null=True)),
],
options={
'db_table': 'student',
'managed': False,
},
),
migrations.CreateModel(
name='WeeklyLessonPlan',
fields=[
('week_no', models.IntegerField(primary_key=True, serialize=False)),
('summary', models.CharField(blank=True, max_length=144, null=True)),
('start_date', models.DateField(blank=True, null=True)),
('end_date', models.DateField(blank=True, null=True)),
],
options={
'db_table': 'weekly_lesson_plan',
'managed': False,
},
),
migrations.CreateModel(
name='Assigned',
fields=[
('student', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='myapp.Student')),
],
options={
'db_table': 'assigned',
'managed': False,
},
),
]
| 39.86036
| 152
| 0.509323
|
f319a60dd66472e50e07cf15afd6680dbc1069dd
| 481
|
py
|
Python
|
Hilo3.py
|
GatomanJuarez/Threads.
|
8e1fce71303709a54821edd43a084ab72f6f6aaf
|
[
"BSD-2-Clause"
] | null | null | null |
Hilo3.py
|
GatomanJuarez/Threads.
|
8e1fce71303709a54821edd43a084ab72f6f6aaf
|
[
"BSD-2-Clause"
] | null | null | null |
Hilo3.py
|
GatomanJuarez/Threads.
|
8e1fce71303709a54821edd43a084ab72f6f6aaf
|
[
"BSD-2-Clause"
] | null | null | null |
import threading
import time
def worker():
print (threading.currentThread().getName(),'Lanzado')
time.sleep(2)
print (threading.currentThread().getName(),'Detenido')
def servicio():
print (threading.currentThread().getName(),'Lanzado')
print (threading.currentThread().getName(),'Detenido')
t = threading.Thread(target=servicio, name='Hilo 1')
w = threading.Thread(target=worker, name='Hilo 2')
z = threading.Thread(target=worker)
w.start()
z.start()
t.start()
| 30.0625
| 56
| 0.713098
|
c37c52dbe4e474a2b9507aa9a152b224cb0e5009
| 1,144
|
py
|
Python
|
{{cookiecutter.project_slug}}/setup.py
|
mthpower/cc-python-package
|
e3614d4cd86018bed5af2cf51a96d60ebd3157e9
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_slug}}/setup.py
|
mthpower/cc-python-package
|
e3614d4cd86018bed5af2cf51a96d60ebd3157e9
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_slug}}/setup.py
|
mthpower/cc-python-package
|
e3614d4cd86018bed5af2cf51a96d60ebd3157e9
|
[
"MIT"
] | null | null | null |
import os
import re
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
with open('{{cookiecutter.py_namespace}}/__init__.py', 'r') as f:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
f.read(), re.MULTILINE).group(1)
setup(
name='{{cookiecutter.project_slug}}',
version=version,
packages=('{{cookiecutter.py_namespace}}',),
description='{{cookiecutter.project_short_description}}',
long_description=README,
author='{{cookiecutter.full_name}}',
author_email='{{cookiecutter.email}}',
zip_safe=True,
classifiers=[
"Private :: Don't upload",
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
install_requires=[
'requests',
],
entry_points={
'console_scripts': [
'{{cookiecutter.project_slug}} = {{cookiecutter.py_namespace}}.{{cookiecutter.py_namespace}}:main',
],
},
)
| 30.918919
| 111
| 0.603147
|
ab3468757165224b1e528fe67e7bbce2f62be549
| 2,423
|
py
|
Python
|
util/__init__.py
|
01shruti/Clustering
|
a79b590316066a4e0e6181add2642190515964fe
|
[
"MIT"
] | 2
|
2019-01-11T13:22:33.000Z
|
2019-01-11T14:48:58.000Z
|
kmodes/util/__init__.py
|
mahdigh99/kmodes
|
546ea6e72ead6475b62ad40a617188ff650efdb0
|
[
"MIT"
] | null | null | null |
kmodes/util/__init__.py
|
mahdigh99/kmodes
|
546ea6e72ead6475b62ad40a617188ff650efdb0
|
[
"MIT"
] | null | null | null |
"""
Generic utilities for clustering
"""
import numpy as np
def get_max_value_key(dic):
"""Gets the key for the maximum value in a dict."""
v = np.array(list(dic.values()))
k = np.array(list(dic.keys()))
maxima = np.where(v == np.max(v))[0]
if len(maxima) == 1:
return k[maxima[0]]
else:
# In order to be consistent, always selects the minimum key
# (guaranteed to be unique) when there are multiple maximum values.
return k[maxima[np.argmin(k[maxima])]]
def encode_features(X, enc_map=None):
"""Converts categorical values in each column of X to integers in the range
[0, n_unique_values_in_column - 1], if X is not already of integer type.
If mapping is not provided, it is calculated based on the values in X.
Unknown values during prediction get a value of -1. np.NaNs are ignored
during encoding, and get treated as unknowns during prediction.
"""
if np.issubdtype(X.dtype, np.integer):
# Already integer type, so we can take a shortcut. Simply reshape
# the data to mapping dictionaries, and do nothing with X.
enc_map = [{val: val for val in np.unique(col)} for col in X.T]
return X, enc_map
if enc_map is None:
fit = True
# We will calculate enc_map, so initialize the list of column mappings.
enc_map = []
else:
fit = False
Xenc = np.zeros(X.shape).astype('int')
for ii in range(X.shape[1]):
if fit:
col_enc = {val: jj for jj, val in enumerate(np.unique(X[:, ii]))
if not (isinstance(val, float) and np.isnan(val))}
enc_map.append(col_enc)
# Unknown categories (including np.NaNs) all get a value of -1.
Xenc[:, ii] = np.array([enc_map[ii].get(x, -1) for x in X[:, ii]])
return Xenc, enc_map
def decode_centroids(encoded, mapping):
"""Decodes the encoded centroids array back to the original data
labels using a list of mappings.
"""
decoded = []
for ii in range(encoded.shape[1]):
# Invert the mapping so that we can decode.
inv_mapping = {v: k for k, v in mapping[ii].items()}
decoded.append(np.vectorize(inv_mapping.__getitem__)(encoded[:, ii]))
return np.atleast_2d(np.array(decoded)).T
def get_unique_rows(a):
"""Gets the unique rows in a numpy array."""
return np.vstack({tuple(row) for row in a})
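# Illustrative usage sketch (an addition, not part of the original module);
# the column values below are made up to show the round trip through
# encode_features and decode_centroids.
if __name__ == "__main__":
    X = np.array([["red", "small"],
                  ["blue", "large"],
                  ["red", "large"]])
    Xenc, enc_map = encode_features(X)
    # Column 0 maps {'blue': 0, 'red': 1} and column 1 maps
    # {'large': 0, 'small': 1}, so the first row encodes to [1, 1].
    print(Xenc)
    # Decoding a centroid of integer codes recovers the original labels.
    print(decode_centroids(np.array([[1, 0]]), enc_map))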
| 34.126761
| 79
| 0.633925
|
823d8e72e1557a7eaa5f7e9dae7f7e58df6e7e86
| 316
|
py
|
Python
|
Estimating the Area of a Circle/area.py
|
rajitbanerjee/kattis
|
3a5dd4c84c07e21f09ef45ebd9c1bad2a0adc6ad
|
[
"CC0-1.0"
] | 2
|
2021-04-13T12:56:30.000Z
|
2022-03-21T16:46:58.000Z
|
Estimating the Area of a Circle/area.py
|
rajitbanerjee/kattis
|
3a5dd4c84c07e21f09ef45ebd9c1bad2a0adc6ad
|
[
"CC0-1.0"
] | null | null | null |
Estimating the Area of a Circle/area.py
|
rajitbanerjee/kattis
|
3a5dd4c84c07e21f09ef45ebd9c1bad2a0adc6ad
|
[
"CC0-1.0"
] | 1
|
2021-11-14T14:06:46.000Z
|
2021-11-14T14:06:46.000Z
|
"""https://open.kattis.com/problems/estimatingtheareaofacircle"""
import math
ans = []
while True:
r, m, c = map(float, input().split())
if (r, m, c) == (0, 0, 0):
break
area = math.pi * r * r
estimate = c/m * ((2 * r) ** 2)
ans.append(f"{area} {estimate}")
for a in ans:
print(a)
| 19.75
| 65
| 0.537975
|
84926ffb190601593a9d9dcb76f529eb3475a76b
| 2,649
|
py
|
Python
|
vcf2csv_shell.py
|
sensecollective/bioinformatics_scripts
|
3a23611f382b7f3dd60e5e2abe841b84408c0d44
|
[
"BSD-3-Clause"
] | 7
|
2016-03-23T11:31:06.000Z
|
2021-05-20T19:07:38.000Z
|
vcf2csv_shell.py
|
raonyguimaraes/bioinformatics_scripts
|
3a23611f382b7f3dd60e5e2abe841b84408c0d44
|
[
"BSD-3-Clause"
] | null | null | null |
vcf2csv_shell.py
|
raonyguimaraes/bioinformatics_scripts
|
3a23611f382b7f3dd60e5e2abe841b84408c0d44
|
[
"BSD-3-Clause"
] | 8
|
2016-06-01T19:28:46.000Z
|
2022-01-09T01:26:10.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#script to convert vcf to csv
import os
import csv
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-v", dest="vcf_file",
help="VCF File to Annotate", metavar="VCF")
(options, args) = parser.parse_args()
vcffile = options.vcf_file
# Get all annotation tags from a VCF file by reading the whole file (lazy mode!)
def get_all_info_tags(vcffile):
tempfile = open(vcffile, 'r')
annotation_tags = set()
for line in tempfile:
if not line.startswith('#'):
variant = line.split('\t')
string = variant[7].split(';')
for element in string:
element = element.split('=')
tag = element[0]
if tag not in annotation_tags:
annotation_tags.add(tag)
tempfile.close()
annotation_tags = sorted(annotation_tags)
return annotation_tags
def parse_info_tag(string, annotation_tags):
string = string.split(';')
information = {}
for element in string:
element = element.split('=')
tag = element[0]
if len(element) > 1:
information[tag] = element[1]
else:
information[tag] = 'Yes'
information_list = []
for tag in annotation_tags:
if tag in information:
information_list.append(str(information[tag]))
else:
information_list.append('')
return information_list
def Get_vcfheader(filepath):
vcffile = open(filepath, 'r')
for line in vcffile:
#print line
if line.startswith("#CHROM"):
header_tags = line.strip().split('\t')
if not line.startswith("#"):
break
vcffile.close()
return header_tags
vcf_header = Get_vcfheader(vcffile)
annotation_tags = get_all_info_tags(vcffile)
vcf_header = vcf_header[:7] + annotation_tags + vcf_header[8:]
csvfilename = ".".join(os.path.basename(vcffile).split('.')[:-1])+'.csv'
csvfilepath = os.path.join(os.path.dirname(vcffile), csvfilename)
readfile = open(vcffile, 'r')
f = open(csvfilepath, "w")
csvfile = csv.writer(f, quoting=csv.QUOTE_ALL)
csvfile.writerow(vcf_header)
for line in readfile:
if line.startswith('#CHROM'):
vcf_header_original = line.strip().split('\t')
vcf_header_original = vcf_header_original[:7] + list(annotation_tags) + vcf_header_original[8:]
if not line.startswith('#'):
variant = line.strip().split('\t')
information = parse_info_tag(variant[7], annotation_tags)
variant = variant[:7] + information + variant[8:]
csvfile.writerow(variant)
#new_variant = []
##hack to old times
#print vcf_header
#print information
#die()
#for tag in vcf_header:
#tag_index = vcf_header_original.index(tag)
#new_variant.append(variant[tag_index])
f.close()
| 26.227723
| 96
| 0.685164
|
5413cc32d02984a351e6a649f1af56386a98dcaf
| 989
|
py
|
Python
|
resize_textframe_main_to_margins.py
|
innermond/scribus-scripts
|
35100ad8740f970c56b8bca4c7e01f1bb638afbb
|
[
"MIT"
] | 1
|
2021-12-19T09:08:28.000Z
|
2021-12-19T09:08:28.000Z
|
resize_textframe_main_to_margins.py
|
innermond/scribus-scripts
|
35100ad8740f970c56b8bca4c7e01f1bb638afbb
|
[
"MIT"
] | null | null | null |
resize_textframe_main_to_margins.py
|
innermond/scribus-scripts
|
35100ad8740f970c56b8bca4c7e01f1bb638afbb
|
[
"MIT"
] | 1
|
2021-01-22T03:14:37.000Z
|
2021-01-22T03:14:37.000Z
|
#!/usr/bin/python3
try:
from scribus import *
except ImportError:
pass
"""Resize a textframe with 'main' attribute with value 'True' to page margins"""
for p in range(1, pageCount() + 1):
gotoPage(p)
typ = getPageType(p)
W, H = getPageSize()
mm = getPageMargins()
ii=getPageItems()
if len(ii) == 0:
continue
for i in ii:
# just text frame 4
if i[1]!=4:
continue
aa = getObjectAttributes(i[0])
if len(aa) == 0:
continue
found = None
for a in aa:
if a["Name"] == "main" and a["Value"] == "True":
found = i
if found == None:
continue
#resize
w = W - (mm[2]+mm[1])
h = H - (mm[3]+mm[0])
sizeObject(w, h, found[0])
#position on margins
if typ == 2:
moveObjectAbs(mm[1], mm[0], found[0])
elif typ == 0:
moveObjectAbs(mm[2], mm[0], found[0])
| 24.725
| 80
| 0.482305
|
253c3edde0c56c3acf3379c341686bcf017a59a5
| 4,407
|
py
|
Python
|
Python2.7-IDCard_Detect_by_AI/idcard_detection.py
|
TencentCloud/Serverless-examples
|
4cfe9183e4fd547762ac610e9370f8c98bf5de70
|
[
"Python-2.0",
"OLDAP-2.7"
] | 6
|
2020-05-13T15:48:24.000Z
|
2021-01-27T15:59:28.000Z
|
Python2.7-IDCard_Detect_by_AI/idcard_detection.py
|
TencentCloud/Serverless-examples
|
4cfe9183e4fd547762ac610e9370f8c98bf5de70
|
[
"Python-2.0",
"OLDAP-2.7"
] | 2
|
2020-07-18T13:24:20.000Z
|
2021-05-10T17:36:53.000Z
|
Python2.7-IDCard_Detect_by_AI/idcard_detection.py
|
TencentCloud/Serverless-examples
|
4cfe9183e4fd547762ac610e9370f8c98bf5de70
|
[
"Python-2.0",
"OLDAP-2.7"
] | 4
|
2020-04-15T13:05:23.000Z
|
2021-04-19T02:12:24.000Z
|
# -*- coding: utf-8 -*-
import json
import os
import logging
import datetime
from qcloud_cos_v5 import CosConfig
from qcloud_cos_v5 import CosS3Client
from qcloud_cos_v5 import CosServiceError
from tencentcloud.common import credential
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.ocr.v20181119 import ocr_client, models
from PIL import Image
import PIL.Image
import sys
import base64
print('Loading function')
region = u'ap-beijing' # modify the region according to your actual situation
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger()
logger.setLevel(level=logging.INFO)
def delete_local_file(src):
logger.info("delete files and folders")
if os.path.isfile(src):
try:
os.remove(src)
except:
pass
elif os.path.isdir(src):
for item in os.listdir(src):
itemsrc = os.path.join(src, item)
            delete_local_file(itemsrc)
try:
os.rmdir(src)
except:
pass
def idcard_detection(secret_id,secret_key,token,download_path,CardSide):
try:
cred = credential.Credential(secret_id,secret_key,token)
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred, "ap-beijing", clientProfile)
req = models.IDCardOCRRequest()
with open(download_path, "rb") as f:
base64_data = base64.b64encode(f.read())
params = '{"ImageBase64":"%s","CardSide":"%s"}'%(base64_data,CardSide)
req.from_json_string(params)
resp = client.IDCardOCR(req)
res_ai = json.loads(resp.to_json_string())
return res_ai
except TencentCloudSDKException as err:
# print(err)
return -1
def main_handler(event, context):
logger.info("start main handler")
if "Records" not in event.keys():
return {"errorMsg": "event is not come from cos"}
secret_id = os.environ.get('TENCENTCLOUD_SECRETID')
secret_key = os.environ.get('TENCENTCLOUD_SECRETKEY')
token = os.environ.get('TENCENTCLOUD_SESSIONTOKEN')
appid = event['Records'][0]['cos']['cosBucket']['appid']
config = CosConfig(Secret_id=secret_id, Secret_key=secret_key, Region=region, Token=token)
cos_client = CosS3Client(config)
bucket = event['Records'][0]['cos']['cosBucket']['name'] + '-' + str(appid)
key = event['Records'][0]['cos']['cosObject']['key']
key = key.replace('/' + str(appid) + '/' + event['Records'][0]['cos']['cosBucket']['name'] + '/', '', 1)
download_path = '/tmp/{}'.format(key)
tmpload_path = '/tmp/resized-{}'.format(key)
logger.info("Key is " + key)
logger.info("Get from [%s] to download file [%s]" % (bucket, key))
# download image from cos
try:
response = cos_client.get_object(Bucket=bucket, Key=key, )
response['Body'].get_stream_to_file(download_path)
logger.info("Download file [%s] Success" % key)
except CosServiceError as e:
print(e.get_error_code())
print(e.get_error_msg())
print(e.get_resource_location())
return "Download File Fail"
# detect idcard
logger.info("Start Detection")
CardSide = "FRONT"
res_ai = idcard_detection(secret_id,secret_key,token,download_path,CardSide)
if res_ai != -1 :
res_print = {
"姓名:": res_ai["Name"],
"性别:": res_ai["Sex"],
"出生:": res_ai["Birth"],
"住址:": res_ai["Address"],
"民族:": res_ai["Nation"],
"公民身份证号:": res_ai['IdNum']
}
print (json.dumps(res_print).decode('unicode-escape'))
else:
CardSide = "BACK"
res_ai = idcard_detection(secret_id,secret_key,token,download_path,CardSide)
if res_ai != -1 :
res_print = {
"有效期限:": res_ai["ValidDate"],
"签发机关:": res_ai["Authority"]
}
print (json.dumps(res_print).decode('unicode-escape'))
else:
return "Detect Fail"
# delete local file
delete_local_file(str(download_path))
return res_print
| 36.421488
| 108
| 0.638983
|
5b6d01bfd633f705953af3efeb56ba8c5b83f3bd
| 527
|
py
|
Python
|
06_Joining Data with pandas/02_Merging Tables With Different Join Types/08_How does pandas handle self joins.py
|
mohd-faizy/DataScience-With-Python
|
13ebb10cf9083343056d5b782957241de1d595f9
|
[
"MIT"
] | 5
|
2021-02-03T14:36:58.000Z
|
2022-01-01T10:29:26.000Z
|
06_Joining Data with pandas/02_Merging Tables With Different Join Types/08_How does pandas handle self joins.py
|
mohd-faizy/DataScience-With-Python
|
13ebb10cf9083343056d5b782957241de1d595f9
|
[
"MIT"
] | null | null | null |
06_Joining Data with pandas/02_Merging Tables With Different Join Types/08_How does pandas handle self joins.py
|
mohd-faizy/DataScience-With-Python
|
13ebb10cf9083343056d5b782957241de1d595f9
|
[
"MIT"
] | 3
|
2021-02-08T00:31:16.000Z
|
2022-03-17T13:52:32.000Z
|
'''
How does pandas handle self joins?
Select the false statement about merging a table to itself.
Answer the question
Possible Answers
1. You can merge a table to itself with a right join.[X]
2. Merging a table to itself can allow you to compare values in a column to
other values in the same column.[X]
3. The Pandas module limits you to one merge where you merge a table to itself.
You cannot repeat this process over and over.[✓]
4. Merging a table to itself is like working with two separate tables.[X]
'''
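# Illustrative sketch (an addition, not part of the original exercise): a
# self merge in pandas simply passes the same DataFrame as both the left and
# right table, and nothing prevents repeating it, which is why statement 3 is
# the false one. The column names below are made up.
import pandas as pd

employees = pd.DataFrame({
    "id": [1, 2, 3],
    "name": ["Ana", "Bo", "Cy"],
    "manager_id": [None, 1, 1],
})

# First self merge: pair each employee with their direct manager.
with_managers = employees.merge(
    employees, left_on="manager_id", right_on="id",
    suffixes=("", "_manager"), how="left",
)

# Second self merge on the result: find the manager's manager.
with_grand = with_managers.merge(
    employees, left_on="manager_id_manager", right_on="id",
    suffixes=("", "_grand"), how="left",
)
print(with_grand[["name", "name_manager", "name_grand"]])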
| 25.095238
| 79
| 0.743833
|
a09de6dbecf713b694b8a832ed3d9c4ca0285d39
| 1,422
|
py
|
Python
|
main.py
|
GunshipPenguin/prospector
|
fd760dd7f8eff684aa9022cc6c5a8febfaba3ebd
|
[
"MIT"
] | null | null | null |
main.py
|
GunshipPenguin/prospector
|
fd760dd7f8eff684aa9022cc6c5a8febfaba3ebd
|
[
"MIT"
] | 1
|
2017-01-21T22:54:37.000Z
|
2017-01-21T22:54:37.000Z
|
main.py
|
GunshipPenguin/prospector
|
fd760dd7f8eff684aa9022cc6c5a8febfaba3ebd
|
[
"MIT"
] | null | null | null |
from flask import Flask
from challengeswrapper import ChallengesWrapper
challengesWrapper = ChallengesWrapper()
app = Flask(__name__)
@app.route('/<challenge_id>')
def challenge(challenge_id):
'''
Controller that gives responses for a specific challenge. challenge_id is
    the unique id of the challenge. If challenge_id does not represent a valid
challenge, return 404.
'''
challenge = challengesWrapper.get_challenge(challenge_id)
if (not challenge):
return 'Not found', 404
return challenge.get_response(app)
@app.route('/<challenge_id>/hint/<hint>')
def hint(challenge_id, hint):
'''
Controller that gives responses for challenge hints. hint is the hint
number for the challenge with id challenge_id. If hint is not a valid
hint number for the challenge with id challenge_id, return 404.
'''
challenge = challengesWrapper.get_challenge(challenge_id)
# Ensure that challenge_id is valid
if (not challenge):
        return 'Not found', 404
# Ensure that the hint number provided is a valid integer
hintNum = 0
try:
hintNum = int(hint)
except:
return 'Invalid hint number', 404
# Ensure that the hint number is a valid hint number
if (not challenge.get_hint(hintNum)):
return 'Invalid Hint Number', 404
return challenge.get_hint(hintNum)
if __name__ == '__main__':
app.run()
| 28.44
| 77
| 0.703938
|
64565ac5699fac2b6660bded653a45da4f94c48f
| 493
|
py
|
Python
|
bang/dag/serializers.py
|
rgoss-waystar/bang
|
dfc739fbeead20e5dbbf2f39a23d69517a3fac2e
|
[
"MIT"
] | null | null | null |
bang/dag/serializers.py
|
rgoss-waystar/bang
|
dfc739fbeead20e5dbbf2f39a23d69517a3fac2e
|
[
"MIT"
] | 4
|
2021-03-19T04:50:40.000Z
|
2022-02-10T09:04:08.000Z
|
bang/dag/serializers.py
|
jvhart/bang
|
665cc3271130ab9292e5ec4a2e6ec5a41fb5cb05
|
[
"MIT"
] | 3
|
2020-07-08T10:43:13.000Z
|
2021-03-04T14:06:54.000Z
|
from . import models
from rest_framework import serializers
class DAGProcessStatusSerializer(serializers.ModelSerializer):
observed_time = serializers.DateTimeField(format="%Y-%m-%dT%H:%M:%S")
class Meta:
model = models.DAGProcessStatus
fields = (
'pk',
'dag_process',
'pid_running',
'memory_consumption',
'cpu_consumption',
'child_processes',
'observed_time',
)
| 24.65
| 73
| 0.578093
|
142f122a6e22141f9e12abddd8469dd77ef8adc9
| 4,401
|
py
|
Python
|
Agents/mirl.py
|
TylerJamesMalloy/bullet3
|
e357853815c1e0297683218273de79e586b574c8
|
[
"Zlib"
] | null | null | null |
Agents/mirl.py
|
TylerJamesMalloy/bullet3
|
e357853815c1e0297683218273de79e586b574c8
|
[
"Zlib"
] | null | null | null |
Agents/mirl.py
|
TylerJamesMalloy/bullet3
|
e357853815c1e0297683218273de79e586b574c8
|
[
"Zlib"
] | null | null | null |
import logging, os, time, multiprocessing, sys, signal
logging.disable(logging.WARNING)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
import gym
import pybullet, pybullet_envs, pybullet_data
import numpy as np
import pandas as pd
from stable_baselines.sac.policies import MlpPolicy
from stable_baselines.clac.policies import MlpPolicy as CLAC_MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import SAC, CLAC
#from tensorflow.python.client import device_lib
#print(device_lib.list_local_devices())
#ENVIRONMENT_NAMES = [Walker2DBulletEnv-v0, Robots/AntBulletEnv-v0 , "HopperBulletEnv-v0" , "HumanoidBulletEnv-v0", "HalfCheetahBulletEnv-v0"]
#FOLDERS = [ "Robots/AntBulletEnv" , "Robots/HopperBulletEnv" , "Robots/HumanoidBulletEnv", "Robots/HumanoidFlagrunBulletEnv"]
#physicsClient = pybullet.connect(pybullet.GUI) #or p.DIRECT for non-graphical version
#pybullet.setAdditionalSearchPath(pybullet_data.getDataPath()) #optionally
# Robots
# RobotsGen
# RobotsExtremeGen
FOLDER = "RobotsExtremeGen/AntBulletEnv"
# Create target Directory if don't exist
if not os.path.exists(FOLDER):
os.mkdir(FOLDER)
if not os.path.exists(FOLDER + "/models"):
os.mkdir(FOLDER + "/models")
if not os.path.exists(FOLDER + "/results"):
os.mkdir(FOLDER + "/results")
if not os.path.exists(FOLDER + "/features"):
os.mkdir(FOLDER + "/features")
NUM_RESAMPLES = 100
NUM_TRAINING_STEPS = 10000
ENVIRONMENT_NAME = "AntBulletEnv-v0"
#RANDOMIZATION_LEVEL = "None"
#RANDOMIZATION_LEVEL = "Test"
#RANDOMIZATION_LEVEL = "Normal"
RANDOMIZATION_LEVEL = "Extreme"
CLAC_COEFS = [2.0]
def test_agent(agent_step):
for coef_index in range(len(CLAC_COEFS)):
mut_coef = CLAC_COEFS[coef_index]
if(agent_step == 1):
print(mut_coef, " ", NUM_TRAINING_STEPS, " ", ENVIRONMENT_NAME, " ", FOLDER)
features = pd.DataFrame()
mirl_env = gym.make(ENVIRONMENT_NAME)
mirl_env = DummyVecEnv([lambda: mirl_env])
mirl_model = CLAC(CLAC_MlpPolicy, mirl_env, mut_inf_coef=mut_coef, coef_schedule=3.3e-4, verbose=1)
(mirl_model, learning_results) = mirl_model.learn(total_timesteps=NUM_TRAINING_STEPS, log_interval=10)
learning_results['AgentID'] = agent_step
learning_results.to_pickle(FOLDER + "/results/MIRL_" + str(mut_coef).replace(".", "p") + "_" + str(agent_step) + "_0.pkl")
for resample_step in range(1, NUM_RESAMPLES):
# Set both environments to the same resampled values
if(RANDOMIZATION_LEVEL == "Normal"):
mirl_env.env_method("randomize", 0)
elif(RANDOMIZATION_LEVEL == "Extreme"):
mirl_env.env_method("randomize", 1)
elif(RANDOMIZATION_LEVEL == "Test"):
mirl_env.env_method("randomize", -1)
else:
print("Error resampling unknown value: ", RANDOMIZATION_LEVEL)
continue
if(agent_step == 1):
print(mut_coef, " ", NUM_TRAINING_STEPS, " ", ENVIRONMENT_NAME, " ", FOLDER, " resample step ", resample_step)
(mirl_model, learning_results) = mirl_model.learn(total_timesteps=NUM_TRAINING_STEPS, reset_num_timesteps=False, log_interval=10)
learning_results.to_pickle(FOLDER + "/results/MIRL_" + str(mut_coef).replace(".", "p") + "_" + str(agent_step) + "_" + str(resample_step) + ".pkl")
mirl_model.save(FOLDER + "/models/MIRL_" + str(mut_coef).replace(".", "p") + "_" + str(agent_step) + "_0")
del mirl_model
del mirl_env
def main():
Agents = [1, 2, 3, 4, 5, 6, 7, 8]
print("Initializng workers: ", Agents)
original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
pool = multiprocessing.Pool(processes=len(Agents))
signal.signal(signal.SIGINT, original_sigint_handler)
try:
print("Starting jobs")
res = pool.map_async(test_agent, Agents)
print("Waiting for results")
#res.get(1000000) # Without the timeout this blocking call ignores all signals.
except KeyboardInterrupt:
print("Caught Keyboard Interrupt, terminating workers")
pool.terminate()
pool.join()
else:
print("Normal termination")
pool.close()
pool.join()
if __name__ == "__main__":
main()
| 37.615385
| 159
| 0.6778
|
0d398c15301276f39bfc00d923b6ce455e0918c7
| 22,789
|
py
|
Python
|
tests/providers/google/cloud/hooks/test_pubsub.py
|
arezamoosavi/airflow
|
c3c81c3144386d1de535c1c5e777270e727bb69e
|
[
"Apache-2.0"
] | 2
|
2016-08-23T14:22:15.000Z
|
2017-09-28T19:45:26.000Z
|
tests/providers/google/cloud/hooks/test_pubsub.py
|
arezamoosavi/airflow
|
c3c81c3144386d1de535c1c5e777270e727bb69e
|
[
"Apache-2.0"
] | 4
|
2019-01-24T11:01:17.000Z
|
2022-02-28T04:28:07.000Z
|
tests/providers/google/cloud/hooks/test_pubsub.py
|
arezamoosavi/airflow
|
c3c81c3144386d1de535c1c5e777270e727bb69e
|
[
"Apache-2.0"
] | 6
|
2018-04-09T07:46:05.000Z
|
2019-07-16T00:13:15.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from typing import List
from unittest import mock
from uuid import UUID
import pytest
from google.api_core.exceptions import AlreadyExists, GoogleAPICallError
from google.cloud.exceptions import NotFound
from google.cloud.pubsub_v1.types import ReceivedMessage
from googleapiclient.errors import HttpError
from parameterized import parameterized
from airflow.providers.google.cloud.hooks.pubsub import PubSubException, PubSubHook
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.version import version
BASE_STRING = 'airflow.providers.google.common.hooks.base_google.{}'
PUBSUB_STRING = 'airflow.providers.google.cloud.hooks.pubsub.{}'
EMPTY_CONTENT = b''
TEST_PROJECT = 'test-project'
TEST_TOPIC = 'test-topic'
TEST_SUBSCRIPTION = 'test-subscription'
TEST_UUID = UUID('cf4a56d2-8101-4217-b027-2af6216feb48')
TEST_MESSAGES = [
{'data': b'Hello, World!', 'attributes': {'type': 'greeting'}},
{'data': b'Knock, knock'},
{'attributes': {'foo': ''}},
]
EXPANDED_TOPIC = f'projects/{TEST_PROJECT}/topics/{TEST_TOPIC}'
EXPANDED_SUBSCRIPTION = f'projects/{TEST_PROJECT}/subscriptions/{TEST_SUBSCRIPTION}'
LABELS = {'airflow-version': 'v' + version.replace('.', '-').replace('+', '-')}
def mock_init(
self,
gcp_conn_id,
delegate_to=None,
impersonation_chain=None,
):
pass
class TestPubSubHook(unittest.TestCase):
def setUp(self):
with mock.patch(BASE_STRING.format('GoogleBaseHook.__init__'), new=mock_init):
self.pubsub_hook = PubSubHook(gcp_conn_id='test')
def _generate_messages(self, count) -> List[ReceivedMessage]:
return [
ReceivedMessage(
ack_id=str(i),
message={
"data": f'Message {i}'.encode(),
"attributes": {"type": "generated message"},
},
)
for i in range(1, count + 1)
]
@mock.patch("airflow.providers.google.cloud.hooks.pubsub.PubSubHook._get_credentials")
@mock.patch("airflow.providers.google.cloud.hooks.pubsub.PublisherClient")
def test_publisher_client_creation(self, mock_client, mock_get_creds):
assert self.pubsub_hook._client is None
result = self.pubsub_hook.get_conn()
mock_client.assert_called_once_with(credentials=mock_get_creds.return_value, client_info=CLIENT_INFO)
assert mock_client.return_value == result
assert self.pubsub_hook._client == result
@mock.patch("airflow.providers.google.cloud.hooks.pubsub.PubSubHook._get_credentials")
@mock.patch("airflow.providers.google.cloud.hooks.pubsub.SubscriberClient")
def test_subscriber_client_creation(self, mock_client, mock_get_creds):
assert self.pubsub_hook._client is None
result = self.pubsub_hook.subscriber_client
mock_client.assert_called_once_with(credentials=mock_get_creds.return_value, client_info=CLIENT_INFO)
assert mock_client.return_value == result
@mock.patch(PUBSUB_STRING.format('PubSubHook.get_conn'))
def test_create_nonexistent_topic(self, mock_service):
create_method = mock_service.return_value.create_topic
self.pubsub_hook.create_topic(project_id=TEST_PROJECT, topic=TEST_TOPIC)
create_method.assert_called_once_with(
request=dict(name=EXPANDED_TOPIC, labels=LABELS, message_storage_policy=None, kms_key_name=None),
retry=None,
timeout=None,
metadata=(),
)
@mock.patch(PUBSUB_STRING.format('PubSubHook.get_conn'))
def test_delete_topic(self, mock_service):
delete_method = mock_service.return_value.delete_topic
self.pubsub_hook.delete_topic(project_id=TEST_PROJECT, topic=TEST_TOPIC)
delete_method.assert_called_once_with(
request=dict(topic=EXPANDED_TOPIC), retry=None, timeout=None, metadata=()
)
@mock.patch(PUBSUB_STRING.format('PubSubHook.get_conn'))
def test_delete_nonexisting_topic_failifnotexists(self, mock_service):
mock_service.return_value.delete_topic.side_effect = NotFound(
f'Topic does not exists: {EXPANDED_TOPIC}'
)
with pytest.raises(PubSubException) as ctx:
self.pubsub_hook.delete_topic(project_id=TEST_PROJECT, topic=TEST_TOPIC, fail_if_not_exists=True)
assert str(ctx.value) == f'Topic does not exist: {EXPANDED_TOPIC}'
@mock.patch(PUBSUB_STRING.format('PubSubHook.get_conn'))
def test_delete_topic_api_call_error(self, mock_service):
mock_service.return_value.delete_topic.side_effect = GoogleAPICallError(
f'Error deleting topic: {EXPANDED_TOPIC}'
)
with pytest.raises(PubSubException):
self.pubsub_hook.delete_topic(project_id=TEST_PROJECT, topic=TEST_TOPIC, fail_if_not_exists=True)
@mock.patch(PUBSUB_STRING.format('PubSubHook.get_conn'))
def test_create_preexisting_topic_failifexists(self, mock_service):
mock_service.return_value.create_topic.side_effect = AlreadyExists(
f'Topic already exists: {TEST_TOPIC}'
)
with pytest.raises(PubSubException) as ctx:
self.pubsub_hook.create_topic(project_id=TEST_PROJECT, topic=TEST_TOPIC, fail_if_exists=True)
assert str(ctx.value) == f'Topic already exists: {TEST_TOPIC}'
@mock.patch(PUBSUB_STRING.format('PubSubHook.get_conn'))
def test_create_preexisting_topic_nofailifexists(self, mock_service):
mock_service.return_value.create_topic.side_effect = AlreadyExists(
f'Topic already exists: {EXPANDED_TOPIC}'
)
self.pubsub_hook.create_topic(project_id=TEST_PROJECT, topic=TEST_TOPIC)
@mock.patch(PUBSUB_STRING.format('PubSubHook.get_conn'))
def test_create_topic_api_call_error(self, mock_service):
mock_service.return_value.create_topic.side_effect = GoogleAPICallError(
f'Error creating topic: {TEST_TOPIC}'
)
with pytest.raises(PubSubException):
self.pubsub_hook.create_topic(project_id=TEST_PROJECT, topic=TEST_TOPIC, fail_if_exists=True)
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_create_nonexistent_subscription(self, mock_service):
create_method = mock_service.create_subscription
response = self.pubsub_hook.create_subscription(
project_id=TEST_PROJECT, topic=TEST_TOPIC, subscription=TEST_SUBSCRIPTION
)
create_method.assert_called_once_with(
request=dict(
name=EXPANDED_SUBSCRIPTION,
topic=EXPANDED_TOPIC,
push_config=None,
ack_deadline_seconds=10,
retain_acked_messages=None,
message_retention_duration=None,
labels=LABELS,
enable_message_ordering=False,
expiration_policy=None,
filter=None,
dead_letter_policy=None,
retry_policy=None,
),
retry=None,
timeout=None,
metadata=(),
)
assert TEST_SUBSCRIPTION == response
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_create_subscription_different_project_topic(self, mock_service):
create_method = mock_service.create_subscription
response = self.pubsub_hook.create_subscription(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
subscription=TEST_SUBSCRIPTION,
subscription_project_id='a-different-project',
)
expected_subscription = f'projects/a-different-project/subscriptions/{TEST_SUBSCRIPTION}'
create_method.assert_called_once_with(
request=dict(
name=expected_subscription,
topic=EXPANDED_TOPIC,
push_config=None,
ack_deadline_seconds=10,
retain_acked_messages=None,
message_retention_duration=None,
labels=LABELS,
enable_message_ordering=False,
expiration_policy=None,
filter=None,
dead_letter_policy=None,
retry_policy=None,
),
retry=None,
timeout=None,
metadata=(),
)
assert TEST_SUBSCRIPTION == response
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_delete_subscription(self, mock_service):
self.pubsub_hook.delete_subscription(project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION)
delete_method = mock_service.delete_subscription
delete_method.assert_called_once_with(
request=dict(subscription=EXPANDED_SUBSCRIPTION), retry=None, timeout=None, metadata=()
)
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_delete_nonexisting_subscription_failifnotexists(self, mock_service):
mock_service.delete_subscription.side_effect = NotFound(
f'Subscription does not exists: {EXPANDED_SUBSCRIPTION}'
)
with pytest.raises(PubSubException) as ctx:
self.pubsub_hook.delete_subscription(
project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, fail_if_not_exists=True
)
assert str(ctx.value) == f'Subscription does not exist: {EXPANDED_SUBSCRIPTION}'
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_delete_subscription_api_call_error(self, mock_service):
mock_service.delete_subscription.side_effect = GoogleAPICallError(
f'Error deleting subscription {EXPANDED_SUBSCRIPTION}'
)
with pytest.raises(PubSubException):
self.pubsub_hook.delete_subscription(
project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, fail_if_not_exists=True
)
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
@mock.patch(PUBSUB_STRING.format('uuid4'), new_callable=mock.Mock(return_value=lambda: TEST_UUID))
def test_create_subscription_without_subscription_name(self, mock_uuid, mock_service):
create_method = mock_service.create_subscription
expected_name = EXPANDED_SUBSCRIPTION.replace(TEST_SUBSCRIPTION, f'sub-{TEST_UUID}')
response = self.pubsub_hook.create_subscription(project_id=TEST_PROJECT, topic=TEST_TOPIC)
create_method.assert_called_once_with(
request=dict(
name=expected_name,
topic=EXPANDED_TOPIC,
push_config=None,
ack_deadline_seconds=10,
retain_acked_messages=None,
message_retention_duration=None,
labels=LABELS,
enable_message_ordering=False,
expiration_policy=None,
filter=None,
dead_letter_policy=None,
retry_policy=None,
),
retry=None,
timeout=None,
metadata=(),
)
assert f'sub-{TEST_UUID}' == response
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_create_subscription_with_ack_deadline(self, mock_service):
create_method = mock_service.create_subscription
response = self.pubsub_hook.create_subscription(
project_id=TEST_PROJECT, topic=TEST_TOPIC, subscription=TEST_SUBSCRIPTION, ack_deadline_secs=30
)
create_method.assert_called_once_with(
request=dict(
name=EXPANDED_SUBSCRIPTION,
topic=EXPANDED_TOPIC,
push_config=None,
ack_deadline_seconds=30,
retain_acked_messages=None,
message_retention_duration=None,
labels=LABELS,
enable_message_ordering=False,
expiration_policy=None,
filter=None,
dead_letter_policy=None,
retry_policy=None,
),
retry=None,
timeout=None,
metadata=(),
)
assert TEST_SUBSCRIPTION == response
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_create_subscription_with_filter(self, mock_service):
create_method = mock_service.create_subscription
response = self.pubsub_hook.create_subscription(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
subscription=TEST_SUBSCRIPTION,
filter_='attributes.domain="com"',
)
create_method.assert_called_once_with(
request=dict(
name=EXPANDED_SUBSCRIPTION,
topic=EXPANDED_TOPIC,
push_config=None,
ack_deadline_seconds=10,
retain_acked_messages=None,
message_retention_duration=None,
labels=LABELS,
enable_message_ordering=False,
expiration_policy=None,
filter='attributes.domain="com"',
dead_letter_policy=None,
retry_policy=None,
),
retry=None,
timeout=None,
metadata=(),
)
assert TEST_SUBSCRIPTION == response
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_create_subscription_failifexists(self, mock_service):
mock_service.create_subscription.side_effect = AlreadyExists(
f'Subscription already exists: {EXPANDED_SUBSCRIPTION}'
)
with pytest.raises(PubSubException) as ctx:
self.pubsub_hook.create_subscription(
project_id=TEST_PROJECT, topic=TEST_TOPIC, subscription=TEST_SUBSCRIPTION, fail_if_exists=True
)
assert str(ctx.value) == f'Subscription already exists: {EXPANDED_SUBSCRIPTION}'
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_create_subscription_api_call_error(self, mock_service):
mock_service.create_subscription.side_effect = GoogleAPICallError(
f'Error creating subscription {EXPANDED_SUBSCRIPTION}'
)
with pytest.raises(PubSubException):
self.pubsub_hook.create_subscription(
project_id=TEST_PROJECT, topic=TEST_TOPIC, subscription=TEST_SUBSCRIPTION, fail_if_exists=True
)
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_create_subscription_nofailifexists(self, mock_service):
mock_service.create_subscription.side_effect = AlreadyExists(
f'Subscription already exists: {EXPANDED_SUBSCRIPTION}'
)
response = self.pubsub_hook.create_subscription(
project_id=TEST_PROJECT, topic=TEST_TOPIC, subscription=TEST_SUBSCRIPTION
)
assert TEST_SUBSCRIPTION == response
@mock.patch(PUBSUB_STRING.format('PubSubHook.get_conn'))
def test_publish(self, mock_service):
publish_method = mock_service.return_value.publish
self.pubsub_hook.publish(project_id=TEST_PROJECT, topic=TEST_TOPIC, messages=TEST_MESSAGES)
calls = [
mock.call(topic=EXPANDED_TOPIC, data=message.get("data", b''), **message.get('attributes', {}))
for message in TEST_MESSAGES
]
publish_method.has_calls(calls)
@mock.patch(PUBSUB_STRING.format('PubSubHook.get_conn'))
def test_publish_api_call_error(self, mock_service):
publish_method = mock_service.return_value.publish
publish_method.side_effect = GoogleAPICallError(f'Error publishing to topic {EXPANDED_SUBSCRIPTION}')
with pytest.raises(PubSubException):
self.pubsub_hook.publish(project_id=TEST_PROJECT, topic=TEST_TOPIC, messages=TEST_MESSAGES)
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_pull(self, mock_service):
pull_method = mock_service.pull
pulled_messages = []
for i, msg in enumerate(TEST_MESSAGES):
pulled_messages.append({'ackId': i, 'message': msg})
pull_method.return_value.received_messages = pulled_messages
response = self.pubsub_hook.pull(
project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, max_messages=10
)
pull_method.assert_called_once_with(
request=dict(
subscription=EXPANDED_SUBSCRIPTION,
max_messages=10,
return_immediately=False,
),
retry=None,
timeout=None,
metadata=(),
)
assert pulled_messages == response
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_pull_no_messages(self, mock_service):
pull_method = mock_service.pull
pull_method.return_value.received_messages = []
response = self.pubsub_hook.pull(
project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, max_messages=10
)
pull_method.assert_called_once_with(
request=dict(
subscription=EXPANDED_SUBSCRIPTION,
max_messages=10,
return_immediately=False,
),
retry=None,
timeout=None,
metadata=(),
)
assert [] == response
@parameterized.expand(
[
(exception,)
for exception in [
HttpError(resp={'status': '404'}, content=EMPTY_CONTENT),
GoogleAPICallError("API Call Error"),
]
]
)
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_pull_fails_on_exception(self, exception, mock_service):
pull_method = mock_service.pull
pull_method.side_effect = exception
with pytest.raises(PubSubException):
self.pubsub_hook.pull(project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, max_messages=10)
pull_method.assert_called_once_with(
request=dict(
subscription=EXPANDED_SUBSCRIPTION,
max_messages=10,
return_immediately=False,
),
retry=None,
timeout=None,
metadata=(),
)
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_acknowledge_by_ack_ids(self, mock_service):
ack_method = mock_service.acknowledge
self.pubsub_hook.acknowledge(
project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, ack_ids=['1', '2', '3']
)
ack_method.assert_called_once_with(
request=dict(
subscription=EXPANDED_SUBSCRIPTION,
ack_ids=['1', '2', '3'],
),
retry=None,
timeout=None,
metadata=(),
)
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_acknowledge_by_message_objects(self, mock_service):
ack_method = mock_service.acknowledge
self.pubsub_hook.acknowledge(
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
messages=self._generate_messages(3),
)
ack_method.assert_called_once_with(
request=dict(
subscription=EXPANDED_SUBSCRIPTION,
ack_ids=['1', '2', '3'],
),
retry=None,
timeout=None,
metadata=(),
)
@parameterized.expand(
[
(exception,)
for exception in [
HttpError(resp={'status': '404'}, content=EMPTY_CONTENT),
GoogleAPICallError("API Call Error"),
]
]
)
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_acknowledge_fails_on_exception(self, exception, mock_service):
ack_method = mock_service.acknowledge
ack_method.side_effect = exception
with pytest.raises(PubSubException):
self.pubsub_hook.acknowledge(
project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, ack_ids=['1', '2', '3']
)
ack_method.assert_called_once_with(
request=dict(
subscription=EXPANDED_SUBSCRIPTION,
ack_ids=['1', '2', '3'],
),
retry=None,
timeout=None,
metadata=(),
)
@parameterized.expand(
[
(messages,)
for messages in [
[{"data": b'test'}],
[{"data": b''}],
[{"data": b'test', "attributes": {"weight": "100kg"}}],
[{"data": b'', "attributes": {"weight": "100kg"}}],
[{"attributes": {"weight": "100kg"}}],
]
]
)
def test_messages_validation_positive(self, messages):
PubSubHook._validate_messages(messages)
@parameterized.expand(
[
([("wrong type",)], "Wrong message type. Must be a dictionary."),
([{"wrong_key": b'test'}], "Wrong message. Dictionary must contain 'data' or 'attributes'."),
([{"data": 'wrong string'}], "Wrong message. 'data' must be send as a bytestring"),
([{"data": None}], "Wrong message. 'data' must be send as a bytestring"),
(
[{"attributes": None}],
"Wrong message. If 'data' is not provided 'attributes' must be a non empty dictionary.",
),
(
[{"attributes": "wrong string"}],
"Wrong message. If 'data' is not provided 'attributes' must be a non empty dictionary.",
),
]
)
def test_messages_validation_negative(self, messages, error_message):
with pytest.raises(PubSubException) as ctx:
PubSubHook._validate_messages(messages)
assert str(ctx.value) == error_message
| 41.209765
| 110
| 0.649743
|
0bdbd134a4f33f4ae87ef0df061ae8afd4b7b957
| 8,139
|
py
|
Python
|
tensorflow_tts/processor/base_processor.py
|
fkemeth/TensorFlowTTS
|
16b5885e4152f4af19fe99501f1ce4d6c1d61940
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_tts/processor/base_processor.py
|
fkemeth/TensorFlowTTS
|
16b5885e4152f4af19fe99501f1ce4d6c1d61940
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_tts/processor/base_processor.py
|
fkemeth/TensorFlowTTS
|
16b5885e4152f4af19fe99501f1ce4d6c1d61940
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Processor for all processor."""
import abc
import json
import os
from typing import Dict, List, Union
from dataclasses import dataclass, field
class DataProcessorError(Exception):
pass
@dataclass
class BaseProcessor(abc.ABC):
data_dir: str
symbols: List[str] = field(default_factory=list)
speakers_map: Dict[str, int] = field(default_factory=dict)
train_f_name: str = "train.txt"
delimiter: str = "|"
positions = {
"file": 0,
"text": 1,
"speaker_name": 2,
} # positions of file,text,speaker_name after split line
f_extension: str = ".wav"
saved_mapper_path: str = None
loaded_mapper_path: str = None
# extras
items: List[List[str]] = field(default_factory=list) # text, wav_path, speaker_name
symbol_to_id: Dict[str, int] = field(default_factory=dict)
id_to_symbol: Dict[int, str] = field(default_factory=dict)
def __post_init__(self):
if self.loaded_mapper_path is not None:
self._load_mapper(loaded_path=self.loaded_mapper_path)
if self.setup_eos_token():
self.add_symbol(
self.setup_eos_token()
) # if this eos token not yet present in symbols list.
self.eos_id = self.symbol_to_id[self.setup_eos_token()]
return
        if len(self.symbols) < 1:
raise DataProcessorError("Symbols list is empty but mapper isn't loaded")
self.create_items()
self.create_speaker_map()
self.reverse_speaker = {v: k for k, v in self.speakers_map.items()}
self.create_symbols()
if self.saved_mapper_path is not None:
self._save_mapper(saved_path=self.saved_mapper_path)
        # processor name. useful for AutoProcessor
self._processor_name = type(self).__name__
if self.setup_eos_token():
self.add_symbol(
self.setup_eos_token()
) # if this eos token not yet present in symbols list.
self.eos_id = self.symbol_to_id[self.setup_eos_token()]
def __getattr__(self, name: str) -> Union[str, int]:
if "_id" in name: # map symbol to id
return self.symbol_to_id[name.replace("_id", "")]
return self.symbol_to_id[name] # map symbol to value
def create_speaker_map(self):
"""
Create speaker map for dataset.
"""
sp_id = 0
for i in self.items:
speaker_name = i[-1]
if speaker_name not in self.speakers_map:
self.speakers_map[speaker_name] = sp_id
sp_id += 1
def get_speaker_id(self, name: str) -> int:
return self.speakers_map[name]
def get_speaker_name(self, speaker_id: int) -> str:
        # speakers_map maps name -> id, so invert the lookup for id -> name.
        return {v: k for k, v in self.speakers_map.items()}[speaker_id]
def create_symbols(self):
self.symbol_to_id = {s: i for i, s in enumerate(self.symbols)}
self.id_to_symbol = {i: s for i, s in enumerate(self.symbols)}
def create_items(self):
"""
Method used to create items from training file
items struct example => text, wav_file_path, speaker_name.
        Note that the speaker_name should be last.
"""
with open(
os.path.join(self.data_dir, self.train_f_name), mode="r", encoding="utf-8"
) as f:
for line in f:
parts = line.strip().split(self.delimiter)
wav_path = os.path.join(self.data_dir, parts[self.positions["file"]])
wav_path = (
wav_path + self.f_extension
if wav_path[-len(self.f_extension):] != self.f_extension
else wav_path
)
text = parts[self.positions["text"]]
speaker_name = parts[self.positions["speaker_name"]]
self.items.append([text, wav_path, speaker_name])
def add_symbol(self, symbol: Union[str, list]):
if isinstance(symbol, str):
if symbol in self.symbol_to_id:
return
self.symbols.append(symbol)
symbol_id = len(self.symbol_to_id)
self.symbol_to_id[symbol] = symbol_id
self.id_to_symbol[symbol_id] = symbol
elif isinstance(symbol, list):
for i in symbol:
self.add_symbol(i)
else:
raise ValueError("A new_symbols must be a string or list of string.")
@abc.abstractmethod
def get_one_sample(self, item):
"""Get one sample from dataset items.
Args:
item: one item in Dataset items.
Dataset items may include (raw_text, speaker_id, wav_path, ...)
Returns:
sample (dict): sample dictionary return all feature used for preprocessing later.
"""
sample = {
"raw_text": None,
"text_ids": None,
"audio": None,
"utt_id": None,
"speaker_name": None,
"rate": None,
}
return sample
@abc.abstractmethod
def text_to_sequence(self, text: str):
return []
@abc.abstractmethod
def setup_eos_token(self):
"""Return eos symbol of type string."""
return "eos"
def convert_symbols_to_ids(self, symbols: Union[str, list]):
sequence = []
if isinstance(symbols, str):
            sequence.append(self.symbol_to_id[symbols])
return sequence
elif isinstance(symbols, list):
for s in symbols:
if isinstance(s, str):
                    sequence.append(self.symbol_to_id[s])
else:
raise ValueError("All elements of symbols must be a string.")
else:
raise ValueError("A symbols must be a string or list of string.")
return sequence
def _load_mapper(self, loaded_path: str = None):
"""
        Load all needed mappers from file
"""
loaded_path = (
os.path.join(self.data_dir, "mapper.json")
if loaded_path is None
else loaded_path
)
with open(loaded_path, "r") as f:
data = json.load(f)
self.speakers_map = data["speakers_map"]
self.symbol_to_id = data["symbol_to_id"]
self.id_to_symbol = {int(k): v for k, v in data["id_to_symbol"].items()}
self._processor_name = data["processor_name"]
# other keys
all_data_keys = data.keys()
for key in all_data_keys:
if key not in ["speakers_map", "symbol_to_id", "id_to_symbol"]:
setattr(self, key, data[key])
def _save_mapper(self, saved_path: str = None, extra_attrs_to_save: dict = None):
"""
Save all needed mappers to file
"""
saved_path = (
os.path.join(self.data_dir, "mapper.json")
if saved_path is None
else saved_path
)
with open(saved_path, "w") as f:
full_mapper = {
"symbol_to_id": self.symbol_to_id,
"id_to_symbol": self.id_to_symbol,
"speakers_map": self.speakers_map,
"processor_name": self._processor_name,
}
if extra_attrs_to_save:
full_mapper = {**full_mapper, **extra_attrs_to_save}
json.dump(full_mapper, f, ensure_ascii=False)
@abc.abstractmethod
def save_pretrained(self, saved_path):
"""Save mappers to file"""
pass
| 35.081897
| 93
| 0.595282
|
fe77de3c2757149de9b70b33431998b93bde63e5
| 5,197
|
py
|
Python
|
src/segments/tests/test_views.py
|
augustuswm/flagsmith-api
|
6f37947fe3791726a92b4df2cdbded11e77387d3
|
[
"BSD-3-Clause"
] | 1,259
|
2021-06-10T11:24:09.000Z
|
2022-03-31T10:30:44.000Z
|
src/segments/tests/test_views.py
|
augustuswm/flagsmith-api
|
6f37947fe3791726a92b4df2cdbded11e77387d3
|
[
"BSD-3-Clause"
] | 392
|
2021-06-10T11:12:29.000Z
|
2022-03-31T10:13:53.000Z
|
src/segments/tests/test_views.py
|
augustuswm/flagsmith-api
|
6f37947fe3791726a92b4df2cdbded11e77387d3
|
[
"BSD-3-Clause"
] | 58
|
2021-06-11T03:18:07.000Z
|
2022-03-31T14:39:10.000Z
|
import json
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from audit.models import AuditLog, RelatedObjectType
from environments.identities.models import Identity
from environments.identities.traits.models import Trait
from environments.models import STRING, Environment
from organisations.models import Organisation, OrganisationRole
from projects.models import Project
from segments.models import EQUAL, Condition, Segment, SegmentRule
User = get_user_model()
class SegmentViewSetTestCase(APITestCase):
def setUp(self) -> None:
self.user = User.objects.create(email="test@example.com")
self.organisation = Organisation.objects.create(name="Test Organisation")
self.user.add_organisation(self.organisation, OrganisationRole.ADMIN)
self.client.force_authenticate(self.user)
self.project = Project.objects.create(
name="Test project", organisation=self.organisation
)
def tearDown(self) -> None:
AuditLog.objects.all().delete()
def test_audit_log_created_when_segment_created(self):
# Given
url = reverse("api-v1:projects:project-segments-list", args=[self.project.id])
data = {
"name": "Test Segment",
"project": self.project.id,
"rules": [{"type": "ALL", "rules": [], "conditions": []}],
}
# When
res = self.client.post(
url, data=json.dumps(data), content_type="application/json"
)
# Then
assert res.status_code == status.HTTP_201_CREATED
assert (
AuditLog.objects.filter(
related_object_type=RelatedObjectType.SEGMENT.name
).count()
== 1
)
def test_audit_log_created_when_segment_updated(self):
# Given
segment = Segment.objects.create(name="Test segment", project=self.project)
url = reverse(
"api-v1:projects:project-segments-detail",
args=[self.project.id, segment.id],
)
data = {
"name": "New segment name",
"project": self.project.id,
"rules": [{"type": "ALL", "rules": [], "conditions": []}],
}
# When
res = self.client.put(
url, data=json.dumps(data), content_type="application/json"
)
# Then
assert res.status_code == status.HTTP_200_OK
assert (
AuditLog.objects.filter(
related_object_type=RelatedObjectType.SEGMENT.name
).count()
== 1
)
def test_can_filter_by_identity_to_get_only_matching_segments(self):
# Given
trait_key = "trait_key"
trait_value = "trait_value"
matching_segment = Segment.objects.create(
name="Matching segment", project=self.project
)
matching_rule = SegmentRule.objects.create(
segment=matching_segment, type=SegmentRule.ALL_RULE
)
Condition.objects.create(
rule=matching_rule, property=trait_key, operator=EQUAL, value=trait_value
)
Segment.objects.create(name="Non matching segment", project=self.project)
environment = Environment.objects.create(
name="Test environment", project=self.project
)
identity = Identity.objects.create(
identifier="test-user", environment=environment
)
Trait.objects.create(
identity=identity,
trait_key=trait_key,
value_type=STRING,
string_value=trait_value,
)
base_url = reverse(
"api-v1:projects:project-segments-list", args=[self.project.id]
)
url = base_url + "?identity=%d" % identity.id
# When
res = self.client.get(url)
# Then
assert res.json().get("count") == 1
def test_cannot_create_segments_without_rules(self):
# Given
url = reverse("api-v1:projects:project-segments-list", args=[self.project.id])
data = {"name": "New segment name", "project": self.project.id, "rules": []}
# When
res = self.client.post(
url, data=json.dumps(data), content_type="application/json"
)
# Then
assert res.status_code == status.HTTP_400_BAD_REQUEST
def test_can_create_segments_with_boolean_condition(self):
# Given
url = reverse("api-v1:projects:project-segments-list", args=[self.project.id])
data = {
"name": "New segment name",
"project": self.project.id,
"rules": [
{
"type": "ALL",
"rules": [],
"conditions": [
{"operator": EQUAL, "property": "test-property", "value": True}
],
}
],
}
# When
res = self.client.post(
url, data=json.dumps(data), content_type="application/json"
)
# Then
assert res.status_code == status.HTTP_201_CREATED
| 32.48125
| 87
| 0.594381
|
919bd847c02b814fc0c5cfb9fb2cf0c42e03efdf
| 13,719
|
py
|
Python
|
hstrat/hstrat/stratum_retention_predicates/StratumRetentionPredicateGeomSeqNthRoot.py
|
mmore500/hstrat
|
7fedcf3a7203e1e6c99ac16f4ec43ad160da3e6c
|
[
"MIT"
] | null | null | null |
hstrat/hstrat/stratum_retention_predicates/StratumRetentionPredicateGeomSeqNthRoot.py
|
mmore500/hstrat
|
7fedcf3a7203e1e6c99ac16f4ec43ad160da3e6c
|
[
"MIT"
] | 3
|
2022-02-28T17:33:57.000Z
|
2022-02-28T21:41:33.000Z
|
hstrat/hstrat/stratum_retention_predicates/StratumRetentionPredicateGeomSeqNthRoot.py
|
mmore500/hstrat
|
7fedcf3a7203e1e6c99ac16f4ec43ad160da3e6c
|
[
"MIT"
] | null | null | null |
import itertools as it
import math
import numpy as np
import typing
from ...helpers import bit_floor, is_nondecreasing
class StratumRetentionPredicateGeomSeqNthRoot:
"""Functor to implement the approximate space-filling MRCA-recency-
proportional resolution stratum retention policy, for use with
HereditaryStratigraphicColumn.
This functor enacts the approximate space-filling MRCA-recency-proportional
resolution policy by specifying whether a stratum with deposition rank r
should be retained within the hereditary stratigraphic column after n
strata have been deposited.
    The approximate space-filling MRCA-recency-proportional resolution
    policy imposes an O(1) limit on the number of retained strata and
    ensures that retained strata are exponentially distributed with respect
    to ranks elapsed since their deposit. MRCA rank estimate uncertainty
    scales in the worst case as O(n) with respect to the greater number of
    strata deposited on either column. However, with respect to estimating
    the rank of the MRCA when lineages diverged any fixed number of
    generations ago, uncertainty scales as O(log(n)) (TODO check this).
Under the MRCA-recency-proportional resolution policy, the number of strata
retained (i.e., space complexity) scales as O(1) with respect to the
number of strata deposited.
Suppose k is specified as the policy's target space utilization. The first
k strata deposited are retained. Then, strata are retained so that MRCA
    rank estimate uncertainty is less than or equal to s * (1 - n^(-1/k)),
    where n is the number of strata deposited and s is the true number of
    ranks deposited since the MRCA. From this point onward, the number of
    strata retained fluctuates but respects a hard upper limit of 4k + 2
    (inclusive). All strata are retained until the target space utilization
    is reached; after that, the number of strata retained fluctuates to
    maintain the guaranteed estimate uncertainty. For larger target space
    utilizations, the number of strata retained appears to stay generally
    below twice the target space utilization.
"""
_degree: int
_interspersal: int
def __init__(
self: 'StratumRetentionPredicateGeomSeqNthRoot',
degree: int=100,
interspersal: int=2,
):
"""Construct the functor.
Parameters
----------
degree : int, optional
TODO.
interspersal : int, optional
TODO.
"""
assert degree >= 0
assert interspersal >= 1
self._degree = degree
self._interspersal = interspersal
def __eq__(
self: 'StratumRetentionPredicateGeomSeqNthRoot',
other: 'StratumRetentionPredicateGeomSeqNthRoot',
) -> bool:
"""Compare for value-wise equality."""
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def _calc_common_ratio(
self: 'StratumRetentionPredicateGeomSeqNthRoot',
num_strata_deposited: int,
) -> float:
"""What should the base of the exponential distribution of retained
ranks be?"""
# base ** degree == num_strata_deposited
# take the degree'th root of each side...
return num_strata_deposited ** (1 / self._degree)
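    # Worked example (illustrative): with degree=2 and num_strata_deposited=100,
    # the common ratio is 100 ** (1 / 2) == 10.0, so the target recencies
    # iterated below are 10.0 and 100.0 (powers 1 through degree).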
def _iter_target_recencies(
self: 'StratumRetentionPredicateGeomSeqNthRoot',
num_strata_deposited: int,
):
"""TODO."""
# target recencies are a geometric sequence
common_ratio = self._calc_common_ratio(num_strata_deposited)
# don't iterate over 0th pow, this is just the most recent rank
# i.e., recency == 1
for pow in range(1, self._degree + 1):
yield common_ratio ** pow
def _iter_target_ranks(
self: 'StratumRetentionPredicateGeomSeqNthRoot',
num_strata_deposited: int,
):
"""TODO."""
for target_recency in self._iter_target_recencies(num_strata_deposited):
recency_cutoff = target_recency
rank_cutoff = max(
num_strata_deposited - int(math.ceil(recency_cutoff)),
0,
)
if num_strata_deposited == 0: assert rank_cutoff == 0
else: assert 0 <= rank_cutoff <= num_strata_deposited - 1
yield rank_cutoff
def _iter_rank_cutoffs(
self: 'StratumRetentionPredicateGeomSeqNthRoot',
num_strata_deposited: int,
):
"""TODO."""
for target_recency in self._iter_target_recencies(num_strata_deposited):
rank_cutoff = max(
num_strata_deposited - int(math.ceil(
target_recency
* (self._interspersal + 1) / self._interspersal
)),
0,
)
if num_strata_deposited == 0: assert rank_cutoff == 0
else: assert 0 <= rank_cutoff <= num_strata_deposited - 1
yield rank_cutoff
def _iter_rank_seps(
self: 'StratumRetentionPredicateGeomSeqNthRoot',
num_strata_deposited: int,
):
"""TODO."""
for target_recency in self._iter_target_recencies(num_strata_deposited):
# spacing between retained ranks
target_retained_ranks_sep = max(
target_recency / self._interspersal,
1.0,
)
# round down to power of 2
retained_ranks_sep = bit_floor(int(target_retained_ranks_sep))
yield retained_ranks_sep
def _iter_rank_backstops(
self: 'StratumRetentionPredicateGeomSeqNthRoot',
num_strata_deposited: int,
):
"""TODO."""
for rank_cutoff, retained_ranks_sep in zip(
self._iter_rank_cutoffs(num_strata_deposited),
self._iter_rank_seps(num_strata_deposited),
):
# round UP from rank_cutoff
# adapted from https://stackoverflow.com/a/14092788
min_retained_rank = (
rank_cutoff
- (rank_cutoff % -retained_ranks_sep)
)
assert min_retained_rank % retained_ranks_sep == 0
if num_strata_deposited == 0: assert min_retained_rank == 0
else: assert 0 <= min_retained_rank <= num_strata_deposited - 1
yield min_retained_rank
def _get_retained_ranks(
self: 'StratumRetentionPredicateGeomSeqNthRoot',
num_strata_deposited: int,
) -> typing.Set[int]:
"""TODO."""
# special case
if num_strata_deposited == 0: return set()
interspersal = self._interspersal
last_rank = num_strata_deposited - 1
res = {0, last_rank}
for target_rank, rank_backstop, retained_ranks_sep in zip(
self._iter_target_ranks(num_strata_deposited),
self._iter_rank_backstops(num_strata_deposited),
self._iter_rank_seps(num_strata_deposited),
):
min_retained_rank = rank_backstop
target_ranks = range(
min_retained_rank, # start
num_strata_deposited, # stop
retained_ranks_sep, # sep
)
# ensure target_ranks non-empty
assert len(target_ranks)
# ensure expected ordering of target ranks
assert is_nondecreasing(target_ranks)
# ensure last coverage at or past the target
assert target_ranks[0] <= target_rank
# ensure one-past-midpoint coverage before the target
if len(target_ranks) >= 3:
assert target_ranks[len(target_ranks)//2 + 1] > target_rank
# ensure at least interspersal ranks covered
assert len(target_ranks) >= min(
interspersal,
len(range(target_rank, num_strata_deposited)),
)
# ensure space complexity cap respected
assert len(target_ranks) <= 2 * (interspersal + 1)
# ensure sufficient target_ranks included
if retained_ranks_sep > 1: assert len(target_ranks) >= interspersal
res.update(target_ranks)
assert all(isinstance(n, int) for n in res)
assert all(0 <= n < num_strata_deposited for n in res)
assert res
return res
def _iter_retained_ranks(
self: 'StratumRetentionPredicateGeomSeqNthRoot',
num_strata_deposited: int,
):
yield from sorted(self._get_retained_ranks(num_strata_deposited))
def CalcNumStrataRetainedExact(
self: 'StratumRetentionPredicateGeomSeqNthRoot',
num_strata_deposited: int,
) -> int:
"""Exactly how many strata are retained after n deposted?"""
return len(self._get_retained_ranks(num_strata_deposited))
def __call__(
self: 'StratumRetentionPredicateGeomSeqNthRoot',
stratum_rank: int,
num_stratum_depositions_completed: int,
) -> bool:
"""Decide if a stratum within the stratagraphic column should be
retained or purged.
Every time a new stratum is deposited, this method is called on each
stratum present in a HereditaryStratigraphicColumn to determine whether
it should be retained. Strata that return False are immediately purged
from the column, meaning that for a stratum to persist it must earn a
True result from this method each and every time a new stratum is
deposited.
Parameters
----------
stratum_rank : int
The number of strata that were deposited before the stratum under
consideration for retention.
num_stratum_depositions_completed : int
The number of strata that have already been deposited, not
including the latest stratum being deposited which prompted the
current purge operation.
Returns
-------
bool
True if the stratum should be retained, False otherwise.
"""
return stratum_rank in self._get_retained_ranks(
num_stratum_depositions_completed + 1,
)
def CalcNumStrataRetainedExact(
self: 'StratumRetentionPredicateGeomSeqNthRoot',
num_strata_deposited: int,
) -> int:
"""Exactly how many strata are retained after n deposted?"""
return len(
self._get_retained_ranks(num_strata_deposited)
)
def CalcNumStrataRetainedUpperBound(
self: 'StratumRetentionPredicateGeomSeqNthRoot',
num_strata_deposited: typing.Optional[int]=None,
):
"""At most, how many strata are retained after n deposted? Inclusive."""
# +2 is 0th rank and last rank
return self._degree * 2 * (self._interspersal + 1) + 2
def CalcMrcaUncertaintyUpperBound(
self: 'StratumRetentionPredicateGeomSeqNthRoot',
*,
actual_rank_of_mrca: int,
first_num_strata_deposited: int,
second_num_strata_deposited: int,
) -> int:
"""At most, how much uncertainty to estimate rank of MRCA? Inclusive."""
max_num_strata_deposited = max(
first_num_strata_deposited,
second_num_strata_deposited,
)
if max_num_strata_deposited == 0: return 0
interspersal = self._interspersal
# edge case: no uncertainty guarantee for interspersal 1
# interspersal >= 2 required for uncertainty guarantee
if interspersal == 1: return max_num_strata_deposited
max_ranks_since_mrca = max_num_strata_deposited - actual_rank_of_mrca
# edge case: columns are identical
if max_ranks_since_mrca == 0: return 0
common_ratio = self._calc_common_ratio(max_num_strata_deposited)
# edge case: no strata have yet been dropped
if common_ratio == 1.0: return 0
# round up to next power of common_ratio
rounded_ranks_since_mrca = (
common_ratio
** int(math.ceil(math.log(max_ranks_since_mrca, common_ratio)))
)
# should be leq just multiplying max_ranks_since_mrca by common_ratio
assert (
rounded_ranks_since_mrca <= max_ranks_since_mrca * common_ratio
# account for representation error etc.
or math.isclose(
rounded_ranks_since_mrca,
max_ranks_since_mrca * common_ratio,
)
)
# account for increased resolution from interspersal
return int(math.ceil(rounded_ranks_since_mrca / (interspersal - 1)))
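    # Worked example (illustrative): for a predicate with degree=6 and
    # interspersal=2, and 64 strata deposited on the longer column,
    # common_ratio == 64 ** (1 / 6) == 2.0; for a divergence 10 ranks back,
    # rounded_ranks_since_mrca == 2 ** ceil(log2(10)) == 16, so the bound
    # returned is ceil(16 / (2 - 1)) == 16.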
def CalcRankAtColumnIndex(
self: 'StratumRetentionPredicateGeomSeqNthRoot',
index: int,
num_strata_deposited: int,
) -> int:
"""After n strata have been deposited, what will the rank of the
stratum at column index k be?
Enables a HereditaryStratigraphicColumn using this predicate to
        optimize away storage of rank annotations on strata. Takes into
        account the possibility of in-progress stratum depositions that haven't
been reflected in num_strata_deposited.
"""
num_retained = self.CalcNumStrataRetainedExact(
num_strata_deposited=num_strata_deposited,
)
# allow index equal for in-progress deposition case
assert 0 <= index <= num_retained
return next(
rank for i, rank in enumerate(
it.chain(
self._iter_retained_ranks(num_strata_deposited),
# in-progress deposition case
(num_strata_deposited,),
)
)
if i == index
)
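# Minimal usage sketch (illustrative only; run from within the hstrat package,
# since this module relies on package-relative imports):
#
#     predicate = StratumRetentionPredicateGeomSeqNthRoot(degree=4, interspersal=2)
#     keep = predicate(stratum_rank=7, num_stratum_depositions_completed=100)
#     n_retained = predicate.CalcNumStrataRetainedExact(num_strata_deposited=100)
#     assert n_retained <= predicate.CalcNumStrataRetainedUpperBound()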
| 37.586301
| 503
| 0.644581
|
7c502a9c213caed88dd248344dbc412f6d7e6a66
| 18
|
py
|
Python
|
allennlp/tests/fixtures/plugins/project_b/allennlp_plugins/b/__init__.py
|
justindujardin/allennlp
|
c4559f3751775aa8bc018db417edc119d29d8051
|
[
"Apache-2.0"
] | 1
|
2020-03-30T14:07:02.000Z
|
2020-03-30T14:07:02.000Z
|
allennlp/tests/fixtures/plugins/project_b/allennlp_plugins/b/__init__.py
|
justindujardin/allennlp
|
c4559f3751775aa8bc018db417edc119d29d8051
|
[
"Apache-2.0"
] | 123
|
2020-04-26T02:41:30.000Z
|
2021-08-02T21:18:00.000Z
|
allennlp/tests/fixtures/plugins/project_b/allennlp_plugins/b/__init__.py
|
justindujardin/allennlp
|
c4559f3751775aa8bc018db417edc119d29d8051
|
[
"Apache-2.0"
] | 2
|
2019-12-21T05:58:44.000Z
|
2021-08-16T07:41:21.000Z
|
from b.b import B
| 9
| 17
| 0.722222
|
1939818a7e8be168bae1a7d689bc1ec8d85bbde6
| 5,514
|
py
|
Python
|
test/terra/backends/aer_simulator/test_chunk.py
|
garrison/qiskit-aer
|
24c51a675b8653c8ad2af587d40b795ac94c07c7
|
[
"Apache-2.0"
] | 313
|
2018-12-19T09:19:12.000Z
|
2022-03-21T18:15:41.000Z
|
test/terra/backends/aer_simulator/test_chunk.py
|
garrison/qiskit-aer
|
24c51a675b8653c8ad2af587d40b795ac94c07c7
|
[
"Apache-2.0"
] | 933
|
2018-12-21T02:56:49.000Z
|
2022-03-30T01:19:54.000Z
|
test/terra/backends/aer_simulator/test_chunk.py
|
chriseclectic/qiskit-aer
|
61b028b7ccd1d6e96c8de48a10648c0bc3c07ff9
|
[
"Apache-2.0"
] | 313
|
2018-12-19T14:52:55.000Z
|
2022-02-28T20:20:14.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019, 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
AerSimulator Integration Tests
"""
# pylint: disable=no-member
import copy
from ddt import ddt
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.circuit.library import QuantumVolume, QFT
from qiskit.compiler import transpile
from test.terra.backends.simulator_test_case import (
SimulatorTestCase, supported_methods)
@ddt
class TestChunkSimulators(SimulatorTestCase):
"""AerSimulator Multi-chunk tests."""
OPTIONS = {
"seed_simulator": 271828,
"max_parallel_threads": 1
}
@supported_methods(['statevector', 'density_matrix'])
def test_chunk_QuantumVolume(self, method, device):
"""Test multi-chunk with quantum volume"""
opts = {
"blocking_enable": True,
"blocking_qubits": 2
}
backend = self.backend(method=method, device=device, **opts)
backend_no_chunk = self.backend(method=method, device=device)
shots = 100
num_qubits = 4
depth = 10
circuit = transpile(QuantumVolume(num_qubits, depth, seed=0),
backend=backend,
optimization_level=0)
circuit.measure_all()
result = backend.run(circuit, shots=shots, memory=True).result()
counts = result.get_counts(circuit)
result_no_chunk = backend_no_chunk.run(circuit, shots=shots, memory=True).result()
counts_no_chunk = result_no_chunk.get_counts(circuit)
self.assertEqual(counts_no_chunk, counts)
@supported_methods(['statevector', 'density_matrix'])
def test_chunk_QuantumVolumeWithFusion(self, method, device):
"""Test multi-chunk with fused quantum volume"""
opts_no_chunk = {
"fusion_enable": True,
"fusion_threshold": 5,
}
opts_chunk = copy.copy(opts_no_chunk)
opts_chunk["blocking_enable"] = True
opts_chunk["blocking_qubits"] = 4
backend = self.backend(
method=method, device=device, **opts_chunk)
backend_no_chunk = self.backend(
method=method, device=device, **opts_no_chunk)
shots = 100
num_qubits = 8
depth = 10
circuit = transpile(QuantumVolume(num_qubits, depth, seed=0),
backend=backend, optimization_level=0)
circuit.measure_all()
result = backend.run(circuit, shots=shots, memory=True).result()
counts = result.get_counts(circuit)
result_no_chunk = backend_no_chunk.run(circuit, shots=shots, memory=True).result()
counts_no_chunk = result_no_chunk.get_counts(circuit)
self.assertEqual(counts_no_chunk, counts)
@supported_methods(['statevector', 'density_matrix'])
def test_chunk_QFTWithFusion(self, method, device):
"""Test multi-chunk with fused QFT (testing multi-chunk diagonal matrix)"""
opts_no_chunk = {
"fusion_enable": True,
"fusion_threshold": 5,
}
opts_chunk = copy.copy(opts_no_chunk)
opts_chunk["blocking_enable"] = True
opts_chunk["blocking_qubits"] = 4
backend = self.backend(
method=method, device=device, **opts_chunk)
backend_no_chunk = self.backend(
method=method, device=device, **opts_no_chunk)
shots = 100
num_qubits = 8
circuit = transpile(QFT(num_qubits), backend=backend,
optimization_level=0)
circuit.measure_all()
result = backend.run(circuit, shots=shots, memory=True).result()
counts = result.get_counts(circuit)
result_no_chunk = backend_no_chunk.run(circuit, shots=shots, memory=True).result()
counts_no_chunk = result_no_chunk.get_counts(circuit)
self.assertEqual(counts_no_chunk, counts)
@supported_methods(['statevector', 'density_matrix'])
def test_chunk_pauli(self, method, device):
"""Test multi-chunk pauli gate"""
opts_no_chunk = {"fusion_enable": False}
opts_chunk = copy.copy(opts_no_chunk)
opts_chunk["blocking_enable"] = True
opts_chunk["blocking_qubits"] = 3
backend = self.backend(
method=method, device=device, **opts_chunk)
backend_no_chunk = self.backend(
method=method, device=device, **opts_no_chunk)
shots = 100
qr = QuantumRegister(5)
cr = ClassicalRegister(5)
regs = (qr, cr)
circuit = QuantumCircuit(*regs)
circuit.h(qr[0])
circuit.h(qr[1])
circuit.h(qr[2])
circuit.h(qr[3])
circuit.h(qr[4])
circuit.pauli('YXZYX',qr)
circuit.measure_all()
result = backend.run(circuit, shots=shots, memory=True).result()
counts = result.get_counts(circuit)
result_no_chunk = backend_no_chunk.run(circuit, shots=shots, memory=True).result()
counts_no_chunk = result_no_chunk.get_counts(circuit)
self.assertEqual(counts_no_chunk, counts)
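# Standalone sketch of the same chunking options outside the test harness
# (illustrative; assumes AerSimulator is importable from qiskit.providers.aer):
#
#     from qiskit.providers.aer import AerSimulator
#     backend = AerSimulator(method='statevector', blocking_enable=True,
#                            blocking_qubits=2)
#     circuit = transpile(QuantumVolume(4, 10, seed=0), backend=backend,
#                         optimization_level=0)
#     circuit.measure_all()
#     counts = backend.run(circuit, shots=100).result().get_counts()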
| 36.039216
| 90
| 0.649075
|
423a03a8f8e6423010f19833ccb9f75fd214ebb0
| 878
|
py
|
Python
|
alipay/aop/api/domain/KoubeiCateringTablelistQueryModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/KoubeiCateringTablelistQueryModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/KoubeiCateringTablelistQueryModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiCateringTablelistQueryModel(object):
def __init__(self):
self._shop_id = None
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
def to_alipay_dict(self):
params = dict()
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiCateringTablelistQueryModel()
if 'shop_id' in d:
o.shop_id = d['shop_id']
return o
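# Usage sketch (illustrative; the shop_id value below is a placeholder):
if __name__ == '__main__':
    model = KoubeiCateringTablelistQueryModel()
    model.shop_id = '2018XXXXXXXXXXXX'
    payload = model.to_alipay_dict()   # {'shop_id': '2018XXXXXXXXXXXX'}
    restored = KoubeiCateringTablelistQueryModel.from_alipay_dict(payload)
    assert restored.shop_id == model.shop_id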
| 21.414634
| 65
| 0.587699
|
a3f22495213c4cf49f33ac6b407f9698218e262c
| 1,829
|
py
|
Python
|
var/spack/repos/builtin/packages/r-survey/package.py
|
RemoteConnectionManager/spack
|
f2967b6c16effd26ce007cf86cadbb645c574f50
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/r-survey/package.py
|
RemoteConnectionManager/spack
|
f2967b6c16effd26ce007cf86cadbb645c574f50
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 17
|
2018-09-20T18:32:50.000Z
|
2019-12-04T16:58:12.000Z
|
var/spack/repos/builtin/packages/r-survey/package.py
|
RemoteConnectionManager/spack
|
f2967b6c16effd26ce007cf86cadbb645c574f50
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2019-09-21T07:45:10.000Z
|
2019-09-21T07:45:10.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RSurvey(RPackage):
"""Summary statistics, two-sample tests, rank tests, generalised linear
models, cumulative link models, Cox models, loglinear models, and general
maximum pseudolikelihood estimation for multistage stratified,
cluster-sampled, unequally weighted survey samples. Variances by Taylor
series linearisation or replicate weights. Post-stratification,
calibration, and raking. Two-phase subsampling designs. Graphics. PPS
sampling without replacement. Principal components, factor analysis."""
homepage = "http://r-survey.r-forge.r-project.org/survey/"
url = "https://cloud.r-project.org/src/contrib/survey_3.30-3.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/survey"
version('3.36', sha256='90f32e9d2b52eacf881e6717a4b5edfc5a3beb5da516f8372293549589d79475')
version('3.35-1', sha256='11e5ddde9c8c21dfaed0b1247036e068ad32782c76ff71f7937eb7585dd364db')
version('3.30-3', 'c70cdae9cb43d35abddd11173d64cad0')
depends_on('r@2.14.0:', when='@:3.31-5', type=('build', 'run'))
depends_on('r@2.16.0:', when='@3.32:3.34', type=('build', 'run'))
depends_on('r@3.1.0:', when='@3.35:', type=('build', 'run'))
depends_on('r-matrix', when='@3.31:', type=('build', 'run'))
depends_on('r-survival', when='@3.31:', type=('build', 'run'))
depends_on('r-lattice', when='@3.31:', type=('build', 'run'))
depends_on('r-minqa', when='@3.34:', type=('build', 'run'))
depends_on('r-numderiv', when='@3.34:', type=('build', 'run'))
depends_on('r-mitools@2.4:', when='@3.36:', type=('build', 'run'))
| 52.257143
| 96
| 0.694369
|
f399bd0b0266e5bffdbd3b41b802d7e07421c880
| 3,657
|
py
|
Python
|
tests/test_glob.py
|
olson-sean-k/peru
|
67b14affa621faae2ac4e0ee680065ab611d56d9
|
[
"MIT"
] | null | null | null |
tests/test_glob.py
|
olson-sean-k/peru
|
67b14affa621faae2ac4e0ee680065ab611d56d9
|
[
"MIT"
] | null | null | null |
tests/test_glob.py
|
olson-sean-k/peru
|
67b14affa621faae2ac4e0ee680065ab611d56d9
|
[
"MIT"
] | null | null | null |
import collections
import re
import peru.glob as glob
import shared
class GlobTest(shared.PeruTest):
def test_split_on_stars_interpreting_backslashes(self):
cases = [
('', ['']),
('*', ['', '']),
('abc', ['abc']),
('abc\\', ['abc\\']),
('abc\\n', ['abc\\n']),
('abc\\\\', ['abc\\']),
('ab*c', ['ab', 'c']),
('*abc*', ['', 'abc', '']),
(r'a\*bc', ['a*bc']),
(r'a\\*bc', ['a\\', 'bc']),
(r'a\\\*bc', ['a\\*bc']),
(r'a\\\\*bc', ['a\\\\', 'bc']),
]
for input, output in cases:
self.assertEqual(
output,
glob.split_on_stars_interpreting_backslashes(input),
'Failed split for input {}'.format(input))
def test_glob_to_path_regex(self):
Case = collections.namedtuple('Case', ['glob', 'matches', 'excludes'])
cases = [
Case(glob='a/b/c',
matches=['a/b/c'],
excludes=['a/b', 'a/b/c/', '/a/b/c', 'a/b/c/d']),
# * should be able to match nothing.
Case(glob='a/*b/c',
matches=['a/b/c', 'a/xb/c'],
excludes=['a/x/c', 'a/c', 'a//c']),
# But * by itself should never match an empty path component.
Case(glob='a/*/c',
matches=['a/b/c', 'a/boooo/c', 'a/*/c'],
excludes=['a/c', 'a/b/d/c', 'a//c']),
# Similarly, ** does not match empty path components. It's tempting
# to allow this, but we never want '**/c' to match '/c'.
Case(glob='a/**/c',
matches=['a/b/c', 'a/d/e/f/g/c', 'a/c'],
excludes=['a/b/c/d', 'x/a/c', 'a//c']),
Case(glob='a/**/**/c',
matches=['a/b/c', 'a/d/e/f/g/c', 'a/c'],
excludes=['a/b/c/d', 'x/a/c', 'a//c']),
Case(glob='**/c',
matches=['a/b/c', 'c'],
excludes=['/c', 'c/d']),
Case(glob='**/*/c',
matches=['a/b/c', 'a/c'],
excludes=['c', '/c']),
# Leading slashes should be preserved if present.
Case(glob='/a',
matches=['/a'],
excludes=['a']),
Case(glob='/**/c',
matches=['/a/b/c', '/c'],
excludes=['c', 'a/b/c']),
# Make sure special characters are escaped properly.
Case(glob='a|b',
matches=['a|b'],
excludes=['a', 'b'])
]
for case in cases:
regex = glob.glob_to_path_regex(case.glob)
for m in case.matches:
assert re.match(regex, m), \
'Glob {} (regex: {} ) should match path {}'.format(
case.glob, regex, m)
for e in case.excludes:
assert not re.match(regex, e), \
'Glob {} (regex: {} ) should not match path {}'.format(
case.glob, regex, e)
def test_bad_globs(self):
bad_globs = [
'**',
'a/b/**',
'a/b/**/',
'a/b/**c/d',
]
for bad_glob in bad_globs:
with self.assertRaises(glob.GlobError):
glob.glob_to_path_regex(bad_glob)
def test_unglobbed_prefix(self):
assert glob.unglobbed_prefix('a/b/c*/d') == 'a/b'
assert glob.unglobbed_prefix('a/b/**/d') == 'a/b'
assert glob.unglobbed_prefix('/a/b/*/d') == '/a/b'
assert glob.unglobbed_prefix('*/a/b') == ''
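# Quick sketch of the API exercised above (illustrative):
#
#     regex = glob.glob_to_path_regex('a/**/c')
#     assert re.match(regex, 'a/b/c') and not re.match(regex, 'a/b/c/d')
#     assert glob.unglobbed_prefix('a/b/**/d') == 'a/b'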
| 37.701031
| 79
| 0.411266
|
d7bc9fae7f366ff6e7e593cf72207527ed2ed4f5
| 1,224
|
py
|
Python
|
travelmap.py
|
alexmmorgan/alexmmorgan.github.io
|
d5f63822e865bcecf8539fc337db2fa2fcc642a5
|
[
"MIT"
] | null | null | null |
travelmap.py
|
alexmmorgan/alexmmorgan.github.io
|
d5f63822e865bcecf8539fc337db2fa2fcc642a5
|
[
"MIT"
] | null | null | null |
travelmap.py
|
alexmmorgan/alexmmorgan.github.io
|
d5f63822e865bcecf8539fc337db2fa2fcc642a5
|
[
"MIT"
] | null | null | null |
# # Leaflet cluster map of talk locations
#
# (c) 2016-2017 R. Stuart Geiger, released under the MIT license
#
# Run this from the _talks/ directory, which contains .md files of all your talks.
# This scrapes the location YAML field from each .md file, geolocates it with
# geopy/Nominatim, and uses the getorg library to output data, HTML,
# and Javascript for a standalone cluster map.
#
# Requires: glob, getorg, geopy
#
# Updated from Talks to Travel
import glob
import getorg
from geopy import Nominatim
g = glob.glob("*.md")
geocoder = Nominatim()
location_dict = {}
location = ""
permalink = ""
title = ""
for file in g:
with open(file, 'r') as f:
lines = f.read()
if lines.find('location: "') > 1:
loc_start = lines.find('location: "') + 11
lines_trim = lines[loc_start:]
loc_end = lines_trim.find('"')
location = lines_trim[:loc_end]
location_dict[location] = geocoder.geocode(location)
print(location, "\n", location_dict[location])
m = getorg.orgmap.create_map_obj()
getorg.orgmap.output_html_cluster_map(location_dict, folder_name="../travelmap", hashed_usernames=False)
| 24.48
| 104
| 0.652778
|
0f54c2e66133782dfb4b830ea1ac61519d8b9b37
| 2,148
|
py
|
Python
|
internal/notes/builtin-SAVE/packages/tycho2/package.py
|
HPCToolkit/hpctest
|
5ff4455582bf39e75530a31badcf6142081b386b
|
[
"BSD-3-Clause"
] | 1
|
2019-01-17T20:07:19.000Z
|
2019-01-17T20:07:19.000Z
|
internal/notes/builtin-SAVE/packages/tycho2/package.py
|
HPCToolkit/hpctest
|
5ff4455582bf39e75530a31badcf6142081b386b
|
[
"BSD-3-Clause"
] | null | null | null |
internal/notes/builtin-SAVE/packages/tycho2/package.py
|
HPCToolkit/hpctest
|
5ff4455582bf39e75530a31badcf6142081b386b
|
[
"BSD-3-Clause"
] | 2
|
2019-08-06T18:13:57.000Z
|
2021-11-05T18:19:49.000Z
|
##############################################################################
# Copyright (c) 2017, Los Alamos National Security, LLC
# Produced at the Los Alamos National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Tycho2(MakefilePackage):
"""A neutral particle transport mini-app to study performance of sweeps
on unstructured, 3D tetrahedral meshes.
"""
homepage = "https://github.com/lanl/tycho2"
url = "https://github.com/lanl/tycho2/tarball/v0.1"
version('develop', git='https://github.com/lanl/tycho2', branch='master')
depends_on("mpi")
def patch(self):
        # make.inc is included by Makefile to set MPICC, but we set that
        # through build_targets() below, so an empty include file is fine.
touch('make.inc')
@property
def build_targets(self):
targets = [
'MPICC={0} -std=c++11 {1}'.format(self.spec['mpi'].mpicxx,
self.compiler.openmp_flag)
]
return targets
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('sweep.x', prefix.bin)
| 37.684211
| 78
| 0.636406
|
fef0f03740630e3927bc8367f7b0e98d4eb08c75
| 25,832
|
py
|
Python
|
os_ken/services/protocols/bgp/info_base/vrf.py
|
rolaya/os-ken
|
10009e41539c737c7c423f13e4f5bc5f46d219ff
|
[
"Apache-2.0"
] | 1
|
2019-04-24T04:01:07.000Z
|
2019-04-24T04:01:07.000Z
|
os_ken/services/protocols/bgp/info_base/vrf.py
|
anlaneg/os-ken
|
379a7694c3129cc0156343af71f4fca8830d9de5
|
[
"Apache-2.0"
] | null | null | null |
os_ken/services/protocols/bgp/info_base/vrf.py
|
anlaneg/os-ken
|
379a7694c3129cc0156343af71f4fca8830d9de5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines base data types and models required specifically for VRF support.
"""
import abc
import logging
import six
from os_ken.lib.packet.bgp import BGP_ATTR_TYPE_ORIGIN
from os_ken.lib.packet.bgp import BGP_ATTR_TYPE_AS_PATH
from os_ken.lib.packet.bgp import BGP_ATTR_TYPE_EXTENDED_COMMUNITIES
from os_ken.lib.packet.bgp import BGP_ATTR_TYEP_PMSI_TUNNEL_ATTRIBUTE
from os_ken.lib.packet.bgp import BGP_ATTR_TYPE_MULTI_EXIT_DISC
from os_ken.lib.packet.bgp import BGPPathAttributeOrigin
from os_ken.lib.packet.bgp import BGPPathAttributeAsPath
from os_ken.lib.packet.bgp import EvpnEthernetSegmentNLRI
from os_ken.lib.packet.bgp import BGPPathAttributeExtendedCommunities
from os_ken.lib.packet.bgp import BGPPathAttributeMultiExitDisc
from os_ken.lib.packet.bgp import BGPEncapsulationExtendedCommunity
from os_ken.lib.packet.bgp import BGPEvpnEsiLabelExtendedCommunity
from os_ken.lib.packet.bgp import BGPEvpnEsImportRTExtendedCommunity
from os_ken.lib.packet.bgp import BGPPathAttributePmsiTunnel
from os_ken.lib.packet.bgp import PmsiTunnelIdIngressReplication
from os_ken.lib.packet.bgp import RF_L2_EVPN
from os_ken.lib.packet.bgp import EvpnMacIPAdvertisementNLRI
from os_ken.lib.packet.bgp import EvpnIpPrefixNLRI
from os_ken.lib.packet.safi import (
IP_FLOWSPEC,
VPN_FLOWSPEC,
)
from os_ken.services.protocols.bgp.base import OrderedDict
from os_ken.services.protocols.bgp.constants import VPN_TABLE
from os_ken.services.protocols.bgp.constants import VRF_TABLE
from os_ken.services.protocols.bgp.info_base.base import Destination
from os_ken.services.protocols.bgp.info_base.base import Path
from os_ken.services.protocols.bgp.info_base.base import Table
from os_ken.services.protocols.bgp.utils.bgp import create_rt_extended_community
from os_ken.services.protocols.bgp.utils.stats import LOCAL_ROUTES
from os_ken.services.protocols.bgp.utils.stats import REMOTE_ROUTES
from os_ken.services.protocols.bgp.utils.stats import RESOURCE_ID
from os_ken.services.protocols.bgp.utils.stats import RESOURCE_NAME
LOG = logging.getLogger('bgpspeaker.info_base.vrf')
@six.add_metaclass(abc.ABCMeta)
class VrfTable(Table):
"""Virtual Routing and Forwarding information base.
    Keeps destinations imported into the VRF it represents.
"""
ROUTE_FAMILY = None
VPN_ROUTE_FAMILY = None
NLRI_CLASS = None
VRF_PATH_CLASS = None
VRF_DEST_CLASS = None
def __init__(self, vrf_conf, core_service, signal_bus):
Table.__init__(self, vrf_conf.route_dist, core_service, signal_bus)
self._vrf_conf = vrf_conf
self._import_maps = []
self.init_import_maps(vrf_conf.import_maps)
def init_import_maps(self, import_maps):
LOG.debug(
"Initializing import maps (%s) for %r", import_maps, self
)
del self._import_maps[:]
importmap_manager = self._core_service.importmap_manager
for name in import_maps:
import_map = importmap_manager.get_import_map_by_name(name)
if import_map is None:
raise KeyError('No import map with name %s' % name)
self._import_maps.append(import_map)
@property
def import_rts(self):
return self._vrf_conf.import_rts
@property
def vrf_conf(self):
return self._vrf_conf
def _table_key(self, nlri):
"""Return a key that will uniquely identify this NLRI inside
this table.
"""
# Note: We use `prefix` representation of the NLRI, because
# BGP route can be identified without the route distinguisher
# value in the VRF space.
return nlri.prefix
def _create_dest(self, nlri):
return self.VRF_DEST_CLASS(self, nlri)
def append_import_map(self, import_map):
self._import_maps.append(import_map)
def remove_import_map(self, import_map):
self._import_maps.remove(import_map)
def get_stats_summary_dict(self):
"""Returns count of local and remote paths."""
remote_route_count = 0
local_route_count = 0
for dest in self.values():
for path in dest.known_path_list:
if (hasattr(path.source, 'version_num') or
path.source == VPN_TABLE):
remote_route_count += 1
else:
local_route_count += 1
return {RESOURCE_ID: self._vrf_conf.id,
RESOURCE_NAME: self._vrf_conf.name,
REMOTE_ROUTES: remote_route_count,
LOCAL_ROUTES: local_route_count}
def import_vpn_paths_from_table(self, vpn_table, import_rts=None):
for vpn_dest in vpn_table.values():
vpn_path = vpn_dest.best_path
if not vpn_path:
continue
if import_rts is None:
import_rts = set(self.import_rts)
else:
import_rts = set(import_rts)
path_rts = vpn_path.get_rts()
if import_rts.intersection(path_rts):
# TODO(PH): When (re-)implementing extranet, check what should
# be the label reported back to NC for local paths coming from
# other VRFs.
self.import_vpn_path(vpn_path)
def import_vpn_path(self, vpn_path):
"""Imports `vpnv(4|6)_path` into `vrf(4|6)_table` or `evpn_path`
into vrfevpn_table`.
:Parameters:
- `vpn_path`: (Path) VPN path that will be cloned and imported
into VRF.
Note: Does not do any checking if this import is valid.
"""
assert vpn_path.route_family == self.VPN_ROUTE_FAMILY
        # If the source of the given vpnv4 path is the NC, we import it into
        # the given VRF table because of an extranet setting. Hence we
        # identify the source of EXTRANET prefixes as VRF_TABLE, else
        # VPN_TABLE.
source = vpn_path.source
if not source:
source = VRF_TABLE
if self.VPN_ROUTE_FAMILY == RF_L2_EVPN:
# Because NLRI class is the same if the route family is EVPN,
# we re-use the NLRI instance.
vrf_nlri = vpn_path.nlri
elif self.ROUTE_FAMILY.safi in [IP_FLOWSPEC, VPN_FLOWSPEC]:
vrf_nlri = self.NLRI_CLASS(rules=vpn_path.nlri.rules)
else: # self.VPN_ROUTE_FAMILY in [RF_IPv4_VPN, RF_IPv6_VPN]
# Copy NLRI instance
ip, masklen = vpn_path.nlri.prefix.split('/')
vrf_nlri = self.NLRI_CLASS(length=int(masklen), addr=ip)
vrf_path = self.VRF_PATH_CLASS(
puid=self.VRF_PATH_CLASS.create_puid(
vpn_path.nlri.route_dist,
vpn_path.nlri.prefix),
source=source,
nlri=vrf_nlri,
src_ver_num=vpn_path.source_version_num,
pattrs=vpn_path.pathattr_map,
nexthop=vpn_path.nexthop,
is_withdraw=vpn_path.is_withdraw,
label_list=getattr(vpn_path.nlri, 'label_list', None),
)
if self._is_vrf_path_already_in_table(vrf_path):
return None
if self._is_vrf_path_filtered_out_by_import_maps(vrf_path):
return None
else:
vrf_dest = self.insert(vrf_path)
self._signal_bus.dest_changed(vrf_dest)
def _is_vrf_path_filtered_out_by_import_maps(self, vrf_path):
for import_map in self._import_maps:
if import_map.match(vrf_path):
return True
return False
def _is_vrf_path_already_in_table(self, vrf_path):
dest = self._get_dest(vrf_path.nlri)
if dest is None:
return False
return vrf_path in dest.known_path_list
def apply_import_maps(self):
changed_dests = []
for dest in self.values():
assert isinstance(dest, VrfDest)
for import_map in self._import_maps:
for path in dest.known_path_list:
if import_map.match(path):
dest.withdraw_path(path)
changed_dests.append(dest)
return changed_dests
def insert_vrf_path(self, nlri, next_hop=None,
gen_lbl=False, is_withdraw=False, **kwargs):
assert nlri
pattrs = None
label_list = []
vrf_conf = self.vrf_conf
if not is_withdraw:
table_manager = self._core_service.table_manager
if gen_lbl and next_hop:
# Label per next_hop demands we use a different label
# per next_hop. Here connected interfaces are advertised per
# VRF.
label_key = (vrf_conf.route_dist, next_hop)
nh_label = table_manager.get_nexthop_label(label_key)
if not nh_label:
nh_label = table_manager.get_next_vpnv4_label()
table_manager.set_nexthop_label(label_key, nh_label)
label_list.append(nh_label)
elif gen_lbl:
# If we do not have next_hop, get a new label.
label_list.append(table_manager.get_next_vpnv4_label())
# Set MPLS labels with the generated labels
if gen_lbl and isinstance(nlri, EvpnMacIPAdvertisementNLRI):
nlri.mpls_labels = label_list[:2]
elif gen_lbl and isinstance(nlri, EvpnIpPrefixNLRI):
nlri.mpls_label = label_list[0]
# Create a dictionary for path-attrs.
pattrs = OrderedDict()
# MpReachNlri and/or MpUnReachNlri attribute info. is contained
# in the path. Hence we do not add these attributes here.
from os_ken.services.protocols.bgp.core import EXPECTED_ORIGIN
pattrs[BGP_ATTR_TYPE_ORIGIN] = BGPPathAttributeOrigin(
EXPECTED_ORIGIN)
pattrs[BGP_ATTR_TYPE_AS_PATH] = BGPPathAttributeAsPath([])
communities = []
# Set ES-Import Route Target
if isinstance(nlri, EvpnEthernetSegmentNLRI):
subtype = 2
es_import = nlri.esi.mac_addr
communities.append(BGPEvpnEsImportRTExtendedCommunity(
subtype=subtype,
es_import=es_import))
for rt in vrf_conf.export_rts:
communities.append(create_rt_extended_community(rt, 2))
for soo in vrf_conf.soo_list:
communities.append(create_rt_extended_community(soo, 3))
# Set Tunnel Encapsulation Attribute
tunnel_type = kwargs.get('tunnel_type', None)
if tunnel_type:
communities.append(
BGPEncapsulationExtendedCommunity.from_str(tunnel_type))
# Set ESI Label Extended Community
redundancy_mode = kwargs.get('redundancy_mode', None)
if redundancy_mode is not None:
subtype = 1
flags = 0
from os_ken.services.protocols.bgp.api.prefix import (
REDUNDANCY_MODE_SINGLE_ACTIVE)
if redundancy_mode == REDUNDANCY_MODE_SINGLE_ACTIVE:
flags |= BGPEvpnEsiLabelExtendedCommunity.SINGLE_ACTIVE_BIT
vni = kwargs.get('vni', None)
if vni is not None:
communities.append(BGPEvpnEsiLabelExtendedCommunity(
subtype=subtype,
flags=flags,
vni=vni))
else:
communities.append(BGPEvpnEsiLabelExtendedCommunity(
subtype=subtype,
flags=flags,
mpls_label=label_list[0]))
pattrs[BGP_ATTR_TYPE_EXTENDED_COMMUNITIES] = \
BGPPathAttributeExtendedCommunities(communities=communities)
if vrf_conf.multi_exit_disc:
pattrs[BGP_ATTR_TYPE_MULTI_EXIT_DISC] = \
BGPPathAttributeMultiExitDisc(vrf_conf.multi_exit_disc)
# Set PMSI Tunnel Attribute
pmsi_tunnel_type = kwargs.get('pmsi_tunnel_type', None)
if pmsi_tunnel_type is not None:
from os_ken.services.protocols.bgp.api.prefix import (
PMSI_TYPE_INGRESS_REP)
if pmsi_tunnel_type == PMSI_TYPE_INGRESS_REP:
tunnel_id = PmsiTunnelIdIngressReplication(
tunnel_endpoint_ip=self._core_service.router_id)
else: # pmsi_tunnel_type == PMSI_TYPE_NO_TUNNEL_INFO
tunnel_id = None
pattrs[BGP_ATTR_TYEP_PMSI_TUNNEL_ATTRIBUTE] = \
BGPPathAttributePmsiTunnel(pmsi_flags=0,
tunnel_type=pmsi_tunnel_type,
tunnel_id=tunnel_id,
vni=kwargs.get('vni', None))
puid = self.VRF_PATH_CLASS.create_puid(
vrf_conf.route_dist, nlri.prefix)
path = self.VRF_PATH_CLASS(
puid, None, nlri, 0, pattrs=pattrs,
nexthop=next_hop, label_list=label_list,
is_withdraw=is_withdraw
)
# Insert the path into VRF table, get affected destination so that we
# can process it further.
eff_dest = self.insert(path)
# Enqueue the eff_dest for further processing.
self._signal_bus.dest_changed(eff_dest)
return label_list
def clean_uninteresting_paths(self, interested_rts=None):
if interested_rts is None:
interested_rts = set(self.vrf_conf.import_rts)
return super(VrfTable, self).clean_uninteresting_paths(interested_rts)
@six.add_metaclass(abc.ABCMeta)
class VrfDest(Destination):
"""Base class for VRF destination."""
def __init__(self, table, nlri):
super(VrfDest, self).__init__(table, nlri)
self._route_dist = self._table.vrf_conf.route_dist
@property
def nlri_str(self):
# Returns `prefix` without the route distinguisher value, because
# a destination in VRF space can be identified without the route
# distinguisher.
return self._nlri.prefix
def _best_path_lost(self):
# Have to send update messages for withdraw of best-path to Network
# controller or Global table.
old_best_path = self._best_path
self._best_path = None
if old_best_path is None:
return
if old_best_path.source is not None:
# Send update-withdraw msg. to Sink. Create withdraw path
# out of old best path and queue it into flexinet sinks.
old_best_path = old_best_path.clone(for_withdrawal=True)
self._core_service.update_flexinet_peers(old_best_path,
self._route_dist)
else:
# Create withdraw-path out of old best path.
gpath = old_best_path.clone_to_vpn(self._route_dist,
for_withdrawal=True)
# Insert withdraw into global table and enqueue the destination
# for further processing.
tm = self._core_service.table_manager
tm.learn_path(gpath)
def _new_best_path(self, best_path):
LOG.debug('New best path selected for destination %s', self)
old_best_path = self._best_path
assert (best_path != old_best_path)
self._best_path = best_path
# Distribute new best-path to flexinet-peers.
if best_path.source is not None:
# Since route-refresh just causes the version number to
# go up and this changes best-path, we check if new-
# best-path is really different than old-best-path that
# warrants sending update to flexinet peers.
def really_diff():
old_labels = old_best_path.label_list
new_labels = best_path.label_list
return old_best_path.nexthop != best_path.nexthop \
or set(old_labels) != set(new_labels)
if not old_best_path or (old_best_path and really_diff()):
# Create OutgoingRoute and queue it into NC sink.
self._core_service.update_flexinet_peers(
best_path, self._route_dist
)
else:
# If NC is source, we create new path and insert into global
# table.
gpath = best_path.clone_to_vpn(self._route_dist)
tm = self._core_service.table_manager
tm.learn_path(gpath)
LOG.debug('VRF table %s has new best path: %s',
self._route_dist, self.best_path)
def _remove_withdrawals(self):
"""Removes withdrawn paths.
Note:
            We may have a disproportionate number of withdraws compared to known paths
since not all paths get installed into the table due to bgp policy and
we can receive withdraws for such paths and withdrawals may not be
stopped by the same policies.
"""
LOG.debug('Removing %s withdrawals', len(self._withdraw_list))
        # If we have no withdrawals, we have nothing to do.
if not self._withdraw_list:
return
        # If we have some withdrawals and no known paths, it means it is safe to
# delete these withdraws.
if not self._known_path_list:
LOG.debug('Found %s withdrawals for path(s) that did not get'
' installed.', len(self._withdraw_list))
del (self._withdraw_list[:])
return
# If we have some known paths and some withdrawals, we find matches and
# delete them first.
matches = []
w_matches = []
# Match all withdrawals from destination paths.
for withdraw in self._withdraw_list:
match = None
for path in self._known_path_list:
                # We have a match if the sources are the same.
if path.puid == withdraw.puid:
match = path
matches.append(path)
w_matches.append(withdraw)
# One withdraw can remove only one path.
break
            # We do not have any match for this withdraw.
if not match:
LOG.debug('No matching path for withdraw found, may be path '
'was not installed into table: %s',
withdraw)
# If we have partial match.
if len(matches) != len(self._withdraw_list):
LOG.debug('Did not find match for some withdrawals. Number of '
'matches(%s), number of withdrawals (%s)',
len(matches), len(self._withdraw_list))
# Clear matching paths and withdrawals.
for match in matches:
self._known_path_list.remove(match)
for w_match in w_matches:
self._withdraw_list.remove(w_match)
def _remove_old_paths(self):
"""Identifies which of known paths are old and removes them.
Known paths will no longer have paths whose new version is present in
new paths.
"""
new_paths = self._new_path_list
known_paths = self._known_path_list
for new_path in new_paths:
old_paths = []
for path in known_paths:
                # Here we only check whether the source is the same and do not
                # compare path version numbers, as new_paths are implicit
                # withdrawals of old paths and, when doing RouteRefresh (not
                # EnhancedRouteRefresh), we get the same paths again.
if new_path.puid == path.puid:
old_paths.append(path)
break
for old_path in old_paths:
known_paths.remove(old_path)
LOG.debug('Implicit withdrawal of old path, since we have'
' learned new path from same source: %s', old_path)
def _validate_path(self, path):
if not path or not hasattr(path, 'label_list'):
raise ValueError('Invalid value of path. Expected type '
'with attribute label_list got %s' % path)
@six.add_metaclass(abc.ABCMeta)
class VrfPath(Path):
"""Represents a way of reaching an IP destination with a VPN.
"""
__slots__ = ('_label_list', '_puid')
ROUTE_FAMILY = None
VPN_PATH_CLASS = None
VPN_NLRI_CLASS = None
def __init__(self, puid, source, nlri, src_ver_num,
pattrs=None, nexthop=None,
is_withdraw=False, label_list=None):
"""Initializes a Vrf path.
Parameters:
- `puid`: (str) path ID, identifies VPN path from which this
VRF path was imported.
- `label_list`: (list) List of labels for this path.
Note: other parameters are as documented in super class.
"""
if self.ROUTE_FAMILY.safi in [IP_FLOWSPEC, VPN_FLOWSPEC]:
nexthop = '0.0.0.0'
Path.__init__(self, source, nlri, src_ver_num, pattrs, nexthop,
is_withdraw)
if label_list is None:
label_list = []
self._label_list = label_list
self._puid = puid
@property
def puid(self):
return self._puid
@property
def origin_rd(self):
tokens = self.puid.split(':')
return tokens[0] + ':' + tokens[1]
@property
def label_list(self):
return self._label_list[:]
@property
def nlri_str(self):
# Returns `prefix` without the route distinguisher value, because
# a destination in VRF space can be identified without the route
# distinguisher.
return self._nlri.prefix
@staticmethod
def create_puid(route_dist, ip_prefix):
assert route_dist and ip_prefix
return str(route_dist) + ':' + ip_prefix
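        # For example (illustrative): create_puid('65000:100', '10.1.1.0/24')
        # returns '65000:100:10.1.1.0/24'; origin_rd then recovers '65000:100'
        # from the first two ':'-separated tokens.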
def clone(self, for_withdrawal=False):
pathattrs = None
if not for_withdrawal:
pathattrs = self.pathattr_map
clone = self.__class__(
self.puid,
self._source,
self.nlri,
self.source_version_num,
pattrs=pathattrs,
nexthop=self.nexthop,
is_withdraw=for_withdrawal,
label_list=self.label_list
)
return clone
def clone_to_vpn(self, route_dist, for_withdrawal=False):
if self.ROUTE_FAMILY == RF_L2_EVPN:
# Because NLRI class is the same if the route family is EVPN,
# we re-use the NLRI instance.
vpn_nlri = self._nlri
elif self.ROUTE_FAMILY.safi in [IP_FLOWSPEC, VPN_FLOWSPEC]:
vpn_nlri = self.VPN_NLRI_CLASS(route_dist=route_dist,
rules=self.nlri.rules)
else: # self.ROUTE_FAMILY in [RF_IPv4_UC, RF_IPv6_UC]
ip, masklen = self._nlri.prefix.split('/')
vpn_nlri = self.VPN_NLRI_CLASS(length=int(masklen),
addr=ip,
labels=self.label_list,
route_dist=route_dist)
pathattrs = None
if not for_withdrawal:
pathattrs = self.pathattr_map
vpnv_path = self.VPN_PATH_CLASS(
source=self.source,
nlri=vpn_nlri,
src_ver_num=self.source_version_num,
pattrs=pathattrs,
nexthop=self.nexthop,
is_withdraw=for_withdrawal)
return vpnv_path
def __eq__(self, b_path):
if not isinstance(b_path, self.__class__):
return False
if not self.route_family == b_path.route_family:
return False
if not self.puid == b_path.puid:
return False
if not self.label_list == b_path.label_list:
return False
if not self.nexthop == b_path.nexthop:
return False
if not self.pathattr_map == b_path.pathattr_map:
return False
return True
class ImportMap(object):
def match(self, vrf_path):
raise NotImplementedError()
class VrfNlriImportMap(ImportMap):
VRF_PATH_CLASS = None
NLRI_CLASS = None
def __init__(self, prefix):
assert self.VRF_PATH_CLASS is not None
assert self.NLRI_CLASS is not None
self._nlri = self.NLRI_CLASS(prefix)
def match(self, vrf_path):
if vrf_path.route_family != self.VRF_PATH_CLASS.ROUTE_FAMILY:
LOG.error(
"vrf_paths route_family does not match importmaps"
"route_family. Applied to wrong table?")
return False
return vrf_path.nlri == self._nlri
class VrfRtImportMap(ImportMap):
def __init__(self, rt):
self._rt = rt
def match(self, vrf_path):
extcomm = vrf_path.pathattr_map.get(BGP_ATTR_TYPE_EXTENDED_COMMUNITIES)
return extcomm is not None and self._rt in extcomm.rt_list
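# Illustrative note: import maps act as per-VRF filters. For example, a
# VrfRtImportMap('65000:1') (route-target value is a placeholder) matches any
# VRF path whose EXTENDED_COMMUNITIES attribute carries that route target;
# matching paths are skipped on import (see import_vpn_path) and withdrawn by
# VrfTable.apply_import_maps above.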
| 38.962293
| 80
| 0.616987
|
3f0a8a139ea67df44cc97157f36d1c1e006062f9
| 5,188
|
py
|
Python
|
lulu/extractors/ucas.py
|
fakegit/Lulu
|
940c9344df8aab6044ec603f8e78d145b0f9bad7
|
[
"MIT"
] | 922
|
2018-01-17T09:22:26.000Z
|
2022-03-28T04:10:45.000Z
|
lulu/extractors/ucas.py
|
fakegit/Lulu
|
940c9344df8aab6044ec603f8e78d145b0f9bad7
|
[
"MIT"
] | 83
|
2018-01-16T08:33:55.000Z
|
2021-06-25T06:18:24.000Z
|
lulu/extractors/ucas.py
|
fakegit/Lulu
|
940c9344df8aab6044ec603f8e78d145b0f9bad7
|
[
"MIT"
] | 156
|
2018-02-03T06:24:14.000Z
|
2022-01-05T05:39:14.000Z
|
#!/usr/bin/env python
import re
import http.client
from time import time
from copy import copy
from random import random
import xml.etree.ElementTree as ET
from lulu.common import (
match1,
print_info,
get_content,
download_urls,
)
from lulu.config import FAKE_HEADERS
__all__ = ['ucas_download', 'ucas_download_single', 'ucas_download_playlist']
site_info = '中国科学院大学 ucas.ac.cn'
"""
Do not replace http.client with get_content
because UCAS's server does not return data correctly otherwise!
"""
def dictify(r, root=True):
"""http://stackoverflow.com/a/30923963/2946714
"""
if root:
return {r.tag: dictify(r, False)}
d = copy(r.attrib)
if r.text:
d['_text'] = r.text
for x in r.findall('./*'):
if x.tag not in d:
d[x.tag] = []
d[x.tag].append(dictify(x, False))
return d
def _get_video_query_url(resourceID):
# has to be like this
headers = FAKE_HEADERS.copy()
headers.update({
'DNT': '1',
'Referer': 'http://v.ucas.ac.cn/',
'Connection': 'keep-alive',
})
conn = http.client.HTTPConnection('210.76.211.10')
conn.request(
'GET',
'/vplus/remote.do?method=query2&loginname=videocas&pwd=af1c7a4c5f77'
'f790722f7cae474c37e281203765d423a23b&resource=%5B%7B%22resourceID%2'
'2%3A%22{}%22%2C%22on%22%3A1%2C%22time%22%3A600%2C%22eid%22%3A100%2C'
'%22w%22%3A800%2C%22h%22%3A600%7D%5D&timeStamp={}'.format(
resourceID, str(int(time()))
),
headers=headers
)
res = conn.getresponse()
data = res.read()
info = data.decode("utf-8")
return match1(info, r'video":"(.+)"')
def _get_virtualPath(video_query_url):
# getResourceJsCode2
html = get_content(video_query_url)
return match1(
html, r"function\s+getVirtualPath\(\)\s+{\s+return\s+'(\w+)'"
)
def _get_video_list(resourceID):
conn = http.client.HTTPConnection('210.76.211.10')
conn.request(
'GET',
'/vplus/member/resource.do?isyulan=0&method=queryFlashXmlByResourceId'
'&resourceId={resourceID}&randoms={randoms}'.format(
resourceID=resourceID, randoms=random()
)
)
res = conn.getresponse()
data = res.read()
video_xml = data.decode('utf-8')
root = ET.fromstring(video_xml.split('___!!!___')[0])
r = dictify(root)
huge_list = []
# main
huge_list.append(
[i['value'] for i in sorted(
r['video']['mainUrl'][0]['_flv'][0]['part'][0]['video'],
key=lambda k: int(k['index'])
)]
)
# sub
if '_flv' in r['video']['subUrl'][0]:
huge_list.append([i['value'] for i in sorted(
r['video']['subUrl'][0]['_flv'][0]['part'][0]['video'],
key=lambda k: int(k['index'])
)])
return huge_list
def _ucas_get_url_lists_by_resourceID(resourceID):
video_query_url = _get_video_query_url(resourceID)
assert video_query_url != '', 'Cannot find video GUID!'
virtualPath = _get_virtualPath(video_query_url)
assert virtualPath != '', 'Cannot find virtualPath!'
url_lists = _get_video_list(resourceID)
assert url_lists, 'Cannot find any URL to download!'
# make real url
# credit to a mate in UCAS
for video_type_id, video_urls in enumerate(url_lists):
for k, path in enumerate(video_urls):
url_lists[video_type_id][k] = (
'http://210.76.211.10/vplus/member/resource.do?virtualPath='
'{virtualPath}&method=getImgByStream&imgPath={path}'.format(
virtualPath=virtualPath, path=path
)
)
return url_lists
def ucas_download_single(url, info_only=False, **kwargs):
'''video page
'''
html = get_content(url)
# resourceID is UUID
resourceID = re.findall(
r'resourceID":"([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-'
r'[0-9a-f]{12})',
html
)[0]
assert resourceID != '', 'Cannot find resourceID!'
title = match1(html, r'<div class="bc-h">(.+)</div>')
url_lists = _ucas_get_url_lists_by_resourceID(resourceID)
assert url_lists, 'Cannot find any URL of such class!'
for k, part in enumerate(url_lists):
part_title = '{}_{}'.format(title, str(k))
print_info(site_info, part_title, 'flv', 0)
if not info_only:
download_urls(part, part_title, 'flv', total_size=None, **kwargs)
def ucas_download_playlist(url, info_only=False, **kwargs):
'''course page
'''
html = get_content(url)
parts = re.findall(r'(getplaytitle.do\?.+)"', html)
assert parts, 'No part found!'
for part_path in parts:
ucas_download(
'http://v.ucas.ac.cn/course/{}'.format(part_path),
info_only=info_only, **kwargs
)
def ucas_download(url, info_only=False, **kwargs):
if 'classid=' in url and 'getplaytitle.do' in url:
ucas_download_single(url, info_only=info_only, **kwargs)
elif 'CourseIndex.do' in url:
ucas_download_playlist(url, info_only=info_only, **kwargs)
download = ucas_download
download_playlist = ucas_download_playlist
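# Usage sketch (illustrative; the URL below is a placeholder for a real course
# page, which is dispatched to the playlist downloader because it contains
# 'CourseIndex.do'):
#
#     ucas_download('http://v.ucas.ac.cn/course/CourseIndex.do?courseid=XXXX',
#                   info_only=True)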
| 27.743316
| 78
| 0.616037
|
cae1ff3eadc97b44f563b6d821687c1e39077b38
| 1,553
|
py
|
Python
|
umakit/general.py
|
taruma/belajar-tsa
|
ded702846f0453993ba90738a4b6b2401abe9177
|
[
"MIT"
] | null | null | null |
umakit/general.py
|
taruma/belajar-tsa
|
ded702846f0453993ba90738a4b6b2401abe9177
|
[
"MIT"
] | null | null | null |
umakit/general.py
|
taruma/belajar-tsa
|
ded702846f0453993ba90738a4b6b2401abe9177
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 09:01:45 2018
@author: tarum
"""
# Ignore this block code.
# Define Function for printing result
def cetak(name, val, unit = "", pad=50):
print('| {:>15s} = {:>10.5f} {:<{n}s}|'.format(name, val, unit, n = pad-15-10-7))
def new_line(pad = 50):
print('='*pad)
def create_dictionary(string, value):
"""
    Create a dictionary from comma-separated keys and a list of values, in order
args:
        string: comma-separated string of keys, no spaces
value: list of value to be assigned
return:
dictionary with {string-keys: value}
example:
create_dictionary('a,b,c,d', [1,10,20,300])
return {'a':1, 'b':10, 'c':20, 'd':300}
create_dictionary('a', [1])
return {'a':1}
"""
dictionary = {}
keys = string.split(',')
for index, value in enumerate(value):
dictionary[keys[index]] = value
return dictionary
def get_valdict(dictionary, string):
"""
    Get values from a dictionary for a comma-separated string of keys
args:
dictionary: dictionary
        string: comma-separated string of keys, no spaces
return:
list of value of string-key
example:
let DICT = {'a':3, 'ba':10, 'c':30}
get_valdict(DICT, 'a,ba')
return [3, 10]
get_valdict(DICT, 'a')
return [3]
get_valdict(DICT, 'c,ba')
return [30, 10]
"""
string = string.replace(' ', '')
    return list(map(dictionary.get, string.split(',')))
| 24.265625
| 85
| 0.547328
|
64f576ad6e5a883dc5fb04220572007d36367796
| 949
|
py
|
Python
|
simuvex/procedures/libc___so___6/realloc.py
|
praetorian-inc/simuvex
|
7984bc4432a1c2126e6f2eb963c935e9f6a98da5
|
[
"BSD-2-Clause"
] | 8
|
2016-01-19T03:13:32.000Z
|
2020-11-03T09:30:05.000Z
|
simuvex/procedures/libc___so___6/realloc.py
|
praetorian-inc/simuvex
|
7984bc4432a1c2126e6f2eb963c935e9f6a98da5
|
[
"BSD-2-Clause"
] | null | null | null |
simuvex/procedures/libc___so___6/realloc.py
|
praetorian-inc/simuvex
|
7984bc4432a1c2126e6f2eb963c935e9f6a98da5
|
[
"BSD-2-Clause"
] | 3
|
2017-04-24T00:22:30.000Z
|
2020-11-03T09:30:06.000Z
|
import simuvex
from simuvex.s_type import SimTypeLength, SimTypeTop
import logging
l = logging.getLogger("simuvex.procedures.libc.realloc")
######################################
# realloc
######################################
class realloc(simuvex.SimProcedure):
#pylint:disable=arguments-differ
def run(self, ptr, size):
self.state.add_constraints(size <= self.state.libc.max_variable_size)
size_int = self.state.se.max_int(size)
l.debug("Size: %d", size_int)
self.state.add_constraints(size_int == size)
self.argument_types = { 0: self.ty_ptr(SimTypeTop()),
1: SimTypeLength(self.state.arch) }
self.return_type = self.ty_ptr(SimTypeTop(size))
addr = self.state.libc.heap_location
v = self.state.memory.load(ptr, size_int)
self.state.memory.store(addr, v)
self.state.libc.heap_location += size_int
return addr
| 30.612903
| 77
| 0.609062
|
c37676591bba966850b415fbc82b34c3267e8d93
| 6,630
|
py
|
Python
|
perceptrons.py
|
Ras-al-Ghul/PerceptronAlgorithms-NeuralNetwork
|
c8ef655f59d2840e3c27f333a4af7f58295de580
|
[
"CECILL-B"
] | 2
|
2018-09-06T09:55:27.000Z
|
2020-02-02T17:57:23.000Z
|
perceptrons.py
|
Ras-al-Ghul/PerceptronAlgorithms-NeuralNetwork
|
c8ef655f59d2840e3c27f333a4af7f58295de580
|
[
"CECILL-B"
] | null | null | null |
perceptrons.py
|
Ras-al-Ghul/PerceptronAlgorithms-NeuralNetwork
|
c8ef655f59d2840e3c27f333a4af7f58295de580
|
[
"CECILL-B"
] | null | null | null |
from random import randrange
import matplotlib.pyplot as plt
from copy import deepcopy
from math import sqrt
from time import time
class1 = [[1, 2, 7], [1, 8, 1], [1, 7, 5], [1, 6, 3], [1, 7, 8], [1, 5, 9], [1, 4, 5]]
class2 = [[1, 4, 2], [1, -1, -1], [1, 1, 3], [1, 3, -2], [1, 5, 3.25], [1, 2, 4], [1, 7, 1]]
global line1
global line2
global line3
global line4
def singlesampleperceptron(weight):
start = time()
# normalize class2
for i in class2:
for j in range(len(i)):
i[j] = -i[j]
# create dataset
dataset = []
for i in class1:
dataset.append(i)
for i in class2:
dataset.append(i)
# main loop
k = 0
count = 0
while 1:
value = 0
for i in range(0,3):
value += weight[i] * dataset[k][i]
if value > 0:
count += 1
if count == len(dataset):
break
else:
count = 0
for i in range(0,3):
weight[i] += dataset[k][i]
k += 1
if k == len(dataset):
k %= len(dataset)
    print(weight)
    end = time()
    print(end - start, "Time")
# denormalize
for i in class2:
for j in range(len(i)):
i[j] = -i[j]
class12Dx = []
class12Dy = []
class22Dx = []
class22Dy = []
for i in range(len(class1)):
class12Dx.append(class1[i][1])
class12Dy.append(class1[i][2])
for i in range(len(class2)):
class22Dx.append(class2[i][1])
class22Dy.append(class2[i][2])
plt.plot(class12Dx, class12Dy, 'ro')
plt.plot(class22Dx, class22Dy, 'bo')
# find points on x and y axes
y1 = -(weight[0]/weight[2])
x2 = -(weight[0]/weight[1])
line1, = plt.plot([0, y1], [x2, 0], label = "ssp")
plt.setp(line1, color='r', linewidth=1.0)
return line1, weight
def singlesampleperceptronmargin(margin, weight):
start = time()
# normalize class2
for i in class2:
for j in range(len(i)):
i[j] = -i[j]
# create dataset
dataset = []
for i in class1:
dataset.append(i)
for i in class2:
dataset.append(i)
# main loop
k = 0
count = 0
while 1:
value = 0
for i in range(0,3):
value += weight[i] * dataset[k][i]
if value > margin:
count += 1
if count == len(dataset):
break
else:
count = 0
for i in range(0,3):
weight[i] += dataset[k][i]
k += 1
if k == len(dataset):
k %= len(dataset)
    print(weight)
    end = time()
    print(end - start, "Time")
# denormalize
for i in class2:
for j in range(len(i)):
i[j] = -i[j]
class12Dx = []
class12Dy = []
class22Dx = []
class22Dy = []
for i in range(len(class1)):
class12Dx.append(class1[i][1])
class12Dy.append(class1[i][2])
for i in range(len(class2)):
class22Dx.append(class2[i][1])
class22Dy.append(class2[i][2])
plt.plot(class12Dx, class12Dy, 'ro')
plt.plot(class22Dx, class22Dy, 'bo')
# find points on x and y axes
y1 = -(weight[0]/weight[2])
x2 = -(weight[0]/weight[1])
line2, = plt.plot([0, y1], [x2, 0], label='sspm')
plt.setp(line2, color='b', linewidth=1.0)
return line2, weight
def relaxationalgo(lrate, margin, weight):
start = time()
# normalize class2
for i in class2:
for j in range(len(i)):
i[j] = -i[j]
# create dataset
dataset = []
for i in class1:
dataset.append(i)
for i in class2:
dataset.append(i)
# main loop
k = 0
count = 0
overallcount = 0
while 1:
overallcount += 1
value = 0
for i in range(0,3):
value += weight[i] * dataset[k][i]
if value > margin:
count += 1
if count == len(dataset):
break
else:
count = 0
value = 0
for i in range(0,3):
value += (weight[i] * dataset[k][i])
value = (-(value) + margin)/((dataset[k][0]*dataset[k][0]) + (dataset[k][1]*dataset[k][1]) + (dataset[k][2]*dataset[k][2]))
for j in range(0,3):
weight[j] = weight[j] + (lrate * value * dataset[k][j])
k += 1
if k == len(dataset):
k %= len(dataset)
    print(weight)
    end = time()
    print(end - start, "Time")
# denormalize
for i in class2:
for j in range(len(i)):
i[j] = -i[j]
class12Dx = []
class12Dy = []
class22Dx = []
class22Dy = []
for i in range(len(class1)):
class12Dx.append(class1[i][1])
class12Dy.append(class1[i][2])
for i in range(len(class2)):
class22Dx.append(class2[i][1])
class22Dy.append(class2[i][2])
plt.plot(class12Dx, class12Dy, 'ro')
plt.plot(class22Dx, class22Dy, 'bo')
# find points on x and y axes
y1 = -(weight[0]/weight[2])
x2 = -(weight[0]/weight[1])
line3, = plt.plot([0, y1], [x2, 0], label='relaxationalgo')
plt.setp(line3, color='g', linewidth=1.0)
return line3, weight
def widrowhoff(lrate, theta, weight):
start = time()
bvec = [1,1,1,1,1,1,1,1,1,1,1,1,1,1]
# normalize class2
for i in class2:
for j in range(len(i)):
i[j] = -i[j]
# create dataset
dataset = []
for i in class1:
dataset.append(i)
for i in class2:
dataset.append(i)
# main loop
k = 0
count = 1
overallcnt = 0
while 1:
value = 0
for i in range(0,3):
value += weight[i] * dataset[k][i]
value = bvec[k] - value
lr = lrate/count
temp = deepcopy(dataset[k])
for i in range(len(temp)):
temp[i] = temp[i] * value * lr
if sqrt((temp[0]*temp[0]) + (temp[1]*temp[1]) + (temp[2]*temp[2])) < theta:
overallcnt += 1
if overallcnt == 1:
break
else:
overallcnt = 0
for j in range(0,3):
weight[j] = weight[j] + temp[j]
k += 1
if k == len(dataset):
k %= len(dataset)
count += 1
    print(weight)
    end = time()
    print(end - start, "Time")
# denormalize
for i in class2:
for j in range(len(i)):
i[j] = -i[j]
class12Dx = []
class12Dy = []
class22Dx = []
class22Dy = []
for i in range(len(class1)):
class12Dx.append(class1[i][1])
class12Dy.append(class1[i][2])
for i in range(len(class2)):
class22Dx.append(class2[i][1])
class22Dy.append(class2[i][2])
plt.plot(class12Dx, class12Dy, 'ro')
plt.plot(class22Dx, class22Dy, 'bo')
# find points on x and y axes
y1 = -(weight[0]/weight[2])
x2 = -(weight[0]/weight[1])
line4, = plt.plot([0, y1], [x2, 0], label='widrowhoff')
plt.setp(line4, color='y', linewidth=1.0)
return line4, weight
def main():
weight = [1,1,1]
weight1 = deepcopy(weight)
line1, weight1 = singlesampleperceptron(weight1)
weight2 = deepcopy(weight)
margin = 0.5
line2, weight2 = singlesampleperceptronmargin(margin, weight2)
weight3 = deepcopy(weight)
lrate = 2
margin = 0.5
line3, weight3 = relaxationalgo(lrate, margin, weight3)
weight4 = deepcopy(weight)
lrate = 0.7
theta = 0.01
line4, weight4 = widrowhoff(lrate, theta, weight4)
plt.axis([-5, 15, -5, 15])
plt.legend([line1, line2, line3, line4], ['Single Sample Perceptron' + str(weight1), 'Perceptron with margin' + str(weight2), 'Perceptron with Margin and Relaxation' + str(weight3), 'Widrow Hoff' + str(weight4)])
plt.show()
if __name__ == '__main__':
main()
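# Editorial note (not part of the original file): all four trainers rely on the same
# sign-normalisation trick: class-2 samples are multiplied by -1 so a single inequality
# w.y > 0 (or > margin) expresses correct classification for both classes. For example,
# w = [1, 1, 1] and the negated class-2 point y = [-1, -4, -2] give w.y = -7 <= 0, i.e.
# the point is misclassified and triggers a weight update.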
| 20.213415
| 213
| 0.611463
|
47e9feaf3c66abbcd76fffa9da115a188db55f3d
| 175
|
py
|
Python
|
x_6_3.py
|
ofl/kuku
|
76eefc0d3d859051473ee0d5f48b5d42d17d05a6
|
[
"MIT"
] | null | null | null |
x_6_3.py
|
ofl/kuku
|
76eefc0d3d859051473ee0d5f48b5d42d17d05a6
|
[
"MIT"
] | 4
|
2021-09-23T03:19:52.000Z
|
2021-11-13T10:38:21.000Z
|
x_6_3.py
|
ofl/kuku
|
76eefc0d3d859051473ee0d5f48b5d42d17d05a6
|
[
"MIT"
] | null | null | null |
# x_6_3
#
# Add code so that the program prints "○○さんこんにちは" ("Hello, ○○-san") for every member
# Hint: use a for loop inside another for loop
teams = [
['桃太郎', 'いぬ', 'さる', 'きじ'],
['かに', 'くり', 'うす', 'はち', '牛糞']
]
print(teams)
| 15.909091
| 47
| 0.571429
|
5e971151427000eb78a58d12e4f9c79c58f9bbf1
| 840
|
py
|
Python
|
pyrival/data_structures/tree_repr.py
|
MattJDavidson/aoc2021
|
1c26697da55e58408f36525639d201303f808b1b
|
[
"Apache-2.0"
] | 748
|
2018-09-27T01:08:12.000Z
|
2022-03-25T17:31:56.000Z
|
pyrival/data_structures/tree_repr.py
|
MattJDavidson/aoc2021
|
1c26697da55e58408f36525639d201303f808b1b
|
[
"Apache-2.0"
] | 38
|
2019-02-24T14:50:02.000Z
|
2022-03-25T01:27:50.000Z
|
pyrival/data_structures/tree_repr.py
|
MattJDavidson/aoc2021
|
1c26697da55e58408f36525639d201303f808b1b
|
[
"Apache-2.0"
] | 288
|
2018-10-29T11:55:57.000Z
|
2022-03-20T04:37:27.000Z
|
def tree_repr(tree):
def recursive_repr(i):
if i >= tree._size:
return [str(tree.data[i])]
left = recursive_repr(2 * i)
right = recursive_repr(2 * i + 1)
lines = ["{} {}".format(l, r) for l, r in zip(left, right)]
width = len(lines[0])
left_width = len(left[0]) // 2
right_width = len(right[0]) // 2
stem_width = width - left_width - right_width - 2
branches = " " * left_width + "/" + " " * stem_width + "\\" + " " * right_width
stem = [" "] * (left_width + 1) + ["_"] * stem_width + [" "] * (right_width + 1)
stem[width // 2] = "^"
        lines.append(branches)
        lines.append("".join(stem))
        lines.append(str(tree.data[i]).center(width))
return lines
return "\n".join(reversed(recursive_repr(1)))
| 33.6
| 88
| 0.519048
|
555271bf11dbbf1eb89076ee3ad26aae8f32596b
| 10,228
|
py
|
Python
|
docs/conf.py
|
lcv3/SyncGitlab2MSProject
|
4a81191b7deb6974e893d44f3b04fcfc1da36571
|
[
"MIT"
] | 4
|
2020-11-21T13:53:00.000Z
|
2022-02-25T08:26:36.000Z
|
docs/conf.py
|
lcv3/SyncGitlab2MSProject
|
4a81191b7deb6974e893d44f3b04fcfc1da36571
|
[
"MIT"
] | 8
|
2020-11-11T15:30:25.000Z
|
2022-02-25T08:28:05.000Z
|
docs/conf.py
|
lcv3/SyncGitlab2MSProject
|
4a81191b7deb6974e893d44f3b04fcfc1da36571
|
[
"MIT"
] | 1
|
2022-02-24T16:11:40.000Z
|
2022-02-24T16:11:40.000Z
|
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import inspect
import shutil
__location__ = os.path.join(
os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe()))
)
import m2r2
readme_rst_content = m2r2.parse_from_file(os.path.join(__location__, "..", "README.md"))
with open(os.path.join(__location__, "README.rst"), "w") as f:
f.write(readme_rst_content)
# Try to import win32com, otherwise use mocking
try:
import win32com
except ImportError:
sys.path.insert(0, os.path.join(__location__, "../mocking"))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(__location__, "../src"))
# -- Run sphinx-apidoc ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
try: # for Sphinx >= 1.7
from sphinx.ext import apidoc
except ImportError:
from sphinx import apidoc
output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/syncgitlab2msproject")
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
try:
import sphinx
from pkg_resources import parse_version
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
args = cmd_line.split(" ")
if parse_version(sphinx.__version__) >= parse_version("1.7"):
args = args[1:]
apidoc.main(args)
except Exception as e:
print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.autosummary",
"sphinx.ext.viewcode",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.ifconfig",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx.ext.extlinks",
"sphinx_rtd_theme",
"recommonmark",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# To configure AutoStructify
def setup(app):
from recommonmark.transform import AutoStructify
app.add_config_value(
"recommonmark_config",
{
"auto_toc_tree_section": "Contents",
"enable_eval_rst": True,
"enable_math": True,
"enable_inline_math": True,
},
True,
)
app.add_transform(AutoStructify)
# The suffix of source filenames.
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"SyncGitlab2MSProject"
copyright = u"2020, Carli* Freudenberg"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "" # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = "" # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {"sidebar_width": "300px", "page_width": "1200px"}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from syncgitlab2msproject import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "syncgitlab2msproject-doc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"user_guide.tex",
u"SyncGitlab2MSProject Documentation",
u"Carli",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = ".".join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
"sphinx": ("http://www.sphinx-doc.org/en/stable", None),
"python": ("https://docs.python.org/" + python_version, None),
"matplotlib": ("https://matplotlib.org", None),
"numpy": ("https://docs.scipy.org/doc/numpy", None),
"sklearn": ("http://scikit-learn.org/stable", None),
"pandas": ("http://pandas.pydata.org/pandas-docs/stable", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
}
extlinks = {
"issue": ("https://github.com/CarliJoy/SyncGitlab2MSProject/issues/%s", "#")
}
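# Illustrative usage note (editorial assumption, not part of the original conf.py): with
# the extlinks entry above, an .rst line such as
#     See :issue:`42` for details.
# renders as "#42" and links to
# https://github.com/CarliJoy/SyncGitlab2MSProject/issues/42.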
| 32.062696
| 88
| 0.696031
|
d4cdf413f11b9f560fac58bb146ec2e572eb2ec8
| 2,194
|
py
|
Python
|
docs_src/tutorial/connect/select/tutorial001.py
|
javivdm/sqlmodel
|
050372be795ff3c292c0e9263b93f94023f38d15
|
[
"MIT"
] | 5,490
|
2021-08-24T14:27:44.000Z
|
2022-03-31T14:24:20.000Z
|
docs_src/tutorial/connect/select/tutorial001.py
|
javivdm/sqlmodel
|
050372be795ff3c292c0e9263b93f94023f38d15
|
[
"MIT"
] | 231
|
2021-08-24T15:22:14.000Z
|
2022-03-30T14:39:44.000Z
|
docs_src/tutorial/connect/select/tutorial001.py
|
javivdm/sqlmodel
|
050372be795ff3c292c0e9263b93f94023f38d15
|
[
"MIT"
] | 245
|
2021-08-24T15:00:27.000Z
|
2022-03-30T02:12:56.000Z
|
from typing import Optional
from sqlmodel import Field, Session, SQLModel, create_engine, select
class Team(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
headquarters: str
class Hero(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
team_id: Optional[int] = Field(default=None, foreign_key="team.id")
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret’s Bar")
session.add(team_preventers)
session.add(team_z_force)
session.commit()
hero_deadpond = Hero(
name="Deadpond", secret_name="Dive Wilson", team_id=team_z_force.id
)
hero_rusty_man = Hero(
name="Rusty-Man",
secret_name="Tommy Sharp",
age=48,
team_id=team_preventers.id,
)
hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
def select_heroes():
with Session(engine) as session:
statement = select(Hero, Team).where(Hero.team_id == Team.id)
results = session.exec(statement)
for hero, team in results:
print("Hero:", hero, "Team:", team)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
| 27.772152
| 81
| 0.658159
|
b31dc8f6afe77a74a084b4c1cd7191ff5abaf6b0
| 1,445
|
py
|
Python
|
cellacdc/models/cellpose/acdcSegment.py
|
SchmollerLab/Cell_ACDC
|
2be9c0055c3306c4c35da99831f146d8a211baa0
|
[
"BSD-3-Clause"
] | 29
|
2021-10-01T09:43:26.000Z
|
2022-03-15T10:46:53.000Z
|
cellacdc/models/cellpose/acdcSegment.py
|
SchmollerLab/Cell_ACDC
|
2be9c0055c3306c4c35da99831f146d8a211baa0
|
[
"BSD-3-Clause"
] | 15
|
2022-02-04T09:21:43.000Z
|
2022-03-31T08:29:00.000Z
|
cellacdc/models/cellpose/acdcSegment.py
|
SchmollerLab/Cell_ACDC
|
2be9c0055c3306c4c35da99831f146d8a211baa0
|
[
"BSD-3-Clause"
] | 1
|
2022-03-15T02:23:02.000Z
|
2022-03-15T02:23:02.000Z
|
import os
import pathlib
import numpy as np
import skimage.exposure
import skimage.filters
from cellpose import models
from cellacdc.models import CELLPOSE_MODELS
help_url = 'https://cellpose.readthedocs.io/en/latest/api.html'
class Model:
def __init__(self, model_type='cyto', net_avg=False, gpu=False):
if model_type not in CELLPOSE_MODELS:
err_msg = (
f'"{model_type}" not available. '
f'Available models are {CELLPOSE_MODELS}'
)
raise NameError(err_msg)
if model_type=='cyto':
self.model = models.Cellpose(
gpu=gpu, net_avg=net_avg, model_type=model_type
)
else:
self.model = models.CellposeModel(
gpu=gpu, net_avg=net_avg, model_type=model_type
)
def segment(
self, image,
diameter=0.0,
flow_threshold=0.4,
cellprob_threshold=0.0
):
# Preprocess image
# image = image/image.max()
# image = skimage.filters.gaussian(image, sigma=1)
# image = skimage.exposure.equalize_adapthist(image)
# Run cellpose eval
lab = self.model.eval(
image.astype(np.float32),
channels=[0,0],
diameter=diameter,
flow_threshold=flow_threshold,
cellprob_threshold=cellprob_threshold
)[0]
return lab
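# Minimal usage sketch (editorial assumption, not part of the original module): segmenting
# a single 2-D image with the wrapper above; the random `img` is only a placeholder.
if __name__ == '__main__':
    model = Model(model_type='cyto', net_avg=False, gpu=False)
    img = np.random.rand(256, 256).astype(np.float32)  # placeholder grayscale image
    lab = model.segment(img, diameter=30.0)
    print(lab.shape, int(lab.max()))  # labelled mask; max() equals the number of objects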
| 28.333333
| 68
| 0.585467
|
71d8e82f8d2c1c2e9da14f753d28badf2a2e1345
| 7,928
|
py
|
Python
|
nipype/interfaces/ants/visualization.py
|
Conxz/nipype
|
1281723ae56eacd103597ff4081a205583706e62
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/ants/visualization.py
|
Conxz/nipype
|
1281723ae56eacd103597ff4081a205583706e62
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/ants/visualization.py
|
Conxz/nipype
|
1281723ae56eacd103597ff4081a205583706e62
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""The ants visualisation module provides basic functions based on ITK.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
import os
from ..base import TraitedSpec, File, traits
from .base import ANTSCommand, ANTSCommandInputSpec
class ConvertScalarImageToRGBInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(3, 2, argstr='%d', usedefault=True,
desc='image dimension (2 or 3)', mandatory=True,
position=0)
input_image = File(argstr='%s', exists=True,
desc='Main input is a 3-D grayscale image.', mandatory=True,
position=1)
output_image = traits.Str('rgb.nii.gz', argstr='%s', usedefault=True,
desc='rgb output image', position=2)
mask_image = File('none', argstr='%s', exists=True,
desc='mask image', position=3, usedefault=True)
colormap = traits.Str(argstr='%s', usedefault=True,
desc=('Possible colormaps: grey, red, green, '
'blue, copper, jet, hsv, spring, summer, '
'autumn, winter, hot, cool, overunder, custom '
), mandatory=True, position=4)
custom_color_map_file = traits.Str('none', argstr='%s', usedefault=True,
desc='custom color map file', position=5)
minimum_input = traits.Int(argstr='%d', desc='minimum input',
mandatory=True, position=6)
maximum_input = traits.Int(argstr='%d', desc='maximum input',
mandatory=True, position=7)
minimum_RGB_output = traits.Int(0, usedefault=True,
argstr='%d', desc='', position=8)
maximum_RGB_output = traits.Int(255, usedefault=True,
argstr='%d', desc='', position=9)
class ConvertScalarImageToRGBOutputSpec(TraitedSpec):
output_image = File(exists=True, desc='converted RGB image')
class ConvertScalarImageToRGB(ANTSCommand):
"""
Examples
--------
>>> from nipype.interfaces.ants.visualization import ConvertScalarImageToRGB
>>> converter = ConvertScalarImageToRGB()
>>> converter.inputs.dimension = 3
>>> converter.inputs.input_image = 'T1.nii.gz'
>>> converter.inputs.colormap = 'jet'
>>> converter.inputs.minimum_input = 0
>>> converter.inputs.maximum_input = 6
>>> converter.cmdline # doctest: +IGNORE_UNICODE
'ConvertScalarImageToRGB 3 T1.nii.gz rgb.nii.gz none jet none 0 6 0 255'
"""
_cmd = 'ConvertScalarImageToRGB'
input_spec = ConvertScalarImageToRGBInputSpec
output_spec = ConvertScalarImageToRGBOutputSpec
def _format_arg(self, opt, spec, val):
return super(ConvertScalarImageToRGB, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output_image'] = os.path.join(os.getcwd(),
self.inputs.output_image)
return outputs
class CreateTiledMosaicInputSpec(ANTSCommandInputSpec):
input_image = File(argstr='-i %s', exists=True,
desc='Main input is a 3-D grayscale image.',
mandatory=True)
rgb_image = File(argstr='-r %s', exists=True,
desc=('An optional Rgb image can be added as an overlay.'
'It must have the same image'
'geometry as the input grayscale image.'),
mandatory=True)
mask_image = File(argstr='-x %s', exists=True,
desc='Specifies the ROI of the RGB voxels used.')
alpha_value = traits.Float(argstr='-a %.2f',
desc=('If an Rgb image is provided, render the overlay '
'using the specified alpha parameter.'))
output_image = traits.Str('output.png', argstr='-o %s',
desc='The output consists of the tiled mosaic image.',
usedefault=True)
tile_geometry = traits.Str(argstr='-t %s', desc=(
'The tile geometry specifies the number of rows and columns'
'in the output image. For example, if the user specifies "5x10", '
'then 5 rows by 10 columns of slices are rendered. If R < 0 and C > '
'0 (or vice versa), the negative value is selected'
'based on direction.'))
direction = traits.Int(argstr='-d %d', desc=('Specifies the direction of '
'the slices. If no direction is specified, the '
'direction with the coarsest spacing is chosen.'))
pad_or_crop = traits.Str(argstr='-p %s',
desc='argument passed to -p flag:'
'[padVoxelWidth,<constantValue=0>]'
'[lowerPadding[0]xlowerPadding[1],upperPadding[0]xupperPadding[1],'
'constantValue]'
'The user can specify whether to pad or crop a specified '
'voxel-width boundary of each individual slice. For this '
'program, cropping is simply padding with negative voxel-widths.'
'If one pads (+), the user can also specify a constant pad '
'value (default = 0). If a mask is specified, the user can use '
'the mask to define the region, by using the keyword "mask"'
' plus an offset, e.g. "-p mask+3".'
)
slices = traits.Str(argstr='-s %s',
desc=('Number of slices to increment Slice1xSlice2xSlice3'
'[numberOfSlicesToIncrement,<minSlice=0>,<maxSlice=lastSlice>]'))
flip_slice = traits.Str(argstr='-f %s', desc='flipXxflipY')
permute_axes = traits.Bool(argstr='-g', desc='doPermute')
class CreateTiledMosaicOutputSpec(TraitedSpec):
output_image = File(exists=True, desc='image file')
class CreateTiledMosaic(ANTSCommand):
"""The program CreateTiledMosaic in conjunction with ConvertScalarImageToRGB
provides useful functionality for common image analysis tasks. The basic
usage of CreateTiledMosaic is to tile a 3-D image volume slice-wise into
a 2-D image.
Examples
--------
>>> from nipype.interfaces.ants.visualization import CreateTiledMosaic
>>> mosaic_slicer = CreateTiledMosaic()
>>> mosaic_slicer.inputs.input_image = 'T1.nii.gz'
>>> mosaic_slicer.inputs.rgb_image = 'rgb.nii.gz'
>>> mosaic_slicer.inputs.mask_image = 'mask.nii.gz'
>>> mosaic_slicer.inputs.output_image = 'output.png'
>>> mosaic_slicer.inputs.alpha_value = 0.5
>>> mosaic_slicer.inputs.direction = 2
>>> mosaic_slicer.inputs.pad_or_crop = '[ -15x -50 , -15x -30 ,0]'
>>> mosaic_slicer.inputs.slices = '[2 ,100 ,160]'
>>> mosaic_slicer.cmdline # doctest: +IGNORE_UNICODE
'CreateTiledMosaic -a 0.50 -d 2 -i T1.nii.gz -x mask.nii.gz -o output.png -p [ -15x -50 , -15x -30 ,0] \
-r rgb.nii.gz -s [2 ,100 ,160]'
"""
_cmd = 'CreateTiledMosaic'
input_spec = CreateTiledMosaicInputSpec
output_spec = CreateTiledMosaicOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output_image'] = os.path.join(os.getcwd(),
self.inputs.output_image)
return outputs
| 49.55
| 108
| 0.584006
|
03cc1096d672161aedce10afe5b5bec7da0e76da
| 585
|
py
|
Python
|
jirani/migrations/0006_auto_20181017_2139.py
|
samsoluoch/Neighborhood
|
8a05b3882290765e06c72c1609767ec36056ba24
|
[
"MIT"
] | null | null | null |
jirani/migrations/0006_auto_20181017_2139.py
|
samsoluoch/Neighborhood
|
8a05b3882290765e06c72c1609767ec36056ba24
|
[
"MIT"
] | null | null | null |
jirani/migrations/0006_auto_20181017_2139.py
|
samsoluoch/Neighborhood
|
8a05b3882290765e06c72c1609767ec36056ba24
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-17 18:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jirani', '0005_business'),
]
operations = [
migrations.AddField(
model_name='location',
name='hospital',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='location',
name='police',
field=models.TextField(null=True),
),
]
| 22.5
| 46
| 0.577778
|
fce3a4a3681d5d28ed4de34cbf57a5989e8f54e8
| 18,315
|
py
|
Python
|
Main Code/WebService/app.py
|
abhiagarwal18/Question-Answering-and-Ranking-System
|
86fc805cdbbddbf543eddc19151a1f1be264607e
|
[
"MIT"
] | 2
|
2020-08-25T16:53:35.000Z
|
2020-09-20T10:59:16.000Z
|
Main Code/WebService/app.py
|
abhiagarwal18/Question-Answering-and-Ranking-System
|
86fc805cdbbddbf543eddc19151a1f1be264607e
|
[
"MIT"
] | null | null | null |
Main Code/WebService/app.py
|
abhiagarwal18/Question-Answering-and-Ranking-System
|
86fc805cdbbddbf543eddc19151a1f1be264607e
|
[
"MIT"
] | 1
|
2020-09-20T11:00:42.000Z
|
2020-09-20T11:00:42.000Z
|
from flask import Flask, request, Response, jsonify, render_template
import flask
import requests
import urllib.request
import time
from bs4 import BeautifulSoup
from urllib.request import urlopen
import csv,json
# Cosine Similarity
import nltk, string, numpy
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import pandas as pd
import numpy as np
import re, nltk
import gensim
import codecs
import pickle
import en_core_web_sm
from sner import Ner
import spacy
from sklearn.metrics import confusion_matrix, accuracy_score, average_precision_score
from sklearn.model_selection import KFold, StratifiedKFold, cross_val_score, GridSearchCV
from nltk.internals import find_jars_within_path
from nltk.tag import StanfordPOSTagger
from nltk.tag import StanfordNERTagger
from sklearn import linear_model
from sklearn import svm
from sklearn.metrics import fbeta_score, accuracy_score
from scipy.sparse import hstack
from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
import requests
from bert import QA
model = QA('model')
context = ' Bits Pilani is a private institute of higher education and a deemed university under Section 3 of the UGC Act 1956. \
The institute was established in its present form in 1964. It is established across 4 campuses and has 15 academic departments. \
Pilani is located 220 kilometres far from Delhi in Rajasthan. We can reach bits pilani via train or bus from delhi. \
Bits Pilani has its campuses in Pilani , Goa , Hyderabad , Dubai. There are multiple scholarships available at BITS namely Merit Scholarships, Merit Cum Need Scholarships and BITSAA Scholarships. \
BITS Model United Nations Conference (BITSMUN) is one of the largest MUN conferences in the country. BITS conducts the All-India computerized entrance examination, BITSAT (BITS Admission Test). \
Admission is merit-based, as assessed by the BITSAT examination. \
We can reach bits pilani through bus or train from delhi or jaipur. \
Mr. Ashoke Kumar Sarkar is the director of Bits Pilani, pilani campus. \
Founder of Bits pilani was Ghanshyam Das Birla.'
def get_answer(context:str, ques:str):
answer= model.predict(context, ques)
return answer['answer']
app = Flask(__name__)
app.debug = True
QALinks = {
'0':"context",
'1':"demo",
'2':"demo",
'3':"demo",
'4':"demo",
'5':"demo"
}
@app.route("/check")
def checkServer():
return "up and running"
@app.route("/search", methods = ["GET","POST"])
def srchLinks():
questionString = str(request.form["question"])
bits = 0
response_data={}
for token in questionString.split():
token = token.lower()
if token == 'bits' or token == 'pilani':
bits = 1
if bits == 1:
predicted_class = "BITS Pilani"
QALinks['0']=predicted_class
QALinks['1']=get_answer(context, questionString)
QALinks.pop('2', None)
QALinks.pop('3', None)
QALinks.pop('4', None)
QALinks.pop('5', None)
# print(response_data)
response = flask.jsonify(QALinks)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
#creating question
qstn = questionString.replace(' ','+')
questions = [] # a list to store link of questions
documents = [] # # a list to store questions in words
documents.append(questionString)
questions.append('Original question')
#Context Identification of Questions
def text_clean(corpus, keep_list):
'''
Purpose : Function to keep only alphabets, digits and certain words (punctuations, qmarks, tabs etc. removed)
Input : Takes a text corpus, 'corpus' to be cleaned along with a list of words, 'keep_list', which have to be retained
even after the cleaning process
Output : Returns the cleaned text corpus
'''
cleaned_corpus = pd.Series()
for row in corpus:
qs = []
for word in row.split():
if word not in keep_list:
p1 = re.sub(pattern='[^a-zA-Z0-9]',repl=' ',string=word)
p1 = p1.lower()
qs.append(p1)
else : qs.append(word)
cleaned_corpus = cleaned_corpus.append(pd.Series(' '.join(qs)))
return cleaned_corpus
def preprocess(corpus, keep_list, cleaning = True, stemming = False, stem_type = None, lemmatization = False, remove_stopwords = True):
'''
Purpose : Function to perform all pre-processing tasks (cleaning, stemming, lemmatization, stopwords removal etc.)
Input :
'corpus' - Text corpus on which pre-processing tasks will be performed
'keep_list' - List of words to be retained during cleaning process
'cleaning', 'stemming', 'lemmatization', 'remove_stopwords' - Boolean variables indicating whether a particular task should
be performed or not
'stem_type' - Choose between Porter stemmer or Snowball(Porter2) stemmer. Default is "None", which corresponds to Porter
Stemmer. 'snowball' corresponds to Snowball Stemmer
Note : Either stemming or lemmatization should be used. There's no benefit of using both of them together
Output : Returns the processed text corpus
'''
if cleaning == True:
corpus = text_clean(corpus, keep_list)
if remove_stopwords == True:
wh_words = ['who', 'what', 'when', 'why', 'how', 'which', 'where', 'whom']
stop = set(stopwords.words('english'))
for word in wh_words:
stop.remove(word)
corpus = [[x for x in x.split() if x not in stop] for x in corpus]
else :
corpus = [[x for x in x.split()] for x in corpus]
if lemmatization == True:
lem = WordNetLemmatizer()
corpus = [[lem.lemmatize(x, pos = 'v') for x in x] for x in corpus]
if stemming == True:
if stem_type == 'snowball':
stemmer = SnowballStemmer(language = 'english')
corpus = [[stemmer.stem(x) for x in x] for x in corpus]
else :
stemmer = PorterStemmer()
corpus = [[stemmer.stem(x) for x in x] for x in corpus]
corpus = [' '.join(x) for x in corpus]
return corpus
common_dot_words = ['U.S.', 'St.', 'Mr.', 'Mrs.', 'D.C.']
nlp = en_core_web_sm.load()
# read information from file and load the model
count_vec_ner = pickle.load(open("count_vec_ner.p", "rb"))
count_vec_dep = pickle.load(open("count_vec_dep.p", "rb"))
count_vec_lemma = pickle.load(open("count_vec_lemma.p", "rb"))
count_vec_shape = pickle.load(open("count_vec_shape.p", "rb"))
count_vec_tag = pickle.load(open("count_vec_tag.p", "rb"))
with open('my_question_classifier.pkl', 'rb') as fid:
model = pickle.load(fid)
classes = {
"0": "BITS Pilani",
"1": "Society & Culture",
"2": "Science & Mathematics",
"3": "Health",
"4": "Education & Reference",
"5": "Computers & Internet",
"6": "Sports",
"7": "Business & Finance",
"8": "Entertainment & Music",
"9": "Family & Relationships",
"10": "Politics & Government"
}
myQ = {
'Question': questionString
}
bits = 0
for token in myQ['Question'].split():
token = token.lower()
if token == 'bits' or token == 'pilani':
bits = 1
if bits == 1:
predicted_class = classes["0"]
print(predicted_class)
else:
ques = pd.Series(myQ['Question']).astype(str)
ques = preprocess(ques, keep_list = common_dot_words, remove_stopwords = True)
myQ_ner = []
myQ_lemma = []
myQ_tag = []
myQ_dep = []
myQ_shape = []
doc = nlp(ques[0])
present_lemma = []
present_tag = []
present_dep = []
present_shape = []
present_ner = []
for token in doc:
present_lemma.append(token.lemma_)
present_tag.append(token.tag_)
present_dep.append(token.dep_)
present_shape.append(token.shape_)
myQ_lemma.append(" ".join(present_lemma))
myQ_tag.append(" ".join(present_tag))
myQ_dep.append(" ".join(present_dep))
myQ_shape.append(" ".join(present_shape))
# Named entities are available as the ents property of a Doc
doc = nlp(myQ['Question'])
for ent in doc.ents:
present_ner.append(ent.label_)
myQ_ner.append(" ".join(present_ner))
ner_myQ_ft = count_vec_ner.transform(myQ_ner)
lemma_myQ_ft = count_vec_lemma.transform(myQ_lemma)
tag_myQ_ft = count_vec_tag.transform(myQ_tag)
dep_myQ_ft = count_vec_dep.transform(myQ_dep)
shape_myQ_ft = count_vec_shape.transform(myQ_shape)
x_all_ft_myQ = hstack([ner_myQ_ft, lemma_myQ_ft, tag_myQ_ft])
x_all_ft_myQ = x_all_ft_myQ.tocsr()
preds = model.predict(x_all_ft_myQ)
predicted_class = classes[str(preds[0])]
print(predicted_class)
QALinks["0"] = predicted_class
# ### Crawling Quora
# Set the URL you want to webscrape from
url = urllib.parse.quote_plus('https://www.quora.com/search?q='+qstn)
handler = urlopen('https://api.proxycrawl.com/?token=SnUpBD_-K2v7xz_sxCDrHQ&url=' + url)
# Connect to the URL
response = requests.get('https://api.proxycrawl.com/?token=SnUpBD_-K2v7xz_sxCDrHQ&url=' + url)
    # Parse HTML and save to BeautifulSoup object
soup = BeautifulSoup(response.text, "html5lib")
table = soup.findAll('div', attrs = {'class':'pagedlist_item'})
# print("Quora")
for i in range(min(5, len(table))):
question = table[i]
link = question.find('div', attrs = {'class':'QuestionQueryResult'}).find('a', attrs = {'class':'question_link'})
document = link['href']
document = document[1:].replace('-', ' ') + '?'
final_link = 'https://www.quora.com/' + link['href']
questions.append(final_link)
documents.append(document)
# ### Crawling StackOverflow
# Set the URL you want to webscrape from
url = 'https://stackoverflow.com/search?q=' + qstn
# Connect to the URL
response = requests.get(url)
    # Parse HTML and save to BeautifulSoup object
soup = BeautifulSoup(response.text, "html5lib")
table = soup.findAll('div', attrs = {'class':'question-summary search-result'})
# print("StackOverflow")
for i in range(min(5, len(table))):
question = table[i]
a_href = question.find('div', attrs = {'class':'summary'}).find('div', attrs = {'class':'result-link'}).h3
link = a_href.findAll('a')[0]
document = link['title']
final_link = 'https://stackoverflow.com/' + link['href']
# print(final_link)
# print(document)
# print()
questions.append(final_link)
documents.append(document)
# ### Crawling Yahoo Answers
# Set the URL you want to webscrape from
url = 'https://in.answers.search.yahoo.com/search?q=' + qstn
# Connect to the URL
response = requests.get(url)
    # Parse HTML and save to BeautifulSoup object
soup = BeautifulSoup(response.text, "html5lib")
table = soup.findAll('div', attrs = {'class':'AnswrsV2'})
# print(table)
# print("Yahoo Answers")
for i in range(min(5, len(table))):
question = table[i]
a_href = question.find('div', attrs = {'class':'compTitle'}).h3
link = a_href.findAll('a')[0]
doc = link.contents
document = []
for word in doc:
document.append(BeautifulSoup(str(word).strip(), "lxml").text)
document = ' '.join(document)
final_link = link['href']
# print(final_link)
# print(document)
# print()
questions.append(final_link)
documents.append(document)
# ### Crawling Stack Exchange
# Set the URL you want to webscrape from
url = 'https://stackexchange.com/search?q=' + qstn
# Connect to the URL
response = requests.get(url)
    # Parse HTML and save to BeautifulSoup object
soup = BeautifulSoup(response.text, "html5lib")
table = soup.findAll('div', attrs = {'class':'question search-result'})
# print(table)
# print("Stack Exchange")
for i in range(min(5, len(table))):
question = table[i]
a_href = question.find('div', attrs = {'class':'result-link'}).span
link = a_href.findAll('a')[0]
doc = link.contents
document = []
for word in doc:
document.append(BeautifulSoup(str(word).strip(), "lxml").text)
document = ' '.join(document)
final_link = link['href']
# print(final_link)
# print(document)
# print()
questions.append(final_link)
documents.append(document)
# print("Cosine")
# ### Finding Cosine Similarity through NLP
# Preprocessing with nltk
# The default functions of CountVectorizer and TfidfVectorizer in scikit-learn detect word
# boundary and remove punctuations automatically. However, if we want to do
# stemming or lemmatization, we need to customize certain parameters in
# CountVectorizer and TfidfVectorizer. Doing this overrides the default tokenization
# setting, which means that we have to customize tokenization, punctuation removal,
# and turning terms to lower case altogether.
# Normalize by stemming:
# first-time use only
# nltk.download('punkt')
# nltk.download('stopwords')
# nltk.download('wordnet')
#Normalize by stemming:
# nltk.download('punkt') # first-time use only
stemmer = nltk.stem.porter.PorterStemmer()
def StemTokens(tokens):
return [stemmer.stem(token) for token in tokens]
remove_punct_dict = dict((ord(punct), None) for punct in string.punctuation)
def StemNormalize(text):
return StemTokens(nltk.word_tokenize(text.lower().translate(remove_punct_dict)))
# Normalize by lemmatization:
lemmer = nltk.stem.WordNetLemmatizer()
def LemTokens(tokens):
return [lemmer.lemmatize(token) for token in tokens]
remove_punct_dict = dict((ord(punct), None) for punct in string.punctuation)
def LemNormalize(text):
return LemTokens(nltk.word_tokenize(text.lower().translate(remove_punct_dict)))
# Turn text into vectors of term frequency:
stop_words = set(nltk.corpus.stopwords.words('english'))
tokenized_stop_words = nltk.word_tokenize(' '.join(nltk.corpus.stopwords.words('english')))
class Tokenizer(object):
def __init__(self):
nltk.download('punkt', quiet=True, raise_on_error=True)
self.stemmer = nltk.stem.PorterStemmer()
def _stem(self, token):
if (token in stop_words):
return token # Solves error "UserWarning: Your stop_words may be inconsistent with your preprocessing."
return self.stemmer.stem(token)
def __call__(self, line):
tokens = nltk.word_tokenize(line)
tokens = (self._stem(token) for token in tokens) # Stemming
return list(tokens)
LemVectorizer = CountVectorizer(tokenizer=Tokenizer(), stop_words = tokenized_stop_words, lowercase=True)
LemVectorizer.fit_transform(documents)
# Normalized (after lemmatization) text in the four documents are tokenized and each
# term is indexed:
# print (LemVectorizer.vocabulary_ )
tf_matrix = LemVectorizer.transform(documents).toarray()
# print (tf_matrix)
# print(tf_matrix.shape)
# Calculate idf and turn tf matrix to tf-idf matrix:
# Get idf:
tfidfTran = TfidfTransformer(norm="l2")
tfidfTran.fit(tf_matrix)
# print (tfidfTran.idf_)
    # Now we have a vector where each component is the idf for each term across the
    # collected question documents.
    # Get the tf-idf matrix (one row per document, one column per term):
tfidf_matrix = tfidfTran.transform(tf_matrix)
# print (tfidf_matrix.toarray())
    # What the transform method does is multiply the tf matrix by the diagonal idf matrix
    # (idf for each term on the main diagonal), and divide the tf-idf by the Euclidean
    # norm of each row.
# Get the pairwise similarity matrix (n by n):
cos_similarity_matrix = (tfidf_matrix * tfidf_matrix.T).toarray()
# print (cos_similarity_matrix)
# print (cos_similarity_matrix)
    # The matrix obtained in the last step is multiplied by its transpose. The result is the
    # pairwise similarity matrix; row 0 holds the cosine similarity of the original question
    # to each crawled candidate question.
# ### Print top 5 question links
lst = cos_similarity_matrix[0]
# print(lst)
top = []
for j in range(5):
mx = -1
index = -1
for i in range(1, len(documents)):
if(lst[i] > mx):
mx = lst[i]
index = i
top.append(index)
lst[index] = -1
# print("Top 5 Questions")
idx = 0
for i in top:
QALinks[str(idx+1)] = questions[i]
# print(QALinks[str(idx+1)])
idx = idx + 1
response = flask.jsonify(QALinks)
# print(response)
response.headers.add('Access-Control-Allow-Origin', '*')
print(QALinks)
return response
if(__name__ == "__main__"):
app.run()
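# Illustrative sketch (editorial addition, not part of the original service): the core
# ranking idea used in srchLinks(), expressed as a small helper. It scores candidate
# question texts against the user's question with TF-IDF vectors and cosine similarity
# and keeps the top matches. rank_candidates is a hypothetical name; the names it uses
# (TfidfVectorizer, cosine_similarity, np) are already imported at the top of this module.
def rank_candidates(question, candidates, top_k=5):
    vec = TfidfVectorizer(stop_words='english')
    matrix = vec.fit_transform([question] + candidates)      # row 0 is the query
    scores = cosine_similarity(matrix[0], matrix[1:])[0]     # query vs. each candidate
    order = np.argsort(scores)[::-1][:top_k]                 # best matches first
    return [(candidates[i], float(scores[i])) for i in order]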
| 32.705357
| 197
| 0.630139
|
5f83a4bf858120d863a9c01a16d800340842b8bb
| 24,089
|
py
|
Python
|
sdk/python/pulumi_vault/mfa_duo.py
|
pulumi/pulumi-vault
|
1682875f4a5d7d508f36e166529ad2b8aec34090
|
[
"ECL-2.0",
"Apache-2.0"
] | 10
|
2019-10-07T17:44:18.000Z
|
2022-03-30T20:46:33.000Z
|
sdk/python/pulumi_vault/mfa_duo.py
|
pulumi/pulumi-vault
|
1682875f4a5d7d508f36e166529ad2b8aec34090
|
[
"ECL-2.0",
"Apache-2.0"
] | 79
|
2019-10-11T18:13:07.000Z
|
2022-03-31T21:09:41.000Z
|
sdk/python/pulumi_vault/mfa_duo.py
|
pulumi/pulumi-vault
|
1682875f4a5d7d508f36e166529ad2b8aec34090
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2019-10-28T10:08:40.000Z
|
2020-03-17T14:20:55.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['MfaDuoArgs', 'MfaDuo']
@pulumi.input_type
class MfaDuoArgs:
def __init__(__self__, *,
api_hostname: pulumi.Input[str],
integration_key: pulumi.Input[str],
mount_accessor: pulumi.Input[str],
secret_key: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
push_info: Optional[pulumi.Input[str]] = None,
username_format: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a MfaDuo resource.
:param pulumi.Input[str] api_hostname: `(string: <required>)` - API hostname for Duo.
:param pulumi.Input[str] integration_key: `(string: <required>)` - Integration key for Duo.
:param pulumi.Input[str] mount_accessor: `(string: <required>)` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Aliases associated with this mount as the username in the mapping.
:param pulumi.Input[str] secret_key: `(string: <required>)` - Secret key for Duo.
:param pulumi.Input[str] name: `(string: <required>)` – Name of the MFA method.
:param pulumi.Input[str] push_info: `(string)` - Push information for Duo.
:param pulumi.Input[str] username_format: `(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{alias.name}}@example.com"`. If blank, the Alias's Name field will be used as-is. Currently-supported mappings:
- alias.name: The name returned by the mount configured via the `mount_accessor` parameter
- entity.name: The name configured for the Entity
- alias.metadata.`<key>`: The value of the Alias's metadata parameter
- entity.metadata.`<key>`: The value of the Entity's metadata parameter
"""
pulumi.set(__self__, "api_hostname", api_hostname)
pulumi.set(__self__, "integration_key", integration_key)
pulumi.set(__self__, "mount_accessor", mount_accessor)
pulumi.set(__self__, "secret_key", secret_key)
if name is not None:
pulumi.set(__self__, "name", name)
if push_info is not None:
pulumi.set(__self__, "push_info", push_info)
if username_format is not None:
pulumi.set(__self__, "username_format", username_format)
@property
@pulumi.getter(name="apiHostname")
def api_hostname(self) -> pulumi.Input[str]:
"""
`(string: <required>)` - API hostname for Duo.
"""
return pulumi.get(self, "api_hostname")
@api_hostname.setter
def api_hostname(self, value: pulumi.Input[str]):
pulumi.set(self, "api_hostname", value)
@property
@pulumi.getter(name="integrationKey")
def integration_key(self) -> pulumi.Input[str]:
"""
`(string: <required>)` - Integration key for Duo.
"""
return pulumi.get(self, "integration_key")
@integration_key.setter
def integration_key(self, value: pulumi.Input[str]):
pulumi.set(self, "integration_key", value)
@property
@pulumi.getter(name="mountAccessor")
def mount_accessor(self) -> pulumi.Input[str]:
"""
`(string: <required>)` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Aliases associated with this mount as the username in the mapping.
"""
return pulumi.get(self, "mount_accessor")
@mount_accessor.setter
def mount_accessor(self, value: pulumi.Input[str]):
pulumi.set(self, "mount_accessor", value)
@property
@pulumi.getter(name="secretKey")
def secret_key(self) -> pulumi.Input[str]:
"""
`(string: <required>)` - Secret key for Duo.
"""
return pulumi.get(self, "secret_key")
@secret_key.setter
def secret_key(self, value: pulumi.Input[str]):
pulumi.set(self, "secret_key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
`(string: <required>)` – Name of the MFA method.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="pushInfo")
def push_info(self) -> Optional[pulumi.Input[str]]:
"""
`(string)` - Push information for Duo.
"""
return pulumi.get(self, "push_info")
@push_info.setter
def push_info(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "push_info", value)
@property
@pulumi.getter(name="usernameFormat")
def username_format(self) -> Optional[pulumi.Input[str]]:
"""
`(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{alias.name}}@example.com"`. If blank, the Alias's Name field will be used as-is. Currently-supported mappings:
- alias.name: The name returned by the mount configured via the `mount_accessor` parameter
- entity.name: The name configured for the Entity
- alias.metadata.`<key>`: The value of the Alias's metadata parameter
- entity.metadata.`<key>`: The value of the Entity's metadata parameter
"""
return pulumi.get(self, "username_format")
@username_format.setter
def username_format(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username_format", value)
@pulumi.input_type
class _MfaDuoState:
def __init__(__self__, *,
api_hostname: Optional[pulumi.Input[str]] = None,
integration_key: Optional[pulumi.Input[str]] = None,
mount_accessor: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
push_info: Optional[pulumi.Input[str]] = None,
secret_key: Optional[pulumi.Input[str]] = None,
username_format: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering MfaDuo resources.
:param pulumi.Input[str] api_hostname: `(string: <required>)` - API hostname for Duo.
:param pulumi.Input[str] integration_key: `(string: <required>)` - Integration key for Duo.
:param pulumi.Input[str] mount_accessor: `(string: <required>)` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Aliases associated with this mount as the username in the mapping.
:param pulumi.Input[str] name: `(string: <required>)` – Name of the MFA method.
:param pulumi.Input[str] push_info: `(string)` - Push information for Duo.
:param pulumi.Input[str] secret_key: `(string: <required>)` - Secret key for Duo.
:param pulumi.Input[str] username_format: `(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{alias.name}}@example.com"`. If blank, the Alias's Name field will be used as-is. Currently-supported mappings:
- alias.name: The name returned by the mount configured via the `mount_accessor` parameter
- entity.name: The name configured for the Entity
- alias.metadata.`<key>`: The value of the Alias's metadata parameter
- entity.metadata.`<key>`: The value of the Entity's metadata parameter
"""
if api_hostname is not None:
pulumi.set(__self__, "api_hostname", api_hostname)
if integration_key is not None:
pulumi.set(__self__, "integration_key", integration_key)
if mount_accessor is not None:
pulumi.set(__self__, "mount_accessor", mount_accessor)
if name is not None:
pulumi.set(__self__, "name", name)
if push_info is not None:
pulumi.set(__self__, "push_info", push_info)
if secret_key is not None:
pulumi.set(__self__, "secret_key", secret_key)
if username_format is not None:
pulumi.set(__self__, "username_format", username_format)
@property
@pulumi.getter(name="apiHostname")
def api_hostname(self) -> Optional[pulumi.Input[str]]:
"""
`(string: <required>)` - API hostname for Duo.
"""
return pulumi.get(self, "api_hostname")
@api_hostname.setter
def api_hostname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_hostname", value)
@property
@pulumi.getter(name="integrationKey")
def integration_key(self) -> Optional[pulumi.Input[str]]:
"""
`(string: <required>)` - Integration key for Duo.
"""
return pulumi.get(self, "integration_key")
@integration_key.setter
def integration_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "integration_key", value)
@property
@pulumi.getter(name="mountAccessor")
def mount_accessor(self) -> Optional[pulumi.Input[str]]:
"""
`(string: <required>)` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Aliases associated with this mount as the username in the mapping.
"""
return pulumi.get(self, "mount_accessor")
@mount_accessor.setter
def mount_accessor(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mount_accessor", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
`(string: <required>)` – Name of the MFA method.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="pushInfo")
def push_info(self) -> Optional[pulumi.Input[str]]:
"""
`(string)` - Push information for Duo.
"""
return pulumi.get(self, "push_info")
@push_info.setter
def push_info(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "push_info", value)
@property
@pulumi.getter(name="secretKey")
def secret_key(self) -> Optional[pulumi.Input[str]]:
"""
`(string: <required>)` - Secret key for Duo.
"""
return pulumi.get(self, "secret_key")
@secret_key.setter
def secret_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secret_key", value)
@property
@pulumi.getter(name="usernameFormat")
def username_format(self) -> Optional[pulumi.Input[str]]:
"""
`(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{alias.name}}@example.com"`. If blank, the Alias's Name field will be used as-is. Currently-supported mappings:
- alias.name: The name returned by the mount configured via the `mount_accessor` parameter
- entity.name: The name configured for the Entity
- alias.metadata.`<key>`: The value of the Alias's metadata parameter
- entity.metadata.`<key>`: The value of the Entity's metadata parameter
"""
return pulumi.get(self, "username_format")
@username_format.setter
def username_format(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username_format", value)
class MfaDuo(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_hostname: Optional[pulumi.Input[str]] = None,
integration_key: Optional[pulumi.Input[str]] = None,
mount_accessor: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
push_info: Optional[pulumi.Input[str]] = None,
secret_key: Optional[pulumi.Input[str]] = None,
username_format: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a resource to manage [Duo MFA](https://www.vaultproject.io/docs/enterprise/mfa/mfa-duo.html).
**Note** this feature is available only with Vault Enterprise.
## Example Usage
```python
import pulumi
import pulumi_vault as vault
userpass = vault.AuthBackend("userpass",
type="userpass",
path="userpass")
my_duo = vault.MfaDuo("myDuo",
mount_accessor=userpass.accessor,
secret_key="8C7THtrIigh2rPZQMbguugt8IUftWhMRCOBzbuyz",
integration_key="BIACEUEAXI20BNWTEYXT",
api_hostname="api-2b5c39f5.duosecurity.com")
```
## Import
Mounts can be imported using the `path`, e.g.
```sh
$ pulumi import vault:index/mfaDuo:MfaDuo my_duo my_duo
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_hostname: `(string: <required>)` - API hostname for Duo.
:param pulumi.Input[str] integration_key: `(string: <required>)` - Integration key for Duo.
:param pulumi.Input[str] mount_accessor: `(string: <required>)` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Aliases associated with this mount as the username in the mapping.
:param pulumi.Input[str] name: `(string: <required>)` – Name of the MFA method.
:param pulumi.Input[str] push_info: `(string)` - Push information for Duo.
:param pulumi.Input[str] secret_key: `(string: <required>)` - Secret key for Duo.
:param pulumi.Input[str] username_format: `(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{alias.name}}@example.com"`. If blank, the Alias's Name field will be used as-is. Currently-supported mappings:
- alias.name: The name returned by the mount configured via the `mount_accessor` parameter
- entity.name: The name configured for the Entity
- alias.metadata.`<key>`: The value of the Alias's metadata parameter
- entity.metadata.`<key>`: The value of the Entity's metadata parameter
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: MfaDuoArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a resource to manage [Duo MFA](https://www.vaultproject.io/docs/enterprise/mfa/mfa-duo.html).
**Note** this feature is available only with Vault Enterprise.
## Example Usage
```python
import pulumi
import pulumi_vault as vault
userpass = vault.AuthBackend("userpass",
type="userpass",
path="userpass")
my_duo = vault.MfaDuo("myDuo",
mount_accessor=userpass.accessor,
secret_key="8C7THtrIigh2rPZQMbguugt8IUftWhMRCOBzbuyz",
integration_key="BIACEUEAXI20BNWTEYXT",
api_hostname="api-2b5c39f5.duosecurity.com")
```
## Import
Mounts can be imported using the `path`, e.g.
```sh
$ pulumi import vault:index/mfaDuo:MfaDuo my_duo my_duo
```
:param str resource_name: The name of the resource.
:param MfaDuoArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(MfaDuoArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_hostname: Optional[pulumi.Input[str]] = None,
integration_key: Optional[pulumi.Input[str]] = None,
mount_accessor: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
push_info: Optional[pulumi.Input[str]] = None,
secret_key: Optional[pulumi.Input[str]] = None,
username_format: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = MfaDuoArgs.__new__(MfaDuoArgs)
if api_hostname is None and not opts.urn:
raise TypeError("Missing required property 'api_hostname'")
__props__.__dict__["api_hostname"] = api_hostname
if integration_key is None and not opts.urn:
raise TypeError("Missing required property 'integration_key'")
__props__.__dict__["integration_key"] = integration_key
if mount_accessor is None and not opts.urn:
raise TypeError("Missing required property 'mount_accessor'")
__props__.__dict__["mount_accessor"] = mount_accessor
__props__.__dict__["name"] = name
__props__.__dict__["push_info"] = push_info
if secret_key is None and not opts.urn:
raise TypeError("Missing required property 'secret_key'")
__props__.__dict__["secret_key"] = secret_key
__props__.__dict__["username_format"] = username_format
super(MfaDuo, __self__).__init__(
'vault:index/mfaDuo:MfaDuo',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
api_hostname: Optional[pulumi.Input[str]] = None,
integration_key: Optional[pulumi.Input[str]] = None,
mount_accessor: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
push_info: Optional[pulumi.Input[str]] = None,
secret_key: Optional[pulumi.Input[str]] = None,
username_format: Optional[pulumi.Input[str]] = None) -> 'MfaDuo':
"""
Get an existing MfaDuo resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_hostname: `(string: <required>)` - API hostname for Duo.
:param pulumi.Input[str] integration_key: `(string: <required>)` - Integration key for Duo.
:param pulumi.Input[str] mount_accessor: `(string: <required>)` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Aliases associated with this mount as the username in the mapping.
:param pulumi.Input[str] name: `(string: <required>)` – Name of the MFA method.
:param pulumi.Input[str] push_info: `(string)` - Push information for Duo.
:param pulumi.Input[str] secret_key: `(string: <required>)` - Secret key for Duo.
:param pulumi.Input[str] username_format: `(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{alias.name}}@example.com"`. If blank, the Alias's Name field will be used as-is. Currently-supported mappings:
- alias.name: The name returned by the mount configured via the `mount_accessor` parameter
- entity.name: The name configured for the Entity
- alias.metadata.`<key>`: The value of the Alias's metadata parameter
- entity.metadata.`<key>`: The value of the Entity's metadata parameter
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _MfaDuoState.__new__(_MfaDuoState)
__props__.__dict__["api_hostname"] = api_hostname
__props__.__dict__["integration_key"] = integration_key
__props__.__dict__["mount_accessor"] = mount_accessor
__props__.__dict__["name"] = name
__props__.__dict__["push_info"] = push_info
__props__.__dict__["secret_key"] = secret_key
__props__.__dict__["username_format"] = username_format
return MfaDuo(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="apiHostname")
def api_hostname(self) -> pulumi.Output[str]:
"""
`(string: <required>)` - API hostname for Duo.
"""
return pulumi.get(self, "api_hostname")
@property
@pulumi.getter(name="integrationKey")
def integration_key(self) -> pulumi.Output[str]:
"""
`(string: <required>)` - Integration key for Duo.
"""
return pulumi.get(self, "integration_key")
@property
@pulumi.getter(name="mountAccessor")
def mount_accessor(self) -> pulumi.Output[str]:
"""
`(string: <required>)` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Aliases associated with this mount as the username in the mapping.
"""
return pulumi.get(self, "mount_accessor")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
`(string: <required>)` – Name of the MFA method.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="pushInfo")
def push_info(self) -> pulumi.Output[Optional[str]]:
"""
`(string)` - Push information for Duo.
"""
return pulumi.get(self, "push_info")
@property
@pulumi.getter(name="secretKey")
def secret_key(self) -> pulumi.Output[str]:
"""
`(string: <required>)` - Secret key for Duo.
"""
return pulumi.get(self, "secret_key")
@property
@pulumi.getter(name="usernameFormat")
def username_format(self) -> pulumi.Output[Optional[str]]:
"""
`(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{alias.name}}@example.com"`. If blank, the Alias's Name field will be used as-is. Currently-supported mappings:
- alias.name: The name returned by the mount configured via the `mount_accessor` parameter
- entity.name: The name configured for the Entity
- alias.metadata.`<key>`: The value of the Alias's metadata parameter
- entity.metadata.`<key>`: The value of the Entity's metadata parameter
"""
return pulumi.get(self, "username_format")
| 46.865759
| 303
| 0.636639
|
40b8cadadaa9c20a862062cf0e39883f513b9a89
| 4,686
|
py
|
Python
|
smacc2_sm_reference_library/sm_dance_bot_warehouse_2/launch/gazebo_launch.py
|
droidware-ai/SMACC2
|
1aa0680f71c1b22f06e1a9b129a0a42b36911139
|
[
"Apache-2.0"
] | 48
|
2021-05-28T01:33:20.000Z
|
2022-03-24T03:16:03.000Z
|
smacc2_sm_reference_library/sm_dance_bot_warehouse_2/launch/gazebo_launch.py
|
droidware-ai/SMACC2
|
1aa0680f71c1b22f06e1a9b129a0a42b36911139
|
[
"Apache-2.0"
] | 75
|
2021-06-25T22:11:21.000Z
|
2022-03-30T13:05:38.000Z
|
smacc2_sm_reference_library/sm_dance_bot_warehouse_2/launch/gazebo_launch.py
|
droidware-ai/SMACC2
|
1aa0680f71c1b22f06e1a9b129a0a42b36911139
|
[
"Apache-2.0"
] | 14
|
2021-06-16T12:10:57.000Z
|
2022-03-01T18:23:27.000Z
|
# Copyright 2021 RobosoftAI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, ExecuteProcess
from launch.conditions import IfCondition, UnlessCondition
from launch.substitutions import LaunchConfiguration, PythonExpression
from launch_ros.actions.node import Node
def generate_launch_description():
    declare_use_simulator_cmd = DeclareLaunchArgument(
        "use_simulator", default_value="False", description="Whether to start the simulator"
    )
use_simulator = LaunchConfiguration("use_simulator")
world = LaunchConfiguration("world")
headless = LaunchConfiguration("headless")
show_gz_lidar = LaunchConfiguration("show_gz_lidar")
sm_dance_bot_warehouse_2_dir = get_package_share_directory("sm_dance_bot_warehouse_2")
launch_dir = os.path.join(sm_dance_bot_warehouse_2_dir, "launch")
declare_use_simulator_cmd = DeclareLaunchArgument(
"use_simulator", default_value="True", description="Whether to start the simulator"
)
declare_headless_simulator_argument = DeclareLaunchArgument(
"headless", default_value="True", description="Whether to execute gzclient)"
)
declare_show_gz_lidar = DeclareLaunchArgument(
"show_gz_lidar",
default_value="True",
description="Whether to apply a namespace to the navigation stack",
)
declare_world_cmd = DeclareLaunchArgument(
"world",
default_value=os.path.join(sm_dance_bot_warehouse_2_dir, "worlds", "industrial_sim.world"),
description="Full path to world model file to load",
condition=IfCondition(show_gz_lidar),
)
# nolidar world
declare_world_cmd_2 = DeclareLaunchArgument(
"world",
default_value=os.path.join(sm_dance_bot_warehouse_2_dir, "worlds", "industrial_sim.world"),
description="Full path to world model file to load",
condition=UnlessCondition(show_gz_lidar),
)
declare_urdf = DeclareLaunchArgument(
"urdf",
default_value=os.path.join(
sm_dance_bot_warehouse_2_dir, "models", "turtlebot3_waffle", "model.sdf"
),
description="",
)
# Create the launch description and populate
ld = LaunchDescription()
xtermprefix = (
"xterm -xrm 'XTerm*scrollBar: true' -xrm 'xterm*rightScrollBar: true' "
"-hold -geometry 1000x600 -sl 10000 -e"
)
gzenv = dict(os.environ)
model_database_uri = os.environ["GAZEBO_MODEL_PATH"]
gzenv["GAZEBO_MODEL_DATABASE_URI"] = model_database_uri
# Specify the actions
start_gazebo_server_cmd = ExecuteProcess(
condition=IfCondition(use_simulator),
cmd=[
"gzserver",
"-s",
"libgazebo_ros_init.so",
"-s",
"libgazebo_ros_factory.so",
world,
"--verbose",
],
env=gzenv,
cwd=[launch_dir],
output="screen",
prefix=xtermprefix,
)
start_gazebo_client_cmd = ExecuteProcess(
condition=IfCondition(PythonExpression([use_simulator, " and not ", headless])),
cmd=["gzclient"],
cwd=[launch_dir],
env=gzenv,
output="screen",
)
spawn_entity_node = Node(
package="gazebo_ros",
executable="spawn_entity.py",
name="gazebo_ros",
arguments=[
"-entity",
"turtlebot3_waffle",
"-file",
LaunchConfiguration("urdf"),
"-x",
"0",
"-y",
"0",
"-z",
"0.5",
"-Y",
"0",
],
)
# Add any conditioned actions
ld.add_action(declare_headless_simulator_argument)
ld.add_action(declare_use_simulator_cmd)
ld.add_action(declare_world_cmd)
ld.add_action(declare_world_cmd_2)
ld.add_action(declare_urdf)
ld.add_action(declare_show_gz_lidar)
ld.add_action(start_gazebo_server_cmd)
ld.add_action(start_gazebo_client_cmd)
ld.add_action(spawn_entity_node)
return ld
| 31.662162
| 99
| 0.673496
|
fcfef06e483ef27f4bfa8e7838435f27896ab732
| 488
|
py
|
Python
|
clubsuite/suite/migrations/0031_event_image.py
|
fsxfreak/club-suite
|
312b5f41e7ff3f9dd332332f74d77c0667a04404
|
[
"MIT"
] | 14
|
2017-03-08T03:35:01.000Z
|
2021-01-03T00:26:18.000Z
|
clubsuite/suite/migrations/0031_event_image.py
|
fsxfreak/club-suite
|
312b5f41e7ff3f9dd332332f74d77c0667a04404
|
[
"MIT"
] | 44
|
2017-03-07T08:01:28.000Z
|
2017-03-16T07:50:05.000Z
|
clubsuite/suite/migrations/0031_event_image.py
|
fsxfreak/club-suite
|
312b5f41e7ff3f9dd332332f74d77c0667a04404
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-04 21:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('suite', '0030_merge_20170304_2119'),
]
operations = [
migrations.AddField(
model_name='event',
name='image',
field=models.ImageField(default='static/media/special_event.png', upload_to=''),
),
]
| 23.238095
| 92
| 0.625
|
ce056f67be2a3ed64284ee54f90379537fcae589
| 1,273
|
py
|
Python
|
keras_guide/4-regressor_example.py
|
giraffe-tree/play-tf
|
30f39f228d55fdeb35f1bd420b3bb29ecd3ade96
|
[
"MIT"
] | null | null | null |
keras_guide/4-regressor_example.py
|
giraffe-tree/play-tf
|
30f39f228d55fdeb35f1bd420b3bb29ecd3ade96
|
[
"MIT"
] | null | null | null |
keras_guide/4-regressor_example.py
|
giraffe-tree/play-tf
|
30f39f228d55fdeb35f1bd420b3bb29ecd3ade96
|
[
"MIT"
] | null | null | null |
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
'''
Tutorial URL:
https://morvanzhou.github.io/tutorials/machine-learning/keras/2-1-regressor/
'''
# create some data
X = np.linspace(-1, 1, 200)
np.random.shuffle(X) # randomize the data
Y = 0.5 * X + 2 + np.random.normal(0, 0.05, (200, ))
# plot data
plt.scatter(X, Y)
plt.show()
X_train, Y_train = X[:160], Y[:160] # first 160 data points
X_test, Y_test = X[160:], Y[160:] # last 40 data points
# build a neural network from the 1st layer to the last layer
model = Sequential()
model.add(Dense(units=1, input_dim=1))
# choose loss function and optimizing method
model.compile(loss='mse', optimizer='sgd')
# training
print('Training -----------')
for step in range(301):
cost = model.train_on_batch(X_train, Y_train)
if step % 100 == 0:
print('train cost: ', cost)
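# Alternative (sketch, assuming the standard Keras fit API): the manual
# train_on_batch loop above is roughly equivalent to a single full-batch call:
#   model.fit(X_train, Y_train, epochs=300, batch_size=160, verbose=0)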
# test
print('\nTesting ------------')
cost = model.evaluate(X_test, Y_test, batch_size=40)
print('test cost:', cost)
W, b = model.layers[0].get_weights()
print('Weights=', W, '\nbiases=', b)
# plotting the prediction
Y_pred = model.predict(X_test)
plt.scatter(X_test, Y_test)
plt.plot(X_test, Y_pred)
plt.show()
| 24.960784
| 76
| 0.681854
|
c4ce40e3751bc9fca5aa7f751e79cf10fbc17716
| 20,556
|
py
|
Python
|
eve_ros2_examples/whole_body_robot_bringup.py
|
Halodi/eve-ros2-examples
|
22e8f084986ec1f680e3f77814eb008394a3f5d3
|
[
"Apache-2.0"
] | 2
|
2021-07-08T13:58:57.000Z
|
2021-11-04T12:02:12.000Z
|
eve_ros2_examples/whole_body_robot_bringup.py
|
Halodi/eve-ros2-examples
|
22e8f084986ec1f680e3f77814eb008394a3f5d3
|
[
"Apache-2.0"
] | 6
|
2021-04-06T17:12:47.000Z
|
2021-06-16T13:24:00.000Z
|
eve_ros2_examples/whole_body_robot_bringup.py
|
Halodi/eve-ros2-examples
|
22e8f084986ec1f680e3f77814eb008394a3f5d3
|
[
"Apache-2.0"
] | 3
|
2021-07-08T13:59:00.000Z
|
2022-01-23T20:52:00.000Z
|
#!/usr/bin/env python3
# Copyright 2021 Halodi Robotics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import numpy as np
import rclpy
import rclpy.qos
from action_msgs.msg import GoalStatus
from builtin_interfaces.msg import Duration
from halodi_msgs.msg import (
JointName,
JointSpaceCommand,
ReferenceFrameName,
TaskSpaceCommand,
TrajectoryInterpolation,
WholeBodyTrajectory,
WholeBodyTrajectoryPoint,
)
from rclpy.node import Node
from scipy.spatial.transform import Rotation
from unique_identifier_msgs.msg import UUID
def generate_uuid_msg():
"""Generates a UUID msg based on the current time.
Parameters: None
Returns: UUID msg
"""
return UUID(uuid=np.asarray(list(uuid.uuid1().bytes)).astype(np.uint8))
def generate_task_space_command_msg(
body_frame_id, expressed_in_frame_id, xyzrpy, z_up=True
):
"""Generates a task space command msg.
Parameters:
- body_frame_id (enum): body part to be moved, e.g. ReferenceFrameName.PELVIS
- expressed_in_frame_id (enum): reference frame for body_frame_id, e.g. ReferenceFrameName.BASE
- xyzrpy (array of 6 floats): desired pose of body_frame_id relative to expressed_in_frame_id
, as a list/tuple/1D np.array of [ posX, posY, posZ, rotX, rotY, rotZ ]
- z_up (bool): whether or not xyzrpy follows the Z-up co-ordinate convention. Default: True
Returns: TaskSpaceCommand msg
"""
msg_ = TaskSpaceCommand(express_in_z_up=z_up)
msg_.body_frame.frame_id = body_frame_id
msg_.expressed_in_frame.frame_id = expressed_in_frame_id
msg_.pose.position.x = xyzrpy[0]
msg_.pose.position.y = xyzrpy[1]
msg_.pose.position.z = xyzrpy[2]
quat_ = Rotation.from_euler("xyz", xyzrpy[3:]).as_quat() # Euler to quaternion
msg_.pose.orientation.x = quat_[0]
msg_.pose.orientation.y = quat_[1]
msg_.pose.orientation.z = quat_[2]
msg_.pose.orientation.w = quat_[3]
return msg_
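# Usage sketch (illustrative helper, not part of the original example): a task space
# command that keeps the pelvis at the nominal 0.91 m above the base with no rotation,
# matching the poses commanded in run_warmup_loop() below.
def example_pelvis_at_nominal_height():
    return generate_task_space_command_msg(
        ReferenceFrameName.PELVIS,
        ReferenceFrameName.BASE,
        [0.0, 0.0, 0.91, 0.0, 0.0, 0.0],
    )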
def generate_joint_space_command_msg(
joint_id, q_desired, qd_desired=0.0, qdd_desired=0.0
):
"""Generates a joint space command msg.
This msg has additional gains fields. If you do not wish to set these yourself,
please ensure that the use_default_gains bool is set to True.
Msgs generated by this function have use_default_gains set to True.
Parameters:
- joint_id (enum): joint to be moved, e.g. JointName.NECK_PITCH
- q_desired (float): desired final joint position
    - qd_desired (float): desired final joint velocity. Default: 0.0
    - qdd_desired (float): desired final joint acceleration. Default: 0.0
Returns: JointSpaceCommand msg
"""
msg_ = JointSpaceCommand(joint=JointName(joint_id=joint_id), use_default_gains=True)
msg_.q_desired = q_desired
msg_.qd_desired = qd_desired
msg_.qdd_desired = qdd_desired
return msg_
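# Usage sketch (illustrative helper, not part of the original example): a default-gain
# joint command that pitches the neck to 0.3 rad, as done near the end of
# run_warmup_loop() below.
def example_neck_pitch_command():
    return generate_joint_space_command_msg(JointName.NECK_PITCH, 0.3)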
class WholeBodyTrajectoryPublisher(Node):
"""A helper/example class to publish whole body trajectory messages.
Constructor parameters:
- initial_trajectory_msg (WholeBodyTrajectory): if not None, this is published first.
Default: None
- periodic_trajectory_msg (WholeBodyTrajectory): if not None, this is published
on a loop upon completion of initial_trajectory_msg if it was provided. Default: None
"""
def __init__(self, initial_trajectory_msg=None, periodic_trajectory_msg=None):
super().__init__(
"whole_body_robot_bringup"
) # initialize the underlying Node with the name whole_body_robot_bringup
self._publisher = self.create_publisher(
WholeBodyTrajectory, "/eve/whole_body_trajectory", 10
)
self._subscriber = self.create_subscription(
GoalStatus, "/eve/whole_body_trajectory_status", self.goal_status_cb, 10
) # create a GoalStatus subscriber with inbound queue size of 10
if initial_trajectory_msg is not None:
initial_trajectory_msg.trajectory_id = generate_uuid_msg() # populate UUID
self.get_logger().info("Publishing initial trajectory ...")
self._publisher.publish(
initial_trajectory_msg
) # publish initial_trajectory_msg
else:
periodic_trajectory_msg.trajectory_id = generate_uuid_msg() # populate UUID
self.get_logger().info("Publishing first periodic trajectory ...")
self._publisher.publish(
periodic_trajectory_msg
) # publish periodic_trajectory_msg instead
# store periodic_trajectory_msg for re-publishing in goal_status_cb
self._periodic_trajectory_msg = periodic_trajectory_msg
timer_period = 1.0 # seconds
self.timer = self.create_timer(timer_period, self.timer_callback)
self.status_msg_received_ever = False
def timer_callback(self):
if not self.status_msg_received_ever:
self.get_logger().info("Publishing msg from timer")
self._publisher.publish(self._periodic_trajectory_msg)
def goal_status_cb(self, msg):
"""GoalStatus callback. Logs/prints some statuses and re-pubishes
periodic_trajectory_msg if it was provided to the constructor.
Parameters:
- msg (GoalStatus): msg from a GoalStatus subscription
Returns: None
"""
if not self.status_msg_received_ever:
self.timer.cancel()
self.get_logger().info("Timer is cancelled")
self.status_msg_received_ever = True
if msg.status == GoalStatus.STATUS_ACCEPTED:
self.get_logger().info("Goal accepted")
elif msg.status == GoalStatus.STATUS_CANCELED:
self.get_logger().info("Goal canceled")
elif msg.status == GoalStatus.STATUS_ABORTED:
self.get_logger().info("Goal aborted")
elif msg.status == GoalStatus.STATUS_SUCCEEDED:
self.get_logger().info("Goal succeeded!")
if self._periodic_trajectory_msg is not None:
self.get_logger().info("Republishing periodic trajectory ...")
self._periodic_trajectory_msg.trajectory_id = generate_uuid_msg()
self._publisher.publish(self._periodic_trajectory_msg)
def run_warmup_loop(args=None):
"""An example function that moves all the joints in a repeated movement sequence.
Parameters:
- args (?): for rclpy.init(). Default: None
Returns: None
"""
NOMINAL_PELVIS_HEIGHT_ABOVE_BASE = 0.91
cumulative_seconds_from_start_ = 0
cumulative_seconds_from_start_ = cumulative_seconds_from_start_ + 3
periodic_trajectory_pt_msg_1_ = WholeBodyTrajectoryPoint(
time_from_start=Duration(sec=cumulative_seconds_from_start_)
) # create a trajectory point msg, timestamped for 3 seconds in the future
periodic_trajectory_pt_msg_1_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_SHOULDER_PITCH, 0.5)
) # append a desired joint position of 0.5 radians for the pitch of the right shoulder
periodic_trajectory_pt_msg_1_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_SHOULDER_ROLL, -0.1)
)
periodic_trajectory_pt_msg_1_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_SHOULDER_YAW, -0.2)
)
periodic_trajectory_pt_msg_1_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_ELBOW_PITCH, -0.2)
)
periodic_trajectory_pt_msg_1_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_ELBOW_YAW, -0.2)
)
periodic_trajectory_pt_msg_1_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_WRIST_PITCH, 0.0)
)
periodic_trajectory_pt_msg_1_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_WRIST_ROLL, 0.0)
)
periodic_trajectory_pt_msg_1_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_SHOULDER_PITCH, 0.5)
)
periodic_trajectory_pt_msg_1_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_SHOULDER_ROLL, 0.1)
)
periodic_trajectory_pt_msg_1_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_SHOULDER_YAW, 0.2)
)
periodic_trajectory_pt_msg_1_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_ELBOW_PITCH, -0.2)
)
periodic_trajectory_pt_msg_1_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_ELBOW_YAW, 0.2)
)
periodic_trajectory_pt_msg_1_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_WRIST_PITCH, 0.0)
)
periodic_trajectory_pt_msg_1_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_WRIST_ROLL, 0.0)
)
periodic_trajectory_pt_msg_1_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.NECK_PITCH, 0.0)
)
periodic_trajectory_pt_msg_1_.task_space_commands.append(
generate_task_space_command_msg(
ReferenceFrameName.PELVIS,
ReferenceFrameName.BASE,
[0.0, 0.0, NOMINAL_PELVIS_HEIGHT_ABOVE_BASE, 0.0, 0.0, np.deg2rad(0.0)],
)
) # append a desired task space pose for the pelvis WRT base
# [posX, posY, posZ, roll, pitch, yaw]
cumulative_seconds_from_start_ = cumulative_seconds_from_start_ + 1
periodic_trajectory_pt_msg_2_ = WholeBodyTrajectoryPoint(
time_from_start=Duration(sec=cumulative_seconds_from_start_)
) # create another trajectory point msg, 1 additional second in the future
periodic_trajectory_pt_msg_2_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_SHOULDER_ROLL, -1.5)
)
periodic_trajectory_pt_msg_2_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_ELBOW_PITCH, -1.5)
)
periodic_trajectory_pt_msg_2_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_SHOULDER_ROLL, 1.5)
)
periodic_trajectory_pt_msg_2_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_ELBOW_PITCH, -1.5)
)
cumulative_seconds_from_start_ = cumulative_seconds_from_start_ + 1
periodic_trajectory_pt_msg_3_ = WholeBodyTrajectoryPoint(
time_from_start=Duration(sec=cumulative_seconds_from_start_)
)
periodic_trajectory_pt_msg_3_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_SHOULDER_YAW, 0.5)
)
periodic_trajectory_pt_msg_3_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_ELBOW_YAW, 0.5)
)
periodic_trajectory_pt_msg_3_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_WRIST_PITCH, 0.5)
)
periodic_trajectory_pt_msg_3_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_WRIST_ROLL, 0.5)
)
periodic_trajectory_pt_msg_3_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_SHOULDER_YAW, -0.5)
)
periodic_trajectory_pt_msg_3_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_ELBOW_YAW, -0.5)
)
periodic_trajectory_pt_msg_3_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_WRIST_PITCH, 0.5)
)
periodic_trajectory_pt_msg_3_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_WRIST_ROLL, -0.5)
)
cumulative_seconds_from_start_ = cumulative_seconds_from_start_ + 1
periodic_trajectory_pt_msg_4_ = WholeBodyTrajectoryPoint(
time_from_start=Duration(sec=cumulative_seconds_from_start_)
)
periodic_trajectory_pt_msg_4_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_WRIST_PITCH, -0.5)
)
periodic_trajectory_pt_msg_4_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_WRIST_ROLL, -0.5)
)
periodic_trajectory_pt_msg_4_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_WRIST_PITCH, -0.5)
)
periodic_trajectory_pt_msg_4_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_WRIST_ROLL, 0.5)
)
cumulative_seconds_from_start_ = cumulative_seconds_from_start_ + 1
periodic_trajectory_pt_msg_5_ = WholeBodyTrajectoryPoint(
time_from_start=Duration(sec=cumulative_seconds_from_start_)
)
periodic_trajectory_pt_msg_5_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_SHOULDER_PITCH, -1.5)
)
periodic_trajectory_pt_msg_5_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_ELBOW_PITCH, -1.5)
)
periodic_trajectory_pt_msg_5_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_WRIST_PITCH, 0.0)
)
periodic_trajectory_pt_msg_5_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_WRIST_ROLL, 0.0)
)
periodic_trajectory_pt_msg_5_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_SHOULDER_PITCH, -1.5)
)
periodic_trajectory_pt_msg_5_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_ELBOW_PITCH, -1.5)
)
periodic_trajectory_pt_msg_5_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_WRIST_PITCH, 0.0)
)
periodic_trajectory_pt_msg_5_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_WRIST_ROLL, 0.0)
)
cumulative_seconds_from_start_ = cumulative_seconds_from_start_ + 2
periodic_trajectory_pt_msg_6_ = WholeBodyTrajectoryPoint(
time_from_start=Duration(sec=cumulative_seconds_from_start_)
)
periodic_trajectory_pt_msg_6_.task_space_commands.append(
generate_task_space_command_msg(
ReferenceFrameName.PELVIS,
ReferenceFrameName.BASE,
[
0.1,
-0.3,
NOMINAL_PELVIS_HEIGHT_ABOVE_BASE,
np.deg2rad(20.0),
0.0,
np.deg2rad(30.0),
],
)
) # move the pelvis 0.1m forward, -0.3m to the left. Roll 20 degrees and yaw 30 degrees
cumulative_seconds_from_start_ = cumulative_seconds_from_start_ + 2
periodic_trajectory_pt_msg_7_ = WholeBodyTrajectoryPoint(
time_from_start=Duration(sec=cumulative_seconds_from_start_)
)
periodic_trajectory_pt_msg_7_.task_space_commands.append(
generate_task_space_command_msg(
ReferenceFrameName.PELVIS,
ReferenceFrameName.BASE,
[
-0.1,
0.3,
NOMINAL_PELVIS_HEIGHT_ABOVE_BASE,
np.deg2rad(-20.0),
0.0,
np.deg2rad(-30.0),
],
)
)
cumulative_seconds_from_start_ = cumulative_seconds_from_start_ + 3
periodic_trajectory_pt_msg_8_ = WholeBodyTrajectoryPoint(
time_from_start=Duration(sec=cumulative_seconds_from_start_)
)
periodic_trajectory_pt_msg_8_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.NECK_PITCH, 0.3)
)
periodic_trajectory_pt_msg_8_.task_space_commands.append(
generate_task_space_command_msg(
ReferenceFrameName.PELVIS,
ReferenceFrameName.BASE,
[0.0, 0.0, 0.65, 0.0, 0.0, np.deg2rad(0.0)],
)
) # do a squat
# an extra message to make sure the trajectory ends in a safe position
cumulative_seconds_from_start_ = cumulative_seconds_from_start_ + 1
periodic_trajectory_pt_msg_9_ = WholeBodyTrajectoryPoint(
time_from_start=Duration(sec=cumulative_seconds_from_start_)
)
periodic_trajectory_pt_msg_9_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_SHOULDER_PITCH, 0.5)
)
periodic_trajectory_pt_msg_9_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_SHOULDER_ROLL, -0.1)
)
periodic_trajectory_pt_msg_9_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_SHOULDER_YAW, -0.2)
)
periodic_trajectory_pt_msg_9_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_ELBOW_PITCH, -1.5)
)
periodic_trajectory_pt_msg_9_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_ELBOW_YAW, -0.2)
)
periodic_trajectory_pt_msg_9_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_WRIST_PITCH, 0.0)
)
periodic_trajectory_pt_msg_9_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.RIGHT_WRIST_ROLL, 0.0)
)
periodic_trajectory_pt_msg_9_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_SHOULDER_PITCH, 0.5)
)
periodic_trajectory_pt_msg_9_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_SHOULDER_ROLL, 0.1)
)
periodic_trajectory_pt_msg_9_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_SHOULDER_YAW, 0.2)
)
periodic_trajectory_pt_msg_9_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_ELBOW_PITCH, -1.5)
)
periodic_trajectory_pt_msg_9_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_ELBOW_YAW, 0.2)
)
periodic_trajectory_pt_msg_9_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_WRIST_PITCH, 0.0)
)
periodic_trajectory_pt_msg_9_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.LEFT_WRIST_ROLL, 0.0)
)
periodic_trajectory_pt_msg_9_.joint_space_commands.append(
generate_joint_space_command_msg(JointName.NECK_PITCH, 0.0)
)
periodic_trajectory_pt_msg_9_.task_space_commands.append(
generate_task_space_command_msg(
ReferenceFrameName.PELVIS,
ReferenceFrameName.BASE,
[0.0, 0.0, NOMINAL_PELVIS_HEIGHT_ABOVE_BASE, 0.0, 0.0, np.deg2rad(0.0)],
)
)
periodic_trajectory_msg_ = WholeBodyTrajectory(
append_trajectory=False
) # create a whole body trajectory msg that will
# override any trajectory currently being executed
periodic_trajectory_msg_.interpolation_mode.value = (
TrajectoryInterpolation.MINIMUM_JERK_CONSTRAINED
) # choose an interpolation mode
periodic_trajectory_msg_.trajectory_points.append(
periodic_trajectory_pt_msg_1_
) # pack in all the points created above
periodic_trajectory_msg_.trajectory_points.append(periodic_trajectory_pt_msg_2_)
periodic_trajectory_msg_.trajectory_points.append(periodic_trajectory_pt_msg_3_)
periodic_trajectory_msg_.trajectory_points.append(periodic_trajectory_pt_msg_4_)
periodic_trajectory_msg_.trajectory_points.append(periodic_trajectory_pt_msg_5_)
periodic_trajectory_msg_.trajectory_points.append(periodic_trajectory_pt_msg_6_)
periodic_trajectory_msg_.trajectory_points.append(periodic_trajectory_pt_msg_7_)
periodic_trajectory_msg_.trajectory_points.append(periodic_trajectory_pt_msg_8_)
periodic_trajectory_msg_.trajectory_points.append(periodic_trajectory_pt_msg_9_)
rclpy.init(args=args) # initialize rclpy
wbtp_ = WholeBodyTrajectoryPublisher(
None, periodic_trajectory_msg_
) # create the helper class
rclpy.spin(
wbtp_
) # spin the node in the WholeBodyTrajectoryPublisher for blocking and pub/sub functionality
wbtp_.destroy_node() # shut down the node
rclpy.shutdown() # shut down rclpy
if __name__ == "__main__":
run_warmup_loop()
| 42.122951
| 99
| 0.746108
|
4fcc1fcc4178695bd9c5de3e27983311d683d3f8
| 5,269
|
py
|
Python
|
phanterpwa/interface/Admin/api/handlers.py
|
PhanterJR/phanterpwa
|
6daff40845b3a853cd08d319c4ce148f8deebed7
|
[
"MIT"
] | 2
|
2019-06-06T10:37:01.000Z
|
2021-10-16T03:36:28.000Z
|
phanterpwa/interface/Admin/api/handlers.py
|
PhanterJR/phanterpwa
|
6daff40845b3a853cd08d319c4ce148f8deebed7
|
[
"MIT"
] | null | null | null |
phanterpwa/interface/Admin/api/handlers.py
|
PhanterJR/phanterpwa
|
6daff40845b3a853cd08d319c4ce148f8deebed7
|
[
"MIT"
] | null | null | null |
import os
from phanterpwa.backend.request_handlers import (
admin,
auth,
credentials,
i18n_server,
oauth
)
# Your controllers
from .controllers import (
welcome,
developer,
)
from core import (
projectConfig,
Translator_api as Translator,
Translator_email,
Translator_captcha,
logger_api
)
from .models import *
_current_dir = os.path.join(os.path.dirname(__file__))
_debug = projectConfig['PROJECT']['debug']
SETTINGS = {
'debug': _debug
}
HANDLER = [
(r"/?(api)?/?", welcome.Welcome, dict(
projectConfig=projectConfig,
DALDatabase=db,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/websocket/?", developer.EchoWebSocket, dict(
projectConfig=projectConfig,
DALDatabase=db,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/client/?", credentials.SignClient, dict(
projectConfig=projectConfig,
DALDatabase=db,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/signforms/([0-9a-zA-Z_-]+)/?", credentials.SignForms, dict(
projectConfig=projectConfig,
DALDatabase=db,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/signcaptchaforms/([0-9a-zA-Z_-]+)/?", credentials.SignCaptchaForms, dict(
projectConfig=projectConfig,
DALDatabase=db,
        Translator_captcha=Translator_captcha,
        i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/signlockform/?", credentials.SignLockForm, dict(
projectConfig=projectConfig,
DALDatabase=db,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/resigncredentials/?", credentials.ReSing, dict(
projectConfig=projectConfig,
DALDatabase=db,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/auth/?", auth.Auth, dict(
projectConfig=projectConfig,
DALDatabase=db,
Translator_email=Translator_email,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/auth/two-factor/([0-9a-zA-Z_\-\.]+)/?", auth.TwoFactor, dict(
projectConfig=projectConfig,
DALDatabase=db,
Translator_email=Translator_email,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/auth/lock/?", auth.LockUser, dict(
projectConfig=projectConfig,
DALDatabase=db,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/auth/image/([0-9]+)/?", auth.ImageUser, dict(
projectConfig=projectConfig,
DALDatabase=db,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/auth/create/?", auth.CreateAccount, dict(
projectConfig=projectConfig,
DALDatabase=db,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/auth/change/?", auth.ChangeAccount, dict(
projectConfig=projectConfig,
DALDatabase=db,
Translator_email=Translator_email,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/auth/change-password/?", auth.ChangePassword, dict(
projectConfig=projectConfig,
DALDatabase=db,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/auth/request-password/?", auth.RequestAccount, dict(
projectConfig=projectConfig,
DALDatabase=db,
Translator_email=Translator_email,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/auth/active-account/?", auth.ActiveAccount, dict(
projectConfig=projectConfig,
DALDatabase=db,
Translator_email=Translator_email,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/oauth/prompt/([a-zA-Z_-]+)?/?", oauth.Prompt, dict(
projectConfig=projectConfig,
DALDatabase=db,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/oauth/redirect/([a-zA-Z_-]+)?/?", oauth.Redirect, dict(
projectConfig=projectConfig,
DALDatabase=db,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/admin/usermanager/([0-9]+)?/?", admin.UserManager, dict(
projectConfig=projectConfig,
DALDatabase=db,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/i18n/([0-9a-zA-Z_-]+)/?", i18n_server.I18N, dict(
projectConfig=projectConfig,
DALDatabase=db,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/projects/?([0-9a-zA-Z_-]+)?/?", developer.Projects, dict(
projectConfig=projectConfig,
DALDatabase=db,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/config/?([0-9a-zA-Z_-]+)?/?", developer.Configs, dict(
projectConfig=projectConfig,
DALDatabase=db,
i18nTranslator=Translator,
logger_api=logger_api
)),
(r"/api/automation/([0-9a-zA-Z_-]+)/?", developer.Automation, dict(
projectConfig=projectConfig,
DALDatabase=db,
i18nTranslator=Translator,
logger_api=logger_api
)),
]
| 29.768362
| 85
| 0.632568
|
53044914324e50207b58d77ee7b3ab0213e4ea75
| 680
|
py
|
Python
|
dask_sql/physical/rex/base.py
|
DaceT/dask-sql
|
c545f2bf9a786b0e9ff7f68c90da4dcc39cdcd73
|
[
"MIT"
] | 154
|
2020-08-28T03:03:39.000Z
|
2021-08-07T02:02:36.000Z
|
dask_sql/physical/rex/base.py
|
ciusji/dask-sql
|
3815a494e1e93558fca86a86d59578ba6696c9c5
|
[
"MIT"
] | 192
|
2020-08-26T20:05:24.000Z
|
2021-08-13T16:14:00.000Z
|
dask_sql/physical/rex/base.py
|
ciusji/dask-sql
|
3815a494e1e93558fca86a86d59578ba6696c9c5
|
[
"MIT"
] | 29
|
2020-08-28T17:15:51.000Z
|
2021-08-02T20:19:44.000Z
|
from typing import Any, Union
import dask.dataframe as dd
import dask_sql
from dask_sql.datacontainer import DataContainer
from dask_sql.java import org
class BaseRexPlugin:
"""
Base class for all plugins to convert between
a RexNode to a python expression (dask dataframe column or raw value).
    Derived classes need to override the class_name attribute
and the convert method.
"""
class_name = None
def convert(
self,
rex: org.apache.calcite.rex.RexNode,
dc: DataContainer,
context: "dask_sql.Context",
) -> Union[dd.Series, Any]:
"""Base method to implement"""
raise NotImplementedError
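# Sketch of the override contract described in the docstring above (hypothetical
# example, not an actual dask-sql plugin): a subclass sets class_name to the Java
# class of the RexNode it handles and implements convert.
class ExampleConstantPlugin(BaseRexPlugin):
    class_name = "org.example.ExampleRexNode"  # hypothetical Java class name
    def convert(
        self,
        rex: org.apache.calcite.rex.RexNode,
        dc: DataContainer,
        context: "dask_sql.Context",
    ) -> Union[dd.Series, Any]:
        # A real implementation would inspect `rex`; this sketch returns a constant.
        return 42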
| 23.448276
| 74
| 0.686765
|
5e04e31fb26f00a78508e883c2e2e25b9a5b3d1c
| 20,879
|
py
|
Python
|
tests/api/test_attributes.py
|
cxsper/saleor
|
5566ddcdaf8f72ba872eca869798e66eb9cdae44
|
[
"BSD-3-Clause"
] | 1
|
2019-05-19T18:41:28.000Z
|
2019-05-19T18:41:28.000Z
|
tests/api/test_attributes.py
|
cxsper/saleor
|
5566ddcdaf8f72ba872eca869798e66eb9cdae44
|
[
"BSD-3-Clause"
] | null | null | null |
tests/api/test_attributes.py
|
cxsper/saleor
|
5566ddcdaf8f72ba872eca869798e66eb9cdae44
|
[
"BSD-3-Clause"
] | 1
|
2021-04-03T10:47:36.000Z
|
2021-04-03T10:47:36.000Z
|
import graphene
import pytest
from django.db.models import Q
from django.template.defaultfilters import slugify
from saleor.graphql.product.enums import AttributeTypeEnum, AttributeValueType
from saleor.graphql.product.types.attributes import resolve_attribute_value_type
from saleor.graphql.product.utils import attributes_to_hstore
from saleor.product.models import Attribute, AttributeValue, Category
from tests.api.utils import get_graphql_content
def test_attributes_to_hstore(product, color_attribute):
color_value = color_attribute.values.first()
# test transforming slugs of existing attributes to IDs
input_data = [{"slug": color_attribute.slug, "value": color_value.slug}]
attrs_qs = product.product_type.product_attributes.all()
ids = attributes_to_hstore(input_data, attrs_qs)
assert str(color_attribute.pk) in ids
assert ids[str(color_attribute.pk)] == str(color_value.pk)
# test creating a new attribute value
input_data = [{"slug": color_attribute.slug, "value": "Space Grey"}]
ids = attributes_to_hstore(input_data, attrs_qs)
new_value = AttributeValue.objects.get(slug="space-grey")
assert str(color_attribute.pk) in ids
assert ids[str(color_attribute.pk)] == str(new_value.pk)
# test passing an attribute that doesn't belong to this product raises
# an error
input_data = [{"slug": "not-an-attribute", "value": "not-a-value"}]
with pytest.raises(ValueError):
attributes_to_hstore(input_data, attrs_qs)
def test_attributes_query(user_api_client, product):
attributes = Attribute.objects.prefetch_related("values")
query = """
query {
attributes(first: 20) {
edges {
node {
id
name
slug
values {
id
name
slug
}
}
}
}
}
"""
response = user_api_client.post_graphql(query)
content = get_graphql_content(response)
attributes_data = content["data"]["attributes"]["edges"]
assert len(attributes_data) == attributes.count()
def test_attributes_in_category_query(user_api_client, product):
category = Category.objects.first()
query = """
query {
attributes(inCategory: "%(category_id)s", first: 20) {
edges {
node {
id
name
slug
values {
id
name
slug
}
}
}
}
}
""" % {
"category_id": graphene.Node.to_global_id("Category", category.id)
}
response = user_api_client.post_graphql(query)
content = get_graphql_content(response)
attributes_data = content["data"]["attributes"]["edges"]
assert len(attributes_data) == Attribute.objects.count()
def test_attributes_in_collection_query(user_api_client, sale):
product_types = set(sale.products.all().values_list("product_type_id", flat=True))
expected_attrs = Attribute.objects.filter(
Q(product_type__in=product_types) | Q(product_variant_type__in=product_types)
)
query = """
query {
attributes(inCollection: "%(collection_id)s", first: 20) {
edges {
node {
id
name
slug
values {
id
name
slug
}
}
}
}
}
""" % {
"collection_id": graphene.Node.to_global_id("Collection", sale.id)
}
response = user_api_client.post_graphql(query)
content = get_graphql_content(response)
attributes_data = content["data"]["attributes"]["edges"]
assert len(attributes_data) == len(expected_attrs)
CREATE_ATTRIBUTES_QUERY = """
mutation createAttribute(
$name: String!, $values: [AttributeValueCreateInput],
$id: ID!, $type: AttributeTypeEnum!) {
attributeCreate(
id: $id, type: $type, input: {name: $name, values: $values}) {
errors {
field
message
}
attribute {
name
slug
values {
name
slug
}
}
productType {
id
name
}
}
}
"""
def test_create_attribute_and_attribute_values(
staff_api_client, permission_manage_products, product_type
):
query = CREATE_ATTRIBUTES_QUERY
node_id = graphene.Node.to_global_id("ProductType", product_type.id)
attribute_name = "Example name"
name = "Value name"
variables = {
"name": attribute_name,
"id": node_id,
"type": AttributeTypeEnum.PRODUCT.name,
"values": [{"name": name, "value": "#1231"}],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
assert not content["data"]["attributeCreate"]["errors"]
data = content["data"]["attributeCreate"]
assert data["attribute"]["name"] == attribute_name
assert data["attribute"]["slug"] == slugify(attribute_name)
assert len(data["attribute"]["values"]) == 1
assert data["attribute"]["values"][0]["name"] == name
assert data["attribute"]["values"][0]["slug"] == slugify(name)
attribute = Attribute.objects.get(name=attribute_name)
assert attribute in product_type.product_attributes.all()
assert data["productType"]["name"] == product_type.name
@pytest.mark.parametrize(
"name_1, name_2, error_msg",
(
("Red color", "Red color", "Provided values are not unique."),
("Red color", "red color", "Provided values are not unique."),
),
)
def test_create_attribute_and_attribute_values_errors(
staff_api_client,
name_1,
name_2,
error_msg,
permission_manage_products,
product_type,
):
query = CREATE_ATTRIBUTES_QUERY
node_id = graphene.Node.to_global_id("ProductType", product_type.id)
variables = {
"name": "Example name",
"id": node_id,
"type": AttributeTypeEnum.PRODUCT.name,
"values": [
{"name": name_1, "value": "#1231"},
{"name": name_2, "value": "#121"},
],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeCreate"]["errors"]
assert errors
assert errors[0]["field"] == "values"
assert errors[0]["message"] == error_msg
def test_create_variant_attribute(
staff_api_client, permission_manage_products, product_type
):
product_type.has_variants = True
product_type.save()
query = CREATE_ATTRIBUTES_QUERY
node_id = graphene.Node.to_global_id("ProductType", product_type.id)
attribute_name = "Example name"
variables = {
"name": attribute_name,
"id": node_id,
"type": AttributeTypeEnum.VARIANT.name,
"values": [],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
assert not content["data"]["attributeCreate"]["errors"]
attribute = Attribute.objects.get(name=attribute_name)
assert attribute in product_type.variant_attributes.all()
def test_create_attribute_incorrect_product_type_id(
staff_api_client, permission_manage_products, product_type
):
query = CREATE_ATTRIBUTES_QUERY
variables = {
"name": "Example name",
"id": "incorrect-id",
"type": AttributeTypeEnum.PRODUCT.name,
"values": [],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeCreate"]["errors"]
assert errors[0]["field"] == "id"
UPDATE_ATTRIBUTE_QUERY = """
mutation updateAttribute(
$id: ID!, $name: String!, $addValues: [AttributeValueCreateInput]!,
$removeValues: [ID]!) {
attributeUpdate(
id: $id,
input: {
name: $name, addValues: $addValues,
removeValues: $removeValues}) {
errors {
field
message
}
attribute {
name
slug
values {
name
slug
}
}
productType {
id
name
}
}
}
"""
def test_update_attribute_name(
staff_api_client, color_attribute, product_type, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
name = "Wings name"
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {"name": name, "id": node_id, "addValues": [], "removeValues": []}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
attribute.refresh_from_db()
data = content["data"]["attributeUpdate"]
assert data["attribute"]["name"] == name == attribute.name
assert data["productType"]["name"] == attribute.product_type.name
def test_update_attribute_remove_and_add_values(
staff_api_client, color_attribute, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
name = "Wings name"
attribute_value_name = "Red Color"
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
attribute_value_id = attribute.values.first().id
value_id = graphene.Node.to_global_id("AttributeValue", attribute_value_id)
variables = {
"name": name,
"id": node_id,
"addValues": [{"name": attribute_value_name, "value": "#1231"}],
"removeValues": [value_id],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
attribute.refresh_from_db()
data = content["data"]["attributeUpdate"]
assert not data["errors"]
assert data["attribute"]["name"] == name == attribute.name
assert not attribute.values.filter(pk=attribute_value_id).exists()
assert attribute.values.filter(name=attribute_value_name).exists()
def test_update_empty_attribute_and_add_values(
staff_api_client, color_attribute_without_values, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute_without_values
name = "Wings name"
attribute_value_name = "Yellow Color"
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {
"name": name,
"id": node_id,
"addValues": [{"name": attribute_value_name, "value": "#1231"}],
"removeValues": [],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
get_graphql_content(response)
attribute.refresh_from_db()
assert attribute.values.count() == 1
assert attribute.values.filter(name=attribute_value_name).exists()
@pytest.mark.parametrize(
"name_1, name_2, error_msg",
(
("Red color", "Red color", "Provided values are not unique."),
("Red color", "red color", "Provided values are not unique."),
),
)
def test_update_attribute_and_add_attribute_values_errors(
staff_api_client,
name_1,
name_2,
error_msg,
color_attribute,
permission_manage_products,
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {
"name": "Example name",
"id": node_id,
"removeValues": [],
"addValues": [{"name": name_1, "value": "#1"}, {"name": name_2, "value": "#2"}],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeUpdate"]["errors"]
assert errors
assert errors[0]["field"] == "addValues"
assert errors[0]["message"] == error_msg
def test_update_attribute_and_remove_others_attribute_value(
staff_api_client, color_attribute, size_attribute, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
size_attribute = size_attribute.values.first()
attr_id = graphene.Node.to_global_id("AttributeValue", size_attribute.pk)
variables = {
"name": "Example name",
"id": node_id,
"slug": "example-slug",
"addValues": [],
"removeValues": [attr_id],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeUpdate"]["errors"]
assert errors
assert errors[0]["field"] == "removeValues"
err_msg = "Value %s does not belong to this attribute." % str(size_attribute)
assert errors[0]["message"] == err_msg
def test_delete_attribute(
staff_api_client, color_attribute, permission_manage_products, product_type
):
attribute = color_attribute
query = """
mutation deleteAttribute($id: ID!) {
attributeDelete(id: $id) {
errors {
field
message
}
attribute {
id
}
productType {
name
}
}
}
"""
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {"id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeDelete"]
assert data["productType"]["name"] == attribute.product_type.name
with pytest.raises(attribute._meta.model.DoesNotExist):
attribute.refresh_from_db()
CREATE_ATTRIBUTE_VALUE_QUERY = """
mutation createAttributeValue(
$attributeId: ID!, $name: String!, $value: String) {
attributeValueCreate(
attribute: $attributeId, input: {name: $name, value: $value}) {
errors {
field
message
}
attribute {
values {
name
}
}
attributeValue {
name
type
slug
value
}
}
}
"""
def test_create_attribute_value(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
query = CREATE_ATTRIBUTE_VALUE_QUERY
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
name = "test name"
value = "test-string"
variables = {"name": name, "value": value, "attributeId": attribute_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueCreate"]
assert not data["errors"]
attr_data = data["attributeValue"]
assert attr_data["name"] == name
assert attr_data["slug"] == slugify(name)
assert attr_data["value"] == value
assert attr_data["type"] == "STRING"
assert name in [value["name"] for value in data["attribute"]["values"]]
def test_create_attribute_value_not_unique_name(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
query = CREATE_ATTRIBUTE_VALUE_QUERY
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
value_name = attribute.values.first().name
variables = {
"name": value_name,
"value": "test-string",
"attributeId": attribute_id,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueCreate"]
assert data["errors"]
assert data["errors"][0]["message"]
assert not data["errors"][0]["field"]
UPDATE_ATTRIBUTE_VALUE_QUERY = """
mutation updateChoice(
$id: ID!, $name: String!, $value: String) {
attributeValueUpdate(
id: $id, input: {name: $name, value: $value}) {
errors {
field
message
}
attributeValue {
name
slug
value
}
attribute {
values {
name
}
}
}
}
"""
def test_update_attribute_value(
staff_api_client, pink_attribute_value, permission_manage_products
):
query = UPDATE_ATTRIBUTE_VALUE_QUERY
value = pink_attribute_value
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
name = "Crimson name"
variables = {"name": name, "value": "#RED", "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueUpdate"]
value.refresh_from_db()
assert data["attributeValue"]["name"] == name == value.name
assert data["attributeValue"]["slug"] == slugify(name)
assert name in [value["name"] for value in data["attribute"]["values"]]
def test_update_attribute_value_name_not_unique(
staff_api_client, pink_attribute_value, permission_manage_products
):
query = UPDATE_ATTRIBUTE_VALUE_QUERY
value = pink_attribute_value.attribute.values.create(
name="Example Name", slug="example-name", value="#RED"
)
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
variables = {"name": pink_attribute_value.name, "value": "#RED", "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueUpdate"]
assert data["errors"]
assert data["errors"][0]["message"]
assert not data["errors"][0]["field"]
def test_update_same_attribute_value(
staff_api_client, pink_attribute_value, permission_manage_products
):
query = UPDATE_ATTRIBUTE_VALUE_QUERY
value = pink_attribute_value
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
attr_value = "#BLUE"
variables = {"name": value.name, "value": attr_value, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueUpdate"]
assert not data["errors"]
assert data["attributeValue"]["value"] == attr_value
def test_delete_attribute_value(
staff_api_client, color_attribute, pink_attribute_value, permission_manage_products
):
value = color_attribute.values.get(name="Red")
query = """
mutation updateChoice($id: ID!) {
attributeValueDelete(id: $id) {
attributeValue {
name
slug
}
}
}
"""
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
variables = {"id": node_id}
staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
with pytest.raises(value._meta.model.DoesNotExist):
value.refresh_from_db()
@pytest.mark.parametrize(
"raw_value, expected_type",
[
("#0000", AttributeValueType.COLOR),
("#FF69B4", AttributeValueType.COLOR),
("rgb(255, 0, 0)", AttributeValueType.COLOR),
("hsl(0, 100%, 50%)", AttributeValueType.COLOR),
("hsla(120, 60%, 70%, 0.3)", AttributeValueType.COLOR),
("rgba(100%, 255, 0, 0)", AttributeValueType.COLOR),
("http://example.com", AttributeValueType.URL),
("https://example.com", AttributeValueType.URL),
("ftp://example.com", AttributeValueType.URL),
("example.com", AttributeValueType.STRING),
("Foo", AttributeValueType.STRING),
("linear-gradient(red, yellow)", AttributeValueType.GRADIENT),
("radial-gradient(#0000, yellow)", AttributeValueType.GRADIENT),
],
)
def test_resolve_attribute_value_type(raw_value, expected_type):
assert resolve_attribute_value_type(raw_value) == expected_type
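# Illustrative sketch only: a pattern-based resolver that satisfies the
# parametrized expectations above. It is an assumption made for clarity,
# not the library's actual resolve_attribute_value_type implementation.
def _resolve_attribute_value_type_sketch(raw_value):
    import re
    # hex colors and CSS color functions (rgb/rgba/hsl/hsla)
    if re.fullmatch(r"#[0-9a-fA-F]{3,8}", raw_value) or re.match(
        r"(rgb|rgba|hsl|hsla)\(", raw_value
    ):
        return AttributeValueType.COLOR
    # CSS gradients
    if re.match(r"(linear|radial)-gradient\(", raw_value):
        return AttributeValueType.GRADIENT
    # anything with a URI scheme (http, https, ftp, ...)
    if re.match(r"[a-z]+://", raw_value):
        return AttributeValueType.URL
    return AttributeValueType.STRING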
| 32.471229
| 88
| 0.637578
|
8e85b921cc4307c99a374ba052dd5d6f58a6b223
| 474
|
py
|
Python
|
packages/python/plotly/plotly/validators/mesh3d/colorbar/_minexponent.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/mesh3d/colorbar/_minexponent.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/mesh3d/colorbar/_minexponent.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class MinexponentValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="minexponent", parent_name="mesh3d.colorbar", **kwargs
):
super(MinexponentValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
**kwargs,
)
| 31.6
| 80
| 0.647679
|
2f939a72fbb64e7dc423500b36e371b897a8fc9b
| 2,168
|
py
|
Python
|
01_Plots/plot_time_differences.py
|
awareseven/Reproducibility-and-Replicability-of-Web-Measurement-Studies
|
38953c70a9ab03e1d29e4f9c6da13ffcaaeac84b
|
[
"Apache-2.0"
] | 3
|
2022-01-27T07:36:24.000Z
|
2022-02-22T09:32:53.000Z
|
01_Plots/plot_time_differences.py
|
awareseven/Reproducibility-and-Replicability-of-Web-Measurement-Studies
|
38953c70a9ab03e1d29e4f9c6da13ffcaaeac84b
|
[
"Apache-2.0"
] | null | null | null |
01_Plots/plot_time_differences.py
|
awareseven/Reproducibility-and-Replicability-of-Web-Measurement-Studies
|
38953c70a9ab03e1d29e4f9c6da13ffcaaeac84b
|
[
"Apache-2.0"
] | 1
|
2022-02-02T08:21:39.000Z
|
2022-02-02T08:21:39.000Z
|
import matplotlib.font_manager as font_manager
import matplotlib.pyplot as plt
import pandas as pd
import os
# Read the data
path = os.path.join(os.getcwd(), "results")
df = pd.read_csv(os.path.join(path, "tracker_AND_cookies.csv"))
x = df["day"]
y1 = df["total_tracker"]
y2 = df["tracker_distinct"]
y3 = df["is_session"]
# Some styling stuff
fig, ax = plt.subplots(1, figsize=(7, 4))
legend_properties = {'weight': 'bold', 'size': 9}
font = font_manager.FontProperties(family='sans-serif',
weight='bold',
style='normal',
size=14)
plt.legend(loc='best', frameon=False, prop=font)
plt.xticks(weight='bold', fontname='sans-serif', size=14)
plt.yticks(weight='bold', fontname='sans-serif', size=14)
plt.xlabel("Measurement point", weight='bold', fontname='sans-serif', size=14)
# Add first y-axis (Number of tracking requests)
ax.plot(x, y1, color="#999999", label="Number of tracking requests", marker='o', linestyle='dashed')
ax.set_ylabel('Number of tracking requests')
ax.legend(loc=2, prop=legend_properties)
plt.ylabel("Number of tracking requests", weight='bold', fontname='sans-serif', size=14)
# Add second y-axis
ax2 = ax.twinx() # instantiate a second axes that shares the same x-axis
ax2.plot(x, y2, color="#555555", label="Number of distinct trackers", marker='x', linestyle='solid')
ax2.set_ylabel('Number of distinct trackers')
ax2.set_ylim(3500, 4200)
ax2.legend(loc=1, prop=legend_properties)
plt.ylabel("Number of distinct trackers", weight='bold', fontname='sans-serif', size=14)
plt.yticks(weight='bold', fontname='sans-serif')
# Save plot to disc
plt.grid(False)
#plt.show()
plt.savefig(path + "/04_long_term_tracker_cookies.pdf", dpi=600,
transparent=False, bbox_inches='tight', format="pdf")
# Simple min / max calculations
max_value = y1.max()
min_value = y1.min()
max_day = y1.index[df['total_tracker'] == max_value].tolist()
min_day = y1.index[df['total_tracker'] == min_value].tolist()
print("Max at: ", max_day, "max value: ", max_value)
print("Min at: ", min_day, "min value: ", min_value)
print("std:", y1.std())
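# For comparison only: the same lookups expressed with pandas' idxmax/idxmin
# (assumes the same series y1 as above; kept commented out so the script's
# output is unchanged).
# print("Max at: ", y1.idxmax(), "max value: ", y1.max())
# print("Min at: ", y1.idxmin(), "min value: ", y1.min())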
| 37.37931
| 100
| 0.683579
|
cd12dda2f23e2fd96a7cbb58e97e2ad573761bdb
| 6,567
|
py
|
Python
|
src/OCflow.py
|
lruthotto/NeuralOC
|
3f37c7349527fb1f5890077ebe987f2f77486a54
|
[
"MIT"
] | 13
|
2021-05-12T11:52:53.000Z
|
2022-03-30T14:50:28.000Z
|
src/OCflow.py
|
lruthotto/NeuralOC
|
3f37c7349527fb1f5890077ebe987f2f77486a54
|
[
"MIT"
] | null | null | null |
src/OCflow.py
|
lruthotto/NeuralOC
|
3f37c7349527fb1f5890077ebe987f2f77486a54
|
[
"MIT"
] | 3
|
2021-04-20T08:07:50.000Z
|
2022-02-15T22:37:12.000Z
|
# OCflow.py
import math
import torch
from torch.nn.functional import pad
from src.Phi import *
def OCflow(x, Phi, prob, tspan, nt, stepper="rk4", alph=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0], intermediates=False, noMean=False):
"""
main workhorse of the approach
:param x: input data tensor nex-by-d
:param Phi: neural network
    :param prob: problem object (supplies the target state xtarget, costs and Hamiltonian terms)
:param tspan: time range to integrate over, ex. [0.0 , 1.0]
:param nt: number of time steps
:param stepper: string "rk1" or "rk4" Runge-Kutta schemes
:param alph: list, the alpha value multipliers
:param intermediates: if True, return the states and controls instead
:param noMean: if True, do not compute the mean across samples for Jc and cs
:return:
Jc - float, objective function value dot(alph,cs)
cs - list of the computed costs
"""
nex, d = x.shape
h = (tspan[1]-tspan[0]) / nt
z = pad(x, [0, 1, 0, 0], value=0)
P = Phi.getGrad(z) # initial condition gradPhi = p
P = P[:, 0:d]
    # initialize "hidden" vector to propagate with all the additional dimensions for all the ODEs
    # nex - by - (d + 4)
z = torch.cat( (x , torch.zeros(nex,4, dtype=x.dtype,device=x.device)) , 1)
tk = tspan[0]
if intermediates: # save the intermediate values as well
# make tensor of size z.shape[0], z.shape[1], nt
zFull = torch.zeros( *z.shape , nt+1, device=x.device, dtype=x.dtype)
zFull[:,:,0] = z
# hold the controls/thrust and torques on the path
tmp = prob.calcCtrls(z[:, 0:d], P)
ctrlFull = torch.zeros(*tmp.shape, nt + 1, dtype=x.dtype,device=x.device)
for k in range(nt):
if stepper == 'rk4':
z = stepRK4(ocOdefun, z, Phi, prob, alph, tk, tk + h)
elif stepper == 'rk1':
z = stepRK1(ocOdefun, z, Phi, prob, alph, tk, tk + h)
tk += h
if intermediates:
zFull[:, :, k+1] = z
tmp = pad(z[:,0:d], [0, 1, 0, 0], value=tk-h)
p = Phi.getGrad(tmp)[:,0:d]
ctrlFull[:, :, k + 1] = prob.calcCtrls(z[:, 0:d], p)
resG = ocG(z[:,0:d], prob.xtarget)
cG = 0.5 * torch.sum(resG**2, 1, keepdims=True)
# compute Phi at final time
tmp = pad(z[:,0:d], [0, 1, 0, 0], value=tspan[1])
Phi1 = Phi(tmp)
gradPhi1 = Phi.getGrad(tmp)[:, 0:d]
if noMean:
costL = z[:, -4].view(-1,1)
costG = cG.view(-1,1)
costHJt = z[:, -3].view(-1,1)
costHJf = torch.sum(torch.abs(Phi1 - alph[0] * cG), 1).view(-1,1)
costHJgrad = torch.sum(torch.abs(gradPhi1 - alph[0] * resG), 1).view(-1,1)
costQ = z[:, -2].view(-1,1)
costW = z[:, -1].view(-1,1)
cs = [costL, costG, costHJt, costHJf, costHJgrad, costQ, costW]
Jc = costL + alph[0] * costG + alph[3] * costHJt + alph[4] * costHJf + alph[5] * costHJgrad
return Jc, cs
# ASSUME all examples are equally weighted
costL = torch.mean(z[:,-4])
costG = torch.mean(cG)
costHJt = torch.mean(z[:,-3])
costHJf = torch.mean(torch.sum(torch.abs(Phi1 - alph[0] * cG), 1))
costHJgrad = torch.mean(torch.sum(torch.abs(gradPhi1 - alph[0] * resG), 1))
costQ = torch.mean(z[:, -2])
costW = torch.mean(z[:, -1])
cs = [costL, costG, costHJt, costHJf, costHJgrad, costQ, costW]
# Jc = sum(i[0] * i[1] for i in zip(cs, alph))
Jc = costL + alph[0]*costG + alph[3]*costHJt + alph[4]*costHJf + alph[5]*costHJgrad
if intermediates:
return zFull, ctrlFull
else:
return Jc, cs
def ocG(z, xtarget):
"""G for OC problems"""
d = xtarget.shape[0] # assumes xtarget has only one dimension
return z[:,0:d] - xtarget
def ocOdefun(x, t, net, prob, alph=None):
"""
the diffeq function for the 4 ODEs in one
d_t [z_x ; L_x ; hjt_x ; dQ_x ; dW_x] = odefun( [z_x ; L_x ; hjt_x ; dQ_x ; dW_x] , t )
z_x - state
L_x - accumulated transport costs
hjt_x - accumulated error between grad_t Phi and H
dQ_x - accumulated obstacle cost (maintained for printing purposes)
dW_x - accumulated interaction cost (maintained for printing purposes)
:param x: nex -by- d+4 tensor, state of diffeq
:param t: float, time
:param net: neural network Phi
:param prob: problem Object
:param alph: list, the 6 alpha values for the OC problem
:return:
"""
nex, d_extra = x.shape
d = (d_extra - 4)
z = pad(x[:, :d], (0, 1, 0, 0), value=t) # concatenate with the time t
gradPhi = net.getGrad(z)
L, H, Q, W = prob.calcLHQW(z[:,:d], gradPhi[:,0:d])
res = torch.zeros(nex,d+4, dtype=x.dtype, device=x.device) # [dx ; dv ; hjt]
res[:, 0:d] = - prob.calcGradpH(z[:,:d] , gradPhi[:,0:d]) # dx
res[:, d] = L.squeeze() # dv
res[:, d + 1] = torch.abs( gradPhi[:,-1] - H.squeeze() ) # HJt
res[:, d + 2] = Q.squeeze() # Q # included merely for printing
res[:, d + 3] = W.squeeze() # W # included merely for printing
return res # torch.cat((dx, dv, hjt, f), 1)
def stepRK1(odefun, z, Phi, prob, alph, t0, t1):
"""
Runge-Kutta 1 / Forward Euler integration scheme
:param odefun: function to apply at every time step
:param z: tensor nex-by-d+4, inputs
:param Phi: Module, the Phi potential function
:param alph: list, the 6 alpha values for the OC problem
:param t0: float, starting time
:param t1: float, end time
:return: tensor nex-by-d+4, features at time t1
"""
z += (t1 - t0) * odefun(z, t0, Phi, prob, alph=alph)
return z
def stepRK4(odefun, z, Phi, prob, alph, t0, t1):
"""
Runge-Kutta 4 integration scheme
:param odefun: function to apply at every time step
:param z: tensor nex-by-d+4, inputs
:param Phi: Module, the Phi potential function
:param alph: list, the 6 alpha values for the OC problem
:param t0: float, starting time
:param t1: float, end time
:return: tensor nex-by-d+4, features at time t1
"""
h = t1 - t0 # step size
z0 = z
K = h * odefun(z0, t0, Phi, prob, alph=alph)
z = z0 + (1.0/6.0) * K
K = h * odefun( z0 + 0.5*K , t0+(h/2) , Phi, prob, alph=alph)
z += (2.0/6.0) * K
K = h * odefun( z0 + 0.5*K , t0+(h/2) , Phi, prob, alph=alph)
z += (2.0/6.0) * K
K = h * odefun( z0 + K , t0+h , Phi, prob, alph=alph)
z += (1.0/6.0) * K
return z
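# Minimal self-contained sanity check of the RK4 stepper above (an
# illustrative sketch only, independent of the Phi network and problem
# objects used elsewhere in this project): integrate dz/dt = -z, whose
# exact solution at t=1 is exp(-1).
if __name__ == "__main__":
    def _decay(z, t, net, prob, alph=None):
        # mirrors the ocOdefun(x, t, net, prob, alph) signature but ignores net/prob
        return -z
    z = torch.ones(1, 1)
    nt, t = 20, 0.0
    h = 1.0 / nt
    for _ in range(nt):
        z = stepRK4(_decay, z, None, None, None, t, t + h)
        t += h
    # with 20 RK4 steps the result matches exp(-1) to roughly 1e-7
    print(float(z), math.exp(-1.0))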
| 33.676923
| 121
| 0.564489
|
98563e7091d0a948c6a26fd54f07ba0048f714a8
| 2,175
|
py
|
Python
|
01_Language/05_Python/study/lesson_03/15.game1.0.py
|
cliff363825/TwentyFour
|
09df59bd5d275e66463e343647f46027397d1233
|
[
"MIT"
] | 3
|
2020-06-28T07:42:51.000Z
|
2021-01-15T10:32:11.000Z
|
01_Language/05_Python/study/lesson_03/15.game1.0.py
|
cliff363825/TwentyFour
|
09df59bd5d275e66463e343647f46027397d1233
|
[
"MIT"
] | 9
|
2021-03-10T22:45:40.000Z
|
2022-02-27T06:53:20.000Z
|
01_Language/05_Python/study/lesson_03/15.game1.0.py
|
cliff363825/TwentyFour
|
09df59bd5d275e66463e343647f46027397d1233
|
[
"MIT"
] | 1
|
2021-01-15T10:51:24.000Z
|
2021-01-15T10:51:24.000Z
|
# Show the welcome message
print('-' * 20, '欢迎光临《唐僧大战白骨精》', '-' * 20)
# Show the role selection prompt
print('请选择你的身份:')
print('\t1.唐僧')
print('\t2.白骨精')
# Role selection for the game
player_choose = input('请选择[1-2]:')
# Print a separator line
print('-' * 66)
# Show a different message depending on the player's choice
if player_choose == '1':
    # Choice 1
print('你已经选择了1,你将以->唐僧<-的身份来进行游戏!')
elif player_choose == '2':
    # Choice 2
print('你竟然选择了白骨精,太不要脸了,你将以->唐僧<-的身份来进行游戏!')
else:
    # Choice 3
print('你的输入有误,系统将自动分配身份,你将以->唐僧<-的身份来进行游戏!')
# Enter the game
# Create variables to store the player's health and attack power
player_life = 2  # health points
player_attack = 2  # attack power
# Create variables to store the boss's health and attack power
boss_life = 10
boss_attack = 10
# Print a separator line
print('-' * 66)
# Show the player's info (attack power, health)
print(f'唐僧,你的生命值是 {player_life} , 你的攻击力是 {player_attack}')
# The game options need to be shown repeatedly, so they are written inside a loop
while True:
    # Print a separator line
print('-' * 66)
    # Show the game options; the game officially starts
print('请选择你要进行的操作:')
print('\t1.练级')
print('\t2.打BOSS')
print('\t3.逃跑')
game_choose = input('请选择要做的操作[1-3]:')
    # Handle the player's choice
if game_choose == '1':
        # Increase the player's health and attack power
player_life += 2
player_attack += 2
        # Show the updated info
        # Print a separator line
print('-' * 66)
        # Show the player's info (attack power, health)
print(f'恭喜你升级了!,你现在的生命值是 {player_life} , 你的攻击力是 {player_attack}')
elif game_choose == '2':
        # The player attacks the boss
        # Subtract from the boss's health; the amount subtracted equals the player's attack power
boss_life -= player_attack
        # Print a separator line
print('-' * 66)
print('->唐僧<- 攻击了 ->白骨精<-')
        # Check whether the boss is dead
if boss_life <= 0:
            # The boss is dead, the player wins, the game ends
print(f'->白骨精<-受到了 {player_attack} 点伤害,重伤不治死了,->唐僧<-赢得了胜利!')
            # Game over
break
        # The boss strikes back at the player
        # Subtract from the player's health
player_life -= boss_attack
print(' ->白骨精<- 攻击了 ->唐僧<-')
        # Check whether the player is dead
if player_life <= 0:
            # The player is dead
print(f'你受到了 {boss_attack} 点伤害,重伤不治死了!GAME OVER')
            # Game over
break
elif game_choose == '3':
        # Print a separator line
print('-' * 66)
        # Run away and quit the game
print('->唐僧<-一扭头,撒腿就跑!GAME OVER')
break
else:
        # Print a separator line
print('-' * 66)
print('你的输入有误,请重新输入!')
| 22.65625
| 74
| 0.514943
|
3f08a96b4d8aba4a010c336c8ad36fd7f7335952
| 101
|
py
|
Python
|
Week 2: Conditional statement and while loop/2 (30).py
|
MLunov/Python-programming-basics-HSE
|
7df8bba105db84d6b932c454fdc39193a648254e
|
[
"MIT"
] | null | null | null |
Week 2: Conditional statement and while loop/2 (30).py
|
MLunov/Python-programming-basics-HSE
|
7df8bba105db84d6b932c454fdc39193a648254e
|
[
"MIT"
] | null | null | null |
Week 2: Conditional statement and while loop/2 (30).py
|
MLunov/Python-programming-basics-HSE
|
7df8bba105db84d6b932c454fdc39193a648254e
|
[
"MIT"
] | null | null | null |
x = float(input())
y = float(input())
i = 1
while x < y:
x += 0.1 * x
i += 1
print(i)
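# Closed-form cross-check (illustrative only): for x < y and 10% growth per
# step, the loop above prints 1 + ceil(log(y/x) / log(1.1)), up to the
# floating-point rounding accumulated by the repeated multiplication.
# import math
# print(1 + math.ceil(math.log(y / x) / math.log(1.1)))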
| 12.625
| 19
| 0.425743
|
be38b1a4561b1274886efd85665008121c580604
| 23,937
|
py
|
Python
|
tests/api/common/experimental/test_mark_tasks.py
|
subrays/airflow
|
3c8c0b3b6411762a4e4977e519374d9fb16b541d
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/api/common/experimental/test_mark_tasks.py
|
subrays/airflow
|
3c8c0b3b6411762a4e4977e519374d9fb16b541d
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/api/common/experimental/test_mark_tasks.py
|
subrays/airflow
|
3c8c0b3b6411762a4e4977e519374d9fb16b541d
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import time
from datetime import datetime
from airflow import configuration, models
from airflow.api.common.experimental.mark_tasks import (
set_state, _create_dagruns, set_dag_run_state_to_success, set_dag_run_state_to_failed,
set_dag_run_state_to_running)
from airflow.utils import timezone
from airflow.utils.db import create_session, provide_session
from airflow.utils.dates import days_ago
from airflow.utils.state import State
from airflow.models import DagRun
from tests.test_utils.db import clear_db_runs
DEV_NULL = "/dev/null"
configuration.load_test_config()
class TestMarkTasks(unittest.TestCase):
@classmethod
def setUpClass(cls):
dagbag = models.DagBag(include_examples=True)
cls.dag1 = dagbag.dags['example_bash_operator']
cls.dag1.sync_to_db()
cls.dag2 = dagbag.dags['example_subdag_operator']
cls.dag2.sync_to_db()
cls.execution_dates = [days_ago(2), days_ago(1)]
def setUp(self):
clear_db_runs()
drs = _create_dagruns(self.dag1, self.execution_dates,
state=State.RUNNING,
run_id_template="scheduled__{}")
for dr in drs:
dr.dag = self.dag1
dr.verify_integrity()
drs = _create_dagruns(self.dag2,
[self.dag2.default_args['start_date']],
state=State.RUNNING,
run_id_template="scheduled__{}")
for dr in drs:
dr.dag = self.dag2
dr.verify_integrity()
def tearDown(self):
clear_db_runs()
@staticmethod
def snapshot_state(dag, execution_dates):
TI = models.TaskInstance
with create_session() as session:
return session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.execution_date.in_(execution_dates)
).all()
@provide_session
def verify_state(self, dag, task_ids, execution_dates, state, old_tis, session=None):
TI = models.TaskInstance
tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.execution_date.in_(execution_dates)
).all()
self.assertTrue(len(tis) > 0)
for ti in tis:
if ti.task_id in task_ids and ti.execution_date in execution_dates:
self.assertEqual(ti.state, state)
else:
for old_ti in old_tis:
if old_ti.task_id == ti.task_id and old_ti.execution_date == ti.execution_date:
self.assertEqual(ti.state, old_ti.state)
def test_mark_tasks_now(self):
# set one task to success but do not commit
snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
altered = set_state(tasks=[task], execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=False)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
None, snapshot)
# set one and only one task to success
altered = set_state(tasks=[task], execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.SUCCESS, snapshot)
# set no tasks
altered = set_state(tasks=[task], execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 0)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.SUCCESS, snapshot)
# set task to other than success
altered = set_state(tasks=[task], execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.FAILED, commit=True)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.FAILED, snapshot)
        # don't alter other tasks
snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_0")
altered = set_state(tasks=[task], execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.SUCCESS, snapshot)
def test_mark_downstream(self):
# test downstream
snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
relatives = task.get_flat_relatives(upstream=False)
task_ids = [t.task_id for t in relatives]
task_ids.append(task.task_id)
altered = set_state(tasks=[task], execution_date=self.execution_dates[0],
upstream=False, downstream=True, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 3)
self.verify_state(self.dag1, task_ids, [self.execution_dates[0]], State.SUCCESS, snapshot)
def test_mark_upstream(self):
# test upstream
snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("run_after_loop")
relatives = task.get_flat_relatives(upstream=True)
task_ids = [t.task_id for t in relatives]
task_ids.append(task.task_id)
altered = set_state(tasks=[task], execution_date=self.execution_dates[0],
upstream=True, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 4)
self.verify_state(self.dag1, task_ids, [self.execution_dates[0]],
State.SUCCESS, snapshot)
def test_mark_tasks_future(self):
# set one task to success towards end of scheduled dag runs
snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
altered = set_state(tasks=[task], execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=True,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 2)
self.verify_state(self.dag1, [task.task_id], self.execution_dates, State.SUCCESS, snapshot)
def test_mark_tasks_past(self):
# set one task to success towards end of scheduled dag runs
snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
altered = set_state(tasks=[task], execution_date=self.execution_dates[1],
upstream=False, downstream=False, future=False,
past=True, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 2)
self.verify_state(self.dag1, [task.task_id], self.execution_dates, State.SUCCESS, snapshot)
def test_mark_tasks_multiple(self):
# set multiple tasks to success
snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
tasks = [self.dag1.get_task("runme_1"), self.dag1.get_task("runme_2")]
altered = set_state(tasks=tasks, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 2)
self.verify_state(self.dag1, [task.task_id for task in tasks], [self.execution_dates[0]],
State.SUCCESS, snapshot)
    # TODO: this skipIf should be removed once a proper fix is found
# We skip it here because this test case is working with Postgres & SQLite
# but not with MySQL
@unittest.skipIf('mysql' in configuration.conf.get('core', 'sql_alchemy_conn'), "Flaky with MySQL")
def test_mark_tasks_subdag(self):
# set one task to success towards end of scheduled dag runs
task = self.dag2.get_task("section-1")
relatives = task.get_flat_relatives(upstream=False)
task_ids = [t.task_id for t in relatives]
task_ids.append(task.task_id)
altered = set_state(tasks=[task], execution_date=self.execution_dates[0],
upstream=False, downstream=True, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 14)
        # cannot use snapshot here as that would require drilling down the
        # sub dag tree, essentially recreating the same code as in the
        # tested logic.
self.verify_state(self.dag2, task_ids, [self.execution_dates[0]],
State.SUCCESS, [])
class TestMarkDAGRun(unittest.TestCase):
@classmethod
def setUpClass(cls):
dagbag = models.DagBag(include_examples=True)
cls.dag1 = dagbag.dags['example_bash_operator']
cls.dag1.sync_to_db()
cls.dag2 = dagbag.dags['example_subdag_operator']
cls.dag2.sync_to_db()
cls.execution_dates = [days_ago(2), days_ago(1), days_ago(0)]
def setUp(self):
clear_db_runs()
def _set_default_task_instance_states(self, dr):
# success task
dr.get_task_instance('runme_0').set_state(State.SUCCESS)
# skipped task
dr.get_task_instance('runme_1').set_state(State.SKIPPED)
# retry task
dr.get_task_instance('runme_2').set_state(State.UP_FOR_RETRY)
# queued task
dr.get_task_instance('also_run_this').set_state(State.QUEUED)
# running task
dr.get_task_instance('run_after_loop').set_state(State.RUNNING)
# failed task
dr.get_task_instance('run_this_last').set_state(State.FAILED)
def _verify_task_instance_states_remain_default(self, dr):
self.assertEqual(dr.get_task_instance('runme_0').state, State.SUCCESS)
self.assertEqual(dr.get_task_instance('runme_1').state, State.SKIPPED)
self.assertEqual(dr.get_task_instance('runme_2').state, State.UP_FOR_RETRY)
self.assertEqual(dr.get_task_instance('also_run_this').state, State.QUEUED)
self.assertEqual(dr.get_task_instance('run_after_loop').state, State.RUNNING)
self.assertEqual(dr.get_task_instance('run_this_last').state, State.FAILED)
@provide_session
def _verify_task_instance_states(self, dag, date, state, session):
TI = models.TaskInstance
tis = session.query(TI)\
.filter(TI.dag_id == dag.dag_id, TI.execution_date == date)
for ti in tis:
self.assertEqual(ti.state, state)
def _create_test_dag_run(self, state, date):
return self.dag1.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=state,
execution_date=date
)
def _verify_dag_run_state(self, dag, date, state):
drs = models.DagRun.find(dag_id=dag.dag_id, execution_date=date)
dr = drs[0]
self.assertEqual(dr.get_state(), state)
@provide_session
def _verify_dag_run_dates(self, dag, date, state, middle_time, session=None):
# When target state is RUNNING, we should set start_date,
# otherwise we should set end_date.
DR = DagRun
dr = session.query(DR).filter(
DR.dag_id == dag.dag_id,
DR.execution_date == date
).one()
if state == State.RUNNING:
# Since the DAG is running, the start_date must be updated after creation
self.assertGreater(dr.start_date, middle_time)
# If the dag is still running, we don't have an end date
self.assertIsNone(dr.end_date)
else:
# If the dag is not running, there must be an end time
self.assertLess(dr.start_date, middle_time)
self.assertGreater(dr.end_date, middle_time)
def test_set_running_dag_run_to_success(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.RUNNING, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_success(self.dag1, date, commit=True)
# All except the SUCCESS task should be altered.
self.assertEqual(len(altered), 5)
self._verify_dag_run_state(self.dag1, date, State.SUCCESS)
self._verify_task_instance_states(self.dag1, date, State.SUCCESS)
self._verify_dag_run_dates(self.dag1, date, State.SUCCESS, middle_time)
def test_set_running_dag_run_to_failed(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.RUNNING, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_failed(self.dag1, date, commit=True)
# Only running task should be altered.
self.assertEqual(len(altered), 1)
self._verify_dag_run_state(self.dag1, date, State.FAILED)
self.assertEqual(dr.get_task_instance('run_after_loop').state, State.FAILED)
self._verify_dag_run_dates(self.dag1, date, State.FAILED, middle_time)
def test_set_running_dag_run_to_running(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.RUNNING, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_running(self.dag1, date, commit=True)
# None of the tasks should be altered, only the dag itself
self.assertEqual(len(altered), 0)
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
self._verify_dag_run_dates(self.dag1, date, State.RUNNING, middle_time)
def test_set_success_dag_run_to_success(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_success(self.dag1, date, commit=True)
# All except the SUCCESS task should be altered.
self.assertEqual(len(altered), 5)
self._verify_dag_run_state(self.dag1, date, State.SUCCESS)
self._verify_task_instance_states(self.dag1, date, State.SUCCESS)
self._verify_dag_run_dates(self.dag1, date, State.SUCCESS, middle_time)
def test_set_success_dag_run_to_failed(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_failed(self.dag1, date, commit=True)
# Only running task should be altered.
self.assertEqual(len(altered), 1)
self._verify_dag_run_state(self.dag1, date, State.FAILED)
self.assertEqual(dr.get_task_instance('run_after_loop').state, State.FAILED)
self._verify_dag_run_dates(self.dag1, date, State.FAILED, middle_time)
def test_set_success_dag_run_to_running(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_running(self.dag1, date, commit=True)
# None of the tasks should be altered, but only the dag object should be changed
self.assertEqual(len(altered), 0)
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
self._verify_dag_run_dates(self.dag1, date, State.RUNNING, middle_time)
def test_set_failed_dag_run_to_success(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_success(self.dag1, date, commit=True)
# All except the SUCCESS task should be altered.
self.assertEqual(len(altered), 5)
self._verify_dag_run_state(self.dag1, date, State.SUCCESS)
self._verify_task_instance_states(self.dag1, date, State.SUCCESS)
self._verify_dag_run_dates(self.dag1, date, State.SUCCESS, middle_time)
def test_set_failed_dag_run_to_failed(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_failed(self.dag1, date, commit=True)
# Only running task should be altered.
self.assertEqual(len(altered), 1)
self._verify_dag_run_state(self.dag1, date, State.FAILED)
self.assertEqual(dr.get_task_instance('run_after_loop').state, State.FAILED)
self._verify_dag_run_dates(self.dag1, date, State.FAILED, middle_time)
def test_set_failed_dag_run_to_running(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
time.sleep(2)
altered = set_dag_run_state_to_running(self.dag1, date, commit=True)
# None of the tasks should be altered, since we've only altered the DAG itself
self.assertEqual(len(altered), 0)
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
self._verify_dag_run_dates(self.dag1, date, State.RUNNING, middle_time)
def test_set_state_without_commit(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.RUNNING, date)
self._set_default_task_instance_states(dr)
will_be_altered = set_dag_run_state_to_running(self.dag1, date, commit=False)
# None of the tasks will be altered.
self.assertEqual(len(will_be_altered), 0)
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
will_be_altered = set_dag_run_state_to_failed(self.dag1, date, commit=False)
# Only the running task will be altered.
self.assertEqual(len(will_be_altered), 1)
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
will_be_altered = set_dag_run_state_to_success(self.dag1, date, commit=False)
# All except the SUCCESS task should be altered.
self.assertEqual(len(will_be_altered), 5)
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
@provide_session
def test_set_state_with_multiple_dagruns(self, session=None):
self.dag2.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.FAILED,
execution_date=self.execution_dates[0],
session=session
)
self.dag2.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.FAILED,
execution_date=self.execution_dates[1],
session=session
)
self.dag2.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.RUNNING,
execution_date=self.execution_dates[2],
session=session
)
altered = set_dag_run_state_to_success(self.dag2, self.execution_dates[1], commit=True)
# Recursively count number of tasks in the dag
def count_dag_tasks(dag):
count = len(dag.tasks)
subdag_counts = [count_dag_tasks(subdag) for subdag in dag.subdags]
count += sum(subdag_counts)
return count
self.assertEqual(len(altered), count_dag_tasks(self.dag2))
self._verify_dag_run_state(self.dag2, self.execution_dates[1], State.SUCCESS)
# Make sure other dag status are not changed
models.DagRun.find(dag_id=self.dag2.dag_id,
execution_date=self.execution_dates[0])
self._verify_dag_run_state(self.dag2, self.execution_dates[0], State.FAILED)
models.DagRun.find(dag_id=self.dag2.dag_id,
execution_date=self.execution_dates[2])
self._verify_dag_run_state(self.dag2, self.execution_dates[2], State.RUNNING)
def test_set_dag_run_state_edge_cases(self):
# Dag does not exist
altered = set_dag_run_state_to_success(None, self.execution_dates[0])
self.assertEqual(len(altered), 0)
altered = set_dag_run_state_to_failed(None, self.execution_dates[0])
self.assertEqual(len(altered), 0)
altered = set_dag_run_state_to_running(None, self.execution_dates[0])
self.assertEqual(len(altered), 0)
# Invalid execution date
altered = set_dag_run_state_to_success(self.dag1, None)
self.assertEqual(len(altered), 0)
altered = set_dag_run_state_to_failed(self.dag1, None)
self.assertEqual(len(altered), 0)
altered = set_dag_run_state_to_running(self.dag1, None)
self.assertEqual(len(altered), 0)
        # This will throw ValueError since the execution date
        # needs to be timezone-aware (a naive date is passed here).
self.assertRaises(ValueError, set_dag_run_state_to_success, self.dag2,
timezone.make_naive(self.execution_dates[0]))
# altered = set_dag_run_state_to_success(self.dag1, self.execution_dates[0])
# DagRun does not exist
# This will throw ValueError since dag.latest_execution_date does not exist
self.assertRaises(ValueError, set_dag_run_state_to_success,
self.dag2, self.execution_dates[0])
def tearDown(self):
self.dag1.clear()
self.dag2.clear()
with create_session() as session:
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
if __name__ == '__main__':
unittest.main()
| 44.575419
| 103
| 0.66416
|
a8eb82c43281050976819501094ed2ce10ec9f7a
| 13,938
|
py
|
Python
|
src/encoded/tests/test_types_biosample.py
|
4dn-dcic/fourfron
|
29601961706d2371b982e57ae085e8ebec3b2714
|
[
"MIT"
] | 11
|
2016-11-23T02:33:13.000Z
|
2021-06-18T14:21:20.000Z
|
src/encoded/tests/test_types_biosample.py
|
4dn-dcic/fourfron
|
29601961706d2371b982e57ae085e8ebec3b2714
|
[
"MIT"
] | 1,159
|
2016-11-21T15:40:24.000Z
|
2022-03-29T03:18:38.000Z
|
src/encoded/tests/test_types_biosample.py
|
4dn-dcic/fourfron
|
29601961706d2371b982e57ae085e8ebec3b2714
|
[
"MIT"
] | 5
|
2017-01-27T16:36:15.000Z
|
2019-06-14T14:39:54.000Z
|
import pytest
# from snovault.schema_utils import load_schema
pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
@pytest.fixture
def biosample_cc_w_diff(testapp, de_term, lab, award):
item = {
"culture_start_date": "2018-01-01",
"differentiation_state": "Differentiated to definitive endoderm demonstrated by decreased Oct4 expression and increased Sox17 expression",
"tissue": de_term['@id'],
"in_vitro_differentiated": "Yes",
'award': award['@id'],
'lab': lab['@id']
}
return testapp.post_json('/biosample_cell_culture', item).json['@graph'][0]
@pytest.fixture
def biosample_1(testapp, human_biosource, lab, award):
item = {
'description': "GM12878 prepared for Hi-C",
'biosource': [human_biosource['@id'], ],
'award': award['@id'],
'lab': lab['@id'],
}
return testapp.post_json('/biosample', item).json['@graph'][0]
@pytest.fixture
def biosample_w_mod(testapp, biosample_1, mod_w_target):
return testapp.patch_json(biosample_1['@id'], {'modifications': [mod_w_target['@id']]}).json['@graph'][0]
@pytest.fixture
def biosample_w_treatment(testapp, biosample_1, rnai):
return testapp.patch_json(biosample_1['@id'], {'treatments': [rnai['@id']]}).json['@graph'][0]
def biosample_relation(derived_from):
return {"biosample_relation": [{"relationship_type": "derived from",
"biosample": derived_from['@id']}]}
def test_biosample_has_display_title(testapp, biosample_1):
# accession fallback used for display title here
assert biosample_1['display_title'] == biosample_1['accession']
# data from test/datafixtures
def test_update_biosample_relation(testapp, human_biosample, biosample_1):
patch_res = testapp.patch_json(human_biosample['@id'], biosample_relation(biosample_1))
res = testapp.get(biosample_1['@id'])
# expected relation: 'biosample': human_biosample['@id'],
# 'relationship_type': 'parent of'
assert res.json['biosample_relation'][0]['biosample']['@id'] == human_biosample['@id']
assert res.json['biosample_relation'][0]['relationship_type'] == 'parent of'
def test_biosample_calculated_properties(testapp, biosample_1, ):
"""
Test to ensure the calculated properties are in result returned from testapp
These have string 'None' returned if no value as they are used in Item page view
"""
res = testapp.get(biosample_1['@id']).json
assert 'modifications_summary' in res
assert 'modifications_summary_short' in res
assert 'treatments_summary' in res
assert 'biosource_summary' in res
def test_biosample_biosource_summary_one_biosource(testapp, biosample_1, human_biosource):
assert biosample_1['biosource_summary'] == human_biosource['biosource_name']
def test_biosample_biosource_summary_two_biosource(testapp, biosample_1, human_biosource, lung_biosource):
res = testapp.patch_json(biosample_1['@id'], {'biosource': [human_biosource['@id'], lung_biosource['@id']]}).json['@graph'][0]
assert human_biosource['biosource_name'] in res['biosource_summary']
assert lung_biosource['biosource_name'] in res['biosource_summary']
assert ' and ' in res['biosource_summary']
def test_biosample_biosource_summary_w_differentiation(testapp, biosample_1, human_biosource, biosample_cc_w_diff, de_term):
res = testapp.patch_json(biosample_1['@id'], {'cell_culture_details': [biosample_cc_w_diff['@id']]}).json['@graph'][0]
assert human_biosource['biosource_name'] in res['biosource_summary']
assert ' differentiated to ' in res['biosource_summary']
assert de_term['display_title'] in res['biosource_summary']
def test_biosample_sample_type_w_differentiation(testapp, biosample_1, biosample_cc_w_diff):
res = testapp.patch_json(biosample_1['@id'], {'cell_culture_details': [biosample_cc_w_diff['@id']]}).json['@graph'][0]
assert res['biosample_type'] == 'in vitro differentiated cells'
def test_biosample_sample_type_immortalized_wo_differentiation(testapp, biosample_1, biosample_cc_wo_diff):
res = testapp.patch_json(biosample_1['@id'], {'cell_culture_details': [biosample_cc_wo_diff['@id']]}).json['@graph'][0]
assert res['biosample_type'] == 'immortalized cells'
def test_biosample_sample_type_bs_stem_cell_line(testapp, biosample_1, human_biosource):
bsres = testapp.patch_json(human_biosource['@id'], {'biosource_type': 'stem cell derived cell line'}).json['@graph'][0]
res = testapp.patch_json(biosample_1['@id'], {'biosource': [bsres['@id']]}).json['@graph'][0]
assert res['biosample_type'] == 'stem cells'
def test_biosample_sample_type_bs_multicellular(testapp, biosample_1, human_biosource):
bsres = testapp.patch_json(human_biosource['@id'], {'biosource_type': 'multicellular organism'}).json['@graph'][0]
res = testapp.patch_json(biosample_1['@id'], {'biosource': [bsres['@id']]}).json['@graph'][0]
assert res['biosample_type'] == 'whole organisms'
def test_biosample_sample_type_bs_tissue(testapp, biosample_1, human_biosource):
bty = 'tissue'
bsres = testapp.patch_json(human_biosource['@id'], {'biosource_type': bty}).json['@graph'][0]
res = testapp.patch_json(biosample_1['@id'], {'biosource': [bsres['@id']]}).json['@graph'][0]
assert res['biosample_type'] == bty
def test_biosample_sample_type_bs_lines_and_to_pluralize(testapp, biosample_1, human_biosource):
types = {
"primary cell": "primary cells",
"primary cell line": "primary cells",
"immortalized cell line": "immortalized cells",
"stem cell": "stem cells",
"induced pluripotent stem cell": "induced pluripotent stem cells"
}
for bty, bsty in types.items():
bsres = testapp.patch_json(human_biosource['@id'], {'biosource_type': bty}).json['@graph'][0]
res = testapp.patch_json(biosample_1['@id'], {'biosource': [bsres['@id']]}).json['@graph'][0]
assert res['biosample_type'] == bsty
def test_biosample_sample_type_bs_multiple_same_type(testapp, biosample_1, human_biosource, GM12878_biosource):
res = testapp.patch_json(biosample_1['@id'], {'biosource': [human_biosource['@id'], GM12878_biosource['@id']]}).json['@graph'][0]
assert res['biosample_type'] == 'immortalized cells'
def test_biosample_sample_type_bs_multiple_diff_types(testapp, biosample_1, human_biosource, lung_biosource):
res = testapp.patch_json(biosample_1['@id'], {'biosource': [human_biosource['@id'], lung_biosource['@id']]}).json['@graph'][0]
assert res['biosample_type'] == 'mixed sample'
def test_biosample_modifications_summaries(biosample_w_mod):
assert biosample_w_mod['modifications_summary'] == 'Crispr for RAD21 gene'
assert biosample_w_mod['modifications_summary_short'] == 'RAD21 Crispr'
def test_biosample_modifications_summaries_no_mods(biosample_1):
assert biosample_1.get('modifications_summary') == 'None'
assert biosample_1.get('modifications_summary_short') == 'None'
def test_biosample_treatments_summary(biosample_w_treatment):
assert biosample_w_treatment.get('treatments_summary') == 'shRNA treatment'
def test_biosample_treatments_summary_no_treatment(biosample_1):
assert biosample_1.get('treatments_summary') == 'None'
def test_biosample_category_undifferentiated_stem_cells(testapp, biosample_1, human_biosource):
scl = testapp.patch_json(human_biosource['@id'], {'biosource_type': 'stem cell derived cell line'}).json['@graph'][0]
bios = testapp.patch_json(biosample_1['@id'], {'biosource': [scl['@id']]}).json['@graph'][0]
assert 'Human stem cell' in bios.get('biosample_category')
def test_biosample_category_differentiated_stem_cells(testapp, biosample_1, human_biosource, biosample_cc_w_diff):
scl = testapp.patch_json(human_biosource['@id'], {'biosource_type': 'stem cell derived cell line'}).json['@graph'][0]
bios = testapp.patch_json(biosample_1['@id'], {'biosource': [scl['@id']], 'cell_culture_details': [biosample_cc_w_diff['@id']]}).json['@graph'][0]
cats = bios.get('biosample_category')
assert 'Human stem cell' not in cats
assert 'In vitro Differentiation' in cats
def test_biosample_biosource_category_two_biosource(testapp, biosample_1, human_biosource, lung_biosource):
res = testapp.patch_json(biosample_1['@id'], {'biosource': [human_biosource['@id'], lung_biosource['@id']]}).json['@graph'][0]
cat = res.get('biosample_category')
assert len(cat) == 1
assert cat[0] == 'Mixed samples'
# setting up fixtures for testing tissue and organ calcprop
@pytest.fixture
def brain_term(testapp, uberon_ont, cns_term, ectoderm_term):
item = {
"is_slim_for": "organ",
"term_id": "brain_tid",
"term_name": "brain",
"source_ontologies": [uberon_ont['@id']],
"slim_terms": [cns_term['@id'], ectoderm_term['@id']]
}
return testapp.post_json('/ontology_term', item).json['@graph'][0]
@pytest.fixture
def cns_term(testapp, uberon_ont, ectoderm_term):
item = {
"is_slim_for": "system",
"term_id": "cns_tid",
"term_name": "central nervous system",
"source_ontologies": [uberon_ont['@id']],
"slim_terms": [ectoderm_term['@id']]
}
return testapp.post_json('/ontology_term', item).json['@graph'][0]
@pytest.fixture
def ectoderm_term(testapp, uberon_ont):
item = {
"is_slim_for": "developmental",
"term_id": "ectoderm_tid",
"term_name": "ectoderm",
"source_ontologies": [uberon_ont['@id']],
}
return testapp.post_json('/ontology_term', item).json['@graph'][0]
@pytest.fixture
def primary_cell_term(testapp, ontology):
item = {
"is_slim_for": "cell",
"term_id": "pcell_id",
"term_name": "primary cell",
"source_ontologies": [ontology['@id']],
}
return testapp.post_json('/ontology_term', item).json['@graph'][0]
@pytest.fixture
def cortical_neuron_term(testapp, uberon_ont, brain_term, cns_term,
ectoderm_term, primary_cell_term):
item = {
"term_id": "cort_neuron_id",
"term_name": "cortical neuron",
"source_ontologies": [uberon_ont['@id']],
"slim_terms": [brain_term['@id'], cns_term['@id'], ectoderm_term['@id'], primary_cell_term['@id']]
}
return testapp.post_json('/ontology_term', item).json['@graph'][0]
@pytest.fixture
def bcc_diff_to_cortical(testapp, lab, award, cortical_neuron_term):
item = {
"culture_start_date": "2018-01-01",
"differentiation_state": "Stem cell differentiated to cortical neuron",
"tissue": cortical_neuron_term['@id'],
"in_vitro_differentiated": "Yes",
'award': award['@id'],
'lab': lab['@id']
}
return testapp.post_json('/biosample_cell_culture', item).json['@graph'][0]
@pytest.fixture
def diff_cortical_neuron_bs(testapp, F123_biosource, bcc_diff_to_cortical, lab, award):
item = {
"description": "Differentiated cortical neuron",
"biosource": [F123_biosource['@id']],
"cell_culture_details": [bcc_diff_to_cortical['@id']],
"award": award['@id'],
"lab": lab['@id']
}
return testapp.post_json('/biosample', item).json['@graph'][0]
@pytest.fixture
def brain_biosource(testapp, brain_term, lab, award):
item = {
"description": "Brain tissue",
"biosource_type": "tissue",
"tissue": brain_term['@id'],
"lab": lab['@id'],
"award": award['@id']
}
return testapp.post_json('/biosource', item).json['@graph'][0]
@pytest.fixture
def brain_biosample(testapp, brain_biosource, lab, award):
item = {
"description": "Brain Tissue Biosample",
"biosource": [brain_biosource['@id']],
"award": award['@id'],
"lab": lab['@id']
}
return testapp.post_json('/biosample', item).json['@graph'][0]
@pytest.fixture
def mixed_biosample(testapp, brain_biosource, lung_biosource, lab, award):
item = {
"description": "Mixed Tissue Biosample",
"biosource": [brain_biosource['@id'], lung_biosource['@id']],
"award": award['@id'],
"lab": lab['@id']
}
return testapp.post_json('/biosample', item).json['@graph'][0]
def test_get_tissue_organ_info_none_present(biosample_1):
assert 'tissue_organ_info' not in biosample_1
def test_get_tissue_organ_info_tissue_in_cell_culture(diff_cortical_neuron_bs, cortical_neuron_term):
org_sys = sorted(['brain', 'central nervous system', 'ectoderm'])
assert 'tissue_organ_info' in diff_cortical_neuron_bs
assert diff_cortical_neuron_bs['tissue_organ_info']['tissue_source'] == cortical_neuron_term.get('display_title')
assert sorted(diff_cortical_neuron_bs['tissue_organ_info']['organ_system']) == org_sys
def test_get_tissue_organ_info_tissue_in_biosource(brain_biosample, brain_term):
org_sys = sorted(['central nervous system', 'ectoderm'])
assert 'tissue_organ_info' in brain_biosample
assert brain_biosample['tissue_organ_info']['tissue_source'] == brain_term.get('display_title')
assert sorted(brain_biosample['tissue_organ_info']['organ_system']) == org_sys
def test_get_tissue_organ_info_tissue_mixed_biosample(mixed_biosample):
org_sys = sorted(['central nervous system', 'ectoderm'])
assert 'tissue_organ_info' in mixed_biosample
assert mixed_biosample['tissue_organ_info']['tissue_source'] == 'mixed tissue'
assert sorted(mixed_biosample['tissue_organ_info']['organ_system']) == org_sys
def test_get_tissue_organ_info_none_if_only_cell_slim_terms(testapp, F123_biosource, lab, award):
item = {
"description": "F123 Biosample",
"biosource": [F123_biosource['@id']],
"award": award['@id'],
"lab": lab['@id']
}
f123_biosample = testapp.post_json('/biosample', item).json['@graph'][0]
assert 'tissue_organ_info' not in f123_biosample
| 41.482143
| 150
| 0.69845
|
f8880474597062a3ddf0b6dee9957a37e866d563
| 8,158
|
py
|
Python
|
scraper.py
|
ltasler/nepremicnine.net-scraper
|
27da32d500b6e6248c5c2a1d4f6bdf43cc6c199c
|
[
"Apache-2.0"
] | 3
|
2020-06-30T09:27:37.000Z
|
2020-11-11T19:10:39.000Z
|
scraper.py
|
ltasler/nepremicnine.net-scraper
|
27da32d500b6e6248c5c2a1d4f6bdf43cc6c199c
|
[
"Apache-2.0"
] | 3
|
2021-03-03T22:44:31.000Z
|
2021-12-13T20:03:36.000Z
|
scraper.py
|
ltasler/nepremicnine.net-scraper
|
27da32d500b6e6248c5c2a1d4f6bdf43cc6c199c
|
[
"Apache-2.0"
] | 3
|
2020-04-15T14:12:15.000Z
|
2021-08-14T13:16:03.000Z
|
#!/usr/bin/python
import sys
from lxml import html
from time import sleep
from datetime import datetime
import requests
import argparse
import json as jsonOld
import ujson as json
import re
import smtplib
import ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
class Scraper:
_appdata = {}
_appdata_file = ''
def __init__(self, appdata_file):
timestamp = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
print("=============== Scraper started at " + timestamp + " ===============")
print("Opening appdata file: " + appdata_file)
with open(appdata_file, "r") as file:
appdata = json.load(file)
if not appdata:
raise FileNotFoundError(f"Could not load {appdata_file}.")
self._appdata = appdata
self._appdata_file = appdata_file
# print("appdata file open")
def _check_for_removed(self):
"""
Checks for removed urls.
:return: return array of links to removed ones.
"""
print("Checking for removed links...")
visited = self._appdata["visited"]
removed = []
for i, v in enumerate(visited):
url = v["link"]
r = requests.get(url)
if r.status_code == 404:
# Does not exist anymore. Remove and append for report.
removed.append(v)
del self._appdata['visited'][i]
print("Removed " + removed.__len__().__str__())
return removed
def _does_offer_exists(self, offer_id):
"""
Checks if offer id exists in
:return: True if found false otherwise
"""
visited = self._appdata["visited"]
for v in visited:
if v["id"] == offer_id:
return True
return False
@staticmethod
def _get_page_number(url):
tmp = re.search(r"/[0-9]/", url).group(0)
return int(tmp[1:2])
def _check_for_new(self, url):
"""
Checks for new offers on url in appdata.
:return:
"""
new_offers = []
page_number = self._get_page_number(url)
while True: # Fake do while
# print("Checking page " + page_number.__str__() + " url: " + url)
# print("Checking page " + page_number.__str__())
page = requests.get(url)
page_tree = html.fromstring(page.content)
offers = page_tree.xpath('//div[@class="seznam"]/div[@itemprop="itemListElement"]')
if len(offers) == 0:
                # Terminating condition for the while loop: there are no more offers
break
for offer in offers:
offer_id = offer.xpath('attribute::id')[0]
if self._does_offer_exists(offer_id):
                    # The offer already exists, skip it
continue
title = offer.xpath("div/h2/a/span/text()")[0]
link = f'{self._appdata["baseUrl"]}{offer.xpath("div/h2/a/attribute::href")[0]}'
offer_type = offer.xpath('div/div/span/span[@class="tipi"]/text()')[0]
desc = offer.xpath('div/div/div[@class="kratek_container"]/div/text()')[0]
size = offer.xpath('div/div/div[@class="main-data"]/span[@class="velikost"]/text()')[0]
price = offer.xpath('div/div/div[@class="main-data"]/span[@class="cena"]/text()')[0]
agency = offer.xpath('div/div/div[@class="main-data"]/span[@class="agencija"]/text()')[0]
                # We have all the data, so record it
o = {
"id": offer_id,
"title": title,
"link": link,
"type": offer_type,
"desc": desc,
"size": size,
"price": price,
"agency": agency,
}
new_offers.append(o)
self._appdata["visited"].append(o)
# End of for
            # at the end we "go to the next page"
page_number = page_number + 1
url = re.sub(r"/([0-9]|[1-9][0-9]|[1-9][0-9][0-9])/", f"/{page_number}/", url)
            # Sleep for 2 seconds so we don't accidentally DoS the site
sleep(2)
# End of while
return new_offers
# End of _check_for_new
@staticmethod
def _get_item_text_message(n):
message_text = f'{n["title"]}\n{n["link"]}\n{n["desc"]}\nTip: {n["type"]}\n'
message_text += f'Velikost:{n["size"]}\nCena: {n["price"]}\nAgencija: {n["agency"]}\n\n'
return message_text
def send_mail(self, new, removed):
if len(new) == 0 and len(removed) == 0:
            # If there is nothing new, don't send anything
return False
print("Sending mail...")
smtp = self._appdata["smtp"]
port = smtp["port"]
user = smtp["user"]
password = smtp["password"]
smtp_server = smtp["server"]
message = MIMEMultipart("alternative")
message["Subject"] = "Spremembe na nepremicnine.net"
message["From"] = user
message["To"] = ', '.join(self._appdata["mailRecipients"])
message_text = "Pozdravljen/a,\n\nPrinasam novice iz nepremicnine.net.\n\n\n"
if len(new) > 0:
message_text += "Novi oglasi na nepremicnine.net:\n\n"
for n in new:
message_text += self._get_item_text_message(n)
message_text += "-------------------------------------------------------------------------\n\n"
if len(removed) > 0:
message_text += "Odstranjeni oglasi na nepremicnine.net\n\n"
for r in removed:
message_text += self._get_item_text_message(r)
message_text += "-------------------------------------------------------------------------\n\n"
message_text += "Lep Pozdrav,\nMr. robotek."
part1 = MIMEText(message_text, "plain")
# TODO: Do the html MIME
message.attach(part1)
context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
server.login(user, password)
server.sendmail(user, self._appdata["mailRecipients"], message.as_string())
return True
def run(self, nomail):
removed = self._check_for_removed()
new = []
for url in self._appdata["urls"]:
print("Checking URL: " + url)
found = self._check_for_new(url)
print("New found: " + found.__len__().__str__())
new.extend(found)
print("New combined: " + new.__len__().__str__())
success = True
if not nomail:
            # send the mail
success = self.send_mail(new, removed)
        # The previous functions are not pure: they modify appdata, which we now write back for future runs
if success:
print("Writing appdata file")
            # Only persist the change if sending the mail succeeded
with open(self._appdata_file, 'w') as f:
json.dump(self._appdata, f, indent=2)
def purge(self):
self._appdata["visited"].clear()
with open(self._appdata_file, 'w') as f:
json.dump(self._appdata, f, indent=2)
print("Visited list purged")
APPDATA_FILE = "appdata.json"
def main(argv):
# Construct the argument parser
ap = argparse.ArgumentParser()
ap.add_argument('--purge', action='store_true', help="Purges the visited database in the appdata.json")
ap.add_argument('--nomail', action='store_true', help="Doesn't send the email, just saves the visited to appdata.json")
args = ap.parse_args(argv)
scraper = Scraper(APPDATA_FILE)
if args.purge:
scraper.purge()
else:
scraper.run(args.nomail)
if __name__ == '__main__':
sys.path.extend(['.', '..'])
main(sys.argv[1:])
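# Usage sketch (the script file name here is hypothetical): run `python scraper.py`
# to check for changes and email them, `python scraper.py --nomail` to only refresh
# appdata.json, or `python scraper.py --purge` to clear the visited list.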
| 35.780702
| 123
| 0.531871
|
506ac0879e0cfedac274b8dd237cdbfdc9c5848a
| 3,697
|
py
|
Python
|
lib/epub.py
|
ImChinaNB/wenku8dl
|
c55f4fa5d8144c6973bf447a6d705c9fabc82c55
|
[
"MIT"
] | 3
|
2021-11-07T01:35:44.000Z
|
2021-11-18T06:52:30.000Z
|
lib/epub.py
|
ImChinaNB/wenku8dl
|
c55f4fa5d8144c6973bf447a6d705c9fabc82c55
|
[
"MIT"
] | null | null | null |
lib/epub.py
|
ImChinaNB/wenku8dl
|
c55f4fa5d8144c6973bf447a6d705c9fabc82c55
|
[
"MIT"
] | 1
|
2021-11-25T14:00:41.000Z
|
2021-11-25T14:00:41.000Z
|
import mime, threading, datetime
from ebooklib import epub
from lib.logger import getLogger
from lib.constants import *
L = getLogger("epub")
class Book:
def __init__(self, meta: dict[str], book_id, title, chapter_cnt):
self.book = epub.EpubBook()
self.book.set_identifier(meta['identifier'])
self.book.set_title(meta['title'])
self.book.set_language(meta['language'])
if 'creator' in meta: self.book.add_author(meta['creator'], None, 'aut', 'creator')
if OPT['moreAuthor'] and 'contributor' in meta: self.book.add_metadata('DC', 'contributor', meta['contributor'])
if OPT['moreAuthor'] and 'publisher' in meta: self.book.add_metadata('DC', 'publisher', meta['publisher'])
if OPT['moreMeta'] and 'date' in meta: self.book.add_metadata('DC', 'date', meta['date'])
if OPT['moreMeta'] and 'description' in meta: self.book.add_metadata('DC', 'description', meta['description'])
self.book.add_metadata(None, 'meta', datetime.datetime.utcnow().replace(microsecond=0).isoformat() + 'Z', {'property': 'dcterms:modified'})
self.css = self.book.add_item(epub.EpubItem(uid="style", file_name="style/style.css", media_type="text/css", content=CSS))
self.chapters = [None for i in range(0, chapter_cnt)]
self.book_id = book_id
self.lock = threading.Lock()
self.titlePage = self.addChapter(-1, "标题", INTRO(title, meta['creator'], book_id), "title")
self.makerPage = self.addChapter(-1, "制作信息", MAKERINFO(meta['creator']), "makerinfo")
def addImage(self, filename, content):
img = epub.EpubItem(
file_name= "images/%s" % filename,
media_type= mime.Types.of(filename)[0].content_type,
content= content
)
self.lock.acquire()
self.book.add_item(img)
self.lock.release()
def addChapter(self, index, title, content, spec = None):
item_id = "item-%.4d" % (index + 1) if spec is None else "item-" + spec
file_name= '%.4d.xhtml' % (index + 1) if spec is None else spec + ".xhtml"
page = epub.EpubItem(
uid= item_id,
file_name= file_name,
media_type= 'application/xhtml+xml',
content= HTML(title, content).encode("utf-8")
)
self.lock.acquire()
self.book.add_item(page)
self.lock.release()
if spec is None: self.chapters[index] = (page, title, file_name, item_id)
return page
def finalize(self, filename: str, cover_data, cover_ext: str):
has_cover = cover_data is not None
if has_cover:
self.book.set_cover('images/cover' + cover_ext, cover_data, False)
self.coverPage = self.addChapter(-1, "封面", COVER('images/cover' + cover_ext), "cover")
else:
L.warning("empty cover data, skipping!")
self.coverPage = None
toc = ([ENTRY('cover.xhtml', -1, '封面')] if has_cover else []) + [ENTRY('title.xhtml', -1, '标题'), ENTRY('makerinfo.xhtml', -1, '制作信息'), ENTRY('contents.xhtml', -1, '目录')]
ch = [i for i in filter(lambda x: x is not None, self.chapters)]
for id, chapter in enumerate(ch):
toc.append(ENTRY(chapter[2], id + 1, chapter[1]))
self.tocPage = self.addChapter(-1, "目录", TOC("".join(toc)), "contents")
LINK = lambda id, text: epub.Link(id + '.xhtml', text, 'item-' + id)
extra_spine = [i[0] for i in ch]
extra_toc = [epub.Link(i[2], i[1], i[3]) for i in ch]
self.book.toc = ([LINK('cover', '封面')] if has_cover else []) + [LINK('title', '标题'), LINK('makerinfo', '制作信息'), LINK('contents', '目录')] + extra_toc
self.book.spine = ([self.coverPage] if has_cover else []) + [self.titlePage, self.makerPage, self.tocPage] + extra_spine
self.book.add_item(epub.EpubNcx())
self.book.add_item(epub.EpubNav())
epub.write_epub(filename, self.book)
return True
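# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of driving the Book class above; it assumes the
# repo's lib.constants helpers (OPT, CSS, INTRO, MAKERINFO, ...) are importable and
# uses placeholder metadata and file names rather than real data.
if __name__ == "__main__":
    demo_meta = {
        "identifier": "demo-0001",
        "title": "Demo Book",
        "language": "zh",
        "creator": "Unknown",
    }
    demo = Book(demo_meta, book_id=1, title="Demo Book", chapter_cnt=2)
    # Chapter indices must stay below the chapter_cnt passed to the constructor.
    demo.addChapter(0, "Chapter 1", "<p>first chapter body</p>")
    demo.addChapter(1, "Chapter 2", "<p>second chapter body</p>")
    # No cover image in this sketch, so finalize() logs a warning and skips the cover page.
    demo.finalize("demo.epub", cover_data=None, cover_ext=".jpg")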
| 45.641975
| 173
| 0.656478
|
e2f7d0e59dd4e3dec8caa8d8ab0475cdee562174
| 6,028
|
py
|
Python
|
app/survey_config/census_config.py
|
petechd/eq-questionnaire-runner
|
1c5b182a7f8bc878cfdd767ae080410fa679abd6
|
[
"MIT"
] | 3
|
2020-09-28T13:21:21.000Z
|
2021-05-05T14:14:51.000Z
|
app/survey_config/census_config.py
|
petechd/eq-questionnaire-runner
|
1c5b182a7f8bc878cfdd767ae080410fa679abd6
|
[
"MIT"
] | 402
|
2019-11-06T17:23:03.000Z
|
2022-03-31T16:03:35.000Z
|
app/survey_config/census_config.py
|
petechd/eq-questionnaire-runner
|
1c5b182a7f8bc878cfdd767ae080410fa679abd6
|
[
"MIT"
] | 10
|
2020-03-03T14:23:27.000Z
|
2022-01-31T12:21:21.000Z
|
from dataclasses import dataclass, field
from typing import Iterable, Mapping, MutableMapping
from flask_babel import lazy_gettext
from flask_babel.speaklater import LazyString
from app.survey_config.link import Link
from app.survey_config.survey_config import SurveyConfig
EN_BASE_URL = "https://census.gov.uk"
CY_BASE_URL = "https://cyfrifiad.gov.uk"
NIR_BASE_URL = f"{EN_BASE_URL}/ni"
@dataclass
class CensusSurveyConfig(
SurveyConfig,
):
title_logo: str = "census-logo-en"
title_logo_alt: LazyString = lazy_gettext("Census 2021")
base_url: str = EN_BASE_URL
account_service_url: str = f"{base_url}/en/start"
design_system_theme: str = "census"
footer_links: Iterable[MutableMapping] = field(
default_factory=lambda: [
Link(
lazy_gettext("Help"),
f"{EN_BASE_URL}/help/how-to-answer-questions/online-questions-help/",
).__dict__,
Link(lazy_gettext("Contact us"), f"{EN_BASE_URL}/contact-us/").__dict__,
Link(
lazy_gettext("Languages"),
f"{EN_BASE_URL}/help/languages-and-accessibility/languages/",
).__dict__,
Link(
lazy_gettext("BSL and audio videos"),
f"{EN_BASE_URL}/help/languages-and-accessibility/accessibility/accessible-videos-with-bsl/",
).__dict__,
],
compare=False,
)
footer_legal_links: Iterable[Mapping] = field(
default_factory=lambda: [
Link(lazy_gettext("Cookies"), f"{EN_BASE_URL}/cookies/").__dict__,
Link(
lazy_gettext("Accessibility statement"),
f"{EN_BASE_URL}/accessibility-statement/",
).__dict__,
Link(
lazy_gettext("Privacy and data protection"),
f"{EN_BASE_URL}/privacy-and-data-protection/",
).__dict__,
Link(
lazy_gettext("Terms and conditions"),
f"{EN_BASE_URL}/terms-and-conditions/",
).__dict__,
],
compare=False,
)
data_layer: Iterable[Mapping] = field(
default_factory=lambda: [{"nisra": False}], compare=False
)
survey_title: LazyString = lazy_gettext("Census 2021")
@dataclass
class WelshCensusSurveyConfig(
CensusSurveyConfig,
):
title_logo: str = "census-logo-cy"
base_url: str = CY_BASE_URL
account_service_url: str = f"{CY_BASE_URL}/en/start"
footer_links: Iterable[MutableMapping] = field(
default_factory=lambda: [
Link(
lazy_gettext("Help"),
f"{CY_BASE_URL}/help/sut-i-ateb-y-cwestiynau/help-y-cwestiynau-ar-lein/",
).__dict__,
Link(lazy_gettext("Contact us"), f"{CY_BASE_URL}/cysylltu-a-ni/").__dict__,
Link(
lazy_gettext("Languages"),
f"{CY_BASE_URL}/help/ieithoedd-a-hygyrchedd/ieithoedd/",
).__dict__,
Link(
lazy_gettext("BSL and audio videos"),
f"{CY_BASE_URL}/help/ieithoedd-a-hygyrchedd/hygyrchedd/fideos-hygyrch-gyda-bsl/",
).__dict__,
],
compare=False,
hash=False,
)
footer_legal_links: Iterable[Mapping] = field(
default_factory=lambda: [
Link(lazy_gettext("Cookies"), f"{CY_BASE_URL}/cwcis/").__dict__,
Link(
lazy_gettext("Accessibility statement"),
f"{CY_BASE_URL}/datganiad-hygyrchedd/",
).__dict__,
Link(
lazy_gettext("Privacy and data protection"),
f"{CY_BASE_URL}/preifatrwydd-a-diogelu-data/",
).__dict__,
Link(
lazy_gettext("Terms and conditions"),
f"{CY_BASE_URL}/telerau-ac-amodau/",
).__dict__,
],
compare=False,
hash=False,
)
data_layer: Iterable[Mapping] = field(
default_factory=lambda: [{"nisra": False}], compare=False
)
@dataclass
class CensusNISRASurveyConfig(
CensusSurveyConfig,
):
base_url: str = NIR_BASE_URL
page_header_logo: str = "nisra-logo-en"
page_header_logo_alt: str = lazy_gettext(
"Northern Ireland Statistics and Research Agency logo"
)
header_logo: str = "nisra"
mobile_logo: str = "nisra-logo-en-mobile"
copyright_declaration: LazyString = lazy_gettext(
"Crown copyright and database rights 2021 NIMA MOU577.501."
)
copyright_text: LazyString = lazy_gettext(
"Use of address data is subject to the terms and conditions."
)
footer_links: Iterable[MutableMapping] = field(
default_factory=lambda: [
Link(
lazy_gettext("Help"),
f"{NIR_BASE_URL}/help/help-with-the-questions/online-questions-help/",
).__dict__,
Link(lazy_gettext("Contact us"), f"{NIR_BASE_URL}/contact-us/").__dict__,
],
compare=False,
hash=False,
)
footer_legal_links: Iterable[Mapping] = field(
default_factory=lambda: [
Link(lazy_gettext("Cookies"), f"{NIR_BASE_URL}/cookies/").__dict__,
Link(
lazy_gettext("Accessibility statement"),
f"{NIR_BASE_URL}/accessibility-statement/",
).__dict__,
Link(
lazy_gettext("Privacy and data protection"),
f"{NIR_BASE_URL}/privacy-and-data-protection/",
).__dict__,
Link(
lazy_gettext("Terms and conditions"),
f"{NIR_BASE_URL}/terms-and-conditions/",
).__dict__,
],
compare=False,
hash=False,
)
powered_by_logo: str = "nisra-logo-black-en"
powered_by_logo_alt: str = "NISRA - Northern Ireland Statistics and Research Agency"
account_service_url: str = NIR_BASE_URL
data_layer: Iterable[Mapping] = field(
default_factory=lambda: [{"nisra": True}], compare=False
)
| 35.668639
| 108
| 0.597545
|
d9c1847117d8e6f687e3528290ca10fe099228e5
| 24
|
py
|
Python
|
ciex/version.py
|
walkr/ciex
|
bbb61dff82ba767ce1c97caa83be69c0e139a0f5
|
[
"MIT"
] | null | null | null |
ciex/version.py
|
walkr/ciex
|
bbb61dff82ba767ce1c97caa83be69c0e139a0f5
|
[
"MIT"
] | null | null | null |
ciex/version.py
|
walkr/ciex
|
bbb61dff82ba767ce1c97caa83be69c0e139a0f5
|
[
"MIT"
] | null | null | null |
VERSION = '0.1.0-alpha'
| 12
| 23
| 0.625
|
24f400286e58b4149c14c7019027c83997034a03
| 7,711
|
py
|
Python
|
projects_automation/automation/handlers.py
|
konakov-ds/devman_projects_automation
|
f4298ec4d7b1799cfa7827231fb1fcd21e43a9c5
|
[
"MIT"
] | null | null | null |
projects_automation/automation/handlers.py
|
konakov-ds/devman_projects_automation
|
f4298ec4d7b1799cfa7827231fb1fcd21e43a9c5
|
[
"MIT"
] | null | null | null |
projects_automation/automation/handlers.py
|
konakov-ds/devman_projects_automation
|
f4298ec4d7b1799cfa7827231fb1fcd21e43a9c5
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from environs import Env
from .models import Student, Group, PM
from telegram import KeyboardButton
from telegram import ReplyKeyboardMarkup
from telegram import ReplyKeyboardRemove
from telegram.ext import CommandHandler
from telegram.ext import ConversationHandler
from telegram.ext import Filters
from telegram.ext import MessageHandler
env = Env()
env.read_env()
TIME_FROM, TIME_TO, SAVE_INPUT, UPDATE_INPUT = range(4)
TIME_FROM_PM, TIME_TO_PM, SAVE_INPUT_PM = range(4, 7)
START_PROJECT_KEYBOARD = ReplyKeyboardMarkup(
keyboard=[
[
KeyboardButton(text='Участвовать в проекте'),
],
],
resize_keyboard=True
)
START_PROJECT_KEYBOARD_PM = ReplyKeyboardMarkup(
keyboard=[
[
KeyboardButton(text='Приступить к управлению'),
],
],
resize_keyboard=True
)
ASK_TIME_FROM_KEYBOARD = ReplyKeyboardMarkup(
keyboard=[
[
KeyboardButton(text='17:00'),
KeyboardButton(text='17:30'),
],
[
KeyboardButton(text='18:00'),
KeyboardButton(text='18:30'),
],
[
KeyboardButton(text='19:00'),
KeyboardButton(text='19:30'),
],
[
KeyboardButton(text='20:00'),
KeyboardButton(text='20:30'),
],
],
resize_keyboard=True
)
ASK_TIME_FROM_KEYBOARD_PM = ReplyKeyboardMarkup(
keyboard=[
[
KeyboardButton(text='17:00'),
KeyboardButton(text='17:30'),
],
[
KeyboardButton(text='18:00'),
KeyboardButton(text='18:30'),
],
],
resize_keyboard=True
)
ASK_TIME_TO_KEYBOARD = ReplyKeyboardMarkup(
keyboard=[
[
KeyboardButton(text='17:30'),
KeyboardButton(text='18:00'),
],
[
KeyboardButton(text='18:30'),
KeyboardButton(text='19:00'),
],
[
KeyboardButton(text='19:30'),
KeyboardButton(text='20:00'),
],
[
KeyboardButton(text='20:30'),
KeyboardButton(text='21:00'),
],
],
resize_keyboard=True
)
ASK_TIME_TO_KEYBOARD_PM = ReplyKeyboardMarkup(
keyboard=[
[
KeyboardButton(text='20:00'),
KeyboardButton(text='20:30'),
],
[
KeyboardButton(text='21:00'),
KeyboardButton(text='21:30'),
],
],
resize_keyboard=True
)
def start(update, context):
message = update.message
user_name = message.chat.first_name
user_id = message.chat_id
pms = PM.objects.all()
pm_id = [pm.tg_id for pm in pms]
context.user_data['user_id'] = user_id
context.user_data['name'] = user_name
if user_id not in pm_id:
context.bot.send_message(
chat_id=user_id,
text=(
f'Привет, {user_name}.🤚\n\n'
'Скоро стартует проект! Будешь участвовать?'
),
reply_markup=START_PROJECT_KEYBOARD
)
return TIME_FROM
context.bot.send_message(
chat_id=user_id,
text=(
f'Привет, PM! How progress?\n\n'
'Скоро стартуют проекты! Нужно твое чуткое управление'
),
reply_markup=START_PROJECT_KEYBOARD_PM
)
return TIME_FROM_PM
def ask_student_time_from(update, context):
message = update.message
user_id = message.chat_id
context.bot.send_message(
chat_id=user_id,
text='Укажи удобный для тебя интервал начала созвона',
reply_markup=ASK_TIME_FROM_KEYBOARD
)
return TIME_TO
def ask_pm_time_from(update, context):
message = update.message
user_id = message.chat_id
context.bot.send_message(
chat_id=user_id,
text='Укажи удобный для тебя интервал начала работы',
reply_markup=ASK_TIME_FROM_KEYBOARD_PM
)
return TIME_TO_PM
def ask_student_time_to(update, context):
message = update.message
user_id = message.chat_id
context.user_data['working_interval_from'] = message.text
context.bot.send_message(
chat_id=user_id,
text='Укажи удобный для тебя интервал конца созвона',
reply_markup=ASK_TIME_TO_KEYBOARD
)
return SAVE_INPUT
def ask_pm_time_to(update, context):
message = update.message
user_id = message.chat_id
context.user_data['working_interval_from'] = message.text
context.bot.send_message(
chat_id=user_id,
text='Укажи удобный для тебя интервал конца работы',
reply_markup=ASK_TIME_TO_KEYBOARD_PM
)
return SAVE_INPUT_PM
def save_student_input(update, context):
message = update.message
user_id = message.chat_id
context.user_data['working_interval_to'] = message.text
if not Student.objects.filter(tg_id=context.user_data['user_id']):
Student.objects.create(
tg_id=context.user_data['user_id'],
name=context.user_data['name'],
level='new',
working_interval_from=context.user_data['working_interval_from'],
working_interval_to=context.user_data['working_interval_to']
)
else:
        # Restrict the update to this student; an unfiltered update() would touch every row
        Student.objects.filter(tg_id=context.user_data['user_id']).update(
working_interval_from=context.user_data['working_interval_from'],
working_interval_to=context.user_data['working_interval_to']
)
context.bot.send_message(
chat_id=user_id,
text=f'Отлично! Данные сохранены',
reply_markup=ReplyKeyboardRemove()
)
return UPDATE_INPUT
def update_student_time(update, context):
message = update.message
user_id = message.chat_id
print(message.text)
if message.text == 'Подтвердить новое время':
context.bot.send_message(
chat_id=user_id,
text='Отлично! Ты записан на проект!',
reply_markup=ReplyKeyboardRemove()
)
return ConversationHandler.END
elif message.text == 'Не смогу участвовать в проекте':
student = Student.objects.get(tg_id=user_id)
student.group = None
student.save()
context.bot.send_message(
chat_id=user_id,
text='Очень жаль :( Тогда ждем тебя на следующем!',
reply_markup=ReplyKeyboardRemove()
)
return ConversationHandler.END
def save_pm_input(update, context):
message = update.message
user_id = message.chat_id
context.user_data['working_interval_to'] = message.text
    # Restrict the update to this PM; an unfiltered update() would touch every row
    PM.objects.filter(tg_id=context.user_data['user_id']).update(
tg_id=context.user_data['user_id'],
name=context.user_data['name'],
working_interval_from=context.user_data['working_interval_from'],
working_interval_to=context.user_data['working_interval_to']
)
context.bot.send_message(
chat_id=user_id,
text=f'Отлично! Данные сохранены',
reply_markup=ReplyKeyboardRemove()
)
return ConversationHandler.END
def stop(update, context):
update.message.reply_text("Стоп")
return ConversationHandler.END
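# Conversation flow: /start routes students through
# TIME_FROM -> TIME_TO -> SAVE_INPUT -> UPDATE_INPUT and project managers
# (users whose tg_id is already registered as a PM) through
# TIME_FROM_PM -> TIME_TO_PM -> SAVE_INPUT_PM; /stop ends the conversation.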
project_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
states={
TIME_FROM: [MessageHandler(Filters.text, ask_student_time_from)],
TIME_TO: [MessageHandler(Filters.text, ask_student_time_to)],
SAVE_INPUT: [MessageHandler(Filters.text, save_student_input)],
UPDATE_INPUT: [MessageHandler(Filters.text, update_student_time)],
TIME_FROM_PM: [MessageHandler(Filters.text, ask_pm_time_from)],
TIME_TO_PM: [MessageHandler(Filters.text, ask_pm_time_to)],
SAVE_INPUT_PM: [MessageHandler(Filters.text, save_pm_input)]
},
fallbacks=[CommandHandler('stop', stop)]
)
| 27.539286
| 77
| 0.638568
|
43f5a50bc13f521cbfc0e8daa181ab1f17ccd6eb
| 792
|
py
|
Python
|
plugin/src/main/resources/org/lcsim/plugin/web/examples/mainLoop.py
|
omar-moreno/lcsim
|
ddd2abc441dcbc7b99779abf00e27ebbb5d38f0a
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2018-10-30T17:37:41.000Z
|
2018-10-30T17:37:41.000Z
|
plugin/src/main/resources/org/lcsim/plugin/web/examples/mainLoop.py
|
omar-moreno/lcsim
|
ddd2abc441dcbc7b99779abf00e27ebbb5d38f0a
|
[
"BSD-3-Clause-LBNL"
] | 26
|
2017-06-28T16:42:04.000Z
|
2021-07-22T16:06:59.000Z
|
plugin/src/main/resources/org/lcsim/plugin/web/examples/mainLoop.py
|
omar-moreno/lcsim
|
ddd2abc441dcbc7b99779abf00e27ebbb5d38f0a
|
[
"BSD-3-Clause-LBNL"
] | 5
|
2017-06-27T14:36:26.000Z
|
2020-12-12T22:52:13.000Z
|
#! /usr/bin/env jython
###
# mainLoop.py
# Wrapper to enable running outside of JAS3
# 03-AUG-2005 Jan Strube
###
from java.io import File
from org.lcsim.util.aida import AIDA
from org.lcsim.util.loop import LCSimLoop
## importing the Java analysis module
import Analysis101
## if Analysis102 cannot be found, please uncomment and modify
## the following two lines to tell Jython where to find it
# import sys
# sys.path.append('full path to Python module')
# importing the Analysis102 class in the Jython module Analysis102
from Analysis102 import Analysis102
loop = LCSimLoop()
input = File("psiMuMu.slcio")
loop.setLCIORecordSource(input)
loop.add(Analysis101())
loop.add(Analysis102())
# loop over all events with -1 or over any other positive number
loop.loop(-1)
loop.dispose()
| 27.310345
| 66
| 0.763889
|
03967f822f548c644fca6d4174ca5db46959ecf1
| 131
|
py
|
Python
|
collections/CompanyLogo.py
|
silvioedu/HackerRank-Python-Practice
|
e31ebe49d431c0a23fed0cd67a6984e2b0b7a260
|
[
"MIT"
] | null | null | null |
collections/CompanyLogo.py
|
silvioedu/HackerRank-Python-Practice
|
e31ebe49d431c0a23fed0cd67a6984e2b0b7a260
|
[
"MIT"
] | null | null | null |
collections/CompanyLogo.py
|
silvioedu/HackerRank-Python-Practice
|
e31ebe49d431c0a23fed0cd67a6984e2b0b7a260
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
from collections import Counter
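    # Count character frequencies of the sorted company name and print the three
    # most common characters with their counts; sorting the input first makes
    # ties resolve alphabetically, since most_common() preserves insertion order.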
[print(*c) for c in Counter(sorted(input())).most_common(3)]
| 32.75
| 65
| 0.671756
|
f8e8891bc61b54d41b5c3c7c5d66b7bf67cb28a2
| 1,151
|
py
|
Python
|
Datacamp Assignments/Data Engineer Track/7. Command Line Automation in Python/22_fnmatch.py
|
Ali-Parandeh/Data_Science_Playground
|
c529e9b3692381572de259e7c93938d6611d83da
|
[
"MIT"
] | null | null | null |
Datacamp Assignments/Data Engineer Track/7. Command Line Automation in Python/22_fnmatch.py
|
Ali-Parandeh/Data_Science_Playground
|
c529e9b3692381572de259e7c93938d6611d83da
|
[
"MIT"
] | null | null | null |
Datacamp Assignments/Data Engineer Track/7. Command Line Automation in Python/22_fnmatch.py
|
Ali-Parandeh/Data_Science_Playground
|
c529e9b3692381572de259e7c93938d6611d83da
|
[
"MIT"
] | 1
|
2021-03-10T09:40:05.000Z
|
2021-03-10T09:40:05.000Z
|
# Is this pattern True?
# As the head of data science you often get called in to help new data scientists
# track down an important intermediate csv file that has gone missing. This has taken
# so much time that you have decided to write an automated script that identifies all
# csv files created by a user and then copies them to centralized storage.
# As the first step in creating this nearline backup solution, you have to write a
# function that filters and returns only csv matches.
# Use the fnmatch.filter function to filter for csv files from a list of files.
# Make sure you write a Python function so it can be portable code that a larger system can be built from.
import fnmatch
# List of file names to process
files = ["data1.csv", "script.py", "image.png", "data2.csv", "all.py"]
# Function that returns only the csv matches from a list of file names
def csv_matches(list_of_files):
"""Return matches for csv files"""
matches = fnmatch.filter(list_of_files, "*.csv")
return matches
# Call function to find matches
matches = csv_matches(files)
print(f"Found matches: {matches}")
# <script.py> output:
# Found matches: ['data1.csv', 'data2.csv']
| 37.129032
| 106
| 0.735882
|
d1aa4ddac54abb1994b89b79515ae0c840368bf8
| 3,454
|
py
|
Python
|
oo/carro.py
|
tarcisosantos/pythonbirds
|
de4ea90efdeb27b7f77cb9f82b87a1ebf5cea45f
|
[
"MIT"
] | null | null | null |
oo/carro.py
|
tarcisosantos/pythonbirds
|
de4ea90efdeb27b7f77cb9f82b87a1ebf5cea45f
|
[
"MIT"
] | null | null | null |
oo/carro.py
|
tarcisosantos/pythonbirds
|
de4ea90efdeb27b7f77cb9f82b87a1ebf5cea45f
|
[
"MIT"
] | 1
|
2020-09-01T04:18:03.000Z
|
2020-09-01T04:18:03.000Z
|
"""
You must create a Carro (car) class that has
two attributes composed of two other classes:
1) Motor
2) Direcao (steering)
The Motor is responsible for controlling the speed and
offers the following attributes:
1) a velocidade (speed) data attribute
2) an acelerar method, which increments the speed by one unit
3) a frear method, which decrements the speed by two units
The Direcao is responsible for controlling the direction. It offers
the following attributes:
1) a direction value with the possible values: Norte, Sul, Leste, Oeste
2) a girar_a_direita (turn right) method
3) a girar_a_esquerda (turn left) method
      N
   O     L
      S
Example:
# Testing the Motor
>>> motor = Motor()
>>> motor.velocidade
0
>>> motor.acelerar()
>>> motor.velocidade
1
>>> motor.acelerar()
>>> motor.velocidade
2
>>> motor.acelerar()
>>> motor.velocidade
3
>>> motor.frear()
>>> motor.velocidade
1
>>> motor.frear()
>>> motor.velocidade
0
# Testing the Direcao
>>> direcao = Direcao()
>>> direcao.valor
'Norte'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Leste'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Sul'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Oeste'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Norte'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Oeste'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Sul'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Leste'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Norte'
# Carro class using the previous classes
>>> carro = Carro(direcao, motor)
>>> carro.calcular_velocidade()
0
>>> carro.acelerar()
>>> carro.calcular_velocidade()
1
>>> carro.acelerar()
>>> carro.calcular_velocidade()
2
>>> carro.frear()
>>> carro.calcular_velocidade()
0
>>> carro.calcular_direcao()
'Norte'
>>> carro.girar_a_direita()
>>> carro.calcular_direcao()
'Leste'
>>> carro.girar_a_esquerda()
>>> carro.calcular_direcao()
'Norte'
>>> carro.girar_a_esquerda()
>>> carro.calcular_direcao()
'Oeste'
"""
class Carro():
def __init__(self, direcao, motor):
self.motor = motor
self.direcao = direcao
def calcular_velocidade(self):
return self.motor.velocidade
def acelerar(self):
self.motor.acelerar()
def frear(self):
self.motor.frear()
def calcular_direcao(self):
return self.direcao.valor
def girar_a_direita(self):
return self.direcao.girar_a_direita()
def girar_a_esquerda(self):
return self.direcao.girar_a_esquerda()
NORTE = 'Norte'
SUL = 'Sul'
LESTE = 'Leste'
OESTE = 'Oeste'
class Direcao:
rotacao_a_direita_dct = {
NORTE: LESTE, LESTE: SUL, SUL: OESTE, OESTE: NORTE
}
rotacao_a_esquerda_dct = {
NORTE: OESTE, LESTE: NORTE, SUL: LESTE, OESTE: SUL
}
def __init__(self):
self.valor = NORTE
def girar_a_direita(self):
self.valor = self.rotacao_a_direita_dct[self.valor]
def girar_a_esquerda(self):
self.valor = self.rotacao_a_esquerda_dct[self.valor]
class Motor:
def __init__(self):
self.velocidade = 0
def acelerar(self):
self.velocidade += 1
def frear(self):
self.velocidade -= 2
self.velocidade = max(0, self.velocidade)
| 24.153846
| 70
| 0.631152
|
a5e569e471cd48c2b7456e97a494b31b38d0a5bd
| 580
|
py
|
Python
|
tests/test_templatetags.py
|
andreyfedoseev/django-registry
|
59b053506f1023c763e49affa41da3f3730ff80d
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_templatetags.py
|
andreyfedoseev/django-registry
|
59b053506f1023c763e49affa41da3f3730ff80d
|
[
"BSD-3-Clause"
] | 2
|
2016-02-24T06:49:03.000Z
|
2016-02-24T07:13:04.000Z
|
tests/test_templatetags.py
|
andreyfedoseev/django-registry
|
59b053506f1023c763e49affa41da3f3730ff80d
|
[
"BSD-3-Clause"
] | null | null | null |
from django import template
from djregistry import registry
import pytest
@pytest.mark.parametrize(
"template_string,rendered", (
("""{% load registry %}{{ "1"|from_registry }}""", "2"),
("""{% load registry %}{{ "foo"|from_registry }}""", ""),
("""{% load registry %}{{ "foo"|from_registry|default:"bar" }}""", "bar"),
)
)
def test_from_registry(template_string, rendered, monkeypatch):
monkeypatch.setattr(registry, "_registry", {
"1": "2"
})
assert template.Template(template_string).render(template.Context()) == rendered
| 32.222222
| 84
| 0.624138
|
c80d19fcae8409b0c842326102c23d0207b8f122
| 6,157
|
py
|
Python
|
sdk/lusid/models/result_value_string.py
|
finbourne/lusid-sdk-python-generated-preview
|
9c36c953e8149443a4390ed7f0c04d01211401b6
|
[
"MIT"
] | null | null | null |
sdk/lusid/models/result_value_string.py
|
finbourne/lusid-sdk-python-generated-preview
|
9c36c953e8149443a4390ed7f0c04d01211401b6
|
[
"MIT"
] | null | null | null |
sdk/lusid/models/result_value_string.py
|
finbourne/lusid-sdk-python-generated-preview
|
9c36c953e8149443a4390ed7f0c04d01211401b6
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.4425
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid.configuration import Configuration
class ResultValueString(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'value': 'str',
'result_value_type': 'str'
}
attribute_map = {
'value': 'value',
'result_value_type': 'resultValueType'
}
required_map = {
'value': 'optional',
'result_value_type': 'required'
}
def __init__(self, value=None, result_value_type=None, local_vars_configuration=None): # noqa: E501
"""ResultValueString - a model defined in OpenAPI"
:param value: the value itself
:type value: str
:param result_value_type: The available values are: ResultValue, ResultValueDictionary, ResultValue0D, ResultValueDecimal, ResultValueInt, ResultValueString, CashFlowValue, CashFlowValueSet (required)
:type result_value_type: str
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._value = None
self._result_value_type = None
self.discriminator = None
self.value = value
self.result_value_type = result_value_type
@property
def value(self):
"""Gets the value of this ResultValueString. # noqa: E501
the value itself # noqa: E501
:return: The value of this ResultValueString. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this ResultValueString.
the value itself # noqa: E501
:param value: The value of this ResultValueString. # noqa: E501
:type value: str
"""
self._value = value
@property
def result_value_type(self):
"""Gets the result_value_type of this ResultValueString. # noqa: E501
The available values are: ResultValue, ResultValueDictionary, ResultValue0D, ResultValueDecimal, ResultValueInt, ResultValueString, CashFlowValue, CashFlowValueSet # noqa: E501
:return: The result_value_type of this ResultValueString. # noqa: E501
:rtype: str
"""
return self._result_value_type
@result_value_type.setter
def result_value_type(self, result_value_type):
"""Sets the result_value_type of this ResultValueString.
The available values are: ResultValue, ResultValueDictionary, ResultValue0D, ResultValueDecimal, ResultValueInt, ResultValueString, CashFlowValue, CashFlowValueSet # noqa: E501
:param result_value_type: The result_value_type of this ResultValueString. # noqa: E501
:type result_value_type: str
"""
if self.local_vars_configuration.client_side_validation and result_value_type is None: # noqa: E501
raise ValueError("Invalid value for `result_value_type`, must not be `None`") # noqa: E501
allowed_values = ["ResultValue", "ResultValueDictionary", "ResultValue0D", "ResultValueDecimal", "ResultValueInt", "ResultValueString", "CashFlowValue", "CashFlowValueSet"] # noqa: E501
if self.local_vars_configuration.client_side_validation and result_value_type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `result_value_type` ({0}), must be one of {1}" # noqa: E501
.format(result_value_type, allowed_values)
)
self._result_value_type = result_value_type
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResultValueString):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ResultValueString):
return True
return self.to_dict() != other.to_dict()
| 33.644809
| 209
| 0.621407
|
df0a284a56f439044a94a287df63da74658ddf9c
| 1,242
|
py
|
Python
|
script/analyze/script.py
|
xing969541/SLKG
|
76044b363f1f6a19d9b05b2ff37b9f95681829b3
|
[
"MIT"
] | null | null | null |
script/analyze/script.py
|
xing969541/SLKG
|
76044b363f1f6a19d9b05b2ff37b9f95681829b3
|
[
"MIT"
] | null | null | null |
script/analyze/script.py
|
xing969541/SLKG
|
76044b363f1f6a19d9b05b2ff37b9f95681829b3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from uv import main
import pickle
import numpy as np
import pandas as pd
with open("embidDict.pkl", "br") as file:
di = pickle.load(file)
gdi = di["gene"]
igdi = {v:k for k,v in gdi.items()}
with open("PredData_gg.pkl", "br") as file:
(src, dst), mat = pickle.load(file)
src = src.detach().cpu().numpy()
dst = dst.detach().cpu().numpy()
table = pd.read_csv("Human_SL.csv", header=0)
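# Build a symmetric lookup of known synthetic-lethal gene pairs by name:
# sl[gene_a][gene_b] = row index in Human_SL.csv, stored in both directions.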
sl = {}
for i, (l,r) in enumerate(zip(table["gene_a.name"], table["gene_b.name"])):
if l in sl:
sl[l][r] = i
else:
sl[l] = {r:i}
if r in sl:
sl[r][l] = i
else:
sl[r] = {l:i}
save = 1000
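# Refit the 'gg' model up to 200 times; after each fit, score pred = mat * uv with
# the edges from PredData_gg zeroed out, and keep (in bestgguv.pkl) the run whose
# predictions above 0.3 give the smallest ratio of total predictions to pairs
# already present in Human_SL.csv.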
for i in range(200):
main('gg')
uv = np.load("uvdata_gg.npy")
pred = mat*uv
pred[src,dst] = 0
l, r = np.where(pred>0.3)
tri = (pred[l,r], [igdi[x] for x in l], [igdi[x] for x in r])
tri = list(zip(*tri))
tri.sort(key=lambda x:x[0], reverse=True)
count = sum(1 for s, l, r in tri if l in sl and r in sl[l])
tmp = len(l)/count
print(i, tmp)
if tmp < save:
save = tmp
with open('bestgguv.pkl','bw') as file:
pickle.dump(uv,file)
| 25.346939
| 76
| 0.513688
|
61fdfbca86ae0b578f3bd3062264344f782ea724
| 251
|
py
|
Python
|
dev/tools/run_tests.py
|
awillats/brian2
|
e1107ed0cc4a7d6c69c1e2634b675ba09edfd9fc
|
[
"BSD-2-Clause"
] | 674
|
2015-01-14T11:05:39.000Z
|
2022-03-29T04:53:50.000Z
|
dev/tools/run_tests.py
|
awillats/brian2
|
e1107ed0cc4a7d6c69c1e2634b675ba09edfd9fc
|
[
"BSD-2-Clause"
] | 937
|
2015-01-05T13:24:22.000Z
|
2022-03-25T13:10:13.000Z
|
dev/tools/run_tests.py
|
awillats/brian2
|
e1107ed0cc4a7d6c69c1e2634b675ba09edfd9fc
|
[
"BSD-2-Clause"
] | 237
|
2015-01-05T13:54:16.000Z
|
2022-03-15T22:16:32.000Z
|
'''
Run all the non-standalone tests using pytest. Exits with error code 1 if a test failed.
'''
import sys
import brian2
if __name__ == '__main__':
if not brian2.test(): # If the test fails, exit with a non-zero error code
sys.exit(1)
| 22.818182
| 88
| 0.681275
|
dc1ce362e5abe7c53b2a0f82919906e6023686fc
| 894
|
py
|
Python
|
day-22-Pong-game/ball.py
|
simonszuharev/designing-python
|
ed4c574137fecf43637f9798eef59051ed41fba2
|
[
"MIT"
] | 1
|
2021-04-21T00:26:05.000Z
|
2021-04-21T00:26:05.000Z
|
day-22-Pong-game/ball.py
|
simonszuharev/designing-python
|
ed4c574137fecf43637f9798eef59051ed41fba2
|
[
"MIT"
] | null | null | null |
day-22-Pong-game/ball.py
|
simonszuharev/designing-python
|
ed4c574137fecf43637f9798eef59051ed41fba2
|
[
"MIT"
] | null | null | null |
from turtle import Turtle
class Ball(Turtle):
def __init__(self):
super().__init__()
self.color("white")
self.shape("circle")
self.penup()
self.x_move = 10
self.y_move = 10
self.move_speed = 0.1
def __move_the_ball(self):
new_x = self.xcor() + self.x_move
new_y = self.ycor() + self.y_move
self.goto(new_x, new_y)
def __bounce(self):
self.y_move *= -1
def __bounce_back(self):
self.x_move *= -1
self.move_speed *= 0.9
def __reset_position(self):
self.goto(0,0)
self.move_speed = 0.1
self.x_move *= -1
self.move()
def bounce_back(self):
self.__bounce_back()
def bounce(self):
self.__bounce()
def move(self):
self.__move_the_ball()
def reset_position(self):
self.__reset_position()
| 20.790698
| 41
| 0.560403
|