Dataset schema (one record per row below; fields appear in this order, with the value ranges reported by the dataset viewer):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 2–616)
- content_id: string (length 40)
- detected_licenses: list (0–69 items)
- license_type: string (2 classes)
- repo_name: string (length 5–118)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (length 4–63)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (2.91k–686M, nullable)
- star_events_count: int64 (0–209k)
- fork_events_count: int64 (0–110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (213 classes)
- src_encoding: string (30 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (2–10.3M)
- extension: string (246 classes)
- content: string (length 2–10.3M)
- authors: list (1 item)
- author_id: string (length 0–212)
b93cd5e9e48d830d27790b90745f070aa03e1232
|
9bb6f7a8e547480edab135bdba01cdf84901b660
|
/December14/cocoa1.py
|
1397d64520903468af58645334cd84fd3dd9eaa1
|
[] |
no_license
|
super-cooper/Advent-of-Code-2018
|
ada35065028ba2f3e01614587a8e5c7fdc1e3965
|
421bf16e436a3d09fe13b284c20ab8dbf44989c6
|
refs/heads/master
| 2020-04-09T01:38:52.468971
| 2019-01-01T21:47:07
| 2019-01-01T21:47:07
| 159,912,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
# Advent of Code 2018, day 14 part 1: two elves repeatedly combine their current
# recipes and append the digits of the sum to the scoreboard until n + 10 recipes exist.
scores, e1, e2 = [3, 7], 0, 1
n = int(input())
while len(scores) < n + 10:
    scores.extend(map(int, str(scores[e1] + scores[e2])))
    e1, e2 = (1 + scores[e1] + e1) % len(scores), (1 + scores[e2] + e2) % len(scores)
# Take the ten scores immediately after the first n; the loop can overshoot by one
# recipe, so slice from n rather than from the end of the list.
print(''.join(map(str, scores[n:n + 10])))
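# Editor's note (illustrative, not part of the original file): assuming the sample
# value from the puzzle statement, feeding 9 on stdin should print the ten scores
# after the first nine recipes:
#     echo 9 | python3 cocoa1.py    # -> 5158916779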
|
[
"adamcooperrc@gmail.com"
] |
adamcooperrc@gmail.com
|
854c6b2812dfc8cef0a54855aa4f831665df4f1c
|
d9fe6ce3350cdb2b261f8fd2d5dd9cec7f436629
|
/nablapps/interactive/migrations/0010_auto_20190205_1436.py
|
4912ece124e2e2a5d63fea8144cc505d401d74a3
|
[
"MIT"
] |
permissive
|
pettaroni/nablaweb
|
d1421e47ef77e27ecdf1950d94cbd67d377f5355
|
5e610698a276884b9cd779a718dfdee641713636
|
refs/heads/master
| 2021-01-06T17:01:12.358350
| 2020-02-18T16:25:08
| 2020-02-18T16:25:08
| 241,408,081
| 0
| 0
|
MIT
| 2020-02-18T16:17:36
| 2020-02-18T16:17:35
| null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
# Generated by Django 2.1.5 on 2019-02-05 14:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('interactive', '0009_auto_20190205_1416'),
]
operations = [
migrations.RemoveField(
model_name='quiz',
name='publication_date',
),
migrations.AlterField(
model_name='quiz',
name='published',
field=models.NullBooleanField(default=True, verbose_name='Publisert'),
),
]
|
[
"oystein.hiaasen@gmail.com"
] |
oystein.hiaasen@gmail.com
|
cb7435a2465543b6f17d0a1405bb8cb894e41d92
|
ef158f8e811044fa02849289bfc4bd52e7acbfeb
|
/plus_grand.py
|
d41a44dfe35c6765f5b46781ba8b2bea429913cb
|
[] |
no_license
|
garou93/algorithms-python-collections
|
df2912e6c1798ea768257063fdedaa1ca96d0a6a
|
721598f91be86a2c579f9ab9236f046842919125
|
refs/heads/master
| 2023-07-16T09:27:54.551151
| 2021-09-05T08:07:24
| 2021-09-05T08:07:24
| 379,366,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 797
|
py
|
# haithem ben abdelaziz: haithem.ben.abdelaziz@gmail.com
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 21 21:10:38 2021
@author: haihem
"""
a = [3, 8, 3, 5, 9, 1, 104]
# largest element of the list
print(max(a))
# indices of the largest element
print([i for i, j in enumerate(a) if j == max(a)])
# mixed numbers and strings: in Python 2, strings always compare as greater than numbers
def find_largest(numbers):
    # Initialize the maximum with the first element, then compare every
    # remaining element against the current maximum.
    largest = numbers[0]
    for i in range(1, len(numbers)):
        if numbers[i] > largest:
            largest = numbers[i]
    return largest
numbers = [200, 8, 3, 5, 9, 1, 104]
print("---------------------------------------------------")
print(find_largest(numbers))
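# Editor's note (illustrative, not part of the original file): expected results on
# the sample data already defined above:
#     max(a)                                        -> 104
#     [i for i, j in enumerate(a) if j == max(a)]   -> [6]
#     find_largest([200, 8, 3, 5, 9, 1, 104])       -> 200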
|
[
"haithem.ben.abdelaziz@gmail.com"
] |
haithem.ben.abdelaziz@gmail.com
|
d400e929aeef17d0f72bdab9293bd6a3ea74d8a2
|
d21ab06a071827a77a3e066758135b435595c3e0
|
/dictIO/phonebook.py
|
9b6db02725c6421d538baa8020368af33745842a
|
[] |
no_license
|
andytanghr/python-exercises
|
9e6cbe065b0554aeb476c8f17c330aa12ef51499
|
9f388c2781b105a2e38b8104a939caa1387e62cd
|
refs/heads/master
| 2021-05-04T00:48:41.585668
| 2018-02-20T14:57:43
| 2018-02-20T14:57:43
| 120,352,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,305
|
py
|
import json
phonebook = {
'Melissa': {
'number': '584-394-5857',
'email': 'melissa@melissa.com',
'website': 'www.melissa.com'
}
}
def displayMenu():
menu = '''
Electronic Phone Book
=====================
1. Look up an entry
2. Set an entry
3. Delete an entry
4. List all entries
5. Save entries
6. Restore saved entries
7. Quit
'''
print(menu)
choice = int(input('What do you want to do (1-7)? '))
if choice == 1:
return printContact()
elif choice == 2:
return addContact()
elif choice == 3:
return delContact()
elif choice == 4:
return listAll()
elif choice == 5:
return savePhonebook()
elif choice == 6:
return restorePhonebook()
elif choice == 7:
print('Bye.')
return quit()
# menu choice 1
def printContact():
name = input('Name: ')
while True:
if name not in phonebook:
name = input('Name not found. Enter name: ')
else:
number = phonebook[name]['number']
email = phonebook[name]['email']
website = phonebook[name]['website']
print('Found entry for {}: {}\n{}\n{}'.format(name, number, email, website))
return displayMenu()
# menu choice 2
def addContact():
newName = input('Name: ')
newNumber = input('Number: ')
newEmail = input('Email: ')
newWebsite = input('Website: ')
phonebook[newName] = {'number': newNumber, 'email':newEmail, 'website':newWebsite}
print('Entry stored for {}.'.format(newName))
return displayMenu()
# menu choice 3
def delContact():
name = input('Name: ')
while True:
if name not in phonebook:
name = input('Name not found. Enter name: ')
else:
print('Entry deleted for {}.'.format(name))
del phonebook[name]
return displayMenu()
# menu choice 4
def listAll():
for key, value in phonebook.items():
print('Found entry for {}: {}'.format(key, value))
return displayMenu()
# menu choice 5
def savePhonebook():
with open('phonebook.json', 'w') as data:
json.dump(phonebook, data)
    print('Entries saved to phonebook.json\n')
return displayMenu()
# menu choice 6
def restorePhonebook():
with open('phonebook.json', 'r') as data:
global phonebook
phonebook = json.load(data)
print('Restored saved entries.')
return displayMenu()
displayMenu()
|
[
"30758932+andytanghr@users.noreply.github.com"
] |
30758932+andytanghr@users.noreply.github.com
|
a912c5cc9376aea176d078090f76580d4de5ed97
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/aio/operations_async/__init__.py
|
05bef6268f24dc985a5ca1e307acc0438f7e1c5a
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507
| 2020-09-04T15:48:52
| 2020-09-04T15:48:52
| 205,457,088
| 1
| 2
|
MIT
| 2020-06-16T16:38:15
| 2019-08-30T21:08:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,903
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._operations_async import Operations
from ._availability_sets_operations_async import AvailabilitySetsOperations
from ._proximity_placement_groups_operations_async import ProximityPlacementGroupsOperations
from ._virtual_machine_extension_images_operations_async import VirtualMachineExtensionImagesOperations
from ._virtual_machine_extensions_operations_async import VirtualMachineExtensionsOperations
from ._virtual_machine_images_operations_async import VirtualMachineImagesOperations
from ._usage_operations_async import UsageOperations
from ._virtual_machines_operations_async import VirtualMachinesOperations
from ._virtual_machine_sizes_operations_async import VirtualMachineSizesOperations
from ._images_operations_async import ImagesOperations
from ._virtual_machine_scale_sets_operations_async import VirtualMachineScaleSetsOperations
from ._virtual_machine_scale_set_extensions_operations_async import VirtualMachineScaleSetExtensionsOperations
from ._virtual_machine_scale_set_rolling_upgrades_operations_async import VirtualMachineScaleSetRollingUpgradesOperations
from ._virtual_machine_scale_set_vms_operations_async import VirtualMachineScaleSetVMsOperations
from ._log_analytics_operations_async import LogAnalyticsOperations
from ._virtual_machine_run_commands_operations_async import VirtualMachineRunCommandsOperations
from ._galleries_operations_async import GalleriesOperations
from ._gallery_images_operations_async import GalleryImagesOperations
from ._gallery_image_versions_operations_async import GalleryImageVersionsOperations
from ._disks_operations_async import DisksOperations
from ._snapshots_operations_async import SnapshotsOperations
__all__ = [
'Operations',
'AvailabilitySetsOperations',
'ProximityPlacementGroupsOperations',
'VirtualMachineExtensionImagesOperations',
'VirtualMachineExtensionsOperations',
'VirtualMachineImagesOperations',
'UsageOperations',
'VirtualMachinesOperations',
'VirtualMachineSizesOperations',
'ImagesOperations',
'VirtualMachineScaleSetsOperations',
'VirtualMachineScaleSetExtensionsOperations',
'VirtualMachineScaleSetRollingUpgradesOperations',
'VirtualMachineScaleSetVMsOperations',
'LogAnalyticsOperations',
'VirtualMachineRunCommandsOperations',
'GalleriesOperations',
'GalleryImagesOperations',
'GalleryImageVersionsOperations',
'DisksOperations',
'SnapshotsOperations',
]
|
[
"noreply@github.com"
] |
YijunXieMS.noreply@github.com
|
83b28dfae5f1ca84681ff03e90a71b786b5a70af
|
e2e08d7c97398a42e6554f913ee27340226994d9
|
/pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_LOG/test_c142773.py
|
37a9287d71bc936fc993044620543234de87ae83
|
[] |
no_license
|
lizhuoya1111/Automated_testing_practice
|
88e7be512e831d279324ad710946232377fb4c01
|
b3a532d33ddeb8d01fff315bcd59b451befdef23
|
refs/heads/master
| 2022-12-04T08:19:29.806445
| 2020-08-14T03:51:20
| 2020-08-14T03:51:20
| 287,426,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,380
|
py
|
import pytest
import time
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from page_obj.scg.scg_def_physical_interface import *
from page_obj.scg.scg_def_vlan_interface import *
from page_obj.scg.scg_def_bridge import *
from page_obj.common.rail import *
from page_obj.scg.scg_def_log import *
from page_obj.common.ssh import *
from page_obj.scg.scg_def_dhcp import *
from page_obj.scg.scg_dev import *
from page_obj.scg.scg_def_ifname_OEM import *
from page_obj.scg.scg_def import *
test_id = 142773
def test_c142773(browser):
try:
login_web(browser, url=dev1)
edit_log_filter(browser, index=9, level="全部")
loginfo1 = get_log(browser, 管理日志)
# print(loginfo1)
edit_log_filter(browser, index=9, level=["debug"])
loginfo2 = get_log(browser, 管理日志)
# print(loginfo2)
try:
assert "name=debug" in loginfo2 and "name=all" in loginfo1
rail_pass(test_run_id, test_id)
except:
rail_fail(test_run_id, test_id)
assert "name=debug" in loginfo2 and "name=all" in loginfo1
except Exception as err:
        # If any of the steps above raised an error, reboot the device and restore its configuration
print(err)
reload(hostip=dev1)
rail_fail(test_run_id, test_id)
assert False
if __name__ == '__main__':
pytest.main(["-v", "-s", "test_c" + str(test_id) + ".py"])
|
[
"15501866985@163.com"
] |
15501866985@163.com
|
7b9cd7c6fc9a6073dfccdd903607664815e6dd41
|
58790f01a79ab1676db4e560da63e9b6fa1de2bc
|
/ecommerceapp/migrations/0003_customer_totalprice.py
|
d85791cb27a32e110b8a9757dc85fd7077785680
|
[] |
no_license
|
uday525252/ecommerceproject
|
e5d9355e7edf37f7c42befd8d2186e6b992fb4a6
|
28f85d5d70e87726f956246cf13637e553bc8de1
|
refs/heads/master
| 2023-05-07T16:50:30.081823
| 2021-06-03T04:35:25
| 2021-06-03T04:35:25
| 373,380,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
# Generated by Django 3.1 on 2021-06-02 05:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ecommerceapp', '0002_customer'),
]
operations = [
migrations.AddField(
model_name='customer',
name='totalprice',
field=models.IntegerField(default='1'),
preserve_default=False,
),
]
|
[
"udaykirantutor@gmail.com"
] |
udaykirantutor@gmail.com
|
632e8ba575b0717ce3c1b6802327b84c71482acf
|
4c77f1579b68f86891e64640e58224b9309e0b06
|
/src/gremlin/graph/traversals.py
|
cc58440c29cf45655a14bcd0cc0a2638c44cc0a4
|
[] |
no_license
|
rokujyouhitoma/gremlin
|
aa5ccd5646b8882fb94d915d652fa1d72537d811
|
783d765e5e6af28a3200103b528dc95144477cbc
|
refs/heads/main
| 2023-06-03T00:01:17.367580
| 2021-06-20T00:07:42
| 2021-06-20T00:07:42
| 324,054,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,570
|
py
|
import typing
from abc import ABCMeta
from gremlin.nodes import (
AnyNode,
IntegerNode,
MethodCallNode,
MultipleNode,
Node,
StringNode,
gNode,
)
from gremlin.structures import List
class Traversal(metaclass=ABCMeta):
def iterate(self) -> "Traversal":
pass
def next(self, amount: int) -> List:
pass
def none(self) -> "Traversal":
pass
def toList(self) -> "Traversal":
# TODO: Should support for -> default List<E>
pass
def toSet(self) -> "Traversal":
# TODO: Should support for -> default Set<E>
pass
class GraphTraversal(metaclass=ABCMeta):
def addE(self, edgeLabel: typing.Union[str, "GraphTraversal"]) -> "GraphTraversal":
pass
def addV(
self, vertexLabel: typing.Union[str, "GraphTraversal", None]
) -> "GraphTraversal":
pass
def aggregate(self, sideEffectKey: str) -> "GraphTraversal":
pass
def and_(self, *andTraversals: "GraphTraversal") -> "GraphTraversal":
pass
def as_(self, stepLabel: str, *stepLabels: str) -> "GraphTraversal":
pass
def asAdmin(self) -> "GraphTraversal":
pass
def barrier(self, maxBarrierSize: typing.Optional[int]) -> "GraphTraversal":
pass
def both(self, *edgeLabels: str) -> "GraphTraversal":
pass
def bothE(self, *edgeLabels: str) -> "GraphTraversal":
pass
def bothV(self) -> "GraphTraversal":
pass
def branch(self, branchTraversal: "GraphTraversal") -> "GraphTraversal":
pass
def by(self, *args: typing.Union[str, "GraphTraversal"]) -> "GraphTraversal":
pass
def cap(self, sideEffectKey: str, *sideEffectKeys: str) -> "GraphTraversal":
pass
def choose(self, choiceTraversal: "GraphTraversal") -> "GraphTraversal":
pass
def coalesce(self, coalesceTraversals: "GraphTraversal") -> "GraphTraversal":
pass
def coin(self, probability: int) -> "GraphTraversal":
pass
def connectedComponent(self) -> "GraphTraversal":
pass
def count(self) -> "GraphTraversal":
pass
def cyclicPath(self) -> "GraphTraversal":
pass
def dedup(self, *dedupLabels: str) -> "GraphTraversal":
pass
def drop(self) -> "GraphTraversal":
pass
def elementMap(self, *propertyKeys: str) -> "GraphTraversal":
pass
def emit(self) -> "GraphTraversal":
pass
def filter(self, filterTraversal: "GraphTraversal") -> "GraphTraversal":
pass
def flatMap(self, flatMapTraversal: "GraphTraversal") -> "GraphTraversal":
pass
def fold(self) -> "GraphTraversal":
pass
def from_(self, fromStepLabel: str) -> "GraphTraversal":
pass
def group(self, sideEffectKey: typing.Optional[str] = None) -> "GraphTraversal":
pass
def groupCount(
self, sideEffectKey: typing.Optional[str] = None
) -> "GraphTraversal":
pass
def has(
self, propertyKey: str, value: typing.Optional[typing.Any] = None
) -> "GraphTraversal":
pass
def hasId(self, id: str, *otherIds: str) -> "GraphTraversal":
pass
def hasKey(self, label: str, *otherLabels: str) -> "GraphTraversal":
pass
def hasLabel(self, label: str, *otherLabels: str) -> "GraphTraversal":
pass
def hasNot(self, propertyKey: str) -> "GraphTraversal":
pass
def hasValue(self, value: str, *otherValues: str) -> "GraphTraversal":
pass
def id(self) -> "GraphTraversal":
pass
def identity(self) -> "GraphTraversal":
pass
def in_(self, *edgeLabels: str) -> "GraphTraversal":
pass
def index(self) -> "GraphTraversal":
pass
def inE(self, *edgeLabels: str) -> "GraphTraversal":
pass
def inV(self) -> "GraphTraversal":
pass
def is_(self, value: typing.Any) -> "GraphTraversal":
pass
def iterate(self) -> "GraphTraversal":
pass
def key(self) -> "GraphTraversal":
pass
def label(self) -> "GraphTraversal":
pass
def limit(self, limit: int) -> "GraphTraversal":
pass
def local(self, localTraversal: "GraphTraversal") -> "GraphTraversal":
pass
def loops(self) -> "GraphTraversal":
pass
def map(self, mapTraversal: "GraphTraversal") -> "GraphTraversal":
pass
def match(self, *args: "GraphTraversal") -> "GraphTraversal":
pass
def max(self) -> "GraphTraversal":
pass
def mean(self) -> "GraphTraversal":
pass
def min(self) -> "GraphTraversal":
pass
def none(self) -> "GraphTraversal":
pass
def not_(self, notTraversal: "GraphTraversal") -> "GraphTraversal":
pass
def option(self, traversalOption: "GraphTraversal") -> "GraphTraversal":
pass
def or_(self, orTraversals: "GraphTraversal") -> "GraphTraversal":
pass
def order(self, *args: "GraphTraversal") -> "GraphTraversal":
# TODO: Should support for order() and order(Scope scope)
pass
def otherV(self) -> "GraphTraversal":
pass
def out(self, *edgeLabels: str) -> "GraphTraversal":
pass
def outE(self, *edgeLabels: str) -> "GraphTraversal":
pass
def outV(self) -> "GraphTraversal":
pass
def pageRank(self, alpha: typing.Optional[int] = None) -> "GraphTraversal":
pass
def path(self) -> "GraphTraversal":
pass
def peerPressure(self) -> "GraphTraversal":
pass
def profile(self) -> "GraphTraversal":
pass
def project(self, projectKey: str, *otherProjectKeys: str) -> "GraphTraversal":
pass
def properties(self, *propertyKeys: str) -> "GraphTraversal":
pass
def propertyMap(self, *propertyKeys: str) -> "GraphTraversal":
pass
def range(self, low: int, high: int) -> "GraphTraversal":
pass
def read(self) -> "GraphTraversal":
pass
def repeat(
self,
loopName: typing.Union[str, "GraphTraversal"],
repeatTraversal: typing.Optional["GraphTraversal"],
) -> "GraphTraversal":
pass
def sack(self) -> "GraphTraversal":
pass
def sample(self, amountToSample: int) -> "GraphTraversal":
pass
def select(self, selectKey: str) -> "GraphTraversal":
pass
def shortestPath(self) -> "GraphTraversal":
pass
def simplePath(self) -> "GraphTraversal":
pass
def skip(self, skip: int) -> "GraphTraversal":
pass
def subgraph(self, sideEffectKey: str) -> "GraphTraversal":
pass
def store(self, sideEffectKey: str) -> "GraphTraversal":
pass
def sum(self) -> "GraphTraversal":
pass
def tail(self, limit: typing.Optional[int] = None) -> "GraphTraversal":
pass
def timeLimit(self, timeLimit: int) -> "GraphTraversal":
pass
def times(self, maxLoops: int) -> "GraphTraversal":
pass
def to(self, toStepLabel: str) -> "GraphTraversal":
pass
def tree(self, sideEffectKey: typing.Optional[str] = None) -> "GraphTraversal":
pass
def until(self, *args: "GraphTraversal") -> "GraphTraversal":
pass
def unfold(self) -> "GraphTraversal":
pass
def V(self) -> "GraphTraversal":
pass
def value(self) -> "GraphTraversal":
pass
def valueMap(self, *propertyKeys: str) -> "GraphTraversal":
pass
def values(self, propertyKey: str) -> "GraphTraversal":
pass
def where(self, whereTraversal: "GraphTraversal") -> "GraphTraversal":
pass
def with_(self, key: str) -> "GraphTraversal":
pass
def write(self) -> "GraphTraversal":
pass
class DefaultGraphTraversal(Traversal, GraphTraversal, MultipleNode):
nodes: typing.List[Node]
    def __init__(self, nodes: typing.Optional[typing.List[Node]] = None):
        # Avoid a mutable default argument: a shared default list would accumulate
        # nodes across every DefaultGraphTraversal instance.
        self.nodes = nodes if nodes is not None else [gNode()]
def addE(
self, edgeLabel: typing.Union[str, "GraphTraversal"]
) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"addE",
[
StringNode(edgeLabel)
if isinstance(edgeLabel, str)
else AnyNode(edgeLabel)
],
)
)
return self
def addV(
self, vertexLabel: typing.Union[str, "GraphTraversal", None] = None
) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"addV",
[]
if vertexLabel is None
else [
StringNode(vertexLabel)
if isinstance(vertexLabel, str)
else AnyNode(vertexLabel)
],
)
)
return self
def aggregate(self, sideEffectKey: str) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("aggregate", [StringNode(sideEffectKey)]))
return self
def and_(self, *andTraversals: "GraphTraversal") -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("and", [AnyNode(v) for v in andTraversals]))
return self
def as_(self, stepLabel: str, *stepLabels: str) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode("as", [StringNode(v) for v in (stepLabel, *stepLabels)])
)
return self
def asAdmin(self) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"asAdmin",
[],
)
)
return self
def barrier(
self, maxBarrierSize: typing.Optional[int] = None
) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"barrier",
[] if maxBarrierSize is None else [IntegerNode(maxBarrierSize)],
)
)
return self
def both(self, *edgeLabels: str) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"both",
[StringNode(v) for v in edgeLabels],
)
)
return self
def bothE(self, *edgeLabels: str) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"bothE",
[StringNode(v) for v in edgeLabels],
)
)
return self
def bothV(self) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"bothV",
[],
)
)
return self
def branch(self, branchTraversal: "GraphTraversal") -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"branch",
[AnyNode(branchTraversal)],
)
)
return self
def by(self, *args: typing.Union[str, "GraphTraversal"]) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"by",
[StringNode(v) if isinstance(v, str) else AnyNode(v) for v in args],
)
)
return self
def cap(self, sideEffectKey: str, *sideEffectKeys: str) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"cap", [StringNode(v) for v in (sideEffectKey, *sideEffectKeys)]
)
)
return self
def choose(self, choiceTraversal: "GraphTraversal") -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"choose",
[AnyNode(choiceTraversal)],
)
)
return self
def coalesce(self, coalesceTraversals: "GraphTraversal") -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"coalesce",
[AnyNode(coalesceTraversals)],
)
)
return self
def coin(self, probability: int) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("coin", [IntegerNode(probability)]))
return self
def connectedComponent(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("connectedComponent", []))
return self
def count(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("count", []))
return self
def cyclicPath(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("cyclicPath", []))
return self
def dedup(self, *dedupLabels: str) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"dedup",
[StringNode(v) for v in dedupLabels],
)
)
return self
def drop(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("drop", []))
return self
def elementMap(self, *propertyKeys: str) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"elementMap",
[StringNode(v) for v in propertyKeys],
)
)
return self
def emit(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("emit", []))
return self
def filter(self, filterTraversal: "GraphTraversal") -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("filter", [AnyNode(filterTraversal)]))
return self
def flatMap(self, flatMapTraversal: "GraphTraversal") -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("flatMap", [AnyNode(flatMapTraversal)]))
return self
def fold(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("fold", []))
return self
def from_(self, fromStepLabel: str) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("from", [StringNode(fromStepLabel)]))
return self
def group(
self, sideEffectKey: typing.Optional[str] = None
) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"group",
[] if sideEffectKey is None else [StringNode(sideEffectKey)],
)
)
return self
def groupCount(
self, sideEffectKey: typing.Optional[str] = None
) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"groupCount",
[] if sideEffectKey is None else [StringNode(sideEffectKey)],
)
)
return self
def has(
self, propertyKey: str, value: typing.Optional[typing.Any] = None
) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"has",
[StringNode(propertyKey), StringNode(value)]
if value
else [StringNode(propertyKey)],
)
)
return self
def hasId(self, id: str, *otherIds: str) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode("hasId", [StringNode(v) for v in (id, *otherIds)])
)
return self
def hasKey(self, label: str, *otherLabels: str) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode("hasKey", [StringNode(v) for v in (label, *otherLabels)])
)
return self
def hasLabel(self, label: str, *otherLabels: str) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode("hasLabel", [StringNode(v) for v in (label, *otherLabels)])
)
return self
def hasNot(self, propertyKey: str) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("hasNot", [StringNode(propertyKey)]))
return self
def hasValue(self, value: str, *otherValues: str) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode("hasValue", [StringNode(v) for v in (value, *otherValues)])
)
return self
def id(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("id", []))
return self
def identity(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("identity", []))
return self
def in_(self, *edgeLabels: str) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("in", [StringNode(v) for v in edgeLabels]))
return self
def index(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("index", []))
return self
def inE(self, *edgeLabels: str) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("inE", [StringNode(v) for v in edgeLabels]))
return self
def inV(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("inV", []))
return self
def is_(self, value: typing.Any) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("is", [IntegerNode(value)]))
return self
def iterate(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("iterate", []))
return self
def key(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("key", []))
return self
def label(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("label", []))
return self
def limit(self, limit: int) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("limit", [IntegerNode(limit)]))
return self
def local(self, localTraversal: "GraphTraversal") -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("local", [AnyNode(localTraversal)]))
return self
def loops(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("loops", []))
return self
def map(self, mapTraversal: "GraphTraversal") -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("map", [AnyNode(mapTraversal)]))
return self
def match(self, *args: "GraphTraversal") -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("match", [AnyNode(v) for v in args]))
return self
def max(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("max", []))
return self
def mean(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("mean", []))
return self
def min(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("min", []))
return self
def none(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("none", []))
return self
def next(self, amount: int = 0) -> List:
args = [] if amount == 0 else [IntegerNode(amount)]
self.nodes.append(MethodCallNode("next", args))
return List(self.nodes)
def not_(self, notTraversal: "GraphTraversal") -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("not", [AnyNode(notTraversal)]))
return self
def option(self, traversalOption: "GraphTraversal") -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("option", [AnyNode(traversalOption)]))
return self
def or_(self, orTraversals: "GraphTraversal") -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("or", [AnyNode(orTraversals)]))
return self
def order(self, *args: "GraphTraversal") -> "DefaultGraphTraversal":
# TODO: Should support for order() and order(Scope scope)
self.nodes.append(MethodCallNode("order", [AnyNode(v) for v in args]))
return self
def otherV(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("otherV", []))
return self
def out(self, *edgeLabels: str) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("out", [StringNode(v) for v in edgeLabels]))
return self
def outE(self, *edgeLabels: str) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("outE", [StringNode(v) for v in edgeLabels]))
return self
def outV(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("outV", []))
return self
def pageRank(self, alpha: typing.Optional[int] = None) -> "DefaultGraphTraversal":
# TODO: alpha is type of double
self.nodes.append(
MethodCallNode("pageRank", [] if alpha is None else [IntegerNode(alpha)])
)
return self
def path(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("path", []))
return self
def peerPressure(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("peerPressure", []))
return self
def profile(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("profile", []))
return self
def project(
self, projectKey: str, *otherProjectKeys: str
) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"project", [StringNode(v) for v in (projectKey, *otherProjectKeys)]
)
)
return self
def properties(self, *propertyKeys: str) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode("properties", [StringNode(v) for v in propertyKeys])
)
return self
def propertyMap(self, *propertyKeys: str) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode("propertyMap", [StringNode(v) for v in propertyKeys])
)
return self
def range(self, low: int, high: int) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode("range", [IntegerNode(low), IntegerNode(high)])
)
return self
def read(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("read", []))
return self
def repeat(
self,
loopName: typing.Union[str, "GraphTraversal"],
repeatTraversal: typing.Optional["GraphTraversal"] = None,
) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"repeat",
[StringNode(typing.cast(str, loopName)), AnyNode(repeatTraversal)]
if repeatTraversal is not None
else [AnyNode(loopName)],
)
)
return self
def sack(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("sack", []))
return self
def sample(self, amountToSample: int) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("sample", [IntegerNode(amountToSample)]))
return self
def select(self, selectKey: str) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("select", [StringNode(selectKey)]))
return self
def shortestPath(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("shortestPath", []))
return self
def simplePath(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("simplePath", []))
return self
def skip(self, skip: int) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("skip", [IntegerNode(skip)]))
return self
def subgraph(self, sideEffectKey: str) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("subgraph", [StringNode(sideEffectKey)]))
return self
def store(self, sideEffectKey: str) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("store", [StringNode(sideEffectKey)]))
return self
def sum(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("sum", []))
return self
def tail(self, limit: typing.Optional[int] = None) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"tail",
[] if limit is None else [IntegerNode(limit)],
)
)
return self
def timeLimit(self, timeLimit: int) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("timeLimit", [IntegerNode(timeLimit)]))
return self
def times(self, maxLoops: int) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("times", [IntegerNode(maxLoops)]))
return self
def to(self, toStepLabel: str) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("to", [StringNode(toStepLabel)]))
return self
def tree(
self, sideEffectKey: typing.Optional[str] = None
) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode(
"tree",
[] if sideEffectKey is None else [StringNode(sideEffectKey)],
)
)
return self
def toList(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("toList", []))
return self
def toSet(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("toSet", []))
return self
def until(self, *args: "GraphTraversal") -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("until", [AnyNode(v) for v in args]))
return self
def unfold(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("unfold", []))
return self
def V(self) -> "DefaultGraphTraversal":
self = DefaultGraphTraversal()
self.nodes = [gNode()]
self.nodes.append(MethodCallNode("V", []))
return self
def value(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("value", []))
return self
def valueMap(self, *propertyKeys: str) -> "DefaultGraphTraversal":
self.nodes.append(
MethodCallNode("valueMap", [StringNode(v) for v in propertyKeys])
)
return self
def values(self, propertyKey: str) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("values", [StringNode(propertyKey)]))
return self
def where(self, whereTraversal: "GraphTraversal") -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("where", [AnyNode(whereTraversal)]))
return self
def with_(self, key: str) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("with", [StringNode(key)]))
return self
def write(self) -> "DefaultGraphTraversal":
self.nodes.append(MethodCallNode("write", []))
return self
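# Editor's note (illustrative, not part of the original module): the class mirrors
# the fluent Gremlin-Java API, so a traversal is composed by chaining step methods;
# nothing executes, each call just appends a MethodCallNode to `nodes`.
#     t = DefaultGraphTraversal().V().hasLabel("person").values("name").toList()
#     # t.nodes now holds the sequence g, V(), hasLabel('person'), values('name'), toList()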
|
[
"rokujyouhitomajp@gmail.com"
] |
rokujyouhitomajp@gmail.com
|
e661c88b5aabf2d6d3d0f44afff3d533ddfc6d03
|
6cceeeea6fb4f103ef01b31881aab56982290af1
|
/supervised_learning/0x0A-object_detection/0-main.py
|
3cac5979c14595c830d2dd1f1c2b39f27139495d
|
[] |
no_license
|
KatyaKalache/holbertonschool-machine_learning
|
7be17a2abf5873f2eb0e82804074ef388eb8cda6
|
7a23ec62d7deee6260067c8125f0e32ac9ef9f0e
|
refs/heads/master
| 2020-04-11T10:46:14.714349
| 2020-04-10T08:35:21
| 2020-04-10T08:35:21
| 161,725,673
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
#!/usr/bin/env python3
if __name__ == '__main__':
import numpy as np
Yolo = __import__('0-yolo').Yolo
np.random.seed(0)
anchors = np.array([[[116, 90], [156, 198], [373, 326]],
[[30, 61], [62, 45], [59, 119]],
[[10, 13], [16, 30], [33, 23]]])
yolo = Yolo('../../data/yolo.h5', '../../data/coco_classes.txt', 0.6, 0.5, anchors)
yolo.model.summary()
print('Class names:', yolo.class_names)
print('Class threshold:', yolo.class_t)
print('NMS threshold:', yolo.nms_t)
print('Anchor boxes:', yolo.anchors)
|
[
"katya@kalache.fr"
] |
katya@kalache.fr
|
798c9fa0d1722861c9bcfc89ca213fdbf7890707
|
d7a5ef00d186938ec551a198c18c4214a13aa511
|
/data_utils.py
|
fbaef085a040d8c9ee67f2b0ecfd3ad45bb2ff45
|
[
"MIT"
] |
permissive
|
vimaljosehere/KBGAN
|
4bb3ce3eaae776ba353c4c950410626f1c997212
|
ce5f41ca7779d7f2722dc11ed0db315648459f27
|
refs/heads/master
| 2020-06-09T19:43:19.787561
| 2019-06-24T23:55:49
| 2019-06-24T23:55:49
| 193,495,456
| 0
| 0
|
MIT
| 2019-06-24T11:50:00
| 2019-06-24T11:50:00
| null |
UTF-8
|
Python
| false
| false
| 2,206
|
py
|
from random import randint
from collections import defaultdict
import torch
def heads_tails(n_ent, train_data, valid_data=None, test_data=None):
train_src, train_rel, train_dst = train_data
if valid_data:
valid_src, valid_rel, valid_dst = valid_data
else:
valid_src = valid_rel = valid_dst = []
if test_data:
test_src, test_rel, test_dst = test_data
else:
test_src = test_rel = test_dst = []
all_src = train_src + valid_src + test_src
all_rel = train_rel + valid_rel + test_rel
all_dst = train_dst + valid_dst + test_dst
heads = defaultdict(lambda: set())
tails = defaultdict(lambda: set())
for s, r, t in zip(all_src, all_rel, all_dst):
tails[(s, r)].add(t)
heads[(t, r)].add(s)
heads_sp = {}
tails_sp = {}
for k in tails.keys():
tails_sp[k] = torch.sparse.FloatTensor(torch.LongTensor([list(tails[k])]),
torch.ones(len(tails[k])), torch.Size([n_ent]))
for k in heads.keys():
heads_sp[k] = torch.sparse.FloatTensor(torch.LongTensor([list(heads[k])]),
torch.ones(len(heads[k])), torch.Size([n_ent]))
return heads_sp, tails_sp
def inplace_shuffle(*lists):
idx = []
for i in range(len(lists[0])):
idx.append(randint(0, i))
for ls in lists:
for i, item in enumerate(ls):
j = idx[i]
ls[i], ls[j] = ls[j], ls[i]
def batch_by_num(n_batch, *lists, n_sample=None):
if n_sample is None:
n_sample = len(lists[0])
for i in range(n_batch):
head = int(n_sample * i / n_batch)
tail = int(n_sample * (i + 1) / n_batch)
ret = [ls[head:tail] for ls in lists]
if len(ret) > 1:
yield ret
else:
yield ret[0]
def batch_by_size(batch_size, *lists, n_sample=None):
if n_sample is None:
n_sample = len(lists[0])
head = 0
while head < n_sample:
tail = min(n_sample, head + batch_size)
ret = [ls[head:tail] for ls in lists]
head += batch_size
if len(ret) > 1:
yield ret
else:
yield ret[0]
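# Editor's note (illustrative, not part of the original file): batching two parallel
# lists with the helper above; the sample lists are made up for the example.
#     for xs, ys in batch_by_size(2, [1, 2, 3, 4, 5], ["a", "b", "c", "d", "e"]):
#         print(xs, ys)
#     # [1, 2] ['a', 'b']
#     # [3, 4] ['c', 'd']
#     # [5] ['e']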
|
[
"cai_lw@126.com"
] |
cai_lw@126.com
|
d046195875fb2aa1f05bfd5640d8e0d4351decfe
|
12b38b947500ffabe48607adfc26cb3f6d86137a
|
/app/app.py
|
356f43505eba79e4b6619d1a9c77d21b3a635c47
|
[
"MIT"
] |
permissive
|
jlmcgehee21/torque_server
|
a8e3f4fa932db31c1206eaec4bda35f45981ceb3
|
7acfb588a50168de0751745ecadb08755282fcd5
|
refs/heads/master
| 2021-01-17T13:01:39.662187
| 2016-05-15T03:23:29
| 2016-05-15T03:23:29
| 58,543,719
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
import os
from flask import Flask, g, redirect, url_for
from api import api
import time
from .extensions import influx_db, auth
BLUEPRINTS = (
api,
)
def create_app(config=None, app_name=None,):
"""Create a Flask app."""
if app_name is None:
app_name = __name__
app = Flask(app_name)
app.config.from_object(config)
app.secret_key = app.config['SECRET_KEY']
# Extensions that must init with .init_app(app)
influx_db.init_app(app)
configure_blueprints(app, BLUEPRINTS)
@app.before_first_request
def create_influx_db():
influx_db.connection.create_database(app.config['INFLUXDB_DATABASE'],
if_not_exists=True)
@auth.verify_password
def verify_password(api_key, _):
return api_key == app.config['API_KEY']
return app
def configure_blueprints(app, blueprints):
"""Configure blueprints in views."""
for blueprint in blueprints:
app.register_blueprint(blueprint)
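# Editor's note (illustrative, not part of the original file; the config path is
# hypothetical): the factory is typically driven from a WSGI entry point, e.g.
#     app = create_app(config='config.ProductionConfig', app_name='torque_server')
#     app.run()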
|
[
"jlmcgehee21@gmail.com"
] |
jlmcgehee21@gmail.com
|
9d2c1b4816f839a127201bb7f40cdbd868072c9d
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_1_neat/16_0_1_nthai_1.py
|
ec17df9d7ed5e83f1250e60016d5a1d1cf20fd6b
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 718
|
py
|
def getResult(num):
allnums = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
counter = 0
if num == 0:
return 'INSOMNIA'
while allnums and counter < 10000:
counter += 1
digits = set(map(int, str(num * counter)))
allnums = allnums - digits
if counter == 10000:
print('-----------------ERROR--------------------')
return counter * num
def printResult(num):
print('Case #{0}: {1}'.format(num, getResult(num)))
if __name__ == '__main__':
with open('A-large.in') as infile, open('A-large-out.txt', 'w') as outfile:
case = 0
for line in infile:
if case == 0:
case = 1
else:
res = getResult(int(line.strip()))
outfile.write('Case #{0}: {1}\n'.format(case, res))
case += 1
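# Editor's note (illustrative, not part of the original file): getResult(N) walks the
# multiples of N until every digit 0-9 has been seen and returns the last multiple:
#     getResult(1)  -> 10            # 1, 2, ..., 9, then 10 contributes the final digit 0
#     getResult(0)  -> 'INSOMNIA'    # zero never produces new digits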
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
398fdb8a05c5bef22bcd84dc5484b8599937e755
|
a838d4bed14d5df5314000b41f8318c4ebe0974e
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_azure_firewalls_operations.py
|
b18dcec1c77cd167725c728f44687be857c9a1f2
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
scbedd/azure-sdk-for-python
|
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
|
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
|
refs/heads/master
| 2023-09-01T08:38:56.188954
| 2021-06-17T22:52:28
| 2021-06-17T22:52:28
| 159,568,218
| 2
| 0
|
MIT
| 2019-08-11T21:16:01
| 2018-11-28T21:34:49
|
Python
|
UTF-8
|
Python
| false
| false
| 30,031
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AzureFirewallsOperations:
"""AzureFirewallsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
azure_firewall_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
azure_firewall_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified Azure Firewall.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
azure_firewall_name=azure_firewall_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
async def get(
self,
resource_group_name: str,
azure_firewall_name: str,
**kwargs
) -> "_models.AzureFirewall":
"""Gets the specified Azure Firewall.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AzureFirewall, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.AzureFirewall
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
azure_firewall_name: str,
parameters: "_models.AzureFirewall",
**kwargs
) -> "_models.AzureFirewall":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str', max_length=56, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AzureFirewall')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
azure_firewall_name: str,
parameters: "_models.AzureFirewall",
**kwargs
) -> AsyncLROPoller["_models.AzureFirewall"]:
"""Creates or updates the specified Azure Firewall.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:param parameters: Parameters supplied to the create or update Azure Firewall operation.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.AzureFirewall
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AzureFirewall or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_03_01.models.AzureFirewall]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
azure_firewall_name=azure_firewall_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str', max_length=56, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
azure_firewall_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> Optional["_models.AzureFirewall"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.AzureFirewall"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
azure_firewall_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> AsyncLROPoller["_models.AzureFirewall"]:
"""Updates tags of an Azure Firewall resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:param parameters: Parameters supplied to update azure firewall tags.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AzureFirewall or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_03_01.models.AzureFirewall]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
azure_firewall_name=azure_firewall_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.AzureFirewallListResult"]:
"""Lists all Azure Firewalls in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AzureFirewallListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.AzureFirewallListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewallListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AzureFirewallListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls'} # type: ignore
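    # --- Illustrative usage sketch (not part of the generated client) ---
    # The AsyncItemPaged return value is consumed with `async for`; the client
    # and resource group names below are hypothetical:
    #
    #     async for firewall in client.azure_firewalls.list("my-rg"):
    #         print(firewall.name)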
def list_all(
self,
**kwargs
) -> AsyncIterable["_models.AzureFirewallListResult"]:
"""Gets all the Azure Firewalls in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AzureFirewallListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.AzureFirewallListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewallListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AzureFirewallListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/azureFirewalls'} # type: ignore
|
[
"noreply@github.com"
] |
scbedd.noreply@github.com
|
3d0b683f903e32f3e287953fa7c7baedf310f878
|
e4f0882033c614a1c501d2c5a7dffa8eac12849a
|
/classificationnet.py
|
42a622b7b08e9cb032835ab050bcaca18e542265
|
[] |
no_license
|
fendouwangshan/APEN
|
c4084c1459ca90ff6efb7868c51aaa4d000c7d01
|
9a8c61d2ce0c848089bc4555518f4eb89e5a6f04
|
refs/heads/master
| 2022-12-10T15:51:11.735316
| 2020-09-09T07:08:41
| 2020-09-09T07:08:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 883
|
py
|
import torch.nn as nn
from torchvision import models
vgg16 = models.vgg16(pretrained=True)
class vgg(nn.Module):
def __init__(self,num_classes):
super(vgg, self).__init__()
self.features = vgg16.features
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
self.model_name = 'vgg16'
    def forward(self, x, y):
        # Run the input through successive slices of the VGG16 feature extractor,
        # keeping three intermediate feature maps for the caller.
        c1 = self.features[0:26](x)
        c2 = self.features[26:28](c1)
        c3 = self.features[28:30](c2)
        feature = self.features[30](c3)
        f = feature.view(feature.size(0), -1)
        # If an auxiliary feature vector y is supplied (anything other than the
        # int placeholder), add it to the flattened features before classifying.
        if type(y) != int:
            f = f + y
        c = self.classifier(f)
        return c, c1, c2, c3
|
[
"1730401025@st.btbu.edu.cn"
] |
1730401025@st.btbu.edu.cn
|
359e0c55b1bc4c0f98004112b00b2bd7176fdef8
|
e70ccd8d27af13e1568e2d1d6f3d918df2b72e3f
|
/src/qrl/core/txs/LatticeTransaction.py
|
695bc941624a438139d2901ea6c3bc259a432202
|
[
"MIT"
] |
permissive
|
theQRL/QRL
|
21be76cdea5134ae8f8b83a334f3a569ca85ab14
|
7600ec054edecc22f0b86b76b8e00f11a161486a
|
refs/heads/master
| 2023-08-16T23:58:58.566589
| 2023-08-01T17:45:29
| 2023-08-01T17:45:29
| 71,055,814
| 462
| 215
|
MIT
| 2023-08-01T17:45:31
| 2016-10-16T14:32:40
|
Python
|
UTF-8
|
Python
| false
| false
| 5,031
|
py
|
from pyqrllib.pyqrllib import bin2hstr
from qrl.core.State import State
from qrl.core.StateContainer import StateContainer
from qrl.core.misc import logger
from qrl.core.txs.Transaction import Transaction
from qrl.generated.qrl_pb2 import LatticePKMetadata
class LatticeTransaction(Transaction):
def __init__(self, protobuf_transaction=None):
super(LatticeTransaction, self).__init__(protobuf_transaction)
@property
def pk1(self): # kyber_pk
return self._data.latticePK.pk1
@property
def pk2(self): # dilithium_pk
return self._data.latticePK.pk2
@property
def pk3(self): # ecdsa_pk
return self._data.latticePK.pk3
def get_data_bytes(self):
return self.master_addr + \
self.fee.to_bytes(8, byteorder='big', signed=False) + \
self.pk1 + \
self.pk2 + \
self.pk3
@staticmethod
def create(pk1: bytes, pk2: bytes, pk3: bytes, fee: int, xmss_pk: bytes, master_addr: bytes = None):
transaction = LatticeTransaction()
if master_addr:
transaction._data.master_addr = master_addr
transaction._data.fee = fee
transaction._data.public_key = xmss_pk
transaction._data.latticePK.pk1 = bytes(pk1)
transaction._data.latticePK.pk2 = bytes(pk2)
transaction._data.latticePK.pk3 = bytes(pk3)
transaction.validate_or_raise(verify_signature=False)
return transaction
def _validate_custom(self) -> bool:
if self.fee < 0:
logger.info('State validation failed for %s because: Negative send', bin2hstr(self.txhash))
return False
return True
def _validate_extended(self, state_container: StateContainer) -> bool:
if state_container.block_number < state_container.current_dev_config.hard_fork_heights[0]:
logger.warning("[LatticeTransaction] Hard Fork Feature not yet activated")
return False
dev_config = state_container.current_dev_config
if len(self.pk1) > dev_config.lattice_pk1_max_length: # TODO: to fix kyber pk value
logger.warning('Kyber PK length cannot be more than %s bytes', dev_config.lattice_pk1_max_length)
logger.warning('Found length %s', len(self.pk1))
return False
if len(self.pk2) > dev_config.lattice_pk2_max_length: # TODO: to fix dilithium pk value
logger.warning('Dilithium PK length cannot be more than %s bytes', dev_config.lattice_pk2_max_length)
logger.warning('Found length %s', len(self.pk2))
return False
if len(self.pk3) > dev_config.lattice_pk3_max_length: # TODO: to fix ecdsa pk value
logger.warning('ECDSA PK length cannot be more than %s bytes', dev_config.lattice_pk3_max_length)
logger.warning('Found length %s', len(self.pk3))
return False
tx_balance = state_container.addresses_state[self.addr_from].balance
if tx_balance < self.fee:
logger.info('State validation failed for %s because: Insufficient funds', bin2hstr(self.txhash))
logger.info('balance: %s, amount: %s', tx_balance, self.fee)
return False
if (self.addr_from, self.pk1, self.pk2, self.pk3) in state_container.lattice_pk.data:
logger.info('State validation failed for %s because: Lattice PKs already exists for this address',
bin2hstr(self.txhash))
return False
return True
def set_affected_address(self, addresses_set: set):
super().set_affected_address(addresses_set)
def apply(self,
state: State,
state_container: StateContainer) -> bool:
address_state = state_container.addresses_state[self.addr_from]
address_state.update_balance(state_container, self.fee, subtract=True)
state_container.paginated_lattice_pk.insert(address_state, self.txhash)
state_container.paginated_tx_hash.insert(address_state, self.txhash)
state_container.lattice_pk.data[(self.addr_from,
self.pk1, self.pk2, self.pk3)] = LatticePKMetadata(enabled=True)
return self._apply_state_changes_for_PK(state_container)
def revert(self,
state: State,
state_container: StateContainer) -> bool:
address_state = state_container.addresses_state[self.addr_from]
address_state.update_balance(state_container, self.fee)
state_container.paginated_lattice_pk.remove(address_state, self.txhash)
state_container.paginated_tx_hash.remove(address_state, self.txhash)
state_container.lattice_pk.data[(self.addr_from,
self.pk1, self.pk2, self.pk3)] = LatticePKMetadata(enabled=False,
delete=True)
return self._revert_state_changes_for_PK(state_container)
|
[
"kaushal.forex@gmail.com"
] |
kaushal.forex@gmail.com
|
2927afbebbe1493ca4e13a8a5bee70ec710f5a8a
|
ef1526cffbac065fc55f3a0839bcf06c12061d13
|
/trainer.py
|
1107698be1ad8c271754f033cd67804547fb7322
|
[
"MIT"
] |
permissive
|
xilongzhou/MaskDnGAN
|
2a13e71159290db0d064e8f5bfd8837672e50247
|
c43e93483ce7c75c689f3ab01f21279700e52031
|
refs/heads/main
| 2023-08-11T09:26:10.534301
| 2021-08-28T05:14:20
| 2021-08-28T05:14:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,186
|
py
|
from network import GAN_Denoiser
class Trainer():
"""
Trainer creates the model and optimizers, and uses them to
updates the weights of the network while reporting losses
and the latest visuals to visualize the progress in training.
"""
def __init__(self, args):
# Save args
self.args = args
self.model = GAN_Denoiser(args)
self.generated = None
self.loss = None
# Create optimizers
self.optimizer_G, self.optimizer_D = \
self.model.create_optimizers()
def run_generator_one_step(self, data, warp):
self.optimizer_G.zero_grad()
g_losses, generated = self.model(data, warp, mode='generator')
g_loss = sum(g_losses.values()).mean()
g_loss.backward()
self.optimizer_G.step()
self.g_losses = g_losses
self.generated = generated
def run_validation(self, data, warp):
return self.model(data, warp, mode='inference')
def run_discriminator_one_step(self):
self.optimizer_D.zero_grad()
d_losses = self.model(self.generated, mode='discriminator')
d_loss = sum(d_losses.values()).mean()
d_loss.backward()
self.optimizer_D.step()
self.d_losses = d_losses
def get_latest_losses(self):
return {**self.g_losses, **self.d_losses}
def get_latest_generated(self):
return self.generated
def save(self, epoch):
self.model.save(epoch)
def get_lr(self, optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def start_epoch(self):
return self.model.start_epoch
def reset_loss(self):
self.loss = {'Reconstruction': 0,
'GAN': 0,
'GAN_Feat': 0,
'VGG': 0,
'D_Fake': 0,
'D_Real': 0}
def append_loss(self):
for (key, value) in self.get_latest_losses().items():
self.loss[key] += value.item()
def normalize_loss(self):
for (key, value) in self.loss.items():
self.loss[key] /= self.args.val_freq
|
[
"43282710+avinashpaliwal@users.noreply.github.com"
] |
43282710+avinashpaliwal@users.noreply.github.com
|
d1dcfccc842e5e4628718def51f98cc9a34cafb1
|
c19ebdaaa005e1247de6f99c2f8c817cfd379c86
|
/ssd_train.py
|
7f4fbf424b0473a35afbeefdefd60111be874783
|
[
"Apache-2.0"
] |
permissive
|
gunpowder1473/mySSD
|
ea5921eded5142eb259ff83dad132b948238a8db
|
ee7b44de1ef2f6013b6b17ca3cdefd52729ed479
|
refs/heads/master
| 2021-05-12T00:01:09.379574
| 2018-01-15T14:00:03
| 2018-01-15T14:00:03
| 117,524,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,926
|
py
|
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from Tfrecord import analysis_tfrecord
from network import ssd_network, ssd_network_calc
from Bboxes import bboxes_method
from Imagine import image_method
from Preprocessing import image_preprocessing
from Common import common_methods, common_config_class
slim = tf.contrib.slim
DATA_FORMAT = 'NHWC'
gpu_options = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options)
isess = tf.InteractiveSession(config=config)
# tf.app.flags.*** (flag name, default value, description)
# SSD network flags
tf.app.flags.DEFINE_float(
'loss_alpha', 1., 'Alpha parameter in the loss function.')
tf.app.flags.DEFINE_float(
'negative_ratio', 3., 'Negative ratio in the loss function.')
tf.app.flags.DEFINE_float(
'match_threshold', 0.5, 'Matching threshold in the loss function.')
# General flags
tf.app.flags.DEFINE_string(
'train_dir','../mySSD/tmp/tfmodel2/',
'Directory where checkpoints and event logs are written to.')
tf.app.flags.DEFINE_integer('num_clones', 1,
'Number of model clones to deploy.')
tf.app.flags.DEFINE_boolean('clone_on_cpu', False,
'Use CPUs to deploy clones.')
tf.app.flags.DEFINE_integer(
'num_readers', 4,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 4,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
'The frequency with which logs are print.')
tf.app.flags.DEFINE_integer(
'save_summaries_secs', 60,
'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
'save_interval_secs', 600,
'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_float(
'gpu_memory_fraction', 0.95, 'GPU memory fraction to use.')
# Optimizer (gradient descent) flags
tf.app.flags.DEFINE_float(
'weight_decay', 0.00004, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_string(
'optimizer', 'rmsprop',
'The name of the optimizer, one of "adadelta", "adagrad", "adam",'
'"ftrl", "momentum", "sgd" or "rmsprop".')
tf.app.flags.DEFINE_float(
'adadelta_rho', 0.95,
'The decay rate for adadelta.')
tf.app.flags.DEFINE_float(
'adagrad_initial_accumulator_value', 0.1,
'Starting value for the AdaGrad accumulators.')
tf.app.flags.DEFINE_float(
'adam_beta1', 0.9,
'The exponential decay rate for the 1st moment estimates.')
tf.app.flags.DEFINE_float(
'adam_beta2', 0.999,
'The exponential decay rate for the 2nd moment estimates.')
tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')
tf.app.flags.DEFINE_float('ftrl_learning_rate_power', -0.5,
'The learning rate power.')
tf.app.flags.DEFINE_float(
'ftrl_initial_accumulator_value', 0.1,
'Starting value for the FTRL accumulators.')
tf.app.flags.DEFINE_float(
'ftrl_l1', 0.0, 'The FTRL l1 regularization strength.')
tf.app.flags.DEFINE_float(
'ftrl_l2', 0.0, 'The FTRL l2 regularization strength.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum.')
tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')
# Learning rate flags
tf.app.flags.DEFINE_string(
'learning_rate_decay_type',
'exponential',
'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
' or "polynomial"')
tf.app.flags.DEFINE_float('learning_rate', 0.0001, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.00001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_float(
'label_smoothing', 0.0, 'The amount of label smoothing.')
tf.app.flags.DEFINE_float(
'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float(
'num_epochs_per_decay', 2.0,
'Number of epochs after which learning rate decays.')
tf.app.flags.DEFINE_float(
'moving_average_decay', None,
'The decay to use for the moving average.'
'If left as None, then moving averages are not used.')
# Dataset flags
tf.app.flags.DEFINE_string(
'dataset_name', 'pic_classes_30', 'The name of the dataset to load.')
tf.app.flags.DEFINE_integer(
'num_classes', 4, 'Number of classes to use in the dataset.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', "../PicData/", 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'model_name', 'ssd_300', 'The name of the architecture to train.')
tf.app.flags.DEFINE_string(
'preprocessing_name', None, 'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_integer(
'batch_size', 4, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'train_image_size', 300, 'Train image size')
tf.app.flags.DEFINE_integer('max_number_of_steps', None,
'The maximum number of training steps.')
# Fine-tuning flags.
tf.app.flags.DEFINE_string(
'checkpoint_path', None, # 'E://SSD/tmp/tfmodel/model.ckpt',
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_model_scope', None,
'Model scope in the checkpoint. None if the same as the trained model.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', None,
'Comma-separated list of scopes of variables to exclude when restoring '
'from a checkpoint.')
tf.app.flags.DEFINE_string(
'trainable_scopes', None,
'Comma-separated list of scopes to filter the set of variables to train.'
'By default, None would train all the variables.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', False,
'When restoring a checkpoint would ignore missing variables.')
FLAGS = tf.app.flags.FLAGS
def main(_):
if not FLAGS.dataset_dir:
raise ValueError('You must supply the dataset directory with --dataset_dir')
tf.logging.set_verbosity(tf.logging.DEBUG)
with tf.Graph().as_default():
deploy_config = common_config_class.DeploymentConfig(num_clones=FLAGS.num_clones,
clone_on_cpu=FLAGS.clone_on_cpu,
replica_id=0, num_replicas=1, num_ps_tasks=0)
with tf.device(deploy_config.variables_device()):
global_step = slim.create_global_step()
dataset = analysis_tfrecord.getData()
ssd_params = ssd_network.SSDNet_300.default_params._replace(num_classes=FLAGS.num_classes)
ssd_net = ssd_network.SSDNet_300(ssd_params)
img_shape = ssd_params.img_shape
defaultboxes = ssd_net.creatDefaultbox(img_shape)
with tf.device(deploy_config.inputs_device()):
with tf.name_scope(FLAGS.dataset_name + '_data_provider'):
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.num_readers,
common_queue_capacity=20 * FLAGS.batch_size,
common_queue_min=10 * FLAGS.batch_size,
shuffle=True)
[image, _, glabels, gbboxes] = provider.get(['image', 'shape', 'label', 'bbox'])
# Pre-processing image, labels and bboxes.
image, glabels, gbboxes = \
image_preprocessing.preprocessImage(image, glabels, gbboxes, out_shape=img_shape,
data_format=DATA_FORMAT, is_training=True)
gclasses, glocalisations, gscores = ssd_net.defaultBboxesEncodeAllLayer(glabels, gbboxes,
defaultboxes)
batch_shape = [1] + [len(defaultboxes)] * 3
r = tf.train.batch(
common_methods.listReshape([image, gclasses, glocalisations, gscores]),
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
capacity=5 * FLAGS.batch_size)
b_image, b_gclasses, b_glocalisations, b_gscores = common_methods.listReshape(r, batch_shape)
batch_queue = slim.prefetch_queue.prefetch_queue(
common_methods.listReshape([b_image, b_gclasses, b_glocalisations, b_gscores]),
capacity=2 * deploy_config.num_clones)
arg_scope = ssd_net.argScope(weight_decay=FLAGS.weight_decay,
data_format=DATA_FORMAT)
def clone_fn(batch_queue):
b_image, b_gclasses, b_glocalisations, b_gscores = \
common_methods.listReshape(batch_queue.dequeue(), batch_shape)
with slim.arg_scope(arg_scope):
_, localisations, logits, end_points = ssd_net.creatNetwork(b_image, is_training=True)
ssd_net.losses(logits, localisations, b_gclasses, b_glocalisations, b_gscores,
match_threshold=FLAGS.match_threshold,
negative_ratio=FLAGS.negative_ratio,
alpha=FLAGS.loss_alpha)
return end_points
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
clones = common_methods.creatClones(deploy_config, clone_fn, [batch_queue])
first_clone_scope = deploy_config.clone_scope(0)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
for variable in slim.get_model_variables():
summaries.add(tf.summary.histogram(variable.op.name, variable))
for loss in tf.get_collection(tf.GraphKeys.LOSSES):
summaries.add(tf.summary.scalar(loss.op.name, loss))
if FLAGS.moving_average_decay:
moving_average_variables = slim.get_model_variables()
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, global_step)
else:
moving_average_variables, variable_averages = None, None
with tf.device(deploy_config.optimizer_device()):
learning_rate = common_methods.setLearningRate(FLAGS,
dataset.num_samples,
global_step)
optimizer = common_methods.setOptimizer(FLAGS, learning_rate)
summaries.add(tf.summary.scalar('learning_rate', learning_rate))
if FLAGS.moving_average_decay:
update_ops.append(variable_averages.apply(moving_average_variables))
variables_to_train = common_methods.getTrainableVariables(FLAGS)
total_loss, clones_gradients = common_methods.optimizerClones(clones, optimizer, var_list=variables_to_train)
summaries.add(tf.summary.scalar('total_loss', total_loss))
grad_updates = optimizer.apply_gradients(clones_gradients, global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
train_tensor = control_flow_ops.with_dependencies([update_op], total_loss, name='train_op')
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))
summary_op = tf.summary.merge(list(summaries), name='summary_op')
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)
config = tf.ConfigProto(log_device_placement=False,
gpu_options=gpu_options)
saver = tf.train.Saver(max_to_keep=5,
keep_checkpoint_every_n_hours=1.0,
write_version=2,
pad_step_number=False)
slim.learning.train(
train_tensor,
logdir=FLAGS.train_dir,
master='',
is_chief=True,
init_fn=common_methods.setInit(FLAGS),
summary_op=summary_op,
number_of_steps=FLAGS.max_number_of_steps,
log_every_n_steps=FLAGS.log_every_n_steps,
save_summaries_secs=FLAGS.save_summaries_secs,
saver=saver,
save_interval_secs=FLAGS.save_interval_secs,
session_config=config,
sync_optimizer=None)
if __name__ == '__main__':
tf.app.run()
|
[
"836025852@qq.com"
] |
836025852@qq.com
|
0109c5ed6c4550d7d8153a0007a043c2a0058fbe
|
c719367f2f26a4f9f03239835dcf2a21d25dadf9
|
/week11/drp354_assignment_9/problem1.py
|
15df5065ecbdbcb9d186b7324d68444ce3ddd63d
|
[] |
no_license
|
jjdblast/Principle-of-Urban-Informatics
|
688fea3679646b619c9d95384b1190b1c0541267
|
538848d94b8ddf493b002cf18d6fd5452fd594aa
|
refs/heads/master
| 2020-06-13T22:07:23.802689
| 2015-01-22T06:01:06
| 2015-01-22T06:01:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,330
|
py
|
##############################################
##############################################
# Principle of urban informatics #
# Assignment 9 #
# Dimas Rinarso Putro | drp354@nyu.edu #
# No.1 #
##############################################
import csv
import shapefile
import sys
import math
import operator
from bokeh.plotting import *
from bokeh.sampledata.iris import flowers
from bokeh.objects import HoverTool
from collections import OrderedDict,defaultdict
def loadComplaints(complaintsFilename):
# Reads all complaints and keeps zips which have complaints.
with open(complaintsFilename) as f:
csvReader = csv.reader(f)
headers = csvReader.next()
zipIndex = headers.index('Incident Zip')
latColIndex = headers.index('Latitude')
lngColIndex = headers.index('Longitude')
agencyIndex = headers.index('Agency')
lat = []
lng = []
agencyDict = {}
colors = []
complaintsPerZip = {}
for row in csvReader:
try:
lat.append(float(row[latColIndex]))
lng.append(float(row[lngColIndex]))
agency = row[agencyIndex]
zipCode = row[zipIndex]
if not agency in agencyDict:
agencyDict[agency] = len(agencyDict)
if zipCode in complaintsPerZip:
if agency in complaintsPerZip[zipCode]:
complaintsPerZip[zipCode][agency]+=1
else:
complaintsPerZip[zipCode][agency]=1
else:
complaintsPerZip[zipCode]={}
complaintsPerZip[zipCode][agency]=1
except:
pass
return {'zip_complaints': complaintsPerZip}
def getZipBorough(zipBoroughFilename):
# Reads all complaints and keeps zips which have complaints.
with open(zipBoroughFilename) as f:
csvReader = csv.reader(f)
csvReader.next()
return {row[0]: row[1] for row in csvReader}
def drawPlot(shapeFilename, mapPoints, zipBorough):
# Read the ShapeFile
dat = shapefile.Reader(shapeFilename)
# Creates a dictionary for zip: {lat_list: [], lng_list: []}.
zipCodes = []
polygons = {'lat_list': [], 'lng_list': [], 'color_list' : []}
    # Qualitative color scale (8 classes)
colorDict = {}
colorscale = ['#EA5455','#8743D4', '#66A7E1', '#45C966','#F4DF46','#E97F31','#7D7F72','#AE8E3B']
record_index = 0
colorIdx = 0
zid=[]
aid=[]
cid=[]
agencyList = []
for r in dat.iterRecords():
currentZip = r[0]
# Keeps only zip codes in NY area.
if currentZip in zipBorough:
zipCodes.append(currentZip)
# Gets shape for this zip.
shape = dat.shapeRecord(record_index).shape
points = shape.points
# Breaks into lists for lat/lng.
lngs = [p[0] for p in points]
lats = [p[1] for p in points]
# Stores lat/lng for current zip shape.
polygons['lng_list'].append(lngs)
polygons['lat_list'].append(lats)
# Calculate color, according to number of complaints
if currentZip in mapPoints['zip_complaints']:
# Top complaint type
sortedlist = sorted(mapPoints['zip_complaints'][currentZip].items(), key=operator.itemgetter(1), reverse=True)
agency = sortedlist[0][0]
#for hover
zid.append(str(currentZip))
cid.append(str(mapPoints['zip_complaints'][currentZip][agency]))
aid.append(str(agency))
#print currentZip, agency
if agency in colorDict:
color = colorDict[agency]
else:
#for hovering
colorDict[agency] = colorscale[colorIdx]
color = colorDict[agency]
agencyList.append(agency)
colorIdx+=1
else:
color = 'white'
aid.append("not available")
cid.append("not available")
zid.append("not available")
polygons['color_list'].append(color)
record_index += 1
TOOLS="crosshair,pan,wheel_zoom,box_zoom,reset,hover,previewsave"
source = ColumnDataSource(
data=dict(zid=zid,
aid=aid,
cid=cid,)
)
# Creates the Plot
output_file("shapeAndPoints.html", title="shape and points example")
hold()
patches(polygons['lng_list'], polygons['lat_list'], \
fill_color=polygons['color_list'], line_color="gray", \
tools=TOOLS, source=source,plot_width=1100, plot_height=700, \
title="Top complaints by Agency for each Zip codes in NY")
#legend
x, y = -73.69, 40.58
for agenIter,colorIter in colorDict.iteritems():
rect([x], [y], color=colorIter, width=0.01, height=0.01)
text([x+.01], [y], text=agenIter, angle=0, text_font_size="8pt", text_align="left", text_baseline="middle")
y = y -.01
#hover parameter
hover = curplot().select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
("zip code", "@zid"),
("agency", "@aid"),
("complaints number", "@cid"),
])
show()
if __name__ == '__main__':
if len(sys.argv) != 4:
print 'Usage:'
print sys.argv[0] \
+ '<complaintsfilename> <zipboroughfilename> <shapefilename>'
print '\ne.g.: ' + sys.argv[0] \
+ ' data/nyshape.shp data/complaints.csv zip_borough.csv'
else:
mapPoints = loadComplaints(sys.argv[1])
zipBorough = getZipBorough(sys.argv[2])
drawPlot(sys.argv[3], mapPoints, zipBorough)
|
[
"dimasrinarso2003@yahoo.com"
] |
dimasrinarso2003@yahoo.com
|
5ef36ddd93c406aea348149a77e7a346d7fd167c
|
148478348d9db36ea95b1b3f06fbefdf179fa13b
|
/old/mcts.py
|
85989228e956d5f72f470191b94419a78a21bf46
|
[] |
no_license
|
JBLanier/directed_exploration
|
e394d545cb2f4e26907b8f9cefe8fe0f056b42f5
|
0f682c4934051eaa63502ebbb6c347a8f0bef69d
|
refs/heads/generator
| 2020-03-12T17:56:32.568009
| 2018-07-28T17:58:31
| 2018-07-28T17:58:31
| 130,749,104
| 0
| 0
| null | 2018-07-28T18:00:29
| 2018-04-23T19:53:35
|
Python
|
UTF-8
|
Python
| false
| false
| 4,816
|
py
|
import math
import numpy as np
EPS = 1e-8
class MCTS():
"""
This class handles the MCTS tree.
"""
def __init__(self, game, nnet, args):
self.game = game
self.nnet = nnet
self.args = args
self.Qsa = {} # stores Q values for s,a (as defined in the paper)
self.Nsa = {} # stores #times edge s,a was visited
self.Ns = {} # stores #times board s was visited
self.Ps = {} # stores initial policy (returned by neural net)
self.Es = {} # stores game.getGameEnded ended for board s
self.Vs = {} # stores game.getValidMoves for board s
def getActionProb(self, canonicalBoard, temp=1):
"""
This function performs numMCTSSims simulations of MCTS starting from
canonicalBoard.
Returns:
probs: a policy vector where the probability of the ith action is
proportional to Nsa[(s,a)]**(1./temp)
"""
for i in range(self.args.numMCTSSims):
self.search(canonicalBoard)
s = self.game.stringRepresentation(canonicalBoard)
counts = [self.Nsa[(s, a)] if (s, a) in self.Nsa else 0 for a in range(self.game.getActionSize())]
if temp == 0:
bestA = np.argmax(counts)
probs = [0] * len(counts)
probs[bestA] = 1
return probs
counts = [x ** (1. / temp) for x in counts]
probs = [x / float(sum(counts)) for x in counts]
return probs
def search(self, canonicalBoard):
"""
This function performs one iteration of MCTS. It is recursively called
till a leaf node is found. The action chosen at each node is one that
has the maximum upper confidence bound as in the paper.
Once a leaf node is found, the neural network is called to return an
        initial policy P and a value v for the state. This value is propagated
        up the search path. In case the leaf node is a terminal state, the
        outcome is propagated up the search path. The values of Ns, Nsa, Qsa are
updated.
NOTE: the return values are the negative of the value of the current
state. This is done since v is in [-1,1] and if v is the value of a
state for the current player, then its value is -v for the other player.
Returns:
v: the negative of the value of the current canonicalBoard
"""
s = self.game.stringRepresentation(canonicalBoard)
if s not in self.Es:
self.Es[s] = self.game.getGameEnded(canonicalBoard, 1)
if self.Es[s] != 0:
# terminal node
return -self.Es[s]
if s not in self.Ps:
# leaf node
self.Ps[s], v = self.nnet.predict(canonicalBoard)
valids = self.game.getValidMoves(canonicalBoard, 1)
self.Ps[s] = self.Ps[s] * valids # masking invalid moves
sum_Ps_s = np.sum(self.Ps[s])
if sum_Ps_s > 0:
self.Ps[s] /= sum_Ps_s # renormalize
else:
# if all valid moves were masked make all valid moves equally probable
                # NB! All valid moves may be masked if either your NNet architecture is insufficient or you've got overfitting or something else.
# If you have got dozens or hundreds of these messages you should pay attention to your NNet and/or training process.
print("All valid moves were masked, do workaround.")
self.Ps[s] = self.Ps[s] + valids
self.Ps[s] /= np.sum(self.Ps[s])
self.Vs[s] = valids
self.Ns[s] = 0
return -v
valids = self.Vs[s]
cur_best = -float('inf')
best_act = -1
# pick the action with the highest upper confidence bound
for a in range(self.game.getActionSize()):
if valids[a]:
if (s, a) in self.Qsa:
u = self.Qsa[(s, a)] + self.args.cpuct * self.Ps[s][a] * math.sqrt(self.Ns[s]) / (
1 + self.Nsa[(s, a)])
else:
u = self.args.cpuct * self.Ps[s][a] * math.sqrt(self.Ns[s] + EPS) # Q = 0 ?
if u > cur_best:
cur_best = u
best_act = a
a = best_act
next_s, next_player = self.game.getNextState(canonicalBoard, 1, a)
next_s = self.game.getCanonicalForm(next_s, next_player)
v = self.search(next_s)
if (s, a) in self.Qsa:
self.Qsa[(s, a)] = (self.Nsa[(s, a)] * self.Qsa[(s, a)] + v) / (self.Nsa[(s, a)] + 1)
self.Nsa[(s, a)] += 1
else:
self.Qsa[(s, a)] = v
self.Nsa[(s, a)] = 1
self.Ns[s] += 1
return -v
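# Illustrative usage sketch: `game`, `nnet` and `args` are project objects assumed
# here (args must provide numMCTSSims and cpuct, as used above).
#
#     mcts = MCTS(game, nnet, args)
#     pi = mcts.getActionProb(canonical_board, temp=1)   # visit-count policy
#     action = int(np.argmax(pi))                        # greedy pick, or sample from pi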
|
[
"johnblanier@gmail.com"
] |
johnblanier@gmail.com
|
985b483695819b2ebf1b5a96b48d5fd8d0c25f20
|
2db704b6853682a9e3283b24013beadf5171d44b
|
/Behavioral_mode/template_method.py
|
58296f60cd7df78a6f03dbf2d0c2d3b33bc7e85b
|
[] |
no_license
|
zxy1013/creat_mode
|
d9a1487ac1c88f68a1ea17cd4c60910fbf237d66
|
45a75e29f13497059d0367466b97698b0e449734
|
refs/heads/master
| 2023-04-05T22:55:44.057347
| 2021-05-07T07:58:58
| 2021-05-07T07:58:58
| 365,157,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 998
|
py
|
from abc import ABCMeta, abstractmethod
from time import sleep
# Abstract class
class Window(metaclass=ABCMeta):
    @abstractmethod
    def start(self):  # Primitive/hook operation: the invariant parts live in Window, the varying parts are supplied by subclasses
        pass
    @abstractmethod
    def repaint(self):  # Redraw the window
        pass
    @abstractmethod
    def stop(self):  # Primitive/hook operation
        pass
    def run(self):  # Template method
        self.start()
        while True:
            try:
                self.repaint()
                sleep(1)
            except KeyboardInterrupt:  # Run "python template_method.py" in a terminal; Ctrl+C stops the loop
                break
        self.stop()
# Concrete class
class MyWindow(Window):  # Interface + inheritance
    def __init__(self, msg):
        self.msg = msg
    def start(self):
        print("Window started running")
    def stop(self):
        print("Window stopped running")
    def repaint(self):
        print(self.msg)
MyWindow("Hello...").run()
|
[
"1253141170@qq.com"
] |
1253141170@qq.com
|
a2edc242756fe3bccdb3e77606306ba46b993019
|
8d64c023327783582a3beed5df826f532dc46999
|
/enemyArtHandler.py
|
58aabdd01cef923b8dc2f78d8671d648397376d6
|
[] |
no_license
|
aloobyalordant/7DRL2015
|
deaeebe4e07ac58749e70add212c365e1c4b8b1c
|
08381adc835deccb54c6703a489a8b7510339be6
|
refs/heads/master
| 2020-12-24T06:42:13.301673
| 2018-11-11T18:35:45
| 2018-11-11T18:35:45
| 48,754,319
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,130
|
py
|
import csv
import os
# A class that loads non-mechanical enemy data stuff (names, sprite location, colors, description) from a csv file,
# and then returns that data to the game when requested
class EnemyArtHandler:
def __init__(self,pathname, dataFile = "enemyArtData.csv"):
datapath = os.path.join(pathname, dataFile)
print("Loading enemy art data from " + datapath)
# create a dictionary for storing all the enemy data.
self.enemyArtDict = {}
with open(datapath) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
self.enemyArtDict[row['game_name']]=(row['Name'], row['Symbol'], (int(row['colorR']), int(row['colorG']), int(row['colorB'])), row['Description'] )
def getEnemyArtData(self, enemy_name):
returnData = None
#print ("testing...")
if ( enemy_name in self.enemyArtDict):
returnData = self.enemyArtDict[enemy_name]
else:
returnData = (None, None, (0,0,0), None)
return returnData
#print(row['game_name'])
#if row['game_name'] == enemy_name:
# # populate data from this row
# print("Found data!")
# break
#print(row['first_name'], row['last_name'])
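# Illustrative usage sketch (the data directory and enemy key are placeholders):
#
#     handler = EnemyArtHandler("data")
#     name, symbol, color, description = handler.getEnemyArtData("rat")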
|
[
"markelliotlloyd@gmail.com"
] |
markelliotlloyd@gmail.com
|
793b504aefa0334a3db3790c84bd5775c17b2bc2
|
a838d4bed14d5df5314000b41f8318c4ebe0974e
|
/sdk/identity/azure-identity/tests/test_app_service.py
|
13151d95d390eb967adfff1fb9634fb14fc79f24
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
scbedd/azure-sdk-for-python
|
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
|
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
|
refs/heads/master
| 2023-09-01T08:38:56.188954
| 2021-06-17T22:52:28
| 2021-06-17T22:52:28
| 159,568,218
| 2
| 0
|
MIT
| 2019-08-11T21:16:01
| 2018-11-28T21:34:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,984
|
py
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
from azure.identity._credentials.app_service import AppServiceCredential
from azure.identity._constants import EnvironmentVariables
import pytest
from helpers import mock
from recorded_test_case import RecordedTestCase
PLAYBACK_URL = "https://msi-endpoint/token"
class RecordedTests(RecordedTestCase):
def __init__(self, *args, **kwargs):
super(RecordedTests, self).__init__(*args, **kwargs)
if self.is_live:
url = os.environ.get(EnvironmentVariables.MSI_ENDPOINT)
if not (url and EnvironmentVariables.MSI_SECRET in os.environ):
pytest.skip("Recording requires values for $MSI_ENDPOINT and $MSI_SECRET")
else:
self.scrubber.register_name_pair(url, PLAYBACK_URL)
self.patch = mock.MagicMock() # no need to patch anything when recording
else:
# in playback we need to set environment variables and clear any that would interfere
# (MSI_SECRET ends up in a header; vcr.py doesn't match headers, so the value doesn't matter)
env = {EnvironmentVariables.MSI_ENDPOINT: PLAYBACK_URL, EnvironmentVariables.MSI_SECRET: "redacted"}
self.patch = mock.patch.dict(os.environ, env, clear=True)
def test_system_assigned(self):
with self.patch:
credential = AppServiceCredential()
token = credential.get_token(self.scope)
assert token.token
assert isinstance(token.expires_on, int)
@pytest.mark.usefixtures("user_assigned_identity_client_id")
def test_user_assigned(self):
with self.patch:
credential = AppServiceCredential(client_id=self.user_assigned_identity_client_id)
token = credential.get_token(self.scope)
assert token.token
assert isinstance(token.expires_on, int)
|
[
"noreply@github.com"
] |
scbedd.noreply@github.com
|
0dc73c2a32d3f240e6706a68ceca1e73c17f8b5e
|
876b16099c5671be86e5ba15f9cc161a8df658e0
|
/Flaskapi/data_input.py
|
43370c8c8291c83ac315800b65aaf9c73221b416
|
[] |
no_license
|
KarthikChary1/Salary_prediction_Data_Scientists
|
90cddde98ffe476bd4890740e2e92b722b33c5b8
|
9cc4f629c46f2bb93a249b20949bd7ba9cd86825
|
refs/heads/master
| 2022-12-11T22:06:20.617123
| 2020-08-14T06:35:32
| 2020-08-14T06:35:32
| 286,713,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,161
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 11 14:29:06 2020
@author: VENUHYMA
"""
data_in=[3.6,
0.0,
0.0,
0.0,
1.0,
34.0,
0.0,
0.0,
1.0,
4644.0,
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0]
|
[
"karthikchary078@gmail.com"
] |
karthikchary078@gmail.com
|
a2e42e709eda6693c8b426646fc61dabecabe108
|
b9c24117a729f16ea0aa1510874e0a69e6c26faa
|
/shallow and deep copy.py
|
9a7bc970b98d0149ab642736f76e20deb821e8f4
|
[] |
no_license
|
janakiraam/Python_Basic_Advance
|
1255c54084e604d65542fadb4479162eef111de0
|
b2aafaac5e9675d5c504e2912f1ccf97c160eb97
|
refs/heads/master
| 2023-03-24T23:51:52.315236
| 2021-03-06T16:44:16
| 2021-03-06T16:44:16
| 241,824,261
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,119
|
py
|
# Normal usage :
# ================
# =================
list_of_name = ['raam', 'janaki', 'ravi']
new_list_of_name = list_of_name
list_of_name.append('raju')
print(list_of_name)
print(new_list_of_name)
print('\n')
new_list_of_name.append('aravind')
print(list_of_name)
print(new_list_of_name)
print('\n')
print("id for old list and new list")
print(id(list_of_name),'\n', id(new_list_of_name))
print('\n\n')
print("id for old list,new list 2nd element")
print(id(list_of_name[1]),'\n', id(new_list_of_name[1]))
#with copy
#==============
#Shallow copy
#================
# The code below gives the following output, since rebinding new_list[0] does not affect old_list:
#[[1, 2, 3], [4, 5, 6], [6, 7, 8], [9, 10, 11]]
#[['a', 'b', 'c'], [4, 5, 6], [6, 7, 8], [9, 10, 11]]
import copy
old_list = [[1,2,3],[4,5,6],[6,7,8],[9,10,11]]
new_list = copy.copy(old_list)
new_list[0] = ['a','b','c']
print(old_list)
print(new_list)
print('\n')
print("id for old list and new list")
print(id(old_list),'\n', id(new_list))
print('\n\n')
print("id for old list,new list 2nd element")
print(id(old_list[0][2]),'\n', id(new_list[0][2]))
# The code below gives the following output, since mutating a nested element through the shallow copy does affect old_list1:
#[[1, 2, 'c'], [4, 5, 6], [6, 7, 8], [9, 10, 11]]
#[[1, 2, 'c'], [4, 5, 6], [6, 7, 8], [9, 10, 11]]
import copy
old_list1 = [[1,2,3],[4,5,6],[6,7,8],[9,10,11]]
new_list1 = copy.copy(old_list1)
new_list1[0][2] = 'c'
print(old_list1)
print(new_list1)
print("id for old list and new list")
print(id(old_list1),'\n', id(new_list1))
print('\n\n')
print("id for old list,new list 2nd element")
print(id(old_list1[0][2]),'\n', id(new_list1[0][2]))
# A shallow copy creates a new outer object but references the same inner elements.
#deepcopy
#=============
#[[1, 2, 3], [4, 5, 6], [6, 7, 8], [9, 10, 11]]
#[[1, 2, 'c'], [4, 5, 6], [6, 7, 8], [9, 10, 11]]
old_list2 = [[1,2,3],[4,5,6],[6,7,8],[9,10,11]]
new_list2 = copy.deepcopy(old_list2)
new_list2[0][2] = 'c'
print(old_list2)
print(new_list2)
# A deep copy creates a new outer object and recursively copies the inner elements as well.
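# Extra identity check (small sketch): the shallow copy shares its inner lists
# with the original, while the deep copy holds independent ones.
print(old_list1[0] is new_list1[0]) # True : shallow copy references the same inner list
print(old_list2[0] is new_list2[0]) # False: deepcopy created a new inner list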
|
[
"noreply@github.com"
] |
janakiraam.noreply@github.com
|
ffe6003a60e96f7e84532b6a1b26cb930799a22a
|
e67dcfdcc691d21e0fc85a6790923b9366b1a0b3
|
/retrieval_chatbot/route_exchange.py
|
5657c12c6655f3d655149fd4575d348b928968e4
|
[] |
no_license
|
Blackmamba-xuan/Meme
|
483f97e6811e12e86e49808a30a84fc1dd032e0c
|
623fba1f8f5bf5e5ce227803d531655f67496803
|
refs/heads/master
| 2020-07-11T06:07:07.512764
| 2019-11-01T13:00:35
| 2019-11-01T13:00:35
| 204,461,961
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
def main_model(input_utterance):
    # Route the utterance to a task-specific handler according to its classified type.
    # classification_task, weather_api, poems, rules, IR and seq2seq are helpers
    # defined elsewhere in the retrieval chatbot project.
    result = None
    task_type = classification_task(input_utterance)
    if task_type == "weather":
        result = weather_api()
    elif task_type == "poems":
        result = poems()
    # ... other task types are dispatched in the same way ...
    elif task_type == "chat":
        # For open chat, fall back from handcrafted rules to retrieval (IR)
        # and finally to seq2seq generation.
        result = rules()
        if not result:
            result = IR()
        if not result:
            result = seq2seq()
    return result
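# Hypothetical call (the task handlers above are project stubs):
#     reply = main_model("What's the weather like today?")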
|
[
"18094409g@connect.polyu.hk"
] |
18094409g@connect.polyu.hk
|
7cc93e0cd1acce5e592bf76f655825a0dc70c55d
|
b7a07ef19ee9c817fdc2d8bba2e20aaaeb811a04
|
/Interface/delayWindow.py
|
ac283db84ca8b4d82e04accbe2d18c82bd5998d2
|
[] |
no_license
|
CXZgogogo/Topo
|
378e027fe4083e005124b3152e7ba738fd92df28
|
613b1ed569a02cd25b55427d29a32bcad3656e20
|
refs/heads/master
| 2022-06-08T23:06:20.935581
| 2020-05-06T07:12:59
| 2020-05-06T07:12:59
| 261,679,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,544
|
py
|
from PyQt5.QtWidgets import QMessageBox
# import serial_test01 as ser
from SerialCommunication import Serial as ser
from Interface.delay import Ui_shiyan
from PyQt5.QtCore import *
from PyQt5 import QtWidgets
from Interface.sharedClass import *
import sys
# from getTopo import *
import matplotlib
matplotlib.use("Qt5Agg")
import logging
# The log file is overwritten on every run
logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
level=logging.INFO, filename='../log/procedure.log', filemode ='w', )
'''
Delay measurement page. A "back" signal is defined so the app can switch
between this measurement page and the main page.
Two nodes are chosen from the combobox selectors. If they are the same node,
the result box shows a message saying the selected nodes are identical. If they
differ, the measurement interface is called and the flag it returns indicates
whether the measurement failed; on failure a "measurement error" prompt is
shown, otherwise the validated measurement result is displayed.
'''
TABLE = readConfigurationFile("Database").getCon("table_adjacentNode")
delayTABLE=readConfigurationFile("Database").getCon("table_delay")
fupinTABLE=readConfigurationFile("Database").getCon("table_fupin")
class shiyanWindow(QtWidgets.QMainWindow,Ui_shiyan):
    # Signal used to return to the previous page
    backsignal2=pyqtSignal()
    def __init__(self):
        super(shiyanWindow,self).__init__()
        self.setupUi(self)  # Load the delay display window
        # Initialize the comboboxes
        self.init_combobox1()
        self.init_combobox2()
        # Slot for the exit button
        self.pushButton.clicked.connect(self.backmain)
        # Slot for the OK button
        self.pushButton_2.clicked.connect(lambda: self.timeshow())
        # Slot for the cancel button
        self.pushButton_3.clicked.connect(lambda: self.cancelbt())
        self.comboBox.currentIndexChanged.connect(self.btnState)
        logging.info('Delay page started')
def btnState(self):
combobox1_node=self.comboBox.currentText()
sql = "SELECT node FROM "+str(TABLE)+" WHERE src='%s'" % combobox1_node
neighborNodeList=self.lengthNum(sql)
self.comboBox_2.clear()
# self.comboBox_2.
for node in neighborNodeList:
self.comboBox_2.addItem(node['name'])
    # Combobox initialization
    def init_combobox1(self):
        # Query node names from the adjacency table and fill combobox1
sql = "SELECT DISTINCT(src) FROM "+str(TABLE)
realList1 = self.lengthNum(sql)
self.comboBox.setMaxCount(len(realList1))
for node in realList1:
self.comboBox.addItem(node['name'])
def init_combobox2(self):
        # Query node names from the adjacency table and fill combobox2
sql = "SELECT DISTINCT(src) FROM "+str(TABLE)
realList1 = self.lengthNum(sql)
self.comboBox_2.setMaxCount(len(realList1))
for node in realList1:
self.comboBox_2.addItem(node['name'])
    # Delay measurement and display
    def timeshow(self):
        # t1 and t2 are the nodes selected in the comboboxes
        t1=self.comboBox.currentText()
        t2=self.comboBox_2.currentText()
        if t1 == t2:
            logging.info('Invalid node selection')
            self.textEdit.setText("The two selected nodes are the same node")
        else:
            # Call the measurement interface
            flag=1
            flag=ser.delay_measure_order(t1,t2)
            # Use the returned flag to decide whether the measurement failed
if flag==1:
sql = "SELECT * FROM "+delayTABLE+" WHERE src='%s' AND node='%s'" % (t1, t2)
data=self.delay_info(sql)
logging.info(data)
if data==0xFFFF:
                    self.textEdit.setText('Node unreachable')
                elif data>=0:
                    self.textEdit.setText(str(data)+'ns')
                else:
                    self.textEdit.setText(str(data))
            else:
                start = QMessageBox.warning(self, "Warning!", 'Measurement failed, click OK to leave the measurement page', QMessageBox.Ok,QMessageBox.Ok)
if start == QMessageBox.Ok:
self.backmain()
# time.sleep(5)
self.backmain()
    # Query the names of all nodes from the adjacency table
def lengthNum(self,sql):
data = []
results=Sqlite().select(sql)
try:
for row in results:
result = {}
result['name'] = str(row[0])
data.append(result)
except:
logging.info("查询出错")
return data
def delay_info(self,sql):
try:
reseult = Sqlite().select(sql)
data = reseult[0][2]
except:
print("查询出错")
return data
    # Slot for the cancel button: clicking it resets the comboboxes to their initial state
def cancelbt(self):
self.comboBox.clear()
self.init_combobox1()
self.comboBox_2.clear()
self.init_combobox2()
    # Emits the back signal; clicking the exit button returns to the main page
def backmain(self):
        logging.info('Returning to the main page')
self.backsignal2.emit()
self.setVisible(0)
if __name__ == '__main__':
# QApplication.setAttribute(Qt.AA_EnableHighDpiScaling)
app = QtWidgets.QApplication(sys.argv)
the_window = shiyanWindow()
the_window.show()
sys.exit(app.exec_())
|
[
"1289010857@qq.com"
] |
1289010857@qq.com
|
4d42db21772329cedb1feb2ae1647f5c401730ee
|
3246cad39a0e05a049e3e39fae6af58338048a4b
|
/PCS/stackinghaybales.py
|
86c5f0aec0b26e4ab0ebe8c812b06da2bf1ff9af
|
[] |
no_license
|
josephsurin/cp-practice
|
dd333a93a43b4cf892035b39bd5732f2fc04d060
|
da9208ce9ce1b808a998944599fdc1faf7a1209c
|
refs/heads/master
| 2021-07-14T13:28:15.945699
| 2020-06-30T23:39:11
| 2020-06-30T23:39:11
| 174,022,379
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
from bisect import bisect_left
from itertools import combinations
def bsearch(a, x):
p = bisect_left(a, x)
return p != len(a) and x == a[p]
T = int(input())
for _ in range(T):
yay = False
N, H = [int(s) for s in input().split()]
Hs = [int(s) for s in input().split()]
    # Meet in the middle: split the bales into two halves and enumerate subset sums of each half
    A, B = Hs[:N//2], Hs[N//2:]
As = [0]
Bs = [0]
for n in range(1, 1+len(A)):
As += [sum(c) for c in combinations(A, n)]
for n in range(1, 1+len(B)):
Bs += [sum(c) for c in combinations(B, n)]
Bs.sort()
for a in As:
if a > H: continue
if bsearch(Bs, H - a):
yay = True
break
print("YAY") if yay else print("NAY")
|
[
"joseph.surin@gmail.com"
] |
joseph.surin@gmail.com
|
4f01af13340f00b1a771eeebe991e21e898ef8b4
|
d488f052805a87b5c4b124ca93494bc9b78620f7
|
/google-cloud-sdk/lib/googlecloudsdk/core/updater/update_check.py
|
6ccb6d970b3fddbb2a5bd1e2adfb2cc3e7311736
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] |
permissive
|
PacktPublishing/DevOps-Fundamentals
|
5ce1fc938db66b420691aa8106ecfb3f9ceb1ace
|
60597e831e08325c7e51e8557591917f7c417275
|
refs/heads/master
| 2023-02-02T04:48:15.346907
| 2023-01-30T08:33:35
| 2023-01-30T08:33:35
| 131,293,311
| 13
| 19
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,232
|
py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements update checking and notification to the user.
It provides a context manager around the cache file that stores information
about the last update check. The general process is as follows:
1) This stores the last time an update check occurred, so the check will only
be done if the update check frequency has expired.
2) When an update check is done, all notifications in the latest snapshot are
queried to see if their condition matches the current state of the SDK. Any
notifications that match are "activated" and cached.
3) Every time a command is run, Notify() is called to notify the user of
available updates. It loops over the activated notifications and determines
if any of the triggers match the current command invocation. If there is a
match, the notification is printed and the last nag time is recorded for that
particular notification. At most one notification is printed per command.
The priority is determined by the order the notifications are registered
in the component snapshot.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import json
import os
import time
from googlecloudsdk.core import config
from googlecloudsdk.core import log
from googlecloudsdk.core.updater import schemas
import six
class UpdateCheckData(object):
"""A class to hold update checking data and to perform notifications."""
UPDATE_CHECK_FREQUENCY_IN_SECONDS = 86400 # Once a day.
def __init__(self):
self._last_update_check_file = config.Paths().update_check_cache_path
self._dirty = False
self._data = self._LoadData()
def _LoadData(self):
"""Deserializes data from the json file."""
if not os.path.isfile(self._last_update_check_file):
return schemas.LastUpdateCheck.FromDictionary({})
with open(self._last_update_check_file) as fp:
try:
data = json.loads(fp.read())
return schemas.LastUpdateCheck.FromDictionary(data)
except ValueError:
log.debug('Failed to parse update check cache file. Using empty '
'cache instead.')
return schemas.LastUpdateCheck.FromDictionary({})
def _SaveData(self):
"""Serializes data to the json file."""
if not self._dirty:
return
with open(self._last_update_check_file, 'w') as fp:
fp.write(json.dumps(self._data.ToDictionary()))
self._dirty = False
def __enter__(self):
return self
def __exit__(self, *args):
self._SaveData()
def LastUpdateCheckRevision(self):
"""Gets the revision of the snapshot from the last update check.
Returns:
long, The revision of the last checked snapshot. This is a long int but
formatted as an actual date in seconds (i.e 20151009132504). It is *NOT*
seconds since the epoch.
"""
return self._data.last_update_check_revision
def LastUpdateCheckTime(self):
"""Gets the time of the last update check as seconds since the epoch.
Returns:
int, The time of the last update check in seconds since the epoch.
"""
return self._data.last_update_check_time
def SecondsSinceLastUpdateCheck(self):
"""Gets the number of seconds since we last did an update check.
Returns:
int, The amount of time in seconds.
"""
return time.time() - self._data.last_update_check_time
def ShouldDoUpdateCheck(self):
"""Checks if it is time to do an update check.
Returns:
True, if enough time has elapsed and we should perform another update
check. False otherwise.
"""
return (self.SecondsSinceLastUpdateCheck() >=
UpdateCheckData.UPDATE_CHECK_FREQUENCY_IN_SECONDS)
def UpdatesAvailable(self):
"""Returns whether we already know about updates that are available.
Returns:
bool, True if we know about updates, False otherwise.
"""
return bool([
notification for notification in self._data.notifications
if notification.condition.check_components
])
def SetFromSnapshot(self, snapshot, component_updates_available, force=False):
"""Sets that we just did an update check and found the given snapshot.
If the given snapshot is different than the last one we saw, refresh the set
of activated notifications for available updates for any notifications with
matching conditions.
You must call Save() to persist these changes or use this as a context
manager.
Args:
snapshot: snapshots.ComponentSnapshot, The latest snapshot available.
component_updates_available: bool, True if there are updates to components
we have installed. False otherwise.
force: bool, True to force a recalculation of whether there are available
updates, even if the snapshot revision has not changed.
"""
if force or self.LastUpdateCheckRevision() != snapshot.revision:
log.debug('Updating notification cache...')
current_version = config.INSTALLATION_CONFIG.version
current_revision = config.INSTALLATION_CONFIG.revision
activated = []
possible_notifications = snapshot.sdk_definition.notifications
for notification in possible_notifications:
if notification.condition.Matches(
current_version, current_revision, component_updates_available):
log.debug('Activating notification: [%s]', notification.id)
activated.append(notification)
self._data.notifications = activated
self._CleanUpLastNagTimes()
self._data.last_update_check_time = time.time()
self._data.last_update_check_revision = snapshot.revision
self._dirty = True
def SetFromIncompatibleSchema(self):
"""Sets that we just did an update check and found a new schema version.
An incompatible schema version means there are definitely updates available
but we can't read the notifications to correctly notify the user. This will
install a default notification for the incompatible schema.
You must call Save() to persist these changes or use this as a context
manager.
"""
log.debug('Incompatible schema found. Activating default notification.')
# Nag once a week to update if the schema changed and we don't know what's
# going on anymore.
notification_spec = schemas.NotificationSpec(
id='incompatible',
condition=schemas.Condition(None, None, None, None, False),
trigger=schemas.Trigger(frequency=604800, command_regex=None),
notification=schemas.Notification(None, None, None)
)
self._data.notifications = [notification_spec]
self._CleanUpLastNagTimes()
self._data.last_update_check_time = time.time()
self._data.last_update_check_revision = 0 # Doesn't matter
self._dirty = True
def _CleanUpLastNagTimes(self):
"""Clean the map holding the last nag times for each notification.
If a notification is no longer activate, it is removed from the map. Any
notifications that are still activated have their last nag times preserved.
"""
activated_ids = [n.id for n in self._data.notifications]
self._data.last_nag_times = (
dict(
(name, value)
for name, value in six.iteritems(self._data.last_nag_times)
if name in activated_ids))
def Notify(self, command_path):
"""Notify the user of any available updates.
This should be called for every command that is run. It does not actually
do an update check, and does not necessarily notify the user each time. The
user will only be notified if there are activated notifications and if the
trigger for one of the activated notifications matches. At most one
notification will be printed per command. Order or priority is determined
by the order in which the notifications are registered in the component
snapshot file.
Args:
command_path: str, The '.' separated path of the command that is currently
being run (i.e. gcloud.foo.bar).
"""
# Only nag if we are running in an interactive terminal.
if not log.out.isatty() or not log.status.isatty():
return
for notification in self._data.notifications:
name = notification.id
last_nag_time = self._data.last_nag_times.get(name, 0)
# Only notify if the trigger matches. Exit as soon as one notification
# is printed.
if notification.trigger.Matches(last_nag_time, command_path):
log.status.write(notification.notification.NotificationMessage())
self._data.last_nag_times[name] = time.time()
self._dirty = True
break
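# Illustrative usage sketch (not part of the SDK source): the context-manager
# flow described in the module docstring. The command path below is a made-up
# example and the snapshot fetch is elided.
def _example_update_check_flow():
  with UpdateCheckData() as update_data:
    if update_data.ShouldDoUpdateCheck():
      pass  # Fetch the latest component snapshot here, then SetFromSnapshot(...).
    update_data.Notify('gcloud.components.list')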
|
[
"saneetk@packtpub.com"
] |
saneetk@packtpub.com
|
3009a0127697ed914a468e906398c7a75618b777
|
aea257b107055b068b9b76ffec3b5510c8a70854
|
/manage.py
|
3567b48281fb8b8e43244b1bf890a77d1da362be
|
[] |
no_license
|
Rajkishor1994/MigrationsPro
|
0073ff430821b41a07403380f18cef2261731bb6
|
3a24c475a4ab5331f11312d4fdd11389685041a2
|
refs/heads/master
| 2022-12-01T09:34:52.240894
| 2020-08-13T16:18:10
| 2020-08-13T16:18:10
| 287,323,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MigrationPro.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"rajkishorpatel2020@gmail.com"
] |
rajkishorpatel2020@gmail.com
|
b032ad08d3b3065c5de630e39a8aa2182566f721
|
67a0355498545cae7970dccd4a400ad56e2c942e
|
/CaliperGraphsSingle - Copy.py
|
cd3f631dfac88c8a1a753053663ef831d6c57e55
|
[] |
no_license
|
bradmartin333/ChartsAndFigures
|
58c7701800db7a704c7c8c85a6f079339a0fa9b1
|
ea57df73ba29e6bb0cbcbcd45fa08c695287fd7a
|
refs/heads/main
| 2023-01-31T03:00:52.850041
| 2020-12-14T16:35:04
| 2020-12-14T16:35:04
| 321,408,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 882
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mp
file = 'LED'
variable = 'White'
types =[['ContactPost', 'NoStampPost', '100umMesa', 'NoStampMesa'],
['black', 'black', 'black', 'white'],
[0.25, 0.35, 0.45, 0.55],
['/', '', '', '\\']]
fig = plt.figure()
ax = fig.add_subplot(111)
for i in range(4): # Iterate through plots
data = pd.read_excel(file + '.xlsx', sheet_name=types[0][i])
thisData = data[variable]
ax.hist(thisData.values.tolist(),
color=types[1][i],
alpha=types[2][i],
hatch=types[3][i],
histtype='stepfilled',
density=True,
bins=3,
ec='k')
ax.set_title(variable)
ax.set_ylabel('Density')
ax.set_xlabel('Measured Pitch (microns)')
plt.legend(types[0], loc='upper left')
plt.show()
|
[
"noreply@github.com"
] |
bradmartin333.noreply@github.com
|
939c745e451bfe5a503c3b80cf6f016aa69f713e
|
66bf5c4eee37b11ef90f096b6d9b0228409e1f6e
|
/GoodAI.py
|
dbd7015049bc5459e018a50dbeac32aea00b5287
|
[] |
no_license
|
vermaapoorva/TicTacToe
|
0c1b612ff22cbfa14300c767201d857ac590ffcc
|
07f60704c3a0bd8ddf453c190c39d4d9bf9e8c2e
|
refs/heads/master
| 2021-01-02T20:28:25.368585
| 2020-06-30T22:01:34
| 2020-06-30T22:01:34
| 239,786,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,143
|
py
|
import random
import TicTacToeBoard
def computer_AI(board):
# If possible, win immediately
for i in range(0,9):
if board.is_free(i):
copied_board = board.copy()
copied_board.input_move(i, TicTacToeBoard.NOUGHT)
if copied_board.win(TicTacToeBoard.NOUGHT):
return i
# Prevent player from winning immediately
for i in range(0,9):
if board.is_free(i):
copied_board = board.copy()
copied_board.input_move(i, TicTacToeBoard.CROSS)
if copied_board.win(TicTacToeBoard.CROSS):
return i
if board.is_free(4):
return 4
move = choose_move_from_list(board, [0,2,6,8])
if move is not None:
return move
move = choose_move_from_list(board, [1,3,5,7])
if move is not None:
return move
def choose_move_from_list(board, moves_list):
#Randomly select a value from a list of values
possible_moves = []
for i in moves_list:
if board.is_free(i):
possible_moves.append(i)
if len(possible_moves) > 0:
return random.choice(possible_moves)
|
[
"noreply@github.com"
] |
vermaapoorva.noreply@github.com
|
35fe41746b2676448b15ca7357c272a97101293c
|
dc65113c485f9e256b1dbdcc4e357f0a6a3a2652
|
/Session03/create.py
|
e4bbd5e1d13a00d3815948f27580ad48d974a44e
|
[] |
no_license
|
tvloc38/loctran-fundamentals-C4E18
|
ce44dcf9aaf8f62110f79391132cc3a5e766152c
|
522ad3c9d88170464d635c461bb16be58b38222b
|
refs/heads/master
| 2020-03-18T15:44:57.552022
| 2018-09-09T05:07:02
| 2018-09-09T05:07:02
| 134,927,428
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
fav = ["an", "ngu", "lam viec"]
print("Hi there, here you favorite things so far", end=" ")
print(*fav, sep=", ")
add = input("Name one thing you want to add? ")
fav.append(add)
print("Hi there, here you favorite things so far", end=" ")
print(*fav, sep=", ")
|
[
"auwater@BBJulie.net.fpt"
] |
auwater@BBJulie.net.fpt
|
52b7eea9a050eed7f435bdd2937508bcf221a867
|
fa7295b2e6d37d8bd19153982ae66cfb550485eb
|
/Python/Esfera.py
|
7b4b17e9a3e4f25f88faeb04c8202ae2d1a1dc9c
|
[] |
no_license
|
rNexeR/Intro520
|
78da63e0eb18098881b95254371621f58cd9b18d
|
836c371aebd0c2ddbbe5b285e78f212987d0e7c9
|
refs/heads/master
| 2021-01-10T21:20:02.650956
| 2015-09-11T00:33:10
| 2015-09-11T00:33:10
| 41,833,225
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38
|
py
|
from visual import *
sphere()
#box()
|
[
"rnexer@gmail.com"
] |
rnexer@gmail.com
|
dea6df01c0631c976e880034fba3284ad134ab64
|
51e91402a7bc9bdc2c4924d8cb9905bb832099ce
|
/apps/users/urls.py
|
d8634ad3dbf7c2db179b8f2623b9b9ef565956e0
|
[] |
no_license
|
pywjh/Myblog
|
26aef534ae7963bca437fd8f947184f487ea0112
|
d3896c2d5333614b2ced9658b5968ff6f1c7cf0f
|
refs/heads/master
| 2021-07-24T09:07:33.432962
| 2020-05-15T08:00:42
| 2020-05-15T08:00:42
| 167,568,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/1/29 15:14
# @Author : wjh
# @File : urls.py
from django.urls import path
from . import views
app_name = 'users'
urlpatterns = [
path('login/', views.LoginViews.as_view(), name='login'),
path('register/', views.RegisterView.as_view(), name='register'),
path('logout/', views.LogoutView.as_view(), name='logout'),
]
|
[
"15885405914@163.com"
] |
15885405914@163.com
|
66b3156915e1f3b46de157ac6faffe557b2aa457
|
b68c92fe89b701297f76054b0f284df5466eb698
|
/LeetCode/Solved/Easy/SortArrayByParity.py
|
5ef6610d9aedbd1aaa33126b7e37f5df4949867d
|
[] |
no_license
|
makrandp/python-practice
|
32381a8c589f9b499ab6bde8184a847b066112f8
|
60218fd79248bf8138158811e6e1b03261fb38fa
|
refs/heads/master
| 2023-03-27T18:11:56.066535
| 2021-03-28T04:02:00
| 2021-03-28T04:02:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
'''
905. Sort Array By Parity
Given an array A of non-negative integers, return an array consisting of all the even elements of A, followed by all the odd elements of A.
You may return any answer array that satisfies this condition.
Example 1:
Input: [3,1,2,4]
Output: [2,4,3,1]
The outputs [4,2,3,1], [2,4,1,3], and [4,2,1,3] would also be accepted.
Note:
1 <= A.length <= 5000
0 <= A[i] <= 5000
'''
from typing import List
class Solution:
def sortArrayByParity(self, A: List[int]) -> List[int]:
o, e = list(), list()
for n in A:
if n % 2 == 0:
e.append(n)
else:
o.append(n)
return e + o
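# Quick sanity check of the approach above (illustrative, not part of the
# original submission). A stable sort keyed on parity gives an equally valid
# answer for this problem.
if __name__ == '__main__':
    sample = [3, 1, 2, 4]
    print(Solution().sortArrayByParity(sample))  # [2, 4, 3, 1]
    print(sorted(sample, key=lambda n: n % 2))   # [2, 4, 3, 1]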
|
[
"awalexweber99@gmail.com"
] |
awalexweber99@gmail.com
|
4b017969fbab241b1cc35b3426fdff46ec1d1471
|
b8350d2accaa09c1a626a40fc420649647716921
|
/PollingApp/settings.py
|
329a7676fc846cf06c0bd271525fa7037df3a36b
|
[] |
no_license
|
Shahriar-Nibir/Django-Polling-App
|
763b20a2a88625bf348a6b4af55fd07f109e23d2
|
96a4da9d50c82f95a79ebb361a84b4ff5cfd9405
|
refs/heads/master
| 2023-03-16T21:10:03.987365
| 2021-03-11T12:57:12
| 2021-03-11T12:57:12
| 340,272,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,449
|
py
|
"""
Django settings for PollingApp project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
from decouple import config
import mimetypes
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
mimetypes.add_type("text/css", ".css", True)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'PollingApp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templete')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'PollingApp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, 'static')
# ]
#STATIC_ROOT = os.path.join(BASE_DIR, 'asset')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
MEDIA_URL = '/image/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'static/image')
|
[
"shahriarnibir615@gmail.com"
] |
shahriarnibir615@gmail.com
|
46c5a276fd993f87a1989f4b32bfcecd5f5fb22b
|
2b72ba57136ba502ac17461f6fa8ff9119c9e6e3
|
/main.py
|
d5a3d6d65330dbe709851ccbc0ee9cc5547960c2
|
[] |
no_license
|
therobertc/alpaca-trading-algo
|
d29e97d89a84048ef9441c0052f552f1bd2135fc
|
226411086451bd1e4920d4e54144085dc4d4dad5
|
refs/heads/master
| 2023-01-14T14:02:32.387480
| 2020-11-19T00:13:08
| 2020-11-19T00:13:08
| 311,785,897
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,024
|
py
|
#import alpaca_trade_api
import alpaca_trade_api as tradeapi
import time
#live_trading_key
#paper trading key
key = "PKISDQ3TNH5GKBZLZM56"
sec = "GENERATE NEW API SECRET KEY"
#API endpoint URL
url = "https://paper-api.alpaca.markets"
#api_versions
#important to read documentation on version updates
api = tradeapi.REST(key, sec, url, api_version='v2')
account = api.get_account()
#Should print 'ACTIVE'
print(account.status)
#Place buy order:
#When placing orders, use the API
#api.submit_order() allows you to pass params to place order
#Example buy order:
#time_in_force="gtc"
#All args in the submit_order() MUST BE STR!
#Symbol means the stock ticker (FB, AAPL, IBM)
#qty means quantity
#side means buy or sell
#type means market or limit
#if using limit orders add
#limit_price=20.50
order = api.submit_order(symbol="NIO",
qty="100",
side="buy",
type="market",
time_in_force="day")
time.sleep(5)
print(order)
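#Example: the same call shaped as a limit order (illustrative sketch based on
#the parameters above and the limit_price note in the comments; not executed)
#order = api.submit_order(symbol="NIO",
#                         qty="100",
#                         side="sell",
#                         type="limit",
#                         limit_price="20.50",
#                         time_in_force="day")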
|
[
"rob.jcalderon@gmail.com"
] |
rob.jcalderon@gmail.com
|
066dc8d71ad062c05947a5e3b00572768533e252
|
a6418fe6229a408557cb53acbf94a6f4b30a2c2a
|
/pygazebo/pygazebo/test.py
|
21c4d2919a2513a8e091d4f8352cdb6356e18992
|
[] |
no_license
|
Freedom-Guo/Pygazebo_TurtleBot3
|
02474f013b5f90a2a0e7a34a581d30d370ec2411
|
f2270ed86085dbd3a4576dd15fe7ed0c0a8bd5f6
|
refs/heads/main
| 2023-03-22T10:12:22.772549
| 2021-03-05T09:32:56
| 2021-03-05T09:32:56
| 344,750,849
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,487
|
py
|
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
import random
import pygazebo as gazebo
import numpy as np
import matplotlib.pyplot as plt
import time
import math
gazebo.initialize()
world = gazebo.new_world_from_file("/home/freedomguo/3D_collisionavoidance/world/turtlebot3_stage_2.world")
# world = gazebo.new_world_from_file("../worlds/pioneer_laser.world")
agents = world.get_agents()
agents[0].get_joint_names()
agents[1].get_joint_names()
agents[2].get_joint_names()
world.info()
agents[0].set_pose(((-1, -1, 0), (0, 0, 0)))
agents[1].set_pose(((1, 1, 0), (0, 0, 0)))
agents[2].set_pose(((0, 0, 0), (0, 0, 0)))
agents[3].set_pose(((1, -1, 0), (0, 0, 0)))
agents[4].set_pose(((-1, 1, 0), (0, 0, 0)))
# class evader_observation
for i in range(10000000):
# observation = agent.sense()
# add reward and text to observation
# action = model.compute_action(observation)
# agent.take_action(action)
# steps = random.randint(10, 20)
steps = 200
print(steps)
# agent.set_pose(((random.random() * (-1.9), random.random() * 1.9, 0.00), (0.00, 0.00, 0.00)))
print("start sim time:")
print(world.get_sim_time())
print("start wall time:")
print(world.get_wall_time())
time_start = time.time()
for j in range(steps):
agents[0].set_twist(((0.4, 0, 0),(0, 0, 0)))
# print(world.get_sim_time())
# time_start=world.get_sim_time()
world.step()
# print(world.get_sim_time())
# time_end=world.get_sim_time()
# timer = time_end - time_start
# print(timer)
time_end = time.time()
print(time_end - time_start)
pose = []
pose.append(agents[0].get_pose())
pose.append(agents[1].get_pose())
# print(agent.get_twist())
print("end sim time:")
print(world.get_sim_time())
print("end wall time:")
print(world.get_wall_time())
print(pose)
# world.info()
if i % 2 == 1:
obs1 = agents[0].get_ray_observation(
"default::pursuer0::turtlebot3_waffle::lidar::hls_lfcd_lds")
obs2 = agents[0].get_ray_observation(
"default::pursuer1::turtlebot3_waffle::lidar::hls_lfcd_lds")
obs3 = agents[0].get_camera_observation("default::camera::camera_link::camera")
# print(obs1)
npdata1 = np.array(obs1, copy=False)
npdata2 = np.array(obs2, copy=False)
npdata3 = np.array(obs3, copy=False)
plt.imshow(npdata3)
plt.show()
print(npdata1)
|
[
"2354323727@qq.com"
] |
2354323727@qq.com
|
22671a7b227fefc57639bba7a66549f10b6c7acd
|
056d8a5f562f7d76ccae743383f8bebe9bbf4170
|
/set2/challenge12.py
|
7fd03ee504dd854cb8f17184d99bb7116839806f
|
[] |
no_license
|
dychen/cryptopals
|
7fc0d93fc1f65fafe5eba90967540d6d7b0bb817
|
267fd7425774d9d373d7c0a9922bbb1b7bf44a2b
|
refs/heads/master
| 2020-04-06T18:38:10.051305
| 2015-08-22T07:14:34
| 2015-08-22T07:14:34
| 31,119,952
| 5
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,285
|
py
|
"""
Byte-at-a-time ECB decryption (Simple)
--------------------------------------
Copy your oracle function to a new function that encrypts buffers under ECB
mode using a consistent but unknown key (for instance, assign a single random
key, once, to a global variable).
Now take that same function and have it append to the plaintext, BEFORE
ENCRYPTING, the following string:
Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkg
aGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBq
dXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUg
YnkK
Spoiler alert.
Do not decode this string now. Don't do it.
Base64 decode the string before appending it. Do not base64 decode the string
by hand; make your code do it. The point is that you don't know its contents.
What you have now is a function that produces:
AES-128-ECB(your-string || unknown-string, random-key)
It turns out: you can decrypt "unknown-string" with repeated calls to the
oracle function!
Here's roughly how:
1. Feed identical bytes of your-string to the function 1 at a time --- start
with 1 byte ("A"), then "AA", then "AAA" and so on. Discover the block size of
the cipher. You know it, but do this step anyway.
2. Detect that the function is using ECB. You already know, but do this step
anyways.
3. Knowing the block size, craft an input block that is exactly 1 byte short
(for instance, if the block size is 8 bytes, make "AAAAAAA"). Think about what
the oracle function is going to put in that last byte position.
4. Make a dictionary of every possible last byte by feeding different strings
to the oracle; for instance, "AAAAAAAA", "AAAAAAAB", "AAAAAAAC", remembering
the first block of each invocation.
5. Match the output of the one-byte-short input to one of the entries in your
dictionary. You've now discovered the first byte of unknown-string.
6. Repeat for the next byte.
Congratulations.
This is the first challenge we've given you whose solution will break real
crypto. Lots of people know that when you encrypt something in ECB mode, you
can see penguins through it. Not so many of them can decrypt the contents of
those ciphertexts, and now you can. If our experience is any guideline, this
attack will get you code execution in security tests about once a year.
"""
from challenge10 import b642hex, aes_ecb_encrypt
from challenge11 import rand_bytes
def is_ascii(char):
"""
@param char [str]: ASCII character
@returns True if @char is a valid ASCII character and False otherwise
"""
return ord(char) not in set([128] + [153] + range(161, 255) + range(0, 9) +
range(11, 32) + [127] + range(129, 153) +
range(154, 161))
class SessionOracle:
"""
Encrypts input PTs with AES 128 in ECB mode using a session key and padding
the PT with a secret string. Our goal is to figure out the secret string
with repeated calls to the oracle.
"""
__SECRET_STRING = (
'Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkg'
'aGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBq'
'dXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUg'
'YnkK'
)
def __init__(self):
self.__key = rand_bytes(16) # Establish a random 16-byte key for the
# length of this "session"
def encrypt(self, pt):
"""
Returns E_k(pt || SECRET_STRING) where E_k is AES 128 in ECB mode and
|| is the concatenation operator. The same key k is used for the entire
session.
@param pt [str]: ASCII PT
@returns [str]: ASCII CT
"""
padded_pt = pt + b642hex(self.__SECRET_STRING).decode('hex')
return aes_ecb_encrypt(self.__key, padded_pt)
def decrypt_session_secret():
"""
Decrypt an AES 128 ECB mode oracle with a session key and PKCS#7 padding
with the following steps:
1. Find out the blocksize and make sure the oracle is in ECB mode.
(see get_blocksize())
2. Get the unpadded length of the session message.
(see get_msg_length())
3. Decode the session message by checking payload messages of the form
P || M || Y where P is a prefix of the desired length, M is the
first |M| known bytes of the message, and Y is the character we
think might be the next character of the message. We check this
against the results of sending the payload P at the index of Y. If
the bytes match, we might have found a hit. Since multiple bytes can
match here, we use a recursive strategy to find the entire string.
(see decode())
"""
def get_blocksize(maxlen=1024):
"""
Returns the blocksize of the oracle. Continually add one byte of
padding until, for an input message of length M, the first M/2 bytes of
the output message are equal to the second M/2 bytes. This also checks
that the oracle is using ECB mode. If no repetition is found up to a
large maximum length, then assume that ECB mode is not being used.
@returns [int]: Blocksize of the ECB mode cipher or False if no
blocksize is found (this implies that ECB mode is not
set).
"""
paddedlen = len(oracle.encrypt(''))
padbytes = 1
while (oracle.encrypt('A' * padbytes)[:padbytes/2]
!= oracle.encrypt('A' * padbytes)[padbytes/2:padbytes]):
if padbytes > maxlen:
return False
padbytes += 1
return padbytes / 2
def get_msg_length():
"""
Returns the non-padded length of the secret message. The way to do this
is to find the length of the padded message and subtract the number of
bytes of padding. The number of bytes of padding can be found by
continually adding one byte of padding until the length of the returned
message changes (it will increase by blocksize).
@returns [int]: Non-padded length of the secret message.
"""
paddedlen = len(oracle.encrypt(''))
for i in range(1, blocksize+1): # 1 pad is required where msglen %
# blocksize is 0
# blocksize pads are required where
# msglen % blocksize is 1
if paddedlen != len(oracle.encrypt('A' * i)):
return paddedlen - i + 1
def next_byte(padlen, blockidx, msg):
"""
Sends the following payloads to the oracle:
'A'*@padlen || @msg || Y, where Y is a char from 0x00 to 0xff
'A'*@padlen, the target payload
Assuming @msg is correct (is equal to the first |@msg| bytes of the
secret message), the first |'A'*@padlen| + |@msg| bytes of all payloads
(including the target payload) should be the same, and the next byte
should be compared. Note that |'A'*@padlen| + |@msg| + 1 should be a
multiple of the blocksize because we want to check the equality of full
blocks. The next character is the Y where the CT of the first
|'A'*@padlen| + |@msg| + 1 bytes are equal to the same first bytes of
CT from the target payload. Since each PT uniquely maps to a CT, there
can only be one correct Y. And there must be at least one correct Y
because the domain of Y spans the set of all possible characters.
Return Y.
@param padlen [int]: Length of the payload.
@param blockidx [int]: The block the target byte is in.
@param msg [str]: Current known message.
@returns [str]: Next character in the message.
"""
payload_prefix = 'A' * padlen
blockcmp = blocksize * (blockidx + 1)
# Mapping of { ptbyte: ct[:blockcmp] } for all pt bytes [int] in
# (0, 255).
ct_mapping = [oracle.encrypt(payload_prefix + msg + chr(c))[:blockcmp]
for c in range(256)]
target_str = oracle.encrypt(payload_prefix)[:blockcmp]
possibilities = [chr(i) for i, ctprefix in enumerate(ct_mapping)
if ctprefix == target_str][0] # Should always be
# of length 1
return possibilities
def decode():
"""
Decodes the secret message by finding successive bytes of the message
blockwise. To find the first byte, the padding starts at blocksize - 1
and decreases until the first entire block is found. Then the process
repeats for successive blocks until the entire message is found.
@returns [str]: The decoded oracle secret message.
"""
msg = ''
padlen = blocksize - 1
blockidx = 0
while len(msg) < msglen:
if padlen == 0:
padlen = blocksize
blockidx += 1
msg += next_byte(padlen, blockidx, msg)
padlen -= 1
return msg
oracle = SessionOracle()
blocksize = get_blocksize()
msglen = get_msg_length()
return decode()
if __name__=='__main__':
print decrypt_session_secret()
|
[
"daniel@a16z.com"
] |
daniel@a16z.com
|
d325b9be14d75eede58d5c841d095ea2f32060a1
|
8d0c2f23b802b50edf9314a782aff39517a59918
|
/venv/bin/flask
|
b1d3d69212cd29fccc6c1ca71aa4695623464654
|
[] |
no_license
|
Andchenn/CookiesPool
|
f224e8737665956f55c6ef5f4f80135670856082
|
690f321f884296818c92748207c2fd116725582d
|
refs/heads/master
| 2020-03-26T03:09:28.221394
| 2018-08-12T06:04:12
| 2018-08-12T06:04:12
| 144,441,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
#!/home/feng/PycharmProjects/Login/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"1476204794@qq.com"
] |
1476204794@qq.com
|
|
87f494cac3d5aa590858d5ac8ffbef57ec70e2f0
|
e2a2df02f1c30a31207e58bffbb074021c6dfd4f
|
/data_analysis_2/first-level/first.py
|
53eb75a60b5c1583c811199c5877613ef7284ced
|
[] |
no_license
|
yuriyward/data_analysis
|
e0903702ee0cfdad9a48454df185962614d844a7
|
c0291da9df3d742b68acc3132f24b4fc9121bd05
|
refs/heads/master
| 2023-08-08T05:25:03.803527
| 2018-11-29T22:51:21
| 2018-11-29T22:51:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,946
|
py
|
import numpy as np
import pandas
from scipy import stats
import matplotlib.pyplot as plt
from collections import Counter
import seaborn as sns
def read_data():
global data
data = pandas.read_csv(
'../data/abalone.data',
header=None,
names=[
'Sex', 'Length', 'Diameter', 'Height', 'Whole weight',
'Shucked weight', 'Viscera weight', 'Shell weight', 'Rings'
]
)
def mean_med_min_max():
# print(data)
length_min = np.min(data['Length'])
length_max = np.max(data['Length'])
length_median = np.median(data['Length'])
diameter_min = np.min(data['Diameter'])
diameter_max = np.max(data['Diameter'])
diameter_median = np.median(data['Diameter'])
height_min = np.min(data['Height'])
height_max = np.max(data['Height'])
height_median = np.median(data['Height'])
whole_weight_min = np.min(data['Whole weight'])
whole_weight_max = np.max(data['Whole weight'])
whole_weight_median = np.median(data['Whole weight'])
shucked_weight_min = np.min(data['Shucked weight'])
shucked_weight_max = np.max(data['Shucked weight'])
shucked_weight_median = np.median(data['Shucked weight'])
viscera_weight_min = np.min(data['Viscera weight'])
viscera_weight_max = np.max(data['Viscera weight'])
viscera_weight_median = np.median(data['Viscera weight'])
shell_weight_min = np.min(data['Shell weight'])
shell_weight_max = np.max(data['Shell weight'])
shell_weight_median = np.median(data['Shell weight'])
rings_min = np.min(data['Rings'])
rings_max = np.max(data['Rings'])
rings_median = np.median(data['Rings'])
sex_mode = stats.mode(data["Sex"])[0][0]
sex_mode_dominate = Counter(data["Sex"]).most_common(1)
print("1 zadanie")
print("Length min - {}".format(length_min))
print("Length max - {}".format(length_max))
print("Length median - {}".format(length_median))
print("Diameter min - {}".format(diameter_min))
print("Diameter max - {}".format(diameter_max))
print("Diameter median - {}".format(diameter_median))
print("Height min - {}".format(height_min))
print("Height max - {}".format(height_max))
print("Height median - {}".format(height_median))
print("Whole weight min - {}".format(whole_weight_min))
print("Whole weight max - {}".format(whole_weight_max))
print("Whole weight median - {}".format(whole_weight_median))
print("Shucked weight min - {}".format(shucked_weight_min))
print("Shucked weight max - {}".format(shucked_weight_max))
print("Shucked weight median - {}".format(shucked_weight_median))
print("Viscera weight min - {}".format(viscera_weight_min))
print("Viscera weight max - {}".format(viscera_weight_max))
print("Viscera weight median - {}".format(viscera_weight_median))
print("Shell weight min - {}".format(shell_weight_min))
print("Shell weight max - {}".format(shell_weight_max))
print("Shell weight median - {}".format(shell_weight_median))
print("Rings min - {}".format(rings_min))
print("Rings max - {}".format(rings_max))
print("Rings median - {}".format(rings_median))
print("Sex mode - {}".format(sex_mode))
print("Sex mode dominate - {}\n".format(sex_mode_dominate))
def plot_histogram():
corr = data.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values,
annot=True
)
plt.show()
length_histogram = data['Length']
diameter_histogram = data['Diameter']
legend = ['Length', 'Diameter']
plt.hist([length_histogram, diameter_histogram], color=['orange', 'green'])
plt.xlabel('Length')
plt.ylabel('Diameter')
plt.title('Length and Diameter')
plt.grid()
plt.legend(legend)
plt.show()
if __name__ == "__main__":
read_data()
mean_med_min_max()
plot_histogram()
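# A more compact equivalent of the per-column blocks in mean_med_min_max()
# (illustrative sketch only; it assumes the same module-level `data` frame and
# that 'Sex' is the only non-numeric column):
def summary_sketch():
    numeric = data.drop(columns=['Sex'])
    for col in numeric.columns:
        print("{} min - {}".format(col, np.min(numeric[col])))
        print("{} max - {}".format(col, np.max(numeric[col])))
        print("{} median - {}".format(col, np.median(numeric[col])))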
|
[
"yuriy.babyak@outlook.com"
] |
yuriy.babyak@outlook.com
|
2ef6c853d57178c1a9db42cd2c9660cd2b2a3efe
|
a3281e0308119657faf79637bc66f02e1c8717c1
|
/blog_src/blog_src/asgi.py
|
9a6cd011b93e53b4ffb7ad590e29a41770544217
|
[] |
no_license
|
Divve99/web-pages_2
|
f2060bcbc80db9f13b8b4e4ebdfc7660ce995bae
|
92d62610cbed0cae27cbbe7bb4abfaa02e79814c
|
refs/heads/master
| 2022-12-19T10:23:24.123057
| 2020-09-22T10:14:07
| 2020-09-22T10:14:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
ASGI config for blog_src project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blog_src.settings')
application = get_asgi_application()
|
[
"aparnanaidu20@yahoo.com"
] |
aparnanaidu20@yahoo.com
|
4bae25882f49ca2d8b00d02deae8726d043f23ac
|
561e8e91b147f66c1bc0d0c2550268b487512aa3
|
/Analysis_3.py
|
4bd351b07ba42c6db392d82564710e095f3ddc09
|
[] |
no_license
|
Khushali26/TwitterDataSentimentAnalysis
|
7fb0e604eef2980a054a5aefd0c8c8e0747e1abb
|
26b3f2a3d3f9a59686544b7ef80700bfbcb4f4d9
|
refs/heads/master
| 2020-04-01T03:08:18.277885
| 2018-12-10T00:30:58
| 2018-12-10T00:30:58
| 152,810,877
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,888
|
py
|
import numpy as np
import pandas as pd
import json
import pip
pip.main(['install','twitter'])
pip.main(['install','textblob'])
from textblob import TextBlob
# !python -m textblob.download_corpora  # notebook magic: run in IPython/Jupyter to fetch the TextBlob corpora
import twitter
from twitter import Twitter
from twitter import OAuth
from twitter import TwitterHTTPError
from twitter import TwitterStream
from pandas.io.json import json_normalize
ck = 'CP7fgUIajeNTjx2GWAOw8gJLn'
cs = 'EW8cDRlfKrF3D91n1OdwqZPtWs2AVy3MqFH7Zxm7usx3f9qkJT'
at = '498725176-adTcq6fMyqlzvEINcg8ujCxUT2f4TafNsLJFg2yx'
ats = 'q94CVXaaAmHXuhQqjL4b26Q5Vdl5lx5PJhQT8f4M6nvfm'
oauth = OAuth(at,ats,ck,cs)
api = Twitter(auth=oauth)
t_loc = api.trends.available()
loc_df = json_normalize(t_loc)
loc_df[(loc_df['countryCode']=='US') & loc_df['name'].str.contains('New')]
ny_trends = api.trends.place(_id = '2459115')
nydf = json_normalize(ny_trends,'trends')
nydf.head()
nydf.sort_values('tweet_volume',ascending=False).head(5)
q= 'StudentsStandUp'
df = pd.DataFrame()
mid = 0
for i in range(10):
if i==0:
search_result = api.search.tweets(q=q, count = 100)
else:
search_result = api.search.tweets(q=q, count = 100, max_id=mid)
dftemp =json_normalize(search_result,'statuses')
mid = dftemp['id'].min()
mid = mid - 1
df = df.append(dftemp, ignore_index= True)
df.shape
tweettext = df['text']
blob =TextBlob(tweettext[0])
list(blob.noun_phrases)
blob.tags
wordlist = pd.DataFrame()
for t in tweettext:
tx = TextBlob(t)
l = list(tx.noun_phrases)
if len(l)!=0:
wordlist = wordlist.append(l,ignore_index=True)
allworld = wordlist.groupby(0).size()
allworld
top20allworld = allworld.sort_values(0,ascending=False).head(20)
top20allworld.plot(kind='bar',title='Top 20 Tweets')
wordlist = pd.DataFrame()
for t in tweettext:
tx = TextBlob(t)
ww = []
for word,tag in tx.tags:
if tag in ('NN','NNS','NNP','NNPS'):
ww.append(word.lemmatize())
if len(ww) != 0:
wordlist = wordlist.append(ww, ignore_index=True)
allworld = wordlist.groupby(0).size()
allworld
top20allworld = allworld.sort_values(0,ascending=False).head(20)
top20allworld.plot(kind='bar',title='Top 20 Tweets')
pip.main(['install','newspaper3k'])
import newspaper
url = 'https://www.bloomberg.com/news/articles/2018-02-22/airbnb-is-adding-hotels-and-a-loyalty-program'
article = newspaper.Article(url)
article.download()
article.parse()
article.nlp()
article.title
article.summary
blob2 = TextBlob(article.text)
blob2.sentences[1]
wordlist = pd.DataFrame()
ssList=[]
for t in blob2.sentences:
ww=[]
for word, tag in t.tags:
if tag in ('NN','NNS','NNP','NNPS','VB','VBD','VBG','VBN','VBP','VBZ'):
ww.append(word.lemmatize())
ss = ' '.join(ww)
ssList.append(ss.lower())
wordlist = wordlist.append(ssList, ignore_index=True)
wordlist
len(blob2.sentences)
|
[
"noreply@github.com"
] |
Khushali26.noreply@github.com
|
1e4434b6a362482aede50fa79ed03e8a6e71be40
|
78c367a2231e41b1d69c4f484f94ce652b3e700d
|
/algorithm/facenet_wgan/generator.py
|
b84d5ba76783b91238d4131781cc885459697422
|
[] |
no_license
|
Mr-Yu-21/FIC-Facial-Image-Compression-
|
4dcdd8e803c3df0d248bbac20d7fa3fa741891e2
|
809c137cccec5b0790b3cf96a163f4de483026fe
|
refs/heads/main
| 2023-04-29T02:34:19.827637
| 2021-05-19T11:01:53
| 2021-05-19T11:01:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
import torch.nn as nn
import torch
from encoder import Encoder
from decoder import Decoder
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.encoder = Encoder()
self.decoder = Decoder()
def forward(self, x):
x = self.encoder(x)
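# Reshape the encoder output for the decoder: drop the singleton channel dim,
# then re-add two trailing spatial dims so the decoder receives an
# (N, C, 1, 1) tensor (shape assumption inferred from the calls below).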
x = torch.squeeze(x, 1)
x = torch.unsqueeze(x, 2)
x = torch.unsqueeze(x, 3)
x = self.decoder(x)
return x
|
[
"2993968012@qq.com"
] |
2993968012@qq.com
|
beec3f446371debd2b3c38ec8b3642d0c5a1514a
|
248c535f3612c646bccadecafdca649fd788bb1f
|
/.history/config_20210927034406.py
|
dbb1cfe87f74853e85d866bbe6c1b60da716e2cf
|
[
"MIT"
] |
permissive
|
GraceOswal/pitch-perfect
|
3b923e4de5fff1a405dcb54374a1ba0522232025
|
d781c6e0f55c11f2a5e5dceb952f6b2de3c47c3b
|
refs/heads/master
| 2023-08-16T01:42:18.742154
| 2021-10-01T06:59:11
| 2021-10-01T06:59:11
| 410,224,294
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
import os
from dotenv import load_dotenv as ld
ld()
class Config:
DEBUG = True
SECRET_KEY = os.environ.get('DATABASE_URL')
SQLALCHEMY_TRACK_MODIFICATIONS = False
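# Typical wiring for this Config class in a Flask app (illustrative sketch,
# not part of this snippet; assumes Flask is installed):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object(Config)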
|
[
"graceoswal88@gmail.com"
] |
graceoswal88@gmail.com
|
7c945093ff498c1f2f06405646c3c67c9a86d5a5
|
a4930cecb0da288d745d7e16233ee7e9a08cf9d9
|
/lab_03/CDA.py
|
bc2c433b5c8d0184a53d8ab3a56d334e7624a4a3
|
[
"MIT"
] |
permissive
|
Winterpuma/bmstu_CG
|
514b8e97a03a517f718edd17a49f3033ab292623
|
59a0793aed8e9e8d4f435cb2e8263d610dfac688
|
refs/heads/master
| 2020-04-23T08:32:15.376453
| 2019-12-30T12:52:55
| 2019-12-30T12:52:55
| 171,040,446
| 26
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
# Digital differential analyzer (DDA) line-drawing algorithm
#from math import round
def cda_test(ps, pf):
dx = abs(pf[0] - ps[0])
dy = abs(pf[1] - ps[1])
if dx > dy:
L = dx
else:
L = dy
sx = (pf[0] - ps[0]) / L
sy = (pf[1] - ps[1]) / L
x = ps[0]
y = ps[1]
while abs(x - pf[0]) > 1 or abs(y - pf[1]) > 1:
x += sx
y += sy
def draw_line_cda(canvas, ps, pf, fill):
dx = abs(pf[0] - ps[0])
dy = abs(pf[1] - ps[1])
# for stairs counting
if dx:
tg = dy / dx
else:
tg = 0
# steep - max growth
if dx > dy:
steep = dx
else:
steep = dy
sx = (pf[0] - ps[0]) / steep # step of x
sy = (pf[1] - ps[1]) / steep # step of y
# set line to start
x = ps[0]
y = ps[1]
stairs = []
st = 1
while abs(x - pf[0]) > 1 or abs(y - pf[1]) > 1:
canvas.create_line(round(x), round(y), round(x + 1), round(y + 1), fill=fill)
if (abs(int(x) - int(x + sx)) >= 1 and tg > 1) or (abs(int(y) - int(y + sy)) >= 1 >= tg):
stairs.append(st)
st = 0
else:
st += 1
x += sx
y += sy
if st:
stairs.append(st)
return stairs
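# Minimal usage sketch (illustrative, not part of the original lab): draw one
# line on a Tk canvas and print the recorded stair lengths.
if __name__ == '__main__':
    import tkinter as tk
    root = tk.Tk()
    canvas = tk.Canvas(root, width=200, height=200, bg='white')
    canvas.pack()
    print(draw_line_cda(canvas, (10, 10), (180, 90), 'black'))
    root.mainloop()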
|
[
"32524078+Winterpuma@users.noreply.github.com"
] |
32524078+Winterpuma@users.noreply.github.com
|
b2ac17820d9376270ad9e59b52048295c486afb4
|
e23407eea7eefee5065fe19aabf028b3f29b64af
|
/plenum/server/consensus/primary_selector.py
|
1078366cb6c09d98b6de2fb6cb108a47b8fb05c7
|
[
"Apache-2.0"
] |
permissive
|
kkc90/indy-plenum
|
b4731b776b4231d1f725131945143e2395defcf9
|
29adcb033487138e1711898104e95fbbb527f8b2
|
refs/heads/master
| 2020-09-07T12:52:53.510544
| 2019-11-08T15:22:10
| 2019-11-08T15:22:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,716
|
py
|
from abc import ABCMeta, abstractmethod
from typing import List
from common.exceptions import LogicError
from stp_core.common.log import getlogger
logger = getlogger()
class PrimariesSelector(metaclass=ABCMeta):
@abstractmethod
def select_primaries(self, view_no: int, instance_count: int, validators: List[str]) -> List[str]:
pass
class RoundRobinPrimariesSelector(PrimariesSelector):
def select_primaries(self, view_no: int, instance_count: int, validators: List[str]) -> List[str]:
# Select primaries for current view_no
if instance_count == 0:
return []
# Build a set of names of primaries, it is needed to avoid
# duplicates of primary nodes for different replicas.
primaries = []
master_primary = None
for i in range(instance_count):
if i == 0:
primary_name = self._next_primary_node_name_for_master(view_no, validators)
master_primary = primary_name
else:
primary_name = self._next_primary_node_name_for_backup(master_primary, validators, primaries)
primaries.append(primary_name)
if len(primaries) != instance_count:
raise LogicError('instances inconsistency')
if len(primaries) != len(set(primaries)):
raise LogicError('repeating instances')
return primaries
def _next_primary_node_name_for_master(self, view_no: int, validators: List[str]) -> str:
"""
Returns name and rank of the next node which is supposed to be a new Primary on master instance.
In fact it is not round-robin on this abstraction layer as currently the primary of master instance is
pointed directly depending on view number, instance id and total
number of nodes.
But since the view number is incremented by 1 before primary selection
then current approach may be treated as round robin.
"""
return validators[view_no % len(validators)]
def _next_primary_node_name_for_backup(self, master_primary: str, validators: List[str],
primaries: List[str]) -> str:
"""
Returns name of the next node which
is supposed to be a new Primary for backup instance in round-robin
fashion starting from primary of master instance.
"""
master_primary_rank = validators.index(master_primary)
nodes_count = len(validators)
rank = (master_primary_rank + 1) % nodes_count
name = validators[rank]
while name in primaries:
rank = (rank + 1) % nodes_count
name = validators[rank]
return name
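# Illustrative example (not from the plenum test-suite): with four validators,
# the master primary is picked round-robin by view number and each backup
# primary is the next distinct node after it.
if __name__ == '__main__':
    selector = RoundRobinPrimariesSelector()
    print(selector.select_primaries(view_no=3, instance_count=2,
                                    validators=['Alpha', 'Beta', 'Gamma', 'Delta']))
    # -> ['Delta', 'Alpha']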
|
[
"alexander.sherbakov@dsr-corporation.com"
] |
alexander.sherbakov@dsr-corporation.com
|
0c097470ff806797d2dd7234e512c87b70fc4f6c
|
7db470ac37a2dd717ce41db19d6949a91109efb5
|
/Code/share/gps/support/core/lal_utils/lal_view.py
|
4484c1d64c800fee5522e02909c644190236ad11
|
[] |
no_license
|
AaronC98/PlaneSystem
|
63e7d486ec8ca8ff1e234193ac1ebaf5cb348b22
|
5b91c1816aadfa3a08bba730b8dfd3f6a0785463
|
refs/heads/master
| 2023-04-26T19:30:01.098578
| 2021-06-03T19:55:05
| 2021-06-03T19:55:05
| 373,625,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,539
|
py
|
"""A tree view to explore Ada/SPARK code using libadalang.
"""
import GPS
import os
import libadalang
from modules import Module
from gi.repository import Gtk, Gdk, GLib, Pango
from gps_utils import make_interactive
COL_LABEL = 0
COL_FOREGROUND = 1
COL_START_LINE = 2
COL_START_COLUMN = 3
COL_END_LINE = 4
COL_END_COLUMN = 5
class LAL_View_Widget():
"""The widget for the Libadalang view"""
def __init__(self):
self.box = Gtk.VBox()
self.compact_mode = True
# The view has a compact mode (the default) and a full tree mode.
# In full tree mode, the whole tree is displayed and refreshed when
# the buffer is modified. In compact mode, The tree is refreshed
# everytime the cursor location changes, and only shows the current
# tree path.
# A label to push diagnostics messages and token info
self.message_label = Gtk.Label()
self.message_label.set_halign(Gtk.Align.START)
self.message_label.set_ellipsize(Pango.EllipsizeMode.END)
# The model: see COL_* constants above
self.store = Gtk.TreeStore(str, Gdk.RGBA, int, int, int, int)
# Initialize the tree view
self.view = Gtk.TreeView(self.store)
self.node_col = Gtk.TreeViewColumn("Node")
cell = Gtk.CellRendererText()
self.node_col.pack_start(cell, True)
self.node_col.add_attribute(cell, "markup", COL_LABEL)
self.node_col.add_attribute(cell, "foreground-rgba", COL_FOREGROUND)
self.view.append_column(self.node_col)
self.view.connect("button_press_event", self._on_view_button_press)
full_mode_toggle = Gtk.CheckButton("full tree (slow)")
full_mode_toggle.connect("toggled", self._full_mode_toggled)
# Pack things together
label_box = Gtk.HBox()
label_box.pack_start(self.message_label, True, True, 3)
label_box.pack_start(full_mode_toggle, False, False, 3)
self.box.pack_start(label_box, False, False, 3)
scroll = Gtk.ScrolledWindow()
scroll.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
scroll.add(self.view)
self.box.pack_start(scroll, True, True, 3)
# The contextual menu
self.menu = Gtk.Menu()
item = Gtk.MenuItem()
item.set_label("view in Python Console")
item.connect("activate", self._on_view_in_python_console)
self.menu.append(item)
self.menu.show_all()
# This is the current location
self.file = None
self.line = 1
self.column = 1
self.unit = None # The current successfully loaded AU, if any
self.token = None # The current token, if any
# The list of iters that are currently highlighted
self.highlighted_iters = []
# The colors to highlight the tree with
self.default_fg = Gdk.RGBA()
self.highlight_fg = Gdk.RGBA()
# Initialize the colors
self.preferences_changed()
# Initialize the contents
self.refresh()
def _full_mode_toggled(self, b):
"""React to the toggle of the full mode button"""
self.compact_mode = not b.get_active()
self.refresh()
b = GPS.EditorBuffer.get(open=False)
if b:
cursor = b.current_view().cursor()
self.show_current_location(cursor.line(), cursor.column())
def _selected_row(self):
"""Return the selected row in self, if any"""
_, paths = self.view.get_selection().get_selected_rows()
if not paths:
return None
it = self.store.get_iter(paths[0])
return self.store[it]
def _on_view_in_python_console(self, _):
"""Contextual menu 'view in Python console'"""
row = self._selected_row()
if not row:
return False
GPS.execute_action("open Python")
GPS.Console("Python").add_input(
"node = lal_utils.node('{}', {}, {})".format(
row[COL_LABEL].split(" ")[0][3:-4],
row[COL_START_LINE],
row[COL_START_COLUMN]))
def _on_view_button_press(self, _, event):
"""React to a button_press on the view.
"""
if event.button == 3:
# On this button, raise the contextual menu
self.menu.popup(None, None, None, None, 3, 0)
return False
if event.get_click_count() == (True, 2):
# On a double click, select the node in the editor
buf = GPS.EditorBuffer.get(open=False)
if not buf:
return False
row = self._selected_row()
if not row:
return False
begin_loc = buf.at(row[COL_START_LINE], row[COL_START_COLUMN])
# Scroll to the location
buf.current_view().goto(begin_loc)
# Select the current node
buf.select(begin_loc,
buf.at(row[COL_END_LINE], row[COL_END_COLUMN]))
return False
def preferences_changed(self):
"""Apply the contents of the preferences"""
prev = (self.default_fg, self.highlight_fg)
default = GPS.Preference("Src-Editor-Reference-Style").get()
self.default_fg.parse(default.split('@')[1])
highlight = GPS.Preference("Src-Editor-Keywords-Variant").get()
self.highlight_fg.parse(highlight.split('@')[1])
if prev != (self.default_fg, self.highlight_fg):
self.show_current_location(self.line, self.column)
def _add_node(self, parent, node):
"""Add a node as child of parent. parent can be None"""
if not node:
return
start_line = node.sloc_range.start.line
start_column = node.sloc_range.start.column
end_line = node.sloc_range.end.line
end_column = node.sloc_range.end.column
if not self.compact_mode or (
(start_line, start_column) <=
(self.line, self.column) <=
(end_line, end_column)):
it = self.store.append(parent)
text = "<b>{}</b>{}".format(
# Uncomment this for a representation useful for debug:
# GLib.markup_escape_text(repr(node)),
node.kind_name,
" {}".format(GLib.markup_escape_text(node.text))
if start_line == end_line else "")
self.store[it] = [
text,
self.default_fg,
start_line,
start_column,
end_line,
end_column,
]
for n in node.children:
self._add_node(it, n)
def _traverse_and_highlight(self, it, line, column):
"""Traverse the subtree starting at iter it, and highlight the
nodes that encompass the location at line/column.
Return the deepest iter found that matches.
"""
lowest_found = None
child = self.store.iter_children(it)
while child:
row = self.store[child]
if row[COL_START_LINE] > line:
# We are past the point where the nodes will match the
# location: we can stop traversing.
return lowest_found
if ((row[COL_START_LINE], row[COL_START_COLUMN]) <=
(line, column) <=
(row[COL_END_LINE], row[COL_END_COLUMN])):
# This node encompasses the location: highlight it...
lowest_found = child
self.highlighted_iters.append(child)
row[COL_FOREGROUND] = self.highlight_fg
# ... and look below it for more
below = self._traverse_and_highlight(child, line, column)
if below:
lowest_found = below
child = self.store.iter_next(child)
return lowest_found
def show_current_location(self, line, column):
"""Highlight the given location in the tree and scroll to it"""
if not self.unit:
return
self.line = line
self.column = column
if self.compact_mode:
self.store.clear()
self._add_node(None, self.unit.root)
self.view.expand_all()
else:
# Clear all previous highlighting
for j in self.highlighted_iters:
self.store[j][COL_FOREGROUND] = self.default_fg
self.highlighted_iters = []
lowest_found = self._traverse_and_highlight(None, line, column)
# If we have finished iterating, scroll to the lowest found
if lowest_found:
self.view.scroll_to_cell(
self.store.get_path(lowest_found),
self.node_col, True, 0.5, 0.5)
# Display the current token in the label
self.token = self.unit.lookup_token(libadalang.Sloc(line, column))
if self.token:
self.message_label.set_markup(
"Token: <b>{}</b> {}".format(self.token.kind,
self.token.text.strip()
if self.token.text else ''))
else:
self.message_label.set_text("")
def refresh(self):
"""Refresh the contents of the view"""
buf = GPS.EditorBuffer.get(open=False)
if not buf:
return
self.view.set_model(None)
self.store.clear()
self.highlighted_iters = []
self.file = buf.file()
if not self.file.language().lower() == "ada":
return
self.store.clear()
unit = buf.get_analysis_unit()
if unit.diagnostics:
self.message_label.set_text(
"\n".join([str(d) for d in unit.diagnostics]))
self.unit = None
return
else:
self.unit = unit
self.message_label.set_text("{} loaded ok".format(
os.path.basename(buf.file().name())))
if self.compact_mode:
# In compact mode, the view is regenerated when we change
# locations
pass
else:
# In full mode, display the whole tree now
self._add_node(None, unit.root)
self.view.set_model(self.store)
self.view.expand_all()
class LAL_View(Module):
""" A GPS module, providing the libadalang view """
view_title = "Libadalang"
mdi_position = GPS.MDI.POSITION_RIGHT
mdi_group = GPS.MDI.GROUP_DEBUGGER_STACK
def __init__(self):
self.widget = None
def setup(self):
# Create an "open Libadalang" action
make_interactive(
self.get_view,
category="Views",
name="open Libadalang")
GPS.Hook("location_changed").add_debounce(
self.location_changed_debounced)
def preferences_changed(self, name='', pref=None):
if self.widget:
self.widget.preferences_changed()
def location_changed_debounced(self, _, file, line, column):
if self.widget:
if file != self.widget.file:
self.widget.refresh()
self.widget.show_current_location(line, column)
def buffer_edited(self, file):
if self.widget:
self.widget.refresh()
def on_view_destroy(self):
self.widget = None
def create_view(self):
self.widget = LAL_View_Widget()
return self.widget.box
|
[
"46718011+AaronC98@users.noreply.github.com"
] |
46718011+AaronC98@users.noreply.github.com
|
9e243d91dbc7d98392a1a3a667e36c2efaf787a6
|
cb68601bebd3ce06128e20bf24a553cd666bd1e5
|
/0x0A-python-inheritance/3-is_kind_of_class.py
|
de160f572d73cb0fe1c7dd945dc3e10a12e93e1e
|
[] |
no_license
|
pgomezboza/holbertonschool-higher_level_programming
|
11755d9823d828518f3f4844b68e7a0cc92be0a7
|
47f04a767fa26c9ecc30fa43fd8e9681cfa60147
|
refs/heads/main
| 2023-08-21T20:30:14.043327
| 2021-10-25T00:50:29
| 2021-10-25T00:50:29
| 361,944,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
#!/usr/bin/python3
"""
Module: 3-is_kind_of_class
"""
def is_kind_of_class(obj, a_class):
"""
finds if obj is an instance of a_class or a class
inherited from a_class.
args:
obj: object to inspect
a_class: class to check against
return: true or false.
"""
if issubclass(type(obj), a_class):
return True
return False
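# Illustrative check (not part of the original task): bool is a subclass of
# int, so the first two calls print True and the last prints False.
if __name__ == "__main__":
    print(is_kind_of_class(True, int))
    print(is_kind_of_class(1, object))
    print(is_kind_of_class(1, str))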
|
[
"pgomezboza@gmail.com"
] |
pgomezboza@gmail.com
|
47dd7590646cb7dec2e36e3857f155b2538ccc1b
|
0df898bf192b6ad388af160ecbf6609445c34f96
|
/middleware/backend/app/alembic/versions/20201110_072326_.py
|
10c7da8e75d56b8f336ce266199ba95ee4a6fdf3
|
[] |
no_license
|
sasano8/magnet
|
a5247e6eb0a7153d6bbca54296f61194925ab3dc
|
65191c877f41c632d29133ebe4132a0bd459f752
|
refs/heads/master
| 2023-01-07T10:11:38.599085
| 2020-11-13T02:42:41
| 2020-11-13T02:42:41
| 298,334,432
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 812
|
py
|
"""empty message
Revision ID: 20201110_072326
Revises: 20201110_072235
Create Date: 2020-11-10 07:23:27.554149
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '20201110_072326'
down_revision = '20201110_072235'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('crypto_trade_results', 'fact_profit',
existing_type=sa.NUMERIC(),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('crypto_trade_results', 'fact_profit',
existing_type=sa.NUMERIC(),
nullable=True)
# ### end Alembic commands ###
|
[
"y-sasahara@ys-method.com"
] |
y-sasahara@ys-method.com
|
2dd94770181038d1043c10188f896ea0d9257387
|
437a375d600b4847362946cde1bccb531a92d6aa
|
/park/park/urls.py
|
ebfed66868b36567a741bf2a052954f13a89425e
|
[] |
no_license
|
JoshuaRosenfeld/park
|
f99ff79077397cb989438a002afba1665e336a6b
|
fea21c9a4f3df157f3cc26cdf21fc410e8a45d24
|
refs/heads/master
| 2021-01-10T16:37:24.252695
| 2015-12-07T17:07:31
| 2015-12-07T17:07:31
| 45,141,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views as auth_views
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/login/$', auth_views.login),
url(r'^accounts/logout/$', auth_views.logout,),
url(r'^spots/', include('spots.urls', namespace='spots')),
]
|
[
"jlr0802@gmail.com"
] |
jlr0802@gmail.com
|
8bf23c064642fb7e8106648faf05340c5abe2751
|
b2797dc00ae1fed53e50f09cae6618c6a1612b22
|
/rlpyt/models/dqn/atari_iqn_model.py
|
f974b12105a6b95008074ef66f0d38db8dd5d63c
|
[] |
no_license
|
frederikschubert/ARA
|
c0bc6eaab2cbced2ee3bf09f950f15a1761f2672
|
3af30d7b03d3a393de659ab0f281060a7a7cbad3
|
refs/heads/master
| 2023-08-15T00:50:48.451363
| 2021-09-27T13:26:34
| 2021-09-27T13:26:34
| 385,935,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 898
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from rlpyt.models.quantile_conv2d_model import QuantileConv2dModel
from rlpyt.utils.tensor import infer_leading_dims, restore_leading_dims
class AtariIqnModel(QuantileConv2dModel):
def forward(self, observation, tau, prev_action=None, prev_reward=None):
img = observation.type(torch.float) # Expect torch.uint8 inputs
img = img / 255.0 # From [0-255] to [0-1], in place.
# Infer (presence of) leading dimensions: [T,B], [B], or [].
lead_dim, T, B, img_shape = infer_leading_dims(img, 3)
quantile_values = super().forward(
img.view(T * B, *img_shape),
tau=tau,
prev_action=prev_action,
prev_reward=prev_reward,
)
quantile_values = restore_leading_dims(quantile_values, lead_dim, T, B)
return quantile_values
|
[
"frederik.schubert@mailbox.org"
] |
frederik.schubert@mailbox.org
|
d58da21638423d026b507688d69aaedd6ec2c7b2
|
f572f83a5ba79469d47f4b018aa1f99c47e23593
|
/train_code/nn_dropout.py
|
cfaaef0bd1b23636dfe5203f15b507693203638d
|
[] |
no_license
|
Seongmun-Hong/Deeptector
|
89d2017c6b9838e1dc5853c782a786c934b31e33
|
6f8d090f85833b24b011f9f244bfea1063673565
|
refs/heads/master
| 2021-04-09T13:26:55.954155
| 2019-11-26T05:35:34
| 2019-11-26T05:35:34
| 125,702,009
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,403
|
py
|
# Lab 10 MNIST and Dropout
import tensorflow as tf
import numpy as np
import random
# import matplotlib.pyplot as plt
tf.set_random_seed(777) # reproducibility
xy = np.loadtxt('training_data4.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1]
y_data = xy[:, -1]
# number of output classes
nb_classes = 16 # 1:punch_l 2:punch_r 3:punch_l2 4:punch_r2 5:hold
X = tf.placeholder(tf.float32, [None, 864])
Y = tf.placeholder(tf.int32, [None, nb_classes]) # 1:punch_l 2:punch_r 3:punch_l2 4:punch_r2 5:hold
y_data = y_data.astype(int)
one_hot_targets = np.eye(nb_classes)[y_data]
print(one_hot_targets)
#Y_one_hot = tf.one_hot(y_data, nb_classes) # one hot
#pre = np.array(Y_one_hot, dtype=np.float32)
#print("one_hot", Y_one_hot)
#Y_one_hot = tf.reshape(Y_one_hot, [-1, nb_classes])
#print("reshape", Y_one_hot)
#print(x_data.shape, one_hot_targets.shape)
W = tf.Variable(tf.random_normal([864, nb_classes]), name='weight')
b = tf.Variable(tf.random_normal([nb_classes]), name='bias')
# parameters
learning_rate = 0.0001
training_epochs = 16
batch_size = 5
total_batch = int(640 / batch_size)
# dropout (keep_prob) rate 0.7 on training, but should be 1 for testing
keep_prob = tf.placeholder(tf.float32)
# weights & bias for nn layers
W1 = tf.get_variable("W1", shape=[864, 512],
initializer=tf.contrib.layers.xavier_initializer())
b1 = tf.Variable(tf.random_normal([512]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
L1 = tf.nn.dropout(L1, keep_prob=keep_prob)
'''
W2 = tf.get_variable("W2", shape=[512, 512],
initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.Variable(tf.random_normal([512]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
L2 = tf.nn.dropout(L2, keep_prob=keep_prob)
W3 = tf.get_variable("W3", shape=[512, 512],
initializer=tf.contrib.layers.xavier_initializer())
b3 = tf.Variable(tf.random_normal([512]))
L3 = tf.nn.relu(tf.matmul(L2, W3) + b3)
L3 = tf.nn.dropout(L3, keep_prob=keep_prob)
W4 = tf.get_variable("W4", shape=[512, 512],
initializer=tf.contrib.layers.xavier_initializer())
b4 = tf.Variable(tf.random_normal([512]))
L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)
L4 = tf.nn.dropout(L4, keep_prob=keep_prob)
'''
W5 = tf.get_variable("W5", shape=[512, nb_classes],
initializer=tf.contrib.layers.xavier_initializer())
b5 = tf.Variable(tf.random_normal([nb_classes]))
hypothesis = tf.matmul(L1, W5) + b5
# define cost/loss & optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# initialize
sess = tf.Session()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
# train my model
for epoch in range(50):
avg_cost = 0
for i in range(640):
feed_dict = {X: x_data, Y: one_hot_targets, keep_prob: 0.7}
c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
avg_cost += c / total_batch
print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(c))
saver.save(sess, 'model-softmax3.ckpt')
print('Learning Finished!')
# Test model and check accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
X: x_data, Y: one_hot_targets, keep_prob: 1}))
a=640
b=0
# Get one and predict
for i in range(640):
print("Label: ", sess.run(tf.argmax(one_hot_targets[i:i + 1], 1)))
result = sess.run(tf.argmax(hypothesis, 1), feed_dict={X: x_data[i:i + 1], keep_prob: 1})
print("Predict : ", result)
if(sess.run(tf.argmax(one_hot_targets[i:i + 1], 1)) == result):
b=b+1
print("Acc : ", (b/a*100))
'''
# plt.imshow(mnist.test.images[r:r + 1].
# reshape(28, 28), cmap='Greys', interpolation='nearest')
# plt.show()
Epoch: 0001 cost = 0.447322626
Epoch: 0002 cost = 0.157285590
Epoch: 0003 cost = 0.121884535
Epoch: 0004 cost = 0.098128681
Epoch: 0005 cost = 0.082901778
Epoch: 0006 cost = 0.075337573
Epoch: 0007 cost = 0.069752543
Epoch: 0008 cost = 0.060884363
Epoch: 0009 cost = 0.055276413
Epoch: 0010 cost = 0.054631256
Epoch: 0011 cost = 0.049675195
Epoch: 0012 cost = 0.049125314
Epoch: 0013 cost = 0.047231930
Epoch: 0014 cost = 0.041290121
Epoch: 0015 cost = 0.043621063
Learning Finished!
Accuracy: 0.9804
'''
|
[
"hsm63746244@gmail.com"
] |
hsm63746244@gmail.com
|
270343a6f74c1670971833bb68841c7b2fbf8908
|
6328ce8d58260fcdd81994c35a2a8aea0fcfd5f7
|
/kglib/cross_correlation/Normalized_Xcorr.py
|
6c92542b07d8429ee1991a4151ef8d07393c523c
|
[
"MIT"
] |
permissive
|
kgullikson88/gullikson-scripts
|
6e03aafa069d357f5dc4d29b4cd02a20c1f9a098
|
8a9f00a6977dad8d4477eef1d664fd62e9ecab75
|
refs/heads/master
| 2021-01-10T08:19:46.440979
| 2016-04-19T21:22:15
| 2016-04-19T21:22:15
| 49,740,211
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,840
|
py
|
from __future__ import print_function, division, absolute_import
import numpy as np
from scipy.ndimage import convolve
# Try and use the faster Fourier transform functions from the anfft module if
# available
try:
import anfft as _anfft
# measure == True for self-optimisation of repeat Fourier transforms of
# similarly-shaped arrays
def fftn(A, shape=None):
if shape != None:
A = _checkffttype(A)
A = procrustes(A, target=shape, side='after', padval=0)
return _anfft.fftn(A, measure=True)
def ifftn(B, shape=None):
if shape != None:
B = _checkffttype(B)
B = procrustes(B, target=shape, side='after', padval=0)
return _anfft.ifftn(B, measure=True)
def _checkffttype(C):
# make sure input arrays are typed correctly for FFTW
if C.dtype == 'complex256':
# the only incompatible complex type --> complex64
C = np.complex128(C)
elif C.dtype not in ['float32', 'float64', 'complex64', 'complex128']:
# any other incompatible type --> float64
C = np.float64(C)
return C
# Otherwise use the normal scipy fftpack ones instead (~2-3x slower!)
except ImportError:
print(
"Module 'anfft' (FFTW Python bindings) could not be imported.\n"
"To install it, try running 'easy_install anfft' from the terminal.\n"
"Falling back on the slower 'fftpack' module for ND Fourier transforms.")
from scipy.fftpack import fftn, ifftn
class TemplateMatch(object):
"""
N-dimensional template search by normalized cross-correlation or sum of
squared differences.
Arguments:
------------------------
template The template to search for
method The search method. Can be "ncc", "ssd" or
"both". See documentation for norm_xcorr and
fast_ssd for more details.
Example use:
------------------------
from scipy.misc import lena
from matplotlib.pyplot import subplots
image = lena()
template = image[240:281,240:281]
TM = TemplateMatch(template,method='both')
ncc,ssd = TM(image)
nccloc = np.nonzero(ncc == ncc.max())
ssdloc = np.nonzero(ssd == ssd.min())
fig,[[ax1,ax2],[ax3,ax4]] = subplots(2,2,num='ND Template Search')
ax1.imshow(image,interpolation='nearest')
ax1.set_title('Search image')
ax2.imshow(template,interpolation='nearest')
ax2.set_title('Template')
ax3.hold(True)
ax3.imshow(ncc,interpolation='nearest')
ax3.plot(nccloc[1],nccloc[0],'w+')
ax3.set_title('Normalized cross-correlation')
ax4.hold(True)
ax4.imshow(ssd,interpolation='nearest')
ax4.plot(ssdloc[1],ssdloc[0],'w+')
ax4.set_title('Sum of squared differences')
fig.tight_layout()
fig.canvas.draw()
"""
def __init__(self, template, method='ssd'):
if method not in ['ncc', 'ssd', 'both']:
raise Exception('Invalid method "%s". ' \
'Valid methods are "ncc", "ssd" or "both"'
% method)
self.template = template
self.method = method
def __call__(self, a):
if a.ndim != self.template.ndim:
raise Exception('Input array must have the same number ' \
'of dimensions as the template (%i)'
% self.template.ndim)
if self.method == 'ssd':
return self.fast_ssd(self.template, a, trim=True)
elif self.method == 'ncc':
return norm_xcorr(self.template, a, trim=True)
elif self.method == 'both':
return norm_xcorr(self.template, a, trim=True, do_ssd=True)
def norm_xcorr(t, a, method=None, trim=True, do_ssd=False):
"""
Fast normalized cross-correlation for n-dimensional arrays
Inputs:
----------------
t The template. Must have at least 2 elements, which
cannot all be equal.
a The search space. Its dimensionality must match that of
the template.
method The convolution method to use when computing the
cross-correlation. Can be either 'direct', 'fourier' or
None. If method == None (default), the convolution time
is estimated for both methods and the best one is chosen
for the given input array sizes.
trim If True (default), the output array is trimmed down to
the size of the search space. Otherwise, its size will
be (f.shape[dd] + t.shape[dd] -1) for dimension dd.
do_ssd If True, the sum of squared differences between the
template and the search image will also be calculated.
It is very efficient to calculate normalized
cross-correlation and the SSD simultaneously, since they
require many of the same quantities.
Output:
----------------
nxcorr An array of cross-correlation coefficients, which may
vary from -1.0 to 1.0.
[ssd] [Returned if do_ssd == True. See fast_ssd for details.]
Wherever the search space has zero variance under the template,
normalized cross-correlation is undefined. In such regions, the
correlation coefficients are set to zero.
References:
Hermosillo et al 2002: Variational Methods for Multimodal Image
Matching, International Journal of Computer Vision 50(3),
329-343, 2002
<http://www.springerlink.com/content/u4007p8871w10645/>
Lewis 1995: Fast Template Matching, Vision Interface,
p.120-123, 1995
<http://www.idiom.com/~zilla/Papers/nvisionInterface/nip.html>
<http://en.wikipedia.org/wiki/Cross-correlation#Normalized_cross-correlation>
Alistair Muldal
Department of Pharmacology
University of Oxford
<alistair.muldal@pharm.ox.ac.uk>
Sept 2012
"""
if t.size < 2:
raise Exception('Invalid template')
if t.size > a.size:
raise Exception('The input array must be smaller than the template')
std_t, mean_t = np.std(t), np.mean(t)
if std_t == 0:
raise Exception('The values of the template must not all be equal')
t = np.float64(t)
a = np.float64(a)
# output dimensions of xcorr need to match those of local_sum
outdims = np.array([a.shape[dd] + t.shape[dd] - 1 for dd in xrange(a.ndim)])
# would it be quicker to convolve in the spatial or frequency domain? NB
# this is not very accurate since the speed of the Fourier transform
# varies quite a lot with the output dimensions (e.g. 2-radix case)
if method == None:
spatialtime, ffttime = get_times(t, a, outdims)
if spatialtime < ffttime:
method = 'spatial'
else:
method = 'fourier'
if method == 'fourier':
# # in many cases, padding the dimensions to a power of 2
# # *dramatically* improves the speed of the Fourier transforms
# # since it allows using radix-2 FFTs
# fftshape = [nextpow2(ss) for ss in a.shape]
# Fourier transform of the input array and the inverted template
# af = fftn(a,shape=fftshape)
# tf = fftn(ndflip(t),shape=fftshape)
af = fftn(a, shape=outdims)
tf = fftn(ndflip(t), shape=outdims)
# 'non-normalized' cross-correlation
xcorr = np.real(ifftn(tf * af))
else:
xcorr = convolve(a, t, mode='constant', cval=0)
# local linear and quadratic sums of input array in the region of the
# template
ls_a = local_sum(a, t.shape)
ls2_a = local_sum(a ** 2, t.shape)
# now we need to make sure xcorr is the same size as ls_a
xcorr = procrustes(xcorr, ls_a.shape, side='both')
# local standard deviation of the input array
ls_diff = ls2_a - (ls_a ** 2) / t.size
ls_diff = np.where(ls_diff < 0, 0, ls_diff)
sigma_a = np.sqrt(ls_diff)
# standard deviation of the template
sigma_t = np.sqrt(t.size - 1.) * std_t
# denominator: product of standard deviations
denom = sigma_t * sigma_a
# numerator: local mean corrected cross-correlation
numer = (xcorr - ls_a * mean_t)
# sigma_t cannot be zero, so wherever the denominator is zero, this must
# be because sigma_a is zero (and therefore the normalized cross-
# correlation is undefined), so set nxcorr to zero in these regions
tol = np.sqrt(np.finfo(denom.dtype).eps)
nxcorr = np.where(denom < tol, 0, numer / denom)
# if any of the coefficients are outside the range [-1 1], they will be
# unstable to small variance in a or t, so set them to zero to reflect
# the undefined 0/0 condition
nxcorr = np.where(np.abs(nxcorr - 1.) > np.sqrt(np.finfo(nxcorr.dtype).eps), nxcorr, 0)
# calculate the SSD if requested
if do_ssd:
# quadratic sum of the template
tsum2 = np.sum(t ** 2.)
# SSD between template and image
ssd = ls2_a + tsum2 - 2. * xcorr
# normalise to between 0 and 1
ssd -= ssd.min()
ssd /= ssd.max()
if trim:
nxcorr = procrustes(nxcorr, a.shape, side='both')
ssd = procrustes(ssd, a.shape, side='both')
return nxcorr, ssd
else:
if trim:
nxcorr = procrustes(nxcorr, a.shape, side='both')
return nxcorr
def fast_ssd(t, a, method=None, trim=True):
"""
Fast sum of squared differences (SSD block matching) for n-dimensional
arrays
Inputs:
----------------
t The template. Must have at least 2 elements, which
cannot all be equal.
a The search space. Its dimensionality must match that of
the template.
method The convolution method to use when computing the
cross-correlation. Can be either 'direct', 'fourier' or
None. If method == None (default), the convolution time
is estimated for both methods and the best one is chosen
for the given input array sizes.
trim If True (default), the output array is trimmed down to
the size of the search space. Otherwise, its size will
be (f.shape[dd] + t.shape[dd] -1) for dimension dd.
Output:
----------------
ssd An array containing the sum of squared differences
between the image and the template, with the values
normalized in the range -1.0 to 1.0.
Wherever the search space has zero variance under the template,
normalized cross-correlation is undefined. In such regions, the
correlation coefficients are set to zero.
References:
Hermosillo et al 2002: Variational Methods for Multimodal Image
Matching, International Journal of Computer Vision 50(3),
329-343, 2002
<http://www.springerlink.com/content/u4007p8871w10645/>
Lewis 1995: Fast Template Matching, Vision Interface,
p.120-123, 1995
<http://www.idiom.com/~zilla/Papers/nvisionInterface/nip.html>
Alistair Muldal
Department of Pharmacology
University of Oxford
<alistair.muldal@pharm.ox.ac.uk>
Sept 2012
"""
if t.size < 2:
raise Exception('Invalid template')
if t.size > a.size:
raise Exception('The input array must be smaller than the template')
std_t, mean_t = np.std(t), np.mean(t)
if std_t == 0:
raise Exception('The values of the template must not all be equal')
# output dimensions of xcorr need to match those of local_sum
outdims = np.array([a.shape[dd] + t.shape[dd] - 1 for dd in xrange(a.ndim)])
# would it be quicker to convolve in the spatial or frequency domain? NB
# this is not very accurate since the speed of the Fourier transform
# varies quite a lot with the output dimensions (e.g. 2-radix case)
if method == None:
spatialtime, ffttime = get_times(t, a, outdims)
if spatialtime < ffttime:
method = 'spatial'
else:
method = 'fourier'
if method == 'fourier':
# # in many cases, padding the dimensions to a power of 2
# # *dramatically* improves the speed of the Fourier transforms
# # since it allows using radix-2 FFTs
# fftshape = [nextpow2(ss) for ss in a.shape]
# Fourier transform of the input array and the inverted template
# af = fftn(a,shape=fftshape)
# tf = fftn(ndflip(t),shape=fftshape)
af = fftn(a, shape=outdims)
tf = fftn(ndflip(t), shape=outdims)
# 'non-normalized' cross-correlation
xcorr = np.real(ifftn(tf * af))
else:
xcorr = convolve(a, t, mode='constant', cval=0)
# quadratic sum of the template
tsum2 = np.sum(t ** 2.)
# local quadratic sum of input array in the region of the template
ls2_a = local_sum(a ** 2, t.shape)
# now we need to make sure xcorr is the same size as ls2_a
xcorr = procrustes(xcorr, ls2_a.shape, side='both')
# SSD between template and image
ssd = ls2_a + tsum2 - 2. * xcorr
# normalise to between 0 and 1
ssd -= ssd.min()
ssd /= ssd.max()
if trim:
ssd = procrustes(ssd, a.shape, side='both')
return ssd
def local_sum(a, tshape):
"""For each element in an n-dimensional input array, calculate
the sum of the elements within a surrounding region the size of
the template"""
# zero-padding
a = ndpad(a, tshape)
# difference between shifted copies of an array along a given dimension
def shiftdiff(a, tshape, shiftdim):
ind1 = [slice(None, None), ] * a.ndim
ind2 = [slice(None, None), ] * a.ndim
ind1[shiftdim] = slice(tshape[shiftdim], a.shape[shiftdim] - 1)
ind2[shiftdim] = slice(0, a.shape[shiftdim] - tshape[shiftdim] - 1)
return a[ind1] - a[ind2]
# take the cumsum along each dimension and subtracting a shifted version
# from itself. this reduces the number of computations to 2*N additions
# and 2*N subtractions for an N-dimensional array, independent of its
# size.
#
# See:
# <http://www.idiom.com/~zilla/Papers/nvisionInterface/nip.html>
for dd in xrange(a.ndim):
a = np.cumsum(a, dd)
a = shiftdiff(a, tshape, dd)
return a
# # for debugging purposes, ~10x slower than local_sum for a (512,512) array
# def slow_2D_local_sum(a,tshape):
# out = np.zeros_like(a)
# for ii in xrange(a.shape[0]):
# istart = np.max((0,ii-tshape[0]//2))
# istop = np.min((a.shape[0],ii+tshape[0]//2+1))
# for jj in xrange(a.shape[1]):
# jstart = np.max((0,jj-tshape[1]//2))
# jstop = np.min((a.shape[1],jj+tshape[0]//2+1))
# out[ii,jj] = np.sum(a[istart:istop,jstart:jstop])
# return out
def get_times(t, a, outdims):
k_conv = 1.21667E-09
k_fft = 2.65125E-08
# # uncomment these lines to measure timing constants
# k_conv,k_fft,convreps,fftreps = benchmark(t,a,outdims,maxtime=60)
# print "-------------------------------------"
# print "Template size:\t\t%s" %str(t.shape)
# print "Search space size:\t%s" %str(a.shape)
# print "k_conv:\t%.6G\treps:\t%s" %(k_conv,str(convreps))
# print "k_fft:\t%.6G\treps:\t%s" %(k_fft,str(fftreps))
# print "-------------------------------------"
# spatial convolution time scales with the total number of elements
convtime = k_conv * (t.size * a.size)
# Fourier convolution time scales with N*log(N), cross-correlation
# requires 2x FFTs and 1x iFFT. ND FFT time scales with
# prod(dimensions)*log(prod(dimensions))
ffttime = 3 * k_fft * (np.prod(outdims) * np.log(np.prod(outdims)))
# print "Predicted spatial:\t%.6G\nPredicted fourier:\t%.6G" %(convtime,ffttime)
return convtime, ffttime
def benchmark(t, a, outdims, maxtime=60):
import resource
# benchmark spatial convolutions
# ---------------------------------
convreps = 0
tic = resource.getrusage(resource.RUSAGE_SELF).ru_utime
toc = tic
while (toc - tic) < maxtime:
convolve(a, t, mode='constant', cval=0)
# xcorr = convolve(a,t,mode='full')
convreps += 1
toc = resource.getrusage(resource.RUSAGE_SELF).ru_utime
convtime = (toc - tic) / convreps
# convtime == k(N1+N2)
N = t.size * a.size
k_conv = convtime / N
# benchmark 1D Fourier transforms
# ---------------------------------
veclist = [np.random.randn(ss) for ss in outdims]
fft1times = []
fftreps = []
for vec in veclist:
reps = 0
tic = resource.getrusage(resource.RUSAGE_SELF).ru_utime
toc = tic
while (toc - tic) < maxtime:
fftn(vec)
toc = resource.getrusage(resource.RUSAGE_SELF).ru_utime
reps += 1
fft1times.append((toc - tic) / reps)
fftreps.append(reps)
fft1times = np.asarray(fft1times)
# fft1_time == k*N*log(N)
N = np.asarray([vec.size for vec in veclist])
k_fft = np.mean(fft1times / (N * np.log(N)))
# # benchmark ND Fourier transforms
# # ---------------------------------
# arraylist = [t,a]
# fftntimes = []
# fftreps = []
# for array in arraylist:
# reps = 0
# tic = resource.getrusage(resource.RUSAGE_SELF).ru_utime
# toc = tic
# while (toc-tic) < maxtime:
# fftn(array,shape=a.shape)
# reps += 1
# toc = resource.getrusage(resource.RUSAGE_SELF).ru_utime
# fftntimes.append((toc-tic)/reps)
# fftreps.append(reps)
# fftntimes = np.asarray(fftntimes)
# # fftn_time == k*prod(dimensions)*log(prod(dimensions)) for an M-dimensional array
# nlogn = np.array([aa.size*np.log(aa.size) for aa in arraylist])
# k_fft = np.mean(fftntimes/nlogn)
return k_conv, k_fft, convreps, fftreps
# return k_conv,k_fft1,k_fftn
def ndpad(a, npad=None, padval=0):
"""
Pads the edges of an n-dimensional input array with a constant value
across all of its dimensions.
Inputs:
----------------
a The array to pad
npad* The pad width. Can either be array-like, with one
element per dimension, or a scalar, in which case the
same pad width is applied to all dimensions.
padval The value to pad with. Must be a scalar (default is 0).
Output:
----------------
b The padded array
*If npad is not a whole number, padding will be applied so that the
'left' edge of the output is padded less than the 'right', e.g.:
a == np.array([1,2,3,4,5,6])
ndpad(a,1.5) == np.array([0,1,2,3,4,5,6,0,0])
In this case, the average pad width is equal to npad (but if npad was
not a multiple of 0.5 this would not still hold). This is so that ndpad
can be used to pad an array out to odd final dimensions.
"""
if npad == None:
npad = np.ones(a.ndim)
elif np.isscalar(npad):
npad = (npad,) * a.ndim
elif len(npad) != a.ndim:
raise Exception('Length of npad (%i) does not match the ' \
'dimensionality of the input array (%i)'
% (len(npad), a.ndim))
# initialise padded output
padsize = [a.shape[dd] + 2 * npad[dd] for dd in xrange(a.ndim)]
b = np.ones(padsize, a.dtype) * padval
# construct an N-dimensional list of slice objects
ind = [slice(np.floor(npad[dd]), a.shape[dd] + np.floor(npad[dd])) for dd in xrange(a.ndim)]
# fill in the non-pad part of the array
b[ind] = a
return b
# def ndunpad(b,npad=None):
# """
# Removes padding from each dimension of an n-dimensional array (the
# reverse of ndpad)
# Inputs:
# ----------------
# b The array to unpad
# npad* The pad width. Can either be array-like, with one
# element per dimension, or a scalar, in which case the
# same pad width is applied to all dimensions.
# Output:
# ----------------
# a The unpadded array
# *If npad is not a whole number, padding will be removed assuming that
# the 'left' edge of the output is padded less than the 'right', e.g.:
# b == np.array([0,1,2,3,4,5,6,0,0])
# ndpad(b,1.5) == np.array([1,2,3,4,5,6])
# This is consistent with the behaviour of ndpad.
# """
# if npad == None:
# npad = np.ones(b.ndim)
# elif np.isscalar(npad):
# npad = (npad,)*b.ndim
# elif len(npad) != b.ndim:
# raise Exception('Length of npad (%i) does not match the '\
# 'dimensionality of the input array (%i)'
# %(len(npad),b.ndim))
# ind = [slice(np.floor(npad[dd]),b.shape[dd]-np.ceil(npad[dd])) for dd in xrange(b.ndim)]
# return b[ind]
def procrustes(a, target, side='both', padval=0):
"""
Forces an array to a target size by either padding it with a constant or
truncating it
Arguments:
a Input array of any type or shape
target Dimensions to pad/trim to, must be a list or tuple
"""
try:
if len(target) != a.ndim:
raise TypeError('Target shape must have the same number of dimensions as the input')
except TypeError:
raise TypeError('Target must be array-like')
try:
b = np.ones(target, a.dtype) * padval
except TypeError:
raise TypeError('Pad value must be numeric')
except ValueError:
raise ValueError('Pad value must be scalar')
aind = [slice(None, None)] * a.ndim
bind = [slice(None, None)] * a.ndim
# pad/trim comes after the array in each dimension
if side == 'after':
for dd in xrange(a.ndim):
if a.shape[dd] > target[dd]:
aind[dd] = slice(None, target[dd])
elif a.shape[dd] < target[dd]:
bind[dd] = slice(None, a.shape[dd])
# pad/trim comes before the array in each dimension
elif side == 'before':
for dd in xrange(a.ndim):
if a.shape[dd] > target[dd]:
aind[dd] = slice(a.shape[dd] - target[dd], None)
elif a.shape[dd] < target[dd]:
bind[dd] = slice(target[dd] - a.shape[dd], None)
# pad/trim both sides of the array in each dimension
elif side == 'both':
for dd in xrange(a.ndim):
if a.shape[dd] > target[dd]:
diff = (a.shape[dd] - target[dd]) / 2.
aind[dd] = slice(np.floor(diff), a.shape[dd] - np.ceil(diff))
elif a.shape[dd] < target[dd]:
diff = (target[dd] - a.shape[dd]) / 2.
bind[dd] = slice(np.floor(diff), target[dd] - np.ceil(diff))
else:
raise Exception('Invalid choice of pad type: %s' % side)
b[bind] = a[aind]
return b
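# Illustrative behaviour of the pad/trim logic (added note; 1-D integer examples):
#   procrustes(np.arange(6), (4,), side='after')           -> array([0, 1, 2, 3])
#   procrustes(np.arange(6), (4,), side='before')          -> array([2, 3, 4, 5])
#   procrustes(np.arange(2), (4,), side='after', padval=9) -> array([0, 1, 9, 9])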
def ndflip(a):
"""Inverts an n-dimensional array along each of its axes"""
ind = (slice(None, None, -1),) * a.ndim
return a[ind]
# def nextpow2(n):
# """get the next power of 2 that's greater than n"""
# m_f = np.log2(n)
# m_i = np.ceil(m_f)
# return 2**m_i
|
[
"kevin.gullikson@gmail.com"
] |
kevin.gullikson@gmail.com
|
ce292be2901110f156ffab596434149b7b74fc9a
|
a0b099d4d1ed950ae8a21d0cf8a65a0e97c19e4a
|
/setup.py
|
c5abd288126ba8b77dfcbbdb4151eb253850119e
|
[
"MIT"
] |
permissive
|
jdonkervliet/dedalov2
|
f1b81b21e5e092222cdee6ace3789fec2a196cee
|
255b707e36acce384596662bde97a333f900fc89
|
refs/heads/master
| 2020-05-14T17:40:39.613678
| 2019-07-15T19:30:43
| 2020-02-29T19:06:35
| 181,896,330
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="dedalov2",
python_requires='>=3.7.2',
version="0.0.1",
author="Jesse Donkervliet",
author_email="j.donkervliet+dedalo@gmail.com",
description="Explain groups of URIs on the Semantic Web",
long_description=long_description,
long_description_content_type="text/markdown",
package_dir={"": "."},
packages=["dedalov2"],
install_requires=[
"hdt>=2.2.1",
"psutil>=5.6.3",
],
url="https://github.com/jdonkervliet/dedalov2",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
[
"j.j.r.donkervliet@vu.nl"
] |
j.j.r.donkervliet@vu.nl
|
e28bbc9c4c6764d0f73d4f807b475e298db2d173
|
fc5734ad9b0dc154b3a36ec2f5d848b3d693473f
|
/solutions/Strings/string_rotation.py
|
1043f636e9825fc4b25b17aaff50db95a52885ee
|
[
"MIT"
] |
permissive
|
aimdarx/data-structures-and-algorithms
|
8e51ec2144b6e0c413bc7ef0c46aba749fd70a99
|
1659887b843c5d20ee84a24df152fb4f763db757
|
refs/heads/master
| 2023-08-28T12:00:33.073788
| 2021-11-07T08:31:28
| 2021-11-07T08:31:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
"""
String Rotation:
Assume you have a method isSubstring which checks if one word is a substring of another.
Given two strings, s1 and s2, write code to check if s2 is a rotation of s1 using only one call to isSubstring
(e.g., "waterbottle" is a rotation of "erbottlewat").
"""
def is_substring(str, substring):
return substring in str
def string_rotation(s1, s2):
if len(s1) != len(s2):
return False
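    # s1 + s1 contains every rotation of s1, so a single substring check suffices.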
return is_substring(s1+s1, s2)
print(string_rotation("waterbottle", "erbottlewat") == True)
print(string_rotation("waterbottleq", "erbottlewat") == False)
print(string_rotation("waterbottleq", "erbottlewatq") == False)
print(string_rotation("CodingInterview", "erviewCodingInt") == True)
print(string_rotation("Test", "est") == False)
|
[
"noreply@github.com"
] |
aimdarx.noreply@github.com
|
bd9a1f78e4a556f15d2ad64818463fb0ae59adcb
|
a15551724e193e086a6b69b3af9c91cd8c70b67a
|
/Sorting_algo_graph/interesting graph.py
|
5b96ce154cf0e3d2b232339d4c1af03f5f3c18a0
|
[] |
no_license
|
arymandeshwal/Sorting_algo_representation
|
7de12d0fc4982e37df35574579987287e522084a
|
44b4f40fce50d27d39972178b774a3850a4d140f
|
refs/heads/master
| 2020-08-06T07:27:45.964785
| 2019-10-04T19:32:12
| 2019-10-04T19:32:12
| 212,888,496
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
##x= [1,2,3,4,5]
##y= [10,20,30,500,40]
##
##
##plt.bar(x,y)
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
def animate():
xs=[]
ys=[]
for i in range(1,20):
xs.append(i)
ys.append(i*10)
ax1.clear()
ax1.bar(xs,ys)
fig.canvas.draw()
##ani = animation.FuncAnimation(fig,animate)
animate()
|
[
"noreply@github.com"
] |
arymandeshwal.noreply@github.com
|
cddba6213ee471ef0db6de44dc5080df3860c224
|
09dec9bf9703fa7f7159b9e8b71438cd4808de69
|
/picture/views.py
|
a1571ac333d3c09c3235a7a1903fa7245a831271
|
[] |
no_license
|
wiejar/ZaawTprO
|
bd3dc01bb5e0b92c52deb67738985bab9d2d05ed
|
32ba3256d4a138a336b353e7af91a3eb85625396
|
refs/heads/master
| 2016-09-06T08:35:57.284630
| 2015-09-10T19:18:35
| 2015-09-10T19:18:35
| 33,627,793
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
# -*- coding: utf-8 -*-
from picture.models import Picture
__author__ = 'slawek'
from rest_framework import viewsets
from picture.serializers import PictureSerializers
class PictureViewSet(viewsets.ModelViewSet):
"""
View - provides serialized picture data to the client-side application.
"""
queryset = Picture.objects.all()
serializer_class = PictureSerializers
|
[
"kowalskislawomirkarol@gmail.com"
] |
kowalskislawomirkarol@gmail.com
|
055e5acbc789048d5312b82ad4a0b25569faf6af
|
625488541743e8d02da02ccd8073ba0018f577cd
|
/main.py
|
a1c7ee9a0c80b51f75e2209d54edb1331150c14c
|
[] |
no_license
|
Archit1008/Vs_Code_Editor
|
01257120ea2b93469139fbab16aa53149b0cbc0b
|
25ecc0ef24c54591f06a0b2cfcc7d1145283cc77
|
refs/heads/main
| 2023-08-07T23:54:09.450659
| 2021-09-11T18:51:18
| 2021-09-11T18:51:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,011
|
py
|
from tkinter import*
from PIL import ImageTk
from tkinter import messagebox, filedialog
import subprocess
import os
class Vs_code:
def __init__(self,root):
self.root = root
self.root.title("Untitle- VS Editor - Developed By Vaibhav")
self.root.geometry("1200x700+0+0")
self.path_name = ""
self.color_theme = StringVar()
self.color_theme.set('Light Default')
self.font_size = 18
self.file_name=""
# ==============Menus Icons============
self.new_icon = ImageTk.PhotoImage(file="icons/new.png")
self.open_icon = ImageTk.PhotoImage(file="icons/Open-file-icon.png")
self.save_icon = ImageTk.PhotoImage(file="icons/1save-icon.png")
self.save_as_icon = ImageTk.PhotoImage(file="icons/2save-as-icon.png")
self.exit_icon = ImageTk.PhotoImage(file="icons/exit-icon.png")
self.light_default = ImageTk.PhotoImage(file="icons/default.jpg")
self.light_plus = ImageTk.PhotoImage(file="icons/light_plus.jpg")
self.dark = ImageTk.PhotoImage(file="icons/dark.jpg")
self.red = ImageTk.PhotoImage(file="icons/red.jpg")
self.monokai = ImageTk.PhotoImage(file="icons/monaki.jpg")
self.night_blue = ImageTk.PhotoImage(file="icons/nightblue.jpg")
#==============Menus============
Mymenu=Menu(self.root)
Filemenu=Menu(Mymenu, tearoff=False)
Filemenu.add_command(label="New File", image=self.new_icon,compound=LEFT, accelerator="Ctl+N", command=self.new_file)
Filemenu.add_command(label="Open File", image=self.open_icon,compound=LEFT, accelerator="Ctl+O", command=self.open_file)
Filemenu.add_command(label="Save File", image=self.save_icon,compound=LEFT, accelerator="Ctl+S", command=self.save_file)
Filemenu.add_command(label="Save As File", image=self.save_as_icon,compound=LEFT, accelerator="Ctl+Alt+S", command=self.save_as_file)
Filemenu.add_command(label="Exit File", image=self.exit_icon,compound=LEFT, accelerator="Ctl+Q", command=self.exit_file)
color_theme = Menu(Mymenu, tearoff=False)
color_theme.add_radiobutton(label="Light Default", value='Light Default', variable=self.color_theme, image=self.light_default, compound=LEFT, command=self.color_change)
color_theme.add_radiobutton(label="Light Plus", value='Light Plus', variable=self.color_theme, image=self.light_plus, compound=LEFT, command=self.color_change)
color_theme.add_radiobutton(label="Dark", value='Dark', variable=self.color_theme, image=self.dark, compound=LEFT, command=self.color_change)
color_theme.add_radiobutton(label="Red", value='Red', variable=self.color_theme, image=self.red, compound=LEFT, command=self.color_change)
color_theme.add_radiobutton(label="Monokai", value='Monokai', variable=self.color_theme, image=self.monokai, compound=LEFT, command=self.color_change)
color_theme.add_radiobutton(label="Night Blue", value='Night Blue', variable=self.color_theme, image=self.night_blue, compound=LEFT, command=self.color_change)
Mymenu.add_cascade(label="File", menu=Filemenu)
Mymenu.add_cascade(label="Color Theme", menu=color_theme)
Mymenu.add_command(label="Clear", command=self.clear)
Mymenu.add_separator()
Mymenu.add_command(label="Run", command=self.run)
self.root.config(menu=Mymenu)
# =============== Menu End Here ===============
# =========== Code Editor Frame ==========
EditorFrame=Frame(self.root,bg="white")
EditorFrame.place(x=0,y=0,relwidth=1,height=450)
scrolly=Scrollbar(EditorFrame,orient=VERTICAL)
scrolly.pack(side=RIGHT,fill=Y)
self.text_editor=Text(EditorFrame,bg='white',font=("times new roman",self.font_size),yscrollcommand=scrolly.set)
self.text_editor.pack(fill=BOTH,expand=1)
scrolly.config(command=self.text_editor.yview)
# =========== Output Editor Frame ==========
OutputFrame = LabelFrame(self.root,text="Output", bg="white", bd=3, font=("Arial",15))
OutputFrame.place(x=0, y=450, relwidth=1, height=250)
scrolly = Scrollbar(OutputFrame, orient=VERTICAL)
scrolly.pack(side=RIGHT, fill=Y)
self.text_output = Text(OutputFrame, bg='white', font=("times new roman", 17), yscrollcommand = scrolly.set)
self.text_output.pack(fill=BOTH, expand=1)
scrolly.config(command=self.text_output.yview)
# ========================== shortcuts ========================
self.root.bind('<Control-plus>',self.font_size_inc)
self.root.bind('<Control-minus>', self.font_size_dec)
self.root.bind('<Control-n>', self.new_file)
self.root.bind('<Control-o>', self.open_file)
self.root.bind('<Control-s>', self.save_file)
self.root.bind('<Control-Alt-s>', self.save_as_file)
self.root.bind('<Control-q>', self.exit_file)
# ========================== All Functions ========================
def font_size_inc(self,event=None):
self.font_size+=1
self.text_editor.config(font=('times new roman',self.font_size))
def font_size_dec(self,event=None):
self.font_size-=1
self.text_editor.config(font=('times new roman',self.font_size))
def new_file(self,event=None):
self.path_name=""
self.file_name = ""
self.root.title("Untitle- VS Editor - Developed By Vaibhav")
self.text_editor.delete('1.0', END)
self.text_output.delete('1.0', END)
def open_file(self,event=None):
path = filedialog.askopenfilename(filetype=[('Python Files', '*.py')], defaultextension=('.py'))
if path!="":
info = os.path.split(path)
self.file_name = info[1]
self.root.title(f"{self.file_name}- VS Editor - Developed By Vaibhav")
self.path_name = path
fp = open(self.path_name, "r")
data=fp.read()
self.text_editor.delete("1.0", END)
self.text_editor.insert('1.0', data)
fp.close()
def save_file(self,event=None):
if self.path_name=="" and self.file_name=="":
self.save_as_file()
else:
fp=open(self.path_name, 'w')
fp.write(self.text_editor.get('1.0',END))
fp.close()
def save_as_file(self,event=None):
path = filedialog.asksaveasfilename(filetype=[('Python Files', '*.py')], defaultextension=('.py'))
if path!="":
info = os.path.split(path)
self.file_name = info[1]
self.root.title(f"{self.file_name}- VS Editor - Developed By Vaibhav")
self.path_name = path
fp = open(self.path_name, "w")
fp.write(self.text_editor.get('1.0',END))
fp.close()
def exit_file(self,event=None):
if messagebox.askokcancel('Confirm Exit','Are you sure want to exit Vs editor')==True:
self.root.destroy()
def color_change(self):
if self.color_theme.get()=="Light Default":
self.text_editor.config(bg='white', fg='black')
self.text_output.config(bg='white', fg='black')
elif self.color_theme.get()=="Light Plus":
self.text_editor.config(bg='#e0e0e0', fg='#474747')
self.text_output.config(bg='#e0e0e0', fg='#474747')
elif self.color_theme.get()=="Dark":
self.text_editor.config(bg='#2d2d2d', fg='#c4c4c4')
self.text_output.config(bg='#2d2d2d', fg='#c4c4c4')
elif self.color_theme.get()=="Red":
self.text_editor.config(bg='#ffe8e8', fg='#2d2d2d')
self.text_output.config(bg='#ffe8e8', fg='#2d2d2d')
elif self.color_theme.get()=="Monokai":
self.text_editor.config(bg='#d3b774', fg='#474747')
self.text_output.config(bg='#d3b774', fg='#474747')
elif self.color_theme.get()=="Night Blue":
self.text_editor.config(bg='#6b9dc2', fg='#ededed')
self.text_output.config(bg='#6b9dc2', fg='#ededed')
def clear(self):
self.text_editor.delete('1.0', END)
self.text_output.delete('1.0', END)
def run(self):
if self.path_name=='':
if messagebox.askokcancel('Error', 'Please save the file to execute the code',icon='error', parent=self.root)==True:
self.save_file()
else:
command=f'python {self.path_name}'
run_file=subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
#print(self.path_name,command)
output, error=run_file.communicate()
self.text_output.delete('1.0', END)
self.text_output.insert('1.0',output)
self.text_output.insert('1.0',error)
root=Tk()
obj=Vs_code(root)
root.mainloop()
|
[
"noreply@github.com"
] |
Archit1008.noreply@github.com
|
3277647540b48355be9ed39c7da40af81a33e829
|
08a7da9926cbf32d5c66a726c2873e0a2603cf34
|
/ex085.py
|
48824bd75fc156e074bcb76379f53cb2948e36dd
|
[] |
no_license
|
lucassilva-2003/Cursoemvideo_Python_100exs
|
ae9b0bcd2abdaadb308c6d31fd8a6ecb801708b0
|
8450ca7099cbc222abea0e2aa003864d4f7b7467
|
refs/heads/master
| 2022-11-25T13:23:32.078187
| 2020-07-23T18:45:21
| 2020-07-23T18:45:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
lista = [[], []] # Creates two lists inside one list
for c in range(1, 8):
    n = int(input(f'Enter a value for position {c}: '))
    if n % 2 == 0: # if the value is even it is placed in the first list, lista[0]
lista[0].append(n)
    else: # otherwise it is placed in the second list, lista[1]
lista[1].append(n)
lista[0].sort() # Sorts the values in ascending order
lista[1].sort()
print(f'The even values entered were: {lista[0]}\nThe odd values entered were: {lista[1]} ')
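# Illustrative run (hypothetical inputs 5, 2, 8, 7, 4, 1, 6):
#   lista[0] (evens, sorted) -> [2, 4, 6, 8]
#   lista[1] (odds, sorted)  -> [1, 5, 7]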
|
[
"noreply@github.com"
] |
lucassilva-2003.noreply@github.com
|
907594a44b147cda939842c8cfb807c93df0a453
|
7f6775640ee7ad886a57665a07d20e28cf239da7
|
/hd_mesos/migrations/0001_initial.py
|
4091815dcaaf719a3e43095bb2ff484d0524d6f0
|
[] |
no_license
|
Rmond/mysite
|
fa643924d1988fa0877cd16d57539b370fdfc396
|
2bc732d279b640d0d998ee2049a882168aa756b7
|
refs/heads/master
| 2020-03-16T14:12:33.666271
| 2018-12-28T05:16:56
| 2018-12-28T05:16:56
| 130,977,394
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,120
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2018-04-25 06:04
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Users',
fields=[
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('username', models.CharField(max_length=32, primary_key=True, serialize=False, unique=True)),
('nickname', models.CharField(max_length=16)),
('password', models.CharField(max_length=128)),
('role', models.PositiveSmallIntegerField(default=1)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Host_Group',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='HostGroup',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('groupname', models.CharField(max_length=32, unique=True)),
],
),
migrations.CreateModel(
name='HostInfo',
fields=[
('hostname', models.CharField(max_length=32)),
('ip', models.CharField(max_length=16, primary_key=True, serialize=False)),
('software', models.CharField(default='', max_length=128)),
('tags', models.CharField(default='', max_length=64)),
('idle', models.BooleanField(default=True)),
('vginfo', models.TextField(null=True)),
('memory_total', models.IntegerField(default=0)),
('memory_used', models.IntegerField(default=0)),
('memory_free', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='User_Host',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('ip', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hd_mesos.HostInfo')),
('username', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='User_Hostgroup',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hd_mesos.HostGroup')),
('username', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='User_Task',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('taskid', models.CharField(default='', max_length=48)),
('taskname', models.CharField(default='', max_length=32)),
('acked', models.BooleanField(default=False)),
('star_time', models.DateTimeField()),
('hosts', models.TextField()),
('status', models.CharField(default='Running', max_length=50)),
('result', models.TextField(null=True)),
('date_done', models.DateTimeField(null=True)),
],
),
migrations.CreateModel(
name='User_Shell_Task',
fields=[
('user_task_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='hd_mesos.User_Task')),
('shell_cmd', models.CharField(max_length=128)),
],
bases=('hd_mesos.user_task',),
),
migrations.CreateModel(
name='User_Yum_Task',
fields=[
('user_task_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='hd_mesos.User_Task')),
('soft_list', models.TextField(null=True)),
],
bases=('hd_mesos.user_task',),
),
migrations.AddField(
model_name='user_task',
name='username',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='host_group',
name='group',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hd_mesos.HostGroup'),
),
migrations.AddField(
model_name='host_group',
name='ip',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hd_mesos.HostInfo'),
),
]
|
[
"710324581@qq.com"
] |
710324581@qq.com
|
fbcfe14dd13f5a30f79f31f21d29a58d41e981ea
|
2aace9bb170363e181eb7520e93def25f38dbe5c
|
/build/idea-sandbox/system/python_stubs/-57053121/PyQt5/QtNetwork/QSslError.py
|
d32f401606abccbd6549153dde9d3bab66afe1c3
|
[] |
no_license
|
qkpqkp/PlagCheck
|
13cb66fd2b2caa2451690bb72a2634bdaa07f1e6
|
d229904674a5a6e46738179c7494488ca930045e
|
refs/heads/master
| 2023-05-28T15:06:08.723143
| 2021-06-09T05:36:34
| 2021-06-09T05:36:34
| 375,235,940
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,851
|
py
|
# encoding: utf-8
# module PyQt5.QtNetwork
# from C:\Users\Doly\Anaconda3\lib\site-packages\PyQt5\QtNetwork.pyd
# by generator 1.147
# no doc
# imports
import PyQt5.QtCore as __PyQt5_QtCore
import sip as __sip
class QSslError(__sip.simplewrapper):
"""
QSslError()
QSslError(QSslError.SslError)
QSslError(QSslError.SslError, QSslCertificate)
QSslError(QSslError)
"""
def certificate(self): # real signature unknown; restored from __doc__
""" certificate(self) -> QSslCertificate """
return QSslCertificate
def error(self): # real signature unknown; restored from __doc__
""" error(self) -> QSslError.SslError """
pass
def errorString(self): # real signature unknown; restored from __doc__
""" errorString(self) -> str """
return ""
def swap(self, QSslError): # real signature unknown; restored from __doc__
""" swap(self, QSslError) """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
AuthorityIssuerSerialNumberMismatch = 20
CertificateBlacklisted = 24
CertificateExpired = 6
CertificateNotYetValid = 5
CertificateRejected = 18
CertificateRevoked = 13
CertificateSignatureFailed = 4
CertificateUntrusted = 17
HostNameMismatch = 22
InvalidCaCertificate = 14
InvalidNotAfterField = 8
InvalidNotBeforeField = 7
InvalidPurpose = 16
NoError = 0
NoPeerCertificate = 21
NoSslSupport = 23
PathLengthExceeded = 15
SelfSignedCertificate = 9
SelfSignedCertificateInChain = 10
SubjectIssuerMismatch = 19
UnableToDecodeIssuerPublicKey = 3
UnableToDecryptCertificateSignature = 2
UnableToGetIssuerCertificate = 1
UnableToGetLocalIssuerCertificate = 11
UnableToVerifyFirstCertificate = 12
UnspecifiedError = -1
|
[
"qinkunpeng2015@163.com"
] |
qinkunpeng2015@163.com
|
c6e7b980440b388f5c28db485b2c6df09de7162f
|
994be8da4cb2d79edc1d078d04f0c2326eecbd0a
|
/app/functions.py
|
3bf68d685991bf8eaa96b9877bff85e51e4a493b
|
[
"MIT"
] |
permissive
|
tjgavlick/whiskey-blog
|
0e9287d6ff51d3f27d4147d13dd1efa3550a107c
|
f34a339cca0bcfab2afeb7d0ab3d29f5f95ffe04
|
refs/heads/master
| 2020-12-23T21:10:49.156725
| 2016-08-04T23:35:13
| 2016-08-04T23:35:13
| 56,724,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,441
|
py
|
# -*- coding: utf-8 -*-
import datetime
from decimal import Decimal
from flask import request
from werkzeug import url_encode
from app import constants
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in constants.ALLOWED_EXTENSIONS
def format_price(price):
if price % 1 == 0:
return '${}'.format(int(price))
return '${}'.format(Decimal(price).quantize(Decimal('1.00')))
def format_age(age):
if age % 1 == 0:
if age == 1:
return 'One year'
else:
return '{} years'.format(int(age))
elif age < 1:
return '{} months'.format(round(age * 12))
elif age < 2:
return 'One year {} months'.format(round(age % 1 * 12))
return '{} years {} months'.format(int(age), round(age % 1 * 12))
def format_age_range(age1, age2):
if age1 > age2:
age1, age2 = age2, age1
if age1 % 1 == 0 and age2 % 1 == 0:
return '{} – {} years'.format(int(age1), int(age2))
return format_age(age1) + ' – ' + format_age(age2)
def format_proof(proof):
if proof % 1 == 0:
return str(int(proof))
return proof
def format_date(t):
return t.strftime('%B %d, %Y')
def format_datetime(t):
return t.strftime('%B %d, %Y at %H:%M')
def modify_query(**new_values):
args = request.args.copy()
for key, val in new_values.items():
args[key] = val
return '{}?{}'.format(request.path, url_encode(args))
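# Illustrative outputs (added note; assumes Python 3, where round() returns an int):
#   format_price(45)         -> '$45'
#   format_price(45.5)       -> '$45.50'   (uses the Decimal import above)
#   format_age(0.5)          -> '6 months'
#   format_age(1.25)         -> 'One year 3 months'
#   format_age_range(3, 1.5) -> 'One year 6 months – 3 years'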
|
[
"tjgavlick@gmail.com"
] |
tjgavlick@gmail.com
|
75f77ccdaf3e7164f93e7d4b4ae4dfa223110249
|
a3be9ec7b7da211842c818fc63c85bf10469c647
|
/environnement_virtuel/bin/pylupdate5
|
f88e0a7124e9cfb4ad93ad8c39f74cb01bef520e
|
[] |
no_license
|
clementverc/CurrentConverter
|
97ef4e52e6b0144e830d703d8789f3ae3bb89a96
|
173d047f0178fb597d9748e0d12e6c2ea6ed615e
|
refs/heads/master
| 2020-12-05T12:24:19.903746
| 2020-01-06T14:08:00
| 2020-01-06T14:08:00
| 232,108,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
#!/Users/clement/Documents/ProjetPython/environnement_virtuel/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from PyQt5.pylupdate_main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"clement.vercucque@orange.fr"
] |
clement.vercucque@orange.fr
|
|
3265522f3031fdb9f900fd1d7525c2d9a49fa979
|
3c0526e87e9ea80ef2676058f3023e9111d0eab9
|
/twilio/rest/taskrouter/v1/workspace/workspace_cumulative_statistics.py
|
2fbd634e0ae9b138bca32021ab70fa46f8faec03
|
[] |
no_license
|
smgood/cryptobot
|
f24ef69b9b253b9e421bf5bbef3b750fcc1a7332
|
13448eb8dfc34fedaba4d10ce642c38fe80a3526
|
refs/heads/master
| 2020-07-21T15:36:55.353596
| 2019-09-07T03:37:56
| 2019-09-07T03:37:56
| 206,910,383
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,742
|
py
|
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class WorkspaceCumulativeStatisticsList(ListResource):
""" """
def __init__(self, version, workspace_sid):
"""
Initialize the WorkspaceCumulativeStatisticsList
:param Version version: Version that contains the resource
:param workspace_sid: The workspace_sid
:returns: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsList
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsList
"""
super(WorkspaceCumulativeStatisticsList, self).__init__(version)
# Path Solution
self._solution = {'workspace_sid': workspace_sid, }
def get(self):
"""
Constructs a WorkspaceCumulativeStatisticsContext
:returns: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsContext
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsContext
"""
return WorkspaceCumulativeStatisticsContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
)
def __call__(self):
"""
Constructs a WorkspaceCumulativeStatisticsContext
:returns: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsContext
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsContext
"""
return WorkspaceCumulativeStatisticsContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Taskrouter.V1.WorkspaceCumulativeStatisticsList>'
class WorkspaceCumulativeStatisticsPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the WorkspaceCumulativeStatisticsPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param workspace_sid: The workspace_sid
:returns: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsPage
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsPage
"""
super(WorkspaceCumulativeStatisticsPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of WorkspaceCumulativeStatisticsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsInstance
"""
return WorkspaceCumulativeStatisticsInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Taskrouter.V1.WorkspaceCumulativeStatisticsPage>'
class WorkspaceCumulativeStatisticsContext(InstanceContext):
""" """
def __init__(self, version, workspace_sid):
"""
Initialize the WorkspaceCumulativeStatisticsContext
:param Version version: Version that contains the resource
:param workspace_sid: The workspace_sid
:returns: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsContext
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsContext
"""
super(WorkspaceCumulativeStatisticsContext, self).__init__(version)
# Path Solution
self._solution = {'workspace_sid': workspace_sid, }
self._uri = '/Workspaces/{workspace_sid}/CumulativeStatistics'.format(**self._solution)
def fetch(self, end_date=values.unset, minutes=values.unset,
start_date=values.unset, task_channel=values.unset,
split_by_wait_time=values.unset):
"""
Fetch a WorkspaceCumulativeStatisticsInstance
:param datetime end_date: Filter cumulative statistics by an end date.
:param unicode minutes: Filter cumulative statistics by up to ‘x’ minutes in the past.
:param datetime start_date: Filter cumulative statistics by a start date.
:param unicode task_channel: Filter real-time and cumulative statistics by TaskChannel.
:param unicode split_by_wait_time: A comma separated values for viewing splits of tasks canceled and accepted above the given threshold in seconds.
:returns: Fetched WorkspaceCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsInstance
"""
params = values.of({
'EndDate': serialize.iso8601_datetime(end_date),
'Minutes': minutes,
'StartDate': serialize.iso8601_datetime(start_date),
'TaskChannel': task_channel,
'SplitByWaitTime': split_by_wait_time,
})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return WorkspaceCumulativeStatisticsInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Taskrouter.V1.WorkspaceCumulativeStatisticsContext {}>'.format(context)
class WorkspaceCumulativeStatisticsInstance(InstanceResource):
""" """
def __init__(self, version, payload, workspace_sid):
"""
Initialize the WorkspaceCumulativeStatisticsInstance
:returns: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsInstance
"""
super(WorkspaceCumulativeStatisticsInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'avg_task_acceptance_time': deserialize.integer(payload['avg_task_acceptance_time']),
'start_time': deserialize.iso8601_datetime(payload['start_time']),
'end_time': deserialize.iso8601_datetime(payload['end_time']),
'reservations_created': deserialize.integer(payload['reservations_created']),
'reservations_accepted': deserialize.integer(payload['reservations_accepted']),
'reservations_rejected': deserialize.integer(payload['reservations_rejected']),
'reservations_timed_out': deserialize.integer(payload['reservations_timed_out']),
'reservations_canceled': deserialize.integer(payload['reservations_canceled']),
'reservations_rescinded': deserialize.integer(payload['reservations_rescinded']),
'split_by_wait_time': payload['split_by_wait_time'],
'wait_duration_until_accepted': payload['wait_duration_until_accepted'],
'wait_duration_until_canceled': payload['wait_duration_until_canceled'],
'tasks_canceled': deserialize.integer(payload['tasks_canceled']),
'tasks_completed': deserialize.integer(payload['tasks_completed']),
'tasks_created': deserialize.integer(payload['tasks_created']),
'tasks_deleted': deserialize.integer(payload['tasks_deleted']),
'tasks_moved': deserialize.integer(payload['tasks_moved']),
'tasks_timed_out_in_workflow': deserialize.integer(payload['tasks_timed_out_in_workflow']),
'workspace_sid': payload['workspace_sid'],
'url': payload['url'],
}
# Context
self._context = None
self._solution = {'workspace_sid': workspace_sid, }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: WorkspaceCumulativeStatisticsContext for this WorkspaceCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsContext
"""
if self._context is None:
self._context = WorkspaceCumulativeStatisticsContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def avg_task_acceptance_time(self):
"""
:returns: The average time from Task creation to acceptance
:rtype: unicode
"""
return self._properties['avg_task_acceptance_time']
@property
def start_time(self):
"""
:returns: The start_time
:rtype: datetime
"""
return self._properties['start_time']
@property
def end_time(self):
"""
:returns: The end_time
:rtype: datetime
"""
return self._properties['end_time']
@property
def reservations_created(self):
"""
:returns: The total number of Reservations that were created for Workers
:rtype: unicode
"""
return self._properties['reservations_created']
@property
def reservations_accepted(self):
"""
:returns: The total number of Reservations accepted by Workers
:rtype: unicode
"""
return self._properties['reservations_accepted']
@property
def reservations_rejected(self):
"""
:returns: The total number of Reservations that were rejected
:rtype: unicode
"""
return self._properties['reservations_rejected']
@property
def reservations_timed_out(self):
"""
:returns: The total number of Reservations that were timed out
:rtype: unicode
"""
return self._properties['reservations_timed_out']
@property
def reservations_canceled(self):
"""
:returns: The total number of Reservations that were canceled
:rtype: unicode
"""
return self._properties['reservations_canceled']
@property
def reservations_rescinded(self):
"""
:returns: The total number of Reservations that were rescinded
:rtype: unicode
"""
return self._properties['reservations_rescinded']
@property
def split_by_wait_time(self):
"""
:returns: The splits of the tasks canceled and accepted based on the provided SplitByWaitTime parameter.
:rtype: dict
"""
return self._properties['split_by_wait_time']
@property
def wait_duration_until_accepted(self):
"""
:returns: The wait duration stats for tasks that were accepted.
:rtype: dict
"""
return self._properties['wait_duration_until_accepted']
@property
def wait_duration_until_canceled(self):
"""
:returns: The wait duration stats for tasks that were canceled.
:rtype: dict
"""
return self._properties['wait_duration_until_canceled']
@property
def tasks_canceled(self):
"""
:returns: The total number of Tasks that were canceled
:rtype: unicode
"""
return self._properties['tasks_canceled']
@property
def tasks_completed(self):
"""
:returns: The total number of Tasks that were completed
:rtype: unicode
"""
return self._properties['tasks_completed']
@property
def tasks_created(self):
"""
:returns: The total number of Tasks created
:rtype: unicode
"""
return self._properties['tasks_created']
@property
def tasks_deleted(self):
"""
:returns: The total number of Tasks that were deleted
:rtype: unicode
"""
return self._properties['tasks_deleted']
@property
def tasks_moved(self):
"""
:returns: The total number of Tasks that were moved from one queue to another
:rtype: unicode
"""
return self._properties['tasks_moved']
@property
def tasks_timed_out_in_workflow(self):
"""
:returns: The total number of Tasks that were timed out of their Workflows
:rtype: unicode
"""
return self._properties['tasks_timed_out_in_workflow']
@property
def workspace_sid(self):
"""
:returns: The workspace_sid
:rtype: unicode
"""
return self._properties['workspace_sid']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
def fetch(self, end_date=values.unset, minutes=values.unset,
start_date=values.unset, task_channel=values.unset,
split_by_wait_time=values.unset):
"""
Fetch a WorkspaceCumulativeStatisticsInstance
:param datetime end_date: Filter cumulative statistics by an end date.
:param unicode minutes: Filter cumulative statistics by up to ‘x’ minutes in the past.
:param datetime start_date: Filter cumulative statistics by a start date.
:param unicode task_channel: Filter real-time and cumulative statistics by TaskChannel.
:param unicode split_by_wait_time: A comma separated values for viewing splits of tasks canceled and accepted above the given threshold in seconds.
:returns: Fetched WorkspaceCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_cumulative_statistics.WorkspaceCumulativeStatisticsInstance
"""
return self._proxy.fetch(
end_date=end_date,
minutes=minutes,
start_date=start_date,
task_channel=task_channel,
split_by_wait_time=split_by_wait_time,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Taskrouter.V1.WorkspaceCumulativeStatisticsInstance {}>'.format(context)
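# A hedged usage sketch (not part of the generated library): fetch the last four
# hours of cumulative statistics through the context defined above. The `version`
# argument is assumed to be an already authenticated Taskrouter V1 version object,
# and the parameter values below are placeholders.
def _example_fetch_cumulative_statistics(version, workspace_sid):
    context = WorkspaceCumulativeStatisticsContext(version, workspace_sid=workspace_sid)
    # Restrict the window to the last 240 minutes and split wait times at 60s and 300s
    stats = context.fetch(minutes=240, split_by_wait_time='60,300')
    return stats.tasks_completed, stats.avg_task_acceptance_time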
|
[
"sean@smgoodrich.com"
] |
sean@smgoodrich.com
|
1e4e83e230dd2f77b65787b08e278e29ca6b9990
|
2c384f470f8f1fcf8e2000fe67e668580df239b4
|
/week 3/hw_knn.py
|
9b7dae74a481ad08e03b64a6164cc5333cd1f78d
|
[] |
no_license
|
psheshke/mipt_canvas
|
0f046210fb079924c560847831041d664647cfe4
|
172cc333bfc681d96a87029bf37cdb459e7b9bc9
|
refs/heads/master
| 2020-05-02T22:56:19.662016
| 2019-06-11T11:12:14
| 2019-06-11T11:12:14
| 178,267,255
| 8
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,068
|
py
|
# coding: utf-8
# <img src="https://s8.hostingkartinok.com/uploads/images/2018/08/308b49fcfbc619d629fe4604bceb67ac.jpg" width=500, height=450>
# <h3 style="text-align: center;"><b>Phystech School of Applied Mathematics and Informatics (FPMI), MIPT</b></h3>
# ---
# Based on the [MIPT DIHT Machine Learning course](https://github.com/ml-mipt/ml-mipt) and the [Open Machine Learning Course](https://habr.com/ru/company/ods/blog/322626/).
# ---
# <h2 style="text-align: center;"><b>k Nearest Neighbor(KNN)</b></h2>
# The k Nearest Neighbors (kNN) method is a very popular classification method, also occasionally used in regression tasks. It is one of the most intuitive approaches to classification. The intuition is simple: look at your neighbours — whichever class prevails among them, that is your class too. Formally, the method rests on the compactness hypothesis: if the distance metric between examples is chosen well enough, similar examples are much more likely to belong to the same class than to different ones.
# <img src='https://hsto.org/web/68d/a45/6f0/68da456f00f8434e87628dbe7e3f54a7.png' width=600>
#
# To classify each object of the test set, the following operations are performed in sequence:
#
# * Compute the distance to every object of the training set
# * Select the $k$ training objects with the smallest distances
# * The class of the object being classified is the class that occurs most often among its $k$ nearest neighbours
# We will work with a subsample of the [forest cover type data from the UCI repository](http://archive.ics.uci.edu/ml/datasets/Covertype). 7 different classes are available. Each object is described by 54 features, 40 of which are binary. A description of the data is available at the link, and also in the file `covtype.info.txt`.
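# In[ ]:
# A from-scratch sketch of the three steps above (illustrative only; the notebook
# itself uses sklearn's KNeighborsClassifier further down). The function name and
# arguments here are not part of the assignment.
import numpy as np
from collections import Counter
def knn_predict_one(X_train, y_train, x, k=5):
    """Predict the class of a single object x by a majority vote of its k nearest neighbours."""
    distances = np.linalg.norm(X_train - x, axis=1)          # step 1: distance to every training object
    nearest = np.argsort(distances)[:k]                      # step 2: indices of the k closest objects
    return Counter(y_train[nearest]).most_common(1)[0][0]    # step 3: most frequent class among them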
# ### Data preprocessing
# In[1]:
import pandas as pd
# Link to the dataset (it is in the folder): https://drive.google.com/open?id=1-Z4NlDy11BzSwW13k8EgodRis0uRy1K6
# In[2]:
all_data = pd.read_csv('forest_dataset.csv',)
all_data.head()
# In[3]:
all_data.shape
# Put the class label values into the variable `labels` and the feature descriptions into the variable `feature_matrix`. Since the data are numeric and have no missing values, convert them to `numpy` format using the `.values` method.
# In[4]:
labels = all_data[all_data.columns[-1]].values
feature_matrix = all_data[all_data.columns[:-1]].values
# We will now work with all 7 cover types (the data are already in the variables `feature_matrix` and `labels`, unless you redefined them). Split the sample into training and test sets with the `train_test_split` method, using the parameter values `test_size=0.2`, `random_state=42`. Train a logistic regression on this dataset.
# In[7]:
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# In[19]:
train_feature_matrix, test_feature_matrix, train_labels, test_labels = train_test_split(feature_matrix, labels, test_size=0.2, random_state=42)
# normalize the data using the scaling parameters fitted on train_feature_matrix
scaler = StandardScaler()
train_feature_matrix = scaler.fit_transform(train_feature_matrix)
test_feature_matrix = scaler.transform(test_feature_matrix)
# ### Model training
# The quality of nearest-neighbour classification/regression depends on several parameters:
#
# * the number of neighbours `n_neighbors`
# * the distance metric between objects `metric`
# * the neighbour weights (the neighbours of a test example can be weighted differently, e.g. the farther away the example, the smaller the weight of its "vote") `weights`
#
# Train a `KNeighborsClassifier` from `sklearn` on the dataset.
# In[21]:
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
clf = KNeighborsClassifier()
clf.fit(train_feature_matrix, train_labels)
pred_labels = clf.predict(test_feature_matrix)
accuracy_score(test_labels, pred_labels)
# ### Question 1:
# * What accuracy did you get?
# Let's tune the parameters of our model
# * Grid-search the number-of-neighbours parameter from `1` to `10`
#
# * Also try different metrics: `['manhattan', 'euclidean']`
#
# * Try different weighting strategies: `['uniform', 'distance']`
# In[66]:
from sklearn.model_selection import GridSearchCV
params = {'weights': ["uniform", "distance"], 'n_neighbors': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 'metric': ["manhattan", "euclidean"]}
clf_grid = GridSearchCV(clf, params, cv=5, scoring='accuracy', n_jobs=-1)
clf_grid.fit(train_feature_matrix, train_labels)
# Print the best parameters
# In[23]:
clf_grid.best_params_
# ### Question 2:
# * Which metric should be used?
# In[67]:
print(clf_grid.best_params_['metric'])
# ### Question 3:
# * What value of n_neighbors should be used?
# In[68]:
print(clf_grid.best_params_['n_neighbors'])
# ### Question 4:
# * Which weights type should be used?
# In[69]:
print(clf_grid.best_params_['weights'])
# Using the optimal number of neighbours you found, compute the class membership probabilities for the test set (`.predict_proba`).
# In[43]:
optimal_clf = KNeighborsClassifier(n_neighbors = 10)
optimal_clf.fit(train_feature_matrix, train_labels)
pred_prob = optimal_clf.predict_proba(test_feature_matrix)
# In[50]:
import matplotlib.pyplot as plt
import numpy as np
unique, freq = np.unique(test_labels, return_counts=True)
freq = list(map(lambda x: x / len(test_labels),freq))
pred_freq = pred_prob.mean(axis=0)
plt.figure(figsize=(10, 8))
plt.bar(range(1, 8), pred_freq, width=0.4, align="edge", label='prediction')
plt.bar(range(1, 8), freq, width=-0.4, align="edge", label='real')
plt.legend()
plt.show()
# ### Question 5:
# * What is the predicted probability pred_freq for class number 3 (to 2 decimal places)?
# In[56]:
round(pred_freq[2], 2)
|
[
"eaagekyan@mail.ru"
] |
eaagekyan@mail.ru
|
dff4e2d8383c878901837392bdfdc2ac9f6ec193
|
01b174952357069b2330bcfb4760a7f81817c12d
|
/JamStats.py
|
57b579dbdfd735eefd9d4683f854539314f099ea
|
[] |
no_license
|
jostaylor/Music_Playlist_Search_Engine_and_Stats
|
86db0c2d5c5aee67196d4eade645e78a84dde927
|
925781efd0851f7809f4717e007e375445d22a7b
|
refs/heads/main
| 2023-02-26T22:18:42.197047
| 2021-02-09T22:39:27
| 2021-02-09T22:39:27
| 337,553,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,633
|
py
|
# This program finds different statistics about Dueber and David's Daily Jams
import tkinter
import math
import time
from datetime import datetime
import requests
from bs4 import BeautifulSoup
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
# Asks user where they are
'''
location = 0
while location != 1 and location != 2 and location != 3 and location != 4:
location = int(input("Are you at\n1)The School\n2)Your moms house\n3)On your laptop"))
if location == 1:
# Opens folder
jamList = open("H:\\PythonStuff\\CanopyFiles\\JamList.txt", "r")
elif location == 2:
# Opens folder
jamList = open("C:\\Users\\Josh\\Desktop\\Everything\\Music and Movies\\JamList.txt", "r")
elif location == 3:
# Opens folder
jamList = open("C:\\Users\\josha\\Desktop\\Everything\\Music and Movies\\JamList.txt", "r")
else:
print("Please choose one of the options listed")
print(location)
'''
# Above code has been removed because the text file is now in the same folder (for Github)
jamList = open("JamList.txt", "r")
# Declares empty arrays
songNames = []
songArtists = []
songLengths = []
songDateAdded = []
'''Reads the jam list file and inserts data into arrays'''
# Iterates through file and adds file info to the arrays
count = 0
for line in jamList:
if count == 0:
songNames.append(line)
elif count == 1:
songArtists.append(line)
elif count == 2:
songLengths.append(line)
elif count == 3:
songDateAdded.append(line)
if count < 4:
count += 1
else:
count = 0
listOfJamsAndYears = open("C:\\Users\\josha\\Desktop\\Everything\\Music and Movies\\JamYearsFileCompleted.txt", "r")
yearCount = 0
songYears = []
for line in listOfJamsAndYears:
if yearCount == 1:
songYears.append(line)
if yearCount >= 2:
yearCount = 0
else:
yearCount += 1
'''Takes out the '\n' from each value in the arrays'''
# Loop that iterates through all 4 arrays
for i in range(len(songNames)):
# Replaces song names
fixedName = songNames[i].replace("\n", "")
songNames[i] = fixedName
# Replaces song artists
fixedArtist = songArtists[i].replace("\n", "")
songArtists[i] = fixedArtist
# Replaces song lengths
fixedLength = songLengths[i].replace("\n", "")
songLengths[i] = fixedLength
# Replaces song dates
fixedDateAdded = songDateAdded[i].replace("\n", "")
songDateAdded[i] = fixedDateAdded
fixedYear = songYears[i].replace("\n", "")
songYears[i] = fixedYear
def getDurationOfJamsBetween(startIndex, endIndex):
# Iterates through the songLengths array and accumulates values
totalSongTimeInSeconds = 0
for i in range(endIndex-startIndex):
minutes = int(songLengths[startIndex+i][0])
seconds = int(songLengths[startIndex+i][2:])
songDurationInSeconds = (minutes*60) + seconds
totalSongTimeInSeconds += songDurationInSeconds
# Calculates extra time in minutes and seconds
totalSongTimeInHours = float(totalSongTimeInSeconds) / 3600
totalSongTimeExtraMinutes = totalSongTimeInHours - int(totalSongTimeInHours)
totalSongTimeExtraMinutes *= 60
totalSongTimeExtraSeconds = totalSongTimeExtraMinutes - int(totalSongTimeExtraMinutes)
totalSongTimeExtraSeconds *= 60
# Converts to integers; these are the correct values
totalSongTimeInHours = int(totalSongTimeInHours)
totalSongTimeExtraMinutes = int(totalSongTimeExtraMinutes)
totalSongTimeExtraSeconds = int(totalSongTimeExtraSeconds)
# Puts values into array
totalDuration = [totalSongTimeInHours, totalSongTimeExtraMinutes, totalSongTimeExtraSeconds]
return totalDuration
'''Finds average length of the jams'''
# Gets total time in hours
totalMinutes = getDurationOfJamsBetween(0, len(songLengths))[1] + \
(float(getDurationOfJamsBetween(0, len(songLengths))[2]) / 60)
totalTimeInHours = getDurationOfJamsBetween(0, len(songLengths))[0] + totalMinutes / 60
# Gets average length of jams
totalTimeInSeconds = totalTimeInHours * 3600
averageJamLengthInSeconds = totalTimeInSeconds / len(songLengths)
averageJamLengthInSeconds /= 60 # Average length of jam in minutes
aslString = str(averageJamLengthInSeconds)
# Converts to string to isolate decimal to and converts to minute and seconds form
extraAverageSeconds = aslString[1:]
totalExtraAverageSeconds = float(extraAverageSeconds) * 60 # Average length (seconds)
totalAverageMinutes = int(aslString[:1]) # Average length (minutes)
'''Sorts the jams in order of length'''
# Puts the song lengths array into seconds
songLengthsInSeconds = []
for time in songLengths:
minutes = time[:1]
seconds = int(minutes) * 60
seconds += int(time[2:])
songLengthsInSeconds.append(seconds)
# Sorts the indexes of the song lengths array
songLengthIndexes = sorted(list(range(len(songLengthsInSeconds))), key=lambda k: songLengthsInSeconds[k])
# Sorts the jam titles in a new array
songNamesSortedByTime = []
for i in range(len(songNames)):
songNamesSortedByTime.append(songNames[songLengthIndexes[i]])
# Sorts the jam lengths in a new array
songLengthsSortedByTime = []
for i in range(len(songLengths)):
songLengthsSortedByTime.append(songLengths[songLengthIndexes[i]])
'''Sorts the jams in order of the amount of songs each artist has on jams'''
# Sorts the artist alphabetically
songArtistsSorted = sorted(songArtists)
# Puts them into a array without repeats and finds how many song each artist has
artistWithoutRepeats = [] # Each item is a list of two values: [song artist, how many songs]
for artist in songArtistsSorted: # Iterates through all the songs
for newArtist in artistWithoutRepeats: # Iterates through all songs already iterated
if newArtist[0] == artist:
newArtist[1] += 1
break
else:
artistWithoutRepeats.append([artist, 1])
# Sorts artistWithoutRepeats array by the amount of songs each artist has
for i in range(len(artistWithoutRepeats)-1, 0, -1):
for j in range(i):
if artistWithoutRepeats[j][1] < artistWithoutRepeats[j+1][1]:
temp = artistWithoutRepeats[j+1]
artistWithoutRepeats[j+1] = artistWithoutRepeats[j]
artistWithoutRepeats[j] = temp
'''Sorts the jams into what year they were released chronologically'''
# Sorts indexes of songYears array (puts them in order)
songYearsIndexes = sorted(list(range(len(songYears))), key=lambda k: songYears[k])
#print songYearsIndexes
# Sorts song names into a new array
songNamesSortedByYear = []
for i in range(len(songYears)):
songNamesSortedByYear.append(songNames[songYearsIndexes[i]])
# Sorts song years into a new array
songYearsSortedByYear = []
for i in range(len(songYears)):
songYearsSortedByYear.append(songYears[songYearsIndexes[i]])
# Finds which year has the most jams
# PRACTICALLY USELESS NOT SINCE I KNOW THE ANSWER
mostYears = 0
mostYearsYear = 0
currentYearCount = 1
for i in range(len(songYearsSortedByYear)):
if i == 0:
currentYear = songYearsSortedByYear[i]
mostYears += 1
currentYearCount = 1
else:
if songYearsSortedByYear[i] == songYearsSortedByYear[i-1]:
currentYearCount += 1
else:
currentYearCount = 1
if currentYearCount > mostYears:
mostYears += 1
mostYearsYear = songYearsSortedByYear[i]
'''Calculates other info about the jams in relation to the year a song was released'''
# Calculates average year
bigAssInt = 0
for i in range(len(songYears)):
bigAssInt += int(songYears[i])
jamAvgYear = bigAssInt / len(songYears)
# Gets median year
jamMedianYear = songYearsSortedByYear[int(((len(songYearsSortedByYear)) / 2) + 0.5)]
# Calculates how many songs there are from each decade
jamsReleasedInThe60s = []
jamsReleasedInThe70s = []
jamsReleasedInThe80s = []
jamsReleasedInThe90s = []
jamsReleasedInThe00s = []
for i in range(len(songNames)):
if songYears[i][2] == '6':
jamsReleasedInThe60s.append([i+1, songNames[i], songYears[i]])
if songYears[i][2] == '7':
jamsReleasedInThe70s.append([i+1, songNames[i], songYears[i]])
if songYears[i][2] == '8':
jamsReleasedInThe80s.append([i + 1, songNames[i], songYears[i]])
if songYears[i][2] == '9':
jamsReleasedInThe90s.append([i+1, songNames[i], songYears[i]])
if songYears[i][2] == '0':
jamsReleasedInThe00s.append([i+1, songNames[i], songYears[i]])
'''Finds how often a jam is added to the playlist during different eras'''
def getRateJamsWereAdded(firstDateIndex, lastDateIndex):
"""Finds the rate the jams were added between two points.
Returns a list with two value [rate, totalDays]"""
# NOTE: Pre-summer jams are jams 1-46. Summer jams are 47-81. The new school year is 82-present
# Gets the first and last date
firstDate = songDateAdded[firstDateIndex]
lastDate = songDateAdded[lastDateIndex]
# Finds the difference in time between the first and last dates
years = int(lastDate[:4]) - int(firstDate[:4])
months = int(lastDate[5:7]) - int(firstDate[5:7])
days = int(lastDate[8:]) - int(firstDate[8:])
# Converts the total time into days
totalDays = 0
totalDays += (years * 365.25)
totalDays += (months * 30.42)
totalDays += days
# Gets the final rate
jamAddedRate = totalDays / ((lastDateIndex - firstDateIndex) + 1)
return [jamAddedRate, totalDays]
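# Hedged alternative sketch (not used anywhere in this script): the same day count
# can be computed exactly with the standard library instead of the 365.25/30.42
# approximations above. It relies on the same YYYY-MM-DD slicing as getRateJamsWereAdded.
def getExactDaysBetween(firstDateIndex, lastDateIndex):
    """Exact number of days between two jam-added dates."""
    from datetime import date
    first, last = songDateAdded[firstDateIndex], songDateAdded[lastDateIndex]
    firstDay = date(int(first[:4]), int(first[5:7]), int(first[8:]))
    lastDay = date(int(last[:4]), int(last[5:7]), int(last[8:]))
    return (lastDay - firstDay).days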
'''Organizes the jams based on David's and Ethan's Jams'''
# Iterates through the songNames and put them into two lists
ethansJams = []
davidsJams = []
for i in range(len(songNames)):
# Adds Take Me Home Tonight to both lists because it was a mutual jam
if i == 0:
ethansJams.append(songNames[i])
davidsJams.append(songNames[i])
elif i % 2 == 0:
davidsJams.append(songNames[i])
else:
ethansJams.append(songNames[i])
'''Makes the search engine where one can search a jam or an artist and find info'''
def searchEngine(inpt):
# Creates empty lists and puts input to lowercase
songResults = []
artistResults = []
inpt = inpt.lower()
# Goes through song names
for i in range(len(songNames)): # Iterates through all the songs
for j in range(0, len(songNames[i]) - (len(inpt) - 1)): # Iterates through each character of the song, but
# subtracts the length of the input to avoid an index out of range error
if songNames[i][j:(j+len(inpt))].lower() == inpt: # Checks to see if the section (that is the length of
# the input) is the same of the input
songResults.append(songNames[i]) # Adds to the results
break
# Goes through artist names (copy and paste of above)
for i in range(len(songArtists)): # Iterates through all the artists
for j in range(0, len(songArtists[i]) - (len(inpt) - 1)): # Iterates through each character of the artist,
# but subtracts the length of the input to avoid an index out of range error
if songArtists[i][j:(j+len(inpt))].lower() == inpt: # Checks to see if the section (that is the length
# of the input) is the same of the input
for result in artistResults: # Iterates through results. This process gets rid of artist repeats
if songArtists[i] == result:
break
else:
artistResults.append(songArtists[i]) # Adds to the results
break
# Combines the two results arrays
totalResults = []
for song in songResults:
totalResults.append(song)
for artist in artistResults:
totalResults.append(artist)
if len(totalResults) == 0:
print("\nNo results were returned")
print("Please input again")
response = input("")
if response.lower() == 'end':
return None
else:
searchEngine(response)
# Prints all the results
print("\nHere are the results. Enter the number of the jam or artist you would like to search.")
print("Jams:")
for i in range(len(songResults)):
print("%d: %s" % (i+1, songResults[i]))
if len(songResults) == 0:
print("No results")
print("\nArtists:")
for i in range(len(artistResults)):
print("%d: %s" % (len(songResults) + i+1, artistResults[i]))
if len(artistResults) == 0:
print("No results")
# Handles input and runs one of the getInfo methods
while True:
response = int(input("\n"))
if len(songResults) >= response > 0:
getSongInfo(totalResults[response - 1])
break
elif len(songResults) < response <= len(totalResults):
getArtistInfo(totalResults[response - 1])
break
else:
print('Please enter a valid value.')
def getSongInfo(song):
"""Gets this info: The artist, the duration of the song, who it was added
by, the date it was added, the number jam it is, how long the jams were when
this song was added, the jams added before and after this jam, the other
jams by that artist"""
# Gets song index
songIndex = 0
for i in range(len(songNames)):
if song == songNames[i]:
songIndex = i
break
# Gets song duration
songLength = songLengths[songIndex]
songDurationInMinutes = int(songLength[0])
songDurationInSeconds = int(songLength[2:])
# Gets the date and who added it
if songIndex % 2 == 0:
whoAddedSong = "David"
else:
whoAddedSong = "Ethan"
dateSongAdded = songDateAdded[songIndex]
# Gets other songs by that artist
artist = songArtists[songIndex]
otherSongsByArtist = ""
for i in range(len(songArtists)):
if i == songIndex:
continue
elif songArtists[i] == artist:
otherSongsByArtist += "%s, " % songNames[i]
otherSongsByArtist = otherSongsByArtist[:-2]
# Prints information
print(song)
print("Artist: %s" % songArtists[songIndex])
print("Released: %s" % songYears[songIndex])
print("Duration: %d minutes and %d seconds" % (songDurationInMinutes, songDurationInSeconds))
print("Jam #%d" % (songIndex + 1))
print("Added by: %s" % whoAddedSong)
print("Date Added: %s" % dateSongAdded)
print("Other jams by %s: %s" % (artist, otherSongsByArtist))
print("Duration of the jams after song was added: %d hours, %d minutes, and %d seconds" % \
(getDurationOfJamsBetween(0, songIndex + 1)[0], getDurationOfJamsBetween(0, songIndex + 1)[1],
getDurationOfJamsBetween(0, songIndex + 1)[2]))
# These if statements prevent errors involving the first and last jams
if songIndex != 0:
print("Song that came before %s: %s" % (song, songNames[songIndex-1]))
if songIndex != len(songNames) - 1:
print("Song that came after %s: %s" % (song, songNames[songIndex+1]))
else:
print("Fun Fact: This is the most recently added jam!")
# Prints prompt for next search
print("\nSearch for another jam or artist.. (Enter \'end\' to quit to the main menu)")
response = input("")
if response.lower() != 'end':
searchEngine(response)
def getArtistInfo(artist):
"""Gets the info: How many jams the artist has, what the jams are and their
dates added and duration, who added each jam of this artist, the amount of
time in days between each jam, """
songsByArtist = []
songDurationsByArtist = []
songDatesAddedByArtist = []
whoAddedSongByArtist = []
songIndexesByArtist = []
for i in range(len(songArtists)):
if songArtists[i] == artist:
songsByArtist.append(songNames[i])
songDurationsByArtist.append(songLengths[i])
songDatesAddedByArtist.append(songDateAdded[i])
songIndexesByArtist.append(i)
if i % 2 == 0:
whoAddedSongByArtist.append("David")
else:
whoAddedSongByArtist.append("Ethan")
# Makes new duration list that is a list of lists with 2 values (minutes and seconds for each value
songDurationsByArtist2dList = [] # [Minutes, seconds]
for time in songDurationsByArtist:
songDurationsByArtist2dList.append([int(time[0]), int(time[2:])])
# Prints the information
print("%s has %d song(s):" % (artist, len(songsByArtist)))
for i in range(len(songsByArtist)):
print("%s:" % songsByArtist[i])
print("\tJam #%d" % (songIndexesByArtist[i]))
print("\tJam Added by: %s" % (whoAddedSongByArtist[i]))
print("\tDuration: %d minutes and %s seconds" % (songDurationsByArtist2dList[i][0],
songDurationsByArtist2dList[i][1]))
print("\tReleased: %s" % songYears[songIndexesByArtist[i]])
print("\tDate Added: %s" % (songDatesAddedByArtist[i]))
# Prints prompt for next search
print("\nSearch for another jam or artist.. (Enter \'end\' to quit to the main menu)")
response = input("")
if response.lower() != 'end':
searchEngine(response)
'''Tkinter code'''
'''
window = Tkinter.Tk()
inputString = Tkinter.StringVar()
inputString.set("")
speed_intvar = Tkinter.IntVar()
speed_intvar.set(5)
stopAnimation = Tkinter.BooleanVar()
stopAnimation.set(False)
direction = 0.5
def stopAnimate():
stopAnimation.set(True)
# Creates the layout
jam_entry = Tkinter.Entry(window, textvariable=inputString)
jam_entry.grid(row=1, column=3)
entry_label = Tkinter.Label(window, text="Input a jam or artist here:")
entry_label.grid(row=0, column=3)
search_button = Tkinter.Button(window, text='Search', command=stopAnimate)
search_button.grid(row=1, column=4)
canvas = Tkinter.Canvas(window, width=500, height=500, background="black")
canvas.grid(row=2, column=1, columnspan=5)
theJamsLogoBackground = canvas.create_rectangle(200, 250, 362, 300, fill="black")
theJamsLogoText = canvas.create_text(280, 280, text="THE JAMS", font=("Comic Sans", 25), fill='blue')
# Animates the Jams logo
def animateLogo():
# Get the slider data and create x- and y-components of velocity
velocity_x = speed_intvar.get() * math.cos(direction) # adj = hyp*cos()
velocity_y = speed_intvar.get() * math.sin(direction) # opp = hyp*sin()
# Change the canvas item's coordinates
canvas.move(theJamsLogoText, velocity_x, velocity_y)
canvas.move(theJamsLogoBackground, velocity_x, velocity_y)
x1, y1, x2, y2 = canvas.coords(theJamsLogoBackground)
global direction
# If crossing left or right of canvas
if x2 > canvas.winfo_width() or x1 < 0:
direction = math.pi - direction # Reverse the x-component of velocity
# If crossing top or bottom of canvas
if y2 > canvas.winfo_height() or y1 < 0:
direction *= -1 # Reverse the y-component of velocity
# Insert a break for when screen changes
if stopAnimation.get() is 0:
canvas.after(1, animateLogo)
animateLogo()
window.mainloop()
'''
'''Prints info and displays interface'''
# Prints options
choice = 0
while choice != 69:
print("\nWhat would you like to know about the jams?")
print("1) General Info")
print("2) The Jams in chronological order")
print("3) The Jams sorted by the song length")
print("4) The Jams sorted by the amount of songs each artist has")
print("5) The Jams sorted by who added each jam")
print("6) Search info for a jam or an artist")
print("7) The Jams sorted by year released")
print("8) Search Years Online (cAreful here)")
print("69) Exit")
choice = int(input())
print("") # new line
# Puts options with the print code
if choice == 1:
# Prints the info of the jams
print("There are %d jams as of %s" % (len(songNames), songDateAdded[len(songDateAdded) - 1]))
print("The total length of the jams is %d hours, %d minutes, and %d seconds." % \
(getDurationOfJamsBetween(0, len(songLengths))[0], getDurationOfJamsBetween(0, len(songLengths))[1],
getDurationOfJamsBetween(0, len(songLengths))[2]))
print("The average jam is %d minutes and %d seconds" % (totalAverageMinutes, totalExtraAverageSeconds))
print("Over %d days, 1 jam was added every %f days" % (getRateJamsWereAdded(0, len(songDateAdded) - 1)[1],
getRateJamsWereAdded(0, len(songDateAdded) - 1)[0]))
# TODO the values above and below must be altered when the jams are no longer in session :_(
schoolYearRate = (getRateJamsWereAdded(0, 45)[0] + getRateJamsWereAdded(81, len(songDateAdded) - 1)[0]) / 2
print("\tDuring the school year, 1 jam was added every %f days." % schoolYearRate)
print("\tHowever, during the summer (over the course of %d days), 1 jam was added every %f days"\
% (getRateJamsWereAdded(46, 80)[1], getRateJamsWereAdded(46, 80)[0]))
print("Amount of songs released in the 60s: %d" % (len(jamsReleasedInThe60s)))
print("Amount of songs released in the 70s: %d" % (len(jamsReleasedInThe70s)))
print("Amount of songs released in the 80s: %d" % (len(jamsReleasedInThe80s)))
print("Amount of songs released in the 90s: %d" % (len(jamsReleasedInThe90s)))
print("Amount of songs released in the 00s: %d" % (len(jamsReleasedInThe00s)))
print("The years with the most songs on the Jams are 1970 and 1972, both with 17 jams")
print("The average of all the years is the same as the median being %d" % (jamAvgYear))
# TODO who (david and ethan) added songs from what years.
elif choice == 2:
for i in range(len(songNames)):
print("%d. %s (%s)" % (i+1, songNames[i], songLengths[i]))
elif choice == 3:
for i in range(len(songNamesSortedByTime)):
print("%s (%s)" % (songNamesSortedByTime[i], songLengthsSortedByTime[i]))
elif choice == 4:
for i in range(len(artistWithoutRepeats)): # Iterates through each artist
# Finds all the songs the artist has in the playlist
songsOfArtist = []
for artistIndex in range(len(songArtists)):
if artistWithoutRepeats[i][0] == songArtists[artistIndex]:
songsOfArtist.append(songNames[artistIndex])
# Puts all the songs in songsOfArtist into one string
songsOfArtistString = ""
for j in range(len(songsOfArtist)):
if j < len(songsOfArtist)-1:
songsOfArtistString += "%s, " % (songsOfArtist[j])
else:
songsOfArtistString += songsOfArtist[j]
# Prints the final product
print("%s[%d]:" % (artistWithoutRepeats[i][0], artistWithoutRepeats[i][1],))
print(songsOfArtistString, "\n")
elif choice == 5:
# Prints David's Jams
print("David's Jams")
for i in range(len(davidsJams)):
print("%d: %s" % ((i*2) + 1, davidsJams[i]))
# Prints Ethan's Jams
print("\nEthan's Jams")
print("1: Take Me Home Tonight")
for i in range(1, len(ethansJams)):
print("%d: %s" % ((i*2), ethansJams[i]))
elif choice == 6:
searchEngine(input("Input a song or artist\n"))
elif choice == 7:
for i in range(len(songYears)):
print("%d: %s (%s)" % (i+1, songNamesSortedByYear[i], songYearsSortedByYear[i]))
elif choice == 8:
'''Gets the year each song came out'''
jamYearsFile = open('C:\\Users\\josha\\Desktop\\Everything\\Music and Movies\\JamYearsFile.txt', 'w')
# Tracks the amount of time this damn process takes
start_time = datetime.now()
print(start_time)
# Declares lists
songYears = []
yearsToCheck = []
searchesThatDontWork = []
# Loops through each song in songNames
for i in range(len(songNames)):
jamYearsFile.write(songNames[i])
jamYearsFile.write("\n")
# Forms a string that is safe and good to search via url
songName = songNames[i].replace(" ", "_")
songName = songName.replace("'", "")
songName = songName.replace("(", "_")
songName = songName.replace(")", "_")
songName = songName.replace("&", "and")
songArtist = songArtists[i].replace(" ", "_")
songArtist = songArtist.replace("'", "")
songArtist = songArtist.replace("(", "_")
songArtist = songArtist.replace(")", "_")
songArtist = songArtist.replace("&", "and")
# Creates url
url = "https://www.google.com/search?q=" + songName + '+by+' + songArtist
# Web-scrapes the url via html
r = requests.get(url)
data = r.text
soup = BeautifulSoup(data, "lxml")
# print which song we are on and creates itWorked boolean
print(i+1, url)
itWorked = False
# Finds the little pieces of html that most likely have the year
for node in soup.find_all("span", "_tA"):
year = node.contents
year = str(year)
# Narrows down to the year a little more
if len(year) == 9:
# Checks to see if the song is from the 90s or 2000s and organizes them separately
if year[3] == '2' or year[5] == '9':
yearsToCheck.append([i+1, songNames[i], year[3:7]])
itWorked = True
# Effectively grabs the year and organizes it
doubleResultCheckerThatIsSupposedToFixit = False
if year[3] == '1' or year[3] == '2': # This finds the year
if doubleResultCheckerThatIsSupposedToFixit is True:
continue
itWorked = True
print(i+1, year, url, songNames[i])
doubleResultCheckerThatIsSupposedToFixit = True
jamYearsFile.write(year[3:7])
jamYearsFile.write("\n")
jamYearsFile.write("\n")
# Checks if no year was found in the song search
if itWorked is False:
searchesThatDontWork.append([i+1, songNames[i], url])
songYears.append('didnt work')
print(songNames[i], "didn't work")
# Updates the time
print((datetime.now() - start_time))
# Prints the full list of songs and years
print("songNames length: %d \nsongYears length: %d \n songYearsToCheck length: %d" \
% (len(songNames), len(songYears), len(yearsToCheck)))
print('\nFull list:')
for i in range(len(songNames) - 1):
print("%d) %s: %s" % (i+1, songNames[i], songYears[i]))
# Prints the jams to check from 90s and 2000s
print('\nYears to check:')
for i in range(len(yearsToCheck)):
print(yearsToCheck[i])
# Prints the year searches that didn't work
print("\nsearches that didn't work:")
for i in range(len(searchesThatDontWork)):
print(searchesThatDontWork[i])
elif choice == 69:
print("ok bye bye")
else:
print("Not an option")
|
[
"joshataylor99@gmail.com"
] |
joshataylor99@gmail.com
|
acda7499126a87857ce83af8d1d80462994ae4b7
|
159c89a4939f263f978b8224eca171bff18b5b06
|
/application/db.py
|
2a73d84db0c43db72b5e506b11660df2f13fa06f
|
[] |
no_license
|
Leovim/lend
|
c6ae68bda102221b11e925e930a53e006df429e4
|
34af793978bc780e4427f188930bb9c19800060f
|
refs/heads/master
| 2020-05-09T22:00:41.537997
| 2014-04-04T10:18:29
| 2014-04-04T10:18:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,940
|
py
|
#coding=utf8
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from config import options
Base = declarative_base()
engine = create_engine(options.sqlalchemy, encoding="utf-8", pool_recycle=3600)
class User(Base):
__tablename__ = 'user'
user_id = Column(Integer, primary_key=True)
username = Column(String(20))
password = Column(String(40))
phone = Column(String(11))
real_name = Column(String(5))
bank_number = Column(String(20))
alipay_number = Column(String(40))
credit = Column(Integer)
avatar = Column(String(50))
status = Column(Integer)
identify_number = Column(String(20))
school = Column(String(10))
department = Column(String(20))
major = Column(String(20))
student_id = Column(String(15))
dorm = Column(String(20))
pic1 = Column(String(50))
pic2 = Column(String(50))
pic3 = Column(String(50))
pic4 = Column(String(50))
def __init__(self, username, password, phone, real_name=None,
bank_number=None, alipay_number=None, credit=1, avatar=None,
status=0, identify_number=None, school=None, department=None,
major=None, student_id=None, dorm=None, pic1=None, pic2=None,
pic3=None, pic4=None):
self.username = username
self.password = password
self.phone = phone
self.real_name = real_name
self.bank_number = bank_number
self.alipay_number = alipay_number
self.credit = int(credit)
self.avatar = avatar
self.status = status
self.identify_number = identify_number
self.school = school
self.department = department
self.major = major
self.student_id = student_id
self.dorm = dorm
self.pic1 = pic1
self.pic2 = pic2
self.pic3 = pic3
self.pic4 = pic4
def __repr__(self):
return "<User('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s'," \
" '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', " \
"'%s')>" % \
(self.user_id, self.username, self.password, self.phone,
self.real_name, self.bank_number, self.alipay_number,
self.credit, self.avatar, self.status, self.identify_number,
self.school, self.department, self.major, self.student_id,
self.dorm, self.pic1, self.pic2, self.pic3, self.pic4)
def as_dict(self):
c = dict()
for item in self.__table__.columns:
c[item.name] = getattr(self, item.name)
return c
class Guarantee(Base):
__tablename__ = 'guarantee'
guarantee_id = Column(Integer, primary_key=True)
guarantor_id = Column(Integer)
warrantee_id = Column(Integer)
status = Column(Integer)
def __init__(self, guarantor_id, warrantee_id, status):
self.guarantor_id = int(guarantor_id)
self.warrantee_id = int(warrantee_id)
self.status = int(status)
def __repr__(self):
return "<Guarantee('%s', '%s', '%s')>" % (self.guarantor_id,
self.warrantee_id,
self.status)
def as_dict(self):
c = dict()
for item in self.__table__.columns:
c[item.name] = getattr(self, item.name)
return c
class Loan(Base):
__tablename__ = 'loan'
loan_id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('user.user_id'))
guarantor1 = Column(Integer, ForeignKey('guarantee.guarantor_id'))
guarantor2 = Column(Integer, ForeignKey('guarantee.guarantor_id'))
loan_amount = Column(Integer)
remain_amount = Column(Float)
loan_date = Column(String(20))
due_date = Column(String(20))
split_status = Column(Integer)
due_status = Column(Integer)
check_status = Column(Integer)
def __init__(self, user_id, guarantor1, guarantor2, loan_amount,
remain_amount, loan_date, due_date, split_status, due_status,
check_status):
self.user_id = int(user_id)
self.guarantor1 = guarantor1
self.guarantor2 = guarantor2
self.loan_amount = int(loan_amount)
self.remain_amount = int(remain_amount)
self.loan_date = loan_date
self.due_date = due_date
self.split_status = int(split_status)
self.due_status = int(due_status)
self.check_status = int(check_status)
def __repr__(self):
return "<Loan('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', " \
"'%s', '%s')>" %\
(self.loan_id, self.user_id, self.guarantor1, self.guarantor2,
self.loan_amount, self.remain_amount, self.loan_date,
self.due_date, self.split_status, self.due_status,
self.check_status)
def as_dict(self):
c = dict()
for item in self.__table__.columns:
c[item.name] = getattr(self, item.name)
return c
class Behaviour(Base):
__tablename__ = 'behaviour'
behaviour_id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('loan.user_id'))
loan_id = Column(Integer, ForeignKey('loan.loan_id'))
bhv_type = Column(Integer)
money = Column(Float)
time = Column(String(20))
check_status = Column(Integer)
def __init__(self, user_id, loan_id, bhv_type, money, time, check_status):
self.user_id = int(user_id)
self.loan_id = int(loan_id)
self.bhv_type = int(bhv_type)
self.money = money
self.time = time
self.check_status = int(check_status)
def __repr__(self):
return "<Behaviour('%s', '%s', '%s', '%s', '%s', '%s', '%s')>" % \
(self.behaviour_id, self.user_id, self.loan_id, self.bhv_type,
self.money, self.time, self.check_status)
def as_dict(self):
c = dict()
for item in self.__table__.columns:
c[item.name] = getattr(self, item.name)
return c
class SplitLoan(Base):
__tablename__ = 'split_loan'
split_id = Column(Integer, primary_key=True)
loan_id = Column(Integer, ForeignKey('loan.loan_id'))
total_time = Column(Integer)
interval_due = Column(Integer)
amount_per = Column(Float)
next_date = Column(String(20))
def __init__(self, loan_id, total_time, interval_due, amount_per,
next_date):
self.loan_id = int(loan_id)
self.total_time = int(total_time)
self.interval_due = int(interval_due)
self.amount_per = amount_per
self.next_date = next_date
def __repr__(self):
return "<Behaviour('%s', '%s', '%s', '%s', '%s', '%s')>" % \
(self.split_id, self.loan_id, self.total_time,
self.interval_due, self.amount_per, self.next_date)
def as_dict(self):
c = dict()
for item in self.__table__.columns:
c[item.name] = getattr(self, item.name)
return c
class Pay(Base):
__tablename__ = 'pay'
pay_id = Column(Integer, primary_key=True)
loan_id = Column(Integer, ForeignKey('loan.loan_id'))
type = Column(String(40))
amount = Column(Float)
check_status = Column(Integer)
date = Column(String(20))
def __init__(self, loan_id, type, amount, check_status, date):
self.loan_id = int(loan_id)
self.type = type
self.amount = amount
self.check_status = check_status
self.date = date
def __repr__(self):
return "<Behaviour('%s', '%s', '%s', '%s', '%s', '%s')>" % \
(self.pay_id, self.loan_id, self.type,
self.amount, self.check_status, self.date)
def as_dict(self):
c = dict()
for item in self.__table__.columns:
c[item.name] = getattr(self, item.name)
return c
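if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module; the field values below
    # are placeholders): create the tables on the configured engine and run a simple
    # query through a session.
    from sqlalchemy.orm import sessionmaker
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    session.add(User(username='demo', password='secret', phone='13800000000'))
    session.commit()
    print(session.query(User).filter_by(username='demo').first())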
|
[
"changtong1993@gmail.com"
] |
changtong1993@gmail.com
|
d315f5bcb6c815382fec9c8e4b88c18bba852efd
|
f6b626820cb9911b2abf232ec4d220351cc11cf7
|
/Обучение/Стоп слово.py
|
f5d1e472dbdc506ee5e39183d382cfc61a277401
|
[] |
no_license
|
aaskorohodov/Learning_Python
|
fc6b5889a8b481842fb7c2dd54aafbde32c2f005
|
d736ff0d8c7b3b40de23c92cfe750762863cedc8
|
refs/heads/master
| 2023-07-26T14:13:04.353880
| 2021-08-31T11:43:53
| 2021-08-31T11:43:53
| 390,396,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
words = list(input().split())
c = 0
while c < len(words) and words[c] != "stop" :
print(words[c])
c += 1
for x in words:
if x == "stop" or x == "asd":
break
print(x)
|
[
"aaskorohodov@gmail.com"
] |
aaskorohodov@gmail.com
|
4de0b26402d5cf3518aba5692c0dd3420af8fb04
|
fbf546c2896c6cd6805f1cbf10a54a3f9e0ba8f0
|
/assignment.py
|
fd39e95534357a883b3171582ade5c8acdd3f887
|
[] |
no_license
|
YouHyeJi/assignment
|
c4584107718eb6c3d206950daf090a3208c43b6b
|
a2c24a6f8a49e09eea273e63fdcd60c421008860
|
refs/heads/master
| 2020-05-05T14:03:02.078662
| 2019-04-08T08:34:02
| 2019-04-08T08:34:02
| 180,104,898
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 936
|
py
|
class contact:
def __init__(self, name, phone_number, sex):
self.name = name
self.phone_number = phone_number
self.sex = sex
result = []
while True:
name = input("이름을 입력하세요 : ")
if name == "그만":
for i in result:
print("이름은" + i.name + "전화번호는" + i.phone_number + "성별은" + i.sex)
break
phone_number = input("전화번호를 입력하세요 : ")
sex = input("성별을 입력하세요(male이나 female로 작성해주세요) :")
# if sex == "male" or sex == "female" :
if sex != "male" and sex != "female" :
sex = "unknown"
# print("이름은" + name + "전화번호는" + phone_number + "성별은" + sex)
person = contact(name, phone_number, sex)
result.append(person)
for i in result:
print("이름은" + i.name + "전화번호는" + i.phone_number + "성별은" + i.sex)
|
[
"gpwl0773@gmail.com"
] |
gpwl0773@gmail.com
|
8c83aff26eee45cf733ddd482d1d776cd006d303
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/sdssj_112500.19-074112.7/sdB_SDSSJ_112500.19-074112.7_lc.py
|
c5af2aeaea090865c4a951f0aa7f46d762d25355
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[171.250792,-7.686861], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_SDSSJ_112500.19-074112.7 /sdB_SDSSJ_112500.19-074112.7_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
f0c1510eecf19b8fca78dd77970e528b1b07f019
|
79122ab4de578e1758e30db113aa7d6ae26ef15e
|
/neuraltoolkit/findspikesfromraw.py
|
088637889300b523f99f00b85a1fc9ec22023f4a
|
[] |
no_license
|
kasterlevi/neuraltoolkit
|
ac32533a9ee7362ea107fd4c97cb162585682054
|
922144df19e7849b7e92411669cb8a6cc43ba0ed
|
refs/heads/master
| 2023-08-21T00:13:35.848682
| 2021-10-22T00:01:36
| 2021-10-22T00:01:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,676
|
py
|
import neuraltoolkit as ntk
import numpy as np
import matplotlib.pyplot as plt
# rawfile = '/Volumes/bs001r/users/EAB_09-12/EAB_00010_2018-06-08_15-06-33/Headstages_64_Channels_int16_2018-06-10_11-21-42.bin'
# number_of_channels = 64
# hs = 'hs64'
# nsec = 1
# number_of_probes = 1
# ch_list = [43,44,63]
# thresh = -50
# Get filename
print("Enter filename ")
rawfile = str(input())
print(rawfile)
# Get number of probes
print("Enter total number of probes: ")
number_of_probes = np.int16(eval(input()))
print(number_of_probes)
# Get number of channels
print("Enter total number of channels : ")
number_of_channels = np.int16(eval(input()))
print(number_of_channels)
if number_of_probes > 1:
hs = []
for i in range(number_of_probes):
        # Get probe type for each probe
print("Enter probe type (Ex. hs64) : ")
hstype = input()
print(hstype)
hs.append(hstype)
else:
    # Get probe type
print("Enter probe type (Ex. hs64) : ")
hs = input()
print(hs)
print("hstype ", hs)
# Get number of seconds
print("Enter total number of seconds to plot : ")
nsec = np.int16(eval(input()))
print(nsec)
#Get channel numbers
print('Enter channels to plot: ')
print('Ex: 32 35 48')
ch_list = np.asarray(input().split(' '),dtype='int')
print(ch_list)
# Get threshold
print("What threshold to use for spikes? (Recommended: -70)")
thresh = np.int16(eval(input()))
print(thresh)
# get data
tt, ddgc = ntk.load_raw_binary_gain_chmap_nsec(rawfile, number_of_channels, hs,
25000, nsec, number_of_probes)
# bandpass data
bdgc = ntk.butter_bandpass(ddgc, 500, 7500, 25000, 3)
plt.figure(1)
for i in range(len(ch_list)):
# print(i, " ", ch_list[i])
ax = plt.subplot(len(ch_list), 1, i+1)
plt.plot(bdgc[ch_list[i], :])
plt.xticks([])
# plt.yticks([])
plt.title('Ch '+ str(ch_list[i]+1))
bdgc_thres = bdgc
bdgc_thres[bdgc_thres>thresh] = 0
plt.figure(2)
for i in range(len(ch_list)):
# print(i, " ", ch_list[i])
ax = plt.subplot(len(ch_list), 1, i+1)
plt.plot(bdgc_thres[ch_list[i], :])
plt.xticks([])
# plt.yticks([])
plt.title('Ch '+ str(ch_list[i]+1))
bdgc_grad = bdgc_thres-np.roll(bdgc_thres,1)
bdgc_grad[bdgc_grad==0] = 1000
bdgc_grad[bdgc_grad!=1000] = -1000
bdgc_grad[bdgc_grad==1000] = 1
bdgc_grad[bdgc_grad==(-1000)] = 0
plt.figure(3)
for i in range(len(ch_list)):
# print(i, " ", ch_list[i])
ax = plt.subplot(len(ch_list), 1, i+1)
plt.plot(bdgc_grad[ch_list[i], :])
plt.xticks([])
# plt.yticks([])
plt.title('Ch '+ str(ch_list[i]+1))
plt.show()
#need to fix this
# spiketimes = np.where(bdgc_grad==1)
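# A hedged sketch of the unfinished step above (an assumption, not necessarily the
# intended method): treat a spike onset as the sample where the thresholded trace
# crosses from 0 to a below-threshold value, per channel.
below_thresh = (bdgc_thres != 0)
onsets = below_thresh[:, 1:] & ~below_thresh[:, :-1]
spike_channels, spike_samples = np.where(onsets)
spike_samples = spike_samples + 1  # compensate for the one-sample shift introduced by the diff
for i in range(len(ch_list)):
    print('Ch', ch_list[i] + 1, 'spike count:', int(np.sum(spike_channels == ch_list[i])))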
|
[
"sbrunwas@Samuels-MacBook-Pro.local"
] |
sbrunwas@Samuels-MacBook-Pro.local
|
8c286b1aea6608b37c1d6b43bd135b0c104cda52
|
e7ecf4f96272193a0e943d23c097b1af0ce3e8a5
|
/Scripts/FileManager/UniLeipzigApiCaller.py
|
35eae67c1cb0322f8b7cf61a5e3f415e712cdc0b
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
ReleasedBrainiac/MultiAmbiguityToTopicMapping
|
12a57b2e458bae77c3c9b607d5d4dc65d7ec38ad
|
89cb8c5a64dd47b9fb80b67cf1bafa55498aaabc
|
refs/heads/master
| 2020-05-19T06:51:09.301824
| 2019-09-04T07:36:14
| 2019-09-04T07:36:14
| 184,884,516
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,863
|
py
|
import requests
import json
from SupportMethods.ContentSupport import isNotNone, isNotEmptyString, isInt
class UniLeipzigAPICaller():
def __init__(self, word:str, result_limit:int, base_url:str = "http://api.corpora.uni-leipzig.de/ws/sentences/", corpus:str = "deu_news_2012_1M", task:str = "sentences"):
"""
The constructor for the ApiCaller.
:param word:str: desired word
:param result_limit:int: limit of results
:param base_url:str: base url of the api providing server
        :param corpus:str: the desired corpus
:param task:str="sentences": the desired task
"""
try:
self._search_word = word if (isNotNone(word) and isNotEmptyString(word)) else None
self._search_limit = result_limit if (isNotNone(result_limit) and isInt(result_limit)) else 1
self._base_url = base_url if (isNotNone(base_url) and isNotEmptyString(base_url)) else "http://api.corpora.uni-leipzig.de/ws/sentences/"
self._corpus = corpus if (isNotNone(corpus) and isNotEmptyString(corpus)) else "deu_news_2012_1M"
self._task = task if (isNotNone(task) and isNotEmptyString(task)) else "sentences"
self._search_url_param = "?limit="
self._search_url = None
except Exception as ex:
template = "An exception of type {0} occurred in [UniLeipzigAPICaller.Constructor]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def UrlBuilder(self):
"""
This function constructs the url.
"""
try:
if isNotNone(self._search_word):
self._search_url = self._base_url + self._corpus +"/" + self._task +"/" + self._search_word + self._search_url_param + str(self._search_limit)
except Exception as ex:
template = "An exception of type {0} occurred in [UniLeipzigAPICaller.UrlBuilder]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def GetRequestJson(self):
"""
This function returns the json response.
"""
try:
self.UrlBuilder()
if isNotNone(self._search_url):
response = requests.get(self._search_url)
                if isNotNone(response) and response.status_code == 200:
json_content = json.loads(response.content)
if json_content["count"] > 0:
return json_content
#else:
# if (input("Request failed on ["+self._search_word+"]! Retry? (j/n)") is "j"):
# self.GetRequestJson()
print("Request failed on ["+self._search_word+"]!")
return None
except Exception as ex:
template = "An exception of type {0} occurred in [UniLeipzigAPICaller.GetRequestJson]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def GetFoundSentences(self):
"""
This function returns the sentences from get response.
"""
try:
            if (self._task == "sentences"):
sentences_list = []
json = self.GetRequestJson()
if isNotNone(json):
for sentence_obj in json['sentences']:
sentences_list.append(sentence_obj['sentence'])
return sentences_list
else:
return None
except Exception as ex:
template = "An exception of type {0} occurred in [UniLeipzigAPICaller.GetFoundSentences]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
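if __name__ == "__main__":
    # Minimal usage sketch: request up to 5 example sentences for a German word from
    # the Uni Leipzig corpora API. The word and the limit are arbitrary example values.
    caller = UniLeipzigAPICaller("Haus", 5)
    sentences = caller.GetFoundSentences()
    if sentences is not None:
        for sentence in sentences:
            print(sentence)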
|
[
"tobias.turke@gmx.net"
] |
tobias.turke@gmx.net
|
ec903f9ccda2297f466d3d5d649495d6ea6a3c8d
|
77063a816306b2b6996ec649199c474cb3043da5
|
/catkin_ws/src/task_planning/src/lever_sampling.py
|
5b3832c2d6e74d0336e02506b956c99d5808ed6c
|
[] |
no_license
|
mcubelab/mpalm_affordances
|
c08af865b5c1a5f34b3cb3567f17ce382b9a2462
|
00333129e867aaa90e6ac706497ae79ce0b00588
|
refs/heads/master
| 2022-07-10T21:38:16.388647
| 2020-06-19T17:26:05
| 2020-06-19T17:26:05
| 213,663,907
| 0
| 0
| null | 2022-06-21T23:26:11
| 2019-10-08T14:22:02
|
Python
|
UTF-8
|
Python
| false
| false
| 15,901
|
py
|
import sys, os
sys.path.append(os.environ['CODE_BASE'] + '/catkin_ws/src/config/src')
import itertools
import numpy as np
import rospy
from helper import helper
from helper import roshelper
from helper import visualize_helper
import util
from copy import deepcopy
import tf.transformations as tfm
from geometry_msgs.msg import PoseStamped
import objects
class LeverSampling(object):
def __init__(self, sampler):
self.sampler = sampler
self.samples_dict = {}
self.samples_dict['placement_id'] = []
self.samples_dict['placement_end_id'] = []
self.samples_dict['sample_ids'] = []
self.samples_dict['rotation_points'] = []
self.samples_dict['face_ids'] = []
self.samples_dict['gripper_poses'] = []
self.samples_dict['face_normals'] = []
self.generate_lever_samples()
self.samples_dict['object_pose'] = []
self.compute_object_poses()
def compute_object_poses(self):
for placement_id in range(len(self.samples_dict['placement_id'])):
T = self.sampler.object.stable_placement_dict['T'][placement_id]
object_pose = roshelper.pose_from_matrix(T, frame_id='yumi_body')
object_poses_list = []
for i in range(len(self.samples_dict['sample_ids'][placement_id])):
object_poses_list.append(object_pose)
self.samples_dict['object_pose'].append(object_poses_list)
def find_opposite_face(self, face_anchor_id, neighboors):
#frank hack: this is only valid for boxes
all_face_ids = range(len(self.sampler.object.stable_placement_dict['convex_face_3d']))
helper.intersection(neighboors, all_face_ids)
different_elements = np.setdiff1d(all_face_ids, neighboors + [face_anchor_id])
        assert len(list(different_elements)) == 1, "more than one opposite face"
return int(different_elements[0])
def get_points_face(self, face_anchor_id, placement_id):
neighboors_anchor_face = self.sampler.object.stable_placement_dict['neighboors'][face_anchor_id]
face_rotate_id = self.find_opposite_face(face_anchor_id, neighboors_anchor_face)
face_list_placement = self.sampler.object.stable_placement_dict['convex_face_stable_config'][
placement_id]
face_rotate_sampling_base = face_list_placement[face_rotate_id]
face_anchor_sampling_base = face_list_placement[face_anchor_id]
return face_rotate_sampling_base, face_anchor_sampling_base, face_rotate_id
def get_face_normals(self, face_anchor_id, placement_id):
neighboors_anchor_face = self.sampler.object.stable_placement_dict['neighboors'][face_anchor_id]
face_rotate_id = self.find_opposite_face(face_anchor_id, neighboors_anchor_face)
normal_list_placement = self.sampler.object.stable_placement_dict['normal_stable_config'][
placement_id]
normal_rotate_sampling_base = normal_list_placement[face_rotate_id]
normal_anchor_sampling_base = normal_list_placement[face_anchor_id]
return normal_rotate_sampling_base, normal_anchor_sampling_base
def get_highest_points(self, face_rotate_sampling_base, face_anchor_sampling_base):
points_rotate_sampling_base = util.find_highest_points(face_rotate_sampling_base)
points_anchor_sampling_base = util.find_highest_points(face_anchor_sampling_base)
return points_rotate_sampling_base, points_anchor_sampling_base
def compute_lever_z_axes(self, face_anchor_id, placement_id):
face_rotate_sampling_base, face_anchor_sampling_base, face_rotate_id = self.get_points_face(face_anchor_id, placement_id)
points_rotate_sampling_base, points_anchor_sampling_base = self.get_highest_points(face_rotate_sampling_base, face_anchor_sampling_base)
z_axis_rotate_sampling_base = helper.axis_from_points(points_rotate_sampling_base)
z_axis_anchor_sampling_base = helper.axis_from_points(points_anchor_sampling_base, vec_guide=z_axis_rotate_sampling_base)
return z_axis_rotate_sampling_base, z_axis_anchor_sampling_base
def compute_lever_y_axes(self, face_anchor_id, placement_id):
face_rotate_sampling_base, face_anchor_sampling_base, face_rotate_id = self.get_points_face(face_anchor_id, placement_id)
y_axis_rotate_sampling_base, y_axis_anchor_sampling_base = self.get_face_normals(face_anchor_id, placement_id)
return y_axis_rotate_sampling_base, y_axis_anchor_sampling_base
def compute_lever_x_axes(self, y_axis_rotate_sampling_base, y_axis_anchor_sampling_base, z_axis_rotate_sampling_base, z_axis_anchor_sampling_base):
x_axis_rotate_sampling_base = np.cross(y_axis_rotate_sampling_base, z_axis_rotate_sampling_base)
x_axis_anchor_sampling_base = np.cross(y_axis_anchor_sampling_base, z_axis_anchor_sampling_base)
return x_axis_rotate_sampling_base, x_axis_anchor_sampling_base
def find_lever_points(self, face_anchor_id, placement_id):
face_rotate_sampling_base, face_anchor_sampling_base, face_rotate_id = self.get_points_face(face_anchor_id, placement_id)
points_rotate_sampling_base, points_anchor_sampling_base = self.get_highest_points(face_rotate_sampling_base, face_anchor_sampling_base)
lever_point_rotate_sampling_base = util.find_mid_point(points_rotate_sampling_base)
lever_point_anchor_sampling_base = util.find_mid_point(points_anchor_sampling_base)
return lever_point_rotate_sampling_base, lever_point_anchor_sampling_base, face_rotate_id
def compute_nominal_gripper_poses(self,face_anchor_id, placement_id, trans_anchor, trans_rotate):
z_axis_rotate_sampling_base, z_axis_anchor_sampling_base = self.compute_lever_z_axes(face_anchor_id,
placement_id)
y_axis_rotate_sampling_base, y_axis_anchor_sampling_base = self.compute_lever_y_axes(face_anchor_id,
placement_id)
x_axis_rotate_sampling_base, x_axis_anchor_sampling_base = self.compute_lever_x_axes(y_axis_rotate_sampling_base,
y_axis_anchor_sampling_base,
z_axis_rotate_sampling_base,
z_axis_anchor_sampling_base)
gripper_pose_rotate = roshelper.pose_from_vectors(x_axis_rotate_sampling_base,
y_axis_rotate_sampling_base,
z_axis_rotate_sampling_base,
trans_rotate,
frame_id="proposals_base")
gripper_pose_anchor = roshelper.pose_from_vectors(x_axis_anchor_sampling_base,
y_axis_anchor_sampling_base,
z_axis_anchor_sampling_base,
trans_anchor,
frame_id="proposals_base")
return gripper_pose_rotate, gripper_pose_anchor
def find_lever_angle_sign(self, gripper_pose_proposals_base):
#1. extract vectors
T = roshelper.matrix_from_pose(gripper_pose_proposals_base)
x_vec, y_vec, z_vec = helper.matrix2vec(T)
#2. return sign based on condition
if x_vec[2]>0:
return -1
else:
return 1
def tilt_gripper_poses(self, gripper_rotate, gripper_anchor, rotate_angle=0*np.pi/180, anchor_angle=0*np.pi/180):
#1. convert to gripper frame
gripper_rotate_gripper_frame = roshelper.convert_reference_frame(gripper_rotate,
gripper_rotate,
roshelper.unit_pose(),
frame_id = "gripper_rotate")
gripper_anchor_gripper_frame = roshelper.convert_reference_frame(gripper_anchor,
gripper_anchor,
roshelper.unit_pose(),
frame_id = "gripper_anchor")
#2. rotate
rotate_angle = self.find_lever_angle_sign(gripper_rotate) * rotate_angle
anchor_angle = self.find_lever_angle_sign(gripper_anchor) * anchor_angle
pose_transform_rotate = roshelper.pose_from_matrix(tfm.euler_matrix(0, 0, rotate_angle,'sxyz'),
frame_id="proposals_base")
pose_transform_rotation_anchor = roshelper.pose_from_matrix(tfm.euler_matrix(0, 0, anchor_angle,'sxyz'),
frame_id="proposals_base")
gripper_rotate_tilded_gripper_frame = roshelper.transform_pose(gripper_rotate_gripper_frame, pose_transform_rotate)
gripper_anchor_tilded_gripper_frame = roshelper.transform_pose(gripper_anchor_gripper_frame, pose_transform_rotation_anchor)
#3. convert back to proposals base frame
gripper_rotate_tilded_proposals_base = roshelper.convert_reference_frame(gripper_rotate_tilded_gripper_frame,
roshelper.unit_pose(),
gripper_rotate,
frame_id = "proposals_base")
gripper_anchor_tilded_proposals_base = roshelper.convert_reference_frame(gripper_anchor_tilded_gripper_frame,
roshelper.unit_pose(),
gripper_anchor,
frame_id = "proposals_base")
return gripper_rotate_tilded_proposals_base, gripper_anchor_tilded_proposals_base
def generate_pose_samples(self, gripper_pose_rotate, gripper_pose_anchor):
gripper_poses_list = []
gripper_nominal_list = [gripper_pose_anchor, gripper_pose_rotate]
# gripper_index_list = [[0,1],[0,1]]
# flip_index_list = [[None, None],["x","y"]]
# for i in range(len(flip_index_list)):
# gripper_index = gripper_index_list[i]
# flip_index = flip_index_list[i]
# gripper_left = roshelper.flip_orientation(gripper_nominal_list[gripper_index[0]], flip_axis=flip_index[0], constant_axis=flip_index[1])
# gripper_right = roshelper.flip_orientation(gripper_nominal_list[gripper_index[1]], flip_axis=flip_index[0], constant_axis=flip_index[1])
# gripper_poses = [gripper_left, gripper_right]
# gripper_poses_list.append(gripper_poses)
return gripper_nominal_list
def generate_lever_samples(self):
#1. loop through stable placements
lever_id = 0
for placement_id, face in enumerate(self.sampler.object.stable_placement_dict['convex_face_3d']):
#2. rotate all grasp points, normals, grasp_poses
lever_id_list_new = []
lever_rotation_points_points_list_new = []
lever_face_list_new = []
lever_placement_start_list_new = []
lever_placement_end_list_new = []
lever_gripper_pose_list_new = []
lever_face_normals_list_new = []
#2. determine neighboor faces (all neighboors have associated potential lever action)
neighboors = self.sampler.object.stable_placement_dict['neighboors'][placement_id]
for face_anchor_id in neighboors:
index = self.sampler.object.stable_placement_dict['neighboors'][placement_id].index(face_anchor_id)
rotation_points = self.sampler.object.stable_placement_dict['common_points'][placement_id][index]
#3. identify lever points
lever_point_rotate_sampling_base, lever_point_anchor_sampling_base, face_rotate_id = \
self.find_lever_points(face_anchor_id, placement_id)
#4. determine gripper pose from lever points
gripper_pose_rotate, gripper_pose_anchor = self.compute_nominal_gripper_poses(face_anchor_id,
placement_id,
lever_point_anchor_sampling_base,
lever_point_rotate_sampling_base)
gripper_pose_rotate_tilde, gripper_pose_anchor_tilde = self.tilt_gripper_poses(gripper_pose_rotate, gripper_pose_anchor)
gripper_poses_list = self.generate_pose_samples(gripper_pose_rotate_tilde,
gripper_pose_anchor_tilde)
face_normals = [self.sampler.object.stable_placement_dict['normal_stable_config'][placement_id][face_anchor_id],
self.sampler.object.stable_placement_dict['normal_stable_config'][placement_id][face_rotate_id]]
# 3. check collisions between grippers and table
lever_id_list_new.append(lever_id)
lever_face_list_new.append(lever_id)
lever_rotation_points_points_list_new.append(rotation_points)
lever_placement_start_list_new.append(placement_id)
lever_placement_end_list_new.append(face_anchor_id)
lever_gripper_pose_list_new.append(gripper_poses_list)
lever_face_normals_list_new.append(face_normals)
lever_id += 1
self.samples_dict['sample_ids'].append(lever_id_list_new)
self.samples_dict['rotation_points'].append(lever_rotation_points_points_list_new)
self.samples_dict['face_ids'].append([face_anchor_id, face_rotate_id])
self.samples_dict['placement_id'].append(lever_placement_start_list_new)
self.samples_dict['placement_end_id'].append(lever_placement_end_list_new)
self.samples_dict['gripper_poses'].append(lever_gripper_pose_list_new)
self.samples_dict['face_normals'].append(lever_face_normals_list_new)
def identify_placement_ids(stable_placement_dict, placement_list):
index_list = []
sample_id_list = []
placement_end_list = stable_placement_dict['placement_end_id'][placement_list[0]]
sample_ids_list = stable_placement_dict['sample_ids'][placement_list[0]]
counter = 0
for placement_end, sample_id in zip(placement_end_list, sample_ids_list):
if placement_end==placement_list[-1]:
index_list.append(counter)
sample_id_list.append(sample_id)
counter += 1
return index_list, sample_id_list
def get_gripper_poses_from_samples(stable_placement_dict, placement_list):
index_list, sample_id_list = identify_placement_ids(stable_placement_dict, placement_list)
#get all samples that can perform a given placement
gripper_pose_element_list = []
for index in index_list:
gripper_pose_element_list.append(stable_placement_dict['gripper_poses'][placement_list[0]][index])
return gripper_pose_element_list[0], index_list[0]
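# Example usage (a sketch; `sampler` is assumed to be an object exposing
# `sampler.object.stable_placement_dict` in the form the methods above expect):
#   lever_sampling = LeverSampling(sampler)
#   gripper_poses, index = get_gripper_poses_from_samples(lever_sampling.samples_dict,
#                                                         placement_list=[0, 1])
#   # `gripper_poses` then holds the [anchor, rotate] gripper poses for levering the object
#   # out of placement 0 towards the neighbouring face given as the last entry of placement_list.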
|
[
"asimeono@mit.edu"
] |
asimeono@mit.edu
|
8aaf5edf7bb152c4373b9ec1501bb333897e8344
|
1d9dca90f8aefe637c5ffcb72972d629b588e231
|
/Assignments/PS1/starter_code/pacman.py
|
5bd460c4085f797847afcab7a72b44c4858f4929
|
[] |
no_license
|
fanfeng2015/Artificial-Intelligence
|
2bcfec6031d62538753476b46b4ea4bb4b801b23
|
acb5fe537a4e2404d7192139481950c1d65fc88b
|
refs/heads/master
| 2020-04-25T12:58:34.108015
| 2019-05-08T07:19:52
| 2019-05-08T07:19:52
| 172,794,260
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,336
|
py
|
# pacman.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
Pacman.py holds the logic for the classic pacman game along with the main
code to run a game. This file is divided into three sections:
(i) Your interface to the pacman world:
Pacman is a complex environment. You probably don't want to
          read through all of the code we wrote to make the game run
correctly. This section contains the parts of the code
that you will need to understand in order to complete the
project. There is also some code in game.py that you should
understand.
(ii) The hidden secrets of pacman:
This section contains all of the logic code that the pacman
environment uses to decide who can move where, who dies when
things collide, etc. You shouldn't need to read this section
of code, but you can if you want.
(iii) Framework to start a game:
The final section contains the code for reading the command
you use to set up the game, then starting up a new game, along with
linking in all the external parts (agent functions, graphics).
Check this section out to see all the options available to you.
To play your first game, type 'python pacman.py' from the command line.
The keys are 'a', 's', 'd', and 'w' to move (or arrow keys). Have fun!
"""
from game import GameStateData
from game import Game
from game import Directions
from game import Actions
from util import nearestPoint
from util import manhattanDistance
import util, layout
import sys, types, time, random, os
###################################################
# YOUR INTERFACE TO THE PACMAN WORLD: A GameState #
###################################################
class GameState:
"""
A GameState specifies the full game state, including the food, capsules,
agent configurations and score changes.
GameStates are used by the Game object to capture the actual state of the game and
can be used by agents to reason about the game.
Much of the information in a GameState is stored in a GameStateData object. We
strongly suggest that you access that data via the accessor methods below rather
than referring to the GameStateData object directly.
Note that in classic Pacman, Pacman is always agent 0.
"""
####################################################
# Accessor methods: use these to access state data #
####################################################
# static variable keeps track of which states have had getLegalActions called
explored = set()
def getAndResetExplored():
tmp = GameState.explored.copy()
GameState.explored = set()
return tmp
getAndResetExplored = staticmethod(getAndResetExplored)
def getLegalActions( self, agentIndex=0 ):
"""
Returns the legal actions for the agent specified.
"""
# GameState.explored.add(self)
if self.isWin() or self.isLose(): return []
if agentIndex == 0: # Pacman is moving
return PacmanRules.getLegalActions( self )
else:
return GhostRules.getLegalActions( self, agentIndex )
def generateSuccessor( self, agentIndex, action):
"""
Returns the successor state after the specified agent takes the action.
"""
# Check that successors exist
if self.isWin() or self.isLose(): raise Exception('Can\'t generate a successor of a terminal state.')
# Copy current state
state = GameState(self)
# Let agent's logic deal with its action's effects on the board
if agentIndex == 0: # Pacman is moving
state.data._eaten = [False for i in range(state.getNumAgents())]
PacmanRules.applyAction( state, action )
else: # A ghost is moving
GhostRules.applyAction( state, action, agentIndex )
# Time passes
if agentIndex == 0:
state.data.scoreChange += -TIME_PENALTY # Penalty for waiting around
else:
GhostRules.decrementTimer( state.data.agentStates[agentIndex] )
# Resolve multi-agent effects
GhostRules.checkDeath( state, agentIndex )
# Book keeping
state.data._agentMoved = agentIndex
state.data.score += state.data.scoreChange
GameState.explored.add(self)
GameState.explored.add(state)
return state
def getLegalPacmanActions( self ):
return self.getLegalActions( 0 )
def generatePacmanSuccessor( self, action ):
"""
Generates the successor state after the specified pacman move
"""
return self.generateSuccessor( 0, action )
def getPacmanState( self ):
"""
Returns an AgentState object for pacman (in game.py)
state.pos gives the current position
state.direction gives the travel vector
"""
return self.data.agentStates[0].copy()
def getPacmanPosition( self ):
return self.data.agentStates[0].getPosition()
def getGhostStates( self ):
return self.data.agentStates[1:]
def getGhostState( self, agentIndex ):
if agentIndex == 0 or agentIndex >= self.getNumAgents():
raise Exception("Invalid index passed to getGhostState")
return self.data.agentStates[agentIndex]
def getGhostPosition( self, agentIndex ):
if agentIndex == 0:
raise Exception("Pacman's index passed to getGhostPosition")
return self.data.agentStates[agentIndex].getPosition()
def getGhostPositions(self):
return [s.getPosition() for s in self.getGhostStates()]
def getNumAgents( self ):
return len( self.data.agentStates )
def getScore( self ):
return float(self.data.score)
def getCapsules(self):
"""
Returns a list of positions (x,y) of the remaining capsules.
"""
return self.data.capsules
def getNumFood( self ):
return self.data.food.count()
def getFood(self):
"""
Returns a Grid of boolean food indicator variables.
Grids can be accessed via list notation, so to check
if there is food at (x,y), just call
currentFood = state.getFood()
if currentFood[x][y] == True: ...
"""
return self.data.food
def getWalls(self):
"""
Returns a Grid of boolean wall indicator variables.
Grids can be accessed via list notation, so to check
if there is a wall at (x,y), just call
walls = state.getWalls()
if walls[x][y] == True: ...
"""
return self.data.layout.walls
def hasFood(self, x, y):
return self.data.food[x][y]
def hasWall(self, x, y):
return self.data.layout.walls[x][y]
def isLose( self ):
return self.data._lose
def isWin( self ):
return self.data._win
#############################################
# Helper methods: #
# You shouldn't need to call these directly #
#############################################
def __init__( self, prevState = None ):
"""
Generates a new state by copying information from its predecessor.
"""
if prevState != None: # Initial state
self.data = GameStateData(prevState.data)
else:
self.data = GameStateData()
def deepCopy( self ):
state = GameState( self )
state.data = self.data.deepCopy()
return state
def __eq__( self, other ):
"""
Allows two states to be compared.
"""
return hasattr(other, 'data') and self.data == other.data
def __hash__( self ):
"""
Allows states to be keys of dictionaries.
"""
return hash( self.data )
def __str__( self ):
return str(self.data)
def initialize( self, layout, numGhostAgents=1000 ):
"""
Creates an initial game state from a layout array (see layout.py).
"""
self.data.initialize(layout, numGhostAgents)
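# Example (a sketch of how an agent typically uses the GameState interface, e.g. inside getAction(state)):
#   legal = state.getLegalPacmanActions()
#   successors = [state.generatePacmanSuccessor(a) for a in legal]
#   best = max(successors, key=lambda s: s.getScore())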
############################################################################
# THE HIDDEN SECRETS OF PACMAN #
# #
# You shouldn't need to look through the code in this section of the file. #
############################################################################
SCARED_TIME = 40 # Moves ghosts are scared
COLLISION_TOLERANCE = 0.7 # How close ghosts must be to Pacman to kill
TIME_PENALTY = 1 # Number of points lost each round
class ClassicGameRules:
"""
These game rules manage the control flow of a game, deciding when
and how the game starts and ends.
"""
def __init__(self, timeout=30):
self.timeout = timeout
def newGame( self, layout, pacmanAgent, ghostAgents, display, quiet = False, catchExceptions=False):
agents = [pacmanAgent] + ghostAgents[:layout.getNumGhosts()]
initState = GameState()
initState.initialize( layout, len(ghostAgents) )
game = Game(agents, display, self, catchExceptions=catchExceptions)
game.state = initState
self.initialState = initState.deepCopy()
self.quiet = quiet
return game
def process(self, state, game):
"""
Checks to see whether it is time to end the game.
"""
if state.isWin(): self.win(state, game)
if state.isLose(): self.lose(state, game)
def win( self, state, game ):
if not self.quiet: print("Pacman emerges victorious! Score: %d" % state.data.score)
game.gameOver = True
def lose( self, state, game ):
if not self.quiet: print("Pacman died! Score: %d" % state.data.score)
game.gameOver = True
def getProgress(self, game):
return float(game.state.getNumFood()) / self.initialState.getNumFood()
def agentCrash(self, game, agentIndex):
if agentIndex == 0:
print("Pacman crashed")
else:
print("A ghost crashed")
def getMaxTotalTime(self, agentIndex):
return self.timeout
def getMaxStartupTime(self, agentIndex):
return self.timeout
def getMoveWarningTime(self, agentIndex):
return self.timeout
def getMoveTimeout(self, agentIndex):
return self.timeout
def getMaxTimeWarnings(self, agentIndex):
return 0
class PacmanRules:
"""
These functions govern how pacman interacts with his environment under
the classic game rules.
"""
PACMAN_SPEED=1
def getLegalActions( state ):
"""
Returns a list of possible actions.
"""
return Actions.getPossibleActions( state.getPacmanState().configuration, state.data.layout.walls )
getLegalActions = staticmethod( getLegalActions )
def applyAction( state, action ):
"""
Edits the state to reflect the results of the action.
"""
legal = PacmanRules.getLegalActions( state )
if action not in legal:
raise Exception("Illegal action " + str(action))
pacmanState = state.data.agentStates[0]
# Update Configuration
vector = Actions.directionToVector( action, PacmanRules.PACMAN_SPEED )
pacmanState.configuration = pacmanState.configuration.generateSuccessor( vector )
# Eat
next = pacmanState.configuration.getPosition()
nearest = nearestPoint( next )
if manhattanDistance( nearest, next ) <= 0.5 :
# Remove food
PacmanRules.consume( nearest, state )
applyAction = staticmethod( applyAction )
def consume( position, state ):
x,y = position
# Eat food
if state.data.food[x][y]:
state.data.scoreChange += 10
state.data.food = state.data.food.copy()
state.data.food[x][y] = False
state.data._foodEaten = position
# TODO: cache numFood?
numFood = state.getNumFood()
if numFood == 0 and not state.data._lose:
state.data.scoreChange += 500
state.data._win = True
# Eat capsule
if( position in state.getCapsules() ):
state.data.capsules.remove( position )
state.data._capsuleEaten = position
# Reset all ghosts' scared timers
for index in range( 1, len( state.data.agentStates ) ):
state.data.agentStates[index].scaredTimer = SCARED_TIME
consume = staticmethod( consume )
class GhostRules:
"""
These functions dictate how ghosts interact with their environment.
"""
GHOST_SPEED=1.0
def getLegalActions( state, ghostIndex ):
"""
Ghosts cannot stop, and cannot turn around unless they
reach a dead end, but can turn 90 degrees at intersections.
"""
conf = state.getGhostState( ghostIndex ).configuration
possibleActions = Actions.getPossibleActions( conf, state.data.layout.walls )
reverse = Actions.reverseDirection( conf.direction )
if Directions.STOP in possibleActions:
possibleActions.remove( Directions.STOP )
if reverse in possibleActions and len( possibleActions ) > 1:
possibleActions.remove( reverse )
return possibleActions
getLegalActions = staticmethod( getLegalActions )
def applyAction( state, action, ghostIndex):
legal = GhostRules.getLegalActions( state, ghostIndex )
if action not in legal:
raise Exception("Illegal ghost action " + str(action))
ghostState = state.data.agentStates[ghostIndex]
speed = GhostRules.GHOST_SPEED
if ghostState.scaredTimer > 0: speed /= 2.0
vector = Actions.directionToVector( action, speed )
ghostState.configuration = ghostState.configuration.generateSuccessor( vector )
applyAction = staticmethod( applyAction )
def decrementTimer( ghostState):
timer = ghostState.scaredTimer
if timer == 1:
ghostState.configuration.pos = nearestPoint( ghostState.configuration.pos )
ghostState.scaredTimer = max( 0, timer - 1 )
decrementTimer = staticmethod( decrementTimer )
def checkDeath( state, agentIndex):
pacmanPosition = state.getPacmanPosition()
if agentIndex == 0: # Pacman just moved; Anyone can kill him
for index in range( 1, len( state.data.agentStates ) ):
ghostState = state.data.agentStates[index]
ghostPosition = ghostState.configuration.getPosition()
if GhostRules.canKill( pacmanPosition, ghostPosition ):
GhostRules.collide( state, ghostState, index )
else:
ghostState = state.data.agentStates[agentIndex]
ghostPosition = ghostState.configuration.getPosition()
if GhostRules.canKill( pacmanPosition, ghostPosition ):
GhostRules.collide( state, ghostState, agentIndex )
checkDeath = staticmethod( checkDeath )
def collide( state, ghostState, agentIndex):
if ghostState.scaredTimer > 0:
state.data.scoreChange += 200
GhostRules.placeGhost(state, ghostState)
ghostState.scaredTimer = 0
# Added for first-person
state.data._eaten[agentIndex] = True
else:
if not state.data._win:
state.data.scoreChange -= 500
state.data._lose = True
collide = staticmethod( collide )
def canKill( pacmanPosition, ghostPosition ):
return manhattanDistance( ghostPosition, pacmanPosition ) <= COLLISION_TOLERANCE
canKill = staticmethod( canKill )
def placeGhost(state, ghostState):
ghostState.configuration = ghostState.start
placeGhost = staticmethod( placeGhost )
#############################
# FRAMEWORK TO START A GAME #
#############################
def default(str):
return str + ' [Default: %default]'
def parseAgentArgs(str):
if str == None: return {}
pieces = str.split(',')
opts = {}
for p in pieces:
if '=' in p:
key, val = p.split('=')
else:
key,val = p, 1
opts[key] = val
return opts
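# Example (sketch): parseAgentArgs('depth=3,alphabeta') returns {'depth': '3', 'alphabeta': 1},
# i.e. 'key=value' pairs keep their string value and bare flags default to 1.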
def readCommand( argv ):
"""
Processes the command used to run pacman from the command line.
"""
from optparse import OptionParser
usageStr = """
USAGE: python pacman.py <options>
EXAMPLES: (1) python pacman.py
- starts an interactive game
(2) python pacman.py --layout smallClassic --zoom 2
OR python pacman.py -l smallClassic -z 2
- starts an interactive game on a smaller board, zoomed in
"""
parser = OptionParser(usageStr)
parser.add_option('-n', '--numGames', dest='numGames', type='int',
help=default('the number of GAMES to play'), metavar='GAMES', default=1)
parser.add_option('-l', '--layout', dest='layout',
help=default('the LAYOUT_FILE from which to load the map layout'),
metavar='LAYOUT_FILE', default='mediumClassic')
parser.add_option('-p', '--pacman', dest='pacman',
help=default('the agent TYPE in the pacmanAgents module to use'),
metavar='TYPE', default='KeyboardAgent')
parser.add_option('-t', '--textGraphics', action='store_true', dest='textGraphics',
help='Display output as text only', default=False)
parser.add_option('-q', '--quietTextGraphics', action='store_true', dest='quietGraphics',
help='Generate minimal output and no graphics', default=False)
parser.add_option('-g', '--ghosts', dest='ghost',
help=default('the ghost agent TYPE in the ghostAgents module to use'),
metavar = 'TYPE', default='RandomGhost')
parser.add_option('-k', '--numghosts', type='int', dest='numGhosts',
help=default('The maximum number of ghosts to use'), default=4)
parser.add_option('-z', '--zoom', type='float', dest='zoom',
help=default('Zoom the size of the graphics window'), default=1.0)
parser.add_option('-f', '--fixRandomSeed', action='store_true', dest='fixRandomSeed',
help='Fixes the random seed to always play the same game', default=False)
parser.add_option('-r', '--recordActions', action='store_true', dest='record',
help='Writes game histories to a file (named by the time they were played)', default=False)
parser.add_option('--replay', dest='gameToReplay',
help='A recorded game file (pickle) to replay', default=None)
parser.add_option('-a','--agentArgs',dest='agentArgs',
help='Comma separated values sent to agent. e.g. "opt1=val1,opt2,opt3=val3"')
parser.add_option('-x', '--numTraining', dest='numTraining', type='int',
help=default('How many episodes are training (suppresses output)'), default=0)
parser.add_option('--frameTime', dest='frameTime', type='float',
help=default('Time to delay between frames; <0 means keyboard'), default=0.1)
parser.add_option('-c', '--catchExceptions', action='store_true', dest='catchExceptions',
help='Turns on exception handling and timeouts during games', default=False)
parser.add_option('--timeout', dest='timeout', type='int',
help=default('Maximum length of time an agent can spend computing in a single game'), default=30)
options, otherjunk = parser.parse_args(argv)
if len(otherjunk) != 0:
raise Exception('Command line input not understood: ' + str(otherjunk))
args = dict()
# Fix the random seed
if options.fixRandomSeed: random.seed('cs470')
# Choose a layout
args['layout'] = layout.getLayout( options.layout )
if args['layout'] == None: raise Exception("The layout " + options.layout + " cannot be found")
# Choose a Pacman agent
noKeyboard = options.gameToReplay == None and (options.textGraphics or options.quietGraphics)
pacmanType = loadAgent(options.pacman, noKeyboard)
agentOpts = parseAgentArgs(options.agentArgs)
if options.numTraining > 0:
args['numTraining'] = options.numTraining
if 'numTraining' not in agentOpts: agentOpts['numTraining'] = options.numTraining
pacman = pacmanType(**agentOpts) # Instantiate Pacman with agentArgs
args['pacman'] = pacman
# Don't display training games
if 'numTrain' in agentOpts:
options.numQuiet = int(agentOpts['numTrain'])
options.numIgnore = int(agentOpts['numTrain'])
# Choose a ghost agent
ghostType = loadAgent(options.ghost, noKeyboard)
args['ghosts'] = [ghostType( i+1 ) for i in range( options.numGhosts )]
# Choose a display format
if options.quietGraphics:
import textDisplay
args['display'] = textDisplay.NullGraphics()
elif options.textGraphics:
import textDisplay
textDisplay.SLEEP_TIME = options.frameTime
args['display'] = textDisplay.PacmanGraphics()
else:
import graphicsDisplay
args['display'] = graphicsDisplay.PacmanGraphics(options.zoom, frameTime = options.frameTime)
args['numGames'] = options.numGames
args['record'] = options.record
args['catchExceptions'] = options.catchExceptions
args['timeout'] = options.timeout
# Special case: recorded games don't use the runGames method or args structure
if options.gameToReplay != None:
print('Replaying recorded game %s.' % options.gameToReplay)
import pickle
        f = open(options.gameToReplay, 'rb')
try: recorded = pickle.load(f)
finally: f.close()
recorded['display'] = args['display']
replayGame(**recorded)
sys.exit(0)
return args
def loadAgent(pacman, nographics):
# Looks through all pythonPath Directories for the right module,
pythonPathStr = os.path.expandvars("$PYTHONPATH")
if pythonPathStr.find(';') == -1:
pythonPathDirs = pythonPathStr.split(':')
else:
pythonPathDirs = pythonPathStr.split(';')
pythonPathDirs.append('.')
for moduleDir in pythonPathDirs:
if not os.path.isdir(moduleDir): continue
moduleNames = [f for f in os.listdir(moduleDir) if f.endswith('gents.py')]
for modulename in moduleNames:
try:
module = __import__(modulename[:-3])
except ImportError:
continue
if pacman in dir(module):
if nographics and modulename == 'keyboardAgents.py':
raise Exception('Using the keyboard requires graphics (not text display)')
return getattr(module, pacman)
raise Exception('The agent ' + pacman + ' is not specified in any *Agents.py.')
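# Example (sketch): loadAgent('GreedyAgent', nographics=True) scans every '*gents.py' module found
# on PYTHONPATH (plus the current directory) and returns the GreedyAgent class from pacmanAgents.py,
# assuming that module is importable.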
def replayGame( layout, actions, display ):
import pacmanAgents, ghostAgents
rules = ClassicGameRules()
agents = [pacmanAgents.GreedyAgent()] + [ghostAgents.RandomGhost(i+1) for i in range(layout.getNumGhosts())]
game = rules.newGame( layout, agents[0], agents[1:], display )
state = game.state
display.initialize(state.data)
for action in actions:
# Execute the action
state = state.generateSuccessor( *action )
# Change the display
display.update( state.data )
# Allow for game specific conditions (winning, losing, etc.)
rules.process(state, game)
display.finish()
def runGames( layout, pacman, ghosts, display, numGames, record, numTraining = 0, catchExceptions=False, timeout=30 ):
import __main__
__main__.__dict__['_display'] = display
rules = ClassicGameRules(timeout)
games = []
for i in range( numGames ):
beQuiet = i < numTraining
if beQuiet:
# Suppress output and graphics
import textDisplay
gameDisplay = textDisplay.NullGraphics()
rules.quiet = True
else:
gameDisplay = display
rules.quiet = False
game = rules.newGame( layout, pacman, ghosts, gameDisplay, beQuiet, catchExceptions)
game.run()
if not beQuiet: games.append(game)
if record:
import time, pickle
fname = ('recorded-game-%d' % (i + 1)) + '-'.join([str(t) for t in time.localtime()[1:6]])
            f = open(fname, 'wb')
components = {'layout': layout, 'actions': game.moveHistory}
pickle.dump(components, f)
f.close()
if (numGames-numTraining) > 0:
scores = [game.state.getScore() for game in games]
wins = [game.state.isWin() for game in games]
winRate = wins.count(True)/ float(len(wins))
print('Average Score:', sum(scores) / float(len(scores)))
print('Scores: ', ', '.join([str(score) for score in scores]))
print('Win Rate: %d/%d (%.2f)' % (wins.count(True), len(wins), winRate))
print('Record: ', ', '.join([ ['Loss', 'Win'][int(w)] for w in wins]))
return games
if __name__ == '__main__':
"""
The main function called when pacman.py is run
from the command line:
> python pacman.py
See the usage string for more details.
> python pacman.py --help
"""
args = readCommand( sys.argv[1:] ) # Get game components based on input
runGames( **args )
# import cProfile
# cProfile.run("runGames( **args )")
pass
|
[
"fanfeng2015@gmail.com"
] |
fanfeng2015@gmail.com
|
1609f70860867a529b1e53248793fe3397b745f3
|
bab1a8b7547d1ceb2177998dafde85d1a80c949d
|
/zfit/core/integration.py
|
d217ed4b4ea670e2fc93246de66d2be6220277a3
|
[
"BSD-3-Clause"
] |
permissive
|
olantwin/zfit
|
99d518e75171f0bad0fc54d3fcd9d1db95a87363
|
d5e6fbcc73cec053752be08c2157e9e1c4cdd8f6
|
refs/heads/master
| 2021-07-14T02:17:42.271975
| 2019-03-30T12:42:42
| 2019-03-30T12:42:42
| 179,468,861
| 0
| 0
|
BSD-3-Clause
| 2019-04-04T09:44:49
| 2019-04-04T09:44:48
| null |
UTF-8
|
Python
| false
| false
| 22,208
|
py
|
"""
This module contains functions for the numeric as well as the analytic (partial) integration.
"""
import collections
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from typing import Callable, Optional, Union, Type, Tuple, List
import zfit
from zfit import ztf
from zfit.core.dimension import BaseDimensional
from zfit.core.interfaces import ZfitData, ZfitSpace, ZfitModel
from zfit.util.container import convert_to_container
from zfit.util.temporary import TemporarilySet
from ..util import ztyping
from ..util.exception import DueToLazynessNotImplementedError
from .limits import convert_to_space, Space, supports
from ..settings import ztypes
@supports()
def auto_integrate(func, limits, n_axes, x=None, method="AUTO", dtype=ztypes.float,
mc_sampler=tfp.mcmc.sample_halton_sequence,
mc_options=None):
if method == "AUTO": # TODO unfinished, other methods?
method = "mc"
# TODO method
if method.lower() == "mc":
mc_options = mc_options or {}
draws_per_dim = mc_options['draws_per_dim']
integral = mc_integrate(x=x, func=func, limits=limits, n_axes=n_axes, method=method, dtype=dtype,
mc_sampler=mc_sampler, draws_per_dim=draws_per_dim,
importance_sampling=None)
return integral
# TODO implement numerical integration method
def numeric_integrate():
"""Integrate `func` using numerical methods."""
integral = None
return integral
def mc_integrate(func: Callable, limits: ztyping.LimitsType, axes: Optional[ztyping.AxesTypeInput] = None,
x: Optional[ztyping.XType] = None, n_axes: Optional[int] = None, draws_per_dim: int = 20000,
method: str = None,
dtype: Type = ztypes.float,
mc_sampler: Callable = tfp.mcmc.sample_halton_sequence,
importance_sampling: Optional[Callable] = None) -> tf.Tensor:
"""Monte Carlo integration of `func` over `limits`.
Args:
func (callable): The function to be integrated over
limits (:py:class:`~zfit.Space`): The limits of the integral
        axes (tuple(int)): The axes to integrate over. None means integration over all axes
        x (numeric): If a partial integration is performed, these are the values at which the
            partially integrated function will be evaluated.
        n_axes (int): the number of total dimensions (old?)
        draws_per_dim (int): How many random points to draw per dimension
method (str): Which integration method to use
dtype (dtype): |dtype_arg_descr|
mc_sampler (callable): A function that takes one argument (`n_draws` or similar) and returns
            random values between 0 and 1.
importance_sampling ():
Returns:
numerical: the integral
"""
if axes is not None and n_axes is not None:
raise ValueError("Either specify axes or n_axes")
limits = convert_to_space(limits)
axes = limits.axes
partial = (axes is not None) and (x is not None) # axes, value can be tensors
if axes is not None and n_axes is None:
n_axes = len(axes)
if n_axes is not None and axes is None:
axes = tuple(range(n_axes))
lower, upper = limits.limits
if np.infty in upper[0] or -np.infty in lower[0]:
raise ValueError("MC integration does (currently) not support unbound limits (np.infty) as given here:"
"\nlower: {}, upper: {}".format(lower, upper))
lower = ztf.convert_to_tensor(lower, dtype=dtype)
upper = ztf.convert_to_tensor(upper, dtype=dtype)
n_samples = draws_per_dim
chunked_normalization = zfit.run.chunksize < n_samples
# chunked_normalization = True
if chunked_normalization:
if partial:
raise DueToLazynessNotImplementedError("This feature is not yet implemented: needs new Datasets")
n_chunks = int(np.ceil(n_samples / zfit.run.chunksize))
chunksize = int(np.ceil(n_samples / n_chunks))
print("starting normalization with {} chunks and a chunksize of {}".format(n_chunks, chunksize))
avg = normalization_chunked(func=func, n_axes=n_axes, dtype=dtype, x=x,
num_batches=n_chunks, batch_size=chunksize, space=limits)
else:
# TODO: deal with n_obs properly?
samples_normed = mc_sampler(dim=n_axes, num_results=n_samples, dtype=dtype)
# samples_normed = tf.reshape(samples_normed, shape=(n_vals, int(n_samples / n_vals), n_axes))
# samples_normed = tf.expand_dims(samples_normed, axis=0)
samples = samples_normed * (upper - lower) + lower # samples is [0, 1], stretch it
# samples = tf.transpose(samples, perm=[2, 0, 1])
if partial: # TODO(Mayou36): shape of partial integral?
data_obs = x.obs
new_obs = []
x = x.value()
value_list = []
index_samples = 0
index_values = 0
if len(x.shape) == 1:
x = tf.expand_dims(x, axis=1)
for i in range(n_axes + x.shape[-1].value):
if i in axes:
new_obs.append(limits.obs[index_samples])
value_list.append(samples[:, index_samples])
index_samples += 1
else:
new_obs.append(data_obs[index_values])
value_list.append(tf.expand_dims(x[:, index_values], axis=1))
index_values += 1
value_list = [tf.cast(val, dtype=dtype) for val in value_list]
x = value_list
x = PartialIntegralSampleData(sample=value_list,
space=Space(obs=new_obs))
else:
x = samples
# convert rnd samples with value to feedable vector
reduce_axis = 1 if partial else None
avg = tf.reduce_mean(func(x), axis=reduce_axis)
# avg = tfp.monte_carlo.expectation(f=func, samples=x, axis=reduce_axis)
# TODO: importance sampling?
# avg = tfb.monte_carlo.expectation_importance_sampler(f=func, samples=value,axis=reduce_axis)
integral = avg * tf.cast(ztf.convert_to_tensor(limits.area()), dtype=avg.dtype)
return integral
# return ztf.to_real(integral, dtype=dtype)
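# Example (sketch, assuming `limits_1d` is a one-dimensional zfit Space with finite bounds):
#   integral = mc_integrate(func=lambda x: x ** 2, limits=limits_1d, draws_per_dim=10000)
#   # `integral` is a tf.Tensor holding the Monte Carlo estimate of the integral of x**2 over
#   # `limits_1d`, i.e. the mean of the sampled function values times the area of the limits.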
def normalization_nograd(func, n_axes, batch_size, num_batches, dtype, space, x=None, shape_after=()):
upper, lower = space.limits
lower = ztf.convert_to_tensor(lower, dtype=dtype)
upper = ztf.convert_to_tensor(upper, dtype=dtype)
def body(batch_num, mean):
start_idx = batch_num * batch_size
end_idx = start_idx + batch_size
indices = tf.range(start_idx, end_idx, dtype=tf.int32)
samples_normed = tfp.mcmc.sample_halton_sequence(n_axes,
# num_results=batch_size,
sequence_indices=indices,
dtype=dtype,
randomized=False)
# halton_sample = tf.random_uniform(shape=(n_axes, batch_size), dtype=dtype)
samples_normed.set_shape((batch_size, n_axes))
samples_normed = tf.expand_dims(samples_normed, axis=0)
samples = samples_normed * (upper - lower) + lower
func_vals = func(samples)
if shape_after == ():
reduce_axis = None
else:
reduce_axis = 1
if len(func_vals.shape) == 1:
func_vals = tf.expand_dims(func_vals, -1)
batch_mean = tf.reduce_mean(func_vals, axis=reduce_axis) # if there are gradients
# batch_mean = tf.reduce_mean(sample)
# batch_mean = tf.guarantee_const(batch_mean)
# with tf.control_dependencies([batch_mean]):
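        # Running-mean update below: with weight 1/(batch_num + 1), the new mean is
        # mean + (batch_mean - mean) / (batch_num + 1), i.e. the average of all batch means seen so far.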
err_weight = 1 / tf.to_double(batch_num + 1)
# err_weight /= err_weight + 1
# print_op = tf.print(batch_mean)
print_op = tf.print(batch_num + 1)
with tf.control_dependencies([print_op]):
return batch_num + 1, mean + err_weight * (batch_mean - mean)
cond = lambda batch_num, _: batch_num < num_batches
initial_mean = tf.constant(0, shape=shape_after, dtype=dtype)
initial_body_args = (0, initial_mean)
_, final_mean = tf.while_loop(cond, body, initial_body_args, parallel_iterations=1,
swap_memory=False, back_prop=True)
# def normalization_grad(x):
return final_mean
def normalization_chunked(func, n_axes, batch_size, num_batches, dtype, space, x=None, shape_after=()):
x_is_none = x is None
@tf.custom_gradient
def normalization_func(x):
if x_is_none:
x = None
value = normalization_nograd(func=func, n_axes=n_axes, batch_size=batch_size, num_batches=num_batches,
dtype=dtype,
space=space, x=x, shape_after=shape_after)
def grad_fn(dy, variables=None):
if variables is None:
return dy, None
normed_grad = normalization_nograd(func=lambda x: tf.stack(tf.gradients(func(x), variables)),
n_axes=n_axes, batch_size=batch_size, num_batches=num_batches,
dtype=dtype,
space=space,
x=x, shape_after=(len(variables),))
return dy, tf.unstack(normed_grad)
return value, grad_fn
fake_x = 1 if x_is_none else x
return normalization_func(fake_x)
# NOTE: the following earlier, incomplete definition is immediately shadowed by the full
# implementation below; it is kept here only as a commented-out stub.
# def chunked_average(func, x, num_batches, batch_size, space, mc_sampler):
#     avg = normalization_nograd()
def chunked_average(func, x, num_batches, batch_size, space, mc_sampler):
lower, upper = space.limits
fake_resource_var = tf.get_variable("fake_hack_ResVar_for_custom_gradient",
initializer=ztf.constant(4242.))
fake_x = ztf.constant(42.) * fake_resource_var
@tf.custom_gradient
def dummy_func(fake_x): # to make working with custom_gradient
if x is not None:
raise DueToLazynessNotImplementedError("partial not yet implemented")
def body(batch_num, mean):
if mc_sampler == tfp.mcmc.sample_halton_sequence:
start_idx = batch_num * batch_size
end_idx = start_idx + batch_size
indices = tf.range(start_idx, end_idx, dtype=tf.int32)
sample = mc_sampler(space.n_obs, sequence_indices=indices,
dtype=ztypes.float, randomized=False)
else:
sample = mc_sampler(shape=(batch_size, space.n_obs), dtype=ztypes.float)
sample = tf.guarantee_const(sample)
sample = (np.array(upper[0]) - np.array(lower[0])) * sample + lower[0]
sample = tf.transpose(sample)
sample = func(sample)
sample = tf.guarantee_const(sample)
batch_mean = tf.reduce_mean(sample)
batch_mean = tf.guarantee_const(batch_mean)
# with tf.control_dependencies([batch_mean]):
err_weight = 1 / tf.to_double(batch_num + 1)
# err_weight /= err_weight + 1
# print_op = tf.print(batch_mean)
print_op = tf.print(batch_num + 1, mean, err_weight * (batch_mean - mean))
with tf.control_dependencies([print_op]):
return batch_num + 1, mean + err_weight * (batch_mean - mean)
# return batch_num + 1, tf.guarantee_const(mean + err_weight * (batch_mean - mean))
cond = lambda batch_num, _: batch_num < num_batches
initial_mean = tf.convert_to_tensor(0, dtype=ztypes.float)
_, final_mean = tf.while_loop(cond, body, (0, initial_mean), parallel_iterations=1,
swap_memory=False, back_prop=False, maximum_iterations=num_batches)
def dummy_grad_with_var(dy, variables=None):
raise DueToLazynessNotImplementedError("Who called me? Mayou36")
if variables is None:
raise DueToLazynessNotImplementedError("Is this needed? Why? It's not a NN. Please make an issue.")
def dummy_grad_func(x):
values = func(x)
if variables:
gradients = tf.gradients(values, variables, grad_ys=dy)
else:
gradients = None
return gradients
return chunked_average(func=dummy_grad_func, x=x, num_batches=num_batches, batch_size=batch_size,
space=space, mc_sampler=mc_sampler)
def dummy_grad_without_var(dy):
return dummy_grad_with_var(dy=dy, variables=None)
print_op = tf.print(final_mean)
with tf.control_dependencies([print_op]):
return tf.guarantee_const(final_mean), dummy_grad_with_var
try:
return dummy_func(fake_x)
except TypeError:
return dummy_func(fake_x)
class PartialIntegralSampleData(BaseDimensional, ZfitData):
def __init__(self, sample: List[tf.Tensor], space: ZfitSpace):
"""Takes a list of tensors and "fakes" a dataset. Useful for tensors with non-matching shapes.
Args:
sample (List[tf.Tensor]):
space ():
"""
if not isinstance(sample, list):
raise TypeError("Sample has to be a list of tf.Tensors")
super().__init__()
self._space = space
self._sample = sample
self._reorder_indices_list = list(range(len(sample)))
@property
def space(self) -> "zfit.Space":
return self._space
def sort_by_axes(self, axes, allow_superset: bool = False):
axes = convert_to_container(axes)
new_reorder_list = [self._reorder_indices_list[self.space.axes.index(ax)] for ax in axes]
value = self.space.with_axes(axes=axes), new_reorder_list
getter = lambda: (self.space, self._reorder_indices_list)
def setter(value):
self._space, self._reorder_indices_list = value
return TemporarilySet(value=value, getter=getter, setter=setter)
def sort_by_obs(self, obs, allow_superset: bool = False):
obs = convert_to_container(obs)
new_reorder_list = [self._reorder_indices_list[self.space.obs.index(ob)] for ob in obs]
value = self.space.with_obs(obs=obs), new_reorder_list
getter = lambda: (self.space, self._reorder_indices_list)
def setter(value):
self._space, self._reorder_indices_list = value
return TemporarilySet(value=value, getter=getter, setter=setter)
def value(self, obs: List[str] = None):
return self
def unstack_x(self):
unstacked_x = [self._sample[i] for i in self._reorder_indices_list]
if len(unstacked_x) == 1:
unstacked_x = unstacked_x[0]
return unstacked_x
class AnalyticIntegral:
def __init__(self, *args, **kwargs):
"""Hold analytic integrals and manage their dimensions, limits etc."""
super(AnalyticIntegral, self).__init__(*args, **kwargs)
self._integrals = collections.defaultdict(dict)
def get_max_axes(self, limits: ztyping.LimitsType, axes: ztyping.AxesTypeInput = None) -> Tuple[int]:
"""Return the maximal available axes to integrate over analytically for given limits
Args:
            limits (:py:class:`~zfit.Space`): The integral function will be able to integrate over these limits
axes (tuple): The axes over which (or over a subset) it will integrate
Returns:
Tuple[int]:
"""
if not isinstance(limits, Space):
raise TypeError("`limits` have to be a `Space`")
# limits = convert_to_space(limits=limits)
return self._get_max_axes_limits(limits, out_of_axes=limits.axes)[0] # only axes
def _get_max_axes_limits(self, limits, out_of_axes): # TODO: automatic caching? but most probably not relevant
if out_of_axes:
out_of_axes = frozenset(out_of_axes)
implemented_axes = frozenset(d for d in self._integrals.keys() if d <= out_of_axes)
else:
implemented_axes = set(self._integrals.keys())
implemented_axes = sorted(implemented_axes, key=len, reverse=True) # iter through biggest first
for axes in implemented_axes:
limits_matched = []
for lim, integ in self._integrals[axes].items():
if integ.limits >= limits:
limits_matched.append(lim)
if limits_matched: # one or more integrals available
return tuple(sorted(axes)), limits_matched
return (), () # no integral available for this axes
def get_max_integral(self, limits: ztyping.LimitsType,
axes: ztyping.AxesTypeInput = None) -> Union[None, "Integral"]:
"""Return the integral over the `limits` with `axes` (or a subset of them).
Args:
limits (:py:class:`~zfit.Space`):
axes (Tuple[int]):
Returns:
Union[None, Integral]: Return a callable that integrated over the given limits.
"""
limits = convert_to_space(limits=limits, axes=axes)
axes, limits = self._get_max_axes_limits(limits=limits, out_of_axes=axes)
axes = frozenset(axes)
integrals = [self._integrals[axes][lim] for lim in limits]
integral_fn = max(integrals, key=lambda l: l.priority, default=None)
return integral_fn
def register(self, func: Callable, limits: ztyping.LimitsType,
priority: int = 50, *,
supports_norm_range: bool = False, supports_multiple_limits: bool = False) -> None:
"""Register an analytic integral.
Args:
func (callable): The integral function. Takes 1 argument.
axes (tuple): |dims_arg_descr|
limits (:py:class:`~zfit.Space`): |limits_arg_descr| `Limits` can be None if `func` works for any
possible limits
priority (int): If two or more integrals can integrate over certain limits, the one with the higher
priority is taken (usually around 0-100).
supports_norm_range (bool): If True, norm_range will (if needed) be given to `func` as an argument.
supports_multiple_limits (bool): If True, multiple limits may be given as an argument to `func`.
"""
# if limits is False:
# raise ValueError("Limits for the analytical integral have to be specified or None (for any limits).")
# if limits is None:
# limits = tuple((Space.ANY_LOWER, Space.ANY_UPPER) for _ in range(len(axes)))
# limits = convert_to_space(axes=axes, limits=limits)
# else:
# limits = convert_to_space(axes=self.axes, limits=limits)
# limits = limits.get_limits()
if not isinstance(limits, Space):
raise TypeError("Limits for registering an integral have to be `Space`")
axes = frozenset(limits.axes)
# add catching everything unsupported:
func = supports(norm_range=supports_norm_range, multiple_limits=supports_multiple_limits)(func)
limits = limits.with_axes(axes=tuple(sorted(limits.axes)))
self._integrals[axes][limits.limits] = Integral(func=func, limits=limits,
priority=priority) # TODO improve with
# database-like access
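    # Example (sketch; `my_space` is assumed to be a zfit Space and `my_integral_fn` a callable
    # accepting the keyword arguments that `integrate` forwards, returning the analytic integral):
    #   holder = AnalyticIntegral()
    #   holder.register(func=my_integral_fn, limits=my_space, priority=51)
    #   value = holder.integrate(x=None, limits=my_space, axes=my_space.axes)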
def integrate(self, x: Optional[ztyping.XType], limits: ztyping.LimitsType, axes: ztyping.AxesTypeInput = None,
norm_range: ztyping.LimitsType = None, model: ZfitModel = None, params: dict = None) -> ztyping.XType:
"""Integrate analytically over the axes if available.
Args:
            x (numeric): If a partial integration is performed, x holds the values at which the
                partially integrated function is evaluated. If a full integration is performed, this should be `None`.
limits (:py:class:`~zfit.Space`): The limits to integrate
axes (Tuple[int]): The dimensions to integrate over
norm_range (bool): |norm_range_arg_descr|
params (dict): The parameters of the function
Returns:
Union[tf.Tensor, float]:
Raises:
NotImplementedError: If the requested integral is not available.
"""
if axes is None:
axes = limits.axes
axes = frozenset(axes)
integral_holder = self._integrals.get(axes)
# limits = convert_to_space(axes=self.axes, limits=limits)
if integral_holder is None:
raise NotImplementedError("Integral is not available for axes {}".format(axes))
integral_fn = self.get_max_integral(limits=limits)
if integral_fn is None:
raise NotImplementedError(
"Integral is available for axes {}, but not for limits {}".format(axes, limits))
if x is None:
try:
integral = integral_fn(x=x, limits=limits, norm_range=norm_range, params=params, model=model)
except TypeError:
integral = integral_fn(limits=limits, norm_range=norm_range, params=params, model=model)
else:
integral = integral_fn(x=x, limits=limits, norm_range=norm_range, params=params, model=model)
return integral
class Integral: # TODO analytic integral
def __init__(self, func: Callable, limits: "zfit.Space", priority: Union[int, float]):
"""A lightweight holder for the integral function."""
self.limits = limits
self.integrate = func
self.axes = limits.axes
self.priority = priority
def __call__(self, *args, **kwargs):
return self.integrate(*args, **kwargs)
# to be "the" future integral class
class Integration:
def __init__(self, mc_sampler, draws_per_dim):
self.mc_sampler = mc_sampler
self.draws_per_dim = draws_per_dim
|
[
"mayou36@jonas.eschle.com"
] |
mayou36@jonas.eschle.com
|
0e3a4182b0655427f3f767ef4844a5f92e0082a3
|
e7360e5a9aa3ac92eec2aad3fb36613febcb5e8a
|
/b2b/migrations/0020_auto_20200818_1342.py
|
715be2f088f7dda69a95fc4f47fc9bb69d14fdec
|
[] |
no_license
|
devmikado/phonebooth
|
6a7b49d3748424f52cbbaed9fa67a72f9f23d5bc
|
2711b4913c2ee7bc5f677b7aaaf192658dbe9754
|
refs/heads/master
| 2023-02-09T21:21:26.483410
| 2021-01-05T13:41:10
| 2021-01-05T13:41:10
| 324,892,889
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,424
|
py
|
# Generated by Django 2.0 on 2020-08-18 13:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('social_django', '0010_uid_db_index'),
('nationality', '0006_delete_dummymodel'),
('b2b', '0019_auto_20200818_1331'),
]
operations = [
migrations.RemoveField(
model_name='personaaccountsmaster',
name='persona_id',
),
migrations.RemoveField(
model_name='personaaccountsmaster',
name='social_auth_id',
),
migrations.RemoveField(
model_name='personaculturemaster',
name='culture_id',
),
migrations.RemoveField(
model_name='personaculturemaster',
name='persona_id',
),
migrations.RemoveField(
model_name='personalocationsmaster',
name='loaction_id',
),
migrations.RemoveField(
model_name='personalocationsmaster',
name='persona_id',
),
migrations.RemoveField(
model_name='personasentimentsmaster',
name='persona_id',
),
migrations.RemoveField(
model_name='personasentimentsmaster',
name='sentiment_id',
),
migrations.AddField(
model_name='customer_persona',
name='culture_id',
field=models.ManyToManyField(to='nationality.nationality_prediction'),
),
migrations.AddField(
model_name='customer_persona',
name='loaction_id',
field=models.ManyToManyField(to='nationality.usa_cities_master'),
),
migrations.AddField(
model_name='customer_persona',
name='sentiment_id',
field=models.ManyToManyField(to='b2b.sentimentMaster'),
),
migrations.AddField(
model_name='customer_persona',
name='social_auth_id',
field=models.ManyToManyField(to='social_django.UserSocialAuth'),
),
migrations.DeleteModel(
name='PersonaAccountsMaster',
),
migrations.DeleteModel(
name='PersonaCultureMaster',
),
migrations.DeleteModel(
name='PersonaLocationsMaster',
),
migrations.DeleteModel(
name='PersonaSentimentsMaster',
),
]
|
[
"v.mote@splendornet.com"
] |
v.mote@splendornet.com
|
bca07c6ee8c8e2f2ba5bff9f8f1583f23c7dee76
|
97d397dae2eee337e494949ba6f8655cc35bd09b
|
/venv/bin/django-admin
|
cbf88ae96839329d2d8062b5a0c5f0b2ebda8638
|
[] |
no_license
|
arphone-main/youtube
|
f5283d66c9919e61fb0e6591e3a7ff77f6a5765d
|
976792530467a0f1f58375f372ec44a0ad8b0991
|
refs/heads/main
| 2023-05-12T08:05:18.091043
| 2021-05-29T05:13:57
| 2021-05-29T05:13:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
#!/Users/apple/PycharmProjects/youtube/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"chayan.datta@decentro.tech"
] |
chayan.datta@decentro.tech
|
|
1529b6e30ae281f1bc292ea9ee0eb3250db256ff
|
48b90238414fd31df392e2f871a3901f8568c3da
|
/djenga_tests/urls.py
|
092138416100f565fae4fa4410fe1de232199edb
|
[
"BSD-3-Clause"
] |
permissive
|
2ps/djenga
|
ccae21bec16fca1c5eb04811886b446daa0c1807
|
85ac2c7b0b0e80b55aff43f027814d05b9b0532c
|
refs/heads/master
| 2021-08-17T03:09:48.444869
| 2021-07-21T02:32:14
| 2021-07-21T02:32:14
| 19,199,344
| 6
| 1
|
BSD-3-Clause
| 2019-06-26T21:57:44
| 2014-04-27T08:21:30
|
Python
|
UTF-8
|
Python
| false
| false
| 768
|
py
|
"""test_django URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
[
"pshingavi@gmail.com"
] |
pshingavi@gmail.com
|
a59c87e12366efded39a4a976f77c45e22a61dce
|
67efb90cf4dc60b7ef9cda949a9a8a95a601783e
|
/src/imovie/urls.py
|
1eec77abe9d9ff5a7995b403a07a9f70bc5e19a1
|
[] |
no_license
|
osama-abbasss/download-apple-movie-trailer
|
05654331d59bd9a5a90997792b34e01565334979
|
3dd6b0ac8107f02c31e9d9afdaeefdd325720187
|
refs/heads/master
| 2022-06-04T03:12:50.056430
| 2020-05-01T18:50:18
| 2020-05-01T18:50:18
| 260,534,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
from django.urls import path
from . import views
app_name = 'imovie'
urlpatterns = [
path('', views.download_trailer, name='list')
]
|
[
"eng.oabbass@gmail.com"
] |
eng.oabbass@gmail.com
|
24a3ddcdb2ec4e348fbc71bc3f5ee8fd12bb1d69
|
b2ee0959efe9bc1a1acaecc5487c711fe7f4b1e4
|
/app01/models.py
|
4736d636d1e682fe7a2ecf7a96bf8f1fd044a71c
|
[] |
no_license
|
wuxiaoliu123/admin_demo
|
9834712ad722f10b28d5047f307f883ad5a588cc
|
a1f7c6b9a3a9deecf45e750d2f040feb07725c28
|
refs/heads/master
| 2020-05-27T00:49:10.925915
| 2019-05-24T08:21:49
| 2019-05-24T08:21:49
| 188,429,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,072
|
py
|
from django.db import models
# Create your models here.
class Author(models.Model):
nid = models.AutoField(primary_key=True)
name=models.CharField( max_length=32)
age=models.IntegerField()
    # Replace the table name shown in the admin with its Chinese (verbose) name
class Meta:
verbose_name = "作者"
verbose_name_plural =verbose_name
    # Build a one-to-one relationship with AuthorDetail
authorDetail=models.OneToOneField(to="AuthorDetail",on_delete=models.CASCADE)
def __str__(self):
return self.name
class AuthorDetail(models.Model):
nid = models.AutoField(primary_key=True)
birthday=models.DateField()
telephone=models.BigIntegerField()
addr=models.CharField( max_length=64)
    # Replace the table name shown in the admin with its Chinese (verbose) name
class Meta:
verbose_name = "作者信息"
verbose_name_plural =verbose_name
# def __str__(self):
# return self.birthday
class Publish(models.Model):
nid = models.AutoField(primary_key=True)
name=models.CharField( max_length=32)
city=models.CharField( max_length=32)
email=models.EmailField()
    # Replace the table name shown in the admin with its Chinese (verbose) name
class Meta:
verbose_name = "出版社"
verbose_name_plural =verbose_name
def __str__(self):
return self.name
class Book(models.Model):
nid = models.AutoField(primary_key=True)
title = models.CharField( max_length=32)
publishDate=models.DateField()
price=models.DecimalField(max_digits=5,decimal_places=2)
    # Build a one-to-many relationship with Publish; the foreign key field lives on the "many" side
publish=models.ForeignKey(to="Publish",to_field="nid",on_delete=models.CASCADE)
    # Build a many-to-many relationship with the Author table; ManyToManyField can be placed on either of the two models and automatically creates the third (join) table
authors=models.ManyToManyField(to='Author',)
    # Replace the table name shown in the admin with its Chinese (verbose) name
class Meta:
verbose_name = "书籍"
verbose_name_plural =verbose_name
def __str__(self):
return self.title
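# Example queries (a sketch, assuming migrations for these models have been applied and
# `some_publish` / `some_author` are existing instances):
#   book = Book.objects.create(title="Demo", publishDate="2019-05-24", price="19.99", publish=some_publish)
#   book.authors.add(some_author)        # many-to-many via the automatically created join table
#   some_author.authorDetail.telephone   # one-to-one lookup from Author to AuthorDetail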
|
[
"6165@qq.com"
] |
6165@qq.com
|
36b3a20cb723efb4eac4fa1c0e78c20049cd3c24
|
e26cc73cd3990389a0aebc1d6890d3dabe1a1744
|
/sklearn/utils/_array_api.py
|
ed16ce767a0cd3231219676c182a58d615d6c073
|
[
"BSD-3-Clause"
] |
permissive
|
neurodata/scikit-learn
|
705f2d0f09a843a01e0c7a959c412c220cac272c
|
3ad522ac06b92c20223d4e141a3565839b6a8057
|
refs/heads/submodulev3
| 2023-08-16T17:24:51.220830
| 2023-08-15T21:58:09
| 2023-08-15T21:58:09
| 197,068,409
| 8
| 6
|
BSD-3-Clause
| 2023-08-24T23:48:17
| 2019-07-15T20:33:57
|
Python
|
UTF-8
|
Python
| false
| false
| 17,520
|
py
|
"""Tools to support array_api."""
import itertools
import math
from functools import wraps
import numpy
import scipy.special as special
from .._config import get_config
from .fixes import parse_version
def yield_namespace_device_dtype_combinations():
"""Yield supported namespace, device, dtype tuples for testing.
Use this to test that an estimator works with all combinations.
Returns
-------
array_namespace : str
The name of the Array API namespace.
device : str
The name of the device on which to allocate the arrays. Can be None to
indicate that the default value should be used.
dtype : str
The name of the data type to use for arrays. Can be None to indicate
that the default value should be used.
"""
for array_namespace in [
# The following is used to test the array_api_compat wrapper when
# array_api_dispatch is enabled: in particular, the arrays used in the
# tests are regular numpy arrays without any "device" attribute.
"numpy",
# Stricter NumPy-based Array API implementation. The
        # numpy.array_api.Array instances always have a dummy "device" attribute.
"numpy.array_api",
"cupy",
"cupy.array_api",
"torch",
]:
if array_namespace == "torch":
for device, dtype in itertools.product(
("cpu", "cuda"), ("float64", "float32")
):
yield array_namespace, device, dtype
else:
yield array_namespace, None, None
def _check_array_api_dispatch(array_api_dispatch):
"""Check that array_api_compat is installed and NumPy version is compatible.
array_api_compat follows NEP29, which has a higher minimum NumPy version than
scikit-learn.
"""
if array_api_dispatch:
try:
import array_api_compat # noqa
except ImportError:
raise ImportError(
"array_api_compat is required to dispatch arrays using the API"
" specification"
)
numpy_version = parse_version(numpy.__version__)
min_numpy_version = "1.21"
if numpy_version < parse_version(min_numpy_version):
raise ImportError(
f"NumPy must be {min_numpy_version} or newer to dispatch array using"
" the API specification"
)
def device(x):
"""Hardware device the array data resides on.
Parameters
----------
x : array
Array instance from NumPy or an array API compatible library.
Returns
-------
out : device
`device` object (see the "Device Support" section of the array API spec).
"""
if isinstance(x, (numpy.ndarray, numpy.generic)):
return "cpu"
return x.device
def size(x):
"""Return the total number of elements of x.
Parameters
----------
x : array
Array instance from NumPy or an array API compatible library.
Returns
-------
out : int
Total number of elements.
"""
return math.prod(x.shape)
def _is_numpy_namespace(xp):
"""Return True if xp is backed by NumPy."""
return xp.__name__ in {"numpy", "array_api_compat.numpy", "numpy.array_api"}
def _union1d(a, b, xp):
if _is_numpy_namespace(xp):
return xp.asarray(numpy.union1d(a, b))
assert a.ndim == b.ndim == 1
return xp.unique_values(xp.concat([xp.unique_values(a), xp.unique_values(b)]))
def isdtype(dtype, kind, *, xp):
"""Returns a boolean indicating whether a provided dtype is of type "kind".
Included in the v2022.12 of the Array API spec.
https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
"""
if isinstance(kind, tuple):
return any(_isdtype_single(dtype, k, xp=xp) for k in kind)
else:
return _isdtype_single(dtype, kind, xp=xp)
def _isdtype_single(dtype, kind, *, xp):
if isinstance(kind, str):
if kind == "bool":
return dtype == xp.bool
elif kind == "signed integer":
return dtype in {xp.int8, xp.int16, xp.int32, xp.int64}
elif kind == "unsigned integer":
return dtype in {xp.uint8, xp.uint16, xp.uint32, xp.uint64}
elif kind == "integral":
return any(
_isdtype_single(dtype, k, xp=xp)
for k in ("signed integer", "unsigned integer")
)
elif kind == "real floating":
return dtype in {xp.float32, xp.float64}
elif kind == "complex floating":
# Some name spaces do not have complex, such as cupy.array_api
# and numpy.array_api
complex_dtypes = set()
if hasattr(xp, "complex64"):
complex_dtypes.add(xp.complex64)
if hasattr(xp, "complex128"):
complex_dtypes.add(xp.complex128)
return dtype in complex_dtypes
elif kind == "numeric":
return any(
_isdtype_single(dtype, k, xp=xp)
for k in ("integral", "real floating", "complex floating")
)
else:
raise ValueError(f"Unrecognized data type kind: {kind!r}")
else:
return dtype == kind
class _ArrayAPIWrapper:
"""sklearn specific Array API compatibility wrapper
This wrapper makes it possible for scikit-learn maintainers to
deal with discrepancies between different implementations of the
Python array API standard and its evolution over time.
The Python array API standard specification:
https://data-apis.org/array-api/latest/
Documentation of the NumPy implementation:
https://numpy.org/neps/nep-0047-array-api-standard.html
"""
def __init__(self, array_namespace):
self._namespace = array_namespace
def __getattr__(self, name):
return getattr(self._namespace, name)
def __eq__(self, other):
return self._namespace == other._namespace
def take(self, X, indices, *, axis=0):
# When array_api supports `take` we can use this directly
# https://github.com/data-apis/array-api/issues/177
if self._namespace.__name__ == "numpy.array_api":
X_np = numpy.take(X, indices, axis=axis)
return self._namespace.asarray(X_np)
# We only support axis in (0, 1) and ndim in (1, 2) because that is all we need
# in scikit-learn
if axis not in {0, 1}:
raise ValueError(f"Only axis in (0, 1) is supported. Got {axis}")
if X.ndim not in {1, 2}:
raise ValueError(f"Only X.ndim in (1, 2) is supported. Got {X.ndim}")
if axis == 0:
if X.ndim == 1:
selected = [X[i] for i in indices]
else: # X.ndim == 2
selected = [X[i, :] for i in indices]
else: # axis == 1
selected = [X[:, i] for i in indices]
return self._namespace.stack(selected, axis=axis)
def isdtype(self, dtype, kind):
return isdtype(dtype, kind, xp=self._namespace)
def _check_device_cpu(device): # noqa
if device not in {"cpu", None}:
raise ValueError(f"Unsupported device for NumPy: {device!r}")
def _accept_device_cpu(func):
@wraps(func)
def wrapped_func(*args, **kwargs):
_check_device_cpu(kwargs.pop("device", None))
return func(*args, **kwargs)
return wrapped_func
class _NumPyAPIWrapper:
"""Array API compat wrapper for any numpy version
NumPy < 1.22 does not expose the numpy.array_api namespace. This
wrapper makes it possible to write code that uses the standard
Array API while working with any version of NumPy supported by
scikit-learn.
See the `get_namespace()` public function for more details.
"""
# Creation functions in spec:
# https://data-apis.org/array-api/latest/API_specification/creation_functions.html
_CREATION_FUNCS = {
"arange",
"empty",
"empty_like",
"eye",
"full",
"full_like",
"linspace",
"ones",
"ones_like",
"zeros",
"zeros_like",
}
# Data types in spec
# https://data-apis.org/array-api/latest/API_specification/data_types.html
_DTYPES = {
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float32",
"float64",
"complex64",
"complex128",
}
def __getattr__(self, name):
attr = getattr(numpy, name)
# Support device kwargs and make sure they are on the CPU
if name in self._CREATION_FUNCS:
return _accept_device_cpu(attr)
# Convert to dtype objects
if name in self._DTYPES:
return numpy.dtype(attr)
return attr
@property
def bool(self):
return numpy.bool_
def astype(self, x, dtype, *, copy=True, casting="unsafe"):
# astype is not defined in the top level NumPy namespace
return x.astype(dtype, copy=copy, casting=casting)
def asarray(self, x, *, dtype=None, device=None, copy=None): # noqa
_check_device_cpu(device)
# Support copy in NumPy namespace
if copy is True:
return numpy.array(x, copy=True, dtype=dtype)
else:
return numpy.asarray(x, dtype=dtype)
def unique_inverse(self, x):
return numpy.unique(x, return_inverse=True)
def unique_counts(self, x):
return numpy.unique(x, return_counts=True)
def unique_values(self, x):
return numpy.unique(x)
def concat(self, arrays, *, axis=None):
return numpy.concatenate(arrays, axis=axis)
def reshape(self, x, shape, *, copy=None):
"""Gives a new shape to an array without changing its data.
The Array API specification requires shape to be a tuple.
https://data-apis.org/array-api/latest/API_specification/generated/array_api.reshape.html
"""
if not isinstance(shape, tuple):
raise TypeError(
f"shape must be a tuple, got {shape!r} of type {type(shape)}"
)
if copy is True:
x = x.copy()
return numpy.reshape(x, shape)
def isdtype(self, dtype, kind):
return isdtype(dtype, kind, xp=self)
_NUMPY_API_WRAPPER_INSTANCE = _NumPyAPIWrapper()
def get_namespace(*arrays):
"""Get namespace of arrays.
Introspect `arrays` arguments and return their common Array API
compatible namespace object, if any. NumPy 1.22 and later can
construct such containers using the `numpy.array_api` namespace
for instance.
See: https://numpy.org/neps/nep-0047-array-api-standard.html
If `arrays` are regular numpy arrays, an instance of the
`_NumPyAPIWrapper` compatibility wrapper is returned instead.
    Namespace support is not enabled by default. To enable it
call:
sklearn.set_config(array_api_dispatch=True)
or:
with sklearn.config_context(array_api_dispatch=True):
# your code here
Otherwise an instance of the `_NumPyAPIWrapper`
compatibility wrapper is always returned irrespective of
the fact that arrays implement the `__array_namespace__`
protocol or not.
Parameters
----------
*arrays : array objects
Array objects.
Returns
-------
namespace : module
Namespace shared by array objects. If any of the `arrays` are not arrays,
the namespace defaults to NumPy.
is_array_api_compliant : bool
True if the arrays are containers that implement the Array API spec.
Always False when array_api_dispatch=False.
"""
array_api_dispatch = get_config()["array_api_dispatch"]
if not array_api_dispatch:
return _NUMPY_API_WRAPPER_INSTANCE, False
_check_array_api_dispatch(array_api_dispatch)
# array-api-compat is a required dependency of scikit-learn only when
# configuring `array_api_dispatch=True`. Its import should therefore be
# protected by _check_array_api_dispatch to display an informative error
# message in case it is missing.
import array_api_compat
namespace, is_array_api_compliant = array_api_compat.get_namespace(*arrays), True
if namespace.__name__ in {"numpy.array_api", "cupy.array_api"}:
namespace = _ArrayAPIWrapper(namespace)
return namespace, is_array_api_compliant
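# Hedged usage sketch (added for illustration; not part of the original module).
# It shows how get_namespace is typically consumed; the array `X` below is an
# assumption, not something defined in this file.
#
#   import sklearn
#   with sklearn.config_context(array_api_dispatch=True):
#       xp, is_array_api = get_namespace(X)
#       X_mean = xp.mean(X, axis=0)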
def _expit(X):
xp, _ = get_namespace(X)
if _is_numpy_namespace(xp):
return xp.asarray(special.expit(numpy.asarray(X)))
return 1.0 / (1.0 + xp.exp(-X))
def _add_to_diagonal(array, value, xp):
# Workaround for the lack of support for xp.reshape(a, shape, copy=False) in
# numpy.array_api: https://github.com/numpy/numpy/issues/23410
value = xp.asarray(value, dtype=array.dtype)
if _is_numpy_namespace(xp):
array_np = numpy.asarray(array)
array_np.flat[:: array.shape[0] + 1] += value
return xp.asarray(array_np)
elif value.ndim == 1:
for i in range(array.shape[0]):
array[i, i] += value[i]
else:
# scalar value
for i in range(array.shape[0]):
array[i, i] += value
def _weighted_sum(sample_score, sample_weight, normalize=False, xp=None):
# XXX: this function accepts Array API input but returns a Python scalar
# float. The call to float() is convenient because it removes the need to
# move back results from device to host memory (e.g. calling `.cpu()` on a
# torch tensor). However, this might interact in unexpected ways (break?)
# with lazy Array API implementations. See:
# https://github.com/data-apis/array-api/issues/642
if xp is None:
xp, _ = get_namespace(sample_score)
if normalize and _is_numpy_namespace(xp):
sample_score_np = numpy.asarray(sample_score)
if sample_weight is not None:
sample_weight_np = numpy.asarray(sample_weight)
else:
sample_weight_np = None
return float(numpy.average(sample_score_np, weights=sample_weight_np))
if not xp.isdtype(sample_score.dtype, "real floating"):
sample_score = xp.astype(sample_score, xp.float64)
if sample_weight is not None:
sample_weight = xp.asarray(sample_weight)
if not xp.isdtype(sample_weight.dtype, "real floating"):
sample_weight = xp.astype(sample_weight, xp.float64)
if normalize:
if sample_weight is not None:
scale = xp.sum(sample_weight)
else:
scale = sample_score.shape[0]
if scale != 0:
sample_score = sample_score / scale
if sample_weight is not None:
return float(sample_score @ sample_weight)
else:
return float(xp.sum(sample_score))
def _asarray_with_order(array, dtype=None, order=None, copy=None, *, xp=None):
"""Helper to support the order kwarg only for NumPy-backed arrays
Memory layout parameter `order` is not exposed in the Array API standard,
however some input validation code in scikit-learn needs to work both
for classes and functions that will leverage Array API only operations
and for code that inherently relies on NumPy backed data containers with
specific memory layout constraints (e.g. our own Cython code). The
purpose of this helper is to make it possible to share code for data
container validation without memory copies for both downstream use cases:
the `order` parameter is only enforced if the input array implementation
is NumPy based, otherwise `order` is just silently ignored.
"""
if xp is None:
xp, _ = get_namespace(array)
if _is_numpy_namespace(xp):
# Use NumPy API to support order
if copy is True:
array = numpy.array(array, order=order, dtype=dtype)
else:
array = numpy.asarray(array, order=order, dtype=dtype)
# At this point array is a NumPy ndarray. We convert it to an array
# container that is consistent with the input's namespace.
return xp.asarray(array)
else:
return xp.asarray(array, dtype=dtype, copy=copy)
def _convert_to_numpy(array, xp):
"""Convert X into a NumPy ndarray on the CPU."""
xp_name = xp.__name__
if xp_name in {"array_api_compat.torch", "torch"}:
return array.cpu().numpy()
elif xp_name == "cupy.array_api":
return array._array.get()
elif xp_name in {"array_api_compat.cupy", "cupy"}: # pragma: nocover
return array.get()
return numpy.asarray(array)
def _estimator_with_converted_arrays(estimator, converter):
"""Create new estimator which converting all attributes that are arrays.
The converter is called on all NumPy arrays and arrays that support the
`DLPack interface <https://dmlc.github.io/dlpack/latest/>`__.
Parameters
----------
estimator : Estimator
Estimator to convert
converter : callable
Callable that takes an array attribute and returns the converted array.
Returns
-------
new_estimator : Estimator
        Converted estimator.
"""
from sklearn.base import clone
new_estimator = clone(estimator)
for key, attribute in vars(estimator).items():
if hasattr(attribute, "__dlpack__") or isinstance(attribute, numpy.ndarray):
attribute = converter(attribute)
setattr(new_estimator, key, attribute)
return new_estimator
|
[
"noreply@github.com"
] |
neurodata.noreply@github.com
|
5d008af1fe2e68f966cd6cb40d4cc4a415f37850
|
003d32a0948f83d456010b82284a0323bd524e58
|
/PartB/QueueEvent.py
|
384d29492101bb952d6939bd9185af92b4094728
|
[] |
no_license
|
Skymike868/Discrete_Simulation
|
4d6122ba5ed3ab184a9cff5699906f0732c0658d
|
2e2544d505da40dc9a4a136f60a432eacebf2582
|
refs/heads/master
| 2020-04-28T19:24:29.944286
| 2019-03-13T23:12:38
| 2019-03-13T23:12:38
| 175,509,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
from PartB.SimulationEvent import SimulationEvent
from enum import Enum
class QueueEventTypes(Enum):
ARRIVAL = 0
DEPARTURE = 1
ARRIVAL = QueueEventTypes.ARRIVAL
DEPARTURE = QueueEventTypes.DEPARTURE
class QueueEvent(SimulationEvent):
def __init__(self, event_time, event_type):
super().__init__(event_time)
self.event_type = event_type
def __repr__(self):
return f'{self.event_type} @ {self.event_time}'
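# Hedged usage sketch (added for illustration; not part of the original file).
# It assumes SimulationEvent simply stores event_time, as the constructor implies.
#
#   events = [QueueEvent(3.2, DEPARTURE), QueueEvent(1.5, ARRIVAL)]
#   events.sort(key=lambda e: e.event_time)
#   print(events)  # [QueueEventTypes.ARRIVAL @ 1.5, QueueEventTypes.DEPARTURE @ 3.2]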
|
[
"M3618861@hotmail.com"
] |
M3618861@hotmail.com
|
73648b74a1ebd8dbcaaba72205343f8e41529f72
|
612093b414a3869b500d5029b66ea20788c1d258
|
/kaggle_m5_forecasting/eda/weather.py
|
b365552db7f1d7093318ccf1f8336ce0ce3eda4f
|
[] |
no_license
|
kiccho1101/kaggle_m5_forecasting
|
1bec3ed258e0d2821deb22bbc1f596c26f034caf
|
278ebb0d3623963dd4dd4110f16dcbb0eb4cda8e
|
refs/heads/master
| 2023-01-02T21:10:28.880998
| 2020-10-14T08:30:35
| 2020-10-14T08:30:35
| 249,624,395
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,909
|
py
|
# %%
import pandas as pd
from thunderbolt import Thunderbolt
import sys
import os
sys.path.append(os.getcwd() + "/../..")
from kaggle_m5_forecasting.data.load_data import RawData
from kaggle_m5_forecasting.data.fe_weather import read_weather_data
from kaggle_m5_forecasting.utils import decode_ids
tb = Thunderbolt("./../../resource")
data: pd.DataFrame = pd.concat(
[tb.get_data("MakeData"), tb.get_data("FEWeather")], axis=1
)
weather = read_weather_data("./../../external_data")
weather["date"] = pd.to_datetime(weather["date_time"]).dt.strftime("%Y-%m-%d")
weather.index = pd.to_datetime(weather["date_time"])
weather.index.name = None
weather.drop("date_time", axis=1, inplace=True)
raw: RawData = tb.get_data("LoadRawData")
# %%
raw.calendar["d"] = raw.calendar["d"].apply(lambda d: int(d.replace("d_", "")))
cat_id = 0
df: pd.DataFrame = data[data["cat_id"] == cat_id].groupby(["d", "state_id"])[
"sales"
].mean().reset_index().merge(raw.calendar[["d", "date"]], on="d", how="left").merge(
weather, on=["date", "state_id"], how="left"
)
df.index = pd.to_datetime(df["date"])
# %%
import seaborn as sns
selected_cols = [
"sales",
"fe_weather_maxtempC",
"fe_weather_mintempC",
"fe_weather_sunHour",
"fe_weather_DewPointC",
"fe_weather_FeelsLikeC",
"fe_weather_HeatIndexC",
"fe_weather_WindChillC",
"fe_weather_WindGustKmph",
"fe_weather_cloudcover",
"fe_weather_humidity",
"fe_weather_precipMM",
"fe_weather_pressure",
]
state_id = 2
sns_plot = sns.pairplot(df[df["state_id"] == state_id][selected_cols])
sns_plot.savefig(
os.path.join(os.path.dirname(__file__), "weather", f"pairplot_{state_id}.png")
)
# %%
corr = df[df["state_id"] == state_id][selected_cols].corr()
sns_plot = sns.heatmap(corr)
sns_plot.get_figure().savefig(
os.path.join(os.path.dirname(__file__), "weather", f"corr_heatmap_{state_id}.png")
)
# %%
|
[
"youodf11khp@gmail.com"
] |
youodf11khp@gmail.com
|
0e127b01383041fc76e6b429509cb4aadac82a01
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/HLT/Event/ByteStreamEmonSvc/share/l1bits4emon
|
efe0560c94f319753634fc1f286498acd665777b
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949
| 2020-01-19T03:59:35
| 2020-01-19T03:59:35
| 234,836,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
#!/usr/bin/env python
import sys
def l1bits(bits):
result = 0L
for i in bits:
result |= 1L << i
print "Full: 0x%x" % result
mask = (2**64-1)
print '[',"0x%x" % (result & mask),',',
print "0x%x" % ((result >> 64) & mask),',',
print "0x%x" % ((result >> 128) & mask),',',
print "0x%x" % ((result >> 192) & mask),']'
if __name__ == '__main__':
l1bits(map(int,sys.argv[1:]))
|
[
"rushioda@lxplus754.cern.ch"
] |
rushioda@lxplus754.cern.ch
|
|
e410330b360d8dcf91745a5c636e64137c2856a9
|
055ff4b5faad0e7b1f99f03bf16989f6b0a70be4
|
/venv/bin/pylint
|
ec160ceb66efeebdc5f1ef8cbed87243aaae6e91
|
[] |
no_license
|
Cashley112/Dj_Portfolio
|
9f8f4dc6beeae9d1a3c52717e5dcff551cda5028
|
1f5036c9ffc900261434bf535a1a4217a62cc58d
|
refs/heads/master
| 2022-12-06T03:24:10.628292
| 2020-08-25T02:51:08
| 2020-08-25T02:51:08
| 290,095,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
#!/Users/connorashley/django_portfolio/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pylint
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run_pylint())
|
[
"connorashley@Connors-MacBook-Pro.local"
] |
connorashley@Connors-MacBook-Pro.local
|
|
04616e090f78a429c2421054de60e0e9a4ec6a46
|
8bdce915174678a90a6be811ea91b50930b9d26a
|
/elk/shares/shares_from_sina/data_from_sina2.py
|
793d83f7dcc94fad86f72758e6de6dcd56691d01
|
[] |
no_license
|
CharlesBird/Resources
|
daefffef8fb3735e656cd0a3bf400d5e2ff85cc0
|
517ac7b7992a686fa5370b6fda8b62663735853c
|
refs/heads/master
| 2022-12-15T02:54:56.530940
| 2020-02-29T14:33:43
| 2020-02-29T14:33:43
| 109,668,108
| 1
| 1
| null | 2022-12-08T05:04:25
| 2017-11-06T08:34:30
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,307
|
py
|
import tushare as ts
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import re
import requests
from datetime import datetime
TOKEN = '137e3fc78e901b8463d68a102b168b2ea0217cb854abfad24d4dc7f7'
pro = ts.pro_api(TOKEN)
es = Elasticsearch(['47.103.32.102:9200'])
index = "shares_real_time_data_2019-09-19_00001"
def all_shares_codes():
sh_list_datas = pro.stock_basic(exchange='', list_status='', fields='ts_code,symbol')
return sh_list_datas.to_dict('records')
def get_sina_codes(step=500):
shares_list = all_shares_codes()
start = 0
sina_codes = []
while shares_list[start:start + step]:
per_codes = []
for share in shares_list[start:start + step]:
if share['ts_code'][-2:] == 'SH':
per_codes.append('sh' + share['symbol'])
elif share['ts_code'][-2:] == 'SZ':
per_codes.append('sz' + share['symbol'])
else:
pass
sina_codes.append(per_codes)
start += step
return sina_codes
def get_share_datas():
sina_codes = get_sina_codes()
for codes in sina_codes:
url = 'http://hq.sinajs.cn/list={}'.format(','.join(codes))
response = requests.get(url)
data = response.text
pattern = re.compile('="(.*)"')
data_list = pattern.findall(data)
for i, str_data in enumerate(data_list):
code = codes[i]
list_data = str_data.split(',')
features = ['name', 'open', 'yesterday_close', 'price', 'high', 'low', 'bid_price', 'auction_price',
'volume', 'amount', 'buy1_vol', 'buy1_quote', 'buy2_vol', 'buy2_quote', 'buy3_vol',
'buy3_quote', 'buy4_vol', 'buy4_quote', 'buy5_vol', 'buy5_quote', 'sell1_vol', 'sell1_quote',
'sell2_vol', 'sell2_quote', 'sell3_vol', 'sell3_quote', 'sell4_vol', 'sell4_quote', 'sell5_vol',
'sell5_quote']
value = {'code': code}
for j, f in enumerate(features):
value.update({f: list_data[j]})
date = list_data[30]
date_t = list_data[31]
str_date = date + ' ' + date_t
d = datetime.strptime(str_date, '%Y-%m-%d %H:%M:%S')
value.update({'@timestamp': d})
print(value)
insert_into_es(value)
def insert_into_es(data):
helpers.bulk(es, data, index=index)
# es.index(index=index, body=data)
def main():
# 190.34576533935595
import time
s = time.clock()
get_share_datas()
print(time.clock()-s)
if __name__ == '__main__':
# for r in get_sina_codes():
# print(r)
# main()
# print(get_sina_codes())
actions = [{'index': 'shares_real_time_data_2019-09-19_00001', 'doc_type': '_doc', '_source': {'code': 'sz000409', 'name': 'ST地矿', 'open': '4.730', 'yesterday_close': '4.820', 'price': '4.950', 'high': '5.060', 'low': '4.580', 'bid_price': '4.940', 'auction_price': '4.950', 'volume': '6388891', 'amount': '31123555.730', 'buy1_vol': '14974', 'buy1_quote': '4.940', 'buy2_vol': '9600', 'buy2_quote': '4.930', 'buy3_vol': '16800', 'buy3_quote': '4.920', 'buy4_vol': '35700', 'buy4_quote': '4.910', 'buy5_vol': '108500', 'buy5_quote': '4.900', 'sell1_vol': '32700', 'sell1_quote': '4.950', 'sell2_vol': '12300', 'sell2_quote': '4.960', 'sell3_vol': '29000', 'sell3_quote': '4.970', 'sell4_vol': '4800', 'sell4_quote': '4.980', 'sell5_vol': '25100', 'sell5_quote': '4.990'}}, {'index': 'shares_real_time_data_2019-09-19_00001', 'doc_type': '_doc', '_source': {'code': 'sz000410', 'name': '*ST沈机', 'open': '6.450', 'yesterday_close': '6.390', 'price': '6.460', 'high': '6.600', 'low': '6.430', 'bid_price': '6.450', 'auction_price': '6.460', 'volume': '3841985', 'amount': '24934233.610', 'buy1_vol': '44300', 'buy1_quote': '6.450', 'buy2_vol': '51300', 'buy2_quote': '6.440', 'buy3_vol': '22200', 'buy3_quote': '6.430', 'buy4_vol': '15600', 'buy4_quote': '6.420', 'buy5_vol': '36300', 'buy5_quote': '6.410', 'sell1_vol': '7000', 'sell1_quote': '6.460', 'sell2_vol': '62100', 'sell2_quote': '6.470', 'sell3_vol': '19400', 'sell3_quote': '6.480', 'sell4_vol': '12600', 'sell4_quote': '6.490', 'sell5_vol': '54900', 'sell5_quote': '6.500'}}]
insert_into_es(actions)
|
[
"1016784928@qq.com"
] |
1016784928@qq.com
|
bb7d86785330fb4f70a6863312bec508877cef0c
|
96744930057da883e3c1728fa34dfbf649322135
|
/dbc2excel_main.py
|
0137aede3b5f0a4693c3697c5aeecc3f6542f9a7
|
[] |
no_license
|
BlackCatKnight/dbc2excel
|
b87cdf83c7be4be0405ce3a8e876a66803806f40
|
bcb841b3d12d83982bc34c7321d9ebc01c79277b
|
refs/heads/master
| 2020-10-01T14:37:32.238027
| 2020-02-17T05:21:01
| 2020-02-17T05:21:01
| 227,557,688
| 0
| 0
| null | 2019-12-12T08:32:23
| 2019-12-12T08:32:23
| null |
UTF-8
|
Python
| false
| false
| 6,699
|
py
|
#!/usr/bin/env python
import wx
import dbc2excel as d2e
class MyFrame(wx.Frame):
""" We simply derive a new class of Frame. """
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, title=title, size=(600, 500))
self.path = ''
#self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)
#self.control = wx.TextCtrl(self, style=wx.TE_MULTILINE)
        ## Options controlling how the Excel file is generated
self.if_sig_desc = True
self.if_sig_val_desc = True
self.val_description_max_number = 70
self.if_start_val = True
self.if_recv_send = True
self.if_asc_sort = True
        # Static text
self.quote = wx.StaticText(self, label="\n", pos=(420, 260))
        # Console/log window
self.logger = wx.TextCtrl(self, pos=(5, 300), size=(580, 130), style=wx.TE_MULTILINE | wx.TE_READONLY)
# A button
self.button =wx.Button(self, label="生成Excel文件", pos=(10, 150),size=(200,100))
self.Bind(wx.EVT_BUTTON, self.create_excel,self.button)
# B button
b = wx.Button(self,-1,u"选择dbc文件",pos=(10, 20),size=(200,100))
self.Bind(wx.EVT_BUTTON, self.select_file_button, b)
        # c button: add an image
#pic = wx.Image("./source/a.bmp", wx.BITMAP_TYPE_BMP).ConvertToBitmap()
#c = wx.BitmapButton(self,-1,pic,pos=(250, 20),size=(290,150))
#self.Bind(wx.EVT_BUTTON, self.select_file_button, b)
        # Add checkboxes
        #panel = wx.Panel(self)  # create a panel to act as a container for the controls
        # Signal description
HEIGHT = 25
OFFSET = 20
k = 1
self.check1 = wx.CheckBox(self, -1, '生成信号描述', pos=(250, HEIGHT), size=(100, -1))
self.Bind(wx.EVT_CHECKBOX, self.SigDescEvtCheckBox, self.check1)
self.check1.Set3StateValue(True)
self.check2 = wx.CheckBox(self, -1, '生成信号值描述', pos=(250, HEIGHT + k * OFFSET), size=(100, -1))
self.Bind(wx.EVT_CHECKBOX, self.SigValDescEvtCheckBox, self.check2)
self.check2.Set3StateValue(True)
k += 1
        # Maximum length of the signal value description text
self.check3 = wx.CheckBox(self, -1, '生成初始值', pos=(250, HEIGHT + k * OFFSET), size=(100, -1))
self.Bind(wx.EVT_CHECKBOX, self.StartValEvtCheckBox, self.check3)
self.check3.Set3StateValue(True)
k += 1
self.check4 = wx.CheckBox(self, -1, '生成发送方和接收方', pos=(250, HEIGHT + k * OFFSET), size=(150, -1))
self.Bind(wx.EVT_CHECKBOX, self.RecvSndEvtCheckBox, self.check4)
self.check4.Set3StateValue(True)
k += 1
self.check5 = wx.CheckBox(self, -1, '升序排序(取消勾选降序)', pos=(250, HEIGHT + k * OFFSET), size=(150, -1))
self.Bind(wx.EVT_CHECKBOX, self.SortEvtCheckBox, self.check5)
self.check5.Set3StateValue(True)
k += 1
self.quote = wx.StaticText(self, label="信号值描述最大文本长度\n", pos=(250, HEIGHT + k * OFFSET), size = (140, 20))
self.text1 = wx.TextCtrl(self, wx.ID_ANY, "70",pos=(400, HEIGHT + k * OFFSET), size=(100, 20), style=wx.TE_LEFT)
#print(self.text1.Value)
k += 1
# Setting up the menu.
filemenu = wx.Menu()
# wx.ID_ABOUT and wx.ID_EXIT are standard IDs provided by wxWidgets.
menuAbout = filemenu.Append(wx.ID_ABOUT, "&关于"," Information about this program")
menuExit = filemenu.Append(wx.ID_EXIT,"E&xit"," Terminate the program")
# Creating the menubar.
menuBar = wx.MenuBar()
menuBar.Append(filemenu,"&文件") # Adding the "filemenu" to the MenuBar
self.SetMenuBar(menuBar) # Adding the MenuBar to the Frame content.
# Set events.
self.Bind(wx.EVT_MENU, self.OnAbout, menuAbout)
self.Bind(wx.EVT_MENU, self.OnExit, menuExit)
self.logger.AppendText("Dbc转Excel工具仍在不断完善中\n***如果转换时间过长或无法转换,尝试取消勾选上方的选项再进行生成***\n有任何问题请在下面网址留言\nhttps://blog.csdn.net/hhlenergystory/article/details/80443454\n")
self.Show(True)
    # Event handlers
def SigDescEvtCheckBox(self,event):
self.if_sig_desc = not self.if_sig_desc
#print(self.if_sig_desc)
def SigValDescEvtCheckBox(self,event):
self.if_sig_val_desc = not self.if_sig_val_desc
#print(self.if_sig_val_desc)
def StartValEvtCheckBox(self,event):
self.if_start_val = not self.if_start_val
#print(self.if_start_val)
def RecvSndEvtCheckBox(self,event):
self.if_recv_send = not self.if_recv_send
#print(self.if_recv_send)
def SortEvtCheckBox(self,event):
self.if_asc_sort = not self.if_asc_sort
def OnAbout(self, e):
# A message dialog box with an OK button. wx.OK is a standard ID in wxWidgets.
dlg = wx.MessageDialog(self, "DBC转Excel工具\nBY黄洪磊 i2347\nV0.4\n有任何问题请发送dbc文件至int.honglei.huang@uaes.com", "关于", wx.OK)
dlg.ShowModal() # Show it
dlg.Destroy() # finally destroy it when finished.
def OnExit(self, e):
self.Close(True) # Close the frame.
def create_excel(self, event):
self.logger.AppendText(" \n载入DBC文件完成\n" )
dbc = d2e.DbcLoad(self.path)
self.logger.AppendText(" 生成文件中!稍等... \n")
if(str(self.text1.Value).isdigit()):
self.val_description_max_number = int(self.text1.Value)
#print(self.val_description_max_number)
dbc.dbc2excel(self.path,self.if_sig_desc,self.if_sig_val_desc,self.val_description_max_number,self.if_start_val,self.if_recv_send,self.if_asc_sort)
self.logger.AppendText(" 文件转换完成\n")
def select_file_button(self, event):
filesFilter = "Dicom (*.dbc)|*.dbc|" "All files (*.*)|*.*"
fileDialog = wx.FileDialog(self, message="选择单个文件", wildcard=filesFilter, style=wx.FD_OPEN)
dialogResult = fileDialog.ShowModal()
if dialogResult != wx.ID_OK:
return
self.path = fileDialog.GetPath()
self.logger.SetLabel('>>>选择文件:'+self.path)
def OnEraseBack(self,event):
dc = event.GetDC()
if not dc:
dc = wx.ClientDC(self)
rect = self.GetUpdateRegion().GetBox()
dc.SetClippingRect(rect)
dc.Clear()
bmp = wx.Bitmap("a.jpg")
dc.DrawBitmap(bmp, 0, 0)
#################
if __name__ == "__main__":
app = wx.App(False)
frame = MyFrame(None, 'DBC转Excel工具')
app.MainLoop()
|
[
"1248832396@qq.com"
] |
1248832396@qq.com
|
772ae11af58a259ee1b609eb3c4b3502947ba004
|
45b00dd318c3ba92a29495988d975f5bcd8a19c3
|
/venv/lib/python3.8/site-packages/cloudinary_cli/core/utils.py
|
f5b7976c6bc25ca4940398cab33c35c55359489a
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
Jn-mic/Projects-Portfolio
|
b122d334457495ac7beed1707a61e4e3ab002468
|
642b9e58b81e8d505cc7462370e80fbab2945fd9
|
refs/heads/master
| 2023-06-13T22:38:06.123016
| 2021-07-20T15:43:07
| 2021-07-20T15:43:07
| 386,660,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,434
|
py
|
from webbrowser import open as open_url
from click import command, argument, option, Choice, echo
from cloudinary import utils as cld_utils
from cloudinary_cli.core.overrides import cloudinary_url
from cloudinary_cli.utils.api_utils import handle_command
from cloudinary_cli.utils.utils import print_help
cld_utils.cloudinary_url = cloudinary_url
utils_list = ["api_sign_request", "cloudinary_url", "download_archive_url", "download_zip_url", "private_download_url",
"download_folder", "download_backedup_asset", "verify_api_response_signature",
"verify_notification_signature"]
@command("utils", help="Call Cloudinary utility methods.")
@argument("params", nargs=-1)
@option("-o", "--optional_parameter", multiple=True, nargs=2, help="Pass optional parameters as raw strings.")
@option("-O", "--optional_parameter_parsed", multiple=True, nargs=2,
help="Pass optional parameters as interpreted strings.")
@option("-ls", "--ls", is_flag=True, help="List all available utility methods.")
def utils(params, optional_parameter, optional_parameter_parsed, ls):
if ls or len(params) < 1:
return print_help(cld_utils, allow_list=utils_list)
echo(handle_command(params, optional_parameter, optional_parameter_parsed, cld_utils, "Utils"))
@command("url", help="Generate a Cloudinary URL, which you can optionally open in your browser.")
@argument("public_id", required=True)
@argument("transformation", default="")
@option("-rt", "--resource_type", default="image", type=Choice(['image', 'video', 'raw']), help="The asset type")
@option("-t", "--type", "delivery_type", default="upload",
type=Choice(['upload', 'private', 'authenticated', 'fetch', 'list', 'url2png']),
help="The delivery type.")
@option("-o", "--open", 'open_in_browser', is_flag=True, help="Generate the derived asset and open it in your browser.")
@option("-s", "--sign", is_flag=True, help="Generate a signed URL.", default=False)
def url(public_id, transformation, resource_type, delivery_type, open_in_browser, sign):
if delivery_type == "authenticated" or resource_type == "url2png":
sign = True
elif delivery_type == "list":
public_id += ".json"
res = cloudinary_url(public_id, resource_type=resource_type,
raw_transformation=transformation, type=delivery_type, sign_url=sign)
echo(res)
if open_in_browser:
open_url(res)
|
[
"jackotienokey@gmail.com"
] |
jackotienokey@gmail.com
|
b0c92b082c127ce02470aafbb990e891b8e29ce8
|
5727534e65d551b5d202d9cc9ceb987aeb5a6a20
|
/envipyarclib/test/config.py
|
8710a9fcb818f07d49538160c9e94775fbd3a432
|
[
"MIT"
] |
permissive
|
envi-idl/envipyarclib
|
4122d884fc0134883f9a5bcddb50646eddfce6e9
|
3ebd62107ba04364dfdf5b1e436b4a7ad7715851
|
refs/heads/master
| 2023-02-16T10:21:12.407573
| 2023-02-10T19:37:18
| 2023-02-10T19:37:18
| 97,256,013
| 1
| 1
|
MIT
| 2023-02-10T19:38:07
| 2017-07-14T17:03:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,393
|
py
|
"""
"""
import os
import difflib
import glob
from abc import ABCMeta, abstractmethod
import arcpy
from ..metaclass import with_metaclass
class Config(with_metaclass(ABCMeta, object)):
"""
Defines the configuration for running the datatype tests.
"""
@property
def test_data_dir(self):
"""
Returns the full path to the test data directory
"""
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
@property
def test_task_dir(self):
"""
Returns the full paath to the test task directory
"""
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tasks')
@abstractmethod
def setup_toolbox(self, engine_name, task_name, toolbox_name):
"""
Abstract method for generating python toolboxes from test tasks.
:param engine_name: The name of the engine the test case runs against
:param task_name: The name of the task the test case runs against
:param toolbox_name: The name of the toolbox to be generated.
"""
pass
def remove_toolbox(self, toolbox_file):
"""
Removes the toolbox file from disk
"""
toolbox_file = os.path.splitext(toolbox_file)[0]
for tb_file in glob.glob(toolbox_file + '.*'):
os.remove(tb_file)
def compare_text_files(self, file1, file2):
"""
Compares two text files.
Removes whitespace, empty lines, and newline chars
Empty string as a return value mean file are the same
Open, read lines to string list, remove any newline chars, and
filter out empty strings/lines.
"""
text1 = list(filter(None, map(str.rstrip, open(file1, 'U').readlines())))
text2 = list(filter(None, map(str.rstrip, open(file2, 'U').readlines())))
return ''.join(difflib.unified_diff(text1, text2))
def setup_workspace(self, workspace_dir):
"""
Override the default arcpy workspace and scratch workspace
"""
if not os.path.isdir(workspace_dir):
os.mkdir(workspace_dir)
arcpy.env.workspace = workspace_dir
scratch_workspace = os.path.join(workspace_dir, 'scratch')
if not os.path.isdir(scratch_workspace):
os.mkdir(scratch_workspace)
arcpy.env.scratchWorkspace = scratch_workspace
|
[
"elefebvre@ittvis.com"
] |
elefebvre@ittvis.com
|
3334aa2448162bc9082686b1db06663e35a07d1e
|
6f3a7be25bf5106911af06713715b88b6f2477a8
|
/fixture/orm.py
|
debdd80d37bab1a78a66ad5b99c3608b4fe97f74
|
[
"Apache-2.0"
] |
permissive
|
werbk/task-7.22
|
477929bab1b2e46652562fa04b587b51b2cff71c
|
61a8c98846ccf385a7ef34ced19031b855593182
|
refs/heads/master
| 2021-01-01T17:05:25.175711
| 2015-08-09T10:46:23
| 2015-08-09T10:46:23
| 39,339,305
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,616
|
py
|
from pony.orm import *
from datetime import datetime
from tests_contract.contact_helper import Contact
from tests_group.group_helper import Group
from pymysql.converters import decoders
class ORMFixture:
db = Database()
class ORMGroup(db.Entity):
_table_ = 'group_list'
id = PrimaryKey(int, column='group_id')
name = Optional(str, column='group_name')
header = Optional(str, column='group_header')
footer = Optional(str, column='group_footer')
contacts = Set(lambda: ORMFixture.ORMContact, table='address_in_groups', column='id', reverse='groups', lazy=True)
class ORMContact(db.Entity):
_table_ = 'addressbook'
id = PrimaryKey(int, column='id')
first_name = Optional(str, column='firstname')
last_name = Optional(str, column='lastname')
deprecated = Optional(datetime, column='deprecated')
groups = Set(lambda: ORMFixture.ORMGroup, table='address_in_groups', column='group_id', reverse='contacts', lazy=True)
def __init__(self, host, name, user, password):
self.db.bind('mysql', host=host, database=name, user=user, password=password, conv=decoders)
self.db.generate_mapping()
#sql_debug(True)
def convert_groups_to_model(self, groups):
def convert(group):
return Group(id=str(group.id), group_name=str(group.name), group_header=str(group.header),
group_footer=str(group.footer))
return list(map(convert,groups))
def convert_contacts_to_model(self, contacts):
def convert(contact):
return Contact(id=str(contact.id), first_name=contact.first_name, last_name=contact.last_name)
return list(map(convert,contacts))
@db_session
def get_group_list(self):
return self.convert_groups_to_model(select(g for g in ORMFixture.ORMGroup))
@db_session
def get_contact_list(self):
return self.convert_contacts_to_model(select(c for c in ORMFixture.ORMContact if c.deprecated is None))
@db_session
def get_contacts_in_group(self, group):
orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
return self.convert_contacts_to_model(orm_group.contacts)
@db_session
def get_contacts_not_in_group(self, group):
orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
return self.convert_contacts_to_model(
select(c for c in ORMFixture.ORMContact if c.deprecated is None and orm_group not in c.groups))
def destroy(self):
self.db.disconnect()
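# Hedged usage sketch (added for illustration; not part of the original file,
# and the connection parameters are hypothetical):
#
#   orm = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
#   groups = orm.get_group_list()
#   if groups:
#       contacts_in_first_group = orm.get_contacts_in_group(groups[0])
#   orm.destroy()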
|
[
"werbk@bk.ru"
] |
werbk@bk.ru
|
0043b3f702afc3174da07d8caba0ca4d2413d7d4
|
2f700e4dbcf0d06d718a33c25611a0ce7660c361
|
/twittube/urls.py
|
8300fee2907e43145b44ce25d2d9c8b225f1cfca
|
[] |
no_license
|
DeminLi/twittube
|
15d17e19cb2f5a0ec82fcc5e862d5384cf031e38
|
d2fb605f36b4d0e53c5da535b8dc644c04e7d7a9
|
refs/heads/master
| 2020-05-04T16:41:23.529645
| 2014-04-30T15:24:19
| 2014-04-30T15:24:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 750
|
py
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'twittube.views.home', name='home'),
# url(r'^twittube/', include('twittube.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^$', 'twittube.views.index', name='index'),
url(r'^handlefile/$', 'twittube.views.handlefile', name='handlefile'),
url(r'^(?P<sponsor_id>\d+)/', include('conversation.urls')),
)
|
[
"lideminismine@gmail.com"
] |
lideminismine@gmail.com
|
2e3756bb08c6075914a70fe3ece7029d2345edaf
|
5d8717adf9229160a0cb72a4f73755ed8a88a1dd
|
/LeafCounting/utils/transform.py
|
c16ab3c4cb2c5c4efa5ac78d951d9fb745a131ea
|
[] |
no_license
|
harrishsmith/Leaf-Counting
|
1837ff6881fb737f3ad0c2c5631e4daf48bf7912
|
3cb6515cdccd0df26dc4e0ecd709ee8f4678cfb6
|
refs/heads/master
| 2023-08-21T01:39:34.026143
| 2021-10-05T05:33:59
| 2021-10-05T05:33:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,765
|
py
|
import numpy as np
DEFAULT_PRNG = np.random
def colvec(*args):
""" Create a numpy array representing a column vector. """
return np.array([args]).T
def transform_aabb(transform, aabb):
""" Apply a transformation to an axis aligned bounding box.
The result is a new AABB in the same coordinate system as the original AABB.
The new AABB contains all corner points of the original AABB after applying the given transformation.
# Arguments
transform: The transformation to apply.
x1: The minimum X value of the AABB.
y1: The minimum y value of the AABB.
x2: The maximum X value of the AABB.
y2: The maximum y value of the AABB.
# Returns
The new AABB as tuple (x1, y1, x2, y2)
"""
x1, y1, x2, y2 = aabb
# Transform all 4 corners of the AABB.
points = transform.dot([
[x1, x2, x1, x2],
[y1, y2, y2, y1],
[1, 1, 1, 1 ],
])
# Extract the min and max corners again.
min_corner = points.min(axis=1)
max_corner = points.max(axis=1)
return [min_corner[0], min_corner[1], max_corner[0], max_corner[1]]
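# Hedged worked example (comment added for clarity; not in the original file):
# rotating the AABB [0, 0, 2, 1] by 90 degrees maps each corner (x, y) to
# (-y, x), so the tightest axis-aligned box around the result is [-1, 0, 0, 2]:
#   transform_aabb(rotation(np.pi / 2), [0, 0, 2, 1])  # ~[-1.0, 0.0, 0.0, 2.0]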
def transform_ab(transform, ab):
""" Apply a transformation to an axis aligned bounding box.
The result is a new AABB in the same coordinate system as the original AABB.
The new AABB contains all corner points of the original AABB after applying the given transformation.
# Arguments
transform: The transformation to apply.
x1: The minimum X value of the AABB.
y1: The minimum y value of the AABB.
x2: The maximum X value of the AABB.
y2: The maximum y value of the AABB.
# Returns
The new AABB as tuple (x1, y1, x2, y2)
"""
x1, x2 = ab
    # Lift the point to homogeneous coordinates and apply the transform.
point = transform.dot([
[x1],
[x2],
[1 ],
])
return [point[0],point[1]]
def _random_vector(min, max, prng=DEFAULT_PRNG):
""" Construct a random vector between min and max.
# Arguments
min: the minimum value for each component
max: the maximum value for each component
"""
min = np.array(min)
max = np.array(max)
assert min.shape == max.shape
assert len(min.shape) == 1
return prng.uniform(min, max)
def rotation(angle):
""" Construct a homogeneous 2D rotation matrix.
# Arguments
angle: the angle in radians
# Returns
the rotation matrix as 3 by 3 numpy array
"""
return np.array([
[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]
])
def random_rotation(min, max, prng=DEFAULT_PRNG):
""" Construct a random rotation between -max and max.
# Arguments
min: a scalar for the minimum absolute angle in radians
max: a scalar for the maximum absolute angle in radians
prng: the pseudo-random number generator to use.
# Returns
a homogeneous 3 by 3 rotation matrix
"""
return rotation(prng.uniform(min, max))
def translation(translation):
""" Construct a homogeneous 2D translation matrix.
# Arguments
translation: the translation 2D vector
# Returns
the translation matrix as 3 by 3 numpy array
"""
return np.array([
[1, 0, translation[0]],
[0, 1, translation[1]],
[0, 0, 1]
])
def random_translation(min, max, prng=DEFAULT_PRNG):
""" Construct a random 2D translation between min and max.
# Arguments
min: a 2D vector with the minimum translation for each dimension
max: a 2D vector with the maximum translation for each dimension
prng: the pseudo-random number generator to use.
# Returns
a homogeneous 3 by 3 translation matrix
"""
return translation(_random_vector(min, max, prng))
def shear(angle):
""" Construct a homogeneous 2D shear matrix.
# Arguments
angle: the shear angle in radians
# Returns
the shear matrix as 3 by 3 numpy array
"""
return np.array([
[1, -np.sin(angle), 0],
[0, np.cos(angle), 0],
[0, 0, 1]
])
def random_shear(min, max, prng=DEFAULT_PRNG):
""" Construct a random 2D shear matrix with shear angle between -max and max.
# Arguments
min: the minimum shear angle in radians.
max: the maximum shear angle in radians.
prng: the pseudo-random number generator to use.
# Returns
a homogeneous 3 by 3 shear matrix
"""
return shear(prng.uniform(min, max))
def scaling(factor):
""" Construct a homogeneous 2D scaling matrix.
# Arguments
factor: a 2D vector for X and Y scaling
# Returns
the zoom matrix as 3 by 3 numpy array
"""
return np.array([
[factor[0], 0, 0],
[0, factor[1], 0],
[0, 0, 1]
])
def random_scaling(min, max, prng=DEFAULT_PRNG):
""" Construct a random 2D scale matrix between -max and max.
# Arguments
min: a 2D vector containing the minimum scaling factor for X and Y.
        max: a 2D vector containing the maximum scaling factor for X and Y.
prng: the pseudo-random number generator to use.
# Returns
a homogeneous 3 by 3 scaling matrix
"""
return scaling(_random_vector(min, max, prng))
def random_flip(flip_x_chance, flip_y_chance, prng=DEFAULT_PRNG):
""" Construct a transformation randomly containing X/Y flips (or not).
# Arguments
flip_x_chance: The chance that the result will contain a flip along the X axis.
flip_y_chance: The chance that the result will contain a flip along the Y axis.
prng: The pseudo-random number generator to use.
# Returns
a homogeneous 3 by 3 transformation matrix
"""
flip_x = prng.uniform(0, 1) < flip_x_chance
flip_y = prng.uniform(0, 1) < flip_y_chance
# 1 - 2 * bool gives 1 for False and -1 for True.
return scaling((1 - 2 * flip_x, 1 - 2 * flip_y))
def change_transform_origin(transform, center):
""" Create a new transform representing the same transformation,
only with the origin of the linear part changed.
# Arguments:
transform: the transformation matrix
center: the new origin of the transformation
# Return:
translate(center) * transform * translate(-center)
"""
center = np.array(center)
return np.linalg.multi_dot([translation(center), transform, translation(-center)])
def random_transform(
min_rotation=0,
max_rotation=0,
min_translation=(0, 0),
max_translation=(0, 0),
min_shear=0,
max_shear=0,
min_scaling=(1, 1),
max_scaling=(1, 1),
flip_x_chance=0,
flip_y_chance=0,
prng=DEFAULT_PRNG
):
""" Create a random transformation.
The transformation consists of the following operations in this order (from left to right):
* rotation
* translation
* shear
* scaling
* flip x (if applied)
* flip y (if applied)
Note that by default, the data generators in `keras_retinanet.preprocessing.generators` interpret the translation
    as factor of the image size. So an X translation of 0.1 would translate the image by 10% of its width.
Set `relative_translation` to `False` in the `TransformParameters` of a data generator to have it interpret
the translation directly as pixel distances instead.
# Arguments
min_rotation: The minimum rotation in radians for the transform as scalar.
max_rotation: The maximum rotation in radians for the transform as scalar.
min_translation: The minimum translation for the transform as 2D column vector.
max_translation: The maximum translation for the transform as 2D column vector.
min_shear: The minimum shear angle for the transform in radians.
max_shear: The maximum shear angle for the transform in radians.
min_scaling: The minimum scaling for the transform as 2D column vector.
max_scaling: The maximum scaling for the transform as 2D column vector.
flip_x_chance: The chance (0 to 1) that a transform will contain a flip along X direction.
flip_y_chance: The chance (0 to 1) that a transform will contain a flip along Y direction.
prng: The pseudo-random number generator to use.
"""
return np.linalg.multi_dot([
random_rotation(min_rotation, max_rotation, prng),
random_translation(min_translation, max_translation, prng),
random_shear(min_shear, max_shear, prng),
random_scaling(min_scaling, max_scaling, prng),
random_flip(flip_x_chance, flip_y_chance, prng)
])
def random_transform_generator(prng=None, **kwargs):
""" Create a random transform generator.
Uses a dedicated, newly created, properly seeded PRNG by default instead of the global DEFAULT_PRNG.
The transformation consists of the following operations in this order (from left to right):
* rotation
* translation
* shear
* scaling
* flip x (if applied)
* flip y (if applied)
Note that by default, the data generators in `keras_retinanet.preprocessing.generators` interpret the translation
    as factor of the image size. So an X translation of 0.1 would translate the image by 10% of its width.
Set `relative_translation` to `False` in the `TransformParameters` of a data generator to have it interpret
the translation directly as pixel distances instead.
# Arguments
min_rotation: The minimum rotation in radians for the transform as scalar.
max_rotation: The maximum rotation in radians for the transform as scalar.
min_translation: The minimum translation for the transform as 2D column vector.
max_translation: The maximum translation for the transform as 2D column vector.
min_shear: The minimum shear angle for the transform in radians.
max_shear: The maximum shear angle for the transform in radians.
min_scaling: The minimum scaling for the transform as 2D column vector.
max_scaling: The maximum scaling for the transform as 2D column vector.
flip_x_chance: The chance (0 to 1) that a transform will contain a flip along X direction.
flip_y_chance: The chance (0 to 1) that a transform will contain a flip along Y direction.
prng: The pseudo-random number generator to use.
"""
if prng is None:
# RandomState automatically seeds using the best available method.
prng = np.random.RandomState()
while True:
yield random_transform(prng=prng, **kwargs)
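# Hedged usage sketch (added for illustration; not part of the original file):
#
#   generator = random_transform_generator(
#       min_rotation=-0.1, max_rotation=0.1,
#       min_translation=(-0.1, -0.1), max_translation=(0.1, 0.1),
#       flip_x_chance=0.5,
#   )
#   transform = next(generator)                  # homogeneous 3x3 matrix
#   new_box = transform_aabb(transform, [10, 20, 50, 80])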
|
[
"guyfar@post.bgu.ac.il"
] |
guyfar@post.bgu.ac.il
|
46efb5b0d22a66151fe0aee2e2e57566136aa783
|
3178c199a3a6f5805fc45c8791204259bcb39125
|
/PythonAPI/synchronous_mode/visualize_NN_summary.py
|
8925ea845a9bb5543b4ae5cf603d5d29d5b52008
|
[] |
no_license
|
zwc662/CARLA
|
e3550d6fa2b07036c69d10a9f1642cc975ce5e2e
|
ec32181c67417c56d93d3d7afd22946c50e78a6c
|
refs/heads/master
| 2021-07-21T14:12:52.241222
| 2020-08-21T01:20:37
| 2020-08-21T01:20:37
| 207,066,345
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,112
|
py
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from torch.autograd import Variable
# from torchviz import make_dot
from torchsummary import summary
from NN_controller import mlp
torch.set_default_dtype(torch.float32)
num_wps = 10
model = mlp(nx=(5+3*num_wps), ny = 2)
sample_state = np.array([0.00414619210919023, -0.15624896240234376, -0.19012335205078126, -0.5009140014648438, 0.75, -0.15629389953613282, -0.20678996276855469, 1.4991319444444444, -0.15633935546875, -0.22345657348632814, \
1.4991319444444444, -0.15638481140136717, -0.24012318420410156, 1.4991319444444444, -0.15643026733398438, -0.25678976440429685, 1.4991319444444444, -0.15645565795898436, -0.2734342346191406, 1.5011489868164063, \
-0.15625730895996093, -0.29004119873046874, 1.5064541286892361, -0.1557822265625, -0.3066424865722656, 1.511759270562066, -0.15503054809570313, -0.323233642578125, \
1.5170644124348958, -0.1540864562988281, -0.33986141967773437, 1.5181676228841146, -0.152741455078125, -0.356009033203125, -0.44086286756727433]).astype(np.float32).reshape(1, -1)
print("sample_state", sample_state.shape)
state = torch.from_numpy(sample_state)
MLP_dict_path = "/home/ruihan/UnrealEngine_4.22/carla/Dist/CARLA_0.9.5-428-g0ce908db/LinuxNoEditor/PythonAPI/synchronous_mode/models/mlp/dest_start_merge_models/mlp_dict_nx=8_wps10_lr0.001_bs32_optimSGD_ep100_ep200_ep300_ep400_ep500_ep600_ep700_ep800_ep900_ep1000.pth"
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
checkpoint = torch.load(MLP_dict_path)
model.load_state_dict(checkpoint['model_state_dict'])
model = model.to(device)
state = state.to(device)
output = model(state)
print("output")
print(output)
# print("model")
# print(model)
'''
model
mlp(
(fc1): Linear(in_features=35, out_features=70, bias=True)
(fc2): Linear(in_features=70, out_features=140, bias=True)
(fc3): Linear(in_features=140, out_features=105, bias=True)
(fc4): Linear(in_features=105, out_features=2, bias=True)
(sig): Sigmoid()
(tanh): Tanh()
)
'''
shape = (1, 35)
print("summary")
summary(model, shape)
'''
summary
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Linear-1 [-1, 2, 70] 2,520
Linear-2 [-1, 2, 140] 9,940
Linear-3 [-1, 2, 105] 14,805
Linear-4 [-1, 2, 2] 212
Sigmoid-5 [-1, 2] 0
Tanh-6 [-1, 2] 0
================================================================
Total params: 27,477
Trainable params: 27,477
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.00
Forward/backward pass size (MB): 0.00
Params size (MB): 0.10
Estimated Total Size (MB): 0.11
----------------------------------------------------------------
'''
|
[
"zwc662@gmail.com"
] |
zwc662@gmail.com
|
ab01193036fd55bd00dd3212bb26c3216275c79c
|
7bb6c90ff26a7b55f8cd5227a4efadf58c7906b4
|
/utils/raw2model_data.py
|
6f278ee8df5da5f07eb007ff7de0405d595bbe19
|
[] |
no_license
|
bangbangbang12315/Persona-Dialogue-System
|
2321175078093cd4610c57cab8c5af8fa14dd6f1
|
bdcfb2e76680f7b53a0b9af3cdc2e09f535cf89d
|
refs/heads/master
| 2022-11-21T15:53:25.947512
| 2020-07-15T06:23:22
| 2020-07-15T06:23:22
| 279,768,821
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,962
|
py
|
import os
import sys
import json
from tqdm import tqdm
# from sentence_similarity import similarity
from multi_process_wraper import Worker, MultiProcessor
def get_history_rank(query, histories, model='cosine'):
scores = dict()
query_seg = query.split(' ')
for key in histories:
key_seg = key.split(' ')
sim = similarity.ssim(query_seg, key_seg, model=model)
try:
sim = 1.0 * sim
except:
sim = 0
scores[key] = scores.get(key, 0) + sim
return scores
def limit_history(post, resp, his, topK=15):
# his_p, his_p_id, his_p_time, his_r, his_r_id, his_r_time = zip(*his)
idx = {}
his_post, his_resp = [], []
for i, item in enumerate(his):
h_p, h_r = item[0], item[3]
idx[(h_p, h_r)] = i
his_post.append(h_p)
his_resp.append(h_r)
model = 'cosine'
sim_p_p = get_history_rank(post, his_post, model=model)
sim_p_r = get_history_rank(post, his_resp, model=model)
result = {}
for his_p, his_r in zip(his_post, his_resp):
result[(his_p, his_r)] = sim_p_p[his_p] + sim_p_r[his_r]
top = sorted(result.items(),key=lambda x:x[1],reverse=True)
result = top[:topK]
new_his, sim_val = [], []
for (h_p, h_r), v in result:
new_his.append(his[idx[(h_p, h_r)]])
sim_val.append(v)
return new_his, sim_val
def parse_line_sim(line):
def padded(seqs):
new_seqs = []
seqs_len = list(map(len, seqs))
pad_len = max(seqs_len)
for seq in seqs:
new_seqs.append(seq + ['<\s>'] * (pad_len - len(seq)))
assert len(new_seqs) == len(seqs_len)
new_seqs = '\t'.join(map(lambda x: ' '.join(x), new_seqs))
seqs_len = ' '.join(map(str, seqs_len))
return new_seqs, seqs_len
line = json.loads(line)
p, p_id, p_time, r, r_id, r_time, his = line
his = json.loads(his)
his_num = len(his)
if his:
his, his_sim_val = limit_history(p, r, his)
his_p, his_p_id, his_p_time, his_r, his_r_id, his_r_time = zip(*his)
his_p = list(map(lambda x: x.strip().split(' '), his_p))
his_r = list(map(lambda x: x.strip().split(' '), his_r))
his_p, his_p_len = padded(his_p)
his_r, his_r_len = padded(his_r)
his_p_id = ' '.join(his_p_id)
his_p_time = ' '.join(his_p_time)
his_r_id = ' '.join(his_r_id)
his_r_time = ' '.join(his_r_time)
his_sim_val = ' '.join(map(lambda x: f"{x:.4f}", his_sim_val))
else:
his_p, his_p_len, his_r, his_r_len = "<\s>", "0", "<\s>", "0"
his_p_id, his_p_time, his_r_id, his_r_time = "-1", "-1", "-1", "-1"
his_sim_val = "0.0"
new_line = [p, p_id, p_time, r, r_id, r_time, his_p, his_p_len, his_p_id, his_p_time, his_r, his_r_len, his_r_id, his_r_time, his_sim_val, his_num]
return json.dumps(new_line, ensure_ascii=False)
def parse_line_time(line):
def padded(seqs):
new_seqs = []
seqs_len = list(map(len, seqs))
pad_len = max(seqs_len)
for seq in seqs:
new_seqs.append(seq + ['<\s>'] * (pad_len - len(seq)))
assert len(new_seqs) == len(seqs_len)
new_seqs = '\t'.join(map(lambda x: ' '.join(x), new_seqs))
seqs_len = ' '.join(map(str, seqs_len))
return new_seqs, seqs_len
line = json.loads(line)
p, p_id, p_time, r, r_id, r_time, his = line
his = json.loads(his)
his_num = len(his)
if his:
# his, his_sim_val = limit_history(p, r, his)
his = his[max(0, len(his)-15):]
his_p, his_p_id, his_p_time, his_r, his_r_id, his_r_time = zip(*his)
his_p = list(map(lambda x: x.strip().split(' '), his_p))
his_r = list(map(lambda x: x.strip().split(' '), his_r))
his_p, his_p_len = padded(his_p)
his_r, his_r_len = padded(his_r)
his_p_id = ' '.join(his_p_id)
his_p_time = ' '.join(his_p_time)
his_r_id = ' '.join(his_r_id)
his_r_time = ' '.join(his_r_time)
his_sim_val = 0.0
else:
his_p, his_p_len, his_r, his_r_len = "<\s>", "0", "<\s>", "0"
his_p_id, his_p_time, his_r_id, his_r_time = "-1", "-1", "-1", "-1"
his_sim_val = "0.0"
new_line = [p, p_id, p_time, r, r_id, r_time, his_p, his_p_len, his_p_id, his_p_time, his_r, his_r_len, his_r_id, his_r_time, his_sim_val, his_num]
return new_line
def transform(src_fp, tgt_dir, tgt_postfix, mode="cosine"):
os.system(f"mkdir {tgt_dir}")
os.system(f"mkdir {tgt_dir}/extra")
with open(src_fp, 'r') as f_src, \
open(f"{tgt_dir}/post.{tgt_postfix}", 'w') as f_p, \
open(f"{tgt_dir}/resp.{tgt_postfix}", 'w') as f_r, \
open(f"{tgt_dir}/his_post_padded.{tgt_postfix}", 'w') as f_his_p, \
open(f"{tgt_dir}/his_resp_padded.{tgt_postfix}", 'w') as f_his_r, \
open(f"{tgt_dir}/his_post_len.{tgt_postfix}", 'w') as f_his_p_len, \
open(f"{tgt_dir}/his_resp_len.{tgt_postfix}", 'w') as f_his_r_len, \
open(f"{tgt_dir}/extra/post_time.{tgt_postfix}", 'w') as f_p_time, \
open(f"{tgt_dir}/extra/resp_time.{tgt_postfix}", 'w') as f_r_time, \
open(f"{tgt_dir}/extra/post_id.{tgt_postfix}", 'w') as f_p_id, \
open(f"{tgt_dir}/extra/resp_id.{tgt_postfix}", 'w') as f_r_id, \
open(f"{tgt_dir}/extra/his_post_time.{tgt_postfix}", 'w') as f_his_p_time, \
open(f"{tgt_dir}/extra/his_resp_time.{tgt_postfix}", 'w') as f_his_r_time, \
open(f"{tgt_dir}/extra/his_post_id.{tgt_postfix}", 'w') as f_his_p_id, \
open(f"{tgt_dir}/extra/his_resp_id.{tgt_postfix}", 'w') as f_his_r_id, \
open(f"{tgt_dir}/extra/his_num.{tgt_postfix}", 'w') as f_his_num, \
open(f"{tgt_dir}/extra/his_sim_val.{tgt_postfix}", 'w') as f_his_sim_val:
for line in tqdm(f_src, ncols=50):
# p, p_id, p_time, r, r_id, r_time, his_p, his_p_len, his_p_id, his_p_time, his_r, his_r_len, his_r_id, his_r_time, his_sim_val, his_num = parse_line(line)
if mode == "cosine":
p, p_id, p_time, r, r_id, r_time, his_p, his_p_len, his_p_id, his_p_time, his_r, his_r_len, his_r_id, his_r_time, his_sim_val, his_num = json.loads(line)
if mode == "time":
p, p_id, p_time, r, r_id, r_time, his_p, his_p_len, his_p_id, his_p_time, his_r, his_r_len, his_r_id, his_r_time, his_sim_val, his_num = parse_line_time(line)
if not his_p:
his_p, his_p_len, his_r, his_r_len = "<\s>", "0", "<\s>", "0"
his_p_id, his_p_time, his_r_id, his_r_time = "-1", "-1", "-1", "-1"
his_sim_val = "0.0"
# assert len(his_p.split('\t')) == len(his_p_len.split(' ')) == len(his_p_id.split(' ')) == len(his_p_time.split(' ')) == len(his_r.split('\t')) == len(his_r_len.split(' ')) == len(his_r_id.split(' ')) == len(his_r_time.split(' ')) == len(his_sim_val.split(' '))
f_p.write(f"{p}\n")
f_r.write(f"{r}\n")
f_his_p.write(f"{his_p}\n")
f_his_r.write(f"{his_r}\n")
f_his_p_len.write(f"{his_p_len}\n")
f_his_r_len.write(f"{his_r_len}\n")
f_p_time.write(f"{p_time}\n")
f_r_time.write(f"{r_time}\n")
f_p_id.write(f"{p_id}\n")
f_r_id.write(f"{r_id}\n")
f_his_p_id.write(f"{his_p_id}\n")
f_his_r_id.write(f"{his_r_id}\n")
f_his_p_time.write(f"{his_p_time}\n")
f_his_r_time.write(f"{his_r_time}\n")
f_his_num.write(f"{his_num}\n")
f_his_sim_val.write(f"{his_sim_val}\n")
def multi_trans(src_fp, tgt_fp):
worker = Worker(src_fp, tgt_fp, parse_line_sim)
mp = MultiProcessor(worker, 15)
mp.run()
print("All Processes Done.")
worker.merge_result(keep_pid_file=False)
if __name__ == "__main__":
# src_dir = "../raw"
src_dir = "../data/small/raw"
for phase in ['test', 'train', 'dev']:
transform(f"{src_dir}/{phase}.raw", f"../data/small/{phase}", f"{phase}", mode="time")
# multi_trans(f"{src_dir}/{phase}.raw", f"{src_dir}/{phase}.limited")
# transform(f"{src_dir}/{phase}.limited", f"../clean_news_400W/cosine/{phase}", f"{phase}", mode="cosine")
# transform(f"../raw/{phase}.limited", f"../cosine/{phase}", f"{phase}", mode="cosine")
# transform(f"../raw/{phase}.raw", f"../time/{phase}", f"{phase}", mode="time")
# transform(f"../raw/test.raw.shuf50", f"../test", f"test")
# multi_trans(f"../raw/test.raw", f"../raw/test.limited")
# transform(f"../raw/test.limited", f"../test", f"test")
# transform(f"{src_dir}/dev.limited.40000", f"../clean_news_400W/cosine/dev", f"dev", mode="cosine")
# transform(f"{src_dir}/test.limited.5000", f"../clean_news_400W/cosine/test", f"test", mode="cosine")
# transform(f"{src_dir}/same_post.limited", f"../clean_news_400W/cosine/same_post", f"same_post", mode="cosine")
|
[
"hanxun_zhong@ruc.edu.cn"
] |
hanxun_zhong@ruc.edu.cn
|
eab75732120eb4e8789bd9f3d9ca8b93a8cd6a81
|
9f8ba2a30a4d454b09d02b7efb47f232cc1173b1
|
/mediator/tests.py
|
4b204a5d2004468d3450d63b3ba3433e73bac394
|
[] |
no_license
|
selectnull/django-mediator
|
07b062edf95cb125219c35069dce4248c2e36218
|
d486ac3900dbe4368dffa99f55f12f56fe5f2a98
|
refs/heads/master
| 2016-09-05T20:47:15.872718
| 2012-04-07T14:07:19
| 2012-04-07T14:07:19
| 3,953,091
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
from django.utils import unittest
from mediator import MediaMapping
class MediaMappingTest(unittest.TestCase):
def setUp(self):
self.root_dir = '/home/foo/public_html'
self.root_url = '/media/'
self.mm = MediaMapping(self.root_dir, self.root_url)
def test_sanity(self):
self.assertEqual(1, 1)
def test_init(self):
self.assertEqual(self.mm.root_dir, self.root_dir)
self.assertEqual(self.mm.root_url, self.root_url)
def test_to_url(self):
pass
|
[
"sasha@selectnull.com"
] |
sasha@selectnull.com
|
8651ac38c94c1e85c9d0792e0f3b2ddff244c669
|
2a3059227ff8c162d1a8dcfc2e9d227adbb78d45
|
/0001. Two Sum/1.py
|
2617cc8a5c1928c5dcae7152603aad6d5b91ed05
|
[] |
no_license
|
121025/LeetCode-Python
|
ca4d1d3437145cf1491b683f73599b455c5f98f6
|
5a13cd3da42f6a3f3e9436c47b9c166435297163
|
refs/heads/main
| 2023-07-10T03:07:08.847464
| 2021-08-23T14:06:08
| 2021-08-23T14:06:08
| 391,373,772
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
#4092ms
from typing import List


class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
for i in range(0, len(nums)):
for j in range(i + 1, len(nums)):
if nums[i] + nums[j] == target:
return [i, j]
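

# Illustrative alternative (not part of the original submission): the same
# problem solved in O(n) time with a hash map of values already seen.
def two_sum_hashmap(nums: List[int], target: int) -> List[int]:
    seen = {}  # value -> index of a previously visited element
    for i, n in enumerate(nums):
        if target - n in seen:
            return [seen[target - n], i]
        seen[n] = i
    return []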
|
[
"noreply@github.com"
] |
121025.noreply@github.com
|
90246fc8ec69618e16e2cd33834a1be5f4b63c3d
|
51a065360b8b2f4a8cde43842a357b729ce6931a
|
/computer/RoadLaneDetection/line_hough_detector.py
|
473e966e68f05c209f2c52618cc0e0247c726c0a
|
[] |
no_license
|
muratory/perception
|
8fd95c1a865c5f2317c61110906856fd1eaa9f2d
|
23b03a3d33d526318e85748d978c48782298fd4f
|
refs/heads/master
| 2021-05-15T22:39:28.659715
| 2018-06-17T16:43:44
| 2018-06-17T16:43:44
| 106,734,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,192
|
py
|
# import the necessary packages
import numpy as np
import argparse
import glob
import cv2
def _auto_canny(image, sigma=0.33):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
def _canny_filter(image, mode):
    # load the image, convert it to grayscale, and blur it slightly
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (3, 3), 0)
    # apply Canny edge detection using a wide threshold, tight
    # threshold, and automatically determined threshold
    if mode == 'WIDE_THRESHOLD':
        img = cv2.Canny(blurred, 10, 200)
    elif mode == 'TIGHT_THRESHOLD':
        img = cv2.Canny(blurred, 225, 250)
    elif mode == 'AUTO_THRESHOLD':
        img = _auto_canny(blurred)
    else:
        raise ValueError('unknown Canny mode: %s' % mode)
    return img
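

# Note: _canny_filter is defined but not called by main() below; it can be used
# to compare thresholding strategies, e.g. (illustrative):
#   wide = _canny_filter(img, 'WIDE_THRESHOLD')
#   auto = _canny_filter(img, 'AUTO_THRESHOLD')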
#ref: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html
def hough_line_detector(image):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
####cv2.imshow("edges",edges)
lines = cv2.HoughLines(edges, 1, np.pi/180, 175)
return lines
def hough_lineP_detector(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    ####cv2.imshow("edgesP",edges)
    minLineLength = 100
    maxLineGap = 10
    # pass these as keyword arguments: positionally they would fill the optional
    # 'lines' output parameter instead of minLineLength / maxLineGap
    lines = cv2.HoughLinesP(edges, 1, np.pi/180, 150,
                            minLineLength=minLineLength, maxLineGap=maxLineGap)
    return lines
def main():
"""Main function"""
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--images", required=True,
help="path to input dataset of images")
args = vars(ap.parse_args())
# loop over the images
for image_path in glob.glob(args["images"] + "/*.jpg"):
image_orig = cv2.imread(image_path)
image = cv2.imread(image_path)
imageP = cv2.imread(image_path)
        lines = hough_line_detector(image)
        linesP = hough_lineP_detector(imageP)
        # HoughLines / HoughLinesP return None when no lines are detected
        if lines is None or linesP is None:
            print('no lines detected in', image_path)
            continue
        print('lines:', lines, 'lines.shape:', lines.shape)
        print('linesP:', linesP, 'linesP.shape:', linesP.shape)
# show the images
for i in range(lines.shape[0]):
for rho, theta in lines[i]:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*a)
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*a)
cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)
for i in range(linesP.shape[0]):
for x1, y1, x2, y2 in linesP[i]:
cv2.line(imageP, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.imshow("Line Dectector", np.hstack([image, imageP]))
key = cv2.waitKey(0) & 0xFF
if key == ord("q"):
break
if __name__ == '__main__':
main()
|
[
"pierre.muratory@gmail.com"
] |
pierre.muratory@gmail.com
|
d25daf29e1bc3a62f0c1d407e5d6f4c7a6ed4d89
|
cb9c909a82db154cf820b22f7b3adc5b964bdf76
|
/JupiterDailyFiles.py
|
49494a9d706271d1018f6445611a54ad068045ee
|
[] |
no_license
|
slomuscio/Jupiter
|
901e8446a36e5b799188a3d90ed9a7ff8d5539dd
|
8edce53035fe3a4849cc8d07d614aa3c46aa29c5
|
refs/heads/master
| 2020-06-17T20:16:54.130177
| 2019-08-01T13:34:14
| 2019-08-01T13:34:14
| 196,041,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,887
|
py
|
"""
Generates files for fake Jupiter with 40 degrees subtracted from right ascension
Samantha Lomuscio
"""
import math
import pandas as pd
import os
import numpy as np
from astropy.time import Time
from coordinates import equatorial
from gt_apps import filter, evtbin, maketime
roi = 20
emin = 100
emax = 10000
num_bins = int(math.log10(emax/emin)*10)
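# 10 logarithmic energy bins per decade -> 20 bins over 100-10000 MeV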
zmax = 90 #deg
#read in data from file
info = pd.read_csv('DataRanges.txt', delimiter=',')
for i in range(len(info)):
#set number
setnum = info['SetNumber'][i]
#make a directory for this set
os.mkdir(setnum)
#make a list of photon files to be used
start_week = info['StartWeek'][i]
end_week = info['EndWeek'][i]
f = open(setnum + '/photon_file.txt','w+')
#write locations of photon files to a text file
if start_week != end_week:
weeks = list(range(int(start_week),int(end_week)))
weeks.append(end_week)
for val in weeks:
if val < 100:
f.write('/data/scratch/ysong/fdata/weeklyphoton/lat_photon_weekly_w0' + str(val) + '_p305_v001.fits\n')
else:
f.write('/data/scratch/ysong/fdata/weeklyphoton/lat_photon_weekly_w' + str(val) + '_p305_v001.fits\n')
elif start_week == end_week:
if start_week < 100:
f.write('/data/scratch/ysong/fdata/weeklyphoton/lat_photon_weekly_w0' + str(start_week) + '_p305_v001.fits\n')
else:
f.write('/data/scratch/ysong/fdata/weeklyphoton/lat_photon_weekly_w' + str(start_week) + '_p305_v001.fits\n')
f.close()
data_file = '@' + setnum + '/photon_file.txt'
time_start = Time(info['StartDate'][i])
time_end = Time(info['EndDate'][i])
time_start_MET = int(info['StartDateMET'][i])
time_end_MET = int(info['EndDateMET'][i])
time_step = 1 #in units of days
times = np.arange(time_start_MET, time_end_MET, time_step*(24*60*60), dtype=float)
times = np.append(times, time_end_MET)
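    # daily time-bin edges in Mission Elapsed Time (s), with the end time appended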
skycoords_eq = equatorial(obj='jupiter', mode='c', tstart=time_start, tend=time_end, tstep=time_step) #used to find ra and dec
#loop through times to create stacked counts map
for i in range(1, len(skycoords_eq) + 1):
ra = skycoords_eq[i-1].ra.degree - 40
dec = skycoords_eq[i-1].dec.degree
        if ra < 0:
            ra = 360 - abs(ra)  # wrap a negative RA back into [0, 360) degrees
tmin = times[i-1]
tmax = times[i]
#gtselect
filter['evclass'] = 128
filter['evtype'] = 3
filter['rad'] = roi
filter['emin'] = emin
filter['emax'] = emax
filter['zmax'] = zmax
filter['infile'] = data_file
filter['ra'] = ra
filter['dec'] = dec
filter['tmin'] = tmin #must be in MET (s)
filter['tmax'] = tmax
outfile = 'jupiter_' + setnum + '_' + str(i) + '.fits'
filter['outfile'] = outfile
filter.run()
|
[
"noreply@github.com"
] |
slomuscio.noreply@github.com
|
ad9cb7cc5c6bb4360989a06e4790b08d45e26375
|
68d2d58bc3d24e5422ee0cf73c1ea5214fc190ab
|
/main.py
|
4c3ecbd38c5c99d793a2baa793d431a72293a646
|
[] |
no_license
|
hector-han/easy_server
|
f92d0ec2abb91623125532b65bc3974ec4f2aeef
|
d73fedbcfa1f2d08023f54237581bc8f2889468d
|
refs/heads/master
| 2020-08-20T09:45:16.585204
| 2019-10-18T12:34:43
| 2019-10-18T12:34:43
| 216,008,316
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
import tornado.ioloop
import tornado.web
import json
import datetime
store_file = 'received_info.txt'
fhandler = open(store_file, 'a', encoding='utf-8')
class MainHandler(tornado.web.RequestHandler):
def post(self):
now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
requestId = self.get_body_argument('requestId', default='')
code = self.get_body_argument('code', default='1')
text = self.get_body_argument('text', default='').replace('\n', '')
resp = {"code": 0, "message": "成功"}
if code == '0':
fhandler.write('[{}]: {},{}'.format(now, requestId, text) + '\n')
fhandler.flush()
else:
fhandler.write('[{}]: ERROR: '.format(now) + self.request.body.decode() + '\n')
self.write(json.dumps(resp, ensure_ascii=False))
def make_app():
return tornado.web.Application([
(r"/tencent_record", MainHandler),
])
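

# Example request (illustrative; endpoint and port taken from this file):
#   curl -X POST http://localhost:8796/tencent_record \
#        -d 'requestId=abc123' -d 'code=0' -d 'text=hello'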
if __name__ == "__main__":
app = make_app()
app.listen(8796)
tornado.ioloop.IOLoop.current().start()
|
[
"hanhengke@58ganji.com"
] |
hanhengke@58ganji.com
|