Dataset schema (one record per source file; ranges show observed min and max; ⌀ marks nullable columns):

| column | dtype | observed values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable (⌀) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | length 128 to 8.19k |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |

Each record below lists its metadata fields, then the file content, then the author fields.
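
The records can be consumed programmatically; a minimal sketch, assuming the table is published as a Hugging Face dataset (the dataset ID below is a placeholder, not the real name):

from datasets import load_dataset

ds = load_dataset('org/code-dataset', split='train', streaming=True)  # placeholder ID
for row in ds.take(3):
    print(row['repo_name'], row['path'], row['license_type'], row['length_bytes'])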

blob_id: 64a65dc8e8eb1d2a4fda1b44992a6c4e8928a5f0 | directory_id: 15ed27bece0ae11ee0ae19f7d8fbffb1bd6db342 | content_id: 20cfde293eb197ba167a9b4a457e7ab276897cdb
path: /tensorforce/environments/socket_environment.py | extension: py | length_bytes: 5,808 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: marload/tensorforce | branch_name: refs/heads/master | snapshot_id: 06baea28096d04bbe1f42ee99d0d8e4d815d1e40 | revision_id: 7101282b2c4a0524361aeeab22d3a2c5a3dd03bc | github_id: 248,179,865 | star_events_count: 1 | fork_events_count: 0
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: null
visit_date: 2021-04-01T09:55:48.154186 | revision_date: 2020-03-17T23:38:49 | committer_date: 2020-03-17T23:38:49 | gha_event_created_at: 2020-03-18T08:46:32 | gha_created_at: 2020-03-18T08:46:31
content:
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from socket import SHUT_RDWR, socket as Socket
import msgpack
import msgpack_numpy
from tensorforce import TensorforceError, util
from tensorforce.environments import RemoteEnvironment
msgpack_numpy.patch()
class SocketEnvironment(RemoteEnvironment):
"""
An earlier version of this code (#626) was originally developed as part of the following work:
Rabault, J., Kuhnle, A. (2019). Accelerating Deep Reinforcement Learning strategies of Flow
Control through a multi-environment approach. Physics of Fluids.
"""
MAX_BYTES = 4096
@classmethod
def remote(cls, port, environment, max_episode_timesteps=None, **kwargs):
socket = Socket()
socket.bind(('', port))
socket.listen(1)
connection, address = socket.accept()
socket.close()
super().remote(
connection=connection, environment=environment,
max_episode_timesteps=max_episode_timesteps, **kwargs
)
@classmethod
def proxy_send(cls, connection, function, **kwargs):
str_function = function.encode()
num_bytes = len(str_function)
str_num_bytes = '{:08d}'.format(num_bytes).encode()
bytes_sent = connection.send(str_num_bytes + str_function)
if bytes_sent != num_bytes + 8:
raise TensorforceError.unexpected()
str_kwargs = msgpack.packb(o=kwargs)
num_bytes = len(str_kwargs)
str_num_bytes = '{:08d}'.format(num_bytes).encode()
bytes_sent = connection.send(str_num_bytes + str_kwargs)
if bytes_sent != num_bytes + 8:
raise TensorforceError.unexpected()
@classmethod
def proxy_receive(cls, connection):
str_success = connection.recv(1)
if len(str_success) != 1:
raise TensorforceError.unexpected()
success = bool(int(str_success.decode()))  # wire value is b'0'/b'1'; bool(b'0') would always be True
str_num_bytes = connection.recv(8)
if len(str_num_bytes) != 8:
raise TensorforceError.unexpected()
num_bytes = int(str_num_bytes.decode())
str_result = b''
for n in range(num_bytes // cls.MAX_BYTES):
    str_result += connection.recv(cls.MAX_BYTES)
    if len(str_result) != (n + 1) * cls.MAX_BYTES:  # off-by-one in original: compared against n * MAX_BYTES
        raise TensorforceError.unexpected()
str_result += connection.recv(num_bytes % cls.MAX_BYTES)
if len(str_result) != num_bytes:
raise TensorforceError.unexpected()
result = msgpack.unpackb(packed=str_result)
decode = (lambda x: x.decode() if isinstance(x, bytes) else x)
result = util.fmap(function=decode, xs=result, map_keys=True)
return success, result
@classmethod
def proxy_close(cls, connection):
connection.shutdown(SHUT_RDWR)
connection.close()
@classmethod
def remote_send(cls, connection, success, result):
str_success = str(int(success)).encode()
bytes_sent = connection.send(str_success)
if bytes_sent != 1:
raise TensorforceError.unexpected()
str_result = msgpack.packb(o=result)
num_bytes = len(str_result)
str_num_bytes = '{:08d}'.format(num_bytes).encode()
bytes_sent = connection.send(str_num_bytes + str_result)
if bytes_sent != num_bytes + 8:
raise TensorforceError.unexpected()
@classmethod
def remote_receive(cls, connection):
str_num_bytes = connection.recv(8)
if len(str_num_bytes) != 8:
raise TensorforceError.unexpected()
num_bytes = int(str_num_bytes.decode())
str_function = b''
for n in range(num_bytes // cls.MAX_BYTES):
    str_function += connection.recv(cls.MAX_BYTES)
    if len(str_function) != (n + 1) * cls.MAX_BYTES:  # off-by-one fixed as in proxy_receive
        raise TensorforceError.unexpected()
str_function += connection.recv(num_bytes % cls.MAX_BYTES)
if len(str_function) != num_bytes:
raise TensorforceError.unexpected()
function = str_function.decode()
str_num_bytes = connection.recv(8)
if len(str_num_bytes) != 8:
raise TensorforceError.unexpected()
num_bytes = int(str_num_bytes.decode())
str_kwargs = b''
for n in range(num_bytes // cls.MAX_BYTES):
    str_kwargs += connection.recv(cls.MAX_BYTES)
    if len(str_kwargs) != (n + 1) * cls.MAX_BYTES:  # off-by-one fixed as in proxy_receive
        raise TensorforceError.unexpected()
str_kwargs += connection.recv(num_bytes % cls.MAX_BYTES)
if len(str_kwargs) != num_bytes:
raise TensorforceError.unexpected()
kwargs = msgpack.unpackb(packed=str_kwargs)
decode = (lambda x: x.decode() if isinstance(x, bytes) else x)
kwargs = util.fmap(function=decode, xs=kwargs, map_keys=True)
return function, kwargs
@classmethod
def remote_close(cls, connection):
connection.shutdown(SHUT_RDWR)
connection.close()
def __init__(self, host, port, blocking=False):
socket = Socket()
socket.connect((host, port))
super().__init__(connection=socket, blocking=blocking)
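
The file above frames every message as an 8-digit ASCII length prefix followed by a msgpack payload. A minimal sketch of that framing, with hypothetical helper names, using sendall and an exact-receive loop to handle the partial sends and short reads that the checks above only detect:

import msgpack

def recv_exact(connection, n):
    # loop because recv() may legally return fewer than n bytes
    buf = b''
    while len(buf) < n:
        chunk = connection.recv(n - len(buf))
        if not chunk:
            raise ConnectionError('socket closed mid-message')
        buf += chunk
    return buf

def send_framed(connection, payload):
    data = msgpack.packb(payload)
    connection.sendall('{:08d}'.format(len(data)).encode() + data)

def recv_framed(connection):
    num_bytes = int(recv_exact(connection, 8).decode())
    return msgpack.unpackb(recv_exact(connection, num_bytes))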
authors: ["alexkuhnle@t-online.de"] | author_id: alexkuhnle@t-online.de

blob_id: 42635fa6693ccd0a767871eb5e8353b606f9eb6d | directory_id: 0e478f3d8b6c323c093455428c9094c45de13bac | content_id: 1e8401f5696f51dc2d278fa4a273f956133d8b95
path: /src/OTLMOW/PostenMapping/Model/Post060261215.py | extension: py | length_bytes: 4,912 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: davidvlaminck/OTLMOW | branch_name: refs/heads/main | snapshot_id: c6eae90b2cab8a741271002cde454427ca8b75ba | revision_id: 48f8c357c475da1d2a1bc7820556843d4b37838d | github_id: 432,681,113 | star_events_count: 3 | fork_events_count: 1
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python
visit_date: 2023-01-12T05:08:40.442734 | revision_date: 2023-01-10T15:26:39 | committer_date: 2023-01-10T15:26:39 | gha_event_created_at: 2022-06-20T20:36:00 | gha_created_at: 2021-11-28T10:28:24
content:
# coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post060261215(StandaardPost):
def __init__(self):
super().__init__(
nummer='0602.61215',
beschrijving='Toplaag, bouwklassegroep B1-B3 volgens 6-2, type SMA-C2 met polymeerbitumen, dikte E = 5 cm',
meetstaateenheid='M2',
mappings=[StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BitumineuzeLaag',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DtuBVLaagtypes.laagtype',
dotnotation='laagtype.laagtype',
defaultWaarde='andere-toplagen',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0602.61215')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BitumineuzeLaag',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
dotnotation='laagRol',
defaultWaarde='verharding',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0602.61215')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BitumineuzeLaag',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagBouwklasse.bouwklasse',
dotnotation='bouwklasse',
defaultWaarde='',
range='B1|B2|B3',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=1,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0602.61215')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BitumineuzeLaag',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BitumineuzeLaag.mengseltype',
dotnotation='mengseltype',
defaultWaarde='SMA-C2',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0602.61215')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BitumineuzeLaag',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BitumineuzeLaag.bindmiddelType',
dotnotation='bindmiddelType',
defaultWaarde='polymeerbitumen',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0602.61215')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BitumineuzeLaag',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagDikte.dikte',
dotnotation='dikte',
defaultWaarde='5',
range='',
usagenote='cm^^cdt:ucumunit',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0602.61215')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BitumineuzeLaag',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
dotnotation='oppervlakte',
defaultWaarde='',
range='',
usagenote='m2^^cdt:ucumunit',
isMeetstaatAttr=1,
isAltijdInTeVullen=1,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0602.61215')])
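
A hypothetical inspection of the generated class, assuming StandaardPost keeps its constructor arguments as same-named attributes:

post = Post060261215()
# assumption: nummer, meetstaateenheid and mappings are stored as attributes
print(post.nummer, post.meetstaateenheid, len(post.mappings))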
authors: ["david.vlaminck@mow.vlaanderen.be"] | author_id: david.vlaminck@mow.vlaanderen.be

blob_id: 51218db60992a0598c11d71f2a2840fd04fe9494 | directory_id: 4e6caa29a341e8e3964855172af4b89d683ff65f | content_id: d2030a34d7e0aace750b9adea2fb7d4d6f9d3e26
path: /orders/models.py | extension: py | length_bytes: 1,301 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: sadakchap/basic-ecommerce-site | branch_name: refs/heads/master | snapshot_id: 2112e758ac0d9074a18c50f6a0955c0ab82ff01d | revision_id: b1f9990eaebb260a3c338fda61f8c426953faad8 | github_id: 212,818,635 | star_events_count: 0 | fork_events_count: 0
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: JavaScript
visit_date: 2022-12-25T11:04:16.247128 | revision_date: 2021-06-02T01:47:00 | committer_date: 2021-06-02T01:47:00 | gha_event_created_at: 2022-12-08T06:40:57 | gha_created_at: 2019-10-04T13:14:44
content:
from django.db import models
from shop.models import Product
# Create your models here.
class Order(models.Model):
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
email = models.EmailField()
address = models.CharField(max_length=255)
postal_code = models.CharField(max_length=10)
city = models.CharField(max_length=200)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
paid = models.BooleanField(default=False)
braintree_id= models.CharField(max_length=150,blank=True)
def __str__(self):
return 'Order {}'.format(self.id)
class Meta:
ordering = ('-created',)
def get_total_cost(self):
return sum([i.get_cost() for i in self.items.all()])
class OrderItem(models.Model):
order = models.ForeignKey(Order,on_delete=models.CASCADE,related_name='items')
product = models.ForeignKey(Product,on_delete=models.CASCADE,related_name='order_items')
price = models.DecimalField(max_digits=10,decimal_places=2)
quantity= models.PositiveIntegerField(default=1)
def __str__(self):
return '{}'.format(self.id)
def get_cost(self):
return self.price * self.quantity
authors: ["aliceprerna@gmail.com"] | author_id: aliceprerna@gmail.com

blob_id: 981bf9717f31d66e32777202e08d7e15847fb7fd | directory_id: 9b3f578e63a7e17e2b1bab5f38aa8625b8a80251 | content_id: 5c4b26227aef784da05648dcf5da73da4df46673
path: /descarteslabs/workflows/models/versionedgraft.py | extension: py | length_bytes: 5,341 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: carderne/descarteslabs-python | branch_name: refs/heads/master | snapshot_id: e6f7000f08cd1569e0ddd0f7fb8e53abb6765183 | revision_id: 757b480efb8d58474a3bf07f1dbd90652b46ed64 | github_id: 287,264,851 | star_events_count: 0 | fork_events_count: 0
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: NOASSERTION | gha_language: null
visit_date: 2022-12-09T23:19:02.361226 | revision_date: 2020-08-13T11:52:30 | committer_date: 2020-08-13T11:52:30 | gha_event_created_at: 2020-08-13T11:46:58 | gha_created_at: 2020-08-13T11:46:57
content:
import json
import textwrap
from descarteslabs.common.graft import client as graft_client
from descarteslabs.common.proto.workflow import workflow_pb2
from descarteslabs.workflows import _channel
from descarteslabs.workflows.cereal import deserialize_typespec, serialize_typespec
from descarteslabs.workflows.client import get_global_grpc_client
class VersionedGraft:
"""
A specific version of a Workflow.
Except in advanced cases, you shouldn't need to interact with this object much—you'll primarily
use the `Workflow` object and `wf.use <.models.use>`.
"""
def __init__(self, version, proxy_object, docstring="", labels=None):
"""
Construct a VersionedGraft object from a proxy object.
You shouldn't construct a `VersionedGraft` directly; use `Workflow.set_version`
or `wf.publish <.models.publish>` instead.
Parameters
----------
version: str
Version of the graft. This should adhere to the semantic versioning schema (https://semver.org).
proxy_object: Proxytype
The proxy object source of the graft.
docstring: str, default ""
Docstring for the VersionedGraft.
labels: dict, optional
Key-value pair labels to add to the VersionedGraft.
Returns
-------
VersionedGraft
"""
typespec = serialize_typespec(type(proxy_object))
graft = proxy_object.graft
message = workflow_pb2.VersionedGraft(
version=version,
serialized_graft=json.dumps(graft),
channel=_channel.__channel__,
typespec=typespec,
docstring=textwrap.dedent(docstring),
labels=labels,
)
self._object = proxy_object
self._message = message
@classmethod
def get(cls, workflow_id, version, client=None):
"""
Get a specific `VersionedGraft` of a `Workflow`.
Parameters
----------
workflow_id: str
The ID of the `Workflow`.
version: str
The version of the `Workflow` that you wish to fetch.
client: `.workflows.client.Client`, optional
Allows you to use a specific client instance with non-default
auth and parameters.
Returns
-------
VersionedGraft
"""
if client is None:
client = get_global_grpc_client()
req = workflow_pb2.GetVersionRequest(id=workflow_id, version=version)
versioned_graft_message = client.api["GetVersion"](
req, timeout=client.DEFAULT_TIMEOUT
)
return cls._from_proto(versioned_graft_message)
@classmethod
def _from_proto(cls, message):
"""
Low-level constructor for a `VersionedGraft` object from a Protobuf message.
Do not use this method directly; use `VersionedGraft.__init__`
or `VersionedGraft.get` instead.
Parameters
----------
message: workflow_pb2.VersionedGraft message
Protobuf message for the VersionedGraft
Returns
-------
VersionedGraft
"""
obj = cls.__new__(cls) # bypass __init__
obj._message = message
obj._object = None
return obj
@property
def type(self):
"""type: The type of the proxy object."""
return type(self.object)
@property
def version(self):
"""str: The version of this `VersionedGraft`."""
return self._message.version
@property
def labels(self):
"""dict: The labels attached to this `VersionedGraft`."""
return self._message.labels
@property
def channel(self):
"""str: The channel under which this `VersionedGraft` was created."""
return self._message.channel
@property
def docstring(self):
"""str: The docstring for this `VersionedGraft`."""
return self._message.docstring
@property
def object(self):
"""
Proxytype: The proxy object of this Workflow.
Raises ValueError if the VersionedGraft is not compatible with the current channel.
"""
if self.channel != _channel.__channel__:
raise ValueError(
"This client is compatible with channel '{}', "
"but the VersionedGraft is only defined for channel '{}'.".format(
_channel.__channel__, self.channel
)
)
if self._object is None:
proxy_type = deserialize_typespec(self._message.typespec)
graft = json.loads(self._message.serialized_graft)
isolated = graft_client.isolate_keys(graft)
proxy_obj = proxy_type._from_graft(isolated)
proxy_obj.__doc__ = self.docstring
self._object = proxy_obj
return self._object
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self._message == other._message
def __repr__(self):
return """\
VersionedGraft: {self.version}
- type: {self.type.__name__}
- labels: {self.labels}
- channel: {self.channel}
{docstring}
""".format(
self=self, docstring=textwrap.indent(self.docstring, " ")
)
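
A usage sketch based on the docstrings above; the workflow ID and version are placeholders, and a reachable Workflows backend is assumed:

from descarteslabs.workflows.models.versionedgraft import VersionedGraft

vg = VersionedGraft.get('my-workflow-id', '1.0.0')  # placeholder ID and version
print(vg.version, vg.channel)
obj = vg.object  # raises ValueError if the client channel does not match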
authors: ["support@descarteslabs.com"] | author_id: support@descarteslabs.com

blob_id: c9481cd2dcec110f75b2234f1c65a3c1766da112 | directory_id: 543286f4fdefe79bd149ff6e103a2ea5049f2cf4 | content_id: b357539a43c5f9dca9aea36886c29d852f520cae
path: /Exercicios&cursos/Curso_Py/escrevendo_arquivo.py | extension: py | length_bytes: 170 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: antonioleitebr1968/Estudos-e-Projetos-Python | branch_name: refs/heads/master | snapshot_id: fdb0d332cc4f12634b75984bf019ecb314193cc6 | revision_id: 9c9b20f1c6eabb086b60e3ba1b58132552a84ea6 | github_id: null | star_events_count: 0 | fork_events_count: 0
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2022-04-01T20:03:12.906373 | revision_date: 2020-02-13T16:20:51 | committer_date: 2020-02-13T16:20:51 | gha_event_created_at: null | gha_created_at: null
content:
arq = open("teste_aula.txt", "w")  # "w" opens the file for writing
arq.write("escreviiiiiiii\n")
arq.write("dnvvvvvvvvvvv\n")
arq.write("HAHAHA CHUPA")
arq.write("UHUUUUUU")
arq.close()
authors: ["progmatheusmorais@gmail.com"] | author_id: progmatheusmorais@gmail.com

blob_id: 72e597de56f9957f4ef37fa1adeabff299cde620 | directory_id: ebcea394905df8222c257c8c6c469627a6e48095 | content_id: cba40e206156413afaf37126354b58225ad1a65d
path: /djangoProject/mysite/Profile/admin.py | extension: py | length_bytes: 301 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: valiok98/Python-Qt5-Tensorflow | branch_name: refs/heads/master | snapshot_id: 2773cfc2a0e569ed53cf3d90066885f17abe8c6a | revision_id: e03ccc2884b687a36fbe47f5ff320837be3e217a | github_id: 103,644,683 | star_events_count: 1 | fork_events_count: 0
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2021-09-17T20:41:01.908602 | revision_date: 2018-03-31T12:42:25 | committer_date: 2018-03-31T12:42:25 | gha_event_created_at: null | gha_created_at: null
content:
from django.contrib import admin
# Register your models here.
from .models import PersonalProfile, Item
class PProfile(admin.ModelAdmin):
list_display = ["f_name","l_name"]
class Meta:  # note: ModelAdmin does not read an inner Meta; this block has no effect
    model = PersonalProfile
admin.site.register(PersonalProfile,PProfile)
admin.site.register(Item)
authors: ["valentin1998v@gmail.com"] | author_id: valentin1998v@gmail.com

blob_id: c5c5338564eb826c01194cf073d4ba304148170e | directory_id: 39200f6d08ebeac0f147d3b2b6aaed980d2ce555 | content_id: ce3882ff9612bb77ff64376fa9dd24a0c8c0a086
path: /blog/urls.py | extension: py | length_bytes: 873 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: hemor/my-first-blog | branch_name: refs/heads/master | snapshot_id: 506a81b6dcda82a39aa24ecda65632b1d92ecbce | revision_id: 525e002897fd9367ffa6fbfc9458bf2e477efab9 | github_id: 60,012,037 | star_events_count: 0 | fork_events_count: 0
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2020-03-28T02:08:04.116659 | revision_date: 2016-06-01T17:40:35 | committer_date: 2016-06-01T17:40:35 | gha_event_created_at: null | gha_created_at: null
content:
from django.conf.urls import url
from . import views
app_name = 'blog'
urlpatterns = [
url(r'^$', views.post_list, name='post_list'),
url(r'^index/$', views.post_list, name='index'),
url(r'^post/(?P<pk>\d+)/$', views.post_detail, name='post_detail'),
url(r'^post/(?P<pk>\d+)/comment/$', views.add_comment_to_post,
name='add_comment_to_post'),
url(r'^post/(?P<pk>\d+)/edit/$', views.post_edit, name='post_edit'),
url(r'^post/(?P<pk>\d+)/publish/$', views.post_publish, name='post_publish'),
url(r'^post/(?P<pk>\d+)/delete/$', views.post_delete, name='post_delete'),
url(r'^post/new/$', views.post_new, name='post_new'),
url(r'^drafts/$', views.post_draft_list, name='post_draft_list'),
url(r'^comment/(?P<pk>\d+)/approve/$', views.comment_approve, name='comment_approve'),
url(r'^comment/(?P<pk>\d+)/delete/$', views.comment_delete, name='comment_delete'),
]
authors: ["you@example.com"] | author_id: you@example.com

blob_id: de41530ae7cbc92425d107c3f2e3e33d1b2dfa11 | directory_id: 4e96f383d4703ad8ee58869ed91a0c8432c8a051 | content_id: 3afab5a287b257c42b56bb5d7554760e5322b2c3
path: /Cura/Uranium/UM/Operations/AddSceneNodeOperation.py | extension: py | length_bytes: 1,597 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: flight7788/3d-printing-with-moveo-1 | branch_name: refs/heads/Feature_Marlin_with_AlanBoy | snapshot_id: b2dba26010c4fa31815bc1d2d0966161a8600081 | revision_id: 7fcb9c6b5da9245d54ac917de8c2a7f5148e42b0 | github_id: 212,583,912 | star_events_count: 0 | fork_events_count: 0
detected_licenses: ["LGPL-3.0-only", "GPL-3.0-only"] | license_type: permissive | gha_license_id: MIT | gha_language: C
visit_date: 2022-08-30T18:36:44.785058 | revision_date: 2020-05-30T07:52:58 | committer_date: 2020-05-30T07:52:58 | gha_event_created_at: 2020-05-16T07:39:47 | gha_created_at: 2019-10-03T13:13:01
content:
# Copyright (c) 2018 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from UM.Operations.Operation import Operation
from UM.Scene.Selection import Selection
from UM.Scene.SceneNode import SceneNode
from typing import Optional
## Operation that adds a new node to the scene.
class AddSceneNodeOperation(Operation):
## Creates the scene node operation.
#
# This saves the node and its parent to be able to search for the node to
# remove the node if we want to undo, and to be able to re-do the adding
# of the node.
#
# \param node The node to add to the scene.
# \param parent The parent of the new node.
def __init__(self, node: SceneNode, parent: Optional[SceneNode]) -> None:
super().__init__()
self._node = node
self._parent = parent
self._selected = False # Was the node selected while the operation is undone? If so, we must re-select it when redoing it.
## Reverses the operation of adding a scene node.
#
# This removes the scene node again.
def undo(self) -> None:
self._node.setParent(None)
self._selected = Selection.isSelected(self._node)
if self._selected:
Selection.remove(self._node) # Also remove the node from the selection.
## Re-applies this operation after it has been undone.
def redo(self) -> None:
self._node.setParent(self._parent)
if self._selected: # It was selected while the operation was undone. We should restore that selection.
Selection.add(self._node)
authors: ["t106360212@ntut.org.tw"] | author_id: t106360212@ntut.org.tw

blob_id: 6b0447a4f1e8917cc1625bd04e865f86499b4fa2 | directory_id: 23e1f9af34f2f2c3f32f68a6176c7664cb7f62d7 | content_id: f5d54088c3d930ebb955ac5055aba50cd24e7f87
path: /legacy/daily/163/overview.py | extension: py | length_bytes: 2,088 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: jki14/the-cat-of-wall-street | branch_name: refs/heads/master | snapshot_id: 8bc95d812f0381c7e02bd65777ee9f44e17f73d9 | revision_id: 5e1265a11bba7f3040c893043d5ac79fe5736b63 | github_id: 115,118,975 | star_events_count: 0 | fork_events_count: 0
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2021-10-11T11:27:18.297584 | revision_date: 2019-01-25T08:51:22 | committer_date: 2019-01-25T08:51:22 | gha_event_created_at: null | gha_created_at: null
content:
# -*- coding: utf-8 -*-
import csv
import datetime
import json
import os
import socket
import sys
import time
import urllib2
tracking_list = ['sh000001', 'sz399001', 'sz399300']
foo = {}
try:
with open('./data/dbf/overview.dbf', 'r') as dbf:
foo = json.load(dbf)
except IOError:
foo['layout'] = ['DATE', 'TCLOSE','HIGH','LOW','TOPEN','LCLOSE','CHG','PCHG','TURNOVER','VOTURNOVER','VATURNOVER','TCAP','MCAP']
foo['record'] = {}
x = 0
y = len(tracking_list)
for code in tracking_list:
first = '19910101'
if code not in foo['record']:
foo['record'][code] = []
elif len(foo['record'][code])>0:
tmp = time.strptime(foo['record'][code][0][0], '%Y-%m-%d')
nxt = datetime.date(tmp.tm_year, tmp.tm_mon, tmp.tm_mday) + datetime.timedelta(1)
first = nxt.strftime('%Y%m%d')
code163 = code.replace('sh', '0').replace('sz', '1')
url = 'http://quotes.money.163.com/service/chddata.html?code=%s&start=%s&end=20380119&fields=TCLOSE;HIGH;LOW;TOPEN;LCLOSE;CHG;PCHG;TURNOVER;VOTURNOVER;VATURNOVER;TCAP;MCAP' % (code163, first)
rep = None
raw = None
sup = 0.0
while True:
try:
rep = urllib2.urlopen(url, timeout=1)
raw = list(csv.reader(rep))
except (urllib2.URLError, socket.error, socket.timeout) as e:
sup += 0.2
time.sleep(sup)
continue
break
bunk = []
for row in raw[1:]:
contents = [row[0]]
for i in xrange(3, 15):
cell = row[i]
if cell != '':
if cell != 'None':
cell = float(cell)
else:
cell = None
else:
cell = 0.0
contents.append(cell)
bunk.append(contents)
foo['record'][code] = bunk + foo['record'][code]
x += 1
print '%d/%d + %d' % (x, y, len(bunk))
sys.stdout.flush()
#time.sleep(0.2)
with open('./data/dbf/overview.dbf', 'w') as dbf:
json.dump(foo, dbf)
print 'dbf have been written successfully'
sys.stdout.flush()
authors: ["jki14wz@gmail.com"] | author_id: jki14wz@gmail.com

blob_id: 4661574ed6bcbdbcb9e401e4121736a4c790d8e6 | directory_id: 0131f6d91da5b063b3d79330b014871c128c67ed | content_id: ebb414e01e4aa3612a6cdb9b84448a46a5a2afc9
path: /irc/asparagus/modules/4chan.py | extension: py | length_bytes: 2,174 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: moneytech/code-2 | branch_name: refs/heads/master | snapshot_id: f31602a702cc7e13b24c1ab5817d30d2314dde76 | revision_id: d970038329f7c4e4f0ee9dcd1b345741dd0fcc51 | github_id: null | star_events_count: 0 | fork_events_count: 0
detected_licenses: ["Zlib"] | license_type: permissive | gha_license_id: null | gha_language: null
visit_date: 2021-10-02T18:24:20.159492 | revision_date: 2018-11-30T02:14:18 | committer_date: 2018-11-30T02:14:18 | gha_event_created_at: null | gha_created_at: null
content:
"""
Copyright (c) 2013, Christine Dodrill
All rights reserved.
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source
distribution.
"""
import requests
import re
NAME="4chan lookups"
DESC="4chan post info lookups"
FOURCHAN_REGEX = re.compile(r'(.+boards\.)4chan\.org/([a-z0-9]+)/res/([1-9][0-9]+)')  # raw string avoids invalid-escape warnings
def initModule(cod):
cod.s2scommands["PRIVMSG"].append(fourchanLookup)
def destroyModule(cod):
cod.s2scommands["PRIVMSG"].remove(fourchanLookup)
def rehash():
pass
def fourchanLookup(cod, line):
"""
This uses requests to scrape out things from the 4chan API
"""
global FOURCHAN_REGEX
if line.args[0] not in cod.channels:
return
chatline = line.args[-1]
postid = None
try:
board = FOURCHAN_REGEX.split(chatline)[2]
postid = FOURCHAN_REGEX.split(chatline)[3]
except IndexError:  # the message did not contain a matching 4chan URL
return
try:
info = requests.get("http://api.4chan.org/%s/res/%s.json" % (board, postid)).json()
text = info["posts"][0]["com"].split("<br>")[0]
text = text.replace('<span class="quote">>', ">")
text = text.replace("</span>", "")
string = "^ fourchan: %s on /%s/ - %s" %\
(info["posts"][0]["name"], board, text)
cod.privmsg(line.args[0], string)
except Exception as e:
cod.privmsg(line.args[0], "There was some error looking up that post: %s" % e.message)
authors: ["xena@yolo-swag.com"] | author_id: xena@yolo-swag.com

blob_id: 3b3693eadafb982f2084a294eca435e9ca20ceee | directory_id: 67bdebd561b19af9bf759b6ed5de8556b93ea91f | content_id: f02ba6959a7ce84d5162f521cd3b15b0d53a8b8d
path: /trace_unless.py | extension: py | length_bytes: 316 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: rlowrance/re-avm | branch_name: refs/heads/master | snapshot_id: 91371ec79f6b6f48e17643da4dfb7a4894d0a0ca | revision_id: d4cfa62e9f65d325e8ac98caa61d3fb666b8a6a2 | github_id: 42,865,972 | star_events_count: 31 | fork_events_count: 10
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2021-01-17T07:34:16.876133 | revision_date: 2017-02-06T21:04:59 | committer_date: 2017-02-06T21:04:59 | gha_event_created_at: null | gha_created_at: null
content:
import pdb
def trace_unless(condition, message, **kwds):
'like assert condition, message; but enters debugger if condition fails'
if condition:
return
print '+++++++++++++++'
for k, v in kwds.iteritems():
print k, v
print message
print '+++++++++++++++'
pdb.set_trace()
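
A hypothetical call of the helper above (Python 2, matching the module):

x = -1
trace_unless(x > 0, 'expected positive x', x=x)
# prints the kwargs and the message, then enters pdb because the condition is False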
authors: ["roy.lowrance@gmail.com"] | author_id: roy.lowrance@gmail.com

blob_id: 01f9a0bdf77391ac938617afd13a6195299dafb5 | directory_id: f0d713996eb095bcdc701f3fab0a8110b8541cbb | content_id: 8e96679b72e16f74a7456684857920bfe30cdea7
path: /Wpbb6x9nHax55zKLX_14.py | extension: py | length_bytes: 1,103 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: daniel-reich/turbo-robot | branch_name: refs/heads/main | snapshot_id: feda6c0523bb83ab8954b6d06302bfec5b16ebdf | revision_id: a7a25c63097674c0a81675eed7e6b763785f1c41 | github_id: 350,773,815 | star_events_count: 0 | fork_events_count: 0
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2023-03-26T01:55:14.210264 | revision_date: 2021-03-23T16:08:01 | committer_date: 2021-03-23T16:08:01 | gha_event_created_at: null | gha_created_at: null
content:
"""
In many cases, **SQL** is used to select more than just columns in a table.
For example, you can filter your search by specifying conditions as seen
below:
SELECT * FROM Table
WHERE Name = "Bob";
* Again, we can use the asterisk to select all the data in a table.
* However, with the use of the WHERE keyword, only Bob's data is selected.
Name| Salary
---|---
Bob| 30000
In this challenge, fill in the query in the **Code** tab to select the
`Salary` from "Adam" in the `Employees` table.
### Original Table
Name| Salary
---|---
Adam| 50000
Bob| 30000
Charlotte| 45000
Dillon| 70000
Eileen| 70000
### Expected Results
Salary
---
50000
### Notes
* Check out the **Resources** tab for more SQL tutorials and exercises.
* When presented with more complex queries like this, it is best practice to format your code by putting each statement on separate lines!
* See the rest of the challenges in this series [here!](https://edabit.com/collection/ZEmuGy8zxzDQdBb5o)
"""
query = "SELECT salary FROM employees WHERE name = 'Adam'"
authors: ["daniel.reich@danielreichs-MacBook-Pro.local"] | author_id: daniel.reich@danielreichs-MacBook-Pro.local

blob_id: 52d6fc860067cda559eaca821a47486d2e8644ac | directory_id: c4af67db4c523d20f2d55aef90ba77db1fb53c38 | content_id: c86903bdb2a56bf53134126924f3d2287a8708e7
path: /CMFDefault/Extensions/update_catalogIndexes.py | extension: py | length_bytes: 639 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: dtgit/dtedu | branch_name: refs/heads/master | snapshot_id: e59b16612d7d9ea064026bf80a44657082ef45a3 | revision_id: d787885fe7ed0de6f9e40e9b05d852a0e9d60677 | github_id: 171,351 | star_events_count: 1 | fork_events_count: 0
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2020-04-06T05:22:50.025074 | revision_date: 2009-04-08T20:13:20 | committer_date: 2009-04-08T20:13:20 | gha_event_created_at: null | gha_created_at: null
content:
from Products.CMFCore.utils import getToolByName
def update_catalogIndexes(self, REQUEST):
'''
External method to drop, re-add, and rebuild catalog Indexes for migrated
CMF sites from Zope 2.3 to 2.4+.
'''
rIndexes = {'allowedRolesAndUsers': 'KeywordIndex'
, 'effective': 'FieldIndex'
, 'expires': 'FieldIndex'}
ct = getToolByName(self, 'portal_catalog')
map(lambda x, ct=ct: ct.delIndex(x), rIndexes.keys())
map(lambda x, ct=ct: ct.addIndex(x[0], x[1]), rIndexes.items())
ct.manage_reindexIndex(ids=rIndexes.keys(), REQUEST=REQUEST)
return 'Catalog Indexes rebuilt.'
authors: ["ron@domU-12-31-39-02-65-03.compute-1.internal"] | author_id: ron@domU-12-31-39-02-65-03.compute-1.internal

blob_id: 00c5c1b70eb024b63457ed9b09528d8528502b80 | directory_id: cf05dc6b31bb83b0b71cd357d7d19dfea7ad40a0 | content_id: 2f956301bb4f9be16de57da66f79a02283317f08
path: /office_system/apps/users/forms.py | extension: py | length_bytes: 161 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: peng-python/oa | branch_name: refs/heads/master | snapshot_id: 5df69a935b20b8200808133bf92d6757016cb9fa | revision_id: bfc600a9c439866e1617f297007dc10fd8b86090 | github_id: 133,905,915 | star_events_count: 0 | fork_events_count: 0
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2020-03-17T20:21:54.927871 | revision_date: 2018-05-18T05:15:28 | committer_date: 2018-05-18T05:15:28 | gha_event_created_at: null | gha_created_at: null
content:
from django import forms
class LoginFrom(forms.Form):
username = forms.CharField(required=True)
password = forms.CharField(required=True, min_length=8)
authors: ["zhenpeng_pro@sina.com"] | author_id: zhenpeng_pro@sina.com

blob_id: e52e408df819be554e75d2ac286768fecdfd6097 | directory_id: 060e82b46016744deb7da7c940f97d0dea39d1b3 | content_id: 1fa3c0c96bb707e8b2bfcc393884c9065dcf378b
path: /excel/定向分配/赵土良100445/ztl_python_script/To_mongo/ztl.py | extension: py | length_bytes: 1,872 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: yangwen1997/- | branch_name: refs/heads/master | snapshot_id: eb8c609d8af3f4493adf70d10df8cc5f561bcf60 | revision_id: 197ae391ff1be36189ba003b025fd4802d703e00 | github_id: 160,466,240 | star_events_count: 0 | fork_events_count: 1
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: Python
visit_date: 2022-12-08T17:11:57.910664 | revision_date: 2019-11-12T01:32:17 | committer_date: 2019-11-12T01:32:17 | gha_event_created_at: 2022-12-08T05:24:52 | gha_created_at: 2018-12-05T05:34:19
content:
'''
@author   : yangwenlong
@file     : ztl
@intro    : store the 4,320 records for 赵土良 into mongo
@created  : 2019/9/25
'''
import xlrd
import hashlib
from ztl_python_script.common import Enterprise_db,get_log
log = get_log()
# open the workbook (Excel file) with xlrd
book = xlrd.open_workbook(r'D:\白名单\定向分配资源整理\excel\定向分配\赵土良100445\2016年8月4320条-赵土良.xlsx')
# get the first sheet object of the opened workbook via sheet_by_index(0)
sheet=book.sheet_by_index(0)
def save(item):
item["_id"] = hashlib.md5(str(item["电话"]).encode('utf-8')).hexdigest()
Enterprise_db.save(item)
log.info("数据{}存入mongodb成功".format(item["电话"]))
# looping over ncols would yield whole columns; here we loop over the rows
# and read the contents of each row
for i in range(sheet.nrows):
if i == 0:
continue
else:
item = {}
item["公司"] = sheet.row_values(i)[0]
item["公司类型"] = sheet.row_values(i)[1]
item["负责人姓名"] = sheet.row_values(i)[2]
item["联络员姓名"] = sheet.row_values(i)[3]
phone= str(sheet.row_values(i)[4])
item["经营范围"] = sheet.row_values(i)[5]
item["地址"] = sheet.row_values(i)[6]
if ";" in phone:
phone_lt = phone.split(";")
for _ in phone_lt:
if _ and _ != '':
if "." in _:
item["电话"] = _.split(".")[0]
save(item)
else:
item["电话"] = _
save(item)
else:
if phone:
if "." in phone:
item["电话"] = phone.split(".")[0]
save(item)
else:
item["电话"] = phone
save(item)
authors: ["1120021365@qq.com"] | author_id: 1120021365@qq.com

blob_id: e9b1a299486c6a98727862c462c3b949bf92b416 | directory_id: 94c8dd4126da6e9fe9acb2d1769e1c24abe195d3 | content_id: 775bf9fdf67fe41e674f190ff83ba2983724d6da
path: /qiskit/circuit/library/__init__.py | extension: py | length_bytes: 7,078 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: levbishop/qiskit-terra | branch_name: refs/heads/master | snapshot_id: a75c2f96586768c12b51a117f9ccb7398b52843d | revision_id: 98130dd6158d1f1474e44dd5aeacbc619174ad63 | github_id: 181,052,828 | star_events_count: 1 | fork_events_count: 0
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python
visit_date: 2023-07-19T19:00:53.483204 | revision_date: 2021-04-20T16:30:16 | committer_date: 2021-04-20T16:30:16 | gha_event_created_at: 2019-06-05T15:32:13 | gha_created_at: 2019-04-12T17:20:54
content:
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
===============================================
Circuit Library (:mod:`qiskit.circuit.library`)
===============================================
.. currentmodule:: qiskit.circuit.library
Standard Gates
==============
.. autosummary::
:toctree: ../stubs/
Barrier
C3XGate
C3SXGate
C4XGate
CCXGate
DCXGate
CHGate
CPhaseGate
CRXGate
CRYGate
CRZGate
CSwapGate
CSXGate
CUGate
CU1Gate
CU3Gate
CXGate
CYGate
CZGate
HGate
IGate
MCPhaseGate
MCXGate
MCXGrayCode
MCXRecursive
MCXVChain
Measure
MSGate
PhaseGate
RCCXGate
RC3XGate
Reset
RGate
RXGate
RXXGate
RYGate
RYYGate
RZGate
RZZGate
RZXGate
ECRGate
SGate
SdgGate
SwapGate
iSwapGate
SXGate
SXdgGate
TGate
TdgGate
UGate
U1Gate
U2Gate
U3Gate
XGate
YGate
ZGate
Generalized Gates
=================
.. autosummary::
:toctree: ../stubs/
Diagonal
MCMT
MCMTVChain
Permutation
GMS
GR
GRX
GRY
GRZ
RVGate
Boolean Logic Circuits
======================
.. autosummary::
:toctree: ../stubs/
AND
OR
XOR
InnerProduct
Basis Change Circuits
=====================
.. autosummary::
:toctree: ../stubs/
QFT
Arithmetic Circuits
===================
Amplitude Functions
+++++++++++++++++++
.. autosummary::
:toctree: ../stubs/
LinearAmplitudeFunction
Functional Pauli Rotations
++++++++++++++++++++++++++
.. autosummary::
:toctree: ../stubs/
FunctionalPauliRotations
LinearPauliRotations
PolynomialPauliRotations
PiecewiseLinearPauliRotations
PiecewisePolynomialPauliRotations
PiecewiseChebyshev
Adders
++++++
.. autosummary::
:toctree: ../stubs/
WeightedAdder
Comparators
+++++++++++
.. autosummary::
:toctree: ../stubs/
IntegerComparator
Functions on binary variables
+++++++++++++++++++++++++++++
.. autosummary::
:toctree: ../stubs/
QuadraticForm
Amplitude Functions
===================
.. autosummary::
:toctree: ../stubs/
LinearAmplitudeFunction
Particular Quantum Circuits
===========================
.. autosummary::
:toctree: ../stubs/
FourierChecking
GraphState
HiddenLinearFunction
IQP
QuantumVolume
PhaseEstimation
GroverOperator
PhaseOracle
Probability distributions
=========================
.. autosummary::
:toctree: ../stubs/
UniformDistribution
NormalDistribution
LogNormalDistribution
N-local circuits
================
.. autosummary::
:toctree: ../stubs/
NLocal
TwoLocal
PauliTwoDesign
RealAmplitudes
EfficientSU2
ExcitationPreserving
QAOAAnsatz
Data encoding circuits
======================
.. autosummary::
:toctree: ../stubs/
PauliFeatureMap
ZFeatureMap
ZZFeatureMap
NCT (Not-CNOT-Toffoli) template circuits
========================================
.. autosummary::
:toctree: ../stubs/
templates.nct.template_nct_2a_1
templates.nct.template_nct_2a_2
templates.nct.template_nct_2a_3
templates.nct.template_nct_4a_1
templates.nct.template_nct_4a_2
templates.nct.template_nct_4a_3
templates.nct.template_nct_4b_1
templates.nct.template_nct_4b_2
templates.nct.template_nct_5a_1
templates.nct.template_nct_5a_2
templates.nct.template_nct_5a_3
templates.nct.template_nct_5a_4
templates.nct.template_nct_6a_1
templates.nct.template_nct_6a_2
templates.nct.template_nct_6a_3
templates.nct.template_nct_6a_4
templates.nct.template_nct_6b_1
templates.nct.template_nct_6b_2
templates.nct.template_nct_6c_1
templates.nct.template_nct_7a_1
templates.nct.template_nct_7b_1
templates.nct.template_nct_7c_1
templates.nct.template_nct_7d_1
templates.nct.template_nct_7e_1
templates.nct.template_nct_2a_1
templates.nct.template_nct_9a_1
templates.nct.template_nct_9c_1
templates.nct.template_nct_9c_2
templates.nct.template_nct_9c_3
templates.nct.template_nct_9c_4
templates.nct.template_nct_9c_5
templates.nct.template_nct_9c_6
templates.nct.template_nct_9c_7
templates.nct.template_nct_9c_8
templates.nct.template_nct_9c_9
templates.nct.template_nct_9c_10
templates.nct.template_nct_9c_11
templates.nct.template_nct_9c_12
templates.nct.template_nct_9d_1
templates.nct.template_nct_9d_2
templates.nct.template_nct_9d_3
templates.nct.template_nct_9d_4
templates.nct.template_nct_9d_5
templates.nct.template_nct_9d_6
templates.nct.template_nct_9d_7
templates.nct.template_nct_9d_8
templates.nct.template_nct_9d_9
templates.nct.template_nct_9d_10
Clifford template circuits
==========================
.. autosummary::
:toctree: ../stubs/
clifford_2_1
clifford_2_2
clifford_2_3
clifford_2_4
clifford_3_1
clifford_4_1
clifford_4_2
clifford_4_3
clifford_4_4
clifford_5_1
clifford_6_1
clifford_6_2
clifford_6_3
clifford_6_4
clifford_6_5
clifford_8_1
clifford_8_2
clifford_8_3
RZXGate template circuits
=========================
.. autosummary::
:toctree: ../stubs/
rzx_yz
rzx_xz
rzx_cy
rzx_zz1
rzx_zz2
rzx_zz3
"""
from .standard_gates import *
from .templates import *
from ..barrier import Barrier
from ..measure import Measure
from ..reset import Reset
from .blueprintcircuit import BlueprintCircuit
from .generalized_gates import (
Diagonal,
MCMT,
MCMTVChain,
Permutation,
GMS,
GR,
GRX,
GRY,
GRZ,
RVGate
)
from .boolean_logic import (
AND,
OR,
XOR,
InnerProduct,
)
from .basis_change import QFT
from .arithmetic import (
FunctionalPauliRotations,
LinearPauliRotations,
PiecewiseLinearPauliRotations,
PiecewisePolynomialPauliRotations,
PolynomialPauliRotations,
IntegerComparator,
WeightedAdder,
QuadraticForm,
LinearAmplitudeFunction,
PiecewiseChebyshev,
)
from .n_local import (
NLocal,
TwoLocal,
PauliTwoDesign,
RealAmplitudes,
EfficientSU2,
ExcitationPreserving,
QAOAAnsatz
)
from .data_preparation import (
PauliFeatureMap,
ZFeatureMap,
ZZFeatureMap
)
from .probability_distributions import (
LogNormalDistribution,
NormalDistribution,
UniformDistribution
)
from .quantum_volume import QuantumVolume
from .fourier_checking import FourierChecking
from .graph_state import GraphState
from .hidden_linear_function import HiddenLinearFunction
from .iqp import IQP
from .phase_estimation import PhaseEstimation
from .grover_operator import GroverOperator
from .phase_oracle import PhaseOracle
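
An illustrative import of two entries from the listing above:

from qiskit.circuit.library import QFT, EfficientSU2

qft = QFT(num_qubits=3)
ansatz = EfficientSU2(num_qubits=3, reps=1)
print(qft.name, ansatz.num_parameters)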
authors: ["noreply@github.com"] | author_id: levbishop.noreply@github.com

blob_id: 10f403c415e3258aa195e50df38424e21966e650 | directory_id: c67831f476cb530fc0c26e0bf4258ce18e986749 | content_id: f9fde504260720388d163863648d863149e5b7ff
path: /module_intent/migrations/0001_initial.py | extension: py | length_bytes: 6,294 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: cz-qq/bk-chatbot | branch_name: refs/heads/master | snapshot_id: a3ce4b86452b3de0ff35430c1c85b91d6b23a3e6 | revision_id: da37fb2197142eae32158cdb5c2b658100133fff | github_id: null | star_events_count: 0 | fork_events_count: 0
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: null | gha_language: null
visit_date: 2023-06-05T05:48:22.083008 | revision_date: 2021-06-15T10:21:30 | committer_date: 2021-06-15T10:21:30 | gha_event_created_at: null | gha_created_at: null
content:
"""
TencentBlueKing is pleased to support the open source community by making
蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited,
a Tencent company. All rights reserved.
Licensed under the MIT License (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.db import migrations, models
import module_intent.models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Intent",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"biz_id",
models.PositiveIntegerField(
db_index=True, default=0, verbose_name="业务ID"
),
),
(
"biz_name",
models.CharField(default="", max_length=128, verbose_name="业务名称"),
),
(
"intent_name",
models.CharField(default="", max_length=128, verbose_name="业务名称"),
),
("status", models.BooleanField(default=False, verbose_name="意图状态")),
(
"available_user",
module_intent.models.CompressJSONField(
default=[], verbose_name="可执行用户"
),
),
(
"available_group",
module_intent.models.CompressJSONField(
default=[], verbose_name="可执行群组"
),
),
("is_delete", models.BooleanField(default=False, verbose_name="是否已删除")),
(
"create_by",
models.CharField(default="-", max_length=100, verbose_name="创建人"),
),
(
"create_time",
models.DateTimeField(auto_now=True, verbose_name="创建时间"),
),
(
"update_time",
models.DateTimeField(auto_now=True, verbose_name="更新时间"),
),
],
options={
"verbose_name": "【意图】",
"verbose_name_plural": "【意图】",
},
),
migrations.CreateModel(
name="Task",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"biz_id",
models.PositiveIntegerField(
db_index=True, default=0, verbose_name="业务ID"
),
),
("index_id", models.BigIntegerField(default=-1, verbose_name="索引ID")),
(
"platform",
models.CharField(
choices=[
("JOB", "JOB"),
("SOPS", "标准运维"),
("DEVOPS", "蓝盾"),
("DEFINE", "自定义"),
],
default="JOB",
max_length=128,
verbose_name="平台名称",
),
),
(
"task_id",
models.CharField(
default="JOB", max_length=128, verbose_name="任务ID"
),
),
(
"slots",
module_intent.models.CompressJSONField(
default=[], verbose_name="槽位信息"
),
),
(
"source",
module_intent.models.CompressJSONField(
default={}, verbose_name="任务元数据"
),
),
("script", models.TextField(default="", verbose_name="执行脚本信息")),
],
options={
"verbose_name": "【任务信息】",
"verbose_name_plural": "【任务信息】",
},
),
migrations.CreateModel(
name="Utterances",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"biz_id",
models.PositiveIntegerField(
db_index=True, default=0, verbose_name="业务ID"
),
),
("index_id", models.BigIntegerField(default=-1, verbose_name="索引ID")),
(
"content",
module_intent.models.CompressJSONField(
default=[], verbose_name="语料列表"
),
),
],
options={
"verbose_name": "【语料库】",
"verbose_name_plural": "【语料库】",
},
),
]
authors: ["123@qq.com"] | author_id: 123@qq.com

blob_id: 1778e80f2fab17a4495abc3b59d9289f191e26ed | directory_id: bb71e927dc2429abf551b44874ee990cb3a93f7a | content_id: b7b940b5716ec1c81a6ebdd74a7e822126801790
path: /python/python_tricks/train.py | extension: py | length_bytes: 281 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: khuyentran1401/Data-science | branch_name: refs/heads/master | snapshot_id: c37021349bb407ed50d891dab780463e0b243de5 | revision_id: be59f5959be9f5944e12260fbb4548c85ef6aabe | github_id: 280,508,180 | star_events_count: 3,809 | fork_events_count: 943
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: Jupyter Notebook
visit_date: 2023-08-31T13:46:58.212459 | revision_date: 2023-08-09T15:46:11 | committer_date: 2023-08-09T15:46:11 | gha_event_created_at: 2023-05-23T02:38:37 | gha_created_at: 2020-07-17T19:25:27
content:
import sys
model_type = sys.argv[1]
model_version = sys.argv[2]
model_path = f'''model/model1/{model_type}/version_{model_version}'''
print('Loading model from', model_path, 'for training')
# On the terminal type
# for version in 1 2 3 4
# do
#   python train.py <model_type> $version   # note: the script reads two arguments
# done
authors: ["khuyentran1476@gmail.com"] | author_id: khuyentran1476@gmail.com

blob_id: 9593144727e4f55f8bc92271de4d519cd3632302 | directory_id: 8e07b5b7a8dd38e0ef2c7ffc97d0392d886f32e6 | content_id: 23482b55cac99932caabc054d47d219409f6ba40
path: /venv/Lib/site-packages/mypy/typeshed/stdlib/3/winreg.pyi | extension: pyi | length_bytes: 3,827 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: RodrigoNeto/cursopythonyt | branch_name: refs/heads/master | snapshot_id: fc064a2e6106324e22a23c54bdb9c31040ac9eb6 | revision_id: 279dad531e21a9c7121b73d84fcbdd714f435e7e | github_id: 395,646,798 | star_events_count: 0 | fork_events_count: 0
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2023-07-03T00:54:09.795054 | revision_date: 2021-08-13T12:42:24 | committer_date: 2021-08-13T12:42:24 | gha_event_created_at: null | gha_created_at: null
content:
from types import TracebackType
from typing import Any, Optional, Tuple, Type, Union
_KeyType = Union[HKEYType, int]
def CloseKey(__hkey: _KeyType) -> None: ...
def ConnectRegistry(__computer_name: Optional[str], __key: _KeyType) -> HKEYType: ...
def CreateKey(__key: _KeyType, __sub_key: Optional[str]) -> HKEYType: ...
def CreateKeyEx(key: _KeyType, sub_key: Optional[str], reserved: int = ..., access: int = ...) -> HKEYType: ...
def DeleteKey(__key: _KeyType, __sub_key: str) -> None: ...
def DeleteKeyEx(key: _KeyType, sub_key: str, access: int = ..., reserved: int = ...) -> None: ...
def DeleteValue(__key: _KeyType, __value: str) -> None: ...
def EnumKey(__key: _KeyType, __index: int) -> str: ...
def EnumValue(__key: _KeyType, __index: int) -> Tuple[str, Any, int]: ...
def ExpandEnvironmentStrings(__str: str) -> str: ...
def FlushKey(__key: _KeyType) -> None: ...
def LoadKey(__key: _KeyType, __sub_key: str, __file_name: str) -> None: ...
def OpenKey(key: _KeyType, sub_key: str, reserved: int = ..., access: int = ...) -> HKEYType: ...
def OpenKeyEx(key: _KeyType, sub_key: str, reserved: int = ..., access: int = ...) -> HKEYType: ...
def QueryInfoKey(__key: _KeyType) -> Tuple[int, int, int]: ...
def QueryValue(__key: _KeyType, __sub_key: Optional[str]) -> str: ...
def QueryValueEx(__key: _KeyType, __name: str) -> Tuple[Any, int]: ...
def SaveKey(__key: _KeyType, __file_name: str) -> None: ...
def SetValue(__key: _KeyType, __sub_key: str, __type: int, __value: str) -> None: ...
def SetValueEx(
__key: _KeyType, __value_name: Optional[str], __reserved: Any, __type: int, __value: Union[str, int]
) -> None: ... # reserved is ignored
def DisableReflectionKey(__key: _KeyType) -> None: ...
def EnableReflectionKey(__key: _KeyType) -> None: ...
def QueryReflectionKey(__key: _KeyType) -> bool: ...
HKEY_CLASSES_ROOT: int
HKEY_CURRENT_USER: int
HKEY_LOCAL_MACHINE: int
HKEY_USERS: int
HKEY_PERFORMANCE_DATA: int
HKEY_CURRENT_CONFIG: int
HKEY_DYN_DATA: int
KEY_ALL_ACCESS: int
KEY_WRITE: int
KEY_READ: int
KEY_EXECUTE: int
KEY_QUERY_VALUE: int
KEY_SET_VALUE: int
KEY_CREATE_SUB_KEY: int
KEY_ENUMERATE_SUB_KEYS: int
KEY_NOTIFY: int
KEY_CREATE_LINK: int
KEY_WOW64_64KEY: int
KEY_WOW64_32KEY: int
REG_BINARY: int
REG_DWORD: int
REG_DWORD_LITTLE_ENDIAN: int
REG_DWORD_BIG_ENDIAN: int
REG_EXPAND_SZ: int
REG_LINK: int
REG_MULTI_SZ: int
REG_NONE: int
REG_QWORD: int
REG_QWORD_LITTLE_ENDIAN: int
REG_RESOURCE_LIST: int
REG_FULL_RESOURCE_DESCRIPTOR: int
REG_RESOURCE_REQUIREMENTS_LIST: int
REG_SZ: int
REG_CREATED_NEW_KEY: int # undocumented
REG_LEGAL_CHANGE_FILTER: int # undocumented
REG_LEGAL_OPTION: int # undocumented
REG_NOTIFY_CHANGE_ATTRIBUTES: int # undocumented
REG_NOTIFY_CHANGE_LAST_SET: int # undocumented
REG_NOTIFY_CHANGE_NAME: int # undocumented
REG_NOTIFY_CHANGE_SECURITY: int # undocumented
REG_NO_LAZY_FLUSH: int # undocumented
REG_OPENED_EXISTING_KEY: int # undocumented
REG_OPTION_BACKUP_RESTORE: int # undocumented
REG_OPTION_CREATE_LINK: int # undocumented
REG_OPTION_NON_VOLATILE: int # undocumented
REG_OPTION_OPEN_LINK: int # undocumented
REG_OPTION_RESERVED: int # undocumented
REG_OPTION_VOLATILE: int # undocumented
REG_REFRESH_HIVE: int # undocumented
REG_WHOLE_HIVE_VOLATILE: int # undocumented
error = OSError
# Though this class has a __name__ of PyHKEY, it's exposed as HKEYType for some reason
class HKEYType:
def __bool__(self) -> bool: ...
def __int__(self) -> int: ...
def __enter__(self) -> HKEYType: ...
def __exit__(
self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]
) -> Optional[bool]: ...
def Close(self) -> None: ...
def Detach(self) -> int: ...
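
A usage sketch of the API these stubs describe (Windows only; the key path is illustrative):

import winreg

with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
                    r'SOFTWARE\Microsoft\Windows NT\CurrentVersion') as key:
    value, vtype = winreg.QueryValueEx(key, 'ProductName')
    print(value, vtype == winreg.REG_SZ)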
authors: ["rodrigoneto.forseti@gmail.com"] | author_id: rodrigoneto.forseti@gmail.com

blob_id: 9848fd410ddb1d313b711e656fde9ae27d2261fd | directory_id: 732d750ce7b96090bc1b252fbefdadfe167990a1 | content_id: 3ac84486d4ee66edda8e7eb94ebf4c159a76b37a
path: /networker/io/__init__.py | extension: py | length_bytes: 2,465 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: carbz/networker | branch_name: refs/heads/master | snapshot_id: 4008174200db1865635f524646ad550187a4d289 | revision_id: cab14026118db42603bd1a5757ec460c6cb4984d | github_id: null | star_events_count: 0 | fork_events_count: 0
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2021-01-15T10:24:59.858048 | revision_date: 2015-04-22T17:11:49 | committer_date: 2015-04-22T17:11:49 | gha_event_created_at: null | gha_created_at: null
content:
# -*- coding: utf-8 -*-
import ogr
import osr
import networkx as nx
import networker.geomath as gm
from networker.classes.geograph import GeoGraph
import warnings
import os
"""
Package for reading/writing networkx based GeoGraphs
Note: these wrap existing networkx functions for custom behavior
"""
def load_shp(shp_path):
""" loads a shapefile into a networkx based GeoGraph object
Args:
shp_path: string path to a line or point shapefile
Returns:
geograph: GeoGraph
"""
# NOTE: if shp_path is unicode io doesn't work for some reason
shp_path = shp_path.encode('ascii', 'ignore')
g = nx.read_shp(shp_path)
coords = dict(enumerate(g.nodes()))
driver = ogr.GetDriverByName('ESRI Shapefile')
shp = driver.Open(shp_path)
layer = shp.GetLayer()
spatial_ref = layer.GetSpatialRef()
proj4 = None
if not spatial_ref:
if gm.is_in_lon_lat(coords):
proj4 = gm.PROJ4_LATLONG
else:
warnings.warn("Spatial Reference could not be set for {}".
format(shp_path))
else:
proj4 = spatial_ref.ExportToProj4()
g = nx.convert_node_labels_to_integers(g)
return GeoGraph(srs=proj4, coords=coords, data=g)
def write_shp(geograph, shp_dir):
""" writes a shapefile from a networkx based GeoGraph object
Args:
geograph: GeoGraph object
shp_dir: string path to dir to write shape files
"""
assert geograph.is_aligned()
# looks like networkx wants us to relabel nodes by their coords
tup_map = {i: tuple(coords) for i, coords in geograph.coords.items()}
# copy geograph to plain networkx graph
# (relabeling a GeoGraph doesn't seem to work)
nx_coord_graph = nx.Graph(data=geograph)
nx.relabel_nodes(nx_coord_graph, tup_map, copy=False)
nx.write_shp(nx_coord_graph, shp_dir)
if geograph.srs:
# write srs info to prj file (nx seems to miss this)
sr = osr.SpatialReference()
sr.ImportFromProj4(geograph.srs)
main_prj_filename = shp_dir + '.prj'
edge_prj_filename = os.path.join(shp_dir, 'edges.prj')
node_prj_filename = os.path.join(shp_dir, 'nodes.prj')
def write_prj(prj_filename):
out = open(prj_filename, 'w')
out.write(sr.ExportToWkt())
out.close()
write_prj(main_prj_filename)
write_prj(edge_prj_filename)
write_prj(node_prj_filename)
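
A hypothetical round trip with the helpers above (paths are placeholders; GDAL/OGR and the shapefiles must be available):

gg = load_shp('data/network_nodes.shp')   # placeholder input path
write_shp(gg, 'out/network')              # writes edges/nodes shapefiles plus .prj files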
authors: ["chris.natali@gmail.com"] | author_id: chris.natali@gmail.com

blob_id: 43b694369a93e5b2fca5c24a9a3ea7ae339c90e4 | directory_id: 604bd9370a5b4e61a5f9e533c6612bc94aef0c6c | content_id: ac187c287afd3fa9fa8cede7bf91012b73480f43
path: /django/helpdesk_deploy_old/helpdesk/base/migrations/0005_auto_20201210_0554.py | extension: py | length_bytes: 756 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: Netromnik/python | branch_name: refs/heads/main | snapshot_id: 2ba2f15b56e635b53c12ef39ed776b9577c08dff | revision_id: 630a9df63b1cade9af38de07bb9cd0c3b8694c93 | github_id: 355,891,948 | star_events_count: 0 | fork_events_count: 0
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: null | gha_language: null
visit_date: 2023-06-16T04:58:35.634371 | revision_date: 2021-07-18T16:20:13 | committer_date: 2021-07-18T16:20:13 | gha_event_created_at: null | gha_created_at: null
content:
# Generated by Django 3.1.2 on 2020-12-10 05:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0004_auto_20201114_1445'),
]
operations = [
migrations.AddField(
model_name='collectmedia',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='_collectmedia_groups_+', to='base.CustomGroup', verbose_name='groups'),
),
migrations.AlterField(
model_name='collectmedia',
name='name',
field=models.CharField(max_length=30, unique=True),
),
]
authors: ["you@example.com"] | author_id: you@example.com

blob_id: e261f4ac64cfeda090c8d41631cb690d60dd4505 | directory_id: 964f2882117ff656d7a2757c233c6dd88226d975 | content_id: e3e711abf8319d6d068bac1c11a37bd4253ee6bd
path: /services/autoscaling/setup.py | extension: py | length_bytes: 1,726 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: ignapas/osparc-simcore | branch_name: refs/heads/master | snapshot_id: a002dd47d7689af9c1c650eea33e31add2b182c1 | revision_id: cb62e56b194265a907f260f3071c55a65f569823 | github_id: 170,852,656 | star_events_count: 0 | fork_events_count: 0
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python
visit_date: 2023-01-22T08:55:32.580775 | revision_date: 2022-12-09T15:57:36 | committer_date: 2022-12-09T15:57:36 | gha_event_created_at: 2023-01-09T05:03:04 | gha_created_at: 2019-02-15T11:12:34
content:
#!/usr/bin/env python3
import re
import sys
from pathlib import Path
from setuptools import find_packages, setup
def read_reqs(reqs_path: Path) -> set[str]:
return {
r
for r in re.findall(
r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)",
reqs_path.read_text(),
re.MULTILINE,
)
if isinstance(r, str)
}
CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent
NAME = "simcore-service-autoscaling"
VERSION = (CURRENT_DIR / "VERSION").read_text().strip()
AUTHORS = (
"Alexandre Allexandre (Surfict)",
"Sylvain Anderegg (sanderegg)",
"Pedro Crespo-Valero (pcrespov)",
)
DESCRIPTION = "Service to autoscale swarm resources"
README = (CURRENT_DIR / "README.md").read_text()
PROD_REQUIREMENTS = tuple(
read_reqs(CURRENT_DIR / "requirements" / "_base.txt")
| {
"simcore-models-library",
"simcore-service-library[fastapi]",
"simcore-settings-library",
}
)
TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt"))
SETUP = dict(
name=NAME,
version=VERSION,
author=AUTHORS,
description=DESCRIPTION,
long_description=README,
license="MIT license",
python_requires="~=3.9",
packages=find_packages(where="src"),
package_dir={
"": "src",
},
include_package_data=True,
install_requires=PROD_REQUIREMENTS,
test_suite="tests",
tests_require=TEST_REQUIREMENTS,
extras_require={"test": TEST_REQUIREMENTS},
entry_points={
"console_scripts": [
"simcore-service-autoscaling = simcore_service_autoscaling.cli:main",
],
},
)
if __name__ == "__main__":
setup(**SETUP)
authors: ["noreply@github.com"] | author_id: ignapas.noreply@github.com

blob_id: 480f230a8f4d7f2d2cb4b1c639c05909e1bd21f2 | directory_id: e90bf4b372da78ceec15282d060b48d18ba8d4e9 | content_id: f8e3edbeb75efcfbbb06b5df64772e6a083b2986
path: /supervisor/docker/const.py | extension: py | length_bytes: 2,030 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
repo_name: home-assistant/supervisor | branch_name: refs/heads/main | snapshot_id: 67f2e1755ff5fbf7cf2084351e1c32c6995274e0 | revision_id: 4838b280adafed0997f32e021274b531178386cd | github_id: 84,926,758 | star_events_count: 928 | fork_events_count: 477
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python
visit_date: 2023-08-31T22:51:25.949277 | revision_date: 2023-08-31T08:01:42 | committer_date: 2023-08-31T08:01:42 | gha_event_created_at: 2023-09-14T17:11:27 | gha_created_at: 2017-03-14T08:54:15
content:
"""Docker constants."""
from enum import Enum
from docker.types import Mount
from ..const import MACHINE_ID
class Capabilities(str, Enum):
"""Linux Capabilities."""
BPF = "BPF"
DAC_READ_SEARCH = "DAC_READ_SEARCH"
IPC_LOCK = "IPC_LOCK"
NET_ADMIN = "NET_ADMIN"
NET_RAW = "NET_RAW"
PERFMON = "PERFMON"
SYS_ADMIN = "SYS_ADMIN"
SYS_MODULE = "SYS_MODULE"
SYS_NICE = "SYS_NICE"
SYS_PTRACE = "SYS_PTRACE"
SYS_RAWIO = "SYS_RAWIO"
SYS_RESOURCE = "SYS_RESOURCE"
SYS_TIME = "SYS_TIME"
class ContainerState(str, Enum):
"""State of supervisor managed docker container."""
FAILED = "failed"
HEALTHY = "healthy"
RUNNING = "running"
STOPPED = "stopped"
UNHEALTHY = "unhealthy"
UNKNOWN = "unknown"
class RestartPolicy(str, Enum):
"""Restart policy of container."""
NO = "no"
ON_FAILURE = "on-failure"
UNLESS_STOPPED = "unless-stopped"
ALWAYS = "always"
class MountType(str, Enum):
"""Mount type."""
BIND = "bind"
VOLUME = "volume"
TMPFS = "tmpfs"
NPIPE = "npipe"
class PropagationMode(str, Enum):
"""Propagataion mode, only for bind type mounts."""
PRIVATE = "private"
SHARED = "shared"
SLAVE = "slave"
RPRIVATE = "rprivate"
RSHARED = "rshared"
RSLAVE = "rslave"
ENV_TIME = "TZ"
ENV_TOKEN = "SUPERVISOR_TOKEN"
ENV_TOKEN_OLD = "HASSIO_TOKEN"
LABEL_MANAGED = "supervisor_managed"
MOUNT_DBUS = Mount(
type=MountType.BIND.value, source="/run/dbus", target="/run/dbus", read_only=True
)
MOUNT_DEV = Mount(
type=MountType.BIND.value, source="/dev", target="/dev", read_only=True
)
MOUNT_DOCKER = Mount(
type=MountType.BIND.value,
source="/run/docker.sock",
target="/run/docker.sock",
read_only=True,
)
MOUNT_MACHINE_ID = Mount(
type=MountType.BIND.value,
source=MACHINE_ID.as_posix(),
target=MACHINE_ID.as_posix(),
read_only=True,
)
MOUNT_UDEV = Mount(
type=MountType.BIND.value, source="/run/udev", target="/run/udev", read_only=True
)
|
[
"noreply@github.com"
] |
home-assistant.noreply@github.com
|
a90e9370b12a22d95dfd74afb1de671b312d4041
|
6471f95e6a193b0c018d81a2c4e8a518f7ec35d7
|
/tests/test_wrappers.py
|
04a0837a4d619f644b026ee4c60fc82aa4fe1eee
|
[
"BSD-3-Clause"
] |
permissive
|
Billingegroup/bluesky_scanplans
|
5b297e4874b2e57d44a5cc091a2a87be87856503
|
f865da9712bb91dceee73d4aea61f9b6c4b2c9ef
|
refs/heads/master
| 2021-06-30T18:08:37.553268
| 2021-05-20T18:25:31
| 2021-05-20T18:25:31
| 234,350,238
| 0
| 3
|
BSD-3-Clause
| 2021-05-20T17:11:51
| 2020-01-16T15:27:35
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 755
|
py
|
import bluesky.plan_stubs as bps
import bluesky.plans as bp
import bluesky.simulators as sim
import xpdsim.movers as movers
import scanplans.wrapper as helper
def test_wrapper_count():
trigger_and_read = helper.shutter_wrapper(bps.trigger_and_read, movers.shctl1, 0, 100, 0)
one_shot = helper.take_reading_wrapper(bps.one_shot, trigger_and_read)
plan = bp.count([movers.cs700], per_shot=one_shot)
sim.summarize_plan(plan)
def test_grid_scan():
trigger_and_read = helper.shutter_wrapper(bps.trigger_and_read, movers.shctl1, 0, 100, 0)
one_nd_step = helper.take_reading_wrapper(bps.one_nd_step, trigger_and_read)
plan = bp.grid_scan([movers.cs700], movers.cs700, -1, 1, 3, per_step=one_nd_step)
sim.summarize_plan(plan)
|
[
"st3107@columbia.edu"
] |
st3107@columbia.edu
|
527f3f6b2e59ae487d07bc1f27334d5ba63a8ff6
|
eb2867c8ef92810fb0a76aa5fa9933cfba42d3de
|
/DL_HomeWork/DL_midterm_codings/u_toronto_csc321_winter2018.py
|
a2e3d6675c9eead5a172b8b76a40dbf99aa66367
|
[] |
no_license
|
thomas-liao/python_prac
|
76bbaf988a21cdac8e932a272db8f5eddfc66a7f
|
05c68f14c5d7b8bb202ab6dde22e0214b1cd4ee1
|
refs/heads/master
| 2020-04-23T00:48:33.306497
| 2019-02-27T03:46:40
| 2019-02-27T03:46:40
| 170,792,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
import numpy as np
X = np.arange(15).reshape(5,3)
w = np.zeros((3, 1))
b = 0.0  # bias term, updated below alongside w
t = np.array([1,2,3,4,5])
t = np.reshape(t, (5, 1))
y_bar = np.dot(X, w) - t
# single example: dL/dw.T = (yi - ti) * xi  ->  dL/dw = (yi - ti) * xi.T
# multiple examples: y_bar = np.dot(X, w) - t
# res = np.dot(X.T, y_bar) / N
w_bar = np.dot(X.T, y_bar) / X.shape[0]
alpha = 0.01
w -= alpha * w_bar
print(w)
b_bar = np.mean(y_bar)
b -= alpha * b_bar
|
[
"thomas.liao256@gmail.com"
] |
thomas.liao256@gmail.com
|
1fb0eac2c32fdf739ca1c6178dcee78561e16153
|
8cba955ce23f98e0a24dc8f8be04e305c4ba59ef
|
/model.py
|
8f523915da7766efb21deaa5bbaee095b8367a84
|
[] |
no_license
|
amit20-meet/Y2L-Flask-Routing
|
5ba3ae8c9631f18897faf6d7a794355ac9dd907a
|
8ddac918a788bd68c065c9976487eb7f589b701a
|
refs/heads/master
| 2020-09-14T17:58:14.881190
| 2019-12-12T14:59:42
| 2019-12-12T14:59:42
| 223,207,600
| 0
| 0
| null | 2019-11-21T15:41:05
| 2019-11-21T15:41:04
| null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
from sqlalchemy import Column, Integer, String, Date, ForeignKey, Float, Boolean, DateTime
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Product(Base):
__tablename__ = 'products'
id = Column(Integer, primary_key=True)
name = Column(String)
price = Column(Float)
picture_link = Column(String)
class Cart(Base):
__tablename__ = 'carts'
    id = Column(Integer, primary_key=True)
id_cart = Column(Integer)
name_cart = Column(String)
price_cart = Column(Float)
|
[
"myname21@meet.mit.edu"
] |
myname21@meet.mit.edu
|
2c5e1e78f81d3c4f9a9b47f685b834307e66f8aa
|
7a9f6e01c0450173a0a45bd70816a4be38021eda
|
/cliente/migrations/0009_auto_20151117_2109.py
|
a5f6d71b2be3d0455b483d4a777ab7235a44b6d6
|
[] |
no_license
|
ryujiin/lovizdigital
|
6137726349e6bd1de866054ce37de90f783a3b38
|
9f14c83b976e1e47a2558b508396139145b67bf2
|
refs/heads/master
| 2021-01-10T03:54:41.460708
| 2016-10-14T02:10:32
| 2016-10-14T02:10:32
| 45,559,176
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cliente', '0008_auto_20151117_2054'),
]
operations = [
migrations.AlterField(
model_name='comentario',
name='producto',
field=models.ForeignKey(blank=True, to='catalogo.Producto', null=True),
),
]
|
[
"ryujiin22@gmail.com"
] |
ryujiin22@gmail.com
|
b682efa36350fb281b200657223293f02c8be285
|
d6ca0b326f1bd0ce381c6db611f6331096bf4187
|
/examples/example_18_many_runs.py
|
e04e292092de98147577f3188ad7b98029be7b6e
|
[
"BSD-3-Clause"
] |
permissive
|
SmokinCaterpillar/pypet
|
aa35355d70e8f44be015313494376d993f645d80
|
3d454ac65f89e7833baaf89510f73c546e90d8f6
|
refs/heads/develop
| 2023-08-08T16:01:54.087819
| 2023-02-14T14:59:32
| 2023-02-14T14:59:32
| 12,901,526
| 89
| 22
|
BSD-3-Clause
| 2023-07-24T00:46:12
| 2013-09-17T17:06:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,415
|
py
|
"""Exploring more than 20000 runs may slow down *pypet*.
HDF5 has problems handling nodes with more than 10000 children.
To overcome this problem, simply group your runs into buckets or sets
using the `$set` wildcard.
"""
__author__ = 'Robert Meyer'
import os # To allow file paths working under Windows and Linux
from pypet import Environment
from pypet.utils.explore import cartesian_product
def multiply(traj):
"""Example of a sophisticated simulation that involves multiplying two values."""
z = traj.x * traj.y
# Since we perform many runs we will group results into sets of 1000 each
# using the `$set` wildcard
traj.f_add_result('$set.$.z', z, comment='Result of our simulation '
'sorted into buckets of '
'1000 runs each!')
def main():
# Create an environment that handles running
filename = os.path.join('hdf5','example_18.hdf5')
env = Environment(trajectory='Multiplication',
filename=filename,
file_title='Example_18_Many_Runs',
overwrite_file=True,
comment='Contains many runs',
multiproc=True,
use_pool=True,
freeze_input=True,
ncores=2,
wrap_mode='QUEUE')
# The environment has created a trajectory container for us
traj = env.trajectory
# Add both parameters
traj.f_add_parameter('x', 1, comment='I am the first dimension!')
traj.f_add_parameter('y', 1, comment='I am the second dimension!')
# Explore the parameters with a cartesian product, yielding 2500 runs
traj.f_explore(cartesian_product({'x': range(50), 'y': range(50)}))
# Run the simulation
env.run(multiply)
# Disable logging
env.disable_logging()
# turn auto loading on, since results have not been loaded, yet
traj.v_auto_load = True
# Use the `v_idx` functionality
traj.v_idx = 2042
print('The result of run %d is: ' % traj.v_idx)
# Now we can rely on the wildcards
print(traj.res.crunset.crun.z)
traj.v_idx = -1
# Or we can use the shortcuts `rts_X` (run to set) and `r_X` to get particular results
print('The result of run %d is: ' % 2044)
print(traj.res.rts_2044.r_2044.z)
if __name__ == '__main__':
main()
|
[
"robert.meyer@ni.tu-berlin.de"
] |
robert.meyer@ni.tu-berlin.de
|
1eda218ffa2e3edd8b517b2eb124e9f95f1996c4
|
c67831f476cb530fc0c26e0bf4258ce18e986749
|
/backend/opsbot/command/argfilter/__init__.py
|
f929fa7ce2f64581dcd0d1bd5e2a4709dad4ffb0
|
[
"MIT"
] |
permissive
|
cz-qq/bk-chatbot
|
a3ce4b86452b3de0ff35430c1c85b91d6b23a3e6
|
da37fb2197142eae32158cdb5c2b658100133fff
|
refs/heads/master
| 2023-06-05T05:48:22.083008
| 2021-06-15T10:21:30
| 2021-06-15T10:21:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 931
|
py
|
"""
TencentBlueKing is pleased to support the open source community by making
蓝鲸智云PaaS平台社区版 (BlueKing PaaSCommunity Edition) available.
Copyright (C) 2017-2018 THL A29 Limited,
a Tencent company. All rights reserved.
Licensed under the MIT License (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from typing import Optional
from opsbot.self_typing import Message_T
class ValidateError(ValueError):
def __init__(self, message: Optional[Message_T] = None):
self.message = message
|
[
"123@qq.com"
] |
123@qq.com
|
fc124f8b4453ed0ea2d82b9e548da280ea7e856d
|
0cd09f64f7d42f60167c688a959ab1b4eec62caf
|
/sources/t06/t06ej03.py
|
6e6fa34ed09a94cfe12c5ef1749df95a040a47e9
|
[
"MIT"
] |
permissive
|
workready/pythonbasic
|
3d438250b2fce6b6d243f2a8a1f8c5ccc9734d8c
|
59bd82caf99244f5e711124e1f6f4dec8de22141
|
refs/heads/master
| 2022-10-14T09:20:30.160865
| 2020-06-10T09:22:51
| 2020-06-10T09:22:51
| 270,270,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
# We implement a constructor for our class, and inside the constructor we assign attributes.
class MiClase:
def __init__(self, x, y):
        # self.x and self.y belong to the instance; they are not shared
self.x = x
self.y = y
c = MiClase(7, 12)
print(c.x, c.y)
|
[
"jorgeas80@tuta.io"
] |
jorgeas80@tuta.io
|
7a1a3bafcd8974fdc513d298fc7f66943334e152
|
974d04d2ea27b1bba1c01015a98112d2afb78fe5
|
/test/legacy_test/test_mv_op.py
|
14a4ada5727e896eb87ca2a3359b5f9f4e760a81
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle
|
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
|
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
|
refs/heads/develop
| 2023-08-17T21:27:30.568889
| 2023-08-17T12:38:22
| 2023-08-17T12:38:22
| 65,711,522
| 20,414
| 5,891
|
Apache-2.0
| 2023-09-14T19:20:51
| 2016-08-15T06:59:08
|
C++
|
UTF-8
|
Python
| false
| false
| 3,850
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from eager_op_test import OpTest
import paddle
from paddle.static import Program, program_guard
class TestMVOp(OpTest):
def setUp(self):
self.op_type = "mv"
self.python_api = paddle.mv
self.init_config()
self.inputs = {'X': self.x, 'Vec': self.vec}
self.outputs = {'Out': np.dot(self.x, self.vec)}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Vec'], 'Out')
def init_config(self):
self.x = np.random.random((2, 100)).astype("float64")
self.vec = np.random.random(100).astype("float64")
class TestMVAPI(unittest.TestCase):
def test_dygraph_api_out(self):
paddle.disable_static()
self.x_data = np.random.random((5, 100)).astype("float64")
self.x = paddle.to_tensor(self.x_data)
self.vec_data = np.random.random(100).astype("float64")
self.vec = paddle.to_tensor(self.vec_data)
z = paddle.mv(self.x, self.vec)
np_z = z.numpy()
z_expected = np.array(np.dot(self.x_data, self.vec_data))
np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
paddle.enable_static()
def test_static_graph(self):
for x_stop_gradient in [False, True]:
for vec_stop_gradient in [False, True]:
paddle.enable_static()
train_program = Program()
startup_program = Program()
self.input_x = np.random.rand(5, 100).astype("float64")
self.input_vec = np.random.rand(100).astype("float64")
with program_guard(train_program, startup_program):
data_x = paddle.static.data(
"x", shape=[5, 100], dtype="float64"
)
data_vec = paddle.static.data(
"vec", shape=[100], dtype="float64"
)
data_x.stop_gradient = x_stop_gradient
data_vec.stop_gradient = vec_stop_gradient
result_vec = paddle.mv(data_x, data_vec)
self.place = paddle.CPUPlace()
exe = paddle.static.Executor(self.place)
(res,) = exe.run(
feed={"x": self.input_x, "vec": self.input_vec},
fetch_list=[result_vec],
)
z_expected = np.array(np.dot(self.input_x, self.input_vec))
np.testing.assert_allclose(res, z_expected, rtol=1e-05)
class TestMVError(unittest.TestCase):
def test_input(self):
def test_shape():
paddle.enable_static()
self.input_x = np.random.rand(5, 100).astype("float64")
self.input_vec = np.random.rand(100).astype("float64")
data_x = paddle.static.data("x", shape=[5, 100], dtype="float64")
data_vec = paddle.static.data(
"vec", shape=[100, 2], dtype="float64"
)
result_vec = paddle.mv(data_x, data_vec)
self.assertRaises(ValueError, test_shape)
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
a80e5a089f37600ba430f6f29761a5e3e3bc6a52
|
4d4c197c49172549514af7845f2429772d0158c7
|
/message/migrations/0003_delete_user_account.py
|
6a1e9ea6b0bb8c49d85c29853a9aa0745e36b8ee
|
[] |
no_license
|
vigneshhari/djangolife
|
787147ca4195a9a066bf7fdf2f389435afc6cc0b
|
1e3b7a6516e1b4fbb98117abec4fa166e6747250
|
refs/heads/master
| 2021-01-10T02:00:44.300142
| 2017-06-29T02:21:19
| 2017-06-29T02:21:19
| 48,617,422
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('message', '0002_auto_20151206_1835'),
]
operations = [
migrations.DeleteModel(
name='User_Account',
),
]
|
[
"vichuhari100@gmail.com"
] |
vichuhari100@gmail.com
|
32ded32756b1b4f4f83fa1014ca01a88bfcc0928
|
1131198c6d53eed5aeacb8af7cfd5e4664f924e5
|
/suggestion_baselines/HRED-qs/multi_bleu.py
|
bc4f5c96ca8b749f2fd68872137f47f4ad8213ed
|
[
"MIT"
] |
permissive
|
polaris79/mnsrf_ranking_suggestion
|
d9f2a889e1ccd7f9993594ac212b3a2853f1b7eb
|
5bd241fb49f08fa4937539991e12e5a502d5a072
|
refs/heads/master
| 2020-03-11T23:15:24.019548
| 2018-04-14T16:41:07
| 2018-04-14T16:41:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,302
|
py
|
from functools import reduce
from math import exp, log
from collections import Counter
def ngram_count(words, n):
if n <= len(words):
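        # zip over n shifted copies of the word list yields the successive n-grams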
return Counter(zip(*[words[i:] for i in range(n)]))
return Counter()
def max_count(c1, c2):
    return Counter({k: max(c1[k], c2[k]) for k in set(c1) | set(c2)})
def min_count(c1, c2):
return Counter({k: min(c1[k], c2[k]) for k in c1})
def closest_min_length(candidate, references):
l0 = len(candidate)
return min((abs(len(r) - l0), len(r)) for r in references)[1]
def safe_log(n):
if n <= 0:
return -9999999999
return log(n)
def precision_n(candidate, references, n):
ref_max = reduce(max_count, [ngram_count(ref, n) for ref in references])
candidate_ngram_count = ngram_count(candidate, n)
total = sum(candidate_ngram_count.values())
correct = sum(reduce(min_count, (ref_max, candidate_ngram_count)).values())
score = (correct / total) if total else 0
return score, correct, total
def bleu(candidate, references, maxn=4):
precs = [precision_n(candidate, references, n) for n in range(1, maxn + 1)]
bp = exp(1 - closest_min_length(candidate, references) / len(candidate))
    return bp * exp(sum(safe_log(precs[n][0]) for n in range(maxn)) / maxn)
def tokenize(txt):
return txt.strip().split()
def tokenize_lower(txt):
return txt.strip().lower().split()
def multi_bleu(candidates, all_references, tokenize_fn=tokenize, maxn=4):
correct = [0] * maxn
total = [0] * maxn
cand_tot_length = 0
ref_closest_length = 0
for candidate, reference in zip(candidates, all_references):
candidate = tokenize_fn(candidate)
reference = [tokenize_fn(reference)]
cand_tot_length += len(candidate)
ref_closest_length += closest_min_length(candidate, reference)
for n in range(maxn):
sc, cor, tot = precision_n(candidate, reference, n + 1)
correct[n] += cor
total[n] += tot
precisions = [(correct[n] / total[n]) if correct[n] else 0 for n in range(maxn)]
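    # Brevity penalty as in Papineni et al. (2002): exp(1 - r/c) when the total
    # candidate length c is shorter than the closest reference length r, else 1.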
if cand_tot_length < ref_closest_length:
brevity_penalty = exp(1 - ref_closest_length / cand_tot_length)
else:
brevity_penalty = 1
score = 100 * brevity_penalty * exp(
sum(safe_log(precisions[n]) for n in range(maxn)) / maxn)
prec_pc = [100 * p for p in precisions]
return score, prec_pc, brevity_penalty, cand_tot_length, ref_closest_length
def print_multi_bleu(candidates, all_references, tokenize_fn=tokenize, maxn=4):
score, precisions, brevity_penalty, cand_tot_length, ref_closest_length = multi_bleu(candidates, all_references,
tokenize_fn, maxn)
print("BLEU = {:.2f}, {:.1f}/{:.1f}/{:.1f}/{:.1f} "
"(BP={:.3f}, ratio={:.3f}, hyp_len={:d}, ref_len={:d})".format(
score, precisions[0], precisions[1], precisions[2], precisions[3],
brevity_penalty, cand_tot_length / ref_closest_length, cand_tot_length,
ref_closest_length))
if __name__ == "__main__":
candidates = ['my name']
all_references = ['your name']
print_multi_bleu(candidates, all_references)
|
[
"wasiahmad@ucla.edu"
] |
wasiahmad@ucla.edu
|
bf846570cdfda344627ed0134bc3cfe19b69e3b6
|
531be8455556ce2b1e171f71eb040fddd7eb7522
|
/Chapter_4_5/gifts_calc.py
|
15433eb8c94439a333675850b421dfd031bb4e4f
|
[] |
no_license
|
tanc7/ACC_410_Tax_Calculator_Project
|
8a8f206268b0fb69872d0cc7191e3e69a299dee6
|
b6684f8a307f948dee653b7a81457d144866ba11
|
refs/heads/master
| 2021-01-23T16:19:55.737386
| 2017-07-11T00:32:28
| 2017-07-11T00:32:28
| 93,293,406
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,517
|
py
|
from termcolor import colored
import os
import sys
import socket
import StringIO
import time
import operator
# os.chdir('/root/Documents/ACC_410_Exam_Calculator_Project')
os.chdir('/root/Documents/ACC_410_Exam_Calculator_Project/Chapter_4_5')
def red(string):
string = colored(string,'red',attrs=['bold'])
print string
return string
def green(string):
string = colored(string,'green',attrs=['bold'])
print string
return string
def yellow(string):
string = colored(string,'yellow',attrs=['bold'])
print string
return string
def cyan(string):
string = colored(string,'cyan',attrs=['bold'])
print string
return string
def go_back_main_menu_module():
os.system('python /root/Documents/ACC_410_Exam_Calculator_Project/Chapter_4_5/main.py')
return
def gift_basis_stock():
timestr = time.strftime("%Y%m%d-%H%M%S")
donor_cost = float(raw_input(yellow('Enter the cost of the donor bought stock: ')).replace(',',''))
donor_fmv = float(raw_input(yellow('Enter the FMV of the stock currently, prior to being donated: ')).replace(',',''))
donor_holding_period = float(raw_input(yellow('Enter the holding period in months of the donated stock until the date of donation: ')).replace(',',''))
if donor_fmv < donor_cost:
donee_loss_basis = donor_fmv
donee_gain_basis = donor_cost
donee_holding_period = 'Date of receipt of the gift'
    else:  # donor_fmv >= donor_cost: carryover basis and holding period
donee_gain_basis = donor_cost
donee_loss_basis = donor_cost
donee_holding_period = donor_holding_period
string_GAIN_basis = "Recipient's GAIN BASIS: " + str(donee_gain_basis)
green(string_GAIN_basis)
string_LOSS_basis = "Recipient's LOSS BASIS: " + str(donee_loss_basis)
yellow(string_LOSS_basis)
string_HOLDING_period = "Recipient's HOLDING PERIOD: " + str(donee_holding_period)
cyan(string_HOLDING_period)
saved_answer = './solutions/gift_calc_stock' + timestr + '.csv'
w = open(saved_answer,'a+')
w.write(string_GAIN_basis + '\n')
w.write(string_LOSS_basis + '\n')
w.write(string_HOLDING_period + '\n')
w.close()
main()
return
def depreciable_gift_property():
timestr = time.strftime("%Y%m%d-%H%M%S")
donor_basis = float(raw_input(yellow('Enter the DONOR basis in the donated property: ')).replace(',',''))
donor_fmv = float(raw_input(yellow('Enter the DONOR FMV in the donated property: ')).replace(',',''))
donor_useful_life = float(raw_input(yellow('Enter the DONOR remaining USEFUL LIFE in the donated property in YEARS: ')).replace(',',''))
    salvage_value = raw_input(yellow('Enter the anticipated SALVAGE VALUE if any: ')).replace(',', '')
donee_basis = donor_basis
donee_useful_life = donor_useful_life
if donor_basis < donor_fmv:
donee_loss_basis = donor_basis
donee_gain_basis = donor_fmv
elif donor_basis > donor_fmv:
donee_loss_basis = donor_fmv
donee_gain_basis = donor_basis
    if salvage_value == '':
        salvage_value = 0.0
    else:
        salvage_value = float(salvage_value)
        if salvage_value < 0:
            red('Error, salvage value cannot be negative')
            depreciable_gift_property()
            return
annual_depreciation_straight_line = (donee_basis - salvage_value) / donee_useful_life
years_passed = float(raw_input(yellow('Enter the number of YEARS that have passed: ')))
donee_loss_basis = donee_loss_basis - (annual_depreciation_straight_line * years_passed)
donee_gain_basis = donee_gain_basis - (annual_depreciation_straight_line * years_passed)
if donee_loss_basis <= 0:
donee_loss_basis = 0
if donee_gain_basis <= 0:
donee_gain_basis = 0
string_GAIN_basis = "DONEE'S GAIN BASIS: " + str(donee_gain_basis)
string_LOSS_basis = "DONEE'S LOSS BASIS: " + str(donee_loss_basis)
# string_HOLDING_period = "DONEE'S HOLDING PERIOD: " + str(donee_holding_period)
green(string_GAIN_basis)
red(string_LOSS_basis)
saved_answer = './solutions/depreciable_gift_property_solution' + timestr + '.csv'
w = open(saved_answer,'a+')
w.write(string_GAIN_basis + '\n')
w.write(string_LOSS_basis + '\n')
w.close()
main()
return
def deathbed_gifts(): # this one requires more reading to fully understand
    timestr = time.strftime("%Y%m%d-%H%M%S")
    offspring_donor_fmv = float(raw_input(yellow('Enter the FMV from the donor offspring: ')).replace(',',''))
offspring_donor_cost = float(raw_input(yellow('Enter the COST of the donated property to the donor: ')).replace(',',''))
offspring_donor_basis = float(raw_input(yellow('Enter the BASIS of the donated property from the donor: ')).replace(',',''))
donee_elder_time_living = float(raw_input(yellow('Enter the amount of years between when the donee lived and died (while holding the donated property): ')).replace(',',''))
donee_elder_fmv_death = float(raw_input(yellow('Enter the FMV of the property at time of death: ')).replace(',',''))
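    # The one-year exception below appears to follow IRC sec. 1014(e): if the
    # donee dies within a year of receiving the gift and it passes back to the
    # donor, the basis stays at the donor's basis rather than stepping up to
    # FMV at the date of death.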
    one_year_exception = 0 < donee_elder_time_living <= 1
    if not one_year_exception:
        offspring_donor_basis = donee_elder_fmv_death
    improvements_to_property_before_death = raw_input(yellow('Enter the amounts of any improvements made to the property by the elder before death (and after receiving the donated property): ')).replace(',','')
    if improvements_to_property_before_death == '':
        improvements_to_property_before_death = 0.0
    else:
        improvements_to_property_before_death = float(improvements_to_property_before_death)
offspring_donor_basis = offspring_donor_basis + improvements_to_property_before_death
string_offspring_basis_after_bequeath = 'NEW BASIS FOR OFFSPRING UPON RECEIVING LAND BACK AFTER DONEE DIED: ' + str(offspring_donor_basis)
green(string_offspring_basis_after_bequeath)
saved_answer = './solutions/deathbed_gifts_solution' + timestr + '.csv'
w = open(saved_answer,'a+')
w.write(string_offspring_basis_after_bequeath + '\n')
w.close()
main()
return
def main():
print """
# 1. Gift basis on stock
# 2. Depreciable gift property
# 3. Deathbed gifts
"""
    opt_choice = float(raw_input(yellow('Enter an OPTION: ')))
if opt_choice == 0:
go_back_main_menu_module()
elif opt_choice == 1:
gift_basis_stock()
elif opt_choice == 2:
depreciable_gift_property()
elif opt_choice == 3:
deathbed_gifts()
else:
        red('You have entered an invalid option')
main()
return
main()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
e774524468621c8b62228ad9b5e4bf70b776f035
|
2c872fedcdc12c89742d10c2f1c821eed0470726
|
/pbase/day06/jiangyi/day06/day05_exercise/narcissistic.py
|
cbe324dd1bb5e88bff78735f37f05917d091fbdb
|
[] |
no_license
|
zuigehulu/AID1811
|
581c3c7a37df9fa928bc632e4891fc9bafe69201
|
10cab0869875290646a9e5d815ff159d0116990e
|
refs/heads/master
| 2020-04-19T16:33:04.174841
| 2019-01-30T07:58:24
| 2019-01-30T07:58:24
| 168,307,918
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 946
|
py
|
# 4. Find the narcissistic numbers in the range 100 ~ 999.
# A narcissistic number is an integer that equals the cube of its hundreds
# digit + the cube of its tens digit + the cube of its units digit.
# e.g.:
#     153 = 1**3 + 5**3 + 3**3
# Answer:
#     153, 370, ....
# Method 1
# for x in range(100, 1000):
#     bai = x // 100  # hundreds digit
#     shi = x % 100 // 10  # tens digit
#     ge = x % 10  # units digit
#     if x == bai ** 3 + shi ** 3 + ge ** 3:
#         print(x)
# Method 2
# for x in range(100, 1000):
#     s = str(x)  # convert to a string
#     bai = int(s[0])  # hundreds digit
#     shi = int(s[1])  # tens digit
#     ge = int(s[2])  # units digit
#     if x == bai ** 3 + shi ** 3 + ge ** 3:
#         print(x)
# Method 3
for bai in range(1, 10):
for shi in range(0, 10):
for ge in range(0, 10):
# print(bai, shi, ge)
x = bai * 100 + shi * 10 + ge
if x == bai ** 3 + shi ** 3 + ge ** 3:
print(x)
|
[
"442315617@qq.com"
] |
442315617@qq.com
|
54857f8ce1335c730dd6913435514bdd95f0ec4d
|
6c1a3dc849b1d84271caad0133387c7001a9704f
|
/Sep05/client01.py
|
9c05f4d7af4e730c40cfc31f06a6225b9e242ab8
|
[] |
no_license
|
tnaswin/PythonPractice
|
f6207a4cf560b45c09af2f82d7365d4f0d16afaf
|
8c20fa35bdf65aaf8ec899c217c10ffc7d4d3d64
|
refs/heads/master
| 2020-06-11T08:53:35.190582
| 2019-06-26T13:41:18
| 2019-06-26T13:41:18
| 193,908,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,149
|
py
|
from __future__ import print_function
from twisted.internet import task
from twisted.internet.defer import Deferred
from twisted.internet.protocol import ClientFactory
from twisted.protocols.basic import LineReceiver
class EchoClient(LineReceiver):
end = "End!"
def connectionMade(self):
self.sendLine("Hello")
self.sendLine("World!")
self.sendLine(self.end)
def lineReceived(self, line):
print("receive:", line)
if line == self.end:
self.transport.loseConnection()
class EchoClientFactory(ClientFactory):
protocol = EchoClient
def __init__(self):
self.done = Deferred()
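        # task.react() in main() waits on this Deferred; it fires (or errbacks)
        # when the connection is lost or fails.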
def clientConnectionFailed(self, connector, reason):
print('connection failed:', reason.getErrorMessage())
self.done.errback(reason)
def clientConnectionLost(self, connector, reason):
print('connection lost:', reason.getErrorMessage())
self.done.callback(None)
def main(reactor):
factory = EchoClientFactory()
reactor.connectTCP('localhost', 8000, factory)
return factory.done
if __name__ == '__main__':
task.react(main)
|
[
"aswin@lintel.in"
] |
aswin@lintel.in
|
af6a652fa0f874ec7d1a8c91a4a5b77365f67462
|
b8085ef607da70023214f105eb27bdbc713e596f
|
/Day2/Async3.py
|
7774674800fd4e6e93ddc3f94d67f879c4b88f34
|
[] |
no_license
|
artheadsweden/python_adv_april19
|
893c9ec76e8505a580439b7a2fd7aa2776503c77
|
04eecd25d4a291dddd608d94968b217fed7b88d8
|
refs/heads/master
| 2020-05-07T13:41:15.545033
| 2019-04-11T18:47:22
| 2019-04-11T18:47:22
| 180,559,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 579
|
py
|
import asyncio
import random
async def my_other(id):
process_time = random.randint(1, 5)
await asyncio.sleep(process_time)
print(f"Coroutine {id}, has successfully completed after {process_time} seconds")
async def my_coroutine():
tasks = []
for i in range(10):
tasks.append(asyncio.ensure_future(my_other(i)))
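    # all ten coroutines now run concurrently; gather() waits for the last one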
await asyncio.gather(*tasks)
print("All done")
def main():
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(my_coroutine())
finally:
loop.close()
if __name__ == '__main__':
main()
|
[
"joakim@arthead.se"
] |
joakim@arthead.se
|
b5dd0646b1b2c4b271da16df28f9fa635ae98a8c
|
6162b166a93c60677b97b91c1f07be1511fd05e2
|
/catkin_ws/src/jackal_hunt_rabbit/scripts/output.py
|
ae97f3dd5df85b938f61ff6c608095446704d1bb
|
[] |
no_license
|
SiChiTong/fuzzy-eureka
|
d7c540e4349621097ee861e7337488ba46a2c718
|
61e2075cfd99520e0fb689e47aa73b3d43481f18
|
refs/heads/master
| 2021-08-24T09:04:28.384368
| 2017-12-08T23:30:23
| 2017-12-08T23:30:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 912
|
py
|
#!/usr/bin/env python
import cv2
import numpy as np
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.namedWindow("ouput", flags = cv2.WINDOW_NORMAL)
img = np.zeros((512, 2400, 3), np.uint8)
def draw_egg(color, count, x, y):
cv2.ellipse(img, (x, y), (140, 200), 0, 0, 360, color, -1)
cv2.putText(img, count, (x - 80, y + 100), font, 8, (255, 255, 255), 12)
def show_output(eggs):
draw_egg((255, 0, 255), str(eggs[0]), 200, 250) # Magenta
draw_egg((0, 165, 255), str(eggs[1]), 600, 250) # Orange
draw_egg((0, 215, 255), str(eggs[2]), 1000, 250) # Yellow
draw_egg((0, 255, 0), str(eggs[3]), 1400, 250) # Green
draw_egg((255, 80, 0), str(eggs[4]), 1800, 250) # Blue
draw_egg((220, 40, 140), str(eggs[5]), 2200, 250) # Purple
    cv2.imshow('output', img)
    cv2.resizeWindow('output', 2400, 512)
    cv2.moveWindow('output', 1125, 250)
key = cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"spartanhaden@gmail.com"
] |
spartanhaden@gmail.com
|
a149725c1bd9437f884bb652f73407173011b1b8
|
8e3a27091d51c3fd9681f5caf0534a0d3a36c3ff
|
/setup.py
|
d6e064fc1b06aca13b6baa6cd70dd4d6360354cb
|
[
"MIT"
] |
permissive
|
DES-SL/EasyLens
|
c5c5d9dc5af7d6495027f7cfe51cdf48c0a098de
|
97673d65abc00b945e7c6332e465c1d08fcb09a9
|
refs/heads/master
| 2020-04-06T07:09:07.342981
| 2016-11-08T10:47:20
| 2016-11-08T10:47:20
| 58,768,462
| 1
| 0
| null | 2016-05-20T06:19:18
| 2016-05-13T19:51:37
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,058
|
py
|
#!/usr/bin/env python
import os
import sys
from setuptools.command.test import test as TestCommand
from setuptools import find_packages
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
readme = open('README.rst').read()
doclink = """
Documentation
-------------
The full documentation can be generated with Sphinx"""
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requires = [] #during runtime
tests_require=['pytest>=2.3'] #for testing
PACKAGE_PATH = os.path.abspath(os.path.join(__file__, os.pardir))
setup(
name='easylens',
version='0.1.0',
description='Software package for modeling strong lens systems in the Dark Energy Survey data.',
long_description=readme + '\n\n' + doclink + '\n\n' + history,
author='Simon Birrer',
author_email='simon.birrer@phys.ethz.ch',
url='https://github.com/DES-SL/EasyLens',
packages=find_packages(PACKAGE_PATH, "test"),
package_dir={'EasyLens': 'EasyLens'},
include_package_data=True,
install_requires=requires,
    license='MIT',
zip_safe=False,
keywords='EasyLens',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
"Intended Audience :: Science/Research",
'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
tests_require=tests_require,
cmdclass = {'test': PyTest},
)
|
[
"simon.birrer@pyhs.ethz.ch"
] |
simon.birrer@pyhs.ethz.ch
|
2b3018fd53876b1bd94dc8c35c89202502fb585b
|
13131e0e4805aa48bf64647f5da666e2e72dab9a
|
/misc/aggregate_logs_and_stats.py
|
1b29c5437f03387cef5aede4d88bb57b7142ffbc
|
[] |
no_license
|
m-bain/collaborative-experts
|
4ae6632f0ec36b612b768048b2daa623d8b4c385
|
3a224ecad6fe36722112181c3ac48f918a799081
|
refs/heads/master
| 2021-01-08T19:43:14.689074
| 2020-02-16T06:05:06
| 2020-02-16T06:05:06
| 242,124,924
| 1
| 0
| null | 2020-02-21T11:35:41
| 2020-02-21T11:35:40
| null |
UTF-8
|
Python
| false
| false
| 2,161
|
py
|
"""Aggregate logs across multiple seeded runs and summarise their statistics.
"""
import argparse
import logging
from pathlib import Path
from collections import OrderedDict
from utils.util import read_json
from logger.log_parser import log_summary
def summarise(group_id, log_dir="data/saved/log", model_dir="data/saved/models"):
seeded_runs = sorted(list(Path(log_dir).glob(f"**/{group_id}/seed-*")))
print(f"Found a total of {len(seeded_runs)} seed runs in {group_id}")
info_logs = OrderedDict()
for seeded_run in seeded_runs:
info_log_matches = list(Path(seeded_run).glob("**/info.log"))
msg = f"expected to find a single info.log file, found {len(info_log_matches)}"
assert len(info_log_matches) == 1, msg
info_logs[seeded_run.stem] = info_log_matches[0]
summary_log = []
for seeded_run, info_log_path in info_logs.items():
with open(info_log_path, "r") as f:
log = f.read().splitlines()
summary_log.extend(log)
first_info_log = list(info_logs.values())[0]
summary_log_name = f"summary-{'_'.join(list(info_logs.keys()))}.json"
summary_log_path = first_info_log.parent / summary_log_name
with open(summary_log_path, "w") as f:
f.write("\n".join(summary_log))
print(f"Wrote summary log to {summary_log_path}")
# retrieve the config from the first run
rel_path = first_info_log.relative_to(log_dir).parent
config_path = Path(model_dir) / rel_path / "config.json"
assert config_path.exists(), f"Could not find config at {config_path}"
config = read_json(config_path)
logger = logging.getLogger("summary")
logging.basicConfig(filename=summary_log_path, level=logging.INFO)
logger.addHandler(logging.StreamHandler())
log_summary(
logger=logger,
log_path=summary_log_path,
eval_mode=config["eval_mode"],
fixed_num_epochs=config["trainer"]["epochs"],
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--group_id", default="ed53d01d")
args = parser.parse_args()
summarise(group_id=args.group_id)
if __name__ == '__main__':
main()
|
[
"albanie@robots.ox.ac.uk"
] |
albanie@robots.ox.ac.uk
|
6b39dee81dd9f2c3d18cd4ad5ec017627ab539e7
|
8fcc27160f8700be46296568260fa0017a0b3004
|
/client/spacecomponents/server/eventLogger.py
|
fb49f52c4e8741c49e6cc178d8184b066a6f509d
|
[] |
no_license
|
connoryang/dec-eve-serenity
|
5d867f4eedfa896a4ef60f92556356cafd632c96
|
b670aec7c8b4514fc47cd52e186d7ccf3aabb69e
|
refs/heads/master
| 2021-01-22T06:33:16.303760
| 2016-03-16T15:15:32
| 2016-03-16T15:15:32
| 56,389,750
| 1
| 0
| null | 2016-04-16T15:05:24
| 2016-04-16T15:05:24
| null |
UTF-8
|
Python
| false
| false
| 1,022
|
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\spacecomponents\server\eventLogger.py
from eveexceptions.exceptionEater import ExceptionEater
EVENT_DECAYED = 'spacecomponent::decay_Decayed'
EVENT_BECOMEACTIVE = 'spacecomponent::activate_BecomeActive'
class EventLogger(object):
def __init__(self, eventLog, solarSystemID):
self.eventLog = eventLog
self.solarSystemID = solarSystemID
def LogDecayed(self, item):
self.LogItemAndTypeOwnerEvent(EVENT_DECAYED, item)
def LogBecomeActive(self, item):
self.LogItemAndTypeOwnerEvent(EVENT_BECOMEACTIVE, item)
def LogItemAndTypeOwnerEvent(self, eventName, item):
with ExceptionEater('eventLog'):
self.eventLog.LogOwnerEvent(eventName, item.ownerID, self.solarSystemID, item.itemID, item.typeID)
self.eventLog.LogOwnerEventJson(eventName, item.ownerID, self.solarSystemID, componentItemID=item.itemID, componentTypeID=item.typeID)
|
[
"masaho.shiro@gmail.com"
] |
masaho.shiro@gmail.com
|
ad194816a6379fa3222c48a90e327ef99945ece2
|
a8b17b17f9b2a640013064c50e1cebc27a7a68de
|
/16-statistical-thinking-in-python-pt2/01-parameter-estimation-by-optimization/01-how-often-do-we-get-no-hitters.py
|
8e07aef449d1b4f71f6836c43205c929a66754b4
|
[] |
no_license
|
JohnnyFang/datacamp
|
20eae09752521f14006cb3fda600b10bd7b12398
|
0fa8fa7682c23b0eb07bd03e4b75f5b77aeafa75
|
refs/heads/master
| 2020-04-18T00:27:37.358176
| 2020-02-04T20:54:19
| 2020-02-04T20:54:19
| 167,078,316
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,031
|
py
|
'''
How often do we get no-hitters?
The number of games played between each no-hitter in the modern era (1901-2015) of Major League Baseball is stored in the array nohitter_times.
If you assume that no-hitters are described as a Poisson process, then the time between no-hitters is Exponentially distributed. As you have seen, the Exponential distribution has a single parameter, which we will call τ, the typical interval time. The value of the parameter τ that makes the exponential distribution best match the data is the mean interval time (where time is in units of number of games) between no-hitters.
Compute the value of this parameter from the data. Then, use np.random.exponential() to "repeat" the history of Major League Baseball by drawing inter-no-hitter times from an exponential distribution with the τ you found and plot the histogram as an approximation to the PDF.
NumPy, pandas, matplotlib.pyplot, and seaborn have been imported for you as np, pd, plt, and sns, respectively.
Instructions
Seed the random number generator with 42.
Compute the mean time (in units of number of games) between no-hitters.
Draw 100,000 samples from an Exponential distribution with the parameter you computed from the mean of the inter-no-hitter times.
Plot the theoretical PDF using plt.hist(). Remember to use keyword arguments bins=50, normed=True, and histtype='step'. Be sure to label your axes.
Show your plot.
'''
# Seed random number generator
np.random.seed(42)
# Compute mean no-hitter time: tau
tau = np.mean(nohitter_times)
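# Aside (not part of the original exercise): for an Exponential PDF
# f(t) = (1/tau) * exp(-t/tau), the log-likelihood of samples t_1..t_N is
# -N*log(tau) - sum(t_i)/tau, which is maximized at tau = mean(t_i); the
# sample mean above is therefore the maximum-likelihood estimate of tau.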
# Draw out of an exponential distribution with parameter tau: inter_nohitter_time
inter_nohitter_time = np.random.exponential(tau, 100000)
# Plot the PDF and label axes
_ = plt.hist(inter_nohitter_time,
bins=50, normed=True, histtype='step')
_ = plt.xlabel('Games between no-hitters')
_ = plt.ylabel('PDF')
# Show the plot
plt.show()
# We see the typical shape of the Exponential distribution, going from a maximum at 0 and decaying to the right.
|
[
"fangdejavu@gmail.com"
] |
fangdejavu@gmail.com
|
6bf01c4d7cdaa26a436edc642c3871f8a1df6b49
|
9be5b6259e4db9a9386d5e6eea59bfb4ed4ccdbd
|
/liberapay/notifications/web.py
|
d3677868d705e6ac8647ec0f6c6f06ec7f41dc0b
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
Changaco/liberapay.com
|
b3e040ed24d47a6ebccdd0b2285526f02b4103cc
|
4d134508c911f23478e80b8d8ff62223b866bb5e
|
refs/heads/master
| 2021-01-16T21:47:57.475734
| 2015-09-02T15:41:50
| 2015-09-02T15:41:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,371
|
py
|
from __future__ import division, print_function, unicode_literals
def withdrawal_failed(_, user, exchange):
href = '/%s/receiving/payout?exchange_id=%s' % (user.username, exchange.id)
return ('danger',
['a',
{'href': href}, _("The transfer to your bank account has failed!"),
]
)
def withdrawal_pending(_, user, exchange, Money):
return ('success',
['span', _("We have initiated a transfer of {0} from your Liberapay wallet to your bank account.",
Money(exchange.amount - exchange.fee, 'EUR'))
]
)
def charge_failed(_, user, exchange, Money):
href = '/%s/giving/payin?exchange_id=%s' % (user.username, exchange.id)
return ('danger',
['a', {'href': href},
_("We tried to charge your credit card {0}, but it failed!",
Money(exchange.amount + exchange.fee, 'EUR'))
]
)
def charge_succeeded(_, user, exchange, Money):
return ('success',
['span', _("We charged your credit card {0} to fund your ongoing donations.",
Money(exchange.amount + exchange.fee, 'EUR'))
]
)
def pledgee_joined(_, user_name, platform, profile_url):
return ('info',
['a',
{'href': profile_url},
_("{0} from {1} has joined Liberapay!", user_name, platform),
]
)
|
[
"changaco@changaco.oy.lc"
] |
changaco@changaco.oy.lc
|
5186b10cd80cab8f8320b0162dfd8881526c443f
|
93a613f09d564a1d45ecc01b54b73745ce2850b7
|
/majora2/management/commands/load_counties.py
|
7e60d9acd4fc67489980ad313b871f818eb9608d
|
[] |
no_license
|
pythseq/majora
|
fa17c77fa8a916c688fd2b40744d768dd851b99b
|
40b918d32b4061cddee5f7279f97e70eb894623d
|
refs/heads/master
| 2022-12-23T20:09:41.233844
| 2020-09-28T18:18:42
| 2020-09-28T18:18:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 574
|
py
|
from django.core.management.base import BaseCommand, CommandError
from majora2 import models
class Command(BaseCommand):
help = "Load a list of counties"
def add_arguments(self, parser):
parser.add_argument('filename')
def handle(self, *args, **options):
        with open(options["filename"]) as fh:
            for line in fh:
                fields = line.strip().split('\t')
                country_code = fields[0]
                name = fields[1]
                c, created = models.County.objects.get_or_create(country_code=country_code, name=name)
                c.save()
|
[
"samstudio8@gmail.com"
] |
samstudio8@gmail.com
|
ca32ac6e38b9a3d4b55c6c625ef3bc8c727a7bb3
|
3c17e189622018329bc0ebd8523eae8db9f3112a
|
/ykdl/util/wrap.py
|
de6d61b463f96d1bc0bb8f01bef4d73c3470f3b0
|
[
"MIT"
] |
permissive
|
YU-zreo/ykdl
|
167c9b8715a1cecf57c18bf60c7da3b22437ad06
|
b59dacd78bcec79d208d7cb86b86fa65428e386a
|
refs/heads/master
| 2020-12-02T12:47:01.113309
| 2017-07-07T12:39:20
| 2017-07-07T12:39:20
| 96,594,712
| 1
| 0
| null | 2017-07-08T03:57:22
| 2017-07-08T03:57:21
| null |
UTF-8
|
Python
| false
| false
| 1,580
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import shlex
from logging import getLogger
logger = getLogger("wrap")
from ykdl.compact import compact_tempfile
def launch_player(player, urls, **args):
if 'mpv' in player:
cmd = shlex.split(player) + ['--demuxer-lavf-o=protocol_whitelist=[file,tcp,http]']
if args["ua"]:
cmd += ["--user-agent={}".format(args["ua"])]
if args["referer"]:
cmd += ["--referrer={}".format(args["referer"])]
cmd += list(urls)
else:
cmd = shlex.split(player) + list(urls)
subprocess.call(cmd)
def launch_ffmpeg(basename, ext, length):
    # build the concat input list for ffmpeg
    inputfile = compact_tempfile(mode='w+t', suffix='.txt', dir='.', encoding='utf-8')
    for i in range(length):
        inputfile.write('file \'%s_%d_.%s\'\n' % (basename, i, ext))
    inputfile.flush()
outputfile = basename+ '.' + ext
cmd = ['ffmpeg','-f', 'concat', '-safe', '-1', '-y', '-i', inputfile.name, '-c', 'copy', '-hide_banner']
if ext == 'mp4':
cmd += ['-absf', 'aac_adtstoasc']
cmd.append(outputfile)
print('Merging video %s using ffmpeg:' % basename)
subprocess.call(cmd)
def launch_ffmpeg_download(url, name, live):
print('Now downloading: %s' % name)
if live:
print('stop downloading by press \'q\'')
cmd = ['ffmpeg', '-y']
if not url.startswith('http'):
cmd += ['-protocol_whitelist', 'file,tcp,http' ]
cmd += ['-i', url, '-c', 'copy', '-absf', 'aac_adtstoasc', '-hide_banner', name]
subprocess.call(cmd)
|
[
"zhangn1985@gmail.com"
] |
zhangn1985@gmail.com
|
5a4d7837248354b88c88b465c3cbd58a4b15c328
|
0b1e404a165c960677d07015bc26aac0569cf84a
|
/src/combustion/nn/activations/swish.py
|
de70fb4f298b010d397300b048a83d7107bb6a6f
|
[
"Apache-2.0"
] |
permissive
|
johndpope/combustion
|
d3ec349cd7be086f55b4e3deebd571c97842e1ed
|
c3f91e62a10a873cfeeae8c675b0683bc5158818
|
refs/heads/master
| 2023-03-01T14:34:42.149415
| 2021-02-07T17:55:58
| 2021-02-13T17:17:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,077
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
# implementation inspired by
# https://github.com/lukemelas/EfficientNet-PyTorch/blob/master/efficientnet_pytorch/utils.py
class _SwishFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
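        # d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)));
        # recomputed from the saved input so the forward output need not be kept.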
for i in ctx.saved_tensors:
sigmoid_i = torch.sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
def swish(inputs: Tensor, memory_efficient: bool = True) -> Tensor:
r"""The swish activation function, defined as
.. math::
f(x) = x \cdot \text{sigmoid}(x)
Args:
inputs (Tensor):
The input tensor
memory_efficient (bool, optional):
Whether or not to use an implementation that is more memory efficient at training
time. When ``memory_efficient=True``, this method is incompatible with TorchScript.
.. warning::
This method is traceable with TorchScript when ``memory_efficient=False``, but is
un-scriptable due to the use of :class:`torch.autograd.Function` for a
memory-efficient backward pass. Please export using :func:`torch.jit.trace` with
``memory_efficient=False``
"""
if memory_efficient:
return _SwishFunction.apply(inputs)
else:
return inputs * torch.sigmoid(inputs)
class Swish(nn.Module):
r"""The swish activation function, defined as
.. math::
f(x) = x \cdot \text{sigmoid}(x)
.. warning::
This method is traceable with TorchScript, but is un-scriptable due to the
use of :class:`torch.autograd.Function` for a memory-efficient backward pass.
Please export using :func:`torch.jit.trace` after calling ``module.eval()``.
"""
@torch.jit.ignore
def _memory_efficient_forward(self, inputs: Tensor) -> Tensor:
return swish(inputs)
    def forward(self, inputs: Tensor) -> Tensor:
        if self.training:
            return self._memory_efficient_forward(inputs)
        else:
            return inputs * torch.sigmoid(inputs)
def hard_swish(inputs: Tensor, inplace: bool = False) -> Tensor:
r"""The hard swish activation function proposed in
`Searching For MobileNetV3`_, defined as
.. math::
f(x) = x \cdot \frac{\text{ReLU6}(x + 3)}{6}
Hard swish approximates the swish activation, but computationally cheaper due to the
removal of :math:`\text{sigmoid}(x)`.
Args:
inputs (Tensor):
The input tensor
inplace (bool, optional):
Whether or not to perform the operation in place.
.. _Searching for MobileNetV3:
https://arxiv.org/abs/1905.02244
"""
if inplace:
return inputs.mul_(F.relu6(inputs + 3, inplace=True).div_(6))
else:
return F.relu6(inputs + 3).div(6).mul(inputs)
class HardSwish(nn.Module):
r"""The hard swish activation function proposed in
`Searching For MobileNetV3`_, defined as
.. math::
f(x) = x \cdot \frac{\text{ReLU6}(x + 3)}{6}
Hard swish approximates the swish activation, but computationally cheaper due to the
removal of :math:`\text{sigmoid}(x)`.
.. image:: ./hswish.png
:width: 600px
:align: center
:height: 300px
:alt: Comparison of Hard Swish and Swish activations.
Args:
inplace (bool, optional):
Whether or not to perform the operation in place.
.. _Searching for MobileNetV3:
https://arxiv.org/abs/1905.02244
"""
def __init__(self, inplace: bool = False):
super().__init__()
self.inplace = inplace
def extra_repr(self):
if self.inplace:
return "inplace=True"
else:
return ""
def forward(self, inputs: Tensor) -> Tensor:
return hard_swish(inputs, self.inplace)
|
[
"tidalpaladin@protonmail.com"
] |
tidalpaladin@protonmail.com
|
f784b3a01ba171e37e1998ab9f4997ed520f1cbe
|
22622d1899ac2a37c66d776e317a7c752b2fb10e
|
/rio/_compat.py
|
3e05ed7366dbb45a1f3df523a9be3de33fa28f95
|
[
"MIT"
] |
permissive
|
soasme/rio
|
64bb3e24580c18951d5eaf84809216785ae35020
|
e6b89634db8d3ad75ac7f7b25ddec5b19d4f66e2
|
refs/heads/master
| 2022-01-23T01:48:17.445050
| 2019-12-30T02:36:50
| 2019-12-30T02:36:50
| 55,581,621
| 0
| 1
|
MIT
| 2019-12-30T02:39:58
| 2016-04-06T06:34:59
|
Python
|
UTF-8
|
Python
| false
| false
| 315
|
py
|
# -*- coding: utf-8 -*-
"""
    rio._compat
~~~~~~~~~~~~~
"""
try:
import cPickle as pickle
except ImportError:
import pickle # noqa
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO # noqa
try:
import simplejson as json
except ImportError:
import json
|
[
"soasme@gmail.com"
] |
soasme@gmail.com
|
c07b1b463ef553ac6896b1ab101ae7c439731385
|
c56ffb7215547b658e6698bc4bbe78fbd0e3330b
|
/3.1 Conditional Statements Advanced - Exercise/02-summerOutfit.py
|
9fe22dd121ed0949664396b2184c89cbc50fc5bd
|
[] |
no_license
|
byAbaddon/Basics-Course-Python-November-2020
|
344646bbb33740d15bec94fd5b5d7cd257df9220
|
c6c17a5cdc29121d706bc7677a61637a9bcefbb1
|
refs/heads/main
| 2023-04-16T01:00:10.000371
| 2023-04-10T20:46:11
| 2023-04-10T20:46:11
| 316,531,468
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
degrees = int(input())
day_time = input()
result = 0
data_dict = {
'm': {'Morning':'Sweatshirt and Sneakers', 'Afternoon': 'Shirt and Moccasins', 'Evening': 'Shirt and Moccasins'},
'a': {'Morning':'Shirt and Moccasins', 'Afternoon': 'T-Shirt and Sandals', 'Evening': 'Shirt and Moccasins'},
'e': {'Morning':'T-Shirt and Sandals', 'Afternoon': 'Swim Suit and Barefoot', 'Evening': 'Shirt and Moccasins'},
}
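# keys 'm', 'a', 'e' select the outfit rows for the 10-18, 19-24, and 25+
# degree bands handled below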
if 10 <= degrees <= 18:
result = data_dict['m'][day_time]
elif 18 < degrees <= 24:
result = data_dict['a'][day_time]
elif degrees >= 25:
result = data_dict['e'][day_time]
print(f"It's {degrees} degrees, get your {result}.")
'''
16
Morning
---------------
16
Afternoon
---------------
22
Afternoon
---------------
28
Evening
'''
|
[
"noreply@github.com"
] |
byAbaddon.noreply@github.com
|
6684bd853d6ef7bafa5052fab25a9722778e5c79
|
a193a941a9f70dd0aa46e7a402265bfff27bb075
|
/tests/codecs/formats/test_wav.py
|
ea60a4ef2fba103e2ff55f42f4466be82d316a33
|
[
"0BSD"
] |
permissive
|
hile/oodi
|
f3c606b5209c2b05e077d9039104df7187ba0b1c
|
f3a758238033c0a511e1ecffbb4b5bfde70efbda
|
refs/heads/main
| 2023-04-13T07:07:03.535176
| 2023-04-10T07:17:44
| 2023-04-10T07:17:44
| 196,691,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 983
|
py
|
"""
Unit tests for oodi.codecs.formats.wav module
"""
from pathlib import Path
from oodi.codecs.constants import CodecFormat
from oodi.codecs.formats.wav import Wav
from .validators import validate_codec_properties, TEST_FILENAME_NO_MATCH
TEST_FILENAME_MATCH = f'test case.{CodecFormat.WAV.value}'
def test_codecs_formats_wav_properties(mock_empty_config):
"""
Test properties of the Wav codec class
"""
validate_codec_properties(Wav(mock_empty_config), mock_empty_config.__path__)
def test_codecs_formats_wav_match_file_no_match(mock_empty_config, tmpdir):
"""
Test matching unexpected filename to wav codec
"""
assert Wav(mock_empty_config).match_file(Path(tmpdir.strpath, TEST_FILENAME_NO_MATCH)) is False
def test_codecs_formats_wav_match_file_matches(mock_empty_config, tmpdir):
"""
Test matching a wav filename to wav codec
"""
assert Wav(mock_empty_config).match_file(Path(tmpdir.strpath, TEST_FILENAME_MATCH)) is True
|
[
"hile@iki.fi"
] |
hile@iki.fi
|
e5071c6b16895e33b46c923d938f1b8ff8361ee1
|
b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1
|
/tensorflow/tools/test/check_futures_test.py
|
f0c4d0d47930e2ca716933f9f677440922daf5ca
|
[
"Apache-2.0"
] |
permissive
|
uve/tensorflow
|
e48cb29f39ed24ee27e81afd1687960682e1fbef
|
e08079463bf43e5963acc41da1f57e95603f8080
|
refs/heads/master
| 2020-11-29T11:30:40.391232
| 2020-01-11T13:43:10
| 2020-01-11T13:43:10
| 230,088,347
| 0
| 0
|
Apache-2.0
| 2019-12-25T10:49:15
| 2019-12-25T10:49:14
| null |
UTF-8
|
Python
| false
| false
| 4,053
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Check that TensorFlow python files have certain __future__ imports.
This makes it easier to find Python 2.7 / Python 3.x incompatibility bugs.
In particular, this test makes it illegal to write a Python file that
doesn't import division from __future__, which can catch subtle division
bugs in Python 3.
Note: We can't use tf.test in this file because it needs to run in an
environment that doesn't include license-free gen_blah_ops.py files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import re
import six
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
FUTURES_PATTERN = re.compile(r'^from __future__ import (\w+)\s*$')
FUTURES_PATTERN_2 = re.compile(
r'^from __future__ import (\w+), (\w+), (\w+)\s*$')
FUTURES_PATTERN_3 = re.compile(r'^from __future__ import (\w+) as \w+\s*$')
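# Three accepted forms: a single import, exactly three comma-separated imports,
# and an aliased import ("from __future__ import x as y").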
REQUIRED_FUTURES = frozenset(['absolute_import', 'division', 'print_function'])
WHITELIST = [
'python/platform/control_imports.py',
'tools/docker/jupyter_notebook_config.py',
'tools/ci_build/update_version.py',
'tools/ci_build/copy_binary.py',
]
# Tests that must *not* import division
OLD_DIVISION = [
'python/framework/tensor_shape_div_test.py',
'python/kernel_tests/division_past_test.py',
]
def check_file(path, old_division):
futures = set()
count = 0
for line in open(path, encoding='utf-8') if six.PY3 else open(path):
count += 1
m = FUTURES_PATTERN.match(line)
if not m:
m = FUTURES_PATTERN_3.match(line)
if m:
futures.add(m.group(1))
else:
m = FUTURES_PATTERN_2.match(line)
if m:
for entry in m.groups():
futures.add(entry)
if not count:
return # Skip empty files
if old_division:
# This file checks correct behavior without importing division
# from __future__, so make sure it's doing that.
expected = set(['absolute_import', 'print_function'])
if futures != expected:
raise AssertionError(('Incorrect futures for old_division file:\n'
' expected = %s\n got = %s') %
(' '.join(expected), ' '.join(futures)))
else:
missing = REQUIRED_FUTURES - futures
if missing:
raise AssertionError('Missing futures: %s' % ' '.join(missing))
def main():
# Make sure BASE_DIR ends with tensorflow. If it doesn't, we probably
# computed the wrong directory.
if os.path.split(BASE_DIR)[-1] != 'tensorflow':
raise AssertionError("BASE_DIR = '%s' doesn't end with tensorflow" %
BASE_DIR)
# Verify that all files have futures
whitelist = frozenset(os.path.join(BASE_DIR, w) for w in WHITELIST)
old_division = frozenset(os.path.join(BASE_DIR, w) for w in OLD_DIVISION)
for root, _, filenames in os.walk(BASE_DIR):
for f in fnmatch.filter(filenames, '*.py'):
path = os.path.join(root, f)
if path not in whitelist:
try:
check_file(path, old_division=path in old_division)
except AssertionError as e:
short_path = path[len(BASE_DIR) + 1:]
raise AssertionError('Error in %s: %s' % (short_path, str(e)))
if __name__ == '__main__':
main()
|
[
"v-grniki@microsoft.com"
] |
v-grniki@microsoft.com
|
0fb2282d3b84448e01b5dcc2f4bd64681db1bbb0
|
fabc3b3286df0fa98a35ea90d4693d9f38db50a2
|
/sendJsonRequest.py
|
a84652870f3a942e8fd181669bc76831b3cc1e21
|
[] |
no_license
|
yangtou45du/openpyxl
|
750b5ee23ce8e5cb6826b8cc137012fbf2a5d9cb
|
87eef380391e60eab81f93c7742d1c21b1d029de
|
refs/heads/master
| 2020-03-16T21:59:35.078761
| 2018-05-31T03:42:03
| 2018-05-31T03:42:03
| 133,022,587
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
import requests
import json
class sendRequest():
    def POST(self, url, payload):
        response = requests.post(url, json=payload)
        return response.text
if __name__ == '__main__':
    url = "http://221.236.20.217:8093/pcl/services/loanCenter/account/queryPaymentHistory"
    payload = {
        "params": {
            "loanNo": "000002017090601542",
            "isPage": 1,
            "pageSize": "10",
            "pageNo": "1"
        }
    }
    f = sendRequest().POST(url, payload)
    print(f)
|
[
"you@example.com"
] |
you@example.com
|
59773eb3cc899c7e48f6283380f46e6b5d8902c3
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Projects/pyinstaller/tests/old_suite/interactive/test_pygame.py
|
bc4b2ab4e66186771f7e4675c2622bd8225cad3d
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:82d416a54f2704d7f86702c9d4fca758c3922aabd54f55bd79bc5d55d8cb8592
size 8156
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
8a43127609e4f347391453d0dab8d410e2ee6d3d
|
dc456b315dc6988fbc37a92e8c1af8987205c9fa
|
/holiday/countries/poland.py
|
5d59cee331e259533d6991ba00d5467cc7251f70
|
[
"MIT"
] |
permissive
|
Lionbridge-Technologies/holiday
|
83bf8e0d665828e75429b519e3b85294475ecb64
|
e8fa1628efdc81ed2f3452cf7009de605968cb76
|
refs/heads/master
| 2021-01-13T08:58:09.830066
| 2013-11-10T21:50:58
| 2013-11-10T21:50:58
| 69,601,979
| 0
| 1
| null | 2016-09-29T19:46:17
| 2016-09-29T19:46:16
| null |
UTF-8
|
Python
| false
| false
| 1,582
|
py
|
# -*- coding: utf-8 -*-
'''Holiday information for Poland. Adapted from
https://gist.github.com/sebzur/1810707
'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import date, timedelta
from dateutil import easter
from dateutil.relativedelta import relativedelta, SU, TH
def get_holidays(year, place=['Poland', None, None], scope='legal', _=str):
"""Returns Polish holiday dates (legally considered non-working days)."""
easter_sunday = easter.easter(year)
return {
date(year, 1, 1): _('New Year'),
date(year, 1, 6): _('Trzech Kroli'),
easter_sunday: _('Easter Sunday'),
easter_sunday + timedelta(days=1): _('Easter Monday'),
date(year, 5, 1): _('Labor Day'),
date(year, 5, 3): _('Constitution Day'),
# 7th Sunday after Easter
        # (notice days=+1: this is the 7th Sunday, excluding Easter Sunday itself)
easter_sunday + relativedelta(days=+1, weekday=SU(+7)):
_('Pentecost Sunday'),
# 9th Thursday after Easter
easter_sunday + relativedelta(weekday=TH(+9)):
_('Corpus Christi'),
date(year, 8, 15): _('Assumption of the Blessed Virgin Mary'),
date(year, 11, 1): _("All Saints' Day"),
date(year, 11, 11): _('Independence Day'),
date(year, 12, 25): _('Christmas Day'),
date(year, 12, 26): _('Boxing Day'),
} # What the hell, you don't celebrate Chopin's birthday???
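# Spot check for one year: Easter 2014 fell on April 20, so the rules above
# place Pentecost on June 8 and Corpus Christi on June 19 of that year.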
if __name__ == "__main__":
from pprint import pprint
pprint(get_holidays(2014))
|
[
"nandoflorestan@gmail.com"
] |
nandoflorestan@gmail.com
|
2d40f0bfb1b559ceab1fa73202905567d7c26083
|
24c5c46f1d281fc15de7f6b72a5148ae85f89fb4
|
/SRC/demo/imooc/imooc_advanced/对象迭代与反迭代/part5.py
|
3baf32b97753c3d1764be42a9bf4906e61683e3c
|
[] |
no_license
|
enterpriseih/easyTest
|
22d87c7ffe40fb10a07f7c5cdd505f63dd45adc0
|
43b8d294e898f25055c78313cfece2753352c250
|
refs/heads/master
| 2023-08-23T22:55:14.798341
| 2020-02-11T09:13:43
| 2020-02-11T09:13:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
# How to slice an iterator
# Real-world case: given some text file, we want to read a range of its
# content, e.g. the lines between 100 and 300. In Python a text file is an
# iterable object; can we use list-style slicing to get a generator over the
# content of lines 100~300?
# f=open('')
# f[100:300]  # does this work? (no: file objects are not subscriptable)
# Use itertools.islice from the standard library: it returns a generator over
# a slice of any iterable object
from itertools import islice
f=open('part1.py',encoding='utf-8')
# for x in f:
# print(f.readline())
for line in islice(f,2,5):
print(line)
# islice(f,2)       # first 2 lines
# islice(f,2,None)  # from line 3 to the end
# islice consumes the original generator
l=range(20)
t=iter(l)
for x in islice(t,4,8,1):
print(x,end=' ')
print()
for x in t:
print(x,end=' ')
|
[
"yaolihui0506"
] |
yaolihui0506
|
21f5c6a34e88da9ea700f99dfdcc7834a7aefe05
|
6ead0d3997aa3470fc6f49c6ccc0ac8f808ae5d7
|
/problems/python/findPeakElement.py
|
54ddbd13b948a36ca234c81c0125b7d55d9dc703
|
[] |
no_license
|
ikedaosushi/leetcode
|
d405455bfffda3057259da78783901feb56d9f76
|
d378f2dc5f0b2df1f00208e304979ac0f53ab385
|
refs/heads/master
| 2021-06-24T04:31:56.586685
| 2020-12-08T13:51:18
| 2020-12-08T13:51:18
| 178,659,078
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
from typing import List
class Solution:
def findPeakElement(self, nums: List[int]) -> int:
if len(nums) <= 1:
return 0
if nums[0] > nums[1]:
return 0
if nums[-2] < nums[-1]:
return len(nums) - 1
i = 1
while i < len(nums) - 1:
if nums[i] > nums[i+1]:
if nums[i-1] < nums[i]:
return i
i += 1
i += 1
return 0
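# Example: Solution().findPeakElement([1, 2, 3, 1]) returns 2, since
# nums[2] == 3 is greater than both of its neighbours.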
|
[
"ikeda.yutaro@gmail.com"
] |
ikeda.yutaro@gmail.com
|
251e492a5dd723d7807866355d7f4487306bc38d
|
0a7711063b30b1566ade3cc07f105292e32fe6d6
|
/scrapy_test/aggregates/apartment/tests/integration/test_apartment_behavior.py
|
2c4e1054d5b46d3ecaff69fa4cd40a24006fb188
|
[] |
no_license
|
huokedu/dynamic-scrapy
|
e150a1fc6894e39d6bae37c602a592d57cd22c51
|
31a47e9810f2039cfe33653e09d7d03242764723
|
refs/heads/master
| 2021-01-17T21:33:17.810250
| 2013-10-05T17:28:19
| 2013-10-05T17:28:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 774
|
py
|
import pytest
from scrapy_test.aggregates.apartment.services import apartment_service
from scrapy_test.aggregates.listing.models import Listing
from scrapy_test.aggregates.listing.services import listing_service
from scrapy_test.aggregates.listing.tests import listing_test_data
@pytest.mark.django_db_with_migrations
def test_apartment_publishes_notified_unavailable():
listing_id = listing_service.create_listing(**listing_test_data.cl_listing_4033538277).id
listing = Listing.objects.get(pk=listing_id)
apartment = listing.apartment
apartment_service.notify_unavailable(apartment)
  assert not apartment.is_available
listings = Listing.objects.filter(apartment=apartment).values_list('is_deleted', flat=True)
  assert all(listings)
|
[
"scoarescoare@gmail.com"
] |
scoarescoare@gmail.com
|
12808eccde179a2a86faac55bccb1419289b162f
|
cc20c7658fdf4fa7506625c9efdae792dfd857ce
|
/src/visionlouisville/utils.py
|
8f17dc56324ff01681c7cff2368133b2fa2a242e
|
[] |
no_license
|
openplans/visionlouisville
|
4a9972adc97af0048c4f7c0cd8f642bef59e69cf
|
e4dc182d061866e80ae140a887bba1b0e967753c
|
refs/heads/master
| 2020-05-20T13:25:47.273430
| 2013-10-24T16:27:47
| 2013-10-24T16:27:47
| 10,971,764
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
from django.conf import settings
from itertools import combinations, islice
from random import randint
def uniquify_tweet_ids(queryset):
"""Ensure that the tweet_ids of all items in the queryset are unique"""
all_different = False
while not all_different:
all_different = True
        for reference, other in combinations(queryset, 2):
            if reference.tweet_id == other.tweet_id:
all_different = False
other.tweet_id = str(randint(0, 9999999999999999))
other.save()
def chunk(iterable, n):
"""Collect data into fixed-length chunks"""
it = iter(iterable)
while True:
item = list(islice(it, n))
if item: yield item
else: break
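# Example: list(chunk(range(7), 3)) -> [[0, 1, 2], [3, 4, 5], [6]]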
def settings_context(request):
return {'settings': settings}
|
[
"mjumbewu@gmail.com"
] |
mjumbewu@gmail.com
|
19e37018d09be7a049a1eb9e51bdf5f7e821a01a
|
46c38a849a96ca868b1efaa8280be7416e15f952
|
/goslinks/blueprints/auth.py
|
3fc63ffc1366208b6bd7351e53fdf2521ccce5a9
|
[
"ISC"
] |
permissive
|
RevolutionTech/goslinks
|
066b7f08a05bb68b6440bab4e670e537d0f1960f
|
fedb91a0d4ab227ba926f4588c7feeb3af284d2b
|
refs/heads/main
| 2023-02-20T17:25:50.052529
| 2021-10-16T23:19:28
| 2021-10-16T23:19:28
| 184,518,945
| 1
| 0
|
ISC
| 2023-02-08T00:51:24
| 2019-05-02T04:07:12
|
Python
|
UTF-8
|
Python
| false
| false
| 500
|
py
|
from flask import Blueprint, redirect, session
from goslinks.auth.constants import AUTH_EMAIL_KEY, AUTH_NEXT_URL_KEY
from goslinks.auth.decorators import no_cache
bp = Blueprint("auth", __name__)
@bp.route("/logout/")
@no_cache
def logout():
# Deprecated session variables, to be removed after 2020/10/13
session.pop("auth_token", None)
session.pop("auth_state", None)
session.pop(AUTH_NEXT_URL_KEY, None)
session.pop(AUTH_EMAIL_KEY, None)
return redirect("/", code=302)
|
[
"lucas.revolutiontech@gmail.com"
] |
lucas.revolutiontech@gmail.com
|
269883af7e8c26cec4f9f1501eb31440733b33e5
|
2534803a09f5a6676ccece4519a2b8faaea9329d
|
/zeno/test/propagate/helper.py
|
dc33a738b861d6a9fd3e317a8adb5d9b0f2649da
|
[
"Apache-2.0"
] |
permissive
|
SmithSamuelM/plenum
|
c41ccb849cd1113ba8496fa8bd9b0c5336ee9878
|
2675523f8718d6f240027582bc90c76b4f80c105
|
refs/heads/master
| 2021-01-15T23:35:53.640499
| 2016-02-16T05:01:22
| 2016-02-16T05:01:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
from zeno.common.request_types import Propagate
from zeno.test.helper import TestNode, getAllArgs
def sentPropagate(node: TestNode):
params = getAllArgs(node, TestNode.send)
return [p for p in params if isinstance(p['msg'], Propagate)]
def recvdPropagate(node: TestNode):
return getAllArgs(node,
TestNode.processPropagate)
def recvdRequest(node: TestNode):
return getAllArgs(node,
TestNode.processRequest)
def forwardedRequest(node: TestNode):
return getAllArgs(node,
TestNode.forward)
|
[
"jason@evernym.us"
] |
jason@evernym.us
|
7d3f4e0a5031f9ce618c568b440c7425489060a1
|
16631cf7cd4a70f2cd2750851649d3eff5e17724
|
/2019/day06/part1.py
|
d5325cb19e7543fcf23dde0b345f5a8f5535efa1
|
[] |
no_license
|
kynax/AdventOfCode
|
1dd609a3308d733f2dd7d4ea00508d2da73180b9
|
36a339241dd7a31ebe08a73e5efa599e5faeea1a
|
refs/heads/master
| 2022-12-21T13:32:52.591068
| 2022-12-16T22:41:30
| 2022-12-16T22:41:30
| 48,439,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
import sys
class Obj:
def __init__(self, name):
self.name = name
self.down = []
def add_child(self, obj):
self.down.append(obj)
def prnt(self, prev):
if not self.down:
print(prev + '=' + self.name)
else:
for d in self.down:
d.prnt(prev + '-' + self.name)
def distance(self, start):
d = start
if not self.down:
print(self.name, start)
for n in self.down:
d += n.distance(start + 1)
return d
COM = Obj('COM')
orbits = {}
orbits['COM'] = COM
effects = [x.strip().split(')') for x in list(sys.stdin)]
for c,o in effects:
obj = None
if o in orbits:
obj = orbits[o]
else:
obj = Obj(o)
orbits[o] = obj
if c in orbits:
orbits[c].add_child(obj)
else:
ctr = Obj(c)
ctr.add_child(obj)
orbits[c] = ctr
print(COM.distance(0))
|
[
"guilemay@gmail.com"
] |
guilemay@gmail.com
|
dc099d384ffc6b9326adbfb10628a62857513c67
|
cbc829f5787b770c9184b91ee470d058cc4cbe65
|
/backtrack/46_全排列.py
|
b21c5927665b36a64df5503725fa3d085639de52
|
[] |
no_license
|
SilvesSun/learn-algorithm-in-python
|
58815e7e85e767cbc4a9c21e36e7bdede4f32bef
|
5ba3465ba9c85955eac188e1e3793a981de712e7
|
refs/heads/master
| 2022-09-19T05:10:26.783943
| 2022-09-10T04:56:43
| 2022-09-10T04:56:43
| 115,470,779
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
class Solution(object):
def permute(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
if not nums:
return
res = []
def backtrack(_nums, track):
if not _nums:
res.append(track[:])
return
for i in range(len(_nums)):
track.append(_nums[i])
backtrack(_nums[:i] + _nums[i+1:], track)
track.pop()
backtrack(nums, [])
return res
if __name__ == '__main__':
s = Solution()
print(s.permute([1, 2, 3]))
|
[
"2498256234@qq.com"
] |
2498256234@qq.com
|
01c6a82363241a9064bcdc20ba495dec968eb0ca
|
9461195cac30788855359753ac2856d746e81cd6
|
/apps/estado_flujo/forms.py
|
4e5747b61b4e54b2f824df8dd33ccf4402278ed3
|
[] |
no_license
|
ChristianSmith18/python-project
|
e15460b29e29a6bb841c82a762618f7ff86ab724
|
76d876f3fded93643af58e65f183bb6403beb755
|
refs/heads/master
| 2023-04-30T15:30:48.472909
| 2021-05-24T17:33:46
| 2021-05-24T17:33:46
| 370,433,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
from django import forms
from apps.estado_flujo.models import Glo_EstadoFlujo
class estadoFlujoForm(forms.ModelForm):
class Meta:
model = Glo_EstadoFlujo
fields = [
'descripcion_estado',
'estado',
]
widgets = {
'descripcion_estado': forms.TextInput(attrs={'class': 'form-control'}),
'estado': forms.TextInput(attrs={'class': 'form-control','type':'number'}),
}
|
[
"cgr.gonzalezrossier@gmail.com"
] |
cgr.gonzalezrossier@gmail.com
|
6d2e0a26d4c7ad4f0faf749760e2e908565be54d
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_splotches.py
|
ed0d1adf3f190745463d7e746b1ad80895ac0e75
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
from xai.brain.wordbase.nouns._splotch import _SPLOTCH
# class header
class _SPLOTCHES(_SPLOTCH, ):
def __init__(self,):
_SPLOTCH.__init__(self)
self.name = "SPLOTCHES"
self.specie = 'nouns'
self.basic = "splotch"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
88dfa41087978b540b432a730f6068e9e609f5bc
|
2edfa18568b02e63757da73254c09e195b9f4efa
|
/evaluation/nejm/evaluate.py
|
7e2fcaf57a3bb27d779b4998aa9d3e4d76e406dd
|
[] |
no_license
|
boxiangliu/ParaMed
|
65e67977c88c1ce2166d08d6d40a33f6961a3486
|
08484488f4829bf144303a2e348c79e4e2ae5f71
|
refs/heads/master
| 2023-05-02T22:14:31.911384
| 2021-05-15T21:44:51
| 2021-05-15T21:44:51
| 217,118,327
| 19
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,436
|
py
|
import argparse
import os
import pandas as pd
pd.options.display.max_columns = 99
import numpy as np
from collections import defaultdict
parser = argparse.ArgumentParser(description="Generate precision-recall "\
"table for sentence alignments.")
parser.add_argument("--align_fn", type=str, help="Path to ground-truth "\
"alignment file.")
parser.add_argument("--en_fn", type=str, help="Path to English sentences.")
parser.add_argument("--zh_fn", type=str, help="Path to Chinese sentences.")
parser.add_argument("--pred_fn", type=str, help="Path to prediction sentence.")
parser.add_argument("--out_fn", type=str, help="Path to output precision "\
"recall table.")
args = parser.parse_args()
os.makedirs(os.path.dirname(args.out_fn), exist_ok=True)
# Example
# args = argparse.Namespace(align_fn="../data/wmt19_biomed_modified/align_validation_zh_en.txt",
# en_fn="../data/wmt19_biomed_modified/medline_zh2en_en.txt",
# zh_fn="../data/wmt19_biomed_modified/medline_zh2en_zh.txt",
# pred_fn="../data/wmt19_biomed_modified/align_bleualign_zh_en.txt",
# out_fn="../processed_data/evaluation/wmt19_biomed/evaluate/bleualign.pr")
def align_en_zh(align, en, zh):
align["zh"] = [x.split(" <=> ")[0] for x in align["align"]]
align["en"] = [x.split(" <=> ")[1] for x in align["align"]]
docs = align.doc.unique()
alignment = defaultdict(list)
for doc in docs:
e = en[en.doc == doc]
z = zh[zh.doc == doc]
a = align[align.doc == doc]
if e.shape[0] == 0 or z.shape[0] == 0:
continue
for i, j, status in \
zip(a["zh"], a["en"], a["status"]):
zh_sent = ""
en_sent = ""
for v in i.split(","):
if v != "omitted":
v = int(v) - 1
zh_sent += z["sent"].iloc[v]
for w in j.split(","):
if w != "omitted":
w = int(w) - 1
en_sent += e["sent"].iloc[w]
alignment["doc"].append(doc)
alignment["align"].append("{} <=> {}".format(i,j))
alignment["status"].append(status)
alignment["zh"].append(zh_sent)
alignment["en"].append(en_sent)
alignment = pd.DataFrame(alignment)
return alignment
def read_data(args):
shape_getter = pd.read_table(args.align_fn, nrows=10)
ncol = shape_getter.shape[1]
print(f"{ncol} columns detected in alignment file.")
if ncol == 3:
align = pd.read_table(args.align_fn, names=["doc", "align", "status"])
elif ncol == 4:
align = pd.read_table(args.align_fn, names=["pmid", "doc", "align", "status"])
else:
raise ValueError(f"Column = {ncol} has not been implemented.")
if args.en_fn is not None and args.zh_fn is not None:
en = pd.read_table(args.en_fn, names=["doc", "sent_id", "sent"])
zh = pd.read_table(args.zh_fn, names=["doc", "sent_id", "sent"])
align = align_en_zh(align, en, zh)
else:
en = None
zh = None
return align, en, zh
def align_type(x):
out = []
for i in x:
if i is np.NaN:
out.append(np.NaN)
else:
src, tgt = i.split(" <=> ")
if src == "omitted":
src_len = 0
else:
src_len = len(src.split(","))
if tgt == "omitted":
tgt_len = 0
else:
tgt_len = len(tgt.split(","))
min_len = min(src_len, tgt_len)
max_len = max(src_len, tgt_len)
out.append("{} - {}".format(min_len, max_len))
return out
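# Example: align_type(["1,2 <=> 3"]) -> ["1 - 2"], i.e. the min and max of the
# source/target sentence counts for each alignment.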
def get_precision_recall(valid, pred):
types = valid["type"].unique()
print(f"Alignment types: {types}", flush=True)
def paste(x):
return ":".join([x["doc"], x["align"]])
pr_table = defaultdict(list)
for _type in types:
try:
valid_of_type = valid[valid["type"] == _type].\
apply(lambda x: paste(x), axis=1).tolist()
pred_of_type = pred[pred["type"] == _type].\
apply(lambda x: paste(x), axis=1).tolist()
TP = sum([x in pred_of_type for x in valid_of_type])
FN = sum([x not in pred_of_type for x in valid_of_type])
FP = sum([x not in valid_of_type for x in pred_of_type])
precision = TP / (TP + FP)
recall = TP / (TP + FN)
pr_table["type"].append(_type)
pr_table["precision"].append(precision)
pr_table["recall"].append(recall)
        except ZeroDivisionError:
            # no alignments of this type were matched, so precision/recall is undefined
            print(f"Type {_type} not found.")
pr_table = pd.DataFrame(pr_table)
return pr_table
def main():
valid, en, zh = read_data(args)
pred = pd.read_table(args.pred_fn,
names=["doc", "align","status", "zh", "en"])
valid["type"] = align_type(valid["align"])
pred["type"] = align_type(pred["align"])
pr_table = get_precision_recall(valid, pred)
pr_table.to_csv(args.out_fn, sep="\t", index=False)
if __name__ == "__main__":
main()
|
[
"jollier.liu@gmail.com"
] |
jollier.liu@gmail.com
|
eed94a71c7239fbdd541de9a4417a48de3d95475
|
ab670d6e59ebd4a0c23fa867fb77866d223163da
|
/Python/Problem029.py
|
7fedb1207b0e2cc07512acac43d00c69115028a9
|
[] |
no_license
|
JeromeLefebvre/ProjectEuler
|
18799e85947e378e18839704c349ba770af4a128
|
3f16e5f231e341a471ffde8b0529407090920b56
|
refs/heads/master
| 2020-07-05T02:42:44.844607
| 2014-07-26T01:04:38
| 2014-07-26T01:04:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
'''
Distinct powers
Problem 29
Consider all integer combinations of a^b for 2 ≤ a ≤ 5 and 2 ≤ b ≤ 5:
2^2=4, 2^3=8, 2^4=16, 2^5=32
3^2=9, 3^3=27, 3^4=81, 3^5=243
4^2=16, 4^3=64, 4^4=256, 4^5=1024
5^2=25, 5^3=125, 5^4=625, 5^5=3125
If they are then placed in numerical order, with any repeats removed, we get the following sequence of 15 distinct terms:
4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125
How many distinct terms are in the sequence generated by a^b for 2 ≤ a ≤ 100 and 2 ≤ b ≤ 100?
'''
from itertools import product
def problem29():
return len({a ** b for a, b in product(range(2, 100 + 1), range(2, 100 + 1))})
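# A quick sanity check against the worked example in the docstring: restricting
# both ranges to 2..5 should give exactly the 15 distinct terms listed above.
def problem29_small():
    return len({a ** b for a, b in product(range(2, 5 + 1), range(2, 5 + 1))})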
if __name__ == "__main__":
print(problem29() == 9183)
from cProfile import run
run("problem29()")
|
[
"jerome.p.lefebvre@gmail.com"
] |
jerome.p.lefebvre@gmail.com
|
3bf61bbfa5ba4d78c42105bc36280e5ed2f3f3b2
|
34c01d4bf7ae13b15bfbcfd90ff39f5353971820
|
/examples/reactive.py
|
cb3d656e3187fae427834a62122f2958b8199bf1
|
[] |
no_license
|
nvbn/microasync
|
c78d8684119fe6cbcd1ece762a15d64940ff9eb6
|
9e4975ed5077f133051bc80c1d54042dac5b78c7
|
refs/heads/master
| 2021-01-23T08:56:52.382753
| 2014-10-23T20:13:49
| 2014-10-23T20:13:49
| 23,565,211
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,559
|
py
|
from microasync.async import coroutine, as_chan, Channel, do_all, select
from microasync.device import get_switch, get_output_pin
@as_chan(Channel)
def get_bicolor_led(chan, left, right):
left_pin = get_output_pin(left)
right_pin = get_output_pin(right)
while True:
msg = yield chan.get()
print(msg)
if msg == 'red':
yield do_all(left_pin.put(1),
right_pin.put(0))
elif msg == 'green':
yield do_all(left_pin.put(0),
right_pin.put(1))
elif msg == 'yellow':
yield do_all(left_pin.put(1),
right_pin.put(1))
elif msg == 'none':
yield do_all(left_pin.put(0),
right_pin.put(0))
@as_chan(Channel)
def switchable_filter(chan, orig_chan, fn):
select_ch = select(get_switch(), chan)
enabled = False
while True:
result_ch, val = yield select_ch.get()
if result_ch == chan:
if not enabled or fn(val):
yield orig_chan.put(val)
else:
enabled = not enabled
@coroutine
def main():
first_led = switchable_filter(get_bicolor_led('X1', 'X2'),
lambda msg: msg != 'red')
second_led = switchable_filter(get_bicolor_led('X3', 'X4'),
lambda msg: msg == 'red')
while True:
for led in (first_led, second_led):
for mode in ('red', 'green', 'yellow', 'none'):
yield led.put(mode)
|
[
"nvbn.rm@gmail.com"
] |
nvbn.rm@gmail.com
|
0f7b6291b1f5cf75ec7597313122777012517352
|
f875b0d80254c8f6eee4e5887869442a8abf60e4
|
/Official_OpenCV_Docs/Image_Processing_In_OpenCV/changing_color_spaces.py
|
2065a51ee24f675278df82863552b3ccb6733902
|
[] |
no_license
|
AmitKulkarni23/OpenCV
|
6d9320fa9e4fd41af4806cda6df0fb2c641d7884
|
449468f4c9c84ffb5b66ab352086e5b23f342b45
|
refs/heads/master
| 2020-03-19T12:36:23.305898
| 2018-09-16T00:53:51
| 2018-09-16T00:53:51
| 136,528,669
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,703
|
py
|
# Python file which captures video from the camera and displays only the red color
# Everything else is blacked out
# OpenCV has >150 color spaces
# But 2 of the important ones are BGR -> HSV and BGR -> Gray
# Changing colorspaces - uses the API cv2.cvtColor
# Why HSV for color detection?
# It helps in pinpointing a more specific color
# What is HSV -> Hue, Saturation and Value
# Hue - color
# Saturation - strength of the color
# Value - for light (brightness)
# Credits: https://pythonprogramming.net/color-filter-python-opencv-tutorial/
###################################
import cv2
import numpy as np
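# A common way to pick HSV bounds (a sketch, using the cv2/np imports above):
# convert a single known BGR color and inspect its hue, e.g.
#   print(cv2.cvtColor(np.uint8([[[0, 0, 255]]]), cv2.COLOR_BGR2HSV))
#   # -> [[[  0 255 255]]], so pure red sits near hue 0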
# Create a VideoCapture object
cap = cv2.VideoCapture(0)
while 1:
    # Read frame-by-frame from the camera
_, frame = cap.read()
# Change to hsv colorspace
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# Specify red ranges
lower_red = np.array([30,150,50])
upper_red = np.array([255,255,180])
# So we will be seeing anything in the ranges of 30-255, 150-255 and 50-180
    # The mask created using inRange is either true or false,
    # i.e. black or white (see the mask image for more clarity)
mask = cv2.inRange(hsv, lower_red, upper_red)
    # This is our result:
    # we show color wherever both the frame AND the mask are set.
    # The white part of the mask marks the red range (converted to pure white),
    # while everything else became black.
res = cv2.bitwise_and(frame,frame, mask= mask)
cv2.imshow('Original Video',frame)
cv2.imshow('Mask using inRange',mask)
cv2.imshow('Resultant Video',res)
k = cv2.waitKey(5) & 0xFF
# Break out of while loop on press of 'ESC'
if k == 27:
break
cv2.destroyAllWindows()
cap.release()
|
[
"amitrkulkarni232@gmail.com"
] |
amitrkulkarni232@gmail.com
|
eab66c8739d2f800e313c946eeac35d82206b0f6
|
2b832e5d3d88b25998f44d21fdb3fa40c2072a9e
|
/testcase/api/__init__.py
|
fb1bdccf22a848238c4b584cbd9bcd23639dd0a1
|
[
"MIT"
] |
permissive
|
lijunzhe123/Automation
|
387536505e0b77fd9cc1d7dc9d017dc1268925eb
|
18122ce2c5debe485fab7dac5f8007f4b7b2d51f
|
refs/heads/main
| 2023-06-17T10:04:51.296922
| 2021-07-10T17:58:56
| 2021-07-10T17:58:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
@author: yuejl
@application:
@contact: lewyuejian@163.com
@file: __init__.py
@time: 2021/7/8 0008 20:59
@desc:
'''
|
[
"lewyuejian@163.com"
] |
lewyuejian@163.com
|
134ea0ae19f609e515c711126a5c421c4f2b288a
|
850c6fd59110bbdd89a28a2ebd117be04ce3917a
|
/nengo_normal_form/hosted.py
|
4cbcbcb5084393bea9907aeb717900c91f30748e
|
[
"MIT"
] |
permissive
|
tcstewar/nengo_normal_form
|
e56b35b5cb36a0ed659528ab83e1116bda3dfb32
|
37ca02b20c4cc143a7bf9c27912ead36d23a04d7
|
refs/heads/master
| 2021-01-01T19:44:39.556970
| 2018-05-15T22:08:06
| 2018-05-15T22:08:06
| 98,668,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,597
|
py
|
import nengo_normal_form
import nengo
import numpy as np
from .generic import GenericSimulator
class Host2Client(nengo.Node):
def __init__(self, conn):
self.latest_time = 0
self.latest_x = np.zeros(conn.size_out)
super(Host2Client, self).__init__(self.update,
size_in=conn.post_obj.size_in,
size_out=0)
self.post_slice = conn.post_slice
def update(self, t, x):
self.latest_time = t
self.latest_x = x[self.post_slice]
class Client2Host(nengo.Node):
def __init__(self, conn):
super(Client2Host, self).__init__(self.update,
size_in=0,
size_out=conn.size_out)
self.value = np.zeros(conn.size_out)
def update(self, t):
return self.value
class HostedSimulator(GenericSimulator):
def __init__(self, model, dt=0.001, progress_bar=True):
super(HostedSimulator, self).__init__(dt=dt, progress_bar=progress_bar)
norm_model, probes = nengo_normal_form.convert(model)
self.host2client = {}
self.client2host = {}
self.client_conns = []
self.client_objs = []
host_model = nengo.Network()
for node in norm_model.nodes:
if self.is_on_host(node):
host_model.nodes.append(node)
else:
self.client_objs.append(node)
for ens in norm_model.ensembles:
if self.is_on_host(ens):
host_model.ensembles.append(ens)
else:
self.client_objs.append(ens)
for c in norm_model.connections:
host_pre = self.is_on_host(c.pre_obj)
host_post = self.is_on_host(c.post_obj)
if host_pre:
if host_post:
host_model.connections.append(c)
else:
with host_model:
self.host2client[c] = Host2Client(c)
nengo.Connection(
c.pre,
self.host2client[c],
synapse=c.synapse,
transform=c.transform,
function=c.function,
label=c.label)
else:
if host_post:
with host_model:
self.client2host[c] = Client2Host(c)
nengo.Connection(
self.client2host[c],
c.post,
synapse=c.synapse,
transform=c.transform,
label=c.label)
else:
self.client_conns.append(c)
self.host = nengo.Simulator(host_model, progress_bar=False)
for p, pnode in probes.items():
self.data[p] = pnode.data
def step(self):
self.host.step()
super(HostedSimulator, self).step()
def is_on_host(self, obj):
if isinstance(obj, nengo_normal_form.DecoderNode):
return False
if isinstance(obj, nengo.Node):
return True
if isinstance(obj, nengo.Ensemble):
if isinstance(obj.neuron_type, nengo.Direct):
return True
else:
return False
raise nengo.exceptions.NengoException(
'Unhandled connection to/from %s' % obj)
|
[
"tcstewar@uwaterloo.ca"
] |
tcstewar@uwaterloo.ca
|
00104352c1370d91932fa8d8269ab961641f0546
|
5201e237c0d58cdfdbc2fdf8103f9141161eb9f8
|
/ITKFastMarchingPython.pyi
|
29ae0da83b4f80330b389cd611cb717d8648dacb
|
[] |
no_license
|
hjmjohnson/itk-stubs
|
704f5b92a755e55b81d02fcad62a366143e125f3
|
771951d007ae425b758e088eae6f9e4ca0e4afb1
|
refs/heads/main
| 2023-01-22T05:50:33.649088
| 2020-12-04T01:31:09
| 2020-12-04T01:35:06
| 318,368,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 788
|
pyi
|
from itk.itkLevelSetNodePython import *
from itk.itkNodePairPython import *
from itk.itkFastMarchingStoppingCriterionBasePython import *
from itk.ITKFastMarchingBasePython import *
from itk.itkFastMarchingImageFilterBasePython import *
from itk.itkFastMarchingExtensionImageFilterPython import *
from itk.itkFastMarchingImageFilterPython import *
from itk.itkFastMarchingImageToNodePairContainerAdaptorPython import *
from itk.itkFastMarchingReachedTargetNodesStoppingCriterionPython import *
from itk.itkFastMarchingThresholdStoppingCriterionPython import *
from itk.itkFastMarchingUpwindGradientImageFilterPython import *
from itk.itkFastMarchingUpwindGradientImageFilterBasePython import *
from typing import Any
class _SwigNonDynamicMeta(type):
__setattr__: Any = ...
swig: Any
|
[
"hans-johnson@uiowa.edu"
] |
hans-johnson@uiowa.edu
|
0a32b3ef2fb132f35bbac420ae65c23c9a600a2d
|
4dc0b92ae40c4bb90e4549732cab8b1f2d4305c6
|
/platforms/windows/dos/8232.py
|
c9204be6911fdfac7062141a0fd3580830f5263a
|
[] |
no_license
|
xiaohen/exploit-database
|
be462a39978d6309ae98677e662dc8b228f936a8
|
d02449c714d2df225eba6f55b65d9840e6d19a5f
|
refs/heads/master
| 2021-01-17T10:00:13.535969
| 2014-03-02T04:29:43
| 2014-03-02T04:29:43
| 17,334,511
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
#!/usr/bin/python
# Chasys Media Player 1.1 (.pls) Local Buffer Overflow (SEH) PoC
# SEH and NEXT_SEH are overwritten but the shellcode doesn't get executed !!!
# I have tried a lot of addresses.
# Waiting for the exploit from someone.
# Download : http://www.jpcha2.com/setup/chasys_media_player.zip
print " Chasys Media Player 1.1 (.pls) Local Buffer Overflow (SEH) PoC"
print " Discovered By : zAx"
print " Contact : ThE-zAx@Hotmail.Com"
header = "\x5B\x70\x6C\x61\x79\x6C\x69\x73\x74\x5D\x0A\x4E\x75\x6D\x62\x65\x72\x4F\x66\x45\x6E\x74\x72\x69\x65\x73\x3D\x31\x0A\x46\x69\x6C\x65\x31\x3D"
junk = "\x41"*2024
next_seh = "\x42"*4
seh = "\x43"*4
other_data = "\xCC"*800
ex = header + junk + next_seh + seh + other_data
file=open("zAx.pls","w")
file.write(ex)
file.close()
# milw0rm.com [2009-03-18]
|
[
"info@exploit-db.com"
] |
info@exploit-db.com
|
704c318e614ef0ed1ac677b6e615272c162d3661
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_Class36.py
|
e0acf1a88e1f96250e4f122e4227bc4cdb34de6e
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,427
|
py
|
# qubit number=3
# total number=9
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[3],input_qubit[0]) # number=5
prog.swap(input_qubit[3],input_qubit[0]) # number=6
prog.cx(input_qubit[1],input_qubit[0]) # number=7
prog.cx(input_qubit[1],input_qubit[0]) # number=8
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit_Class36.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
5cbb883ea1c04efe8a7a2cef7957a2a2273cf727
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03946/s811408190.py
|
1dcbe0bb3e40ad9cf5592528edaf02ff138bc42a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
N,T = map(int,input().split())
A = list(map(int,input().split()))
data = [0]*(N-1)
M = [0]*N
M[N-1] = A[N-1]
M_index = [N-1]*N
for i in range(2,N+1):
if A[N-i] > M[N-i+1]:
M[N-i] = A[N-i]
M_index[N-i] = N-i
else:
M[N-i] = M[N-i+1]
M_index[N-i] = M_index[N-i+1]
data[N-i] = M[N-i] - A[N-i]
m = max(data)
l = []
for i in range(N-1):
if data[i] == m:
l.append(i)
ans = [0]*N
for x in l:
ans[M_index[x]] = 1
#####
#print("A",A)
#print("M",M)
#print("M_index",M_index)
#print("data",data)
#print("ans",ans)
#####
print(sum(ans))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
534d7a8d361d7a451f2b98d94d9b4539fe824687
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/137/usersdata/218/47092/submittedfiles/Maratona.py
|
6d8876f8f1158c2fd5d5be7ed13195de53f89d34
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
# -*- coding: utf-8 -*-
N=int(input('enter the number of stations on the course:'))
M=int(input('enter the maximum reachable distance between stations:'))
maxima=42195
minima=0
cont=0
for i in range (0,N,1):
    p=int(input('enter the position of the station:'))
if p<minima:
minima=p
if p>maxima:
maxima=p
if maxima-minima>M:
cont=1
if cont==0:
print('S')
else:
print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
90183842ee994ee4c500c4ac32e0e8ac30df18d8
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_118/2306.py
|
480ae3036f9ad1679c427e947506870bf81d64db
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
import sys
import math
def is_palindrom(number):
return str(number) == str(number)[::-1]
def make_work(input='input.txt', output='output.txt'):
file_in = open(input)
cases_number = int(file_in.readline().strip())
for n in xrange(cases_number):
case_number = n + 1
a, b = map(int, file_in.readline().split(' '))
sq_a, sq_b = map(math.sqrt, (a, b))
sq_a, sq_b = map(int, (math.ceil(sq_a), math.floor(sq_b)))
fair_square = 0
for x in xrange(sq_a, sq_b + 1):
if is_palindrom(x) and is_palindrom(x*x):
fair_square += 1
print "Case #%s: %s" % (case_number, fair_square)
if len(sys.argv) >= 2:
make_work(input=sys.argv[1])
else:
make_work()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
5cbd9ab1e7cbca8d9043ca2d795fd3e2e95dce6d
|
e63ab09f227459380c317aa1694cffd04255c807
|
/cheshire3/lucene/indexStore.py
|
d8219db0cb88d5d868bd9d0e3bef35b30995337d
|
[
"ICU",
"X11"
] |
permissive
|
bitwhite/cheshire3
|
91a0d2f8d2e79ac277ac4f7a3bea9efa911ce3d6
|
ca27bc2600d217e36a429ccfe064f11d9b200193
|
refs/heads/master
| 2021-05-27T03:50:09.456813
| 2013-10-10T13:47:16
| 2013-10-10T13:47:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,186
|
py
|
from cheshire3.baseObjects import IndexStore
from cheshire3.lucene.utils import NullC3Analyzer, C3TokenStream, cqlToLucene
from cheshire3.resultSet import SimpleResultSet, SimpleResultSetItem
import lucene
class LuceneIndexStore(IndexStore):
def __init__(self, session, config, parent):
IndexStore.__init__(self, session, config, parent)
path = self.get_path(session, 'defaultPath')
self.analyzer = NullC3Analyzer()
self.dir = lucene.FSDirectory.getDirectory(path, False)
self.parser = lucene.QueryParser("", lucene.StandardAnalyzer())
self.searcher = lucene.IndexSearcher(self.dir)
self.writer = None
self.currDoc = None
self.currRec = None
def create_index(self, session, index):
# created in begin_indexing()
pass
def begin_indexing(self, session, index):
# will append if exists, or create if not
if not self.writer:
self.writer = lucene.IndexWriter(self.dir, self.analyzer, lucene.IndexWriter.MaxFieldLength.UNLIMITED)
def commit_indexing(self, session, index):
if self.currDoc:
self.writer.addDocument(self.currDoc)
self.currDoc = None
elif self.writer:
self.writer.optimize()
self.writer.close()
self.writer = None
print "called commit"
    def store_terms(self, session, index, terms, rec):
        strm = C3TokenStream(terms)
        if rec != self.currRec:
            if self.currDoc:
                # flush the document for the previous record
                self.writer.addDocument(self.currDoc)
            doc = lucene.Document()
            self.currDoc = doc
            self.currRec = rec
            doc.add(lucene.Field(index.id, strm))
            doc.add(lucene.Field('id', str(rec),
                lucene.Field.Store.YES,
                lucene.Field.Index.UN_TOKENIZED))
        else:
            # same record as the previous call: add to the document already open
            self.currDoc.add(lucene.Field(index.id, strm))
def search(self, session, query, db):
# take CQL query and translate to Lucene
pm = db.get_path(session, 'protocolMap')
if not pm:
db._cacheProtocolMaps(session)
pm = db.protocolMaps.get('http://www.loc.gov/zing/srw/')
query.config = pm
lq = cqlToLucene(session, query, pm)
q = self.parser.parse(lq)
results = self.searcher.search(q, lucene.Sort.RELEVANCE)
# now map to a ResultSet
items = []
for x in range(len(results)):
hit = results[x]
w = results.score(x)
rsid = hit.getField('id').stringValue()
(recStore, id) = rsid.split('/')
if id.isdigit():
id = int(id)
rsi = SimpleResultSetItem(session, id, recStore, weight=w)
items.append(rsi)
rs = SimpleResultSet(session, items)
return rs
def index_record(self, session, rec):
pass
def delete_record(self, session, rec):
pass
def fetch_term(self, session, term, summary, prox):
pass
def fetch_summary(self, session, index):
raise NotImplementedError()
|
[
"info@cheshire3.org"
] |
info@cheshire3.org
|
97373bd89810120c5216516ef09d1046ca0c6302
|
509c3eb9d205be19426c01f222a6e5870cca256f
|
/runs/sim-study/configs/test-sim-6-8-6/tsne/viz.py
|
b608dfa400b45c5118d89e6526b248dbdb47ad82
|
[
"MIT"
] |
permissive
|
luiarthur/CytofRepFAM.jl
|
b4d23cd32cc89493015b72777f1016b41862aaf7
|
1f997d1620d74861c5bde5559ebdd1e6c449b9e7
|
refs/heads/master
| 2021-07-07T16:07:23.489103
| 2021-04-30T02:43:42
| 2021-04-30T02:43:42
| 238,282,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,998
|
py
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
import sys
sys.path.append('../../../../PlotUtils')
import plot_yz
def graph_tsne(tsne_df, clust, i, method, outpath, method_suffix=''):
if clust is None:
tsne_df[method] = tsne_df[method].astype(int)
else:
tsne_df[method] = clust.astype(int)
mask_i = (tsne_df.sample_ind == i)
df = tsne_df[mask_i]
markersize = 15
sns.pairplot(x_vars="tsne1", y_vars="tsne2", data=df,
hue=method,
plot_kws=dict(linewidth=0, s=markersize),
aspect=1, height=3)
plt.savefig(outpath, bbox_inches="tight")
plt.close()
def get_data_dict(tsne_df, y_header='Y', marker_header='M',
sample_ind_name='sample_ind',
true_labels_name='true_labels'):
y_columns = filter(lambda x: x.startswith(y_header), tsne_df.columns)
Y = tsne_df[y_columns].to_numpy()
m_columns = filter(lambda x: x.startswith(marker_header), tsne_df.columns)
M = tsne_df[m_columns].to_numpy() == 1
Y[M] = np.nan
sample_ind = tsne_df[sample_ind_name].to_numpy().astype(int)
true_labels = tsne_df[true_labels_name].to_numpy().astype(int)
return dict(Y=Y, M=M, sample_ind=sample_ind, true_labels=true_labels)
def make_heatmap(data_dict, clust, i, outpath):
if clust is None:
clust = data_dict['true_labels']
else:
clust = clust.astype(int)
K = clust.max()
wi_mean = np.zeros(K)
lami = clust[data_dict['sample_ind'] == i]
for k in range(K):
wi_mean[k] = (lami == k + 1).mean()
yi = data_dict['Y'][data_dict['sample_ind'] == i]
plt.figure(figsize=(6, 6))
plot_yz.plot_y(yi, wi_mean, lami, vlim=(-3, 3),
cm=plot_yz.blue2red.cm(5),
fs_lab=15, fs_cbar=15, lw=3, fs_xlab=15, fs_ylab=15,
interpolation='nearest')
plt.savefig(outpath, bbox_inches="tight")
plt.close()
if __name__ == "__main__":
path_to_csv = 'viz/csv'
# methods = ['mclust', 'flowsom', 'rfam']
methods = ['mclust', 'flowsom', 'true_labels']
os.makedirs('viz/img', exist_ok=True)
method = methods[0]
for pmiss in [0.0, 0.2]:
for zind in [1, 2, 3]:
simname = f'pmiss{pmiss}-phi0-zind{zind}'
tsne_path = f'{path_to_csv}/tsne-{simname}.csv'
tsne_df = pd.read_csv(tsne_path)
data_dict = get_data_dict(tsne_df)
for method in methods:
if method == 'true_labels':
clust = None
else:
clust_path = f'{path_to_csv}/{method}-{simname}.csv'
print(clust_path)
clust = np.loadtxt(clust_path)
for i in range(2):
outpath = f'viz/img/tsne-{method}{i + 1}-{simname}.pdf'
graph_tsne(tsne_df, clust, i + 1, method, outpath)
heatmap_outpath = f'viz/img/heatmap-{method}{i + 1}-{simname}.pdf'
make_heatmap(data_dict, clust, i + 1, heatmap_outpath)
# rfam
method = "rfam"
for phi in [0, 1, 10, 25, 100]: # NOTE: mind this!
clust_simname = f'pmiss{pmiss}-phi{phi}-zind{zind}'
clust_path = f'{path_to_csv}/{method}-{clust_simname}.csv'
print(clust_path)
clust = np.loadtxt(clust_path)
tsne_path = f'{path_to_csv}/tsne-{simname}.csv'
tsne_df = pd.read_csv(tsne_path)
for i in range(2):
outpath = f'viz/img/tsne-{method}{i + 1}-{clust_simname}.pdf'
graph_tsne(tsne_df, clust, i + 1, method, outpath,
method_suffix=f'phi={phi}')
heatmap_outpath = f'viz/img/heatmap-{method}{i + 1}-{clust_simname}.pdf'
make_heatmap(data_dict, clust, i + 1, heatmap_outpath)
|
[
"luiarthur@gmail.com"
] |
luiarthur@gmail.com
|
4f1a9db5a85cb6b5683c2bd6e86cbc75cb4ce5ed
|
06737979a3d4924dc6a3d926d1b3c1c144891fb8
|
/yq/operators/subsequence.py
|
ac83dc051cc727e3080488e6edb463a6a9c0e07a
|
[] |
no_license
|
abesto/yq
|
630fc2377adfb5198b5b0068a1505af01744a339
|
daf0c0d8f4da2ce2eace0c21ce4d8c2d7055ba54
|
refs/heads/master
| 2020-05-16T22:29:59.196970
| 2017-01-19T12:43:07
| 2017-01-19T12:43:07
| 16,524,927
| 83
| 10
| null | 2016-02-28T18:24:32
| 2014-02-04T20:41:24
|
Python
|
UTF-8
|
Python
| false
| false
| 507
|
py
|
from yq.operators.base import Operator
class Subsequence(Operator):
def __init__(self, low, hi):
if low is None:
self.low = low
else:
self.low = int(low)
if hi is None:
self.hi = None
else:
self.hi = int(hi)
def _apply_item(self, data):
try:
return data[self.low:self.hi]
except IndexError:
return None
def __repr__(self):
return '[%s:%s]' % (self.low, self.hi)
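# Example usage (hypothetical data):
#   Subsequence(1, 3)._apply_item([10, 20, 30, 40]) -> [20, 30]
#   Subsequence(None, 2)._apply_item("hello")       -> "he"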
|
[
"abesto@abesto.net"
] |
abesto@abesto.net
|
0a5b64d704824ce71bb59cd4c8f347f2cc8e081b
|
0001e8b4a35ea0530cb0f6f803b896da96d06ce3
|
/sommashampoosales.py
|
be0a16dd95394ca00a7b52f08f417ddef8f97b50
|
[] |
no_license
|
elisagiacomini/programming.lab
|
7374340ed875b17702566f2e760b184a4abbb70d
|
03cef1f1befa3aeb0ccc74c89f5663bdc3e50712
|
refs/heads/main
| 2023-01-27T17:46:24.651148
| 2020-12-01T16:03:04
| 2020-12-01T16:03:04
| 311,688,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
# Initialize an empty list to store the values
values = []
# Open and read the file, line by line
my_file = open('shampoo_sales.csv', 'r')
for line in my_file:
    # Split each row on the comma
    elements = line.split(',')
    # If we are NOT processing the header...
    if elements[0] != 'Date':
        # Set the date and the value
        date = elements[0]
        value = elements[1]
        # Append this value to the list of values
        values.append(float(value))
print(values)
somma = sum(values)
print('The sum of the values in the list is: {}'.format(somma))
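# A minimal equivalent sketch using the standard csv module (assuming the same
# shampoo_sales.csv layout, with the value in the second column):
#   import csv
#   with open('shampoo_sales.csv') as fh:
#       reader = csv.reader(fh)
#       next(reader)  # skip the header row starting with 'Date'
#       values = [float(row[1]) for row in reader]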
|
[
"replituser@example.com"
] |
replituser@example.com
|
4e9a8679f6d3e1a43540bb78022b35d6cb07679d
|
a74b980fd95d5d810315f181449fc9d1710e6923
|
/savecode/threeyears/idownclient/scan/plugin/zgrab2/zgrab2scanner/zgrab2scannerpop3.py
|
b822e7e9071e3ecd8759508da388a167c5059ce4
|
[
"Apache-2.0"
] |
permissive
|
cbbbbbbbb/sspywork
|
b70f5539203b47b21eec2f0514ddca155affc2b8
|
8f05a6b91fc205960edd57f9076facec04f49a1a
|
refs/heads/master
| 2023-03-22T19:45:13.024076
| 2021-03-08T01:24:21
| 2021-03-08T01:24:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,680
|
py
|
"""
Scan pop3 with zgrab2
created by judy 2019/11/19
"""
import os
import signal
import traceback
import uuid
from datacontract.iscandataset.iscantask import IscanTask
from .zgrab2scannerbase import Zgrab2ScannerBase
from ..zgrab2parser import Zgrab2ParserPop3
class Zgrab2ScannerPop3(Zgrab2ScannerBase):
"""zgrab2 http scanner"""
def __init__(self, zgrab_path: str):
Zgrab2ScannerBase.__init__(self, "zgrab2pop3")
self._parser: Zgrab2ParserPop3 = Zgrab2ParserPop3()
def get_banner_pop3(
self,
task: IscanTask,
level,
pinfo_dict,
port,
*args,
zgrab2path: str = "zgrab2",
sudo: bool = False,
timeout: float = 600,
) -> iter:
"""scan http services and get the banner"""
try:
if not isinstance(port, int) or port < 0 or port > 65535:
raise Exception("Invalid port: {}".format(port))
hosts: iter = pinfo_dict.keys()
hostfi = self._write_hosts_to_file(task, hosts)
if hostfi is None:
return
outfi = self._scan_pop3(
task,
level,
hostfi,
port,
*args,
zgrab2path=zgrab2path,
sudo=sudo,
timeout=timeout,
)
if outfi is None or not os.path.isfile(outfi):
return
            # should this be modified?
self._parser.parse_banner_pop3(task, level, pinfo_dict, outfi)
except Exception:
self._logger.error("Scan pop3 error: {}".format(traceback.format_exc()))
def _scan_pop3(
self,
task: IscanTask,
level,
host_file: str,
port: int,
*args,
zgrab2path: str = "zgrab2",
sudo: bool = False,
timeout: float = 600,
) -> str:
"""scan the ips or domains, and write the output files to specified output directory.
host_file: the full path of a file with list of ['1.1.1.1','www.xxx.com'] in the file per line
port: '80' or '443'
outfi: result file path
"""
outfi: str = None
try:
enhanced_args = []
# add hosts and ports to args
enhanced_args.append("pop3")
enhanced_args.append(f"-p {port}")
# zgrab2 pop3 -p 110 -n pop3 -t 10 -o ./mt1.json -f ./mtip.txt
enhanced_args.append("-n pop3")
enhanced_args.append(f"-t {timeout}")
if "--debug" not in enhanced_args:
enhanced_args.append("--debug")
if "--send-help" not in enhanced_args:
enhanced_args.append("--send-help")
if "--send-noop" not in enhanced_args:
enhanced_args.append("--send-noop")
if "--send-quit" not in enhanced_args:
enhanced_args.append("--send-quit")
if port == 110 and "--starttls" not in enhanced_args:
enhanced_args.append("--starttls")
elif port == 995 and "--pop3s" not in enhanced_args:
enhanced_args.append("--pop3s")
            # args is almost always empty; it only carries values when the caller explicitly passes extras, so keep it here for now
enhanced_args.extend(args)
if "--input-file=" not in args or "-f" not in args:
enhanced_args.append(f"-f {host_file}") # input file
# outfi = os.path.join(self._tmpdir, "{}_{}.pop3".format(task.taskid, port))
with self._outfile_locker:
outfi = os.path.join(
self._tmpdir, "{}_{}.pop3".format(str(uuid.uuid1()), port)
)
while os.path.isfile(outfi):
outfi = os.path.join(
self._tmpdir, "{}_{}.pop3".format(str(uuid.uuid1()), port)
)
if "--output-file=" not in args or "-o" not in args:
                # here we must use -o; using '--output-file' causes a
                # 'No such file or directory' exception
                # this may be a bug
                # (the docs never say --output-file is supported)
enhanced_args.append(f"-o {outfi}") # output file
            # create the output directory if it does not exist yet
outdir = os.path.dirname(outfi)
if not os.path.exists(outdir) or not os.path.isdir(outdir):
os.makedirs(outdir)
curr_process = None
try:
curr_process = self._run_process(
zgrab2path, *enhanced_args, rootDir=outdir, sudo=sudo
)
stdout, stderr = curr_process.communicate(timeout=timeout)
exitcode = curr_process.wait(timeout=10)
if stdout is not None:
self._logger.trace(stdout)
if stderr is not None:
self._logger.trace(stderr)
if exitcode != 0:
raise Exception(f"Scan pop3 error: {stdout}\n{stderr}")
self._logger.info(
f"Scan pop3 exitcode={str(exitcode)}\ntaskid:{task.taskid}\nbatchid:{task.batchid}\nport:{port}"
)
finally:
if curr_process is not None:
curr_process.kill()
except Exception:
if outfi is not None and os.path.isfile(outfi):
os.remove(outfi)
outfi = None
self._logger.info(
f"Scan pop3 error\ntaskid:{task.taskid}\nbatchid:{task.batchid}\nport:{port}"
)
return outfi
|
[
"shiyuegege@qq.com"
] |
shiyuegege@qq.com
|
33a2d5d740644f1f35166c70f71a5928b409bcb6
|
072f8bffbfef6e149ad1934ea9183a79864c1acd
|
/venv/Lib/site-packages/watcherclient/tests/unit/v1/test_strategy_shell.py
|
7a1568714e2603e647f5b0d99f8bceeb0ede953e
|
[] |
no_license
|
numvc/LuxoftBot
|
77d9bf8f5f63aee63350f1ec82f4b940afe203d2
|
29d7ca8868ab86bc076509d103f7596039333417
|
refs/heads/master
| 2020-09-21T21:37:12.527546
| 2019-12-04T23:24:35
| 2019-12-04T23:24:35
| 224,939,956
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,741
|
py
|
# Copyright (c) 2016 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mock
import six
from oslo_serialization import jsonutils
from watcherclient import shell
from watcherclient.tests.unit.v1 import base
from watcherclient import v1 as resource
from watcherclient.v1 import resource_fields
STRATEGY_1 = {
'uuid': '2cf86250-d309-4b81-818e-1537f3dba6e5',
'name': 'basic',
'display_name': 'Basic consolidation',
'goal_uuid': 'fc087747-61be-4aad-8126-b701731ae836',
'goal_name': 'SERVER_CONSOLIDATION',
'created_at': datetime.datetime.now().isoformat(),
'updated_at': None,
'deleted_at': None,
'parameters_spec': {},
}
STRATEGY_2 = {
'uuid': 'b20bb987-ea8f-457a-a4ea-ab3ffdfeff8b',
'name': 'dummy',
'display_name': 'Dummy',
'goal_uuid': '407b03b1-63c6-49b2-adaf-4df5c0090047',
'goal_name': 'DUMMY',
'created_at': datetime.datetime.now().isoformat(),
'updated_at': None,
'deleted_at': None,
'parameters_spec': {},
}
class StrategyShellTest(base.CommandTestCase):
SHORT_LIST_FIELDS = resource_fields.STRATEGY_SHORT_LIST_FIELDS
SHORT_LIST_FIELD_LABELS = (
resource_fields.STRATEGY_SHORT_LIST_FIELD_LABELS)
FIELDS = resource_fields.STRATEGY_FIELDS
FIELD_LABELS = resource_fields.STRATEGY_FIELD_LABELS
STATE_FIELDS = resource_fields.STRATEGY_STATE_FIELDS
STATE_FIELD_LABELS = resource_fields.STRATEGY_STATE_FIELD_LABELS
def setUp(self):
super(self.__class__, self).setUp()
p_strategy_manager = mock.patch.object(resource, 'StrategyManager')
self.m_strategy_mgr_cls = p_strategy_manager.start()
self.addCleanup(p_strategy_manager.stop)
self.m_strategy_mgr = mock.Mock()
self.m_strategy_mgr_cls.return_value = self.m_strategy_mgr
self.stdout = six.StringIO()
self.cmd = shell.WatcherShell(stdout=self.stdout)
def test_do_strategy_list(self):
strategy1 = resource.Strategy(mock.Mock(), STRATEGY_1)
strategy2 = resource.Strategy(mock.Mock(), STRATEGY_2)
self.m_strategy_mgr.list.return_value = [
strategy1, strategy2]
exit_code, results = self.run_cmd('strategy list')
self.assertEqual(0, exit_code)
self.assertEqual(
[self.resource_as_dict(strategy1, self.SHORT_LIST_FIELDS,
self.SHORT_LIST_FIELD_LABELS),
self.resource_as_dict(strategy2, self.SHORT_LIST_FIELDS,
self.SHORT_LIST_FIELD_LABELS)],
results)
self.m_strategy_mgr.list.assert_called_once_with(detail=False)
def test_do_strategy_list_marker(self):
strategy2 = resource.Strategy(mock.Mock(), STRATEGY_2)
self.m_strategy_mgr.list.return_value = [strategy2]
exit_code, results = self.run_cmd(
'strategy list --marker 2cf86250-d309-4b81-818e-1537f3dba6e5')
self.assertEqual(0, exit_code)
self.assertEqual(
[self.resource_as_dict(strategy2, self.SHORT_LIST_FIELDS,
self.SHORT_LIST_FIELD_LABELS)],
results)
self.m_strategy_mgr.list.assert_called_once_with(
detail=False,
marker='2cf86250-d309-4b81-818e-1537f3dba6e5')
def test_do_strategy_list_detail(self):
strategy1 = resource.Strategy(mock.Mock(), STRATEGY_1)
strategy2 = resource.Strategy(mock.Mock(), STRATEGY_2)
self.m_strategy_mgr.list.return_value = [
strategy1, strategy2]
exit_code, results = self.run_cmd('strategy list --detail')
self.assertEqual(0, exit_code)
self.assertEqual(
[self.resource_as_dict(strategy1, self.FIELDS,
self.FIELD_LABELS),
self.resource_as_dict(strategy2, self.FIELDS,
self.FIELD_LABELS)],
results)
self.m_strategy_mgr.list.assert_called_once_with(detail=True)
def test_do_strategy_list_filter_by_goal_name(self):
strategy2 = resource.Strategy(mock.Mock(), STRATEGY_2)
self.m_strategy_mgr.list.return_value = [strategy2]
exit_code, results = self.run_cmd(
'strategy list --goal '
'DUMMY')
self.assertEqual(0, exit_code)
self.assertEqual(
[self.resource_as_dict(strategy2, self.SHORT_LIST_FIELDS,
self.SHORT_LIST_FIELD_LABELS)],
results)
self.m_strategy_mgr.list.assert_called_once_with(
detail=False,
goal='DUMMY',
)
def test_do_strategy_list_filter_by_goal_uuid(self):
strategy1 = resource.Strategy(mock.Mock(), STRATEGY_1)
self.m_strategy_mgr.list.return_value = [strategy1]
exit_code, results = self.run_cmd(
'strategy list --goal '
'fc087747-61be-4aad-8126-b701731ae836')
self.assertEqual(0, exit_code)
self.assertEqual(
[self.resource_as_dict(strategy1, self.SHORT_LIST_FIELDS,
self.SHORT_LIST_FIELD_LABELS)],
results)
self.m_strategy_mgr.list.assert_called_once_with(
detail=False,
goal='fc087747-61be-4aad-8126-b701731ae836',
)
def test_do_strategy_show_by_uuid(self):
strategy = resource.Strategy(mock.Mock(), STRATEGY_1)
self.m_strategy_mgr.get.return_value = strategy
exit_code, result = self.run_cmd(
'strategy show f8e47706-efcf-49a4-a5c4-af604eb492f2')
self.assertEqual(0, exit_code)
self.assertEqual(
self.resource_as_dict(strategy, self.FIELDS, self.FIELD_LABELS),
result)
self.m_strategy_mgr.get.assert_called_once_with(
'f8e47706-efcf-49a4-a5c4-af604eb492f2')
def test_do_strategy_state(self):
strategy1 = resource.Strategy(mock.Mock(), STRATEGY_1)
strategy_req = [
{'type': 'Datasource', 'mandatory': True,
'comment': '', 'state': 'gnocchi: True'},
{'type': 'Metrics', 'mandatory': False,
'comment': '', 'state': jsonutils.dumps([
{'compute.node.cpu.percent': 'available'},
{'cpu_util': 'available'},
{'memory.resident': 'available'},
{'hardware.memory.used': 'not available'}])},
{'type': 'CDM', 'mandatory': True,
'comment': '',
'state': jsonutils.dumps([{'compute_model': 'available'},
{'storage_model': 'not available'}])},
{'type': 'Name', 'mandatory': '', 'comment': '',
'state': strategy1.name}]
requirements = [resource.Strategy(mock.Mock(), req)
for req in strategy_req]
self.m_strategy_mgr.state.return_value = requirements
exit_code, results = self.run_cmd('strategy state basic')
self.assertEqual(0, exit_code)
self.assertEqual(
[self.resource_as_dict(req, self.STATE_FIELDS,
self.STATE_FIELD_LABELS)
for req in requirements],
results)
|
[
"feys-00@mail.ru"
] |
feys-00@mail.ru
|
688a3352badf0b2f3f6ee521627b783d65c0d14a
|
d18ed72d6f8d27dd8a13eab5c6366f9dca48aa6b
|
/espresso/lab/hydra/usecases/cassandra/cassandra/time/Scheduler.py
|
29bbabb9f1513382a67171657fec3a84c5a2406f
|
[] |
no_license
|
danse-inelastic/AbInitio
|
6f1dcdd26a8163fa3026883fb3c40f63d1105b0c
|
401e8d5fa16b9d5ce42852b002bc2e4274afab84
|
refs/heads/master
| 2021-01-10T19:16:35.770411
| 2011-04-12T11:04:52
| 2011-04-12T11:04:52
| 34,972,670
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,527
|
py
|
class Scheduler(object):
def __init__(self):
        # getCurrentTime (and Alarm below) are imported at the bottom of this module
        self.now = getCurrentTime()
self.alarmIndex = []
self.alarms = {}
return
def alarm(self, interval, callback):
"""Call the given callback after the specified time interval
elapses."""
from pyre.units.time import second
alarmTime = self.now + interval/second
        newAlarm = Alarm(alarmTime)
alarm = self.alarms.setdefault(alarmTime, newAlarm)
alarm.append(callback)
if alarm is newAlarm:
self.alarmIndex.append(alarmTime)
self.alarmIndex.sort(reverse = True)
return
def poll(self):
"""Call the callbacks for any alarms that have gone off.
Answer the number of seconds we can sleep until the next
alarm. If there are no more alarms, answer None."""
self.updateInternalClock()
activeAlarm = self.activeAlarm
if activeAlarm is None:
return None # sleep indefinitely
while activeAlarm.time <= self.now:
for callback in activeAlarm:
callback()
# activate the next alarm
activeAlarm = self.popActiveAlarm()
if activeAlarm is None:
return None # sleep indefinitely
return activeAlarm.time - self.now
# private
def updateInternalClock(self):
"""Advance our internal clock to the current system time."""
        now = getCurrentTime()
if now < self.now:
self.clockSetBack(self.now - now)
self.now = now
return
def clockSetBack(self, delta):
"""The system clock was set back; update our internal data
structures."""
if not self.alarms:
return # nothing to do
self.alarmIndex = []
for alarm in self.alarms:
alarm.time -= delta
self.alarmIndex.append(alarm.time)
self.alarmIndex.sort(reverse = True)
return
def getActiveAlarm(self):
if self.alarmIndex:
return self.alarms[self.alarmIndex[-1]]
return None
activeAlarm = property(getActiveAlarm)
def popActiveAlarm(self):
"""Discard the currently active alarm. Answer the new active
alarm, if any."""
time = self.alarmIndex.pop()
self.alarms.pop(time)
return self.activeAlarm
from time import time as getCurrentTime
from Alarm import Alarm
|
[
"dexity@gmail.com"
] |
dexity@gmail.com
|
2c492ea4b85b3ed7315977442b45bb309b1f88a1
|
3c59b7bde01cfbc1fbd170883393e8ebf7a0a92f
|
/HackerRank/Binary Search Tree-Lowest Common Ancestor.py
|
50b0ac929272da74203ba36f002280fdd857dc6f
|
[] |
no_license
|
gf234/python_problem_solving
|
93ae00d940091131d8f8b06e478e385e4c2a4503
|
4c95751f5a687215c14bf61c37e6dc2e7e752342
|
refs/heads/main
| 2023-05-10T07:28:12.351006
| 2021-06-14T04:59:33
| 2021-06-14T04:59:33
| 314,479,583
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
def lca(root, v1, v2):
if root.info < v1 and root.info < v2:
return lca(root.right, v1, v2)
elif root.info > v1 and root.info > v2:
return lca(root.left, v1, v2)
return root
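# Example (assuming HackerRank's node layout with .info/.left/.right):
# in the BST 4 -> (2 -> (1, 3), 6), lca(root, 1, 3) descends left from 4
# (both values are smaller) and stops at 2, since 1 and 3 lie on opposite
# sides of it.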
|
[
"gf265@naver.com"
] |
gf265@naver.com
|
94403f1b21bd73a763eaa34c5f40bef0076041d1
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/jnk1_input/24/24-36_MD_NVT_rerun/set_2.py
|
3ad7ebd55a5bdc277eef71cc5e0ea648a32b10d0
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 741
|
py
|
import os
dir = '/mnt/scratch/songlin3/run/jnkl/L624/MD_NVT_rerun/ti_one-step/24_36/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_2.in'
temp_pbs = filesdir + 'temp_2.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_2.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_2.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
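# Hypothetical pure-Python equivalent of the cp + sed pair above, not from
# the source: read the template, substitute the lambda value, write the copy.
# with open(temp_prodin) as src:
#     text = src.read().replace('XXX', '%6.5f' % j)
# with open(prodin, 'w') as dst:
#     dst.write(text)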
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
53d454a183cbff0e567046f5fd4165f5a6a8bd6d
|
41d96fec63aff02b9eb0152002f45b1b867bfafe
|
/experiments/tools/noise.py
|
b725ec234fe89a9a7e61b4a4de3dd5803acf4c04
|
[] |
no_license
|
jon--lee/dart_dev
|
fef7b18e0c05ac64b3a1b1c42c1e0ac071504619
|
2539f1d918c07c15dea8d72865bb2faf1a49d22c
|
refs/heads/master
| 2020-03-14T04:29:23.200345
| 2018-06-21T07:03:38
| 2018-06-21T07:03:38
| 131,442,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,239
|
py
|
import numpy as np
import statistics
import IPython
def sample_covariance_lnr(env, lnr, sup, samples, T):
    d = env.action_space.shape[0]
    cov = np.zeros((d, d))
for s in range(samples):
states, tmp_actions, _, _ = statistics.collect_traj(env, lnr, T)
sup_actions = np.array([sup.intended_action(s) for s in states])
lnr_actions = np.array(tmp_actions)
length = len(tmp_actions)
diff = sup_actions - lnr_actions
cov = cov + np.dot(diff.T, diff) / float(length)
return cov / float(samples)
def sample_covariance_sup(env, lnr, sup, samples, T):
    d = env.action_space.shape[0]
    cov = np.zeros((d, d))
for s in range(samples):
states, tmp_actions, _, _ = statistics.collect_traj(env, sup, T)
sup_actions = np.array(tmp_actions)
lnr_actions = np.array([lnr.intended_action(s) for s in states])
length = len(tmp_actions)
diff = sup_actions - lnr_actions
cov = cov + np.dot(diff.T, diff) / float(length)
return cov / float(samples)
def sample_covariance_trajs(env, lnr, trajs, T):
d = env.action_space.shape[0]
cov = np.zeros((d, d))
for states, i_actions in trajs:
length = len(i_actions)
if not length == 0:
sup_actions = np.array([a for a in i_actions])
lnr_actions = np.array([lnr.intended_action(s) for s in states])
diff = sup_actions - lnr_actions
cov = cov + np.dot(diff.T, diff) / float(length)
print "Trajs: " + str(len(trajs))
return cov / float(len(trajs))
def sample_iso_cov_lnr(env, lnr, sup, samples, T):
d = env.action_space.shape[0]
cov = sample_covariance_lnr(env, lnr, sup, samples, T)
return np.trace(cov) / float(d) * np.identity(d)
def sample_iso_cov_sup(env, lnr, sup, samples, T):
d = env.action_space.shape[0]
cov = sample_covariance_sup(env, lnr, sup, samples, T)
return np.trace(cov) / float(d) * np.identity(d)
def sample_epsilon_lnr(env, lnr, sup, samples, T):
surr_loss = statistics.evaluate_agent_disc(env, lnr, sup, T, samples)
return surr_loss
def sample_epsilon_sup(env, lnr, sup, samples, T):
loss = statistics.evaluate_sup_disc(env, lnr, sup, T, samples)
return loss
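# --- hypothetical smoke test, not from the source ---
# sample_covariance_trajs only needs the action dimension and a learner with
# intended_action; with a zero learner the estimate reduces to the mean outer
# product of the supervisor actions. The stubs below are assumptions.
# class _Env:
#     class action_space: shape = (2,)
# class _Zero:
#     def intended_action(self, s): return np.zeros(2)
# trajs = [(np.zeros((3, 2)), [np.ones(2)] * 3)]
# print(sample_covariance_trajs(_Env(), _Zero(), trajs, T=3))  # ~ np.ones((2, 2))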
|
[
"123abcjonathanlee@gmail.com"
] |
123abcjonathanlee@gmail.com
|
17c41d0c9e81aa5d9e9f1da1181bc65e32417ecb
|
d2a0c5b7f9977720eb588a664b6ad40486691baa
|
/sports/tasks.py
|
30dd137c0455fd2cbc90eb28af09f7dac89ebf4a
|
[] |
no_license
|
cjredmond/final_project
|
37df2ab88ff6e1aa06af12a196d5d7bc4cda71e0
|
ae9bbabae8f611d88d12e045ffedf87a077b0580
|
refs/heads/master
| 2020-07-26T22:25:44.357141
| 2016-12-15T19:24:25
| 2016-12-15T19:24:25
| 73,711,954
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,221
|
py
|
from celery import shared_task
from sports.scraper import usable_data, fix_names, nba_scores
from sports.nhl_scraper import nhl_scores, nhl_usable_data
from sports.nfl_scraper import nfl_scores, nfl_usable_data
from sports.models import Score, Team, Squad, Clock
from django.utils import timezone
@shared_task
def cal():
items_nba = usable_data(fix_names(nba_scores()))
for dictionary in items_nba:
if dictionary['winner'] == 'LA Lakers':
winner = Team.objects.get(name='Lakers')
loser = Team.objects.get(city=dictionary['loser'], sport='k')
elif dictionary['loser'] == 'LA Lakers':
winner = Team.objects.get(city=dictionary['winner'], sport='k')
loser = Team.objects.get(name='Lakers')
else:
winner = Team.objects.get(city=dictionary['winner'], sport='k')
loser = Team.objects.get(city=dictionary['loser'], sport='k')
if Score.objects.filter(tag=dictionary['tag']):
x = Score.objects.filter(tag=dictionary['tag'])
prev_winner = Score.objects.get(tag=dictionary['tag'], team=winner)
y = list(prev_winner.active_squad.all())
prev_winner.delete()
Score.objects.create(team=winner, pts=dictionary['winner_pts'],tag=dictionary['tag'], time=timezone.now())
squads = list(Squad.objects.filter(roster__name=winner.name))
current = Score.objects.get(team=winner,tag=dictionary['tag'])
current.active_squad.add(*y)
current.save()
prev_loser = Score.objects.get(tag=dictionary['tag'], team=loser)
y = list(prev_loser.active_squad.all())
prev_loser.delete()
Score.objects.create(team=loser, pts=dictionary['loser_pts'],tag=dictionary['tag'], time=timezone.now())
current = Score.objects.get(team=loser,tag=dictionary['tag'])
current.active_squad.add(*y)
current.save()
else:
Score.objects.filter(tag=dictionary['tag']).delete()
Score.objects.create(team=winner, pts=dictionary['winner_pts'],tag=dictionary['tag'], time=timezone.now())
squads = list(Squad.objects.filter(roster__name=winner.name))
current = Score.objects.get(team=winner,tag=dictionary['tag'])
current.active_squad.add(*squads)
current.save()
Score.objects.create(team=loser, pts=dictionary['loser_pts'],tag=dictionary['tag'], time=timezone.now())
squads = list(Squad.objects.filter(roster__name=loser.name))
current = Score.objects.get(team=loser,tag=dictionary['tag'])
current.active_squad.add(*squads)
current.save()
########## HOCKEY ##########
items_nhl = nhl_usable_data(fix_names(nhl_scores()))
for dictionary in items_nhl:
winner = Team.objects.get(city=dictionary['winner'], sport='h')
loser = Team.objects.get(city=dictionary['loser'], sport='h')
        # check for an existing score first; deleting it up front made the
        # update branch below unreachable (cf. the NBA branch above)
        x = Score.objects.filter(tag=dictionary['tag'])
        if x:
prev_winner = Score.objects.get(tag=dictionary['tag'], team=winner)
y = list(prev_winner.active_squad.all())
prev_winner.delete()
Score.objects.create(team=winner, pts=dictionary['winner_pts'],tag=dictionary['tag'], time=timezone.now())
squads = list(Squad.objects.filter(roster__name=winner.name))
current = Score.objects.get(team=winner,tag=dictionary['tag'])
current.active_squad.add(*y)
current.save()
prev_loser = Score.objects.get(tag=dictionary['tag'], team=loser)
y = list(prev_loser.active_squad.all())
prev_loser.delete()
Score.objects.create(team=loser, pts=dictionary['loser_pts'],tag=dictionary['tag'], time=timezone.now())
current = Score.objects.get(team=loser,tag=dictionary['tag'])
current.active_squad.add(*y)
current.save()
else:
Score.objects.filter(tag=dictionary['tag']).delete()
Score.objects.create(team=winner, pts=dictionary['winner_pts'],tag=dictionary['tag'], time=timezone.now())
squads = list(Squad.objects.filter(roster__name=winner.name))
current = Score.objects.get(team=winner,tag=dictionary['tag'])
current.active_squad.add(*squads)
current.save()
Score.objects.create(team=loser, pts=dictionary['loser_pts'],tag=dictionary['tag'], time=timezone.now())
squads = list(Squad.objects.filter(roster__name=loser.name))
current = Score.objects.get(team=loser,tag=dictionary['tag'])
current.active_squad.add(*squads)
current.save()
### FOOTBALL ######
# items_nfl = nfl_usable_data(fix_names(nfl_scores()))
# for dictionary in items_nfl:
# winner = Team.objects.get(city=dictionary['winner'], sport='f')
# loser = Team.objects.get(city=dictionary['loser'], sport='f')
# Score.objects.filter(tag=dictionary['tag']).delete()
#
# x = Score.objects.filter(tag=dictionary['tag'])
# if x:
# prev_winner = Score.objects.get(tag=dictionary['tag'], team=winner)
# y = list(prev_winner.active_squad.all())
# prev_winner.delete()
# Score.objects.create(team=winner, pts=dictionary['winner_pts'],tag=dictionary['tag'], time=timezone.now())
# squads = list(Squad.objects.filter(roster__name=winner.name))
# current = Score.objects.get(team=winner,tag=dictionary['tag'])
# current.active_squad.add(*y)
# current.save()
#
# prev_loser = Score.objects.get(tag=dictionary['tag'], team=loser)
# y = list(prev_loser.active_squad.all())
# prev_loser.delete()
# Score.objects.create(team=loser, pts=dictionary['loser_pts'],tag=dictionary['tag'], time=timezone.now())
# current = Score.objects.get(team=loser,tag=dictionary['tag'])
# current.active_squad.add(*y)
# current.save()
#
# else:
# Score.objects.filter(tag=dictionary['tag']).delete()
# Score.objects.create(team=winner, pts=dictionary['winner_pts'],tag=dictionary['tag'], time=timezone.now())
# squads = list(Squad.objects.filter(roster__name=winner.name))
# current = Score.objects.get(team=winner,tag=dictionary['tag'])
# current.active_squad.add(*squads)
# current.save()
#
# Score.objects.create(team=loser, pts=dictionary['loser_pts'],tag=dictionary['tag'], time=timezone.now())
# squads = list(Squad.objects.filter(roster__name=loser.name))
# current = Score.objects.get(team=loser,tag=dictionary['tag'])
# current.active_squad.add(*squads)
# current.save()
# @shared_task
# def timer():
# clock = Clock.objects.get(id=1)
# if clock.time == 0:
# pass
# else:
# clock.time = clock.time - 1
# clock.save()
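# --- hypothetical scheduling sketch, not from the source ---
# cal() is a shared_task, so it can be run periodically with celery beat,
# assuming a configured Celery app named `app`:
# app.conf.beat_schedule = {
#     'refresh-scores': {'task': 'sports.tasks.cal', 'schedule': 60.0},
# }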
|
[
"connor.redmond@gmail.com"
] |
connor.redmond@gmail.com
|
fa474f19b5c1d31fb875338677ede2815df5a871
|
ae9bd6f9a8fc4133a03db60910c25617b65732f1
|
/pyro_models/bcm/bernoulli.py
|
9f6e2ccf839bc0c52db09b353c0c27383d26dbc1
|
[
"MIT"
] |
permissive
|
afcarl/pyro-models
|
0af1a239af81436b1980752c132f288ea59bcbc3
|
1fc3f4b27fe946a99b2ddd6b57cca16f8c7da181
|
refs/heads/master
| 2022-01-27T13:15:33.641771
| 2019-07-09T18:55:39
| 2019-07-09T18:55:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 750
|
py
|
# model file: example-models/basic_estimators/bernoulli.stan
import torch
import pyro
import pyro.distributions as dist
def init_vector(name, dims=None):
return pyro.sample(name, dist.Normal(torch.zeros(dims), 0.2 * torch.ones(dims)).to_event(1))
def validate_data_def(data):
assert 'N' in data, 'variable not found in data: key=N'
assert 'y' in data, 'variable not found in data: key=y'
# initialize data
N = data["N"]
y = data["y"]
def init_params(data):
params = {}
return params
def model(data, params):
    # initialize data
    N = data["N"]
    y = data["y"]
    # theta is a single success probability shared by all N observations,
    # matching the referenced Stan model; sampling it inside the plate would
    # give one theta per data point
    theta = pyro.sample("theta", dist.Beta(1., 1.))
    with pyro.plate("data", N):
        pyro.sample('obs', dist.Bernoulli(theta), obs=y)
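if __name__ == "__main__":
    # hypothetical smoke test, not from the source: trace the model on
    # synthetic coin flips
    data = {"N": 10, "y": torch.bernoulli(0.7 * torch.ones(10))}
    validate_data_def(data)
    model(data, init_params(data))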
|
[
"jonathanp.chen@gmail.com"
] |
jonathanp.chen@gmail.com
|
d5b69845bfc9fdf1ab9d7e507f7523442b3dc405
|
def27d5864764b877b6786835ec97f2bd74c6ba8
|
/medium/RotateImage.py
|
4c0c646667bb3773d3d30ad5ef12082eb7604994
|
[] |
no_license
|
bolan2014/leetcode
|
f6cf38a49a9250abeb36543ea2498062c58e811d
|
1c35fde3a65c4f216218f459736d4c39a29980d5
|
refs/heads/master
| 2021-04-09T16:59:41.494568
| 2017-05-10T03:47:14
| 2017-05-10T03:47:14
| 46,648,353
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
class Solution(object):
def rotate(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
n = len(matrix)
for i in range(n):
for j in range(i, n):
matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
for i in range(n):
            for j in range(n // 2):  # integer division; n / 2 is a float on Python 3
matrix[i][j], matrix[i][n - 1 - j] = matrix[i][n - 1 - j], matrix[i][j]
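if __name__ == '__main__':
    # hypothetical check, not part of the original solution: rotate a 3x3
    # matrix 90 degrees clockwise in place
    m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    Solution().rotate(m)
    assert m == [[7, 4, 1], [8, 5, 2], [9, 6, 3]]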
|
[
"1139217488@qq.com"
] |
1139217488@qq.com
|
cccb144b3b818fd1c34f78878597e6c7566e5ae1
|
18239524612cf572bfeaa3e001a3f5d1b872690c
|
/clients/keto/python/ory_keto_client/models/list_ory_access_control_policy_roles.py
|
012f3b490b2695312354cb16433a47b77482f36d
|
[
"Apache-2.0"
] |
permissive
|
simoneromano96/sdk
|
2d7af9425dabc30df830a09b26841fb2e8781bf8
|
a6113d0daefbbb803790297e4b242d4c7cbbcb22
|
refs/heads/master
| 2023-05-09T13:50:45.485951
| 2021-05-28T12:18:27
| 2021-05-28T12:18:27
| 371,689,133
| 0
| 0
|
Apache-2.0
| 2021-05-28T12:11:41
| 2021-05-28T12:11:40
| null |
UTF-8
|
Python
| false
| false
| 5,559
|
py
|
# coding: utf-8
"""
ORY Keto
A cloud native access control server providing best-practice patterns (RBAC, ABAC, ACL, AWS IAM Policies, Kubernetes Roles, ...) via REST APIs. # noqa: E501
The version of the OpenAPI document: v0.0.0-alpha.37
Contact: hi@ory.sh
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ory_keto_client.configuration import Configuration
class ListOryAccessControlPolicyRoles(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'flavor': 'str',
'limit': 'int',
'offset': 'int'
}
attribute_map = {
'flavor': 'flavor',
'limit': 'limit',
'offset': 'offset'
}
def __init__(self, flavor=None, limit=None, offset=None, local_vars_configuration=None): # noqa: E501
"""ListOryAccessControlPolicyRoles - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._flavor = None
self._limit = None
self._offset = None
self.discriminator = None
self.flavor = flavor
if limit is not None:
self.limit = limit
if offset is not None:
self.offset = offset
@property
def flavor(self):
"""Gets the flavor of this ListOryAccessControlPolicyRoles. # noqa: E501
The ORY Access Control Policy flavor. Can be \"regex\", \"glob\", and \"exact\" in: path # noqa: E501
:return: The flavor of this ListOryAccessControlPolicyRoles. # noqa: E501
:rtype: str
"""
return self._flavor
@flavor.setter
def flavor(self, flavor):
"""Sets the flavor of this ListOryAccessControlPolicyRoles.
The ORY Access Control Policy flavor. Can be \"regex\", \"glob\", and \"exact\" in: path # noqa: E501
:param flavor: The flavor of this ListOryAccessControlPolicyRoles. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and flavor is None: # noqa: E501
raise ValueError("Invalid value for `flavor`, must not be `None`") # noqa: E501
self._flavor = flavor
@property
def limit(self):
"""Gets the limit of this ListOryAccessControlPolicyRoles. # noqa: E501
The maximum amount of policies returned. in: query # noqa: E501
:return: The limit of this ListOryAccessControlPolicyRoles. # noqa: E501
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListOryAccessControlPolicyRoles.
The maximum amount of policies returned. in: query # noqa: E501
:param limit: The limit of this ListOryAccessControlPolicyRoles. # noqa: E501
:type: int
"""
self._limit = limit
@property
def offset(self):
"""Gets the offset of this ListOryAccessControlPolicyRoles. # noqa: E501
The offset from where to start looking. in: query # noqa: E501
:return: The offset of this ListOryAccessControlPolicyRoles. # noqa: E501
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListOryAccessControlPolicyRoles.
The offset from where to start looking. in: query # noqa: E501
:param offset: The offset of this ListOryAccessControlPolicyRoles. # noqa: E501
:type: int
"""
self._offset = offset
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListOryAccessControlPolicyRoles):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ListOryAccessControlPolicyRoles):
return True
return self.to_dict() != other.to_dict()
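if __name__ == '__main__':
    # hypothetical usage sketch, not part of the generated client: build a
    # query model and serialize it (requires the ory_keto_client package on
    # the path for the Configuration import above)
    roles = ListOryAccessControlPolicyRoles(flavor="exact", limit=10, offset=0)
    print(roles.to_dict())  # {'flavor': 'exact', 'limit': 10, 'offset': 0}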
|
[
"noreply@github.com"
] |
simoneromano96.noreply@github.com
|
d99415ddf223c396bd34c3886c0908198511a6c6
|
de6fb3a55196b6bd36a4fda0e08ad658679fb7a1
|
/modules/resource/orchestrator/src/delegate/geni/v3/utils/ro.py
|
5e877ef9e520129ca30707d53a4bf5b7ffbcc9fc
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
dana-i2cat/felix
|
4a87af639e4c7db686bfa03f1ae4ce62711615e3
|
059ed2b3308bda2af5e1942dc9967e6573dd6a53
|
refs/heads/master
| 2021-01-02T23:12:43.840754
| 2016-02-04T10:04:24
| 2016-02-04T10:04:24
| 17,132,912
| 4
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,153
|
py
|
from db.db_manager import db_sync_manager
from delegate.geni.v3.rm_adaptor import AdaptorFactory
from rspecs.ro.manifest_parser import ROManifestParser
from commons import CommonUtils
from tn import TNUtils
from vl import VLUtils
import core
logger = core.log.getLogger("ro-utils")
class ROUtils(CommonUtils):
def __init__(self):
super(ROUtils, self).__init__()
def manage_describe(self, peer, urns, creds):
try:
adaptor, uri = AdaptorFactory.create_from_db(peer)
logger.debug("Adaptor=%s, uri=%s" % (adaptor, uri))
m, urn, ss = adaptor.describe(urns, creds[0]["geni_value"])
ret = self.generate_internal_return(m)
return (ret, urn, ss)
except Exception as e:
logger.critical("manage_describe exception: %s", e)
raise e
def manage_provision(self, peer, urns, creds, beffort, etime, gusers):
try:
adaptor, uri = AdaptorFactory.create_from_db(peer)
logger.debug("Adaptor=%s, uri=%s" % (adaptor, uri))
m, urn = adaptor.provision(
urns, creds[0]["geni_value"], beffort, etime, gusers)
ret = self.generate_internal_return(m)
return (ret, urn)
except Exception as e:
# It is possible that RO does not implement this method!
if beffort:
logger.error("manage_provision exception: %s", e)
                return ({}, [])  # ret was never assigned on this path; return an empty result
else:
logger.critical("manage_provision exception: %s", e)
raise e
def generate_internal_return(self, m):
ret = {"com_nodes": [], "sdn_slivers": [],
"tn_nodes": [], "tn_links": [],
"se_nodes": [], "se_links": []}
manifest = ROManifestParser(from_string=m)
logger.debug("ROManifestParser=%s" % (manifest,))
self.validate_rspec(manifest.get_rspec())
ret["com_nodes"] = manifest.com_nodes()
logger.info("COMNodes(%d)=%s" %
(len(ret["com_nodes"]), ret["com_nodes"],))
ret["sdn_slivers"] = manifest.sdn_slivers()
logger.info("SDNSlivers(%d)=%s" %
(len(ret["sdn_slivers"]), ret["sdn_slivers"],))
ret["tn_nodes"] = manifest.tn_nodes()
logger.info("TNNodes(%d)=%s" %
(len(ret["tn_nodes"]), ret["tn_nodes"],))
ret["tn_links"] = manifest.tn_links()
logger.info("TNLinks(%d)=%s" %
(len(ret["tn_links"]), ret["tn_links"],))
ret["se_nodes"] = manifest.se_nodes()
logger.info("SENodes(%d)=%s" %
(len(ret["se_nodes"]), ret["se_nodes"],))
ret["se_links"] = manifest.se_links()
logger.info("SELinks(%d)=%s" %
(len(ret["se_links"]), ret["se_links"],))
return ret
@staticmethod
def generate_list_resources(rspec, geni_available=False, show_interdomain=False, inner_call=True):
for n in db_sync_manager.get_com_nodes():
logger.debug("COM resources node=%s" % (n,))
rspec.com_node(n, inner_call)
for d in db_sync_manager.get_sdn_datapaths():
logger.debug("OF resources dpid=%s" % (d,))
rspec.datapath(d, inner_call)
for l in db_sync_manager.get_com_links():
logger.debug("COM resources link=%s" % (l,))
rspec.com_link(l, inner_call)
(of_links, fed_links) = db_sync_manager.get_sdn_links()
for l in of_links:
logger.debug("OF resources of-link=%s" % (l,))
rspec.of_link(l, inner_call)
for l in fed_links:
logger.debug("OF resources fed-link=%s" % (l,))
rspec.fed_link(l, inner_call)
# Internal use (M/RO) -- OR show inter-domain resources, through config flag
if inner_call or show_interdomain:
ROUtils.generate_list_resources_internal(rspec, inner_call)
# External use (experimenter) -- OR show inter-domain resources, through config flag
if geni_available or show_interdomain:
ROUtils.generate_list_resources_external(rspec, inner_call)
return rspec
@staticmethod
def generate_list_resources_internal(rspec, inner_call=True):
"""
Appends TN and SE (internal) information when any of the following:
* It is an internal call (MRO->RO, MRO->MRO)
* Configuration flag "interdomain_available_to_user" is set to True
"""
for n in db_sync_manager.get_tn_nodes():
logger.debug("TN resources node=%s" % (n,))
rspec.tn_node(n, inner_call)
for n in db_sync_manager.get_se_nodes():
logger.debug("SE resources node=%s" % (n,))
rspec.se_node(n, inner_call)
for l in db_sync_manager.get_tn_links():
logger.debug("TN resources tn-link=%s" % (l,))
rspec.tn_link(l, inner_call)
for l in db_sync_manager.get_se_links():
logger.debug("SE resources se-link=%s" % (l,))
rspec.se_link(l, inner_call)
@staticmethod
def generate_list_resources_external(rspec, inner_call=True):
"""
Appends VL (external) information when any of the following:
* GENI flag "geni_available" is set to True
* Configuration flag "interdomain_available_to_user" is set to True
"""
for l in VLUtils.find_vlinks_from_tn_stps(TNUtils()):
logger.debug("VL resources vl-link=%s" % (l,))
rspec.vl_link(l, inner_call)
@staticmethod
def generate_describe_manifest(ro_manifest, ro_m_info):
for n in ro_m_info.get("com_nodes"):
ro_manifest.com_node(n)
for s in ro_m_info.get("sdn_slivers"):
ro_manifest.of_sliver(s)
for n in ro_m_info.get("tn_nodes"):
ro_manifest.tn_node(n)
for l in ro_m_info.get("tn_links"):
ro_manifest.tn_link(l)
for n in ro_m_info.get("se_nodes"):
ro_manifest.se_node(n)
for l in ro_m_info.get("se_links"):
ro_manifest.se_link(l)
return ro_manifest
|
[
"jenkins@integration.localhost"
] |
jenkins@integration.localhost
|
9746c655fff4a9fe413aa78411dd64741596373b
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/sdssj_233734.00+434646.2/sdB_SDSSJ_233734.00+434646.2_lc.py
|
db14c23c567efeaa0cbc6954ff394fa00c9bcd68
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
from gPhoton.gAperture import gAperture
def main():
    gAperture(band="NUV", skypos=[354.391667,43.7795], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_SDSSJ_233734.00+434646.2/sdB_SDSSJ_233734.00+434646.2_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
98fcf5fcfca6641dd017597b7d01b3e14688866e
|
f99f30752e9bb9e023b37c731f64fb2155ac3daf
|
/05/for.py
|
7557743e98836a3021a0668b8696a0f0828f700a
|
[] |
no_license
|
chu83/python-basics
|
148ff6977f5ca04775951d90ed1f5f763c51a9ff
|
19fe0937842c668f604876be0aeb0962a2630dd2
|
refs/heads/master
| 2023-01-19T01:29:25.203738
| 2020-11-29T18:34:33
| 2020-11-29T18:34:33
| 311,258,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,331
|
py
|
#for loop
print('========== for loop basics ===========')
a=['cat', 'cow', 'tiger']
for animal in a:
    print(animal, end=' ')
else:
    print('')
print('========== for loop over composite types ===========')
l1 = [('Dooly', 10), ('Michol', 20), ('Ddochi', 10)]
for t in l1:
    #print(f'name : {t[0]}, age : {t[1]}')
    print('name : %s, age : %d' %t)
print('========== sum of 1~10 ===========')
total = 0
for i in range(1, 11):
    total = total+i
print(total)
#break
for i in range(10):
    if i>5:
        break
    print(i, end=' ')
else:
    print('--end loop\n')
print('========== continue ===========')
for i in range(10):
    if i <= 5 :
        continue
    print(i, end=' ')
else:
    print('--end loop\n')
print('========== triangle ===========')
for i in range(0, 5):
    for j in range(0,i+1):
        print('*', end='')
    print("")
print('========== inverted triangle ===========')
for i in range(10, 1, -1):
    if i <= 10:
        star = '*' * i
        print(star)
print('========== equilateral triangle ===========')
for i in range(1,10):
    if i <= 10:
        star = '*' * i
        vacant = ' '* (10-i)
        print(star+vacant)
print('========== multiplication table ===========')
for i in range(1,10):
    for j in range(1, 10):
        print(f'{j} * {i} = {i*j} ', end='\t')
    print(end='\n')
|
[
"59534807+chu83@users.noreply.github.com"
] |
59534807+chu83@users.noreply.github.com
|
e031a15e54ff318ebc704179b54cb081cd8a680f
|
7ad19e854135977ee5b789d7c9bdd39d67ec9ea4
|
/mapping_models/setup.py
|
b83d962eff3cd62e6d0205c3795371958ee84111
|
[
"MIT"
] |
permissive
|
Leofltt/rg_sound_generation
|
1b4d522507bf06247247f3ef929c8d0b93015e61
|
8e79b4d9dce028def43284f80521a2ec61d0066c
|
refs/heads/main
| 2023-05-02T19:53:23.645982
| 2021-05-22T16:09:54
| 2021-05-22T16:09:54
| 369,842,561
| 0
| 0
|
MIT
| 2021-05-22T15:27:28
| 2021-05-22T15:27:27
| null |
UTF-8
|
Python
| false
| false
| 780
|
py
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="mapping_models",
version="0.0.1",
author="Sound Generation OSR",
author_email="amit.yadav.iitr@gmail.com",
description="mapping models module",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/TheSoundOfAIOSR/rg_sound_generation/mapping_models",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
packages=setuptools.find_packages(),
python_requires=">=3.7",
install_requires=[
"ddsp==1.0.1",
"click==7.1.2"
]
)
|
[
"amit.yadav.iitr@gmail.com"
] |
amit.yadav.iitr@gmail.com
|
5ddb60ad27fa77fedbf2551bbab1bb2e95fa771c
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_2692487_0/Python/kangz/skel.py
|
d65aa33398c04c3302fbb0196b83fee415960cd0
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,174
|
py
|
import sys
def concat_str(args):
    s = ""
    for arg in args:
        s += str(arg)
    return s
def debug(*args):
    sys.stderr.write(concat_str(args) + "\n")
def printf(*args):
    debug(*args)
    print(concat_str(args))
def int_input():
    return list(map(int, input().split(' ')))
#######################################################
def read_input():
    size, _ = int_input()
    motes = int_input()
    return size, motes
def solve(size, motes):
    #do not divide by zero
    if size == 1:
        return len(motes)
    motes.sort(reverse=True)
    mini = len(motes)
    n_added = 0
    while len(motes) != 0:
        if motes[-1] < size:
            size += motes[-1]
            motes.pop()
            mini = min(mini, n_added + len(motes))
        else:
            val = motes[-1]
            n = (val - size + 1 + size - 2) // (size - 1)
            n_added += n
            size += n * (size - 1)
    return mini
#######################################################
for n_test_case in range(1, int(input()) + 1):
    debug("Solving case ", n_test_case)
    printf("Case #", n_test_case, ": ", str(solve(*read_input())))
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
316a5d32e5f75abc519788e2b798965ebd1d55c5
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_136/2973.py
|
a8200601b088f6309637d43fb933939df7d0a51b
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
tests = int(input(''))
for t in range(tests):
    r = input('').split()
    C = float(r[0])
    F = float(r[1])
    X = float(r[2])
    time = 0.0
    increase = 2.0
    result = 100000000000000.0
    bound = int( (F*X - 2.0*C)/(F*C) )
    for i in range(bound+10):
        result = min(result, time + X/increase)
        time += C/increase
        increase += F
    print("Case #"+str(t+1)+":", result)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
eba134ac77216aec2247bf99d6822f1a554a1e1e
|
28431c6bdfd15b346741f62b06410d815c1a1482
|
/jupytex/tools.py
|
5142309970c92b973730541273841ea8f6bb80b3
|
[
"MIT"
] |
permissive
|
agoose77/jupytex
|
e3740c367f3fd87bc32819e0a91ef9674a14ff66
|
0906c35e29c6f4b6030ae7f86565d667fb30c1da
|
refs/heads/master
| 2021-06-09T22:31:58.200217
| 2021-05-21T13:55:40
| 2021-05-21T13:55:40
| 123,421,594
| 1
| 0
| null | 2019-01-30T09:12:09
| 2018-03-01T10:45:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,287
|
py
|
import logging
import pathlib
import subprocess
import typing
import importlib_resources as resources
from . import data
logger = logging.getLogger(__name__)
def get_resource_names(package: resources.Package) -> typing.Iterator[str]:
for name in resources.contents(package):
if name == "__init__.py":
continue
if resources.is_resource(package, name):
yield name
def install(directory: pathlib.Path):
logger.info(f"Installing Jupytex into {directory}")
for name in get_resource_names(data):
logger.info(f"Copying {name}")
source = resources.read_text(data, name)
(directory / name).write_text(source)
logger.info("Done!")
def uninstall(directory: pathlib.Path):
logger.info(f"Uninstalling Jupytex from {directory}")
for name in get_resource_names(data):
logger.info(f"Removing {name}")
resource_path = directory / name
if resource_path.exists():
resource_path.unlink()
logger.info("Done!")
def make(sys_args: typing.List[str]):
subprocess.call(["latexmk", "--shell-escape", *sys_args])
def clean(sys_args: typing.List[str], full: bool = False):
clean_type = "-C" if full else "-c"
subprocess.run(["latexmk", clean_type, *sys_args])
|
[
"goosey15@gmail.com"
] |
goosey15@gmail.com
|
05857782727dc8cbb0cce52c998e642c1192f449
|
1049d75b4d94564e54fbd0f2d8c36f774832bbf3
|
/career/migrations/0001_initial.py
|
2c6bf0213e1deef916116f664196fc4205095f74
|
[] |
no_license
|
wahid999/Alumni-Studen-Portal-FYP--master
|
b7597b4fd7f9e9e7966b09229b887bc156238c6b
|
1c9719f33fc338786220cbceb8e91789aa0161ab
|
refs/heads/master
| 2023-07-16T10:43:41.431096
| 2021-08-31T07:06:44
| 2021-08-31T07:06:44
| 401,582,254
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,083
|
py
|
# Generated by Django 3.0.5 on 2020-04-17 19:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CareerPost',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('postType', models.CharField(choices=[('internship', 'Internship'), ('job', 'Job')], max_length=30)),
('description', models.TextField()),
('careerFile', models.FileField(null=True, upload_to='Career(Job/Internship)')),
('datePosted', models.DateField(auto_now_add=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"wahidhussainturi@gmail.com"
] |
wahidhussainturi@gmail.com
|
5d61287b4f126287b3558dee2293d4b9935db595
|
43362c036fa9c4dd61ead35a38ede10f3e72c222
|
/setup.py
|
27bd1aebc9af315fd4e7a748e43a283f8c40ed78
|
[
"MIT"
] |
permissive
|
lowks/esios
|
acea4d86c1362641ab022d30f885dea980384473
|
2d0b00807d3e5900dfae7b89d3aacc4bc123066b
|
refs/heads/master
| 2021-01-15T09:47:59.190672
| 2016-03-18T13:17:39
| 2016-03-18T13:17:39
| 54,208,454
| 0
| 1
| null | 2016-03-18T14:43:14
| 2016-03-18T14:43:14
| null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
from setuptools import setup, find_packages
setup(
name='esios',
version='0.1.7',
packages=find_packages(),
url='https://github.com/gisce/esios',
license='MIT',
install_requires=['libsaas'],
author='GISCE-TI, S.L.',
author_email='devel@gisce.net',
description=''
)
|
[
"ecarreras@gisce.net"
] |
ecarreras@gisce.net
|
33fd60f33c6e12e205067a4078dfebf1066dbc9c
|
7d76d00142cd5c3b8d4aaeb917829ddf46871fb5
|
/mlir/lib/Bindings/Python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py
|
229458855939a794af5b3415dc548a23455b0cde
|
[
"LLVM-exception",
"Apache-2.0"
] |
permissive
|
pauljoo28/llvm-project
|
8ff49f620f6ae2e9bacaad83991cd0c2bc8b36bd
|
4bf8985f4fb1411831505a4b38265eb517783dc7
|
refs/heads/main
| 2023-04-23T07:14:36.877563
| 2021-04-06T20:23:58
| 2021-04-06T20:23:58
| 320,934,047
| 4
| 2
| null | 2020-12-12T22:09:36
| 2020-12-12T22:09:36
| null |
UTF-8
|
Python
| false
| false
| 2,315
|
py
|
from ..lang import *
T1 = TV.T1
T2 = TV.T2
Batch = S.Batch
@linalg_structured_op
def matmul(A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
"""Performs a matrix multiplacation of two 2D inputs.
Numeric casting is performed on the operands to the inner multiply, promoting
them to the same data type as the accumulator/output.
"""
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
@linalg_structured_op
def batch_matmul(A=TensorDef(T1, Batch, S.M, S.K),
B=TensorDef(T2, Batch, S.K, S.N),
C=TensorDef(U, Batch, S.M, S.N, output=True)):
"""Performs a batched matrix multiplacation of two 3D inputs.
Numeric casting is performed on the operands to the inner multiply, promoting
them to the same data type as the accumulator/output.
"""
implements(ContractionOpInterface)
C[D.b, D.m, D.n] += cast(U, A[D.b, D.m, D.k]) * cast(U, B[D.b, D.k, D.n])
@linalg_structured_op
def matvec(A=TensorDef(T1, S.M, S.N),
y=TensorDef(T2, S.N),
x=TensorDef(U, S.M, output=True)):
"""Performs a matrix-vector multiplication.
Numeric casting is performed on the operands to the inner multiply, promoting
them to the same data type as the accumulator/output.
"""
implements(ContractionOpInterface)
x[D.m] += cast(U, A[D.m, D.n]) * cast(U, y[D.n])
@linalg_structured_op
def vecmat(y=TensorDef(T1, S.M),
A=TensorDef(T2, S.M, S.N),
x=TensorDef(U, S.N, output=True)):
"""Performs a vector-matrix multiplacation.
Numeric casting is performed on the operands to the inner multiply, promoting
them to the same data type as the accumulator/output.
"""
implements(ContractionOpInterface)
x[D.n] += cast(U, y[D.m]) * cast(U, A[D.m, D.n])
@linalg_structured_op
def dot(A=TensorDef(T1, S.M), B=TensorDef(T2, S.M), C=TensorDef(U,
output=True)):
"""Performs a dot product of two vectors to a scalar result.
Numeric casting is performed on the operands to the inner multiply, promoting
them to the same data type as the accumulator/output.
"""
implements(ContractionOpInterface)
C[None] += cast(U, A[D.m]) * cast(U, B[D.m])
|
[
"stellaraccident@gmail.com"
] |
stellaraccident@gmail.com
|
8bdc625bce37546dcff8911293c12f4f99a0b92a
|
ff81a9d7880f1b85a1dc19d5eba5ac72d7179c86
|
/pychron/loading/tasks/loading_plugin.py
|
ab3f96269cb4288e7d554f9723dd6eaa10080d38
|
[
"Apache-2.0"
] |
permissive
|
UManPychron/pychron
|
2fb7e479a9f492423c0f458c70102c499e1062c4
|
b84c9fd70072f9cbda30abe2c471e64fe3dd75d8
|
refs/heads/develop
| 2022-12-03T23:32:45.579326
| 2020-01-29T19:02:20
| 2020-01-29T19:02:20
| 36,100,637
| 0
| 0
| null | 2015-05-23T00:10:06
| 2015-05-23T00:10:05
| null |
UTF-8
|
Python
| false
| false
| 3,651
|
py
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from envisage.ui.tasks.task_extension import TaskExtension
from envisage.ui.tasks.task_factory import TaskFactory
from pyface.tasks.action.schema_addition import SchemaAddition
# from pyface.action.group import Group
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.envisage.tasks.base_task_plugin import BaseTaskPlugin
from pychron.loading.loading_manager import LoadingManager
from pychron.loading.tasks.actions import SaveLoadingPDFAction, SaveTrayPDFAction, GenerateResultsAction
from pychron.loading.tasks.load_task import LoadingTask
from pychron.loading.tasks.loading_preferences import LoadingPreferencesPane
from pychron.loading.tasks.panes import LoadDockPane, LoadTablePane
from pychron.pychron_constants import DVC_PROTOCOL
class LoadingPlugin(BaseTaskPlugin):
name = 'Loading'
id = 'pychron.loading'
def _task_extensions_default(self):
actions = [SchemaAddition(id='save_loading_figure',
factory=SaveLoadingPDFAction,
path='MenuBar/file.menu'),
SchemaAddition(id='save_tray',
factory=SaveTrayPDFAction,
path='MenuBar/file.menu'),
SchemaAddition(id='generate_results',
factory=GenerateResultsAction,
path='MenuBar/file.menu')]
return [TaskExtension(task_id='pychron.loading',
actions=actions)]
def _tasks_default(self):
return [TaskFactory(id='pychron.loading',
factory=self._load_task_factory,
name='Loading',
accelerator='Ctrl+Y',
task_group='experiment')]
def _service_offers_default(self):
load = self.service_offer_factory(protocol=LoadDockPane,
factory=LoadDockPane)
table = self.service_offer_factory(protocol=LoadTablePane,
factory=LoadTablePane)
man = self.service_offer_factory(protocol=LoadingManager,
factory=self._loading_manager_factory)
return [load, table, man]
def _loading_manager_factory(self):
return LoadingManager(application=self.application,
dvc=self.application.get_service(DVC_PROTOCOL))
def _load_task_factory(self):
return LoadingTask(manager=self._loading_manager_factory())
def _preferences_panes_default(self):
return [LoadingPreferencesPane]
# ============= EOF =============================================
|
[
"jirhiker@gmail.com"
] |
jirhiker@gmail.com
|