blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
156756b04cbdf6811f0ff5436305d7c09339f87d
|
e70cb371f8642ac597f3a2266da3be205d971af5
|
/X0406.py
|
ce95ba2dca74845f1048bbcc82f57b8c5550fd4e
|
[] |
no_license
|
bgnori/X0406
|
0b5a2545832c6283c07cd065a21697cf9f52042a
|
ed0f7ee1f8112043a246a64c99bff8a427541b03
|
refs/heads/master
| 2021-01-20T05:07:38.994728
| 2015-10-06T12:28:37
| 2015-10-06T12:28:37
| 40,598,914
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,099
|
py
|
#!/bin/python
#-*- coding=utf-8 -*-
import re
import json
DEBUG = False
x = re.compile("(?P<IsValuationAccountCode>\()?(?P<AccountCode>\d\d\d\d)\)?,(?P<rest>.+)")
start_end = re.compile("\[(?P<start>\d\d\d\d)-(?P<end>\d\d\d\d)\]")
class IDNode(object):
    """A node in an account-code interval tree.

    Each node carries a numeric code, a display title, a valuation flag,
    an inclusive [start, end] code range, and an optional note.  Children
    whose codes fall inside an existing child's range are nested under
    that child.
    """
    def __init__(self, code, title, isvaluation, start, end, note):
        self.children = []
        self.code = code
        self.title = title
        self.isvaluation = isvaluation
        self.start = start
        self.end = end
        self.note = note
    def add(self, node):
        """Insert `node` under the first child whose range contains its
        code (recursively); otherwise adopt it as a direct child."""
        for child in self.children:
            if child.start <= node.code <= child.end:
                child.add(node)
                return
        self.children.append(node)
    def visit(self, f, n=None):
        """Call f(depth, node) on this node, then on all descendants."""
        depth = 0 if n is None else n
        f(depth, self)
        for child in self.children:
            child.visit(f, depth + 1)
    def findByCode(self, code):
        """Return the node carrying exactly `code`, or None."""
        if code == self.code:
            return self
        for child in self.children:
            if code == child.code:
                return child
            if child.start <= code <= child.end:
                return child.findByCode(code)
        return None
    def findByTitle(self, title):
        """Depth-first search for a node titled `title`; None if absent."""
        if title == self.title:
            return self
        for child in self.children:
            hit = child.findByTitle(title)
            if hit is not None:
                return hit
        return None
def load(f):
tree = IDNode(code=0, title="勘定科目", isvaluation=False, start=1, end=9999, note=None)
for line in f:
m = x.match(line)
if m:
d = m.groupdict()
assert(d['AccountCode'] is not None)
start = None
end = None
isvaluation = d['IsValuationAccountCode'] is not None
code = int(d['AccountCode'])
note = None
for i, part in enumerate(d["rest"].split(",")):
if i == 0:
title = part
else:
m = start_end.match(part)
if m is not None:
d = m.groupdict()
start = int(d["start"])
end = int(d["end"])
else:
note = part
if DEBUG:
print code, start, end
if start is None:
m = code
r = 1000
while r > 0:
n, m = divmod(m, r)
if DEBUG:
print n, m
if n == 0:
start = code + 1
end = code + r*10 -1
break
r = r / 10
if DEBUG:
print code, start, end, "default"
tree.add(IDNode(code, title, isvaluation, start, end, note))
return tree
if __name__ == "__main__":
    # CLI: read account-code lines from stdin and print the resulting
    # tree, indented one space per depth level.  (Python 2 print syntax.)
    import sys
    tree = load(sys.stdin.readlines())
    def foo(n, node):
        # n is the depth supplied by IDNode.visit.
        print ' '*n, node.code, node.title, node.isvaluation, node.note
    tree.visit(foo)
|
[
"bgnori@gmail.com"
] |
bgnori@gmail.com
|
f7ee387f7c79dc4fbb42c1d6b123cb829d3698e5
|
5509d3b5bbcc393684f7d2fc7fc11bb12ed1911a
|
/env/lib/python2.7/site-packages/pyramid_debugtoolbar-2.4.2-py2.7.egg/pyramid_debugtoolbar/panels/traceback.py
|
d80e51455eb29e63e6e7c926c6b4ed65eda11e19
|
[] |
no_license
|
jundong/CRManager
|
99fd6c0eda084354d9237e11d07ef82124c22e1e
|
4306bf4d2b29b19d4b3092aab152192f7d623a19
|
refs/heads/master
| 2021-01-21T04:47:26.125045
| 2016-07-29T15:07:04
| 2016-07-29T15:07:04
| 50,995,792
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,168
|
py
|
import re
from pyramid_debugtoolbar.tbtools import Traceback
from pyramid_debugtoolbar.panels import DebugPanel
from pyramid_debugtoolbar.utils import escape
from pyramid_debugtoolbar.utils import STATIC_PATH
from pyramid_debugtoolbar.utils import ROOT_ROUTE_NAME
from pyramid_debugtoolbar.utils import EXC_ROUTE_NAME
_ = lambda x: x
class TracebackPanel(DebugPanel):
    """Debug-toolbar panel that renders the traceback of a failed request."""

    name = 'traceback'
    template = 'pyramid_debugtoolbar.panels:templates/traceback.dbtmako'
    title = _('Traceback')
    nav_title = title

    def __init__(self, request):
        self.request = request
        self.exc_history = request.exc_history

    @property
    def has_content(self):
        # The toolbar attaches `pdbt_tb` to the request only when an
        # exception was captured for it.
        return hasattr(self.request, 'pdbt_tb')

    def process_response(self, response):
        """Collect template data from the captured traceback, if any."""
        if not self.has_content:
            return
        tb = self.request.pdbt_tb
        escaped_exc = escape(tb.exception)
        rendered_summary = Traceback.render_summary(
            tb, include_title=False, request=self.request)
        self.data = {
            'evalex': 'true' if self.exc_history.eval_exc else 'false',
            'console': 'false',
            'lodgeit_url': None,
            'title': escaped_exc,
            'exception': escaped_exc,
            'exception_type': escape(tb.exception_type),
            'summary': rendered_summary,
            'plaintext': tb.plaintext,
            # Collapse runs of dashes in the plaintext variant.
            'plaintext_cs': re.sub('-{2,}', '-', tb.plaintext),
            'traceback_id': tb.id,
            'token': self.request.registry.pdtb_token,
            'url': '',  # self.request.route_url(EXC_ROUTE_NAME, _query=qs)
        }

    def render_content(self, request):
        return super(TracebackPanel, self).render_content(request)

    def render_vars(self, request):
        return {
            'static_path': request.static_url(STATIC_PATH),
            'root_path': request.route_url(ROOT_ROUTE_NAME),
        }
|
[
"jundong.xu@spirent.com"
] |
jundong.xu@spirent.com
|
9431a9423d7fad2d5a4e7c1636dac7a36b374906
|
34530f74092ac04334d3d18879f3c59c3354f4f8
|
/0x08-python-more_classes/7-rectangle.py
|
cb0c06f693a581ec33482c8da5d28feb78a75f5c
|
[] |
no_license
|
MarySerna/holbertonschool-higher_level_programming
|
9f37df91d7da703a31c461ca07703947ed090322
|
f7ed79a660690d412b7a8298ac9c658962d07c7a
|
refs/heads/master
| 2021-01-08T23:53:29.528920
| 2020-05-15T04:15:25
| 2020-05-15T04:15:25
| 242,180,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,343
|
py
|
#!/usr/bin/python3
"""Module to define a rectangle
Args:
width (int): width of a rectangle
height (int): height of a rectangle
"""
class Rectangle:
    """A rectangle defined by a non-negative integer width and height."""

    # Running count of live Rectangle instances.
    number_of_instances = 0
    # Symbol used by __str__ to draw the rectangle.
    print_symbol = '#'

    def __init__(self, width=0, height=0):
        """Create a rectangle; both dimensions default to 0."""
        self.width = width
        self.height = height
        Rectangle.number_of_instances += 1

    @property
    def width(self):
        """Horizontal size; validated by the setter."""
        return self.__width

    @width.setter
    def width(self, value):
        if not isinstance(value, int):
            raise TypeError('width must be an integer')
        if value < 0:
            raise ValueError('width must be >= 0')
        self.__width = value

    @property
    def height(self):
        """Vertical size; validated by the setter."""
        return self.__height

    @height.setter
    def height(self, value):
        if not isinstance(value, int):
            raise TypeError('height must be an integer')
        if value < 0:
            raise ValueError('height must be >= 0')
        self.__height = value

    def area(self):
        """Return width * height."""
        return self.__width * self.__height

    def perimeter(self):
        """Return 2 * (width + height); 0 when either side is 0."""
        if self.__width == 0 or self.__height == 0:
            return 0
        return 2 * (self.__width + self.__height)

    def __str__(self):
        """Render the rectangle with print_symbol, one row per line."""
        if self.__width == 0 or self.__height == 0:
            return ""
        row = str(self.print_symbol) * self.__width
        return '\n'.join([row] * self.__height)

    def __repr__(self):
        """Return an eval-able representation of the rectangle."""
        return "Rectangle({}, {})".format(self.__width, self.__height)

    def __del__(self):
        """Announce deletion and decrement the instance counter."""
        print('Bye rectangle...')
        Rectangle.number_of_instances -= 1
|
[
"mary.serna.8903@gmail.com"
] |
mary.serna.8903@gmail.com
|
52d9e68a9e9779fd139fc1b6351e313f9867021a
|
92436a50cc26c8c8a216ba6d4a62e36069614234
|
/classy_vision/hooks/model_complexity_hook.py
|
2d950e229a73ec3d167f234b2c1f3d1cac33c6ba
|
[
"MIT"
] |
permissive
|
hahaxun/ClassyVision
|
9341f4e6849c858094592052f3df111c13d1a91d
|
b3f714ef94275b3e9753ab3f3c8256cb852b96fc
|
refs/heads/master
| 2021-08-17T07:42:34.402613
| 2021-03-08T08:50:01
| 2021-03-08T08:50:01
| 245,940,574
| 1
| 0
|
MIT
| 2021-03-08T08:50:01
| 2020-03-09T04:02:59
|
Python
|
UTF-8
|
Python
| false
| false
| 3,368
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from classy_vision.generic.profiler import (
ClassyProfilerNotImplementedError,
compute_activations,
compute_flops,
count_params,
)
from classy_vision.hooks import register_hook
from classy_vision.hooks.classy_hook import ClassyHook
@register_hook("model_complexity")
class ModelComplexityHook(ClassyHook):
    """
    Logs the number of parameters and forward pass FLOPs and activations of the model.
    """

    on_phase_start = ClassyHook._noop
    on_step = ClassyHook._noop
    on_phase_end = ClassyHook._noop
    on_end = ClassyHook._noop

    def __init__(self) -> None:
        super().__init__()
        # Populated by on_start; None until then.
        self.num_flops = None
        self.num_activations = None
        self.num_parameters = None

    def on_start(self, task) -> None:
        """Measure number of parameters, FLOPs and activations."""
        self.num_flops = 0
        self.num_activations = 0
        self.num_parameters = 0
        model = task.base_model
        # Some models expose an input_key for dict-shaped inputs.
        input_key = getattr(model, "input_key", None)
        try:
            self.num_parameters = count_params(model)
            logging.info("Number of parameters in model: %d" % self.num_parameters)
            try:
                self.num_flops = compute_flops(
                    model,
                    input_shape=model.input_shape,
                    input_key=input_key,
                )
                if self.num_flops is None:
                    logging.info("FLOPs for forward pass: skipped.")
                    self.num_flops = 0
                else:
                    logging.info(
                        "FLOPs for forward pass: %d MFLOPs"
                        % (float(self.num_flops) / 1e6)
                    )
            except ClassyProfilerNotImplementedError as e:
                logging.warning(f"Could not compute FLOPs for model forward pass: {e}")
            try:
                self.num_activations = compute_activations(
                    model,
                    input_shape=model.input_shape,
                    input_key=input_key,
                )
                logging.info(f"Number of activations in model: {self.num_activations}")
            except ClassyProfilerNotImplementedError as e:
                logging.warning(
                    f"Could not compute activations for model forward pass: {e}"
                )
        except Exception:
            # Best-effort: complexity stats must never break training startup.
            logging.info("Skipping complexity calculation: Unexpected error")
            logging.debug("Error trace for complexity calculation:", exc_info=True)

    def get_summary(self):
        """Return the measured stats scaled to millions (0 if unmeasured)."""
        def to_millions(value):
            return float(value) / 1e6 if value is not None else 0
        return {
            "FLOPS(M)": to_millions(self.num_flops),
            "num_activations(M)": to_millions(self.num_activations),
            "num_parameters(M)": to_millions(self.num_parameters),
        }
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
d66e5bf50843298b9445b71d3ec2cca177e78de5
|
329b48089c64ebefe78d52f1c71c73bdadadd4b4
|
/keras2/keras64_1_Hyperparameter.py
|
ad68911f67a30528708f7b0d723608067bb8b426
|
[] |
no_license
|
variablejun/keras__R
|
7f854570952ed97c48715047015786d873e512cb
|
9faf4814b46cda1ac0ddbf2a2f8236fa0394f144
|
refs/heads/main
| 2023-07-13T19:32:25.950500
| 2021-08-22T18:26:52
| 2021-08-22T18:26:52
| 398,870,548
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,324
|
py
|
import numpy as np
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential,Model
from tensorflow.keras.layers import Dense,Dropout,Input,Conv2D
(x_train, y_train),(x_test,y_test)= mnist.load_data()
from tensorflow.keras.utils import to_categorical
# One-hot encode the 10 digit labels.
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# Flatten 28x28 images to 784-vectors and scale pixel values to [0, 1].
x_train = x_train.reshape(60000,28*28).astype('float32')/255
x_test= x_test.reshape(10000,28*28).astype('float32')/255
def build_model(drop=0.5, optimizer='adam'):
    """Build a 784-512-256-128-10 MLP classifier for MNIST.

    Args:
        drop: dropout rate applied after every hidden layer.
        optimizer: name of the Keras optimizer to compile with.
    """
    inputs = Input(shape=(28*28), name='Input')
    hidden = Dense(512, activation='relu', name='hidden1')(inputs)
    hidden = Dropout(drop)(hidden)
    hidden = Dense(256, activation='relu', name='hidden2')(hidden)
    hidden = Dropout(drop)(hidden)
    hidden = Dense(128, activation='relu', name='hidden3')(hidden)
    hidden = Dropout(drop)(hidden)
    outputs = Dense(10, activation='softmax', name='outputs')(hidden)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=optimizer, metrics=['acc'],
                  loss='categorical_crossentropy')
    return model
def create_hyperparameter():
    """Return the random-search space: batch sizes, optimizer names, dropout rates."""
    return {
        'batch_size': [1000, 2000, 3000, 4000, 5000],
        'optimizer': ['rmsprop', 'adam', 'adadelta'],
        'drop': [0.5, 0.6, 0.7],
    }
hyperparameters = create_hyperparameter()
print(hyperparameters)
#{'batch_size': [10, 20, 30, 40, 50], 'optimizer': ['rmsprop', 'adam', 'adadelta'], 'drop': [0.1, 0.2, 0.3]}
#model2 = build_model()
# KerasClassifier wraps a Keras model so it can be used with scikit-learn tooling.
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
model2 = KerasClassifier(build_fn=build_model,verbose=1)
from sklearn.model_selection import GridSearchCV,RandomizedSearchCV
from xgboost import XGBClassifier
# Randomized search over the hyperparameter space with 5-fold cross-validation.
model = RandomizedSearchCV(model2, hyperparameters,cv=5)
model.fit(x_train,y_train,verbose=1,epochs=3, validation_split=0.2)
print(model.best_estimator_)
print(model.best_params_)
print(model.best_score_)
acc = model.score(x_test,y_test)
print(acc)
'''
<tensorflow.python.keras.wrappers.scikit_learn.KerasClassifier object at 0x000001BCCE273100>
{'optimizer': 'rmsprop', 'drop': 0.5, 'batch_size': 1000}
0.9427833318710327
10/10 [==============================] - 0s 3ms/step - loss: 0.1547 - acc: 0.9530
0.953000009059906
'''
|
[
"crescendo0217@gmail.com"
] |
crescendo0217@gmail.com
|
fffa9fc3b815accf4276f2bb4c6e09c6bc58c609
|
dcefbb67cfdc837a5b1016ea674ead66263f0af2
|
/algorithm/BOJ_9498.py
|
290b709344b970e624e35aaaf96ba697a6f8a63d
|
[] |
no_license
|
SeungYeopB/weekend-study
|
0a5d5bdbb00a7d81f2ec7c6c5b2fc7b96d92c296
|
02651855bb91e26784611bbed34a01023f4ef307
|
refs/heads/master
| 2023-06-23T15:52:54.475077
| 2021-07-23T07:57:16
| 2021-07-23T07:57:16
| 382,514,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
# BOJ 9498: map a 0-100 score read from stdin to a letter grade.
N = int(input())
grade = "F"
for threshold, letter in ((90, "A"), (80, "B"), (70, "C"), (60, "D")):
    if N >= threshold:
        grade = letter
        break
print(grade)
|
[
"="
] |
=
|
f40b984eb61b3ef75296fcd0a7d260bb6141d45e
|
8fdcd12cfb91b2245da8b3c65fb937b1d72dd4c5
|
/tissuelab/omero/gateway_ome500_ice351/omero_ext/xmlrunner/main.py
|
b901b82051114a73824341dc847547251109890c
|
[] |
no_license
|
VirtualPlants/tissuelab
|
569a334deab0b73acc8b43f313efc3f4c4e552fd
|
8c064a34b91127806848f4992d1e4767574863cf
|
refs/heads/master
| 2021-01-11T01:32:19.830778
| 2017-05-04T09:42:53
| 2017-05-04T09:42:53
| 70,694,783
| 2
| 1
| null | 2017-01-05T14:21:50
| 2016-10-12T11:49:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,628
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
OME Testing Methods
"""
import logging
import unittest
from omero_ext import xmlrunner
class OmeTestLoader(object):
    """One-shot unittest loader that builds a suite from dotted test names."""

    def __init__(self, args):
        # Dotted names to load tests from; resolved lazily on first call.
        self.__args = args

    def loadTestsFromModule(self, *args):
        """Build the suite once; a second invocation is an error."""
        if hasattr(self, "already_called"):
            raise Exception("Already called")
        suite = unittest.TestSuite()
        load_by_name = unittest.defaultTestLoader.loadTestsFromName
        for name in self.__args:
            suite.addTest(load_by_name(name))
        self.already_called = True
        return suite
def ome_test_main(args):
    """Run the named tests with XML report output under target/reports."""
    logging.basicConfig(level=logging.WARN)
    runner = xmlrunner.XMLTestRunner(verbose=True, output='target/reports')
    unittest.main(testRunner=runner, testLoader=OmeTestLoader(args))
|
[
"guillaume.baty@inria.fr"
] |
guillaume.baty@inria.fr
|
6c59afc01f8f79d247c8828e95c7474ffcd0ed59
|
a838d4bed14d5df5314000b41f8318c4ebe0974e
|
/sdk/elastic/azure-mgmt-elastic/azure/mgmt/elastic/_microsoft_elastic.py
|
1b77371ed5a3ff00b9189073d035aff87c44842c
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
scbedd/azure-sdk-for-python
|
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
|
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
|
refs/heads/master
| 2023-09-01T08:38:56.188954
| 2021-06-17T22:52:28
| 2021-06-17T22:52:28
| 159,568,218
| 2
| 0
|
MIT
| 2019-08-11T21:16:01
| 2018-11-28T21:34:49
|
Python
|
UTF-8
|
Python
| false
| false
| 5,959
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import MicrosoftElasticConfiguration
from .operations import Operations
from .operations import MonitorsOperations
from .operations import MonitoredResourcesOperations
from .operations import DeploymentInfoOperations
from .operations import TagRulesOperations
from .operations import VMHostOperations
from .operations import VMIngestionOperations
from .operations import VMCollectionOperations
from . import models
class MicrosoftElastic(object):
    """MicrosoftElastic service client.

    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.elastic.operations.Operations
    :ivar monitors: MonitorsOperations operations
    :vartype monitors: azure.mgmt.elastic.operations.MonitorsOperations
    :ivar monitored_resources: MonitoredResourcesOperations operations
    :vartype monitored_resources: azure.mgmt.elastic.operations.MonitoredResourcesOperations
    :ivar deployment_info: DeploymentInfoOperations operations
    :vartype deployment_info: azure.mgmt.elastic.operations.DeploymentInfoOperations
    :ivar tag_rules: TagRulesOperations operations
    :vartype tag_rules: azure.mgmt.elastic.operations.TagRulesOperations
    :ivar vm_host: VMHostOperations operations
    :vartype vm_host: azure.mgmt.elastic.operations.VMHostOperations
    :ivar vm_ingestion: VMIngestionOperations operations
    :vartype vm_ingestion: azure.mgmt.elastic.operations.VMIngestionOperations
    :ivar vm_collection: VMCollectionOperations operations
    :vartype vm_collection: azure.mgmt.elastic.operations.VMCollectionOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The Azure subscription ID. This is a GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000).
    :type subscription_id: str
    :param str base_url: Service URL
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    """

    def __init__(
        self,
        credential,  # type: "TokenCredential"
        subscription_id,  # type: str
        base_url=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Fall back to the public ARM endpoint when no URL is supplied.
        base_url = base_url or 'https://management.azure.com'
        self._config = MicrosoftElasticConfiguration(credential, subscription_id, **kwargs)
        self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Register every generated model class with the (de)serializers.
        client_models = {
            name: model for name, model in models.__dict__.items()
            if isinstance(model, type)
        }
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # All operation groups share the same plumbing arguments.
        shared = (self._client, self._config, self._serialize, self._deserialize)
        self.operations = Operations(*shared)
        self.monitors = MonitorsOperations(*shared)
        self.monitored_resources = MonitoredResourcesOperations(*shared)
        self.deployment_info = DeploymentInfoOperations(*shared)
        self.tag_rules = TagRulesOperations(*shared)
        self.vm_host = VMHostOperations(*shared)
        self.vm_ingestion = VMIngestionOperations(*shared)
        self.vm_collection = VMCollectionOperations(*shared)

    def _send_request(self, http_request, **kwargs):
        # type: (HttpRequest, Any) -> HttpResponse
        """Runs the network request through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.HttpResponse
        """
        url_arguments = {
            'subscriptionId': self._serialize.url(
                "self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        http_request.url = self._client.format_url(http_request.url, **url_arguments)
        stream = kwargs.pop("stream", True)
        pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response

    def close(self):
        # type: () -> None
        self._client.close()

    def __enter__(self):
        # type: () -> MicrosoftElastic
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details):
        # type: (Any) -> None
        self._client.__exit__(*exc_details)
|
[
"noreply@github.com"
] |
scbedd.noreply@github.com
|
9b7bbcece100ed41298687ceaf110a854d4c4f80
|
7fd1406b7e94d4b82a158ce5be87b5ae821e16b6
|
/pro2_4.py
|
cbe3b853a1fd6e8842058c68d0e80ca7dfa7022e
|
[] |
no_license
|
THABUULAGANATHAN/guvi-programs
|
c1c4d314c7ce43d6c3996fdac85616248c69e4fd
|
fb004f6916776ca9fbe07b8d507f9725cc55248f
|
refs/heads/master
| 2022-01-15T09:08:32.904234
| 2019-07-19T06:45:04
| 2019-07-19T06:45:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
# XOR range queries: the first stdin line gives the element count (pi,
# unused) and the query count (qi); the second line the elements.  Each
# query line holds 1-based inclusive indices r, s; print the XOR of
# l[r-1 .. s-1].
pi, qi = map(int, input().split())
l = list(map(int, input().split()))
for _ in range(qi):
    r, s = map(int, input().split())
    # XOR-fold the slice.  Folding from 0 (the XOR identity) avoids the
    # original's reuse of the outer loop index and its IndexError on an
    # empty range.
    acc = 0
    for value in l[r-1:s]:
        acc ^= value
    print(acc)
|
[
"noreply@github.com"
] |
THABUULAGANATHAN.noreply@github.com
|
5ca662cfd326bbfc872365527afa925f6d62a32a
|
003dd45d19b5a6fd4a04deeefa63756462dbe09d
|
/pymoo/core/decomposition.py
|
c44fc0098edfa8eabc65e6dfd473ec6d63021804
|
[
"Apache-2.0"
] |
permissive
|
Flytortoise/pymoo
|
51d32793e843977bd8fda0226bb6add1c356e21d
|
c6426a721d95c932ae6dbb610e09b6c1b0e13594
|
refs/heads/master
| 2023-09-03T20:54:13.284192
| 2021-11-09T13:23:15
| 2021-11-09T13:23:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,451
|
py
|
import numpy as np
from pymoo.util.misc import at_least_2d_array, to_1d_array_if_possible
class Decomposition:
    """Base class for scalarizing decompositions of multi-objective values.

    Subclasses implement `_do`; `do` resolves the broadcasting mode between
    the objective values F and the weight vectors, fills in default
    reference points, and reshapes the result accordingly.
    """

    def __init__(self, eps=0.0, _type="auto", **kwargs) -> None:
        super().__init__()
        self.eps = eps
        self._type = _type
        # Reference points are (re)assigned on every call to `do`.
        self.ideal_point, self.utopian_point, self.nadir_point = None, None, None

    def do(self,
           F,
           weights,
           _type="auto",
           ideal_point=None,
           utopian_point=None,
           nadir_point=None,
           **kwargs):
        _F = to_1d_array_if_possible(F)
        _weights = to_1d_array_if_possible(weights)

        # Infer the broadcasting mode from the dimensionalities.
        if _type == "auto":
            if _F.ndim == 1 and _weights.ndim > 1:
                _type = "one_to_many"
            elif _F.ndim > 1 and _weights.ndim == 1:
                _type = "many_to_one"
            elif _F.ndim == 2 and _weights.ndim == 2 and _F.shape[0] == _weights.shape[0]:
                _type = "one_to_one"
            else:
                _type = "many_to_many"

        # Work on 2d views from here on.
        F = at_least_2d_array(F)
        weights = at_least_2d_array(weights)
        n_points = F.shape[0]
        n_weights = weights.shape[0]

        # Resolve reference points, falling back to defaults when omitted.
        self.ideal_point = np.zeros(F.shape[1]) if ideal_point is None else ideal_point
        self.utopian_point = (
            self.ideal_point - self.eps if utopian_point is None else utopian_point
        )
        self.nadir_point = (
            self.utopian_point + np.ones(F.shape[1]) if nadir_point is None else nadir_point
        )

        if _type == "one_to_one":
            D = self._do(F, weights=weights, **kwargs).flatten()
        elif _type == "one_to_many":
            F = np.repeat(F, n_weights, axis=0)
            D = self._do(F, weights=weights, **kwargs).flatten()
        elif _type == "many_to_one":
            weights = np.repeat(weights, n_points, axis=0)
            D = self._do(F, weights=weights, **kwargs).flatten()
        elif _type == "many_to_many":
            # Pair every point with every weight vector.
            F = np.repeat(F, n_weights, axis=0)
            weights = np.tile(weights, (n_points, 1))
            D = self._do(F, weights=weights, **kwargs).reshape(n_points, n_weights)
        else:
            raise Exception("Unknown type for decomposition: %s" % _type)

        return D
|
[
"blankjul@egr.msu.edu"
] |
blankjul@egr.msu.edu
|
7451c76f2aaa67e39cfca280e39ce44c0c871877
|
27381f38b713258645855716593a9e309c43337e
|
/mechanics/hallway_plate_assembly.py
|
0138693b254035f06a4cdb76b2cd0e5774ac69b4
|
[
"Apache-2.0"
] |
permissive
|
iorodeo/hallway_arenas
|
828859f66a0f6966fde24c95fc32db7d1c4fea51
|
02d87d7890aea4f09fc244792aecaf6cb24357b2
|
refs/heads/master
| 2022-05-22T06:01:38.534312
| 2020-04-21T19:50:58
| 2020-04-21T19:50:58
| 256,603,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,399
|
py
|
"""
Creates assembly of top and bottom plates for hallway arena
"""
from py2scad import *
from assembly import Assembly
from hallway_top_plate import Hallway_Top_Plate
from hallway_bottom_plate import Hallway_Bottom_Plate
class Hallway_Plate_Assembly(Assembly):
    """Assembly combining the hallway arena's top and bottom plates."""

    def make(self):
        """Create both plates, stack them along z, and apply colors."""
        top_params = self.params.hallway_top_plate
        bottom_params = self.params.hallway_bottom_plate

        top_plate = Hallway_Top_Plate(**top_params)
        bottom_plate = Hallway_Bottom_Plate(**bottom_params)

        # Bottom plate rests on z=0; the top plate sits on it, optionally
        # separated by explode_z for an exploded view.
        bottom_z = 0.5 * bottom_params['thickness']
        bottom_plate.translate(v=(0, 0, bottom_z))
        top_z = 2 * bottom_z + 0.5 * top_params['thickness'] + self.params.explode_z
        top_plate.translate(v=(0, 0, top_z))

        bottom_plate.color(rgba=bottom_params['color'])
        top_plate.color(rgba=top_params['color'])

        self.parts = {
            'top_plate': top_plate,
            'bottom_plate': bottom_plate,
        }
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    import params

    # Build the assembly and emit it as an OpenSCAD program.
    assembly = Hallway_Plate_Assembly(params=params)
    scad = SCAD_Prog()
    scad.fn = 50
    scad.add(assembly)
    scad.write('hallway_plate_assembly.scad')
|
[
"will@iorodeo.com"
] |
will@iorodeo.com
|
dda989c5ddf4601eb9af755446131b6ba4d3e885
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/audio/ESPnet2_for_PyTorch/espnet2/enh/separator/conformer_separator.py
|
dbc1251d99d8976e54f34dfd9c5ec546f8c6cdef
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 6,643
|
py
|
from collections import OrderedDict
from distutils.version import LooseVersion
from typing import List
from typing import Tuple
from typing import Union
import torch
from torch_complex.tensor import ComplexTensor
from espnet.nets.pytorch_backend.conformer.encoder import (
Encoder as ConformerEncoder, # noqa: H301
)
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
from espnet2.enh.layers.complex_utils import is_complex
from espnet2.enh.separator.abs_separator import AbsSeparator
is_torch_1_9_plus = LooseVersion(torch.__version__) >= LooseVersion("1.9.0")
class ConformerSeparator(AbsSeparator):
def __init__(
self,
input_dim: int,
num_spk: int = 2,
adim: int = 384,
aheads: int = 4,
layers: int = 6,
linear_units: int = 1536,
positionwise_layer_type: str = "linear",
positionwise_conv_kernel_size: int = 1,
normalize_before: bool = False,
concat_after: bool = False,
dropout_rate: float = 0.1,
input_layer: str = "linear",
positional_dropout_rate: float = 0.1,
attention_dropout_rate: float = 0.1,
nonlinear: str = "relu",
conformer_pos_enc_layer_type: str = "rel_pos",
conformer_self_attn_layer_type: str = "rel_selfattn",
conformer_activation_type: str = "swish",
use_macaron_style_in_conformer: bool = True,
use_cnn_in_conformer: bool = True,
conformer_enc_kernel_size: int = 7,
padding_idx: int = -1,
):
"""Conformer separator.
Args:
input_dim: input feature dimension
num_spk: number of speakers
adim (int): Dimension of attention.
aheads (int): The number of heads of multi head attention.
linear_units (int): The number of units of position-wise feed forward.
layers (int): The number of transformer blocks.
dropout_rate (float): Dropout rate.
input_layer (Union[str, torch.nn.Module]): Input layer type.
attention_dropout_rate (float): Dropout rate in attention.
positional_dropout_rate (float): Dropout rate after adding
positional encoding.
normalize_before (bool): Whether to use layer_norm before the first block.
concat_after (bool): Whether to concat attention layer's input and output.
if True, additional linear will be applied.
i.e. x -> x + linear(concat(x, att(x)))
if False, no additional linear will be applied. i.e. x -> x + att(x)
conformer_pos_enc_layer_type(str): Encoder positional encoding layer type.
conformer_self_attn_layer_type (str): Encoder attention layer type.
conformer_activation_type(str): Encoder activation function type.
positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear".
positionwise_conv_kernel_size (int): Kernel size of
positionwise conv1d layer.
use_macaron_style_in_conformer (bool): Whether to use macaron style for
positionwise layer.
use_cnn_in_conformer (bool): Whether to use convolution module.
conformer_enc_kernel_size(int): Kernerl size of convolution module.
padding_idx (int): Padding idx for input_layer=embed.
nonlinear: the nonlinear function for mask estimation,
select from 'relu', 'tanh', 'sigmoid'
"""
super().__init__()
self._num_spk = num_spk
self.conformer = ConformerEncoder(
idim=input_dim,
attention_dim=adim,
attention_heads=aheads,
linear_units=linear_units,
num_blocks=layers,
dropout_rate=dropout_rate,
positional_dropout_rate=positional_dropout_rate,
attention_dropout_rate=attention_dropout_rate,
input_layer=input_layer,
normalize_before=normalize_before,
concat_after=concat_after,
positionwise_layer_type=positionwise_layer_type,
positionwise_conv_kernel_size=positionwise_conv_kernel_size,
macaron_style=use_macaron_style_in_conformer,
pos_enc_layer_type=conformer_pos_enc_layer_type,
selfattention_layer_type=conformer_self_attn_layer_type,
activation_type=conformer_activation_type,
use_cnn_module=use_cnn_in_conformer,
cnn_module_kernel=conformer_enc_kernel_size,
padding_idx=padding_idx,
)
self.linear = torch.nn.ModuleList(
[torch.nn.Linear(adim, input_dim) for _ in range(self.num_spk)]
)
if nonlinear not in ("sigmoid", "relu", "tanh"):
raise ValueError("Not supporting nonlinear={}".format(nonlinear))
self.nonlinear = {
"sigmoid": torch.nn.Sigmoid(),
"relu": torch.nn.ReLU(),
"tanh": torch.nn.Tanh(),
}[nonlinear]
def forward(
self, input: Union[torch.Tensor, ComplexTensor], ilens: torch.Tensor
) -> Tuple[List[Union[torch.Tensor, ComplexTensor]], torch.Tensor, OrderedDict]:
"""Forward.
Args:
input (torch.Tensor or ComplexTensor): Encoded feature [B, T, N]
ilens (torch.Tensor): input lengths [Batch]
Returns:
masked (List[Union(torch.Tensor, ComplexTensor)]): [(B, T, N), ...]
ilens (torch.Tensor): (B,)
others predicted data, e.g. masks: OrderedDict[
'mask_spk1': torch.Tensor(Batch, Frames, Freq),
'mask_spk2': torch.Tensor(Batch, Frames, Freq),
...
'mask_spkn': torch.Tensor(Batch, Frames, Freq),
]
"""
# if complex spectrum,
if is_complex(input):
feature = abs(input)
else:
feature = input
# prepare pad_mask for transformer
pad_mask = make_non_pad_mask(ilens).unsqueeze(1).to(feature.device)
x, ilens = self.conformer(feature, pad_mask)
masks = []
for linear in self.linear:
y = linear(x)
y = self.nonlinear(y)
masks.append(y)
masked = [input * m for m in masks]
others = OrderedDict(
zip(["mask_spk{}".format(i + 1) for i in range(len(masks))], masks)
)
return masked, ilens, others
    @property
    def num_spk(self):
        """Number of speakers this separator produces masks for."""
        return self._num_spk
|
[
"ningzhenjiang@huawei.com"
] |
ningzhenjiang@huawei.com
|
7301d83c595597b93916cb9dd6928a33c2a858e2
|
338dbd8788b019ab88f3c525cddc792dae45036b
|
/lib/python3.6/site-packages/statsmodels/sandbox/descstats.py
|
1b90db63cf35925066b9ad968d0d41c5ed48f642
|
[] |
permissive
|
KshitizSharmaV/Quant_Platform_Python
|
9b8b8557f13a0dde2a17de0e3352de6fa9b67ce3
|
d784aa0604d8de5ba5ca0c3a171e3556c0cd6b39
|
refs/heads/master
| 2022-12-10T11:37:19.212916
| 2019-07-09T20:05:39
| 2019-07-09T20:05:39
| 196,073,658
| 1
| 2
|
BSD-3-Clause
| 2022-11-27T18:30:16
| 2019-07-09T19:48:26
|
Python
|
UTF-8
|
Python
| false
| false
| 6,472
|
py
|
'''
Glue for returning descriptive statistics.
'''
import numpy as np
from scipy import stats
import os
from statsmodels.stats.descriptivestats import sign_test
#############################################
#
#============================================
# Univariate Descriptive Statistics
#============================================
#
def descstats(data, cols=None, axis=0):
    '''
    Prints descriptive statistics for one or multiple variables.
    Parameters
    ----------
    data: numpy array
        `x` is the data
    cols: list, optional
        A list of the column number or field names (for a recarray) of variables.
        Default is all columns.
    axis: 1 or 0
        axis order of data. Default is 0 for column-ordered data.
    Examples
    --------
    >>> descstats(data.exog,v=['x_1','x_2','x_3'])
    '''
    # NOTE(review): `axis` is accepted but never used below — confirm intent.
    x = np.array(data) # or rather, the data we're interested in
    if cols is None:
#        if isinstance(x, np.recarray):
#            cols = np.array(len(x.dtype.names))
        # Promote a 1-D array to a single-column 2-D array so the shape
        # checks below work uniformly.
        if not isinstance(x, np.recarray) and x.ndim == 1:
            x = x[:,None]
    # Single column: full univariate report (moments, percentiles, tests).
    if x.shape[1] == 1:
        desc = '''
    ---------------------------------------------
    Univariate Descriptive Statistics
    ---------------------------------------------
    Var. Name %(name)12s
    ----------
    Obs. %(nobs)22i Range %(range)22s
    Sum of Wts. %(sum)22s Coeff. of Variation %(coeffvar)22.4g
    Mode %(mode)22.4g Skewness %(skewness)22.4g
    Repeats %(nmode)22i Kurtosis %(kurtosis)22.4g
    Mean %(mean)22.4g Uncorrected SS %(uss)22.4g
    Median %(median)22.4g Corrected SS %(ss)22.4g
    Variance %(variance)22.4g Sum Observations %(sobs)22.4g
    Std. Dev. %(stddev)22.4g
    ''' % {'name': cols, 'sum': 'N/A', 'nobs': len(x), 'mode': \
            stats.mode(x)[0][0], 'nmode': stats.mode(x)[1][0], \
            'mean': x.mean(), 'median': np.median(x), 'range': \
            '('+str(x.min())+', '+str(x.max())+')', 'variance': \
            x.var(), 'stddev': x.std(), 'coeffvar': \
            stats.variation(x), 'skewness': stats.skew(x), \
            'kurtosis': stats.kurtosis(x), 'uss': np.sum(x**2, axis=0),\
            'ss': np.sum((x-x.mean())**2, axis=0), 'sobs': np.sum(x)}
        # Append the percentile table (1st through 99th).
        desc+= '''
    Percentiles
    -------------
    1  %% %12.4g
    5  %% %12.4g
    10  %% %12.4g
    25  %% %12.4g
    50  %% %12.4g
    75  %% %12.4g
    90  %% %12.4g
    95  %% %12.4g
    99  %% %12.4g
    ''' % tuple([stats.scoreatpercentile(x,per) for per in (1,5,10,25,
                50,75,90,95,99)])
        # Location tests against mu0 = 0: one-sample t, sign test, Wilcoxon.
        t,p_t=stats.ttest_1samp(x,0)
        M,p_M=sign_test(x)
        S,p_S=stats.wilcoxon(np.squeeze(x))
        desc+= '''
    Tests of Location (H0: Mu0=0)
    -----------------------------
    Test Statistic Two-tailed probability
    -----------------+-----------------------------------------
    Student's t | t %7.5f Pr > |t| <%.4f
    Sign | M %8.2f Pr >= |M| <%.4f
    Signed Rank | S %8.2f Pr >= |S| <%.4f
    ''' % (t,p_t,M,p_M,S,p_S)
    # Should this be part of a 'descstats'
    # in any event these should be split up, so that they can be called
    # individually and only returned together if someone calls summary
    # or something of the sort
    # Multiple columns: compact one-line-per-variable summary table.
    elif x.shape[1] > 1:
        desc ='''
    Var. Name | Obs. Mean Std. Dev. Range
    ------------+--------------------------------------------------------'''+\
            os.linesep
    # for recarrays with columns passed as names
    # if isinstance(cols[0],str):
    # for var in cols:
    # desc += "%(name)15s %(obs)9i %(mean)12.4g %(stddev)12.4g \
    #%(range)20s" % {'name': var, 'obs': len(x[var]), 'mean': x[var].mean(),
    # 'stddev': x[var].std(), 'range': '('+str(x[var].min())+', '\
    # +str(x[var].max())+')'+os.linesep}
    # else:
        for var in range(x.shape[1]):
            xv = x[:, var]
            kwargs = {
                'name': var,
                'obs': len(xv),
                'mean': xv.mean(),
                'stddev': xv.std(),
                'range': '('+str(xv.min())+', '+str(xv.max())+')'+os.linesep
            }
            desc += ("%(name)15s %(obs)9i %(mean)12.4g %(stddev)12.4g "
                "%(range)20s" % kwargs)
    else:
        raise ValueError("data not understood")
    return desc
#if __name__=='__main__':
# test descstats
# import os
# loc='http://eagle1.american.edu/~js2796a/data/handguns_data.csv'
# relpath=(load_dataset(loc))
# dta=np.recfromcsv(relpath)
# descstats(dta,['stpop'])
# raw_input('Hit enter for multivariate test')
# descstats(dta,['stpop','avginc','vio'])
# with plain arrays
# import string2dummy as s2d
# dts=s2d.string2dummy(dta)
# ndts=np.vstack(dts[col] for col in dts.dtype.names)
# observations in columns and data in rows
# is easier for the call to stats
# what to make of
# ndts=np.column_stack(dts[col] for col in dts.dtype.names)
# ntda=ntds.swapaxis(1,0)
# ntda is ntds returns false?
# or now we just have detailed information about the different strings
# would this approach ever be inappropriate for a string typed variable
# other than dates?
# descstats(ndts, [1])
# raw_input("Enter to try second part")
# descstats(ndts, [1,20,3])
if __name__ == '__main__':
    # Demo: run descstats on the Longley dataset (with and without slicing
    # to a single column).
    import statsmodels.api as sm
    data = sm.datasets.longley.load(as_pandas=False)
    data.exog = sm.add_constant(data.exog, prepend=False)
    sum1 = descstats(data.exog)
    sum1a = descstats(data.exog[:,:1])
#    loc='http://eagle1.american.edu/~js2796a/data/handguns_data.csv'
#    dta=np.recfromcsv(loc)
#    summary2 = descstats(dta,['stpop'])
#    summary3 = descstats(dta,['stpop','avginc','vio'])
#TODO: needs a by argument
#    summary4 = descstats(dta) this fails
# this is a bug
# p = dta[['stpop']]
# p.view(dtype = np.float, type = np.ndarray)
# this works
# p.view(dtype = np.int, type = np.ndarray)
### This is *really* slow ###
    # Optional demo on a local CSV; only runs when the file exists.
    if os.path.isfile('./Econ724_PS_I_Data.csv'):
        data2 = np.recfromcsv('./Econ724_PS_I_Data.csv')
        sum2 = descstats(data2.ahe)
        sum3 = descstats(np.column_stack((data2.ahe,data2.yrseduc)))
        sum4 = descstats(np.column_stack(([data2[_] for \
                _ in data2.dtype.names])))
|
[
"kshitizsharmav@gmail.com"
] |
kshitizsharmav@gmail.com
|
67804ab2e7258dc505b5a127e72d28636629181a
|
28ef7c65a5cb1291916c768a0c2468a91770bc12
|
/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/res50_jhmdb_sub3_256x256.py
|
cdef4c576e262405a3b53d78b059c38bcc6b2148
|
[
"Apache-2.0"
] |
permissive
|
bit-scientist/mmpose
|
57464aae1ca87faf5a4669991ae1ea4347e41900
|
9671a12caf63ae5d15a9bebc66a9a2e7a3ce617e
|
refs/heads/master
| 2023-08-03T17:18:27.413286
| 2021-09-29T03:48:37
| 2021-09-29T03:48:37
| 411,549,076
| 0
| 0
|
Apache-2.0
| 2021-09-29T06:01:27
| 2021-09-29T06:01:26
| null |
UTF-8
|
Python
| false
| false
| 3,976
|
py
|
# mmpose top-down config: ResNet-50 heatmap model on the JHMDB (Sub3) split.
# NOTE: this file uses mmcv config syntax ({{_base_.dataset_info}} below) and
# is parsed by mmcv, not executed as plain Python.
_base_ = ['../../../../_base_/datasets/jhmdb.py']
log_level = 'INFO'
# Warm-start from an MPII-pretrained checkpoint.
load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=1)
evaluation = dict(interval=1, metric=['PCK', 'tPCK'], save_best='Mean PCK')
optimizer = dict(
    type='Adam',
    lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 15])
total_epochs = 20
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# JHMDB uses 15 keypoints; all are used for both training and inference.
channel_cfg = dict(
    num_output_channels=15,
    dataset_joints=15,
    dataset_channel=[
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
    ],
    inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])
# model settings
model = dict(
    type='TopDown',
    pretrained=None,
    backbone=dict(type='ResNet', depth=50),
    keypoint_head=dict(
        type='TopdownHeatmapSimpleHead',
        in_channels=2048,
        out_channels=channel_cfg['num_output_channels'],
        loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
    train_cfg=dict(),
    test_cfg=dict(
        flip_test=True,
        post_process='default',
        shift_heatmap=True,
        modulate_kernel=11))
# Ground-truth boxes are used for evaluation (no detector results).
data_cfg = dict(
    image_size=[256, 256],
    heatmap_size=[64, 64],
    num_output_channels=channel_cfg['num_output_channels'],
    num_joints=channel_cfg['dataset_joints'],
    dataset_channel=channel_cfg['dataset_channel'],
    inference_channel=channel_cfg['inference_channel'],
    soft_nms=False,
    nms_thr=1.0,
    oks_thr=0.9,
    vis_thr=0.2,
    use_gt_bbox=True,
    det_bbox_thr=0.0,
    bbox_file='',
)
# Training augmentation: flip + random scale/rotation, ImageNet normalization.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownRandomFlip', flip_prob=0.5),
    dict(
        type='TopDownGetRandomScaleRotation', rot_factor=30,
        scale_factor=0.25),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(type='TopDownGenerateTarget', sigma=2),
    dict(
        type='Collect',
        keys=['img', 'target', 'target_weight'],
        meta_keys=[
            'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
            'rotation', 'bbox', 'flip_pairs'
        ]),
]
val_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(
        type='Collect',
        keys=[
            'img',
        ],
        meta_keys=[
            'image_file', 'center', 'scale', 'rotation', 'bbox', 'flip_pairs'
        ]),
]
test_pipeline = val_pipeline
data_root = 'data/jhmdb'
data = dict(
    samples_per_gpu=64,
    workers_per_gpu=2,
    val_dataloader=dict(samples_per_gpu=32),
    test_dataloader=dict(samples_per_gpu=32),
    train=dict(
        type='TopDownJhmdbDataset',
        ann_file=f'{data_root}/annotations/Sub3_train.json',
        img_prefix=f'{data_root}/',
        data_cfg=data_cfg,
        pipeline=train_pipeline,
        dataset_info={{_base_.dataset_info}}),
    val=dict(
        type='TopDownJhmdbDataset',
        ann_file=f'{data_root}/annotations/Sub3_test.json',
        img_prefix=f'{data_root}/',
        data_cfg=data_cfg,
        pipeline=val_pipeline,
        dataset_info={{_base_.dataset_info}}),
    test=dict(
        type='TopDownJhmdbDataset',
        ann_file=f'{data_root}/annotations/Sub3_test.json',
        img_prefix=f'{data_root}/',
        data_cfg=data_cfg,
        pipeline=val_pipeline,
        dataset_info={{_base_.dataset_info}}),
)
|
[
"noreply@github.com"
] |
bit-scientist.noreply@github.com
|
b2239d07d16a98cc0da947a715c77ca38064eb64
|
dcae1caa1816ba8ab8016e125027dd09b9a69720
|
/tasks/copy_task.py
|
8d95337c5390fec60fce1636b397fef0f820170f
|
[] |
no_license
|
mahi97/XMANN
|
5832bc0b02c7ee5998eaf8b4ed558f916e0d5d36
|
bd6da642a5afc35582476b417e862f57817ed63c
|
refs/heads/master
| 2023-06-15T19:10:55.331234
| 2021-07-15T11:35:40
| 2021-07-15T11:35:40
| 316,759,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,671
|
py
|
"""Copy Task NTM model."""
from attr import attrs, attrib, Factory
import random
import numpy as np
import torch
from torch import nn
from torch import optim
from model import Model
from model import ModelParams
class CopyTask(object):
    """Bundle of the model class and parameter class for the copy task."""
    def __init__(self):
        # Stores the classes themselves (not instances); callers instantiate.
        self.model = CopyTaskModel
        self.param = CopyTaskParams
def data_loader(num_batches, batch_size, seq_width, min_len, max_len, is_cuda=False):
    """Yield random bit-sequence batches for the copy task.

    Every batch shares one sequence length drawn uniformly from
    [`min_len`, `max_len`]. The input carries one extra channel holding the
    end-of-sequence delimiter, so its width is `seq_width + 1` and its length
    is `seq_len + 1`; the target is the raw bit sequence to be copied.

    :param num_batches: Total number of batches to generate.
    :param batch_size: Batch size.
    :param seq_width: Width of each item in the sequence.
    :param min_len: Minimum sequence length.
    :param max_len: Maximum sequence length.
    :param is_cuda: When True, yield tensors already moved to GPU memory.
    :yields: (1-based batch index, input [L+1, B, W+1], target [L, B, W])
    """
    for batch_idx in range(num_batches):
        # One shared length per batch.
        length = random.randint(min_len, max_len)
        bits = torch.from_numpy(
            np.random.binomial(1, 0.5, (length, batch_size, seq_width)))

        # Input = bits plus a delimiter pulse on the extra control channel.
        padded = torch.zeros(length + 1, batch_size, seq_width + 1)
        padded[:length, :, :seq_width] = bits
        padded[length, :, seq_width] = 1.0  # delimiter marks end of input

        target = bits.clone()
        if is_cuda:
            padded = padded.cuda()
            target = target.cuda()
        yield batch_idx + 1, padded.float(), target.float()
@attrs
class CopyTaskParams(object):
    """Hyper-parameters for the copy task (attrs-generated init/repr)."""
    name = attrib(default="copy-task")
    # Architecture selection.
    memory = attrib(default='static')
    memory_init = attrib(default='random')
    controller = attrib(default='LSTM')
    data_path = attrib(default='NTM')
    controller_size = attrib(default=100, converter=int)
    controller_layers = attrib(default=1, converter=int)
    num_read_heads = attrib(default=1, converter=int)
    num_write_heads = attrib(default=1, converter=int)
    # Task data shape: bit-width and length range of the sequences.
    sequence_width = attrib(default=8, converter=int)
    sequence_min_len = attrib(default=1, converter=int)
    sequence_max_len = attrib(default=20, converter=int)
    # Memory matrix dimensions: N rows of M-element words.
    memory_n = attrib(default=128, converter=int)
    memory_m = attrib(default=20, converter=int)
    # Training schedule and optimizer settings.
    num_batches = attrib(default=20000, converter=int)
    batch_size = attrib(default=1, converter=int)
    rmsprop_lr = attrib(default=1e-4, converter=float)
    rmsprop_momentum = attrib(default=0.9, converter=float)
    rmsprop_alpha = attrib(default=0.95, converter=float)
    is_cuda = attrib(default=False, converter=bool)
@attrs
class CopyTaskModel(object):
    """Wires together network, data loader, loss, and optimizer for the copy task.

    All attributes are attrs fields whose defaults are computed lazily by the
    decorated `*.default` methods below.
    """
    params = attrib(default=Factory(CopyTaskParams))
    net = attrib()
    data_loader = attrib()
    criterion = attrib()
    optimizer = attrib()
    @net.default
    def default_net(self):
        # We have 1 additional input for the delimiter which is passed on a
        # separate "control" channel
        # NOTE(review): num_hidden is set from controller_layers, same as
        # num_layers — looks like it may have been meant to be
        # controller_size; confirm against ModelParams' contract.
        model_params = ModelParams(
            memory=self.params.memory,
            controller=self.params.controller,
            data_path=self.params.data_path,
            num_inputs=self.params.sequence_width + 1,
            num_outputs=self.params.sequence_width,
            num_hidden=self.params.controller_layers,
            num_layers=self.params.controller_layers,
            controller_size=self.params.controller_size,
            num_read_heads=self.params.num_read_heads,
            num_write_heads=self.params.num_write_heads,
            memory_size=self.params.memory_n,
            word_size=self.params.memory_m,
            memory_init=self.params.memory_init,
            batch_size=self.params.batch_size,
            is_cuda=self.params.is_cuda
        )
        net = Model(model_params)
        if self.params.is_cuda:
            net = net.cuda()
        return net
    @data_loader.default
    def default_dataloader(self):
        # NOTE(review): params.is_cuda is NOT forwarded here, so batches stay
        # on CPU even when the net is on GPU — verify this is intentional.
        return data_loader(self.params.num_batches, self.params.batch_size,
                           self.params.sequence_width,
                           self.params.sequence_min_len, self.params.sequence_max_len)
    @criterion.default
    def default_criterion(self):
        # Bit-wise binary cross-entropy over the copied sequence.
        return nn.BCELoss()
    @optimizer.default
    def default_optimizer(self):
        return optim.RMSprop(self.net.parameters(),
                             momentum=self.params.rmsprop_momentum,
                             alpha=self.params.rmsprop_alpha,
                             lr=self.params.rmsprop_lr)
|
[
"mohammadmahdi76@gmail.com"
] |
mohammadmahdi76@gmail.com
|
4a9b892072ba58f5757ea70f8734c086671564e2
|
db4c0f86904157c9ba40b495ca6506cd96450821
|
/algorithms/python/104_maxinum_deepth_of_binary_tree.py
|
4fa1f5be1929bd3e5494335478218e090b039496
|
[] |
no_license
|
ppd0705/leetcode
|
c26dfdd077985607354fc8dbac93a5ef3daf8e62
|
543e2ce47ea454d355762e6291a65a1cc6f7af71
|
refs/heads/master
| 2022-08-29T22:50:02.308073
| 2022-08-09T01:28:39
| 2022-08-09T01:28:39
| 221,321,139
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
# Definition for a binary tree node.
class TreeNode:
    """Binary tree node holding a value and optional left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
    def maxDepth(self, root: TreeNode) -> int:
        """Return the number of nodes on the longest root-to-leaf path.

        An empty tree has depth 0.
        """
        if root is None:
            return 0
        left_depth = self.maxDepth(root.left)
        right_depth = self.maxDepth(root.right)
        return max(left_depth, right_depth) + 1
|
[
"ppd0705@163.com"
] |
ppd0705@163.com
|
57de64b03d3c4f7ab214b32f22252f72c6390376
|
94d5ef47d3244950a0308c754e0aa55dca6f2a0e
|
/migrations/versions/5a9e6291a59c_added_scopus_id_field.py
|
a912de3bd6a52767365e01577cee59169158dc04
|
[] |
no_license
|
MUMT-IT/mis2018
|
9cbc7191cdc1bcd7e0c2de1e0586d8bd7b26002e
|
69fabc0b16abfeba44173caa93d4f63fa79033fd
|
refs/heads/master
| 2023-08-31T16:00:51.717449
| 2023-08-31T11:30:13
| 2023-08-31T11:30:13
| 115,810,883
| 5
| 5
| null | 2023-09-14T10:08:35
| 2017-12-30T17:06:00
|
HTML
|
UTF-8
|
Python
| false
| false
| 867
|
py
|
"""added scopus ID field
Revision ID: 5a9e6291a59c
Revises: 42f544489b96
Create Date: 2019-03-25 07:05:06.087909
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5a9e6291a59c'
down_revision = '42f544489b96'
branch_labels = None
depends_on = None
def upgrade():
    """Add the indexed, nullable `scopus_id` column to `research_pub`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('research_pub', sa.Column('scopus_id', sa.String(length=128), nullable=True))
    op.create_index(op.f('ix_research_pub_scopus_id'), 'research_pub', ['scopus_id'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the `scopus_id` index and column (reverses upgrade())."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_research_pub_scopus_id'), table_name='research_pub')
    op.drop_column('research_pub', 'scopus_id')
    # ### end Alembic commands ###
|
[
"likit.pre@mahidol.edu"
] |
likit.pre@mahidol.edu
|
8cce7e85e1266c30a9ed503ccc6006ffbf2c94d5
|
b1ea00015ad8196f78f0a7296ceb55dd5fa68820
|
/Design/SnakeGame.py
|
cf21b44b0c5ce706a5ce5cbebd25421eebe4cc53
|
[] |
no_license
|
YusiZhang/leetcode-python
|
d1fa7c1b76cb13caaa800fe1d20c7bbd5550d871
|
26e2a812d86b4c09b2917d983df76d3ece69b074
|
refs/heads/master
| 2020-05-29T16:08:52.277158
| 2016-10-11T06:50:44
| 2016-10-14T06:36:22
| 58,106,795
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,995
|
py
|
import collections
class SnakeGame(object):
    def __init__(self, width, height, food):
        """
        Initialize your data structure here.
        @param width - screen width
        @param height - screen height
        @param food - A list of food positions
        E.g food = [[1,1], [1,0]] means the first food is positioned at [1,1], the second is at [1,0].
        :type width: int
        :type height: int
        :type food: List[List[int]]
        """
        self.width = width
        self.height = height
        # Food items are consumed in order from the left.
        self.food = collections.deque(food)
        # The snake body; the head always sits at index 0.
        self.snake = collections.deque([[0, 0]])
        # Row/column delta for each of the four directions.
        self.direct = {'U': [-1, 0], 'L': [0, -1], 'R': [0, 1], 'D': [1, 0]}

    def move(self, direction):
        """
        Moves the snake.
        @param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down
        @return The game's score after the move. Return -1 if game over.
        Game over when snake crosses the screen boundary or bites its body.
        :type direction: str
        :rtype: int
        """
        d_row, d_col = self.direct[direction]
        new_head = [self.snake[0][0] + d_row, self.snake[0][1] + d_col]

        inside_rows = 0 <= new_head[0] < self.height
        inside_cols = 0 <= new_head[1] < self.width
        # The current tail cell is about to vacate, so moving onto it is legal.
        bites_body = new_head in self.snake and new_head != self.snake[-1]
        if not inside_rows or not inside_cols or bites_body:
            return -1

        self.snake.appendleft(new_head)
        if self.food and self.food[0] == new_head:
            # Eating: the snake grows, so keep the tail; consume the food.
            self.food.popleft()
        else:
            # Normal move: drop the tail to keep the length unchanged.
            self.snake.pop()
        return len(self.snake) - 1
|
[
"yusi.zhang@oracle.com"
] |
yusi.zhang@oracle.com
|
87f3ec6b5cd4aadb962208b899f021f77a46846a
|
00cf2491d97f079dadee6b05990e9a506983f3b2
|
/datastore/model.py
|
e3643f844a6427676d3ac675d26e92a0011c481c
|
[] |
no_license
|
simonemmott/DataStore
|
af16cdb91f73835203e77108e731acd129e15f96
|
d7ccc2e8540b8cd47bb80318b62b813da7b76357
|
refs/heads/master
| 2020-06-25T02:23:23.221408
| 2019-07-27T22:15:41
| 2019-07-27T22:15:41
| 199,169,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 990
|
py
|
from json_model import JsonModel
import importlib
def import_class(name):
    """Import and return the class identified by the dotted path *name*.

    Raises ValueError when the module lacks the attribute or when the
    attribute exists but is not a class.
    """
    module_path, _, class_name = name.rpartition('.')
    module = importlib.import_module(module_path)
    # Sentinel distinguishes "attribute missing" from "attribute is None".
    _missing = object()
    candidate = getattr(module, class_name, _missing)
    if candidate is _missing:
        raise ValueError('The module {mod} does not define {name}'.format(mod=module_path, name=class_name))
    if not isinstance(candidate, type):
        raise ValueError('{name} is not a class'.format(name=name))
    return candidate
class MetaType(JsonModel):
    """JSON-serializable descriptor of a Python class, resolved on init."""
    # Fully-qualified dotted path of the class (required).
    name = JsonModel.field(str)
    # Short reference name; defaults to the last path segment.
    ref_type = JsonModel.field(str)
    class Meta():
        required_fields = ['name']
    def __init__(self, *args, **kw):
        super(MetaType, self).__init__(*args, **kw)
        if not self.ref_type:
            self.ref_type = self.name.split('.')[-1]
        # Eagerly resolve the actual class object from the dotted path.
        self.type = import_class(self.name)
    @staticmethod
    def from_class(cls):
        # NOTE(review): cls.__name__ has no module prefix, so import_class on
        # the resulting name will likely fail for non-top-level classes —
        # confirm whether cls.__module__ + '.' + cls.__name__ was intended.
        return MetaType(name=cls.__name__)
|
[
"simon.emmott@yahoo.co.uk"
] |
simon.emmott@yahoo.co.uk
|
927e9f9ea8862b1450ddf8c6f8814db817921683
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_051/ch43_2020_08_17_19_52_57_356234.py
|
f295feb2e8b443b435c9674f12d6e46d6abb9ea6
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
# Map a 1-based month number read from stdin to its Portuguese name.
lista = ['janeiro', 'fevereiro', 'março', 'abril', 'maio', 'junho', 'julho',
         'agosto', 'setembro', 'outubro', 'novembro', 'dezembro']
mes = int(input('numero do mes: '))
# Guard the range explicitly: without it, 0 and negative inputs silently
# wrap around via Python's negative indexing (e.g. 0 printed 'dezembro').
if not 1 <= mes <= 12:
    raise ValueError('numero do mes deve estar entre 1 e 12')
print(lista[mes - 1])
|
[
"you@example.com"
] |
you@example.com
|
9cc1f699589a7ce3fd4896716330dd97386159c6
|
c450204fda11a5d3733c463e31e4c10105420534
|
/ans_comparer_gui.py
|
66ea93f18d0abb7444b3ffe4cf88a1608c1a1ea8
|
[] |
no_license
|
Hilary02/IpynbComparer
|
6a25386702ed7de5fdea0ae3281b836970645cce
|
418919562b9eeefbbcc8d694aeab88356ba15f73
|
refs/heads/master
| 2022-11-04T00:56:44.659890
| 2020-06-15T07:09:56
| 2020-06-15T07:55:20
| 272,431,497
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,209
|
py
|
import os
import tkinter as tk
import tkinter.filedialog
import json
from make_dict import *
left_data = None
right_data = None
debug = False
now_select = ""
def log(s):
    """Append *s* as one line to the GUI log area (module-global `logarea`)."""
    logarea.insert("end", f"{s}\n")
def make_model_data():
    """Ask the user for a model-answer notebook and save it as modelanswer.json."""
    log("模範解答を選択してください")
    file_path = tk.filedialog.askopenfilename(
        filetypes=[("模範解答", "*.ipynb")], initialdir="./")
    model_dict = ProblemFileReader.makedict(file_path)
    if not model_dict:
        # Parsing failed (or the dialog was cancelled) — report and bail out.
        log("模範解答の処理に失敗しました")
    else:
        with open("./modelanswer.json", mode="w", encoding="utf-8") as f:
            json.dump(model_dict, f, indent=4, ensure_ascii=False)
        log("modelanswer.jsonを保存しました")
def file_select_f1():
    """Let the user pick the file shown in the LEFT pane and load it."""
    global left_data
    log("左に表示するデータを選択")
    file_path = tk.filedialog.askopenfilename(
        filetypes=[("Jupyter", "*.ipynb"), ("Json", "*.json")], initialdir="./")
    kadai_dict = ProblemFileReader.makedict(file_path)
    if kadai_dict:
        file_name = file_path.split("/")[-1]
        f1la1["text"] = f"ファイル名:{file_name}"
        left_data = kadai_dict
        log("読み込み成功")
        # Left side drives the task list, so rebuild it, then re-compare.
        selector_reset()
        compare()
    else:
        log("読み込み失敗")
def file_select_f2():
    """Let the user pick the file shown in the RIGHT pane and load it."""
    global right_data
    log("右に表示するデータを選択")
    file_path = tk.filedialog.askopenfilename(
        filetypes=[("Jupyter", "*.ipynb"), ("Json", "*.json")], initialdir="./")
    kadai_dict = ProblemFileReader.makedict(file_path)
    if kadai_dict:
        file_name = file_path.split("/")[-1]
        f2la1["text"] = f"ファイル名:{file_name}"
        right_data = kadai_dict
        log("読み込み成功")
        compare()
    else:
        log("読み込み失敗")
def model_update():
    """Write the left pane's edited text for the selected task back to modelanswer.json."""
    # NOTE(review): `now_select` is only read here, so `global` is redundant
    # but harmless.
    global now_select
    with open("./modelanswer.json", mode="r", encoding="utf-8") as f:
        tmp_model = json.load(f)
    # Update both the on-disk model and the in-memory left-pane copy.
    tmp_model[now_select]["input"] = f1tx1.get("1.0", "end-1c")
    tmp_model[now_select]["output"] = f1tx2.get("1.0", "end-1c")
    left_data[now_select]["input"] = f1tx1.get("1.0", "end-1c")
    left_data[now_select]["output"] = f1tx2.get("1.0", "end-1c")
    with open("./modelanswer.json", mode="w", encoding="utf-8") as f:
        json.dump(tmp_model, f, indent=4, ensure_ascii=False)
    log("modelanswer.jsonを左のデータで更新しました")
def selector_reset():
    """Repopulate the task-selector list box from `left_data`'s keys."""
    # Clear every entry with a single ranged delete instead of the original
    # loop that deleted tk.END once per current entry.
    selector.delete(0, tk.END)
    for key in left_data.keys():
        selector.insert(tk.END, key)
def kadai_selected(event):
    """Listbox callback: show the selected task's code/output in both panes."""
    if len(selector.curselection()) == 0:
        return
    i = selector.curselection()
    if not left_data:
        log("左側のデータが未選択")
        return
    # Fill the left pane (code + output) for the chosen task.
    f1tx1.delete("1.0", "end")
    f1tx1.insert("end", left_data[selector.get(i)]["input"])
    f1tx2.delete("1.0", "end")
    f1tx2.insert("end", left_data[selector.get(i)]["output"])
    if not right_data:
        log("右側のデータが未選択")
        return
    global now_select
    # Remember the selection so model_update() knows which task to write back.
    now_select = selector.get(i)
    f2tx1.delete("1.0", "end")
    f2tx1.insert("end", right_data[selector.get(i)]["input"])
    f2tx2.delete("1.0", "end")
    f2tx2.insert("end", right_data[selector.get(i)]["output"])
def strip_margin(s):
    """Return *s* with spaces and quote characters stripped from each line.

    Lines that become empty after stripping are dropped entirely; every
    kept line is terminated with a newline.
    """
    cleaned = (line.strip(" '\"") for line in s.split("\n"))
    return "".join(line + "\n" for line in cleaned if line)
def loose_compare(str1, str2):
    """Compare two strings while ignoring surrounding whitespace/quotes and blank lines."""
    return strip_margin(str1) == strip_margin(str2)
def compare():
    """Compare the two loaded answer sets task-by-task and color the selector.

    Returns a "matched/total" string, or False when either side is missing
    or the two files do not share the same task keys.
    """
    if not left_data or not right_data:
        return False
    keys = left_data.keys()
    q_num = len(keys)
    match_list = [False]*q_num
    match_num = 0
    score.delete("1.0", "end")
    try:
        for i, k in enumerate(keys):
            if loose_compare(left_data[k]["output"], right_data[k]["output"]):
                match_num += 1
                match_list[i] = True
    except Exception as e:
        # A KeyError here means the right file lacks one of the left's tasks.
        log("左右の形式が一致しません")
        return False
    score.insert("end", f"{match_num}/{q_num}")
    # Index by bool: False -> red (mismatch), True -> green (match).
    colors = ("red", "green")
    for i, b in enumerate(match_list):
        selector.itemconfigure(i, foreground="white", background=colors[b])
    return f"{match_num}/{q_num}"
# GUI construction and startup.
if __name__ == "__main__":
    root = tk.Tk()
    root.title("nbcompare")
    root.geometry("1200x600")
    # Left pane: the reference/model answer.
    f1 = tk.Frame(root, relief=tk.GROOVE, bd=2)
    f1la1 = tk.Label(f1, text="ファイル名")
    f1la1.grid(row=0, column=0, padx=2, pady=2, sticky=tk.N + tk.W)
    # File-select button.
    f1bt1 = tkinter.Button(f1, text="ファイル選択", command=file_select_f1)
    f1bt1.grid(row=0, column=1, padx=2, pady=2, sticky=tk.N + tk.E)
    f1la2 = tk.Label(f1, text="コード")
    f1la2.grid(row=1, column=0, padx=2, pady=2, columnspan=2, sticky=tk.W)
    f1tx1 = tk.Text(f1, padx=5, pady=5, width=60,
                    height=15, font=('Consolas', 11))
    f1tx1.grid(row=2, column=0, padx=2, pady=2, columnspan=2)
    f1la3 = tk.Label(f1, text="出力")
    f1la3.grid(row=3, column=0, padx=2, pady=2, columnspan=2, sticky=tk.W)
    f1tx2 = tk.Text(f1, padx=5, pady=5, width=50,
                    height=8, font=('Consolas', 12))
    f1tx2.grid(row=4, column=0, padx=2, pady=2,
               columnspan=2, sticky=tk.N + tk.W)
    f1.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
    # Center pane: the answer under review.
    f2 = tk.Frame(root, relief=tk.GROOVE, bd=2)
    f2la1 = tk.Label(f2, text="ファイル名")
    f2la1.grid(row=0, column=0, padx=2, pady=2, sticky=tk.N + tk.W)
    # File-select button.
    f2bt1 = tkinter.Button(f2, text="ファイル選択", command=file_select_f2)
    f2bt1.grid(row=0, column=1, padx=2, pady=2, sticky=tk.N + tk.E)
    f2la2 = tk.Label(f2, text="コード")
    f2la2.grid(row=1, column=0, padx=2, pady=2, columnspan=2, sticky=tk.W)
    f2tx1 = tk.Text(f2, padx=5, pady=5, width=60,
                    height=15, font=('Consolas', 11))
    f2tx1.grid(row=2, column=0, padx=2, pady=2, columnspan=2)
    f2la3 = tk.Label(f2, text="出力")
    f2la3.grid(row=3, column=0, padx=2, pady=2, columnspan=2, sticky=tk.W)
    f2tx2 = tk.Text(f2, padx=5, pady=5, width=50,
                    height=8, font=('Consolas', 12))
    f2tx2.grid(row=4, column=0, padx=2, pady=2,
               columnspan=2, sticky=tk.N + tk.W)
    f2.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
    # Right pane: task list, match rate, and log output.
    f3 = tk.Frame(root, bd=2)
    f3la1 = tk.Label(f3, text="課題一覧")
    f3la1.pack(side=tk.TOP)
    # Task-selection list box.
    selector = tkinter.Listbox(f3, selectmode=tkinter.SINGLE)
    selector.insert(0, "選択なし")
    selector.bind('<<ListboxSelect>>', kadai_selected)
    selector.pack(side=tk.TOP, fill=tk.X, expand=0)
    f3la2 = tk.Label(f3, text="一致率")
    f3la2.pack(side=tk.TOP)
    score = tk.Text(f3, padx=5, pady=5, width=20,
                    height=1, font=('Consolas', 18))
    score.pack(side=tk.TOP)
    f3la3 = tk.Label(f3, text="ログ")
    f3la3.pack(side=tk.TOP)
    logarea = tk.Text(f3, padx=5, pady=5, width=30,
                      height=20, font=('Consolas', 9))
    logarea.pack(side=tk.TOP)
    f3bt1 = tkinter.Button(f3, text="左の内容でmodelを更新(仮)", command=model_update)
    f3bt1.pack(side=tk.TOP, fill=tk.X, expand=0)
    f3.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
    # First-run setup: create modelanswer.json if it does not exist yet.
    if not os.path.isfile("./modelanswer.json"):
        log("模範回答データがありません")
        make_model_data()
    # Auto-load the stored model answer into the left pane.
    try:
        log("模範回答データを読み込みます")
        with open("./modelanswer.json", mode="r", encoding="utf-8") as f:
            left_data = json.load(f)
        f1la1["text"] = "ファイル名:modelanswer.json"
        selector_reset()
    except Exception as e:
        log("模範回答データが見つかりません")
        file_select_f2()
    root.mainloop()
|
[
"c011605154@edu.teu.ac.jp"
] |
c011605154@edu.teu.ac.jp
|
9eba0b833a0ba139819af0b9aa282f36e595bdaf
|
9d8acc20d2ee1d1957849dfb71c22e0dae2d8c5c
|
/baomoicrawl/venv/Lib/site-packages/scrapy/utils/job.py
|
12a886c4752744d82c9c82f2144df6d642aa170c
|
[] |
no_license
|
thuy4tbn99/TranTruongThuy_17021178_Nhom4_Crawler
|
b0fdedee2942a12d9f64dfed93f43802dc5ab340
|
87c8c07433466bbc43a24ea089f75baeb467c356
|
refs/heads/master
| 2022-11-27T21:36:33.917491
| 2020-08-10T23:24:42
| 2020-08-10T23:24:42
| 286,583,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
import os
def job_dir(settings):
    """Return the configured JOBDIR path, creating the directory on demand.

    A falsy JOBDIR value (None, empty string) is returned unchanged and no
    directory is created.
    """
    job_path = settings['JOBDIR']
    if job_path and not os.path.exists(job_path):
        os.makedirs(job_path)
    return job_path
|
[
"thuy4tbn99@gmail.com"
] |
thuy4tbn99@gmail.com
|
87acdc16f9e7ff0ad3da6aaea1d2590cdc5fdf75
|
99091fded6b655e27a7afd5a81693f9e86d064f6
|
/offset/core/util.py
|
6a1ec85bddc55449b942472a87079f5b6acedf5d
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
dotpot/offset
|
68670ace4945c23d1193ef8a8f57679db4fd9038
|
51200d0ee3a1776ad55d7c3ce53a5237236759e2
|
refs/heads/master
| 2021-01-15T21:50:06.090937
| 2013-10-01T16:55:57
| 2013-10-01T18:26:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
# -*- coding: utf-8 -
#
# This file is part of offset. See the NOTICE for more information.
import fcntl
import os
import time
def nanotime(s=None):
    """Convert seconds to nanoseconds.

    When *s* is None, the current wall-clock time is returned in
    nanoseconds instead.
    """
    if s is None:
        return time.time() * 1000000000
    return s * 1000000000
def from_nanotime(n):
    """Convert a nanosecond count *n* to (float) seconds."""
    nanos_per_second = 1.0e9
    return n / nanos_per_second
def nanosleep(n):
    """Sleep for *n* nanoseconds (actual resolution limited by time.sleep)."""
    time.sleep(n / 1.0e9)
|
[
"bchesneau@gmail.com"
] |
bchesneau@gmail.com
|
f68d8f0d5878ccd2ea18009cd682be3667f78cec
|
7ba05e73515c14fb8d2f3d056b51102131171a11
|
/exercise_funktions/perfect_number.py
|
ceb4a03e0b152d53bd91c0d0ee007e347cd9bfd5
|
[] |
no_license
|
gyurel/SoftUni-Basics-and-Fundamentals
|
bd6d5fa8c9d0cc51f241393afd418633a66c65dc
|
184fc5dfab2fdd410aa8593f4c562fd56211c727
|
refs/heads/main
| 2023-07-05T11:16:58.966841
| 2021-08-31T19:25:40
| 2021-08-31T19:25:40
| 401,485,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
def perfect_number(number):
list_of_devisors = []
# for devisor in range(1, number):
# if number % devisor == 0:
# list_of_devisors.append(devisor)
#
# if sum(list_of_devisors) == number:
# print("We have a perfect number!")
# else:
# print("It's not so perfect.")
list_of_devisors = [devisor for devisor in range(1, number) if number % devisor == 0]
return print("We have a perfect number!" if sum(list_of_devisors) == number else "It's not so perfect.")
# if sum(list_of_devisors) == number:
# print("We have a perfect number!")
# else:
# print("It's not so perfect.")
number = int(input())
perfect_number(number)
|
[
"gyurel@yahoo.com"
] |
gyurel@yahoo.com
|
9c7c5503d6820c8d892b7ba12c79c4c53e2b1abc
|
91f4078045a57eaaafe0b172909d7041e829941c
|
/arjuna-samples/arjex/test/pkg/app_class/check_02_app_model.py
|
66bdc8398d1a691c081dc86f4420c25017d30f6c
|
[
"Apache-2.0"
] |
permissive
|
amiablea2/arjuna
|
0d06d1dfb34309f4b6f39b17298f7acb6c3c48c9
|
af74e0882216881ceca0a10f26442165ffc43287
|
refs/heads/master
| 2023-08-21T20:04:30.416303
| 2021-10-27T06:41:40
| 2021-10-27T06:41:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 978
|
py
|
# This file is a part of Arjuna
# Copyright 2015-2021 Rahul Verma
# Website: www.RahulVerma.net
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from arjuna import *
from arjex.lib.app_class.wp_app_model import WordPress
@for_test
def wordpress(request):
# Setup
wordpress = WordPress()
wordpress.login()
yield wordpress
# Teadown
wordpress.logout()
@test
def check_with_wp_app_model(request, wordpress):
wordpress.tweak_role_value_in_settings("editor")
|
[
"rahulverma81@gmail.com"
] |
rahulverma81@gmail.com
|
6a0133b60e39092bb9168e79b34e7f97ef908275
|
ff738b3ec7e5c8c414f6d3c7d74310d8fab69368
|
/Mock/Interview5/solution1.py
|
63117bb3138185ef7bcaea918ed9a22c4e801a57
|
[] |
no_license
|
jw3329/leetcode-problem-solving
|
a0684ef13bd60e81bd54b91e1b54827aaac9bf16
|
0cc7ad64891a23e348c8214f806a2820ac8c9e0a
|
refs/heads/main
| 2023-08-17T20:36:51.624415
| 2023-08-17T07:09:56
| 2023-08-17T07:09:56
| 170,944,191
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
n = len(nums)
for i in range(n):
for j in range(i+1,n):
if nums[i] + nums[j] == target:
return [i,j]
|
[
"junwon3329@gmail.com"
] |
junwon3329@gmail.com
|
de63bdffcb21ae66826ed72756766dc1638d7361
|
926b3c52070f6e309567c8598248fd5c57095be9
|
/src/mmgeneration/configs/positional_encoding_in_gans/stylegan2_c2_ffhq_512_b3x8_1100k.py
|
b051c9f38b4cd25d7bc657feff88311e3c9f1f18
|
[
"Apache-2.0"
] |
permissive
|
fengbingchun/PyTorch_Test
|
410f7cd2303707b0141d433fb9d144a961e1f4c8
|
df5c2169f0b699bcd6e74adb4cb0e57f7dcd9348
|
refs/heads/master
| 2023-05-23T16:42:29.711338
| 2023-03-25T11:31:43
| 2023-03-25T11:31:43
| 167,339,907
| 15
| 4
| null | 2023-03-25T11:31:45
| 2019-01-24T09:24:59
|
C++
|
UTF-8
|
Python
| false
| false
| 1,260
|
py
|
"""Config for the `config-f` setting in StyleGAN2."""
_base_ = [
'../_base_/datasets/ffhq_flip.py',
'../_base_/models/stylegan/stylegan2_base.py',
'../_base_/default_runtime.py'
]
model = dict(generator=dict(out_size=512), discriminator=dict(in_size=512))
data = dict(
samples_per_gpu=3,
train=dict(dataset=dict(imgs_root='./data/ffhq/ffhq_imgs/ffhq_512')))
ema_half_life = 10. # G_smoothing_kimg
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=5000),
dict(
type='ExponentialMovingAverageHook',
module_keys=('generator_ema', ),
interval=1,
interp_cfg=dict(momentum=0.5**(32. / (ema_half_life * 1000.))),
priority='VERY_HIGH')
]
metrics = dict(
fid50k=dict(
type='FID',
num_images=50000,
inception_pkl='work_dirs/inception_pkl/ffhq-512-50k-rgb.pkl',
bgr2rgb=True),
pr10k3=dict(type='PR', num_images=10000, k=3))
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=30)
lr_config = None
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
])
total_iters = 1100002
|
[
"fengbingchun@163.com"
] |
fengbingchun@163.com
|
7fbf97b1503386b6939f46cfda12993249f306aa
|
3a93a50bf80668a6ede701534f1567c3653729b0
|
/Sept_challenge/arithemetic_slice_II.py
|
5d4391eb891b132eaff46dae5987441400c9534b
|
[] |
no_license
|
Tadele01/Competitive-Programming
|
c16778298b6c1b4c0b579aedd1b5f0d4106aceeb
|
125de2b4e23f78d2e9f0a8fde90463bed0aed70f
|
refs/heads/master
| 2023-09-01T06:00:09.068940
| 2021-09-13T18:04:30
| 2021-09-13T18:04:30
| 325,728,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
from typing import List
from collections import defaultdict
class Solution:
def numberOfArithmeticSlices(self, nums: List[int]) -> int:
subsequence = 0
cache = [defaultdict(int) for _ in nums]
for i in range(len(nums)):
for j in range(i):
diff = nums[i] - nums[j]
cache[i][diff] += cache[j][diff] + 1
subsequence += cache[j][diff]
return subsequence
|
[
"tadeleyednkachw@gmail.com"
] |
tadeleyednkachw@gmail.com
|
8dbeb21e45db293b1eed3a4d259d0a7190aadd1e
|
46e6b58d52aad982ac49e8b4b1e72cc19d5855e7
|
/venv/Lib/encodings/gb2312.py
|
9e002e015138f7d2e656d95a22e0deba0a85c73b
|
[] |
no_license
|
Josquin95/Triqui
|
76584d2801d49546c79422b4635bff5d251b77c9
|
82e3ac1b0e053993d76e3d7aea88204799540e05
|
refs/heads/master
| 2023-07-21T09:38:49.037207
| 2023-07-17T21:42:29
| 2023-07-17T21:42:29
| 113,802,751
| 0
| 1
| null | 2022-10-07T04:32:38
| 2017-12-11T02:27:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
#
# gb2312.py: Python Unicode Codec for GB2312
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_cn
import _multibytecodec as mbc
import codecs
codec = _codecs_cn.getcodec('gb2312')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='gb2312',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
[
"jose.osorio1@correo.icesi.edu.co"
] |
jose.osorio1@correo.icesi.edu.co
|
677da4b419273dd5a0c14a32c94015dd6bb3ad6b
|
10c6f6801ff50e7456ef7dacc57e6c019cbe5311
|
/vendor/pipstrip
|
67cd04941aefc0845693decab4a56cce0f59c29f
|
[
"MIT"
] |
permissive
|
2pax-hq/heroku-buildpack-python
|
6d66f9cc48e80276e65c00bf95d486ff24f5be48
|
4669a838e7a56a369f97c4ba1c65426774d93d96
|
refs/heads/master
| 2021-06-15T18:33:20.806751
| 2017-03-14T12:52:01
| 2017-03-14T12:52:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
#!/usr/bin/env python
import sys
req_file = sys.argv[1]
lines = []
with open(req_file, 'r') as f:
r = f.readlines()
for l in r:
lines.append(l.split('--hash')[0])
with open(req_file, 'w') as f:
f.write('\n'.join(lines))
|
[
"me@kennethreitz.org"
] |
me@kennethreitz.org
|
|
b1a3c8cc07fae71c627558f8887507229a139da5
|
1add595fa4a4b2ebd42e4447310a1b389f88d9fe
|
/aiohttp_cache/setup.py
|
1e9a3bcf79880be8bf5bf2d1b016be470047d511
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
baldur/aiohttp-cache
|
95a57fd71feddb3fa0423c88318ed7c2a828b3a3
|
dc20cb7fb291e2eb755de1e70f356631cc0b16ac
|
refs/heads/master
| 2020-06-18T19:18:12.889982
| 2019-07-11T15:00:24
| 2019-07-11T15:00:24
| 196,415,749
| 0
| 0
|
NOASSERTION
| 2019-07-11T14:59:01
| 2019-07-11T14:59:00
| null |
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
import logging
from aiohttp import web
from .backends import *
from .middleware import *
from .exceptions import *
log = logging.getLogger("aiohttp")
def setup_cache(app: web.Application,
cache_type: str = "memory",
backend_config=None):
app.middlewares.append(cache_middleware)
_cache_backend = None
if cache_type.lower() == "memory":
_cache_backend = MemoryCache()
log.debug("Selected cache: {}".format(cache_type.upper()))
elif cache_type.lower() == "redis":
_redis_config = backend_config or RedisConfig()
assert isinstance(_redis_config, RedisConfig), \
"Config must be a RedisConfig object. Got: '{}'".format(type(_redis_config))
_cache_backend = RedisCache(config=_redis_config)
log.debug("Selected cache: {}".format(cache_type.upper()))
else:
raise HTTPCache("Invalid cache type selected")
app["cache"] = _cache_backend
__all__ = ("setup_cache", )
|
[
"cr0hn@cr0hn.com"
] |
cr0hn@cr0hn.com
|
7e1d3f98f9918b8c57609a3abdbe11d5430f69ca
|
7f8ae468840f175a4744896986d4ec66895f08c2
|
/src/logcollection/loggers.py
|
7f85bc7dc7dc0ceaae168959543cf8019ea29f5d
|
[] |
no_license
|
TakesxiSximada/logcollection
|
1ed54554c12d12e10149d1c473cd0eab00bd1cb2
|
19028f249fe7526227f3fe11607b24812a000b5c
|
refs/heads/master
| 2016-09-11T01:19:13.810367
| 2015-08-30T13:54:24
| 2015-08-30T13:54:24
| 29,011,707
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
# -*- coding: utf-8 -*-
import logging
from functools import lru_cache
from lazr.delegates import delegate_to
from .interfaces import ILogger
@delegate_to(ILogger, context='_context')
class LazyLogger(object):
def __init__(self, name):
self._name = name
@property
@lru_cache()
def _context(self):
return logging.getLogger(self._name)
def getLogger(*args, **kwds):
return LazyLogger(*args, **kwds)
|
[
"takesxi.sximada@gmail.com"
] |
takesxi.sximada@gmail.com
|
deaf0d68b75d76daf2148049eed3e2be5d5008b9
|
60aa3bcf5ace0282210685e74ee8ed31debe1769
|
/core/sims4/repr_utils.py
|
8fe89c74f5e505a9815ce9efad0af05baaee8694
|
[] |
no_license
|
TheBreadGuy/sims4-ai-engine
|
42afc79b8c02527353cc084117a4b8da900ebdb4
|
865212e841c716dc4364e0dba286f02af8d716e8
|
refs/heads/master
| 2023-03-16T00:57:45.672706
| 2016-05-01T17:26:01
| 2016-05-01T17:26:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,243
|
py
|
from types import FrameType
import functools
import sys
def _strip_source_path(path):
for f in sys.path:
while path.startswith(f):
return path[len(f):].lstrip('\\/')
return path
class suppress_quotes(str):
__qualname__ = 'suppress_quotes'
def __str__(self):
return self
def __repr__(self):
return self
def callable_repr(func):
if isinstance(func, FrameType):
code = func.f_code
else:
if isinstance(func, functools.partial):
return 'partial({}, ...)'.format(callable_repr(func.func))
code = func.__code__
return '<{} at {}:{}>'.format(code.co_name, _strip_source_path(code.co_filename), code.co_firstlineno)
def standard_repr(obj, *args, **kwargs):
type_str = type(obj).__name__ if not isinstance(obj, str) else obj
args_str = None
if args:
args_str = [str(i) for i in args]
args_str = ', '.join(args_str)
kwargs_str = None
if kwargs:
kwargs_str = ['{}={}'.format(k, v) for (k, v) in kwargs.items()]
kwargs_str = ', '.join(sorted(kwargs_str))
if args_str and kwargs_str:
return '{}({}, {})'.format(type_str, args_str, kwargs_str)
if args_str or kwargs_str:
return '{}({})'.format(type_str, args_str or kwargs_str)
return '{}()'.format(type_str)
def standard_auto_repr(obj, missing_value_marker='?', omit_missing_attributes=True):
return object.__repr__(obj)
def standard_angle_repr(obj, *args, **kwargs):
type_str = type(obj).__name__
args_str = None
if args:
args_str = [str(i) for i in args]
args_str = ' '.join(args_str)
kwargs_str = None
if kwargs:
kwargs_str = ['{}={}'.format(k, v) for (k, v) in kwargs.items()]
kwargs_str = ' '.join(sorted(kwargs_str))
if args_str and kwargs_str:
return '<{}: {} {}>'.format(type_str, args_str, kwargs_str)
if args_str or kwargs_str:
return '<{}: {}>'.format(type_str, args_str or kwargs_str)
return '<{} at {:#010x}>'.format(type_str, id(obj))
def standard_float_tuple_repr(*floats):
return '(' + ', '.join('{:0.3f}'.format(i) for i in floats) + ')'
def standard_brief_id_repr(guid):
return '{:#018x}'.format(guid)
|
[
"jp@bellgeorge.com"
] |
jp@bellgeorge.com
|
129ec1929c1af7937079abce7452c92eea96f8e4
|
2443f23d928a6b3516f810e3dfdf6f4b72aa0325
|
/st01.Python기초/py08반복문/py08_16_보초값.py
|
75f7ddc86a9165bb9ee575c350e7cb0af877175b
|
[] |
no_license
|
syuri7/Python20200209
|
48653898f0ce94b8852a6a43e4e806adcf8cd233
|
5f0184d9b235ce366e228b84c663a376a9957962
|
refs/heads/master
| 2021-01-01T10:37:59.077170
| 2020-03-15T09:15:50
| 2020-03-15T09:15:50
| 239,241,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
# while문을 사용하여 합계를 구하시오.
# 무한 반복과 반목문(루프) 탈출을 결합한 예정
# 페이지 134 참고
# 무한 반복문은 조건식을 True로 하면 된다.
# 루프 탈출은 break를 사용하면 된다.
sum = 0
count = 0
print("종료하려면 음수를 입력하시오.")
while True: # 무한 루프
입력값 = input("성적을 입력하시오.")
# 정수로 변환
입력값 = int(입력값)
# 입력값이 음수이면 반복문을 종료
if 입력값 < 0:
break # 반복문을 종료
count = count+1 # 입력횟수
# 합계를 구한다.
sum = sum+입력값
# 평균값을 계산한다.
평균값 = sum/count
# 평균값을 출력한다.
str = "성적의 평규는 %s입니다." % (평균값)
print(str)
|
[
"d@d"
] |
d@d
|
53882dc73e368f6c749d7838a985265351315fb0
|
f64d8201c2e55d7631d0a03a7a51d146c7d5c761
|
/00Python代码/flask_learn/10extends_block/extends_block.py
|
e882945721804bf0cdb4dafcb648896e538e10a1
|
[] |
no_license
|
wh-orange/CodeRecord
|
cd14b5ccc1760a3d71762fef596ba9ab8dac8b8c
|
0e67d1dafcb2feaf90ffb55964af7a9be050e0ee
|
refs/heads/master
| 2022-01-18T10:26:27.993210
| 2019-08-04T17:38:35
| 2019-08-04T17:38:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
#encoding: utf-8
from flask import Flask,render_template
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/login/')
def login():
return render_template('login.html')
if __name__ == '__main__':
app.run(debug=True)
|
[
"ljressrg@gmail.com"
] |
ljressrg@gmail.com
|
c0e4444f72f861346f349b0d3e17248c24e001e7
|
6f05f7d5a67b6bb87956a22b988067ec772ba966
|
/data/test/python/b12493130ac5924d0181574c0779561a13d48541admin.py
|
b12493130ac5924d0181574c0779561a13d48541
|
[
"MIT"
] |
permissive
|
harshp8l/deep-learning-lang-detection
|
93b6d24a38081597c610ecf9b1f3b92c7d669be5
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
refs/heads/master
| 2020-04-07T18:07:00.697994
| 2018-11-29T23:21:23
| 2018-11-29T23:21:23
| 158,597,498
| 0
| 0
|
MIT
| 2018-11-21T19:36:42
| 2018-11-21T19:36:41
| null |
UTF-8
|
Python
| false
| false
| 559
|
py
|
from django.contrib import admin
from repositories.models import Repository, Credential
from repositories.forms import RepositoryForm, CredentialForm
class RepositoryAdmin(admin.ModelAdmin):
form = RepositoryForm
list_display = ('name', 'manager', 'endpoint')
class CredentialAdmin(admin.ModelAdmin):
form = CredentialForm
list_display = ('repository_name', 'public_key')
def repository_name(sef, obj):
return obj.repository.name
admin.site.register(Repository, RepositoryAdmin)
admin.site.register(Credential, CredentialAdmin)
|
[
"aliostad+github@gmail.com"
] |
aliostad+github@gmail.com
|
fbefe5ee53df4789d1f34bd68a4a528e2a05ab55
|
a3785b6ff7734d98af1417000cd619a59bd5a268
|
/part_2_mmntv/regression/mpg/master_process.py
|
5cb1a476caa23132c8799010886daa0520f3c007
|
[] |
no_license
|
SuryodayBasak/mst-final-run
|
bd9800744ab4fb6f0947c258ebc1be1151bc9ff2
|
2cde94af03d63f66cc3753843e4e60c92c313466
|
refs/heads/master
| 2022-07-01T22:40:39.003096
| 2020-05-09T04:25:18
| 2020-05-09T04:25:18
| 260,341,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,352
|
py
|
import numpy as np
import data_api as da
import multiprocessing
import time
from knn import KNNRegressor, DwKNNRegressor
from sklearn.model_selection import train_test_split
from ga import GeneticAlgorithm
from sklearn.metrics import mean_squared_error as skmse
from ga_run import ga_run
from pso_run import gbest_pso_run, lbest_pso_run
from sklearn.decomposition import PCA
# Load data.
data = da.Mpg()
X, y = data.Data()
_, nFeats = np.shape(X)
# Values of parameter k to iterate over.
K_VALS = [3, 5, 7, 9, 11, 13, 15]
starttime = time.time()
# Repeat each trial 10 times.
for i in range (0, 10):
x_train, x_test, y_train, y_test = train_test_split(X, y,\
test_size=0.2)
"""
Try non-optimized methods.
"""
# Vanilla KNN.
for k in K_VALS:
reg = KNNRegressor(x_train, y_train, k)
y_pred = reg.predict(x_test)
mse_iter = skmse(y_test, y_pred)
print("xx,knn,", k,",", mse_iter)
# Distance-weighted KNN.
for k in K_VALS:
reg = DwKNNRegressor(x_train, y_train, k)
y_pred = reg.predict(x_test)
mse_iter = skmse(y_test, y_pred)
print("xx,dknn,", k,",", mse_iter)
"""
PCA with KNN.
"""
pca = PCA(n_components = 4)
pca.fit(x_train.copy())
x_train_pca = pca.transform(x_train.copy())
x_test_pca = pca.transform(x_test.copy())
# PCA + Vanilla KNN.
for k in K_VALS:
reg = KNNRegressor(x_train_pca, y_train, k)
y_pred = reg.predict(x_test_pca)
mse_iter = skmse(y_test, y_pred)
print("pca,knn,", k,",", mse_iter)
# PCA + Distance-weighted KNN.
for k in K_VALS:
reg = DwKNNRegressor(x_train_pca, y_train, k)
y_pred = reg.predict(x_test_pca)
mse_iter = skmse(y_test, y_pred)
print("pca,dknn,", k,",", mse_iter)
x_train, x_verif, y_train, y_verif = train_test_split(x_train,\
y_train,\
test_size=0.33)
"""
GA-driven methods.
"""
processes = []
# Use different values of k.
for k in K_VALS:
# Run the GA based optimization.
p = multiprocessing.Process(target = ga_run,\
args = (x_train.copy(),\
y_train.copy(),\
x_test.copy(),\
y_test.copy(),\
x_verif.copy(),\
y_verif.copy(),\
k,))
processes.append(p)
p.start()
for process in processes:
process.join()
"""
GBest_PSO-driven methods.
"""
processes = []
# Use different values of k.
for k in K_VALS:
# Run the GA based optimization.
p = multiprocessing.Process(target = gbest_pso_run,\
args = (x_train.copy(),\
y_train.copy(),\
x_test.copy(),\
y_test.copy(),\
x_verif.copy(),\
y_verif.copy(),\
k,))
processes.append(p)
p.start()
for process in processes:
process.join()
"""
LBest_PSO-driven methods.
"""
processes = []
# Use different values of k.
for k in K_VALS:
# Run the GA based optimization.
p = multiprocessing.Process(target = lbest_pso_run,\
args = (x_train.copy(),\
y_train.copy(),\
x_test.copy(),\
y_test.copy(),\
x_verif.copy(),\
y_verif.copy(),\
k,))
processes.append(p)
p.start()
for process in processes:
process.join()
print('That took {} seconds'.format(time.time() - starttime))
|
[
"suryodaybasak@gmail.com"
] |
suryodaybasak@gmail.com
|
6ca259815af0d360b758e2b3500b632cac6ef117
|
301a6d0e527c0740faa9d07f3e75ef719c51f858
|
/gantcal/cal/migrations/0006_auto_20160210_1233.py
|
e1243e4b17e588761e3b64e02d5c4d74b5ef563c
|
[] |
no_license
|
akmiller01/gantcal
|
249eafa2de1d261ba7e54d59a8275e980caaf408
|
da99526f3a353bcc83fb2900044a4c8e8db0e6b5
|
refs/heads/master
| 2021-01-17T15:24:23.206363
| 2017-02-22T14:21:04
| 2017-02-22T14:21:04
| 51,007,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 956
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-10 12:33
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('cal', '0005_auto_20160208_2037'),
]
operations = [
migrations.AddField(
model_name='event',
name='attendee',
field=models.ManyToManyField(blank=True, related_name='events', related_query_name='event', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='event',
name='estimated_cost',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='event',
name='purpose',
field=models.TextField(blank=True, null=True),
),
]
|
[
"alex.k.miller@gmail.com"
] |
alex.k.miller@gmail.com
|
a8b262d5222cd6bfb53827635cddb4926379e537
|
a93cb5d670ab3b11f75f1afbd925fea2fac3aa92
|
/backend/maze_game_19808/settings.py
|
edb194014f1bab70390523539427a56c2ea70179
|
[] |
no_license
|
crowdbotics-apps/maze-game-19808
|
156f1d011f8da29906ebde97688885207a1ce0b7
|
4abba9ca9b5adbb9a6c16005beae1c7946963182
|
refs/heads/master
| 2022-12-05T16:41:49.030137
| 2020-08-27T00:27:17
| 2020-08-27T00:27:17
| 290,634,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,901
|
py
|
"""
Django settings for maze_game_19808 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'maze_game_19808.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'maze_game_19808.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
24450b29d23b2b7f0f99a3b2bb2811bd670bf89a
|
babc3e26d66a8084c9f84a0431338bafabae6ffd
|
/TaeJuneJoung/ACM/brute_force/p1182.부분수열의 합.py
|
57966b23796f176632ebfe67ed031fd3ce6133ab
|
[] |
no_license
|
hoteldelluna/AlgoStudy
|
5c23a1bfb07dbfbabc5bedd541d61784d58d3edc
|
49ec098cecf2b775727d5648161f773e5488089b
|
refs/heads/dev
| 2022-10-09T14:29:00.580834
| 2020-01-25T14:40:55
| 2020-01-25T14:40:55
| 201,632,052
| 5
| 0
| null | 2020-01-25T14:40:57
| 2019-08-10T13:11:41
|
Python
|
UTF-8
|
Python
| false
| false
| 407
|
py
|
"""
[완전탐색:Brute-force Search]
집합의 모든 원소의 집합을 꺼내서 합한 후,
해당 값과 같은지 비교하여 같으면 cnt++
"""
N, R = map(int, input().split())
arr = list(map(int, input().split()))
cnt = 0
for i in range(1, 1 << N):
sum_num = 0
for j in range(N):
if i & 1 << j != 0:
sum_num += arr[j]
if sum_num == R:
cnt += 1
print(cnt)
|
[
"jtj0525@gmail.com"
] |
jtj0525@gmail.com
|
fceb973003007ef5405eb8febf5ac8a41ce33ad7
|
b8cc6d34ad44bf5c28fcca9e0df01d9ebe0ee339
|
/requests模块/5.5、高级用法-异常处理.py
|
42990463ecb480ba842a6fdf377487716d805e8c
|
[] |
no_license
|
python-yc/pycharm_script
|
ae0e72898ef44a9de47e7548170a030c0a752eb5
|
c8947849090c71e131df5dc32173ebe9754df951
|
refs/heads/master
| 2023-01-05T06:16:33.857668
| 2020-10-31T08:09:53
| 2020-10-31T08:09:53
| 296,778,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
# -*- coding: utf-8 -*-
#异常处理
import requests
from requests.exceptions import * #可以查看requests.exceptions获取异常类型
try:
r = requests.get('http://www.baidu.com', timeout=0.001)
except ReadTimeout:
print('=======:')
except ConnectionError: # 网络不通
print('------')
except Timeout:
print('aaaaaa')
except RequestException:
print('Error')
|
[
"15655982512.com"
] |
15655982512.com
|
7a4e323cd0bfbc8996619f0391c5f023deeeeaa8
|
6c866622c26e36da473d411d8f61252da12ecf4c
|
/demo/mongodb/runserver.py
|
dc77eacbd1d8ad03bc3c1518ca54d2e2fa08562e
|
[] |
no_license
|
mrpadan/resource
|
d3d5e018ae927871af8f463293ccb762eab3aaa9
|
fe9cdfc7553c7748c3eb2b7e50ce1dc6167b9e8d
|
refs/heads/master
| 2021-01-20T17:33:35.828883
| 2014-10-13T15:02:25
| 2014-10-13T15:19:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,736
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from jsonform import JsonForm
from pymongo import MongoClient
from flask import Flask
from resource import Resource, Filter
from resource.index import Index
from resource.db.mongo import Collection, MongoSerializer
from resource.contrib.framework.flask import add_resource, make_index
DB = MongoClient().test
class UserForm(JsonForm):
def validate_datetime(value):
if not isinstance(value, datetime):
return 'value must be an instance of `datetime`'
schema = {
'type': 'object',
'properties': {
'name': {'type': 'string'},
'password': {'type': 'string'},
'date_joined': {'custom': validate_datetime}
}
}
class UserFilter(Filter):
def query_date_range(self, query_params):
date_joined_gt = query_params.pop('date_joined_gt', None)
date_joined_lt = query_params.pop('date_joined_lt', None)
conditions = {}
if date_joined_gt:
conditions.update({'$gt': date_joined_gt})
if date_joined_lt:
conditions.update({'$lt': date_joined_lt})
if conditions:
return {'date_joined': conditions}
else:
return {}
resources = [
Resource('users', Collection, form_cls=UserForm,
serializer_cls=MongoSerializer, filter_cls=UserFilter,
kwargs={'db': DB, 'table_name': 'user'})
]
app = Flask(__name__)
if __name__ == '__main__':
for r in resources:
add_resource(app, r)
index = Resource('index', Index, uri='/',
kwargs={'resources': resources})
make_index(app, index)
app.run(debug=True)
|
[
"luopeng.he@gmail.com"
] |
luopeng.he@gmail.com
|
f3ef463a068a6d88ed0913e877237474d96845e8
|
0f887bc316d8c665899258406b67f6e25838f9cf
|
/kangaroo.py
|
18362f5f21c7f43b649fe0f2a3dec1f8224f2523
|
[] |
no_license
|
bawejakunal/hackerrank
|
f38489be488f76e782b7f8e8c5fdc2ba1a024c30
|
999008a19c3196c9706a9165923f5683ea887127
|
refs/heads/master
| 2021-01-12T04:31:50.984640
| 2017-09-08T02:28:44
| 2017-09-08T02:28:44
| 77,662,863
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
#!/bin/python
"""
https://www.hackerrank.com/challenges/kangaroo
x1 + v1.n = x2 + v2.n
n = float(x2 - x1)/(v1 - v2)
if n > 0 and whole integer then possible because the
kangaroo starting at lesser position will be able to
catch up AND land at exactly same position as the
kangaroo with a head start
"""
import sys
x1, v1, x2, v2 = raw_input().strip().split(' ')
x1, v1, x2 ,v2 = [int(x1),int(v1),int(x2),int(v2)]
#avoid division by zero
if v1 == v2:
print 'NO'
else:
n = float(x2 - x1)/(v1 - v2)
# given x1 < x2, so n can not be 0
if n > 0 and n.is_integer():
print 'YES'
else:
print 'NO'
|
[
"bawejakunal15@gmail.com"
] |
bawejakunal15@gmail.com
|
302dc8251d11ce221c0e3dafc58c003f18e54076
|
88a856c080080dfd15c6c50e81a82ae4f230b65a
|
/tests/selenium.py
|
3b31f912fb83b134f4b47383aa4226e9edd67bb3
|
[] |
no_license
|
ephremworkeye/nov_ecommerce
|
682c0776bf86f8656ee968ee13cb95deb73f4a7a
|
f847bfd1f0fff29f321113d200f7c653ae4c7214
|
refs/heads/master
| 2023-09-04T01:32:32.191459
| 2021-10-28T08:17:02
| 2021-10-28T08:17:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
import pytest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
@pytest.fixture(scope="module")
def chrome_browser_instance(request):
    """Module-scoped fixture yielding a (non-headless) Chrome WebDriver."""
    chrome_opts = Options()
    chrome_opts.headless = False
    driver = webdriver.Chrome(options=chrome_opts)
    yield driver
    # NOTE(review): close() only closes the current window; quit() would also
    # terminate the driver session -- confirm which is intended.
    driver.close()
|
[
"ephremworkeye@gmail.com"
] |
ephremworkeye@gmail.com
|
28fdc6ab07daa339e7c05ac8e9fdcee819a5441a
|
05109979de89fbd5f69d6cc85ac794497dc441d1
|
/apps/destination/adminx.py
|
c00a4a4636d2b3fc21bcc9fbe9a56db2b356ce7d
|
[] |
no_license
|
bbright3493/douxing
|
c70bdb11d3f0e253d8545ab4bdf89d330b0e0d6f
|
0607aee7c59aa4845d6bc86940d7885cd83466a6
|
refs/heads/master
| 2021-05-06T10:11:22.815128
| 2017-12-15T09:40:34
| 2017-12-15T09:40:34
| 114,102,089
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,041
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'bb'
__date__ = '2017/12/15 23:34'
import xadmin
from .models import *
from xadmin import views
class GlobalSettings(object):
    # Branding and layout settings for the xadmin back office.
    site_title="都行后台管理系统"
    site_footer="都行"
    menu_style="accordion"
xadmin.site.register(views.CommAdminView, GlobalSettings)
class DestinationAdmin(object):
    # Columns shown in / searchable from / filterable in the destination list view.
    list_display = ['name', 'publish_user', 'desc', 'lat', 'lng', 'custom', 'festival', 'religion', 'address','publish_time']
    search_fields = ['name', 'publish_user', 'desc', 'lat', 'lng', 'custom', 'festival', 'religion', 'address','publish_time']
    list_filter = ['name', 'publish_user', 'desc', 'lat', 'lng', 'custom', 'festival', 'religion', 'address','publish_time']
xadmin.site.register(Destination, DestinationAdmin)
class TagInfoAdmin(object):
    # First-level tag admin; second_tag links to the next tag level.
    list_display = ['name', 'type', 'desc', 'second_tag']
    search_fields = ['name', 'type', 'desc', 'second_tag']
    list_filter = ['name', 'type', 'desc', 'second_tag']
xadmin.site.register(TagInfo, TagInfoAdmin)
class SecondTagInfoAdmin(object):
    # Second-level tag admin; third_tag links to the deepest tag level.
    list_display = ['name', 'type', 'desc', 'third_tag']
    search_fields = ['name', 'type', 'desc', 'third_tag']
    list_filter = ['name', 'type', 'desc', 'third_tag']
xadmin.site.register(SecondTagInfo, SecondTagInfoAdmin)
class ThirdTagInfoAdmin(object):
    # Leaf tag level: no further child-tag column.
    list_display = ['name', 'type', 'desc']
    search_fields = ['name', 'type', 'desc']
    list_filter = ['name', 'type', 'desc']
xadmin.site.register(ThirdTagInfo, ThirdTagInfoAdmin)
class TagDestinationAdmin(object):
    # Tag <-> destination association records.
    list_display = ['tag', 'destination', 'add_time']
    search_fields = ['tag', 'destination']
    list_filter = ['tag', 'destination']
xadmin.site.register(TagDestination, TagDestinationAdmin)
class DestinationImageAdmin(object):
    # Destination image gallery admin; film icon in the xadmin menu.
    list_display = ['image', 'destination', 'add_id']
    search_fields = ['image', 'destination', 'add_id']
    list_filter = ['image', 'destination', 'add_id']
    model_icon = 'fa fa-film'
xadmin.site.register(ImageInfo, DestinationImageAdmin)
|
[
"44704708@qq.com"
] |
44704708@qq.com
|
0814cd6f81ee06c0ee5732b9574cd914859e16c9
|
4610d0284416361643095ca9c3f404ad82ca63c2
|
/src/sploitego/metasploit/utils.py
|
1f796affd48ec6853417a13b33a928b7f53ed032
|
[] |
no_license
|
mshelton/sploitego
|
165a32874d955621c857552fb9692ecf79e77b7e
|
3944451a110f851a626459767d114569d80a158c
|
refs/heads/master
| 2020-12-25T03:11:58.071280
| 2012-08-16T22:33:10
| 2012-08-16T22:33:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,100
|
py
|
#!/usr/bin/env python
from optparse import OptionParser
__author__ = 'Nadeem Douba'
__copyright__ = 'Copyright 2012, Sploitego Project'
__credits__ = ['Nadeem Douba']
__license__ = 'GPL'
__version__ = '0.1'
__maintainer__ = 'Nadeem Douba'
__email__ = 'ndouba@gmail.com'
__status__ = 'Development'
__all__ = [
'parseargs'
]
def parseargs():
    """Parse msfrpcd connection options from the command line.

    Returns the optparse options object; exits with usage help when the
    mandatory password (-P) is missing.
    """
    p = OptionParser()
    p.add_option("-P", dest="password", help="Specify the password to access msfrpcd", metavar="opt")
    p.add_option("-S", dest="ssl", help="Disable SSL on the RPC socket", action="store_false", default=True)
    p.add_option("-U", dest="username", help="Specify the username to access msfrpcd", metavar="opt", default="msf")
    p.add_option("-a", dest="server", help="Connect to this IP address", metavar="host", default="127.0.0.1")
    # NOTE(review): no type="int" is given, so a port passed with -p arrives as
    # a string while the default stays an int -- confirm callers handle both.
    p.add_option("-p", dest="port", help="Connect to the specified port instead of 55553", metavar="opt", default=55553)
    o, a = p.parse_args()
    if o.password is None:
        print '[-] Error: a password must be specified (-P)\n'
        p.print_help()
        exit(-1)
    return o
|
[
"ndouba@gmail.com"
] |
ndouba@gmail.com
|
ac77fbc3989b6f85e192007025647230f9038e70
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/ptp1b_input/L77/77-bs_wat_20Abox/set_1ns_equi.py
|
55249588767ba5d4a982ae8d8e78310b01454d53
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 916
|
py
|
import os
# Root of this TI run; a working directory is (re)created under it for every
# lambda window.  NOTE: `dir` shadows the builtin of the same name.
dir = '/mnt/scratch/songlin3/run/ptp1b/L77/wat_20Abox/ti_one-step/77_bs/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_pbs = filesdir + 'temp_1ns_equi.pbs'
# Quadrature lambda values for the thermodynamic-integration windows.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
  # Recreate a clean per-lambda working directory and enter it.
  os.system("rm -r %6.5f" %(j))
  os.system("mkdir %6.5f" %(j))
  os.chdir("%6.5f" %(j))
  # NOTE(review): "rm *" wipes the current directory -- destructive if the
  # chdir above failed.
  os.system("rm *")
  workdir = dir + "%6.5f" %(j) + '/'
  #equiin: copy the template input and substitute this window's lambda for XXX
  eqin = workdir + "%6.5f_equi.in" %(j)
  os.system("cp %s %s" %(temp_equiin, eqin))
  os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
  #PBS: same substitution in the batch script
  pbs = workdir + "%6.5f_1ns_equi.pbs" %(j)
  os.system("cp %s %s" %(temp_pbs, pbs))
  os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
  #top: topology and restart files shared by all windows
  os.system("cp ../77-bs_merged.prmtop .")
  os.system("cp ../0.5_equi_0.rst .")
  #submit pbs
  os.system("qsub %s" %(pbs))
  os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
28c70b3e1130f8b502b3c9c2c40227b8a0097822
|
155bf47fa1b33a31576f6b8b90aaa74cd41e352a
|
/04PythonScraping/chap07提升爬虫的速度/demo08-mutilprocess3.py
|
16dc3b5e6f88d6f3427e90b4e4c02db36751cbe7
|
[] |
no_license
|
ares5221/Python-Crawler-Projects
|
af4ec40a26f4f69ef285a0edf0428192a594d4cd
|
45b496000631f0f3b887501d9d67f3e24f5e6186
|
refs/heads/master
| 2021-07-03T07:11:25.474055
| 2020-09-08T08:17:17
| 2020-09-08T08:17:17
| 145,980,513
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,105
|
py
|
from multiprocessing import Pool, Manager
import time
import requests
# Load the target URLs: alexa.txt holds one "rank\turl" pair per line.
# NOTE: the loop variable `file` shadows a builtin name.
link_list = []
with open('alexa.txt', 'r') as file:
    file_list = file.readlines()
    for eachone in file_list:
        link = eachone.split('\t')[1]
        link = link.replace('\n','')
        link_list.append(link)
# Wall-clock start time of the whole crawl (reported at the end).
start = time.time()
def crawler(q, index):
    """Worker: drain URLs from the shared queue, fetching and reporting each."""
    worker_tag = 'Process-' + str(index)
    while True:
        if q.empty():
            break
        url = q.get(timeout=2)
        try:
            response = requests.get(url, timeout=20)
        except Exception as exc:
            print(worker_tag, q.qsize(), url, 'Error: ', exc)
        else:
            print(worker_tag, q.qsize(), response.status_code, url)
if __name__ == '__main__':
    # Shared queue so worker processes can all pull from the same URL list.
    manager = Manager()
    workQueue = manager.Queue(1000)
    # 填充队列
    for url in link_list:
        workQueue.put(url)
    pool = Pool(processes=3)
    for i in range(4):
        # BUG FIX: Pool.apply blocks until each task finishes, so the original
        # ran the four workers strictly one after another.  apply_async
        # submits them all immediately so they actually crawl in parallel.
        pool.apply_async(crawler, args=(workQueue, i))
    print ("Started processes")
    # No more tasks will be submitted; wait for every worker to finish.
    pool.close()
    pool.join()
    end = time.time()
    print ('Pool + Queue多进程爬虫的总时间为:', end-start)
    print ('Main process Ended!')
|
[
"674361437@qq.com"
] |
674361437@qq.com
|
aada138df4c56627acf62eaa60266b52a077bae2
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/429/usersdata/321/107494/submittedfiles/jogoDaVelha_BIB.py
|
ae59cd865960f39bb8cd8d243bc082c289f59efd
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,715
|
py
|
# -*- coding: utf-8 -*-
# COLOQUE SUA BIBLIOTECA A PARTIR DAQUI
import random
# Shared 3x3 game board; a single space marks a free cell.
tabuleiro = [
    [' ',' ',' '],
    [' ',' ',' '],
    [' ',' ',' ']]
def nome():
    # Ask the player's name.  input() already returns str; the str() wrapper
    # is redundant but harmless.
    nome = str(input('Qual seu nome? \n'))
    return nome
def solicitaSimboloDoHumano():
    # Keep asking until the player types a valid symbol (uppercase X or O only).
    s = str(input('Qual símbolo você deseja utilizar no jogo? (X ou O) \n'))
    while s != 'X' and s != 'O':
        print('Insira um símbolo válido.')
        s = str(input('Qual símbolo você deseja utilizar no jogo? (X ou O) '))
    return s
def sorteioPrimeiraJogada(nome):
    # Coin flip for who starts: 1 -> human (nome), 0 -> computer.
    j1 = nome
    j2 = 'Computador'
    sort = random.randint(0,1)
    if sort == 1:
        print ('Vencedor do sorteio para início do jogo: %s' % j1)
    if sort == 0:
        print ('Vencedor do sorteio para início do jogo: %s' % j2)
    return sort
def JogadaHumana(nome,b):
    # Read the human's move as a two-digit number: tens digit = row,
    # units digit = column; retry until the chosen cell is free.
    while True:
        c= int(input('Qual a sua jogada, %s? ' % nome))
        x = c // 10
        y = c % 10
        if validaJogada(nome,tabuleiro,x,y,b):
            # Pad the symbol with spaces so the printed board stays aligned.
            tabuleiro[x][y]= ' '+b+' '
            return True
        #else:
            #return False
def jogadaComputador(computador):
    """Place the computer's symbol on a random free cell, then show the board.

    Loops forever if the board has no free cell -- callers must check for a
    draw first (NOTE(review): confirm the game loop guarantees this).
    """
    while True:
        linha= random.randint(0,2)
        coluna= random.randint(0,2)
        if tabuleiro[linha][coluna]==' ':
            tabuleiro[linha][coluna] = computador
            # BUG FIX: the original called mostraTabuleiro(), which does not
            # exist (the function is named mostrarTabuleiro) -> NameError.
            mostrarTabuleiro()
            return True
def mostrarTabuleiro():
    """Print the current global board, one blank spacer line between rows."""
    print(' ')
    for fileira in tabuleiro:
        print('|'.join(fileira))
        print(' ')
def validaJogada(nome, tabuleiro, l, c, s):
    """Return True when cell (l, c) of `tabuleiro` is free, False otherwise.

    The warning message is printed only for human players (nome != ''), so
    the computer's random probing stays silent.  `s` (the symbol) is kept
    for interface compatibility but is not needed for the check.
    """
    # BUG FIX (cleanup): dropped the unused local `jogadapossivel`.
    if tabuleiro[l][c] != ' ':
        if nome != '':
            print('OPS!!! Essa jogada não está disponível. Tente novamente!')
        return False
    return True
'''
def verificaVencedor(s,tabuleiro,nome):
if (tabuleiro[0][0] == tabuleiro[0][1] == tabuleiro[0][2] == s or
tabuleiro[1][0] == tabuleiro[1][1] == tabuleiro[1][2] == s or
tabuleiro[2][0] == tabuleiro[2][1] == tabuleiro[2][2] == s or
tabuleiro[0][0] == tabuleiro[1][0] == tabuleiro[2][0] == s or
tabuleiro[0][1] == tabuleiro[1][1] == tabuleiro[2][1] == s or
tabuleiro[0][2] == tabuleiro[1][2] == tabuleiro[2][2] == s or
tabuleiro[0][0] == tabuleiro[1][1] == tabuleiro[2][2] == s or
tabuleiro[0][2] == tabuleiro[1][1] == tabuleiro[2][0] == s ):
w= tabuleiro [0][0]
if w==s:
print('Vencedor: %s' %nome)
return True
else:
print('Vencedor: Computador')
return True
else:
cont=0
for i in range(0,len(tabuleiro)):
for j in range(0,len(tabuleiro)):
if tabuleiro[i][j]!=' ':
cont += 1
if cont==9:
print ('Deu Velha')
return True
else:
return False
'''
def verificaVencedor(s,tabuleiro,nome):
    """Check the board for a finished game.

    Returns True (and prints the winner, or 'Deu Velha' on a draw) when the
    game is over; returns False while play should continue.  A line belongs
    to the human when its symbol equals `s`, otherwise to the computer.
    """
    # All eight winning lines, in the same order the original chain checked
    # them: rows 0-2, columns 0-2, main diagonal, anti-diagonal.
    linhas = [
        [(0, 0), (0, 1), (0, 2)],
        [(1, 0), (1, 1), (1, 2)],
        [(2, 0), (2, 1), (2, 2)],
        [(0, 0), (1, 0), (2, 0)],
        [(0, 1), (1, 1), (2, 1)],
        [(0, 2), (1, 2), (2, 2)],
        [(0, 0), (1, 1), (2, 2)],
        [(0, 2), (1, 1), (2, 0)],
    ]
    for linha in linhas:
        celulas = [tabuleiro[i][j] for i, j in linha]
        if celulas[0] != ' ' and celulas[0] == celulas[1] == celulas[2]:
            if celulas[0] == s:
                print('Vencedor: %s' %nome)
            else:
                print('Vencedor: Computador')
            return True
    # No winner: the game ends in a draw once all nine cells are taken.
    ocupadas = sum(1 for fileira in tabuleiro for casa in fileira if casa != ' ')
    if ocupadas == 9:
        print ('Deu Velha')
        return True
    return False
def jogueNovamente():
    """Ask (in Portuguese) whether to play again; True for an affirmative answer.

    BUG FIX: the original tested startswith('y'), which can never match the
    advertised answers "sim"/"não"; it now accepts answers starting with 's'.
    """
    print('Você quer jogar de novo? (sim ou não)')
    return input().lower().startswith('s')
#Olhar o erro
'''
def verifica(m,situacao):
flag=False
if (m[0][0]==m[0][1]==m[0][2]=='X'):
print 'Jogador 1 ganhou'
situacao=True
elif (m[1][0]==m[1][1]==m[1][2]=='X'):
print 'Jogador 1 ganhou'
situacao=True
elif (m[2][0]==m[2][1]==m[2][2]=='X'):
print 'Jogador 1 ganhou'
situacao=True
elif (m[0][0]==m[1][0]==m[2][0]=='X'):
print 'Jogador 1 ganhou'
situacao=True
elif (m[0][1]==m[1][1]==m[2][1]=='X'):
print 'Jogador 1 ganhou'
situacao=True
elif (m[0][2]==m[1][2]==m[2][2]=='X'):
print 'Jogador 1 ganhou'
situacao=True
elif (m[0][0]==m[1][1]==m[2][2]=='X'):
print 'Jogador 1 ganhou'
situacao=True
elif (m[0][2]==m[1][1]==m[2][0]=='X'):
print 'Jogador 1 ganhou'
situacao=True
elif (m[0][0]==m[0][1]==m[0][2]=='0'):
print 'Jogador 1 ganhou'
situacao=True
elif (m[1][0]==m[1][1]==m[1][2]=='0'):
print 'Jogador 1 ganhou'
situacao=True
elif (m[2][0]==m[2][1]==m[2][2]=='0'):
print 'Jogador 1 ganhou'
situacao=True
elif (m[0][0]==m[1][0]==m[2][0]=='0'):
print 'Jogador 1 ganhou'
situacao=True
elif (m[0][1]==m[1][1]==m[2][1]=='0'):
print 'Jogador 1 ganhou'
situacao=True
elif (m[0][2]==m[1][2]==m[2][2]=='0'):
print 'Jogador 1 ganhou'
situacao=True
elif (m[0][0]==m[1][1]==m[2][2]=='0'):
print 'Jogador 1 ganhou'
situacao=True
elif (m[0][2]==m[1][1]==m[2][0]=='0'):
print ('Jogador 1 ganhou')
situacao=True
else:
for i in matriz:
for j in i:
if j==' ':
flag=True
if flag==False:
print ('Deu Velha')
situacao=True
return situacao
'''
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
bdf5c1a2838f3284de099196371fc88742604fc6
|
f73e0439f953f968f2021e54866a367617debff8
|
/blog/views.py
|
e4b976339b39b8067c110e831249a1e9e5319cde
|
[
"MIT"
] |
permissive
|
kkampardi/DjangoTesting
|
d86542beae8a7cc16fc2fc2774c1c3348142345b
|
1092c41d9d4930f0512fac79b4d95836c70e5f3a
|
refs/heads/master
| 2020-06-16T12:13:17.240496
| 2017-05-09T07:26:30
| 2017-05-09T07:26:30
| 75,104,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
from django.shortcuts import render
from django.views.generic.base import TemplateView
from .models import Entry
class HomeView(TemplateView):
    # Renders the landing page.
    template_name = 'index.html'
    # NOTE(review): TemplateView does not consume `queryset`, so these entries
    # are never passed to the template -- ListView may have been intended;
    # confirm against the template's context usage.
    queryset = Entry.objects.order_by('-created_at')
|
[
"kkampardi@gmail.com"
] |
kkampardi@gmail.com
|
a8fe0988ccaba26dd6c06fdb0050af81e79fc56b
|
bebec878576db20eb38f7e981ab42a7dee20d431
|
/src/publisher.py
|
1ee23b019c6b41a01d5afcf236fd07f02ba0045a
|
[] |
no_license
|
rorymcstay/algo
|
890809d03c68b3e09ee3e48b3cf71d74ac867215
|
499e39511b2ad495a6559790c8c7d84bdaa0a32e
|
refs/heads/master
| 2021-06-28T06:34:26.410025
| 2021-03-28T19:39:16
| 2021-03-28T19:39:16
| 224,317,279
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,401
|
py
|
import csv
import logging
import threading
from threading import Thread
from multiprocessing import Process
from event_config import subscribers
from src.engine import ThreadPool
class EventConfig:
    """Configuration describing one published event type."""

    def __init__(self, dataClass, mapping, complex=False):
        """
        The configuration class for an event to be published
        :param dataClass: the type of the data published
        :param complex: whether or not the mapping is complex TODO check the type of the result of the mapping instead
        :param mapping: the mapping to use. Either complex and returns the object or a tuple order mapping
        """
        self.dataClass = dataClass
        # The mapping is attached to the data class itself so that
        # Publisher.factory can reach it through the type.
        self.dataClass.mapping = mapping
        # BUG FIX (cleanup): the original assigned self.dataClass a second,
        # redundant time here; the duplicate assignment has been removed.
        self.complex = complex
class Engine:
    # Placeholder shared-engine slot; not used by this base class itself.
    engine = None

    def __init__(self, connectionString):
        """
        Base class of Engines to provide to publisher
        :param connectionString: A single connection string to the feed
        """
        self.connectionString = connectionString
        pass

    def __iter__(self):
        # Subclasses override this to yield raw records from the feed.
        pass
class FileEngine:
    def __init__(self, connectionString):
        """
        Stream over a file
        :param connectionString: path to file
        """
        # NOTE(review): the file handle opened here is never closed; it lives
        # for the lifetime of the engine -- confirm whether that is acceptable.
        self.engine = csv.reader(open(connectionString, 'r'))

    def __iter__(self):
        """
        The cursor to the data feed
        :return: yields one parsed CSV row (list of strings) per line
        """
        for line in self.engine:
            logging.debug(line)
            yield line
class Publisher(Process):
    def __init__(self, connectionString, engine, eventConfig):
        """
        Publish event to all subscribers in the global subscriber
        :param engine: the class of engine to use
        :param connectionString: parameter to engine
        :param eventConfig: the event config object
        """
        # NOTE(review): Process.__init__ is deliberately deferred to init();
        # this instance is not a startable Process until init() has run.
        self.pause_cond = threading.Condition(threading.Lock())
        self.engine = engine(connectionString)
        self.data_type = eventConfig.dataClass
        self.complex = eventConfig.complex
        self.threadPool = ThreadPool(2)
        self.connectionString = connectionString

    def notifySubscribers(self, data):
        """
        Call back to global subscriber list
        :param data: the event object to deliver to every subscriber
        :return: None; blocks until all subscriber callbacks have completed
        """
        global subscribers
        for sub in subscribers:
            self.threadPool.add_task(sub.onPublishedEvent, data)
        self.threadPool.wait_completion()

    def factory(self, *fields):
        """
        Construct events to publish
        :param fields: raw field values read from the engine
        :return: an instance of the configured data type
        """
        if self.complex:
            # A complex mapping builds the event object itself.
            return self.data_type.mapping(*fields)
        else:
            # A simple mapping reorders the fields into constructor order.
            return self.data_type(*self.data_type.mapping(*fields))

    def run(self) -> None:
        """
        Run method for the publisher
        :return: None; iterates the engine until it is exhausted
        """
        logging.info(f'starting publisher {self.__class__} on {self.connectionString}')
        for i in self.engine:
            with self.pause_cond:
                logging.info(f'received {self.data_type.__name__} event: {i} ')
                self.notifySubscribers(self.factory(*i))
                # Brief pause between records while holding the condition.
                self.pause_cond.wait(0.1)

    def init(self) -> None:
        """
        start the publisher in a new thread
        """
        # NOTE(review): despite the docstring this starts a new *process*.
        Process.__init__(self, target=self.run, args=())
        self.name = self.connectionString
        self.start()
|
[
"rory@rorymcstay.com"
] |
rory@rorymcstay.com
|
f300ce2eff2dcc46b9a46377c2b2bbee80217dc6
|
906171cc2ff7a669149d807c0e6d6db3fbea5f91
|
/algorithm-books/程序员面试指南/python/递归和动态规划/龙与地下城游戏问题.py
|
204b196a1cb2565b9aed86764b5bca945d6e2cbb
|
[] |
no_license
|
Deanhz/normal_works
|
a2942e9eab9b51d190039b659b6924fe2fba7ae9
|
7ae3d5b121bb7af38e3c7e6330ce8ff78ceaadb1
|
refs/heads/master
| 2020-03-25T20:51:02.835663
| 2019-02-18T16:34:44
| 2019-02-18T16:34:44
| 144,149,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,165
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 12 20:44:57 2018
@author: Dean
本题的更新需要从下到上,从右到左
p223
"""
def minHP1(m):
    """Minimum initial HP to walk from (0,0) to the bottom-right corner.

    Full-table dynamic programming: O(rows*cols) time, O(rows*cols) space.
    need[i][j] is the least HP the knight must carry when stepping onto
    cell (i, j) so that some right/down path to the corner never lets HP
    drop below 1.
    """
    if not m:
        return 1
    rows, cols = len(m), len(m[0])
    need = [[0] * cols for _ in range(rows)]
    corner = m[rows - 1][cols - 1]
    # Entering the corner itself must leave at least 1 HP.
    need[rows - 1][cols - 1] = 1 if corner > 0 else 1 - corner
    # Last row: only rightward moves are possible.
    for j in reversed(range(cols - 1)):
        need[rows - 1][j] = max(need[rows - 1][j + 1] - m[rows - 1][j], 1)
    # Remaining rows, bottom-up; each row is filled right-to-left.
    for i in reversed(range(rows - 1)):
        need[i][cols - 1] = max(need[i + 1][cols - 1] - m[i][cols - 1], 1)
        for j in reversed(range(cols - 1)):
            go_right = max(need[i][j + 1] - m[i][j], 1)
            go_down = max(need[i + 1][j] - m[i][j], 1)
            need[i][j] = min(go_right, go_down)
    return need[0][0]
def minHP2(m):
    """Space-compressed variant of minHP1: one dp row, O(cols) extra space."""
    if not m:
        return 1
    rows, cols = len(m), len(m[0])
    need = [0] * cols
    corner = m[rows - 1][cols - 1]
    # Seed with the bottom-right corner requirement.
    need[cols - 1] = 1 if corner > 0 else 1 - corner
    # Fill the last row (rightward moves only).
    for j in reversed(range(cols - 1)):
        need[j] = max(need[j + 1] - m[rows - 1][j], 1)
    # Roll the single row upward through the remaining rows.
    for i in reversed(range(rows - 1)):
        need[cols - 1] = max(need[cols - 1] - m[i][cols - 1], 1)
        for j in reversed(range(cols - 1)):
            go_right = max(need[j + 1] - m[i][j], 1)
            go_down = max(need[j] - m[i][j], 1)
            need[j] = min(go_right, go_down)
    return need[0]
if __name__ == "__main__":
m = [[-2, -3, 3],[-5, -10, 1], [0, 30, -5]]
print(minHP1(m))
print(minHP2(m))
|
[
"258796762@qq.com"
] |
258796762@qq.com
|
10f7ef7944bbc118a972447b591a865681e469ee
|
502da37d15b473edd9ac56f10871a2e74a774920
|
/misc/deb-package/spairo-deb-d/usr/lib/python3/dist-packages/spairo/parse/obj.py
|
003719aee7bc156b13109532cf1d43d4f12fab2a
|
[] |
no_license
|
adhuliya/sparcv8-ajit
|
5cf7c8fc1e89dc97b3160c010ba9c95cd993eee6
|
c121c33f5b1eaabf040cf929678229a21f8283de
|
refs/heads/master
| 2022-08-22T08:20:09.423969
| 2022-07-24T04:10:11
| 2022-07-24T04:10:11
| 97,362,905
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
#!/usr/bin/env python3
import re
import sys
# One disassembly line: "ADDR:  b0 b1 b2 b3   mnemonic ..."; anything after
# '!' is a comment and is excluded from the instruction group.
extractInstruction = re.compile(
    r"(?P<addr>[a-fA-F0-9]+):\s*([a-fA-F0-9][a-fA-F0-9]\s){4}\s*(?P<instr>[^!]*)")
# Trailing "<label>" annotation appended by the disassembler.
labelSuffix = re.compile(r"<[.$_a-zA-Z][.$_a-zA-Z0-9]*>$")

# Parse obj file
def parse(filename):
    """Parse an objdump-style listing into {address: instruction-text}.

    Returns None when no instruction line was found.
    """
    table = {}
    with open(filename, "r") as handle:
        for raw in handle:
            stripped = raw.strip()
            if extractInstruction.search(stripped):
                # Drop any trailing "<label>" before re-extracting the fields.
                cleaned = labelSuffix.sub("", stripped)
                match = extractInstruction.search(cleaned)
                table[int(match.group("addr"), 16)] = match.group("instr").strip()
    return table if table else None
def printInstr(instr):
    """Print the address->instruction map in ascending address order."""
    for addr in sorted(instr):
        print(f"{addr:4X}", ":", instr[addr])
    print("Total Instr:", len(instr))
if __name__ == "__main__":
filename = "testfiles/test.obj.save"
if len(sys.argv) == 2:
filename = sys.argv[1]
instr = parse(filename)
printInstr(instr)
|
[
"lazynintel@gmail.com"
] |
lazynintel@gmail.com
|
069df0b91770278c18cb69fae6abb68bfff5b37e
|
e76f883f7f93b2d2e735fe1c8a72ddfb146a6ffb
|
/tests/test_vpx.py
|
22da24b31ccc3eeca8fe128cd833d5932f3ec619
|
[
"BSD-3-Clause"
] |
permissive
|
turbographics2000/aiortc
|
4da92a85b5b3b3ec3286bad46265158f04d7b373
|
f3275012f59312a4f7a73c932bbf52f6f69f1a1a
|
refs/heads/master
| 2021-04-03T07:07:52.485523
| 2018-03-07T10:49:56
| 2018-03-07T10:49:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,467
|
py
|
from unittest import TestCase
from aiortc.codecs import get_decoder, get_encoder
from aiortc.codecs.vpx import (VpxDecoder, VpxEncoder, VpxPayloadDescriptor,
_vpx_assert)
from aiortc.mediastreams import VideoFrame
from aiortc.rtp import Codec
# Reference VP8 codec description shared by all tests below.
VP8_CODEC = Codec(kind='video', name='VP8', clockrate=90000)
class VpxPayloadDescriptorTest(TestCase):
    """Round-trip tests for VpxPayloadDescriptor.parse / serialisation.

    Byte fixtures follow the VP8 RTP payload descriptor layout of RFC 7741.
    """

    def test_no_picture_id(self):
        descr, rest = VpxPayloadDescriptor.parse(b'\x10')
        self.assertEqual(descr.partition_start, 1)
        self.assertEqual(descr.partition_id, 0)
        self.assertEqual(descr.picture_id, None)
        self.assertEqual(descr.tl0picidx, None)
        self.assertEqual(descr.tid, None)
        self.assertEqual(descr.keyidx, None)
        self.assertEqual(bytes(descr), b'\x10')
        self.assertEqual(repr(descr), 'VpxPayloadDescriptor(S=1, PID=0, pic_id=None)')
        self.assertEqual(rest, b'')

    def test_short_picture_id_17(self):
        """
        From RFC 7741 - 4.6.3
        """
        descr, rest = VpxPayloadDescriptor.parse(b'\x90\x80\x11')
        self.assertEqual(descr.partition_start, 1)
        self.assertEqual(descr.partition_id, 0)
        self.assertEqual(descr.picture_id, 17)
        self.assertEqual(descr.tl0picidx, None)
        self.assertEqual(descr.tid, None)
        self.assertEqual(descr.keyidx, None)
        self.assertEqual(bytes(descr), b'\x90\x80\x11')
        self.assertEqual(repr(descr), 'VpxPayloadDescriptor(S=1, PID=0, pic_id=17)')
        self.assertEqual(rest, b'')

    def test_short_picture_id_127(self):
        # 127 is the largest picture id that still fits the one-byte form.
        descr, rest = VpxPayloadDescriptor.parse(b'\x90\x80\x7f')
        self.assertEqual(descr.partition_start, 1)
        self.assertEqual(descr.partition_id, 0)
        self.assertEqual(descr.picture_id, 127)
        self.assertEqual(descr.tl0picidx, None)
        self.assertEqual(descr.tid, None)
        self.assertEqual(descr.keyidx, None)
        self.assertEqual(bytes(descr), b'\x90\x80\x7f')
        self.assertEqual(rest, b'')

    def test_long_picture_id_128(self):
        # 128 is the smallest picture id that needs the two-byte form.
        descr, rest = VpxPayloadDescriptor.parse(b'\x90\x80\x80\x80')
        self.assertEqual(descr.partition_start, 1)
        self.assertEqual(descr.partition_id, 0)
        self.assertEqual(descr.picture_id, 128)
        self.assertEqual(descr.tl0picidx, None)
        self.assertEqual(descr.tid, None)
        self.assertEqual(descr.keyidx, None)
        self.assertEqual(bytes(descr), b'\x90\x80\x80\x80')
        self.assertEqual(rest, b'')

    def test_long_picture_id_4711(self):
        """
        From RFC 7741 - 4.6.5
        """
        descr, rest = VpxPayloadDescriptor.parse(b'\x90\x80\x92\x67')
        self.assertEqual(descr.partition_start, 1)
        self.assertEqual(descr.partition_id, 0)
        self.assertEqual(descr.picture_id, 4711)
        self.assertEqual(descr.tl0picidx, None)
        self.assertEqual(descr.tid, None)
        self.assertEqual(descr.keyidx, None)
        self.assertEqual(bytes(descr), b'\x90\x80\x92\x67')
        self.assertEqual(rest, b'')

    def test_tl0picidx(self):
        descr, rest = VpxPayloadDescriptor.parse(b'\x90\xc0\x92\x67\x81')
        self.assertEqual(descr.partition_start, 1)
        self.assertEqual(descr.partition_id, 0)
        self.assertEqual(descr.picture_id, 4711)
        self.assertEqual(descr.tl0picidx, 129)
        self.assertEqual(descr.tid, None)
        self.assertEqual(descr.keyidx, None)
        self.assertEqual(bytes(descr), b'\x90\xc0\x92\x67\x81')
        self.assertEqual(rest, b'')

    def test_tid(self):
        descr, rest = VpxPayloadDescriptor.parse(b'\x90\x20\xe0')
        self.assertEqual(descr.partition_start, 1)
        self.assertEqual(descr.partition_id, 0)
        self.assertEqual(descr.picture_id, None)
        self.assertEqual(descr.tl0picidx, None)
        self.assertEqual(descr.tid, (3, 1))
        self.assertEqual(descr.keyidx, None)
        self.assertEqual(bytes(descr), b'\x90\x20\xe0')
        self.assertEqual(rest, b'')

    def test_keyidx(self):
        descr, rest = VpxPayloadDescriptor.parse(b'\x90\x10\x1f')
        self.assertEqual(descr.partition_start, 1)
        self.assertEqual(descr.partition_id, 0)
        self.assertEqual(descr.picture_id, None)
        self.assertEqual(descr.tl0picidx, None)
        self.assertEqual(descr.tid, None)
        self.assertEqual(descr.keyidx, 31)
        self.assertEqual(bytes(descr), b'\x90\x10\x1f')
        self.assertEqual(rest, b'')
class Vp8Test(TestCase):
    """Tests for the VP8 encoder/decoder wrappers around libvpx."""

    def test_assert(self):
        # A non-zero libvpx status code must be raised as an exception.
        with self.assertRaises(Exception) as cm:
            _vpx_assert(1)
        self.assertEqual(str(cm.exception), 'libvpx error: Unspecified internal error')

    def test_decoder(self):
        decoder = get_decoder(VP8_CODEC)
        self.assertTrue(isinstance(decoder, VpxDecoder))

    def test_encoder(self):
        encoder = get_encoder(VP8_CODEC)
        self.assertTrue(isinstance(encoder, VpxEncoder))
        # A small frame must fit inside a single sub-1300-byte payload.
        frame = VideoFrame(width=320, height=240)
        payloads = encoder.encode(frame)
        self.assertEqual(len(payloads), 1)
        self.assertTrue(len(payloads[0]) < 1300)

    def test_encoder_large(self):
        encoder = get_encoder(VP8_CODEC)
        self.assertTrue(isinstance(encoder, VpxEncoder))
        # A large frame is fragmented into multiple 1300-byte payloads.
        frame = VideoFrame(width=2560, height=1920)
        payloads = encoder.encode(frame)
        self.assertEqual(len(payloads), 7)
        self.assertEqual(len(payloads[0]), 1300)
|
[
"jeremy.laine@m4x.org"
] |
jeremy.laine@m4x.org
|
b4d624563bfde2acc7337c773ee6135ca29b3bc1
|
6af81c1e3853255f064ce58e848b34211decdd23
|
/test/top/api/rest/SubuserDutyDeleteRequest.py
|
4915d8f74cdf40234090e02dd520a45fee58387f
|
[] |
no_license
|
dacy413/TBAutoTool
|
d472445f54f0841f2cd461d48ec6181ae2182d92
|
ca7da4638d38dd58e38c680ee03aaccf575bce7b
|
refs/heads/master
| 2016-09-06T16:13:01.633177
| 2015-02-01T00:04:50
| 2015-02-01T00:04:50
| 29,625,228
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
'''
Created by auto_sdk on 2015-01-20 12:36:26
'''
from top.api.base import RestApi
class SubuserDutyDeleteRequest(RestApi):
    """Auto-generated TOP client request for taobao.subuser.duty.delete."""
    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        # Request parameters; the caller fills these in before execution.
        self.duty_id = None
        self.user_nick = None

    def getapiname(self):
        # API method name routed by the TOP gateway.
        return 'taobao.subuser.duty.delete'
|
[
"1656505353@qq.com"
] |
1656505353@qq.com
|
adae908424c30f9d4e86f3532d053d8c5c4642a4
|
10f746943f2b6399bddf1fc79e16b2eec8b1bb0b
|
/gateway/dummy_secrets.py
|
26986ea41ef76635f3cf6205f617a66f5da5ab9b
|
[] |
no_license
|
edgecollective/belfast-harbor
|
1fa5977a46222f9178a5912ece72beb17891f5e1
|
5149eca44e95bf0525601249116ab23e59c15c0f
|
refs/heads/master
| 2020-09-20T14:16:00.411551
| 2019-11-29T13:02:49
| 2019-11-29T13:02:49
| 224,507,873
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
# Template credentials: copy this file to secrets.py and fill in real values.
# Never commit the file that contains the real secrets.
secrets = {
    'ssid' : b'Your WiFi SSID',
    'password' : b'Your WiFi Password',
    'farmos_pubkey' : 'farmos_pubkey',
    'farmos_privkey' : 'farmos_privkey'
    }
|
[
"donblair@gmail.com"
] |
donblair@gmail.com
|
22232bd5654c414b046f470cef574422936a962c
|
de1b2cfca42e495257a0f50bed026d42d35ecc97
|
/flaskr/__init__.py
|
60787156af6e1f0acfafab34d4efb49016cb91eb
|
[] |
no_license
|
miniyk2012/miniyk2012-flask_tutorial
|
ad971a000b78b0267f33b40924f03154b0b84173
|
760d59b8c3ec534cdeb7c198d1adb61493b1bfff
|
refs/heads/master
| 2023-01-11T14:57:38.931170
| 2019-06-04T22:39:09
| 2019-06-04T22:39:09
| 189,253,167
| 0
| 0
| null | 2022-12-26T20:56:11
| 2019-05-29T15:34:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,465
|
py
|
import os
from flask import (
Flask, current_app
)
def create_app(test_config=None):
    """
    Application factory: ``flask run`` looks for this default name and calls
    it automatically to build the app.
    :param test_config: optional mapping overriding the instance config
        (used by the test suite).
    :return: the configured Flask application.
    """
    # create and configure the app
    app: Flask = Flask(__name__, instance_relative_config=True)
    # print(app.instance_path) # /Users/thomas_young/Documents/code/flask_project/instance
    app.config.from_mapping(
        SECRET_KEY='dev',
        DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),
    )
    if test_config is None:
        # load the instance config, if it exists, when not testing
        ret = app.config.from_pyfile('config.py', silent=True)
        print('load the config.py ' + ('success' if ret else 'fail'))
    else:
        # load the test config if passed in
        app.config.from_mapping(test_config)
    # ensure the instance folder exists
    try:
        print('make instance_path', app.instance_path)
        os.makedirs(app.instance_path)
    except OSError as e:
        # Directory already exists (or cannot be created); ignored on purpose.
        pass
    # a simple page that says hello
    def hello():
        print(f'current app url map is {current_app.url_map}')
        return 'Hello, World!'
    app.add_url_rule('/hello', view_func=hello)
    # Wire up the database lifecycle hooks and the two blueprints.
    from . import db
    db.init_app(app)
    from . import auth
    app.register_blueprint(auth.bp)
    from . import blog
    app.register_blueprint(blog.bp)
    # Make '/' resolve to the blog index endpoint.
    app.add_url_rule('/', 'index')
    return app
|
[
"yk_ecust_2007@163.com"
] |
yk_ecust_2007@163.com
|
b7405fe0659fd9354d9865949d27d48e40c6325b
|
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
|
/flask/flaskenv/Lib/site-packages/tensorflow/python/ops/gen_sdca_ops.py
|
b1922e0314101ad623bed622fb34e7b337445bdc
|
[] |
no_license
|
AhsonAslam/webapi
|
54cf7466aac4685da1105f9fb84c686e38f92121
|
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
|
refs/heads/master
| 2020-07-27T06:05:36.057953
| 2019-09-17T06:35:33
| 2019-09-17T06:35:33
| 208,895,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:167888a5bf0cec33f73cc87d5babc3869f495c2487658426b093ec2660b669c6
size 52962
|
[
"github@cuba12345"
] |
github@cuba12345
|
2d7bca278667a97eb99f961aef8b06561c6dc55a
|
6bf492920985e3741440ba53e1c7f8426b66ac1f
|
/snakemake_rules/rules/kpal/kpal_matrix.smk
|
e0f458d60c5838003d24e753e3360530f55ba0b6
|
[
"MIT"
] |
permissive
|
ukaraoz/snakemake-rules
|
5b2ba7c9ec19d88b56067a46f66fd0c72e48c368
|
07e96afeb39307cdf35ecc8482dc1f8b62c120b9
|
refs/heads/master
| 2020-03-31T15:20:44.444006
| 2018-09-07T08:53:47
| 2018-09-07T08:53:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 734
|
smk
|
# -*- snakemake -*-
include: "kpal.settings.smk"
# Start from the shared kpal rule defaults and add matrix-specific options.
config_default = {'kpal' :{'matrix' : _kpal_config_rule_default.copy()}}
config_default['kpal']['matrix'].update({'options' : "-m -S"})
# User-supplied config values take precedence over these defaults.
update_config(config_default, config)
config = config_default

rule kpal_matrix:
    """kpal: generate matrix."""
    params: cmd = config['kpal']['cmd'],
            options = config['kpal']['matrix']['options'],
            runtime = config['kpal']['matrix']['runtime']
    # k-mer size must be numeric so the wildcard cannot swallow the suffix.
    wildcard_constraints: kmer = "[0-9]+"
    input: kmer = "{prefix}.k{kmer}"
    output: res = "{prefix}.k{kmer}.mat"
    threads: config['kpal']['matrix']['threads']
    conda: "env.yaml"
    shell:
        "{params.cmd} matrix {params.options} {input.kmer} {output.res}"
|
[
"per.unneberg@scilifelab.se"
] |
per.unneberg@scilifelab.se
|
bce20665fafee5860f3d0874347ff4fa12928558
|
1fa6c2650c791e35feaf57b87e832613e98797dd
|
/LeetCode/Binary Search/! M Search in Rotated Sorted Array.py
|
1f36ebf53459a4ab0e059c0dfdc4cadc8b3a3ba8
|
[] |
no_license
|
hz336/Algorithm
|
415a37313a068478225ca9dd1f6d85656630f09a
|
0d2d956d498742820ab39e1afe965425bfc8188f
|
refs/heads/master
| 2021-06-17T05:24:17.030402
| 2021-04-18T20:42:37
| 2021-04-18T20:42:37
| 194,006,383
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,912
|
py
|
"""
Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
(i.e., [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]).
You are given a target value to search. If found in the array return its index, otherwise return -1.
You may assume no duplicate exists in the array.
Your algorithm's runtime complexity must be in the order of O(log n).
Example 1:
Input: nums = [4,5,6,7,0,1,2], target = 0
Output: 4
Example 2:
Input: nums = [4,5,6,7,0,1,2], target = 3
Output: -1
"""
class Solution:
def search(self, nums: 'List[int]', target: 'int') -> 'int':
if nums is None or len(nums) == 0:
return -1
start, end = 0, len(nums) - 1
while start + 1 < end:
mid = start + (end - start) // 2
if nums[start] <= nums[mid]:
if nums[start] <= target <= nums[mid]:
end = mid
else:
start = mid
else:
if nums[mid] <= target <= nums[end]:
start = mid
else:
end = mid
if nums[start] == target:
return start
if nums[end] == target:
return end
return -1
"""
Follow up:
What if duplicates are allowed?
Would this affect the run-time complexity? How and why?
这个问题在面试中不会让实现完整程序
只需要举出能够最坏情况的数据是 [1,1,1,1... 1] 里有一个0即可。
在这种情况下是无法使用二分法的,复杂度是O(n)
因此写个for循环最坏也是O(n),那就写个for循环就好了
如果你觉得,不是每个情况都是最坏情况,你想用二分法解决不是最坏情况的情况,那你就写一个二分吧。
反正面试考的不是你在这个题上会不会用二分法。这个题的考点是你想不想得到最坏情况。
"""
|
[
"hz336@cornell.edu"
] |
hz336@cornell.edu
|
9a28629a710d0c1e31f69b87934ebcc4916fe3e3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03651/s284460411.py
|
7a3cadd35f07b8049ff65e723cdebbfad6be1501
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
import math
n,k=map(int,input().split())
a=list(map(int,input().split()))
c=a[0]
for m in a:
c=math.gcd(c,m)
if k %c==0 and k<=max(a):
print("POSSIBLE")
else:
print("IMPOSSIBLE")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
02c8c0b816a3a71c169216770f654193fbcabcbc
|
b42879654055f320b7593358330eceb0c06ca1d8
|
/Hao_Test/data_base_structure/find_element.py
|
e69721651ac8124e0efbbb28d25c7022d69684fd
|
[] |
no_license
|
HaoREN211/python-scraping
|
de3b28b96fb43e47700a6a12b05db6800f583611
|
c802748e9067e6dfd1e3736ccc20fbd34091f9e5
|
refs/heads/master
| 2020-07-18T23:09:23.380362
| 2019-10-22T01:50:28
| 2019-10-22T01:50:28
| 206,331,191
| 0
| 0
| null | 2019-09-04T13:53:26
| 2019-09-04T13:53:26
| null |
UTF-8
|
Python
| false
| false
| 1,384
|
py
|
# 作者:hao.ren3
# 时间:2019/10/16 18:25
# IDE:PyCharm
from python_scraping.Hao_Test.tools.sql import create_mysql_engine
from python_scraping.Hao_Test.data_base_structure.init_table import init_data_column_table, init_data_base_table, init_data_table_table
from sqlalchemy import MetaData
from sqlalchemy.orm.session import sessionmaker
if __name__ == "__main__":
my_engine = create_mysql_engine("hao_data_base_structure")
my_meta_data = MetaData(my_engine)
Session = sessionmaker(bind=my_engine)
session = Session()
table_data_base = init_data_base_table(mysql_meta_data=my_meta_data)
table_data_table = init_data_table_table(mysql_meta_data=my_meta_data)
table_data_column = init_data_column_table(mysql_meta_data=my_meta_data)
test = (table_data_column.select()
.join(table_data_table, table_data_column.c.data_table_id==table_data_table.c.id)
.join(table_data_base, table_data_column.c.data_base_id==table_data_base.c.id))
test = (session.query(table_data_base.c.name, table_data_table.c.name, table_data_column.c.name)
.join(table_data_table, table_data_column.c.data_table_id==table_data_table.c.id)
.join(table_data_base, table_data_column.c.data_base_id == table_data_base.c.id).all())
for current_row in test:
print(".".join(current_row))
my_engine.dispose()
|
[
"renhaojules@163.com"
] |
renhaojules@163.com
|
8d445faa9047767aed6d3755c95e343884f082d2
|
1ba9e4754ee30e7f45aeb210918be489559dd281
|
/books/migrations/0009_auto_20180325_1412.py
|
bfd85093ea4ebec5ee3296a3b5ef14517f90ccd8
|
[] |
no_license
|
muremwa/Django-Book-s-app
|
330f36c402f0af59f8e9f81d9751b283602c4b1c
|
af6829332b5009955d4290ea67af459d2a6b8b69
|
refs/heads/master
| 2021-04-15T08:26:13.165957
| 2018-04-11T22:15:59
| 2018-04-11T22:15:59
| 126,845,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 575
|
py
|
# Generated by Django 2.0 on 2018-03-25 11:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('books', '0008_book_votes'),
]
operations = [
migrations.AlterField(
model_name='author',
name='picture',
field=models.FileField(default='defaulta.png', upload_to=''),
),
migrations.AlterField(
model_name='book',
name='book_cover',
field=models.FileField(default='default.png', upload_to=''),
),
]
|
[
"danmburu254@gmail.com"
] |
danmburu254@gmail.com
|
03678610e296f6f34bf63062be58743d3ea319fa
|
b8dab6e1a07c4c95e0483df722a4188d44356c1e
|
/backend/endpoints/predict.py
|
1864ccb83162f82efe89e660e6a5d3f5f4036d7e
|
[
"Apache-2.0"
] |
permissive
|
maelstromdat/radon-defuse
|
363578106ea6c63e9b2f33d648b208708a075c25
|
269f050a656d527712d88d39db9ae1d5642027bf
|
refs/heads/main
| 2023-05-26T05:11:10.880316
| 2021-05-20T09:44:21
| 2021-05-20T09:44:21
| 370,704,590
| 2
| 0
|
Apache-2.0
| 2021-05-25T13:34:38
| 2021-05-25T13:34:37
| null |
UTF-8
|
Python
| false
| false
| 1,115
|
py
|
import time
from flask import jsonify, make_response
from flask_restful import Resource, Api, reqparse
class Predict(Resource):
def __init__(self, **kwargs):
self.db = kwargs['db']
self.bucket = kwargs['bucket']
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('id', type=str, required=True) # Model id
self.args = parser.parse_args() # parse arguments to dictionary
# Create Task
task_id = self.db.collection('tasks').add({
'name': 'predict',
'repository_id': self.args.get('id'),
'status': 'progress',
'started_at': time.time()
})[1].id
# Predict
# blob = self.bucket.blob(f'{self.args.get("id")}.joblib')
# b_model = blob.download_as_bytes()
# Save prediction in collection "predictions"?
doc_ref = self.db.collection('tasks').document(task_id)
doc_ref.update({
'status': 'completed',
'ended_at': time.time()
})
return make_response(jsonify({"failure-prone": True}), 200)
|
[
"stefano.dallapalma0@gmail.com"
] |
stefano.dallapalma0@gmail.com
|
dab8fddf7efb767eb07b5fd72e4b5956cb2acb34
|
00b7b6a30e8c851cc1f288370a49a593ee7e0172
|
/bfrs/migrations/0021_auto_20190508_1443.py
|
0a5c7528281c61467e60b81f2db616d45e428d65
|
[] |
no_license
|
rockychen-dpaw/bfrs
|
e907fd5dbceb8af9769a964990dfaab38bc76080
|
f468794d1e36419f7bab718edad3f7b2939b82fa
|
refs/heads/master
| 2021-04-29T19:02:40.930093
| 2020-01-15T03:01:37
| 2020-01-15T03:01:37
| 121,706,290
| 0
| 0
| null | 2018-02-16T02:05:40
| 2018-02-16T02:05:40
| null |
UTF-8
|
Python
| false
| false
| 1,105
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2019-05-08 06:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bfrs', '0020_auto_20190508_1247'),
]
operations = [
migrations.AddField(
model_name='cause',
name='report_name',
field=models.CharField(default=b'', max_length=50),
),
migrations.AddField(
model_name='cause',
name='report_order',
field=models.PositiveSmallIntegerField(default=1, verbose_name=b'order in annual report'),
),
migrations.AlterField(
model_name='tenure',
name='report_group_order',
field=models.PositiveSmallIntegerField(default=1, verbose_name=b'group order in annual report'),
),
migrations.AlterField(
model_name='tenure',
name='report_order',
field=models.PositiveSmallIntegerField(default=1, verbose_name=b'order in annual report'),
),
]
|
[
"asi@dpaw.wa.gov.au"
] |
asi@dpaw.wa.gov.au
|
6d6bb6382252a8618c4eb6a10a181d48b1d1f898
|
5919508f6fa3756b89b720ca9510423847af6b78
|
/inputs/loop_helix_loop/get_lhl_distributions.py
|
ff2570fe3a5b8d20c96d739a23418c00e32f2f3a
|
[
"MIT"
] |
permissive
|
karlyfear/protein_feature_analysis
|
0cb87ee340cb1aa5e619d684ebf8040713fee794
|
fa2ae8bc6eb7ecf17e8bf802ab30814461868114
|
refs/heads/master
| 2023-03-16T22:30:45.088924
| 2020-03-14T20:05:21
| 2020-03-14T20:05:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,168
|
py
|
#!/usr/bin/env python3
'''Get distributions for LHL units.
Dump the distributions of various features to text files.
Usage:
./get_lhl_distributions.py pdbs_path lhl_info_path edges_file
'''
import os
import sys
import json
import numpy as np
import pyrosetta
from pyrosetta import rosetta
def xyzV_to_np_array(xyz):
return np.array([xyz.x, xyz.y, xyz.z])
def get_backbone_points(pose, residues):
'''Get backbone points for residues in a pose.'''
points = []
for res in residues:
for atom in ['N', 'CA', 'C']:
points.append(xyzV_to_np_array(pose.residue(res).xyz(atom)))
return points
def calc_backbone_RMSD(pose1, residues1, pose2, residues2):
'''Calculate backbone RMSD between two poses for specific positions.'''
assert(len(residues1) == len(residues2))
def RMSD(points1, poinsts2):
'''Calcualte RMSD between two lists of numpy points.'''
diff = [points1[i] - poinsts2[i] for i in range(len(points1))]
return np.sqrt(sum(np.dot(d, d) for d in diff) / len(diff))
points1 = get_backbone_points(pose1, residues1)
points2 = get_backbone_points(pose2, residues2)
return RMSD(points1, points2)
def get_helix_direction(pose, helix_start, helix_stop):
'''Get the helix direction.
The direction is defined as the average of the C-O vectors.
'''
c_o_vectors = [pose.residue(i).xyz('O') - pose.residue(i).xyz('C')
for i in range(helix_start, helix_stop + 1)]
sum_vecs = c_o_vectors[0]
for i in range(1, len(c_o_vectors)):
sum_vecs += c_o_vectors[i]
return sum_vecs.normalized()
def get_lhl_lengths(lhl_infos):
'''Get the distribution of LHL lengths.'''
return [lhl['stop'] - lhl['start'] + 1 for lhl in lhl_infos]
def get_front_loop_lengths(lhl_infos):
'''Get the distribution of front loop lengths of LHL units.'''
return [lhl['H_start'] - lhl['start'] for lhl in lhl_infos]
def get_back_loop_lengths(lhl_infos):
'''Get the distribution of back loop lengths of LHL units.'''
return [lhl['stop'] - lhl['H_stop'] for lhl in lhl_infos]
def get_lhl_pair_helix_length_diffs(lhl_infos, edges):
'''Get the helix length difference between pairs of LHL units.'''
length_diffs = []
for i, j in edges:
length1 = lhl_infos[i]['H_stop'] - lhl_infos[i]['H_start'] + 1
length2 = lhl_infos[j]['H_stop'] - lhl_infos[j]['H_start'] + 1
length_diffs.append(np.absolute(length1 - length2))
return length_diffs
def get_lhl_pair_length_diffs(lhl_infos, edges):
'''Get the length difference between pairs of LHL units.'''
length_diffs = []
for i, j in edges:
length1 = lhl_infos[i]['stop'] - lhl_infos[i]['start'] + 1
length2 = lhl_infos[j]['stop'] - lhl_infos[j]['start'] + 1
length_diffs.append(np.absolute(length1 - length2))
return length_diffs
def get_lhl_pair_helix_rmsds(poses_map, lhl_infos, edges):
'''Get the helix backbone RMSDs between pairs of LHL units'''
rmsds = []
for i, j in edges:
length1 = lhl_infos[i]['H_stop'] - lhl_infos[i]['H_start'] + 1
length2 = lhl_infos[j]['H_stop'] - lhl_infos[j]['H_start'] + 1
len_comp = min(length1, length2)
h_mid_start1 = (lhl_infos[i]['H_start'] + lhl_infos[i]['H_stop'] - len_comp) // 2
h_mid_start2 = (lhl_infos[j]['H_start'] + lhl_infos[j]['H_stop'] - len_comp) // 2
residues1 = [h_mid_start1 + k for k in range(len_comp)]
residues2 = [h_mid_start2 + k for k in range(len_comp)]
rmsds.append(calc_backbone_RMSD(poses_map[lhl_infos[i]['pdb_file']], residues1,
poses_map[lhl_infos[j]['pdb_file']], residues2))
return rmsds
def get_lhl_pair_rmsds(poses_map, lhl_infos, edges):
'''Get the backbone RMSDs between pairs of LHL units'''
rmsds = []
for i, j in edges:
length1 = lhl_infos[i]['stop'] - lhl_infos[i]['start'] + 1
length2 = lhl_infos[j]['stop'] - lhl_infos[j]['start'] + 1
len_comp = min(length1, length2)
residues1 = [lhl_infos[i]['start'] + k for k in range(len_comp)]
residues2 = [lhl_infos[j]['start'] + k for k in range(len_comp)]
rmsds.append(calc_backbone_RMSD(poses_map[lhl_infos[i]['pdb_file']], residues1,
poses_map[lhl_infos[j]['pdb_file']], residues2))
return rmsds
def get_lhl_pair_helicies_angles(poses_map, lhl_infos, edges):
'''Get the angles between helices of pairs of LHL units'''
angles = []
for i, j in edges:
helix_direction1 = get_helix_direction(poses_map[lhl_infos[i]['pdb_file']], lhl_infos[i]['H_start'], lhl_infos[i]['H_stop'])
helix_direction2 = get_helix_direction(poses_map[lhl_infos[j]['pdb_file']], lhl_infos[j]['H_start'], lhl_infos[j]['H_stop'])
cos_angle = helix_direction1.dot(helix_direction2)
angles.append(180 / np.pi * np.arccos(cos_angle))
return angles
def dump_distribution(data, data_name):
'''Dump a distribution to a text file'''
with open('{0}.txt'.format(data_name), 'w') as f:
for d in data:
f.write('{0}\n'.format(d))
def get_lhl_distributions(pdbs_path, lhl_info_path, edges_file):
'''Get LHL distributions'''
# Load the pdbs
poses_map = {}
for pdb_file in os.listdir(pdbs_path):
poses_map[pdb_file] = rosetta.core.import_pose.pose_from_file(os.path.join(pdbs_path, pdb_file))
# Load the lhl_infos
lhl_infos = []
for lhl_info_file in os.listdir(lhl_info_path):
with open(os.path.join(lhl_info_path, lhl_info_file), 'r') as f:
lhl_info = json.load(f)
lhl_infos += lhl_info
# Load the edges
with open(edges_file, 'r') as f:
edges = json.load(f)
# Calcualte and dump the distributions
# lhl_lengths = get_lhl_lengths(lhl_infos)
# dump_distribution(lhl_lengths, 'lhl_lengths')
#
# front_loop_lengths = get_front_loop_lengths(lhl_infos)
# dump_distribution(front_loop_lengths, 'front_loop_lengths')
#
# back_loop_lengths = get_back_loop_lengths(lhl_infos)
# dump_distribution(back_loop_lengths, 'back_loop_lengths')
#
lhl_pair_helix_length_diffs = get_lhl_pair_helix_length_diffs(lhl_infos, edges)
dump_distribution(lhl_pair_helix_length_diffs, 'lhl_pair_helix_length_diffs')
#
# lhl_pair_length_diffs = get_lhl_pair_length_diffs(lhl_infos, edges)
# dump_distribution(lhl_pair_length_diffs, 'lhl_pair_length_diffs')
#
lhl_pair_helix_rmsds = get_lhl_pair_helix_rmsds(poses_map, lhl_infos, edges)
dump_distribution(lhl_pair_helix_rmsds, 'lhl_pair_helix_rmsds')
#
# lhl_pair_rmsds = get_lhl_pair_rmsds(poses_map, lhl_infos, edges)
# dump_distribution(lhl_pair_rmsds, 'lhl_pair_rmsds')
#
# lhl_pair_helices_angles = get_lhl_pair_helicies_angles(poses_map, lhl_infos, edges)
# dump_distribution(lhl_pair_helices_angles, 'lhl_pair_helices_angles')
if __name__ == '__main__':
pdbs_path = sys.argv[1]
lhl_info_path = sys.argv[2]
edges_file = sys.argv[3]
pyrosetta.init(options='-ignore_unrecognized_res true')
get_lhl_distributions(pdbs_path, lhl_info_path, edges_file)
|
[
"xingjiepan@gmail.com"
] |
xingjiepan@gmail.com
|
0ebec273c17fa7bf8132a5c77df524c49eb07764
|
df92ea5a3206b2b920086203b3fe0f48ac106d15
|
/django_docs/onetomany/migrations/0002_car_model.py
|
6f2951bba6f495c61b627f23e5b211b0d054afe5
|
[
"MIT"
] |
permissive
|
miscreant1/django_tutorial
|
b354ea5c28224fe9e3612301e146d0c6a8778f28
|
736743be66d83ec579b22a0378381042f5207f38
|
refs/heads/master
| 2022-04-24T17:48:10.724609
| 2020-04-27T07:52:24
| 2020-04-27T07:52:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
# Generated by Django 3.0.5 on 2020-04-20 02:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('onetomany', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='car',
name='model',
field=models.CharField(default='소나타', max_length=40, verbose_name='자동차모델'),
preserve_default=False,
),
]
|
[
"headfat1218@gmail.com"
] |
headfat1218@gmail.com
|
579eac1755cc6dfc8c601ef7df132c1a4a830673
|
674f5dde693f1a60e4480e5b66fba8f24a9cb95d
|
/armulator/armv6/opcodes/abstract_opcodes/smlsld.py
|
55da34bdc70ecb6cd43315893e0a2e0c83e6c62f
|
[
"MIT"
] |
permissive
|
matan1008/armulator
|
75211c18ebc9cd9d33a02890e76fc649483c3aad
|
44f4275ab1cafff3cf7a1b760bff7f139dfffb07
|
refs/heads/master
| 2023-08-17T14:40:52.793120
| 2023-08-08T04:57:02
| 2023-08-08T04:57:02
| 91,716,042
| 29
| 7
|
MIT
| 2023-08-08T04:55:59
| 2017-05-18T16:37:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,242
|
py
|
from armulator.armv6.bits_ops import to_signed, set_substring, to_unsigned, substring
from armulator.armv6.opcodes.opcode import Opcode
from armulator.armv6.shift import ror
class Smlsld(Opcode):
def __init__(self, instruction, m_swap, m, d_hi, d_lo, n):
super().__init__(instruction)
self.m_swap = m_swap
self.m = m
self.d_hi = d_hi
self.d_lo = d_lo
self.n = n
def execute(self, processor):
if processor.condition_passed():
operand2 = ror(processor.registers.get(self.m), 32, 16) if self.m_swap else processor.registers.get(self.m)
n = processor.registers.get(self.n)
product1 = to_signed(substring(n, 15, 0), 16) * to_signed(substring(operand2, 15, 0), 16)
product2 = to_signed(substring(n, 31, 16), 16) * to_signed(substring(operand2, 31, 16), 16)
d_total = to_signed(
set_substring(processor.registers.get(self.d_lo), 63, 32, processor.registers.get(self.d_hi)), 64
)
result = to_unsigned(product1 - product2 + d_total, 64)
processor.registers.set(self.d_hi, substring(result, 63, 32))
processor.registers.set(self.d_lo, substring(result, 31, 0))
|
[
"matan1008@gmail.com"
] |
matan1008@gmail.com
|
c3f603d4891e64afab9802c2e85b912f517572e7
|
f698ab1da4eff7b353ddddf1e560776ca03a0efb
|
/examples/ogbg_molpcba/models.py
|
f1cfc01d8d5185b83ecd0d6620dd538ae9890250
|
[
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
patrickvonplaten/flax
|
7489a002190c5cd5611c6ee1097d7407a932314c
|
48b34ab87c7d20afc567f6e0fe5d67e423cf08bc
|
refs/heads/main
| 2023-08-23T13:55:08.002500
| 2021-10-08T09:23:15
| 2021-10-08T09:23:54
| 319,617,897
| 0
| 0
|
Apache-2.0
| 2020-12-08T11:28:16
| 2020-12-08T11:28:15
| null |
UTF-8
|
Python
| false
| false
| 6,732
|
py
|
# Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of the GNN model."""
from typing import Callable, Sequence
from flax import linen as nn
import jax.numpy as jnp
import jraph
def add_graphs_tuples(graphs: jraph.GraphsTuple,
other_graphs: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Adds the nodes, edges and global features from other_graphs to graphs."""
return graphs._replace(
nodes=graphs.nodes + other_graphs.nodes,
edges=graphs.edges + other_graphs.edges,
globals=graphs.globals + other_graphs.globals)
class MLP(nn.Module):
"""A multi-layer perceptron."""
feature_sizes: Sequence[int]
dropout_rate: float = 0
deterministic: bool = True
activation: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu
@nn.compact
def __call__(self, inputs):
x = inputs
for size in self.feature_sizes:
x = nn.Dense(features=size)(x)
x = self.activation(x)
x = nn.Dropout(
rate=self.dropout_rate, deterministic=self.deterministic)(x)
return x
class GraphNet(nn.Module):
"""A complete Graph Network model defined with Jraph."""
latent_size: int
num_mlp_layers: int
message_passing_steps: int
output_globals_size: int
dropout_rate: float = 0
skip_connections: bool = True
use_edge_model: bool = True
layer_norm: bool = True
deterministic: bool = True
@nn.compact
def __call__(self, graphs: jraph.GraphsTuple) -> jraph.GraphsTuple:
# We will first linearly project the original features as 'embeddings'.
embedder = jraph.GraphMapFeatures(
embed_node_fn=nn.Dense(self.latent_size),
embed_edge_fn=nn.Dense(self.latent_size),
embed_global_fn=nn.Dense(self.latent_size))
processed_graphs = embedder(graphs)
# Now, we will apply a Graph Network once for each message-passing round.
mlp_feature_sizes = [self.latent_size] * self.num_mlp_layers
for _ in range(self.message_passing_steps):
if self.use_edge_model:
update_edge_fn = jraph.concatenated_args(
MLP(mlp_feature_sizes,
dropout_rate=self.dropout_rate,
deterministic=self.deterministic))
else:
update_edge_fn = None
update_node_fn = jraph.concatenated_args(
MLP(mlp_feature_sizes,
dropout_rate=self.dropout_rate,
deterministic=self.deterministic))
update_global_fn = jraph.concatenated_args(
MLP(mlp_feature_sizes,
dropout_rate=self.dropout_rate,
deterministic=self.deterministic))
graph_net = jraph.GraphNetwork(
update_node_fn=update_node_fn,
update_edge_fn=update_edge_fn,
update_global_fn=update_global_fn)
if self.skip_connections:
processed_graphs = add_graphs_tuples(
graph_net(processed_graphs), processed_graphs)
else:
processed_graphs = graph_net(processed_graphs)
if self.layer_norm:
processed_graphs = processed_graphs._replace(
nodes=nn.LayerNorm()(processed_graphs.nodes),
edges=nn.LayerNorm()(processed_graphs.edges),
globals=nn.LayerNorm()(processed_graphs.globals),
)
# Since our graph-level predictions will be at globals, we will
# decode to get the required output logits.
decoder = jraph.GraphMapFeatures(
embed_global_fn=nn.Dense(self.output_globals_size))
processed_graphs = decoder(processed_graphs)
return processed_graphs
class GraphConvNet(nn.Module):
"""A Graph Convolution Network + Pooling model defined with Jraph."""
latent_size: int
num_mlp_layers: int
message_passing_steps: int
output_globals_size: int
dropout_rate: float = 0
skip_connections: bool = True
layer_norm: bool = True
deterministic: bool = True
pooling_fn: Callable[[jnp.ndarray, jnp.ndarray, jnp.ndarray],
jnp.ndarray] = jraph.segment_mean
def pool(self, graphs: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Pooling operation, taken from Jraph."""
# Equivalent to jnp.sum(n_node), but JIT-able.
sum_n_node = graphs.nodes.shape[0]
# To aggregate nodes from each graph to global features,
# we first construct tensors that map the node to the corresponding graph.
# Example: if you have `n_node=[1,2]`, we construct the tensor [0, 1, 1].
n_graph = graphs.n_node.shape[0]
node_graph_indices = jnp.repeat(
jnp.arange(n_graph),
graphs.n_node,
axis=0,
total_repeat_length=sum_n_node)
# We use the aggregation function to pool the nodes per graph.
pooled = self.pooling_fn(graphs.nodes, node_graph_indices, n_graph)
return graphs._replace(globals=pooled)
@nn.compact
def __call__(self, graphs: jraph.GraphsTuple) -> jraph.GraphsTuple:
# We will first linearly project the original node features as 'embeddings'.
embedder = jraph.GraphMapFeatures(
embed_node_fn=nn.Dense(self.latent_size))
processed_graphs = embedder(graphs)
# Now, we will apply the GCN once for each message-passing round.
for _ in range(self.message_passing_steps):
mlp_feature_sizes = [self.latent_size] * self.num_mlp_layers
update_node_fn = jraph.concatenated_args(
MLP(mlp_feature_sizes,
dropout_rate=self.dropout_rate,
deterministic=self.deterministic))
graph_conv = jraph.GraphConvolution(
update_node_fn=update_node_fn, add_self_edges=True)
if self.skip_connections:
processed_graphs = add_graphs_tuples(
graph_conv(processed_graphs), processed_graphs)
else:
processed_graphs = graph_conv(processed_graphs)
if self.layer_norm:
processed_graphs = processed_graphs._replace(
nodes=nn.LayerNorm()(processed_graphs.nodes),
)
# We apply the pooling operation to get a 'global' embedding.
processed_graphs = self.pool(processed_graphs)
# Now, we decode this to get the required output logits.
decoder = jraph.GraphMapFeatures(
embed_global_fn=nn.Dense(self.output_globals_size))
processed_graphs = decoder(processed_graphs)
return processed_graphs
|
[
"no-reply@google.com"
] |
no-reply@google.com
|
e1df6fc2583f420d43b25ca435c7e20316b96453
|
b5c5c27d71348937322b77b24fe9e581cdd3a6c4
|
/graphql/pyutils/cached_property.py
|
0727c1949d5a114eb73a371fd40463fb8998efc0
|
[
"MIT"
] |
permissive
|
dfee/graphql-core-next
|
92bc6b4e5a39bd43def8397bbb2d5b924d5436d9
|
1ada7146bd0510171ae931b68f6c77dbdf5d5c63
|
refs/heads/master
| 2020-03-27T10:30:43.486607
| 2018-08-30T20:26:42
| 2018-08-30T20:26:42
| 146,425,198
| 0
| 0
|
MIT
| 2018-08-28T09:40:09
| 2018-08-28T09:40:09
| null |
UTF-8
|
Python
| false
| false
| 607
|
py
|
# Code taken from https://github.com/bottlepy/bottle
__all__ = ['cached_property']
class CachedProperty:
"""A cached property.
A property that is only computed once per instance and then replaces itself
with an ordinary attribute. Deleting the attribute resets the property.
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
cached_property = CachedProperty
|
[
"cito@online.de"
] |
cito@online.de
|
dfbb2a64692b993bd4d3388133dc94ce6316ab09
|
141d1fb160fcfb4294d4b0572216033218da702d
|
/exec -l /bin/zsh/google-cloud-sdk/lib/surface/dataflow/sql/query.py
|
8660148df0f94ddf4b5afd8079353232390d6b5c
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
sudocams/tech-club
|
1f2d74c4aedde18853c2b4b729ff3ca5908e76a5
|
c8540954b11a6fd838427e959e38965a084b2a4c
|
refs/heads/master
| 2021-07-15T03:04:40.397799
| 2020-12-01T20:05:55
| 2020-12-01T20:05:55
| 245,985,795
| 0
| 1
| null | 2021-04-30T21:04:39
| 2020-03-09T08:51:41
|
Python
|
UTF-8
|
Python
| false
| false
| 4,392
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of `gcloud dataflow sql query` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from googlecloudsdk.api_lib.dataflow import apis
from googlecloudsdk.api_lib.dataflow import sql_query_parameters
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.dataflow import dataflow_util
from googlecloudsdk.command_lib.dataflow import sql_util
from googlecloudsdk.core import properties
DETAILED_HELP = {
'DESCRIPTION':
'Execute the user-specified SQL query on Dataflow. Queries must '
'comply to the ZetaSQL dialect (https://github.com/google/zetasql). '
'Results may be written to either BigQuery or Cloud Pub/Sub.',
'EXAMPLES':
"""\
To execute a simple SQL query on Dataflow that reads from and writes to BigQuery, run:
$ {command} "SELECT word FROM bigquery.table.`my-project`.input_dataset.input_table where count > 3" --job-name=my-job --region=us-west1 --bigquery-dataset=my_output_dataset --bigquery-table=my_output_table
To execute a simple SQL query on Dataflow that reads from and writes to Cloud
Pub/Sub, run:
$ {command} "SELECT word FROM pubsub.topic.`my-project`.input_topic where count > 3" --job-name=my-job --region=us-west1 --pubsub-topic=my_output_topic
To join data from BigQuery and Cloud Pub/Sub and write the result to Cloud
Pub/Sub, run:
$ {command} "SELECT bq.name AS name FROM pubsub.topic.`my-project`.input_topic p INNER JOIN bigquery.table.`my-project`.input_dataset.input_table bq ON p.id = bq.id" --job-name=my-job --region=us-west1 --pubsub-topic=my_output_topic
To execute a parameterized SQL query that reads from and writes to BigQuery, run:
$ {command} "SELECT word FROM bigquery.table.`my-project`.input_dataset.input_table where count > @threshold" --parameter=threshold:INT64:5 --job-name=my-job --region=us-west1 --bigquery-dataset=my_output_dataset --bigquery-table=my_output_table
""",
}
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA)
class Query(base.Command):
"""Execute the user-specified SQL query on Dataflow."""
detailed_help = DETAILED_HELP
@staticmethod
def Args(parser):
"""Register flags for this command.
Args:
parser: argparse.ArgumentParser to register arguments with.
"""
sql_util.ArgsForSqlQuery(parser)
def Run(self, args):
region = dataflow_util.GetRegion(args)
if args.sql_launcher_template:
gcs_location = args.sql_launcher_template
else:
gcs_location = 'gs://dataflow-sql-templates-{}/latest/sql_launcher_template'.format(
region)
if args.parameters_file:
query_parameters = sql_query_parameters.ParseParametersFile(
args.parameters_file)
elif args.parameter:
query_parameters = sql_query_parameters.ParseParametersList(
args.parameter)
else:
query_parameters = '[]'
template_parameters = {
'dryRun': 'true' if args.dry_run else 'false',
'outputs': sql_util.ExtractOutputs(args),
'queryParameters': query_parameters,
'queryString': args.query,
}
arguments = apis.TemplateArguments(
project_id=properties.VALUES.core.project.GetOrFail(),
region_id=region,
job_name=args.job_name,
gcs_location=gcs_location,
zone=args.worker_zone,
max_workers=args.max_workers,
disable_public_ips=properties.VALUES.dataflow.disable_public_ips
.GetBool(),
parameters=template_parameters,
service_account_email=args.service_account_email)
return apis.Templates.LaunchDynamicTemplate(arguments)
|
[
"yogocamlus@gmail.com"
] |
yogocamlus@gmail.com
|
6d79e71393d64f0edbbbf664cb42d7c442fe0144
|
291ab4b5b1b99d0d59ce2fb65efef04b84fd78bd
|
/tmp_testdir/Forex_Trading212/test7_login_trading212_getlist_clickable_ids.py
|
aea9cb52055d4c857f788869d5590a1fda71a568
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
cromox1/Trading212
|
15b5ea55d86e7063228f72dd92525e1fca693338
|
68f9b91098bc9184e16e9823a5e07e6b31e59602
|
refs/heads/main
| 2023-04-17T23:03:07.078229
| 2021-05-05T23:02:54
| 2021-05-05T23:02:54
| 320,100,427
| 0
| 2
| null | 2021-04-13T07:03:41
| 2020-12-09T22:58:06
|
Python
|
UTF-8
|
Python
| false
| false
| 3,499
|
py
|
__author__ = 'cromox'
from time import sleep
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import WebDriverException
# from selenium.webdriver.common.action_chains import ActionChains as hoover
chromedriverpath = r'C:\tools\chromedriver\chromedriver.exe'
chrome_options = Options()
chrome_options.add_argument('--ignore-certificate-errors')
chrome_options.add_argument("--disable-web-security")
# chrome_options.add_argument("--incognito")
chrome_options.add_argument("--allow-running-insecure-content")
chrome_options.add_argument("--allow-cross-origin-auth-prompt")
chrome_options.add_argument("--disable-cookie-encryption")
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_argument('--disable-default-apps')
chrome_options.add_argument('--disable-prompt-on-repost')
chrome_options.add_argument("--disable-zero-browsers-open-for-tests")
chrome_options.add_argument("--no-default-browser-check")
chrome_options.add_argument("--test-type")
prefs = {"profile.default_content_setting_values.notifications" : 2}
chrome_options.add_experimental_option("prefs", prefs)
## webdriver section
driver = webdriver.Chrome(chromedriverpath, options=chrome_options)
driver.implicitly_wait(10)
base_url = "https://www.trading212.com"
driver.maximize_window()
driver.get(base_url)
driver.find_element_by_id("cookie-bar").click()
driver.find_element_by_id("login-header-desktop").click()
user1 = "mycromox@gmail.com"
pswd1 = "Serverg0d!"
driver.find_element_by_id("username-real").send_keys(user1 + Keys.ENTER)
driver.find_element_by_id("pass-real").send_keys(pswd1 + Keys.ENTER)
sleep(10)
# ### Need to find a way to go to pop-up window
# but for now I just use simple solution - find the xpath :-)
xpath1 = '//*[@id="onfido-upload"]/div[1]/div[2]'
driver.find_element_by_xpath(xpath1).click()
template_bar = '//*[@id="chartTabTemplates"]/div'
driver.find_element_by_id("chartTabTemplates").click()
search_section = driver.find_element_by_id("navigation-search-button")
search_section.click()
# search_section.send_keys('GBP/USD' + Keys.ENTER)
driver.find_element_by_xpath("//*[contains(text(),'Currencies')]").click()
driver.find_element_by_xpath("//*[contains(text(),'Major')]").click()
# CSS selector
# valuetofind = 'input[id*="uniqName_"]'
# list_ids = driver.find_elements_by_css_selector(valuetofind)
# # XPATH
valuetofind = '//*[contains(@id, "uniqName_")]'
list_ids = driver.find_elements_by_xpath(valuetofind)
# print('ALL = ', list_ids)
print('ALL uniqName = ', len(list_ids))
if len(list_ids) >= 1:
i = 1
for idx in list_ids:
try:
idxx = idx.get_attribute('id')
print(i, idxx, end='')
try:
if 'GBP/USD' in driver.find_element_by_id(idxx).text:
idx.click()
print(' / CLICKABLE')
else:
print(' / # NO GBP/USD')
except WebDriverException:
print(' / NOT CLICKABLE')
except WebDriverException:
print(i, idx.id, end='')
try:
if 'GBP/USD' in idx.text:
idx.click()
print(' / CLICKABLE')
else:
print(' / # NO GBP/USD')
except WebDriverException:
print(' / NOT CLICKABLE')
i += 1
else:
print('NO ELEMENT APPEARED !!')
|
[
"xixa01@yahoo.co.uk"
] |
xixa01@yahoo.co.uk
|
5b87694f3a2a886ff560e2762344f137ca502f69
|
434fb731cb30b0f15e95da63f353671b0153c849
|
/build/hector_slam/hector_slam_launch/catkin_generated/pkg.installspace.context.pc.py
|
f6f612668cd0e6089313919ad50250aaa8624d2b
|
[] |
no_license
|
lievech/lhn_ws
|
e3e10ff20e28e59583e51660d2802ff24c7cd0b5
|
644fc48b91788078734df9bdece06c8b9f6b45b9
|
refs/heads/master
| 2020-08-02T20:21:19.489061
| 2019-09-28T12:08:26
| 2019-09-28T12:08:26
| 211,494,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hector_slam_launch"
PROJECT_SPACE_DIR = "/home/lhn/lhn_ws/install"
PROJECT_VERSION = "0.3.5"
|
[
"2328187416@qq.com"
] |
2328187416@qq.com
|
3ab187b444848f3432ee2fe85296aa11c48ef180
|
7cdfbe80ac56a042b9b99c1cb17766683da439b4
|
/paper2/setup_files/old_setup/results.py
|
e65818b069a43060086e85eafae81b63aa9666d4
|
[] |
no_license
|
bacook17/pixcmd
|
1e918cc6b147abe1885f9533836005b9f2b30012
|
fac20ced14492fd32448d2722c377d88145f90a1
|
refs/heads/master
| 2021-01-18T11:54:13.625834
| 2019-07-30T15:13:12
| 2019-07-30T15:13:12
| 67,228,636
| 0
| 0
| null | 2016-09-02T14:18:59
| 2016-09-02T14:18:58
| null |
UTF-8
|
Python
| false
| false
| 6,480
|
py
|
__all__ = ['models', 'results', 'pcmds', 'data']
try:
import pcmdpy_gpu as ppy
except:
import pcmdpy as ppy
import numpy as np
from os.path import expanduser
models = {}
run_names = {}
results = {}
pcmds = {}
data = {}
results_dir = expanduser('~/pCMDs/pixcmd/paper2/results/')
data_dir = expanduser('~/pCMDs/pixcmd/data/')
model_nonparam = ppy.galaxy.CustomGalaxy(
ppy.metalmodels.SingleFeH(),
ppy.dustmodels.SingleDust(),
ppy.sfhmodels.NonParam(),
ppy.distancemodels.VariableDistance()
)
model_fixeddist = ppy.galaxy.CustomGalaxy(
ppy.metalmodels.SingleFeH(),
ppy.dustmodels.SingleDust(),
ppy.sfhmodels.NonParam(),
ppy.distancemodels.FixedDistance()
)
model_tau = ppy.galaxy.CustomGalaxy(
ppy.metalmodels.SingleFeH(),
ppy.dustmodels.SingleDust(),
ppy.sfhmodels.TauModel(),
ppy.distancemodels.VariableDistance()
)
model_ssp = ppy.galaxy.CustomGalaxy(
ppy.metalmodels.SingleFeH(),
ppy.dustmodels.SingleDust(),
ppy.sfhmodels.SSPModel(),
ppy.distancemodels.VariableDistance()
)
model_ssp_mdf = ppy.galaxy.CustomGalaxy(
ppy.metalmodels.FixedWidthNormMDF(0.2),
ppy.dustmodels.SingleDust(),
ppy.sfhmodels.SSPModel(),
ppy.distancemodels.VariableDistance()
)
model_ssp_fixed = ppy.galaxy.CustomGalaxy(
ppy.metalmodels.SingleFeH(),
ppy.dustmodels.SingleDust(),
ppy.sfhmodels.SSPModel(),
ppy.distancemodels.FixedDistance()
)
custom_sfh = ppy.sfhmodels.NonParam()
custom_sfh.update_sfh_edges(np.array([9.5, 9.75, 10.0, 10.2]))
custom_sfh.update_edges(np.arange(9.0, 10.3, 0.1))
model_df2_nonparam = ppy.galaxy.CustomGalaxy(
ppy.metalmodels.SingleFeH(),
ppy.dustmodels.SingleDust(),
custom_sfh,
ppy.distancemodels.VariableDistance()
)
def add_set(galaxy, mnum, region, key, model=model_nonparam,
colors='z_gz', run_name=None):
data_file = data_dir + f'{galaxy.lower()}/pcmds/{galaxy}_{colors}_{region}.pcmd'
run_names[key] = run_name
res_file = results_dir + f'{galaxy}_m{mnum}_r{region}.csv'
live_file = res_file.replace('.csv', '_live.csv')
pcmd_file = res_file.replace('.csv', '.pcmd')
models[key] = model.copy()
results[key] = ppy.results.ResultsPlotter(
res_file, live_file=live_file, run_name=run_name,
gal_model=models[key], model_is_truth=False)
data[key] = np.loadtxt(data_file, unpack=True)
try:
pcmds[key] = np.loadtxt(pcmd_file, unpack=True)
except:
pass
# M87
print('M87')
add_set('M87', 3, 44, 'M87_m3', colors='I_VI')
add_set('M87', 4, 104, 'M87_m4', colors='I_VI')
add_set('M87', 4, 101, 'M87_m4_q1', colors='I_VI')
add_set('M87', 4, 102, 'M87_m4_q2', colors='I_VI')
add_set('M87', 4, 103, 'M87_m4_q3', colors='I_VI')
add_set('M87', 5, 204, 'M87_m5', colors='I_VI')
add_set('M87', 6, 264, 'M87_m6', colors='I_VI')
add_set('M87', 7, 104, 'M87_m7', model=model_fixeddist, colors='I_VI')
add_set('M87', 8, 104, 'M87_m8', model=model_tau, colors='I_VI')
add_set('M87', 9, 104, 'M87_m9', model=model_ssp, colors='I_VI')
add_set('M87', 10, 104, 'M87_m10', model=model_ssp, colors='I_VI')
add_set('M87', 11, 104, 'M87_m11', model=model_ssp, colors='I_VI')
add_set('M87', 12, 104, 'M87_m12', model=model_ssp_fixed, colors='I_VI')
add_set('M87', 13, 104, 'M87_m13', model=model_ssp, colors='I_VI')
add_set('M87', 14, 104, 'M87_m14', model=model_ssp, colors='I_VI')
add_set('M87', 15, 104, 'M87_m15', model=model_ssp_mdf, colors='I_VI')
add_set('M87', 16, 104, 'M87_m16', model=model_ssp, colors='I_VI')
add_set('M87', 17, 104, 'M87_m17', model=model_ssp, colors='I_VI')
add_set('M87', 18, 44, 'M87_m18_1', model=model_ssp, colors='I_VI')
add_set('M87', 18, 104, 'M87_m18_2', model=model_ssp, colors='I_VI')
add_set('M87', 18, 204, 'M87_m18_3', model=model_ssp, colors='I_VI')
add_set('M87', 18, 264, 'M87_m18_4', model=model_ssp, colors='I_VI')
# M49
print('M49')
add_set('M49', 3, 40, 'M49_m3')
add_set('M49', 4, 100, 'M49_m4')
add_set('M49', 4, 97, 'M49_m4_q1')
add_set('M49', 4, 98, 'M49_m4_q2')
add_set('M49', 4, 99, 'M49_m4_q3')
add_set('M49', 5, 204, 'M49_m5')
add_set('M49', 6, 256, 'M49_m6')
add_set('M49', 7, 100, 'M49_m7', model=model_fixeddist)
add_set('M49', 8, 100, 'M49_m8', model=model_tau)
add_set('M49', 9, 100, 'M49_m9', model=model_ssp)
add_set('M49', 10, 100, 'M49_m10', model=model_ssp)
add_set('M49', 11, 40, 'M49_m11_1', model=model_ssp)
add_set('M49', 11, 100, 'M49_m11_2', model=model_ssp)
add_set('M49', 11, 204, 'M49_m11_3', model=model_ssp)
add_set('M49', 11, 256, 'M49_m11_4', model=model_ssp)
# NGC 3377
print('NGC3377')
add_set('NGC3377', 3, 41, 'NGC3377_m3')
add_set('NGC3377', 4, 97, 'NGC3377_m4')
add_set('NGC3377', 4, 98, 'NGC3377_m4_q1')
add_set('NGC3377', 4, 99, 'NGC3377_m4_q2')
add_set('NGC3377', 4, 100, 'NGC3377_m4_q3')
add_set('NGC3377', 5, 173, 'NGC3377_m5')
add_set('NGC3377', 6, 241, 'NGC3377_m6')
add_set('NGC3377', 7, 97, 'NGC3377_m7', model=model_fixeddist)
add_set('NGC3377', 8, 97, 'NGC3377_m8', model=model_tau)
add_set('NGC3377', 9, 97, 'NGC3377_m9', model=model_ssp)
add_set('NGC3377', 10, 97, 'NGC3377_m10', model=model_ssp)
add_set('NGC3377', 11, 41, 'NGC3377_m11_1', model=model_ssp)
add_set('NGC3377', 11, 97, 'NGC3377_m11_2', model=model_ssp)
add_set('NGC3377', 11, 173, 'NGC3377_m11_3', model=model_ssp)
add_set('NGC3377', 11, 241, 'NGC3377_m11_4', model=model_ssp)
# NGC 4993
print('NGC4993')
add_set('NGC4993', 3, 35, 'NGC4993_m3')
add_set('NGC4993', 4, 83, 'NGC4993_m4')
add_set('NGC4993', 4, 81, 'NGC4993_m4_q1')
add_set('NGC4993', 4, 82, 'NGC4993_m4_q2')
add_set('NGC4993', 4, 84, 'NGC4993_m4_q3')
add_set('NGC4993', 5, 103, 'NGC4993_m5')
# add_set('NGC4993', 6, 241, 'NGC4993_m6')
add_set('NGC4993', 7, 83, 'NGC4993_m7', model=model_fixeddist)
add_set('NGC4993', 8, 83, 'NGC4993_m8', model=model_tau)
add_set('NGC4993', 9, 83, 'NGC4993_m9', model=model_ssp)
# DF2
print('DF2')
for i in range(1, 6):
df2_res = results_dir + f'DF2_m{i}.csv'
df2_live = df2_res.replace('.csv', '_live.csv')
df2_data = data_dir + 'DF2/pcmds/DF2_I_VI_1.pcmd'
if i in [2, 4]:
model = model_df2_nonparam.copy()
else:
model = model_ssp.copy()
results[f'DF2_m{i}'] = ppy.results.ResultsPlotter(
df2_res, live_file=df2_live, run_name=f'DF2, model {i}',
gal_model=model, model_is_truth=False)
data[f'DF2_m{i}'] = np.loadtxt(df2_data, unpack=True)
try:
pcmds[f'DF2_m{i}'] = np.loadtxt(df2_res.replace('.csv', '.pcmd'), unpack=True)
except:
pass
|
[
"bcook@cfa.harvard.edu"
] |
bcook@cfa.harvard.edu
|
1fc1f45b446d4c3afe5b69bf2f9515f4c46607ff
|
3e381dc0a265afd955e23c85dce1e79e2b1c5549
|
/hs-S1/icice_ucgenler.py
|
77342ff2d6194916bac0664099c50bf72246660a
|
[] |
no_license
|
serkancam/byfp2-2020-2021
|
3addeb92a3ff5616cd6dbd3ae7b2673e1a1a1a5e
|
c67206bf5506239d967c3b1ba75f9e08fdbad162
|
refs/heads/master
| 2023-05-05T04:36:21.525621
| 2021-05-29T11:56:27
| 2021-05-29T11:56:27
| 322,643,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
import turtle as t
t.Screen().setup(600, 600)
# https://studio.code.org/s/course4/stage/10/puzzle/2
for adim in range(50, 101, 10): # 50 60 70 80 90 100
for i in range(3): # 0 1 2
t.forward(adim)
t.left(120)
t.done()
|
[
"sekocam@gmail.com"
] |
sekocam@gmail.com
|
a3411e1ceb6ec195c179f41219c2ee0009ff2aee
|
a851573ec818149d03602bb17b1b97235b810a06
|
/apps/network1/views.py
|
4660b228bf38e659146ac63097baf2a2b9f8438a
|
[] |
no_license
|
kswelch53/mypython_projects2
|
42afc71714ff7e10e1d8d4e6a5965ff58380e9bd
|
97d9faa5ea326b86dd7f48be1a822b3a58f3189c
|
refs/heads/master
| 2021-04-06T07:22:04.982315
| 2018-03-30T02:35:20
| 2018-03-30T02:35:20
| 125,312,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,143
|
py
|
from django.shortcuts import render, HttpResponse, redirect
# links model to view functions
from .models import User
# allows flash messages to html
from django.contrib import messages
# Note: Registration and login validations are done in models.py
# displays a form on index.html for users to enter login or registration info
def index(request):
print("This is index function in network1 views.py")
return render(request, 'network1/index.html')
# logs in user if validations are met
def login(request):
print("This is login function in network1 views.py")
# saves user POST data from models method login_user in response_from_models:
response_from_models = User.objects.login_user(request.POST)
print("Response from models:", response_from_models)
if response_from_models['status']:#if true (validations are met):
#saves user data in session, sends user to 2nd app:
request.session['user_id'] = response_from_models['user'].id
request.session['user_name'] = response_from_models['user'].name
return redirect('network2:index')
else:#returns user to index.html, displays error message:
messages.error(request, response_from_models['errors'])
return redirect('network1:index')
# saves a user object if registration validations are met
def register(request):
print("This is register function in network1 views.py")
# this checks that users have submitted form data before proceeding to register route
if request.method == 'POST':
print("Request.POST:", request.POST)
# invokes validations method from the model manager
# saves user data from models.py in a variable
# whatever is sent back in the UserManager return statement
response_from_models = User.objects.validate_user(request.POST)
print("Response from models:", response_from_models)
if response_from_models['status']:#if true
# passed the validations and created a new user
# user can now be saved in session, by id:
# index method in 2nd app will use this:
request.session['user_id'] = response_from_models['user'].id
request.session['user_name'] = response_from_models['user'].name
print("Name:", request.session['user_name'])
#redirects to index method in 2nd app via named route network2 from project-level urls.py
return redirect('network2:index')#named route/views.py method
# 1st app handles only logging in / registering users
else:
# add flash messages to html:
for error in response_from_models['errors']:
messages.error(request, error)
# returns to index.html via named route network1, index method in views.py
return redirect('network1:index')
# if not POST, redirects to index method via named route namespace=network1
else:
return redirect('network1:index')
def logout (request):
request.session.clear()#deletes everything in session
return redirect('network1:index')
|
[
"kswelch53@gmail.com"
] |
kswelch53@gmail.com
|
1eb1a20cca4e64744c3c860ba9ffc78209de8c23
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-4/3339d802402fd2f2ed5e954434c637bf7a68124d-<_make_validation_split>-bug.py
|
1827f4775913b4407f7cdf26bba80b06443aca84
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,278
|
py
|
def _make_validation_split(self, y):
'Split the dataset between training set and validation set.\n\n Parameters\n ----------\n y : array, shape (n_samples, )\n Target values.\n\n Returns\n -------\n validation_mask : array, shape (n_samples, )\n Equal to 1 on the validation set, 0 on the training set.\n '
n_samples = y.shape[0]
validation_mask = np.zeros(n_samples, dtype=np.uint8)
if (not self.early_stopping):
return validation_mask
if is_classifier(self):
splitter_type = StratifiedShuffleSplit
else:
splitter_type = ShuffleSplit
cv = splitter_type(test_size=self.validation_fraction, random_state=self.random_state)
(idx_train, idx_val) = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))
if ((idx_train.shape[0] == 0) or (idx_val.shape[0] == 0)):
raise ValueError(('Splitting %d samples into a train set and a validation set with validation_fraction=%r led to an empty set (%d and %d samples). Please either change validation_fraction, increase number of samples, or disable early_stopping.' % (n_samples, self.validation_fraction, idx_train.shape[0], idx_val.shape[0])))
validation_mask[idx_val] = 1
return validation_mask
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
214b351155bd5bbd835658ddee5d7b0cf7b101c2
|
e1fa4f0e678bcc7a71afd23fd1bd693a4f503765
|
/ss/ss_coroutine.py
|
b0e5c58010598a07ff04339fa96dc5854805145a
|
[] |
no_license
|
smallstrong0/spider
|
b20460b33aeee5989870acd95cc1addd6996c1ed
|
cb3807978ff9599fbf669fe4068040f4252f6432
|
refs/heads/master
| 2020-03-31T07:59:43.046207
| 2019-01-03T15:47:06
| 2019-01-03T15:47:06
| 152,041,262
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 522
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/18 22:07
# @Author : SmallStrong
# @Des :
# @File : ss_coroutine.py
# @Software: PyCharm
import sys
import os
# 被逼无奈
sys.path.append(os.getcwd().replace('/ss', ''))
from spider_core import go
from func import exe_time
from gevent import monkey, pool
import config
monkey.patch_all()
@exe_time
def main():
p = pool.Pool(config.COROUTINE_LIMIT_NUM)
while config.FLAG:
p.spawn(go)
if __name__ == '__main__':
main()
|
[
"393019766@qq.com"
] |
393019766@qq.com
|
d8fadea0f97b759ec5a8eb75c034fb3b9505259d
|
e46a95f42e61c71968c60400f11924b4ad22bf59
|
/0x09-Unittests_and_integration_tests/test_utils.py
|
c82ada87f519caa128197f16d307056ad525f535
|
[] |
no_license
|
mahdibz97/holbertonschool-web_back_end
|
669b68c7ba6be937757a88999c7acc6afd6b58ca
|
017a250f477599aee48f77e9d215c74b4bb62a14
|
refs/heads/master
| 2023-06-04T15:25:14.752977
| 2021-06-15T23:39:43
| 2021-06-15T23:39:43
| 348,126,696
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,351
|
py
|
#!/usr/bin/env python3
""" unittesting Module """
from typing import Mapping, Sequence
import unittest
from unittest.case import TestCase
from unittest.mock import patch
from parameterized import parameterized
from utils import access_nested_map, get_json, memoize
class TestAccessNestedMap(unittest.TestCase):
""" access_nested_map unit testing class """
@parameterized.expand([
({"a": 1}, ("a",), 1),
({"a": {"b": 2}}, ("a",), {"b": 2}),
({"a": {"b": 2}}, ("a", "b"), 2)
])
def test_access_nested_map(self, nested_map: Mapping, path: Sequence, res):
""" access_nested_map result testing method """
self.assertEqual(access_nested_map(nested_map, path), res)
@parameterized.expand([
({}, ("a",)),
({"a": 1}, ("a", "b")),
])
def test_access_nested_map_exception(self,
nested_map: Mapping,
path: Sequence):
""" access_nested_map exceptions testing method """
self.assertRaises(KeyError, access_nested_map, nested_map, path)
class TestGetJson(unittest.TestCase):
""" get_json unit testing class """
@parameterized.expand([
("http://example.com", {"test_payload": True}),
("http://holberton.io", {"test_payload": False})
])
def test_get_json(self, test_url, test_payload):
""" get_json result and number of calls testing method """
with patch('requests.get') as patched:
patched.return_value.json.return_value = test_payload
self.assertEqual(get_json(test_url), test_payload)
patched.assert_called_once()
class TestMemoize(unittest.TestCase):
""" memoize decorator unit testing class """
def test_memoize(self):
""" memoize decorator result and number of calls testing method """
class TestClass:
def a_method(self):
return 42
@memoize
def a_property(self):
return self.a_method()
with patch.object(TestClass, 'a_method', return_value=42) as patched:
test_class = TestClass()
self.assertEqual(test_class.a_property, patched.return_value)
self.assertEqual(test_class.a_property, patched.return_value)
patched.assert_called_once()
|
[
"ben.zouitina.mahdi97@gmail.com"
] |
ben.zouitina.mahdi97@gmail.com
|
e372ecde50ffe894a9ac6d0b20f743cc0b640425
|
b125f9a750a519c9c7a5ed66adb8530e0237367b
|
/str/StrDemo11.py
|
5931339c369e670d1e64b0c073cf7184825c807b
|
[] |
no_license
|
isisisisisitch/geekPython
|
4e49fe19b4cca9891056f33464518272265e3dab
|
635e246dca7a221d87a3b3c5b07d1e177527498f
|
refs/heads/master
| 2021-05-27T01:29:09.755225
| 2021-01-18T23:18:46
| 2021-01-18T23:18:46
| 254,200,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
#the taste is not xxx poor!
#the taste is good!
#the taste is poor!
#the taste is poor!
str="the taste is xxx poor"
snot = str.find("not")
print(snot)
spoor = str.find("poor")
if spoor> snot and snot>0:
str = str.replace(str[snot:(spoor+4)],"good")
print(str)
else:
print(str)
|
[
"dallucus@gmail.com"
] |
dallucus@gmail.com
|
9f52f653a93cd4087e7542d49ffc7bedf4a10ac7
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4066/codes/1846_1273.py
|
b84605ab2c0be8357722a17f2744c6e7aaf7d32b
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
from numpy import *
from numpy.linalg import *
# Nosso sistema aqui tem
matriz_dos_coeficientes = array([[1,-1,0,0],
[0, 1, -1, 0],
[0, 0, 1, 0],
[1, 0, 0, 1]])
# Resolução do sistema AX = B
# onde A = Matriz dos coeficientes, X = Vetor do Fluxo e B = Matriz das incognitas
matriz_das_incognitas = array([50,-120,350,870])
Vetor_do_Fluxo = dot(inv(matriz_dos_coeficientes),matriz_das_incognitas)
z = zeros(4)
for i in range(size(Vetor_do_Fluxo)):
z[i] = round(Vetor_do_Fluxo[i], 1)
print(z)
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
54f8f44f719f7d3c0acc91b5c3995ea9a048a642
|
c0c4b1db16a7f85a74cba224f3ebea5660db379e
|
/old_files/AGILENT33220A_SERVER.py
|
9bd730306419b55de399a47b7a53dd5763d04f45
|
[] |
no_license
|
trxw/HaeffnerLabLattice
|
481bd222ebbe4b6df72a9653e18e0bf0d43ba15e
|
d88d345c239e217eeb14a39819cfe0694a119e7c
|
refs/heads/master
| 2021-01-16T18:13:36.548643
| 2014-05-09T21:47:23
| 2014-05-09T21:47:23
| 20,747,092
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,087
|
py
|
from labrad.server import LabradServer, setting
import serial
class AgilentServer(LabradServer):
"""Controls Agilent 33220A Signal Generator"""
name = "%LABRADNODE% AGILENT 33220A SERVER"
def initServer( self ):
#communication configuration
self._port = 'COM11'
self._addr = 0 #instruments GPIB address
#initialize communication
self._ser = serial.Serial(self._port)
self._ser.timeout = 1
self._ser.write(self.SetAddrStr(self._addr)) #set address
self.SetControllerWait(0) #turns off automatic listen after talk, necessary to stop line unterminated errors
@setting(1, "Identify", returns='s')
def Identify(self, c):
'''Ask instrument to identify itself'''
command = self.IdenStr()
self._ser.write(command)
self.ForceRead() #expect a reply from instrument
#time.sleep(self._waitTime) ## apperently not needed, communication fast
answer = self._ser.readline()[:-1]
return answer
@setting(2, "GetFreq", returns='v')
def GetFreq(self,c):
'''Returns current frequency'''
command = self.FreqReqStr()
self._ser.write(command)
self.ForceRead() #expect a reply from instrument
answer = self._ser.readline()
return answer
@setting(3, "SetFreq", freq = 'v', returns = "")
def SetFreq(self,c,freq):
'''Sets frequency, enter value in MHZ'''
command = self.FreqSetStr(freq)
self._ser.write(command)
@setting(4, "GetState", returns='w')
def GetState(self,c):
'''Request current on/off state of instrument'''
command = self.StateReqStr()
self._ser.write(command)
self.ForceRead() #expect a reply from instrument
answer = str(int(self._ser.readline()))
return answer
@setting(5, "SetState", state= 'w', returns = "")
def SetState(self,c, state):
'''Sets on/off (enter 1/0)'''
command = self.StateSetStr(state)
self._ser.write(command)
@setting(6, "GetPower", returns = 'v')
def GetPower(self,c):
''' Returns current power level in dBm'''
command = self.PowerReqStr()
self._ser.write(command)
self.ForceRead() #expect a reply from instrument
answer = self._ser.readline()
return answer
@setting(7, "SetPower", level = 'v',returns = "")
def SetPower(self,c, level):
'''Sets power level, enter power in dBm'''
command = self.PowerSetStr(level)
self._ser.write(command)
@setting(8, "GetVoltage", returns = 'v')
def GetVoltage(self,c):
'''Returns current voltage level in Volts'''
command = self.VoltageReqStr()
self._ser.write(command)
self.ForceRead() #expect a reply from instrument
answer = self._ser.readline()
return answer
@setting(9, "SetVoltage", level = 'v',returns = "")
def SetVoltage(self,c, level):
'''Sets voltage level, enter power in volts'''
command = self.VoltageSetStr(level)
self._ser.write(command)
@setting(10, "Get Function", returns = 's')
def GetFunc(self,c):
''' Returns the current function output of the instrument'''
command = self.FunctionReqStr()
self._ser.write(command)
self.ForceRead() #expect a reply from instrument
answer = self._ser.readline()[:-1]
return answer
@setting(11, "Set Function", func = 's',returns = "")
def setFunc(self,c, func):
'''Sets type of function to output: SINE, SQUARE, RAMP, PULSE, NOISE, or DC'''
command = self.FunctionSetStr(func)
self._ser.write(command)
#send message to controller to indicate whether or not (status = 1 or 0)
#a response is expected from the instrument
def SetControllerWait(self,status):
command = self.WaitRespStr(status) #expect response from instrument
self._ser.write(command)
def ForceRead(self):
command = self.ForceReadStr()
self._ser.write(command)
def IdenStr(self):
return '*IDN?'+'\r\n'
# string to request current frequency
def FreqReqStr(self):
return 'FREQuency?' + '\r\n'
# string to set freq in Hz
def FreqSetStr(self,freq):
return 'FREQuency '+ str(freq) +'\r\n'
# string to request on/off?
def StateReqStr(self):
return 'OUTPut?' + '\r\n'
# string to set on/off (state is given by 0 or 1)
def StateSetStr(self, state):
if state == 1:
comstr = 'OUTPut ON' + '\r\n'
else:
comstr = 'OUTPut OFF' + '\r\n'
return comstr
# string to request current power
def PowerReqStr(self):
return 'Voltage:UNIT DBM\r\n'+'Voltage?' + '\r\n'
# string to request voltage
def VoltageReqStr(self):
return 'Voltage:UNIT VPP\r\n'+'Voltage?' + '\r\n'
# string to set power (in dBm)
def PowerSetStr(self,pwr):
return 'Voltage:UNIT DBM\r\n' + 'Voltage ' +str(pwr) + '\r\n'
# string to set voltage
def VoltageSetStr(self,volt):
return 'Voltage:UNIT VPP\r\n'+'Voltage ' +str(volt) + '\r\n'
# string to get current function
def FunctionReqStr(self):
return 'FUNCtion?\r\n'
# string to set function
def FunctionSetStr(self,func):
if func == 'SINE':
comstr = 'FUNCtion ' + 'SIN' + '\r\n'
elif func == 'SQUARE':
comstr = 'FUNCtion ' + 'SQU' + '\r\n'
elif func == 'RAMP':
comstr = 'FUNCtion ' + 'RAMP' + '\r\n'
elif func == 'PULSE':
comstr = 'FUNCtion ' + 'PULSe' + '\r\n'
elif func == 'NOISE':
comstr = 'FUNCtion ' + 'NOISe' + '\r\n'
elif func == 'DC':
comstr = 'FUNCtion ' + 'DC' + '\r\n'
return comstr
# string to force read
def ForceReadStr(self):
return '++read eoi' + '\r\n'
# string for prologix to request a response from instrument, wait can be 0 for listen / for talk
def WaitRespStr(self, wait):
return '++auto '+ str(wait) + '\r\n'
# string to set the addressing of the prologix
def SetAddrStr(self, addr):
return '++addr ' + str(addr) + '\r\n'
if __name__ == "__main__":
from labrad import util
util.runServer(AgilentServer())
|
[
"micramm@gmail.com"
] |
micramm@gmail.com
|
e258490c9c96a24d1455e651209ea0988d282c37
|
83f2c9c26a79fdb2d6dd47218484e06875ccdb77
|
/rpca.py
|
3b733283c7cb1ac83b215bfd6a464454110d5b53
|
[] |
no_license
|
zuoshifan/rpca_HI
|
4fe7a0c76326b57489a719583e2c8f13d07c363c
|
f94060ba3adbce5971eb4440bd7867240ebd088d
|
refs/heads/master
| 2021-01-21T10:14:01.521672
| 2017-05-12T06:51:17
| 2017-05-12T06:51:17
| 83,398,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,195
|
py
|
import math
import numpy.linalg
def robust_pca(M):
"""
Decompose a matrix into low rank and sparse components.
Computes the RPCA decomposition using Alternating Lagrangian Multipliers.
Returns L,S the low rank and sparse components respectively
"""
L = numpy.zeros(M.shape)
S = numpy.zeros(M.shape)
Y = numpy.zeros(M.shape)
print M.shape
mu = (M.shape[0] * M.shape[1]) / (4.0 * L1Norm(M))
lamb = max(M.shape) ** -0.5
while not converged(M,L,S):
L = svd_shrink(M - S - (mu**-1) * Y, mu)
S = shrink(M - L + (mu**-1) * Y, lamb * mu)
Y = Y + mu * (M - L - S)
return L,S
def svd_shrink(X, tau):
"""
Apply the shrinkage operator to the singular values obtained from the SVD of X.
The parameter tau is used as the scaling parameter to the shrink function.
Returns the matrix obtained by computing U * shrink(s) * V where
U are the left singular vectors of X
V are the right singular vectors of X
s are the singular values as a diagonal matrix
"""
U,s,V = numpy.linalg.svd(X, full_matrices=False)
return numpy.dot(U, numpy.dot(numpy.diag(shrink(s, tau)), V))
def shrink(X, tau):
"""
Apply the shrinkage operator the the elements of X.
Returns V such that V[i,j] = max(abs(X[i,j]) - tau,0).
"""
V = numpy.copy(X).reshape(X.size)
for i in xrange(V.size):
V[i] = math.copysign(max(abs(V[i]) - tau, 0), V[i])
if V[i] == -0:
V[i] = 0
return V.reshape(X.shape)
def frobeniusNorm(X):
"""
Evaluate the Frobenius norm of X
Returns sqrt(sum_i sum_j X[i,j] ^ 2)
"""
accum = 0
V = numpy.reshape(X,X.size)
for i in xrange(V.size):
accum += abs(V[i] ** 2)
return math.sqrt(accum)
def L1Norm(X):
"""
Evaluate the L1 norm of X
Returns the max over the sum of each column of X
"""
return max(numpy.sum(X,axis=0))
def converged(M,L,S):
"""
A simple test of convergence based on accuracy of matrix reconstruction
from sparse and low rank parts
"""
error = frobeniusNorm(M - L - S) / frobeniusNorm(M)
print "error =", error
return error <= 10e-6
|
[
"zuoshifan@163.com"
] |
zuoshifan@163.com
|
5ef1e7f10dc123b57434e69de06b120e3e884c89
|
88ed6ed99589f7fb8e49aeb6c15bf0d51fe14a01
|
/004_medianOfTwoSortedArrays.py
|
f50d1fe4f60aca33c8dc8105b02120ac6779206e
|
[] |
no_license
|
ryeLearnMore/LeetCode
|
3e97becb06ca2cf4ec15c43f77447b6ac2a061c6
|
04ec1eb720474a87a2995938743f05e7ad5e66e3
|
refs/heads/master
| 2020-04-07T19:02:43.171691
| 2019-06-23T15:09:19
| 2019-06-23T15:09:19
| 158,634,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,215
|
py
|
#-*-coding:utf-8-*-
__author__ = 'Rye'
'''
There are two sorted arrays nums1 and nums2 of size m and n respectively.
Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).
You may assume nums1 and nums2 cannot be both empty.
Example 1:
nums1 = [1, 3]
nums2 = [2]
The median is 2.0
Example 2:
nums1 = [1, 2]
nums2 = [3, 4]
The median is (2 + 3)/2 = 2.5
'''
# 正确写法,需要多次学习
# https://github.com/apachecn/awesome-algorithm/blob/master/docs/Leetcode_Solutions/Python/004._median_of_two_sorted_arrays.md
class Solution:
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
def findKth(A, B, k):
if len(A) == 0:
return B[k - 1]
if len(B) == 0:
return A[k - 1]
if k == 1:
return min(A[0], B[0])
a = A[k // 2 - 1] if len(A) >= k // 2 else None
b = B[k // 2 - 1] if len(B) >= k // 2 else None
if b is None or (a is not None and a < b):
return findKth(A[k // 2:], B, k - k // 2)
return findKth(A, B[k // 2:], k - k // 2)
num = len(nums1) + len(nums2)
if num % 2 == 1:
return self.findKth(nums1, nums2, num // 2 + 1)
else:
smaller = self.findKth(nums1, nums2, num // 2)
larger = self.findKth(nums1, nums2, num // 2 + 1)
return (smaller + larger) / 2.0
# 自己写的,侥幸也能通过,不过时间复杂度不对
class Solution1:
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
num = sorted(nums1 + nums2)
if len(num) % 2 == 1:
result = (len(num) - 1) / 2
# print(result)
return num[int(result)]
else:
result1 = int(len(num) / 2)
result2 = result1 - 1
result = (num[int(result1)] + num[int(result2)]) / 2
return result
|
[
"noreply@github.com"
] |
ryeLearnMore.noreply@github.com
|
38d58c1a1117d022b2d55be2c9392e708dbeb924
|
583d03a6337df9f1e28f4ef6208491cf5fb18136
|
/dev4qx/purus-repo/tasks/sync_task.py
|
b8d89d4424113bbe84c0b939794fb5b17b43eabb
|
[] |
no_license
|
lescpsn/lescpsn
|
ece4362a328f009931c9e4980f150d93c4916b32
|
ef83523ea1618b7e543553edd480389741e54bc4
|
refs/heads/master
| 2020-04-03T14:02:06.590299
| 2018-11-01T03:00:17
| 2018-11-01T03:00:17
| 155,309,223
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,148
|
py
|
# encoding: utf-8
import logging
import tornado.gen
import tornado.ioloop
from sqlalchemy.orm import sessionmaker
import core
request_log = logging.getLogger("purus.request")
class SyncTask(tornado.ioloop.PeriodicCallback):
    """Periodic Tornado task that drains pending pricing-sync requests.

    Requests are popped from the Redis list ``list:sync:pricing`` (reached
    through the 'madeira' sentinel master); each entry has the form
    ``domain_id,product_id,user_id``.
    """

    def __init__(self, application, callback_time):
        # Schedule self.do_sync to run every `callback_time` milliseconds.
        super(SyncTask, self).__init__(self.do_sync, callback_time)
        self.application = application
        self.master = self.application.sentinel.master_for('madeira')
        # Re-entrancy guard: True while a sync pass is in progress.
        self.in_sync = False

    def session(self, name):
        """Return a new SQLAlchemy session for the named engine, or None."""
        if name in self.application.engine:
            engine = self.application.engine[name]
            return sessionmaker(bind=engine)()
        return None

    @tornado.gen.coroutine
    def do_sync(self):
        """Drain the Redis queue and run core.sync_pricing for each entry."""
        # Skip this tick if the previous pass has not finished yet.
        if self.in_sync:
            return
        # Operator-controlled kill switch.
        if self.master.exists('flag:task'):
            request_log.info('STOP FLAG FOUND!')
            return
        if not self.master.exists('list:sync:pricing'):
            return
        session = self.session('repo')
        try:
            self.in_sync = True
            sync_list = []
            full_sync_set = set()
            line = self.master.lpop('list:sync:pricing')
            while line:
                request_log.info('SYNC LINE {%s}', line)
                domain_id, product_id, user_id = line.split(',')
                # Empty product AND user means "sync the whole domain".
                if product_id == '' and user_id == '':
                    full_sync_set.add(domain_id)
                sync_list.append((domain_id, product_id, user_id))
                line = self.master.lpop('list:sync:pricing')
            # TODO: merge same, remove
            # A full-domain sync supersedes that domain's per-item entries.
            for domain_id in full_sync_set:
                request_log.info('SYNC FULL DOMAIN {%s}', domain_id)
                sync_list = list(filter(lambda x: x[0] != domain_id, sync_list))
                sync_list.append((domain_id, '', ''))
            for domain_id, product_id, user_id in sync_list:
                yield core.sync_pricing(session, domain_id, filter_product=product_id, filter_user=user_id)
        except:
            # Broad catch keeps the periodic task alive; failures are logged.
            request_log.exception('SYNC FAIL')
        finally:
            self.in_sync = False
            session.close()
|
[
"lescpsn@aliyun.com"
] |
lescpsn@aliyun.com
|
10f15096eff2605cc1b6f34cae9b10d7dbd66012
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/3/usersdata/138/733/submittedfiles/ex1.py
|
1e83a25bea9a34b44af82f240fef204ac4552b94
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
# Solve the quadratic equation a*x^2 + b*x + c = 0 via Bhaskara's formula.
# NOTE: Python 2 style -- input() evaluates the typed expression to a number.
a = input('digite o valor de a:')
b = input('digite o valor de b:')
c = input('digite o valor de c:')
# Discriminant; if negative, the fractional power below fails
# (complex roots are not handled here).
delta = (b**2)-(4*a*c)
x1 = (-b+(delta)**(1/2))/(2*a)
x2 = (-b-(delta)**(1/2))/(2*a)
# Bug fix: print the computed roots instead of the bare label strings.
print('valor de x1: %s' % x1)
print('valor de x2: %s' % x2)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
e1f5e6d3be9a072b2c8c54a7db8c2b69ee601459
|
b60849c71c644b488a9777edd9809883cbaff884
|
/stringsync/sync.py
|
ba9ca155174cc1b6c88aba0402ca29118333c4b1
|
[
"Apache-2.0"
] |
permissive
|
dev-junior/strings-datasync
|
ded0183a9bea5a715555c4b55ce1ea3ecf2ba6c7
|
858a0d97687dfc42e0010b25718e2b17084e75fb
|
refs/heads/master
| 2020-03-21T02:53:51.338210
| 2015-10-05T22:41:35
| 2015-10-05T22:41:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
from StringIO import StringIO
import sys
from stringsync.mysql2ldif import mysql2ldif, organization_dn
from stringsync.ldif_dumper import dump_tree_sorted
from stringsync import db
from stringsync.ldiff import ldiff_and_apply, ldiff_to_ldif
def _sync_db_to_ldap(organization_id, db_server, ldap_server, dry_run):
    """Generate the organization's LDIF from MySQL and diff it against LDAP.

    If `dry_run` is non-None it is used as a file-like target for the ldif
    diff and the LDAP server is left untouched; otherwise the diff is
    applied directly.

    Raises:
        Exception: if no base dn can be resolved for the organization.
    """
    base_domain = organization_dn(organization_id, db_server)
    if not base_domain:
        raise Exception("Couldn't get a base dn for org %s, refusing to continue"
                        % organization_id)
    # Desired state: LDIF generated from the database.
    new_ldif = StringIO()
    mysql2ldif(organization_id, db_server, new_ldif)
    new_ldif.seek(0)
    # Current state: LDIF dumped from the live LDAP tree.
    cur_ldif = StringIO()
    dump_tree_sorted(ldap_server, base_domain, cur_ldif)
    cur_ldif.seek(0)
    if not dry_run:
        ldiff_and_apply(cur_ldif, new_ldif, ldap_server)
    else:
        ldiff_to_ldif(cur_ldif, new_ldif, dry_run)
def sync_from_config(db_server, ldap_server, organization_id, dry_run=None):
    """
    Sync one organization's directory data from the database to LDAP.

    If dry_run is non-None, it is considered a file in which to put
    the ldif, and no changes will be applied to the ldap server
    itself.
    """
    # Thin public wrapper; note the argument order differs from the helper's.
    _sync_db_to_ldap(organization_id, db_server, ldap_server,
                     dry_run=dry_run)
|
[
"tomheon@gmail.com"
] |
tomheon@gmail.com
|
5d9fa9cd1b5f06a381cee08c729581e92b5e5ed0
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/pybites/advanced/93_v3/rps.py
|
6390079246a910365c6f43a18f17e098926b1800
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,078
|
py
|
# ____ r__ _______ c..
#
# defeated_by d.. paper_ scissors
# rock_ paper
# scissors_ rock
# lose '@ beats @, you lose!'
# win '@ beats @, you win!'
# tie 'tie!'
#
# c.. ?.v..
#
# ___ _get_computer_move
# """Randomly select a move"""
# r.. c.. c..
#
#
# ___ _get_winner computer_choice player_choice
# """Return above lose/win/tie strings populated with the
# appropriate values (computer vs player)"""
# __ ? n.. __ c..
# r.. 'Invalid choice'
# __ ? __ ?
# r.. t..
# __ ? __ d.. c..
# r.. w__.f.. ? ?
# ____
# r.. l__.f.. ? ?
#
#
# ___ game
# """Game loop, receive player's choice via the generator's
# send method and get a random move from computer (_get_computer_move).
# Raise a StopIteration exception if user value received = 'q'.
# Check who wins with _get_winner and print its return output."""
# w... T...
# player_choice y.. ''
# __ ? __ 'q'
# r.. S..
# computer_choice _g..
# print _? ? ?
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
72dd61a158c302f6c81f7b839bd22510c5704f18
|
9bbc76b0d31fb550392eb387aab54862576c3e6d
|
/0x08-python-more_classes/6-rectangle.py
|
df22ebe5faae5a083f60627a756b6e2e54646874
|
[] |
no_license
|
Jesus-Acevedo-Cano/holbertonschool-higher_level_programming
|
39c6e99f6368dba61c8668ffac41ea2257910366
|
ad28e8f296d4c226e1c0d571e476fedb3755fde5
|
refs/heads/master
| 2020-09-29T03:20:19.770661
| 2020-05-15T03:50:10
| 2020-05-15T03:50:10
| 226,937,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,391
|
py
|
#!/usr/bin/python3
class Rectangle():
    """Rectangle defined by a non-negative integer width and height."""

    # Count of Rectangle instances currently alive (decremented in __del__).
    number_of_instances = 0

    def __init__(self, width=0, height=0):
        """Initialization of instance attributes
        Args:
            width (int): width of rectangle
            height (int): rectangle height
        """
        self.width = width
        self.height = height
        Rectangle.number_of_instances += 1

    @property
    def width(self):
        """int: width of the rectangle (validated by the setter)."""
        return self.__width

    @width.setter
    def width(self, value):
        """Set the width.
        value: new value to set
        Raises:
            TypeError: if value is not an integer
            ValueError: if value is negative
        """
        if not isinstance(value, int):
            raise TypeError("width must be an integer")
        if value < 0:
            raise ValueError("width must be >= 0")
        self.__width = value

    @property
    def height(self):
        """int: height of the rectangle (validated by the setter)."""
        return self.__height

    @height.setter
    def height(self, value):
        """Set the height.
        value: new value to set
        Raises:
            TypeError: if value is not an integer
            ValueError: if value is negative
        """
        if not isinstance(value, int):
            raise TypeError("height must be an integer")
        if value < 0:
            raise ValueError("height must be >= 0")
        self.__height = value

    def area(self):
        """Compute the area.
        Return: area of the rectangle
        """
        return self.__width * self.__height

    def perimeter(self):
        """Compute the perimeter.
        Return: perimeter of the rectangle (0 if either side is 0)
        """
        if (self.__width == 0) or (self.__height == 0):
            return 0
        else:
            return (self.__width + self.__height) * 2

    def __str__(self):
        """ returning the string representation of the rectangle """
        rectangle = ""
        if self.height == 0 or self.width == 0:
            return rectangle
        for i in range(self.__height):
            rectangle += "#" * self.__width
            # No trailing newline after the last row.
            if i + 1 != self.__height:
                rectangle += "\n"
        return rectangle

    def __repr__(self):
        """ return a string representation of the rectangle """
        rep = "{}({}, {})".format(self.__class__.__name__,
                                  self.width, self.height)
        return rep

    def __del__(self):
        """prints msg when instance is deleted"""
        print("Bye rectangle...")
        Rectangle.number_of_instances -= 1
|
[
"jeacevedocano@gmail.com"
] |
jeacevedocano@gmail.com
|
fa1dcb7cf75c38c73c228a0c326b4791efa0218c
|
09301c71638abf45230192e62503f79a52e0bd80
|
/besco_erp/besco_warehouse/general_stock_shipping/wizard/__init__.py
|
39773324ebf286802583121ca1528961e4ec8a3c
|
[] |
no_license
|
westlyou/NEDCOFFEE
|
24ef8c46f74a129059622f126401366497ba72a6
|
4079ab7312428c0eb12015e543605eac0bd3976f
|
refs/heads/master
| 2020-05-27T06:01:15.188827
| 2017-11-14T15:35:22
| 2017-11-14T15:35:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
# -*- coding: utf-8 -*-
#import stock_partial_picking
import stock_invoice_onshipping
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"son.huynh@nedcoffee.vn"
] |
son.huynh@nedcoffee.vn
|
27a3e0d3c60ec1fc09deb3b837285e09d0373c5c
|
380a47268c5975473a2e7c38c747bc3bdbd981b1
|
/benchmark/third_party/transformers/src/transformers/models/swinv2/configuration_swinv2.py
|
ffffcd12aff0cda9d890cf0c52350bde1c406755
|
[
"Apache-2.0"
] |
permissive
|
FMInference/FlexGen
|
07aa9b1918c19b02077e13ad07e76840843810dd
|
d34f7b4b43ed87a374f394b0535ed685af66197b
|
refs/heads/main
| 2023-07-24T02:29:51.179817
| 2023-07-21T22:38:31
| 2023-07-21T22:38:31
| 602,270,517
| 6,821
| 411
|
Apache-2.0
| 2023-07-07T22:59:24
| 2023-02-15T21:18:53
|
Python
|
UTF-8
|
Python
| false
| false
| 6,527
|
py
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Swinv2 Transformer model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2_tiny_patch4_windows8_256": (
"https://huggingface.co/microsoft/swinv2_tiny_patch4_windows8_256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Swinv2Model`]. It is used to instantiate a Swin
    Transformer v2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Swin Transformer v2
    [microsoft/swinv2_tiny_patch4_windows8_256](https://huggingface.co/microsoft/swinv2_tiny_patch4_windows8_256)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 4):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        embed_dim (`int`, *optional*, defaults to 96):
            Dimensionality of patch embedding.
        depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
            Depth of each layer in the Transformer encoder.
        num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`):
            Number of attention heads in each layer of the Transformer encoder.
        window_size (`int`, *optional*, defaults to 7):
            Size of windows.
        mlp_ratio (`float`, *optional*, defaults to 4.0):
            Ratio of MLP hidden dimensionality to embedding dimensionality.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether or not a learnable bias should be added to the queries, keys and values.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings and encoder.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        drop_path_rate (`float`, *optional*, defaults to 0.1):
            Stochastic depth rate.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"gelu_new"` are supported.
        use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to add absolute position embeddings to the patch embeddings.
        patch_norm (`bool`, *optional*, defaults to `True`):
            Whether or not to add layer normalization after patch embedding.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        encoder_stride (`int`, `optional`, defaults to 32):
            Factor to increase the spatial resolution by in the decoder head for masked image modeling.

    Example:

    ```python
    >>> from transformers import Swinv2Config, Swinv2Model

    >>> # Initializing a Swinv2 microsoft/swinv2_tiny_patch4_windows8_256 style configuration
    >>> configuration = Swinv2Config()

    >>> # Initializing a model (with random weights) from the microsoft/swinv2_tiny_patch4_windows8_256 style configuration
    >>> model = Swinv2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "swinv2"

    # Map generic transformer attribute names onto Swin-specific ones.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        # NOTE(review): mutable list defaults are shared across calls; they are
        # only assigned (never mutated) here, but tuples would be safer.
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        # NOTE(review): attribute name differs from the `patch_norm` parameter --
        # looks like a typo, but external readers of `path_norm` would break if
        # it were renamed; confirm before fixing.
        self.path_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
|
[
"sqy1415@gmail.com"
] |
sqy1415@gmail.com
|
0c64319d4b22ac9fcd813f02a4b038fd2894fb06
|
d61183674ed7de0de626490cfba77d67c298d1be
|
/py_scripts/bench_plot_lasso_path_63.py
|
fe9f6bc621d038bca5caf96b8eede3fd982255b9
|
[] |
no_license
|
Giannos-G/python_dataset
|
bc670a53143d92cf781e88dee608da38b0e63886
|
18e24cbef16ada1003a3e15a2ed2a3f995f25e46
|
refs/heads/main
| 2023-07-25T20:24:31.988271
| 2021-09-09T10:31:41
| 2021-09-09T10:31:41
| 363,489,911
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,978
|
py
|
"""Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, lars_path_gram
from sklearn.linear_model import lasso_path
from sklearn.datasets import make_regression
def compute_bench(samples_range, features_range):
    """Time Lasso path computations (lars_path with/without Gram, lasso_path
    with/without precompute) over a grid of problem sizes.

    Returns a dict mapping method label -> list of elapsed seconds, one entry
    per (n_samples, n_features) combination, in grid order.
    """
    it = 0
    results = defaultdict(lambda: [])

    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            dataset_kwargs = {
                'n_samples': n_samples,
                'n_features': n_features,
                'n_informative': n_features // 10,
                'effective_rank': min(n_samples, n_features) / 10,
                #'effective_rank': None,
                'bias': 0.0,
            }
            print("n_samples: %d" % n_samples)
            print("n_features: %d" % n_features)
            X, y = make_regression(**dataset_kwargs)

            # gc.collect() before each timing to keep collector pauses out
            # of the measured interval.
            gc.collect()
            print("benchmarking lars_path (with Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            G = np.dot(X.T, X)  # precomputed Gram matrix
            Xy = np.dot(X.T, y)
            lars_path_gram(Xy=Xy, Gram=G, n_samples=y.size, method='lasso')
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lars_path (with Gram)'].append(delta)

            gc.collect()
            print("benchmarking lars_path (without Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lars_path(X, y, method='lasso')
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lars_path (without Gram)'].append(delta)

            gc.collect()
            print("benchmarking lasso_path (with Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lasso_path(X, y, precompute=True)
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lasso_path (with Gram)'].append(delta)

            gc.collect()
            print("benchmarking lasso_path (without Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lasso_path(X, y, precompute=False)
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lasso_path (without Gram)'].append(delta)

    return results
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    import matplotlib.pyplot as plt

    # Benchmark grid: 3 sample sizes x 3 feature sizes.
    samples_range = np.linspace(10, 500, 3).astype(int)
    features_range = np.linspace(10, 800 , 3).astype(int)
    results = compute_bench(samples_range, features_range)

    # Common z-axis limit so the four subplots are visually comparable.
    max_time = max(max(t) for t in results.values())

    fig = plt.figure('scikit-learn Lasso path benchmark results')
    i = 1
    # One 3d surface subplot per benchmarked method, color cycled 'bcry'.
    for c, (label, timings) in zip('bcry', sorted(results.items())):
        ax = fig.add_subplot(2, 2, i, projection='3d')
        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        # plot the actual surface
        ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
        # dummy point plot to stick the legend to since surface plot do not
        # support legends (yet?)
        # ax.plot([1], [1], [1], color=c, label=label)
        ax.set_xlabel('n_samples')
        ax.set_ylabel('n_features')
        ax.set_zlabel('Time (s)')
        ax.set_zlim3d(0.0, max_time * 1.1)
        ax.set_title(label)
        # ax.legend()
        i += 1
    #plt.show()
|
[
"giannos.gavrielides@gmail.com"
] |
giannos.gavrielides@gmail.com
|
266caf0b8aac1131892e35df498ac3d0f0b896d4
|
801c0f1bb516684308ff78cd7a51616791a0e874
|
/chroniker/management/commands/test_status_update.py
|
421eb285e80b9811de5d66c6e334a4e16a2a8f26
|
[] |
no_license
|
Andy-R/django-chroniker
|
c63283cb1f45b2dfe6aeb6559323085cad226c15
|
91d239e69fe3ef5eefa8088e80987ccafb6b45ef
|
refs/heads/master
| 2020-04-05T22:58:59.964744
| 2017-02-10T15:08:33
| 2017-02-10T15:08:33
| 62,376,921
| 0
| 0
| null | 2016-07-01T08:18:00
| 2016-07-01T08:17:59
| null |
UTF-8
|
Python
| false
| false
| 1,742
|
py
|
from __future__ import print_function
import time
from optparse import make_option
import django
from django.core.management.base import BaseCommand
from chroniker.models import Job
class Command(BaseCommand):
    """Count up once per second, persisting progress via Job.update_progress.

    Used to exercise transaction behavior of chroniker status updates on
    different database backends.
    """

    help = 'Incrementally updates status, to help testing transaction ' + \
        'behavior on different database backends.'

    # Django < 1.10 option declaration; on newer Django, BaseCommand has no
    # option_list attribute and the getattr falls back to an empty tuple.
    option_list = getattr(BaseCommand, 'option_list', ()) + (
        make_option('--seconds',
            dest='seconds',
            default=60,
            help='The number of total seconds to count up to.'),
    )

    def create_parser(self, prog_name, subcommand):
        """
        For ``Django>=1.10``
        Create and return the ``ArgumentParser`` which extends ``BaseCommand`` parser with
        chroniker extra args and will be used to parse the arguments to this command.
        """
        from distutils.version import StrictVersion  # pylint: disable=E0611
        parser = super(Command, self).create_parser(prog_name, subcommand)
        version_threshold = StrictVersion('1.10')
        current_version = StrictVersion(django.get_version(django.VERSION))
        if current_version >= version_threshold:
            # Newer Django dropped option_list, so --seconds is re-registered
            # on the argparse parser here.
            parser.add_argument('args', nargs="*")
            parser.add_argument('--seconds',
                dest='seconds',
                default=60,
                help='The number of total seconds to count up to.')
            self.add_arguments(parser)
        return parser

    def handle(self, *args, **options):
        """Entry point: emit one progress update per second for --seconds."""
        seconds = int(options['seconds'])
        for i in range(seconds):
            Job.update_progress(total_parts=seconds, total_parts_complete=i)
            print('%i of %i' % (i, seconds))
            time.sleep(1)
|
[
"chrisspen@gmail.com"
] |
chrisspen@gmail.com
|
7e0b8ad5684c410de74942820196b3c81c829fbe
|
d36de316f920342823dd60dc10fa8ee5ce146c5e
|
/brexit_legislation/urls.py
|
924cff38cbcfd8a9bf87ee22b129c91f764ff4ba
|
[] |
no_license
|
DemocracyClub/EURegulation
|
a621afa3ffae555ebd5fbdc205eceb7746a468d1
|
7cf0bf31b200ab1cb59922f0b3ca120be3757637
|
refs/heads/master
| 2022-07-22T04:48:59.555642
| 2018-06-26T14:42:19
| 2018-06-26T14:42:19
| 80,203,236
| 2
| 0
| null | 2022-07-08T16:02:53
| 2017-01-27T11:47:20
|
Python
|
UTF-8
|
Python
| false
| false
| 318
|
py
|
from django.conf.urls import include, url
from django.views.generic import TemplateView
# Site-wide routes: home page plus the api/browse/search app includes.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='home.html'), name="home"),
    url(r'^api/', include('api.urls')),
    url(r'^browse/', include('browse.urls')),
    url(r'^search/', include('search.urls')),
]
|
[
"sym.roe@talusdesign.co.uk"
] |
sym.roe@talusdesign.co.uk
|
aa4222d7121296f46a7502528baf20ee183c59f5
|
af0deaee911417589b932022ad8bd0127fe42f24
|
/store/urls.py
|
db22aac04b2701050a91449ba9c4263d85097de3
|
[] |
no_license
|
mayankkushal/go-green-v1
|
b74e7ec7ca6ab0921e72fe43b5ea76f8eea739c6
|
76f16616073ecc101fbbbe9fe49173b9638f597c
|
refs/heads/master
| 2022-12-08T21:21:12.223347
| 2019-02-08T16:29:37
| 2019-02-08T16:29:37
| 99,202,678
| 2
| 1
| null | 2022-11-22T01:52:51
| 2017-08-03T07:14:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,135
|
py
|
from django.conf.urls import url,include
from django.views.generic import DetailView, TemplateView, ListView
from django.views.decorators.csrf import csrf_exempt
from . import views
from .models import Store
app_name = "store"
urlpatterns = [
url(r'^create_store/$', views.StoreCreate.as_view(), name='store_add'),
url(r'^store_update/(?P<slug>[\w\-]+)/$', views.StoreUpdate.as_view(), name="store_update"),
url(r'^detail/(?P<slug>[\w\-]+)$', DetailView.as_view(
context_object_name="store",
model=Store
), name="store_detail"),
url(r'^store_list', ListView.as_view(
context_object_name='store_list',
model=Store
), name="store_list"),
url(r'^locator', views.StoreLocator.as_view(), name='locator'),
url(r'^statement', views.StoreStatement.as_view(), name='statement'),
url(r'^add_product', views.ProductCreate.as_view(), name="add_product"),
url(r'^update_product/(?P<pk>[\w\-]+)/', views.ProductUpdate.as_view(), name="update_product"),
url(r'^product_list', views.ProductListView.as_view(), name='product_list'),
url(r'^(?P<pk>[\w\-]+)/delete', views.ProductDelete.as_view(), name='delete_product')
]
|
[
"mayankkushal26@gmail.com"
] |
mayankkushal26@gmail.com
|
5306a2f64af4a94ca1b18c60aedf583b89749974
|
875bb84440094ce058a2ec25a661a7da6bb2e129
|
/algo_py/boj/bj5648.py
|
711657eeb72989ec9d754ee49844ee659979c08d
|
[] |
no_license
|
shg9411/algo
|
150e4291a7ba15990f17ca043ae8ab59db2bf97b
|
8e19c83b1dbc0ffde60d3a3b226c4e6cbbe89a7d
|
refs/heads/master
| 2023-06-22T00:24:08.970372
| 2021-07-20T06:07:29
| 2021-07-20T06:07:29
| 221,694,017
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
import sys
num = []
# Read whitespace-separated tokens until EOF; each token's character string
# is reversed before integer conversion (e.g. "123" -> 321).
while l := sys.stdin.readline():
    num.extend(map(lambda x: int(x[::-1]), l.split()))
# The first token is the count N; sort the remaining numbers and print
# them one per line (the starred map forces eager evaluation).
[*map(print,sorted(num[1:]))]
|
[
"shg9411@naver.com"
] |
shg9411@naver.com
|
dda22826a3fe9a31734e8493f8266e7652b50549
|
5c6a1c0d7fc61c13f3c5b370f4c07542ddfbb582
|
/others/ODES/orbit_adaptive.py
|
c0f442686a2211dd2f07d297c6eea1163c5a2d34
|
[
"Apache-2.0"
] |
permissive
|
hbcbh1999/Numerical-Methods-for-Physics
|
7afab39eb3171ce22ebbad8bdced8d9e0a263607
|
77c41fdf6184fe15fd6b897c3472f2bc00e4347c
|
refs/heads/master
| 2021-01-12T01:28:40.519633
| 2014-03-20T15:32:37
| 2014-03-20T15:32:37
| 78,389,812
| 6
| 4
| null | 2017-01-09T03:19:33
| 2017-01-09T03:19:32
| null |
UTF-8
|
Python
| false
| false
| 7,089
|
py
|
# orbital motion. We consider low mass objects orbiting the Sun. We
# work in units of AU, yr, and solar masses. From Kepler's third law:
#
# 4 pi**2 a**3 = G M P**2
#
# if a is in AU, P is in yr, and M is in solar masses, then
#
# a**3 = P**2
#
# and therefore
#
# 4 pi**2 = G
#
# we work in coordinates with the Sun at the origin
#
# This version implements adaptive timestepping
#
# M. Zingale (2013-02-19)
import math
import numpy
# global parameters
# In AU / yr / solar-mass units, Kepler's third law gives G*M = 4 pi^2.
GM = 4.0*math.pi**2 #(assuming M = 1 solar mass)
# Adaptive timestepping factors (see intRK4): S1 is the conservative safety
# factor on the estimated step; S2 bounds how fast dt may grow or shrink.
S1 = 0.9
S2 = 4.0
class orbitHistory:
    """Container for the time series produced by an orbit integration.

    Stores time, position (x, y), and velocity (u, v) samples as numpy
    arrays so the accessors can use vectorized arithmetic.
    """

    def __init__(self, t=None, x=None, y=None, u=None, v=None):
        # Convert every stored sequence to a numpy array up front.
        self.t = numpy.array(t)
        self.x = numpy.array(x)
        self.y = numpy.array(y)
        self.u = numpy.array(u)
        self.v = numpy.array(v)

    def finalR(self):
        """Distance from the origin (the Sun) at the final integration time."""
        last = len(self.t) - 1
        return math.sqrt(self.x[last]**2 + self.y[last]**2)

    def displacement(self):
        """Straight-line distance between the first and last positions."""
        last = len(self.t) - 1
        dx = self.x[0] - self.x[last]
        dy = self.y[0] - self.y[last]
        return math.sqrt(dx**2 + dy**2)

    def energy(self):
        """Specific energy (kinetic + potential, per unit mass) at each sample."""
        kinetic = 0.5*(self.u**2 + self.v**2)
        radius = numpy.sqrt(self.x**2 + self.y**2)
        return kinetic - GM/radius
def RK4_singlestep(X0, V0, t, dt, rhs):
    """Advance (position, velocity) one classical RK4 step of size dt.

    X0 and V0 are 2-element sequences (x, y) and (u, v); rhs(X, V) must
    return the derivative tuple (xdot, ydot, udot, vdot).  Returns the
    updated (x, y, u, v).
    """
    x, y = X0[0], X0[1]
    u, v = V0[0], V0[1]

    # Four derivative evaluations: start, two midpoints, endpoint.
    k1x, k1y, k1u, k1v = rhs([x, y], [u, v])
    k2x, k2y, k2u, k2v = rhs([x + 0.5*dt*k1x, y + 0.5*dt*k1y],
                             [u + 0.5*dt*k1u, v + 0.5*dt*k1v])
    k3x, k3y, k3u, k3v = rhs([x + 0.5*dt*k2x, y + 0.5*dt*k2y],
                             [u + 0.5*dt*k2u, v + 0.5*dt*k2v])
    k4x, k4y, k4u, k4v = rhs([x + dt*k3x, y + dt*k3y],
                             [u + dt*k3u, v + dt*k3v])

    # Combine slopes with the standard 1:2:2:1 weighting.
    xnew = x + (dt/6.0)*(k1x + 2.0*k2x + 2.0*k3x + k4x)
    ynew = y + (dt/6.0)*(k1y + 2.0*k2y + 2.0*k3y + k4y)
    unew = u + (dt/6.0)*(k1u + 2.0*k2u + 2.0*k3u + k4u)
    vnew = v + (dt/6.0)*(k1v + 2.0*k2v + 2.0*k3v + k4v)

    return xnew, ynew, unew, vnew
class orbit:
    """ hold the initial conditions of a planet/comet/etc. orbiting
        the Sun and integrate """

    def __init__(self, a, e):
        """ a = semi-major axis (in AU),
            e = eccentricity """
        self.x0 = 0.0          # start at x = 0 by definition
        self.y0 = a*(1.0 - e)  # start at perihelion
        self.a = a
        self.e = e
        # perihelion velocity (see C&O Eq. 2.33 for ex)
        self.u0 = -math.sqrt( (GM/a)* (1.0 + e) / (1.0 - e) )
        self.v0 = 0.0

    def keplerPeriod(self):
        """ return the period of the orbit in yr (P^2 = a^3 in these units) """
        return math.sqrt(self.a**3)

    def circularVelocity(self):
        """ return the circular velocity (in AU/yr) corresponding to
            the initial radius -- assuming a circle """
        return math.sqrt(GM/self.a)

    def escapeVelocity(self):
        """ return the escape velocity (in AU/yr) corresponding to
            the initial radius -- assuming a circle """
        return math.sqrt(2.0*GM/self.a)

    def intRK4(self, dt, err, tmax):
        """ integrate the equations of motion using 4th order R-K
            method with an adaptive stepsize, to try to achieve the
            relative error err.  dt here is the initial timestep

            if err < 0, then we don't do adaptive stepping, but rather
            we always walk at the input dt

            Returns an orbitHistory with the full trajectory.
        """

        # initial conditions
        t = 0.0
        x = self.x0
        y = self.y0
        u = self.u0
        v = self.v0

        # store the history for plotting
        tpoints = [t]
        xpoints = [x]
        ypoints = [y]
        upoints = [u]
        vpoints = [v]

        # start with the old timestep
        dtNew = dt

        while (t < tmax):

            if (err > 0.0):
                # adaptive stepping

                # iteration loop -- keep trying to take a step until
                # we achieve our desired error
                relError = 1.e10
                while (relError > err):
                    dt = dtNew
                    if t+dt > tmax:
                        dt = tmax-t

                    # take 2 half steps
                    xtmp, ytmp, utmp, vtmp = \
                        RK4_singlestep([x,y], [u,v],
                                       t, 0.5*dt, self.rhs)
                    xnew, ynew, unew, vnew = \
                        RK4_singlestep([xtmp,ytmp], [utmp,vtmp],
                                       t+0.5*dt, 0.5*dt, self.rhs)

                    # now take just a single step to cover dt
                    xsingle, ysingle, usingle, vsingle = \
                        RK4_singlestep([x,y], [u,v],
                                       t, dt, self.rhs)

                    # {x,y,u,v}double should be more accurate that
                    # {x,y,u,v}single, since it used smaller steps.

                    # estimate the relative error now
                    relError = max( abs((xnew-xsingle)/xnew),
                                    abs((ynew-ysingle)/ynew),
                                    abs((unew-usingle)/unew),
                                    abs((vnew-vsingle)/vnew) )

                    # adaptive timestep algorithm from Garcia (Eqs. 3.30
                    # and 3.31); dtNew is clamped to [dt/S2, S2*dt]
                    dtEst = dt*abs(err/relError)**0.2
                    dtNew = min(max(S1*dtEst, dt/S2), S2*dt)

            else:
                # fixed stepping at the input dt
                if t+dt > tmax:
                    dt = tmax-t

                # take just a single step to cover dt
                xnew, ynew, unew, vnew = \
                    RK4_singlestep([x,y], [u,v],
                                   t, dt, self.rhs)

            t += dt

            # store
            tpoints.append(t)
            xpoints.append(xnew)
            ypoints.append(ynew)
            upoints.append(unew)
            vpoints.append(vnew)

            # set for the next step
            x = xnew; y = ynew; u = unew; v = vnew

        # return a orbitHistory object with the trajectory
        H = orbitHistory(tpoints, xpoints, ypoints, upoints, vpoints)
        return H

    def rhs(self, X, V):
        """ RHS of the equations of motion.  X is the input coordinate
            vector and V is the input velocity vector """

        # current radius
        r = math.sqrt(X[0]**2 + X[1]**2)

        # position
        xdot = V[0]
        ydot = V[1]

        # velocity: inverse-square gravitational acceleration toward origin
        udot = -GM*X[0]/r**3
        vdot = -GM*X[1]/r**3

        return xdot, ydot, udot, vdot
|
[
"steinkirch@gmail.com"
] |
steinkirch@gmail.com
|
7f2ab6c1bdbd251b25752a7629109fcb73413d66
|
4e353bf7035eec30e5ad861e119b03c5cafc762d
|
/QtGui/QWhatsThis.py
|
5b6146b3bf483cf8974d8bf69fddaa4d3f5f1a53
|
[] |
no_license
|
daym/PyQt4-Stubs
|
fb79f54d5c9a7fdb42e5f2506d11aa1181f3b7d5
|
57d880c0d453641e31e1e846be4087865fe793a9
|
refs/heads/master
| 2022-02-11T16:47:31.128023
| 2017-10-06T15:32:21
| 2017-10-06T15:32:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,497
|
py
|
# encoding: utf-8
# module PyQt4.QtGui
# from C:\Python27\lib\site-packages\PyQt4\QtGui.pyd
# by generator 1.145
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
class QWhatsThis(): # skipped bases: <type 'sip.simplewrapper'>
    """Auto-generated stub for PyQt4's QWhatsThis.

    Method bodies are placeholders restored from __doc__; the "return"
    values indicate types, not real results.
    """

    def createAction(self, QObject_parent=None): # real signature unknown; restored from __doc__
        """ QWhatsThis.createAction(QObject parent=None) -> QAction """
        # NOTE(review): generator artifact -- returns the name QAction
        # (undefined in this module) as a type placeholder, not an instance.
        return QAction

    def enterWhatsThisMode(self): # real signature unknown; restored from __doc__
        """ QWhatsThis.enterWhatsThisMode() """
        pass

    def hideText(self): # real signature unknown; restored from __doc__
        """ QWhatsThis.hideText() """
        pass

    def inWhatsThisMode(self): # real signature unknown; restored from __doc__
        """ QWhatsThis.inWhatsThisMode() -> bool """
        return False

    def leaveWhatsThisMode(self): # real signature unknown; restored from __doc__
        """ QWhatsThis.leaveWhatsThisMode() """
        pass

    def showText(self, QPoint, QString, QWidget_widget=None): # real signature unknown; restored from __doc__
        """ QWhatsThis.showText(QPoint, QString, QWidget widget=None) """
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """list of weak references to the object (if defined)"""
|
[
"thekewlstore@gmail.com"
] |
thekewlstore@gmail.com
|
bbf399a94e325f9bc2091e29633b06ec8093eecf
|
b144c5142226de4e6254e0044a1ca0fcd4c8bbc6
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/lblearnedinfo_ec56ce29319e76b0e1d8451951f052e6.py
|
e78c3385472f2daf5a117b909eb5acbcc9efa81a
|
[
"MIT"
] |
permissive
|
iwanb/ixnetwork_restpy
|
fa8b885ea7a4179048ef2636c37ef7d3f6692e31
|
c2cb68fee9f2cc2f86660760e9e07bd06c0013c2
|
refs/heads/master
| 2021-01-02T17:27:37.096268
| 2020-02-11T09:28:15
| 2020-02-11T09:28:15
| 239,721,780
| 0
| 0
|
NOASSERTION
| 2020-02-11T09:20:22
| 2020-02-11T09:20:21
| null |
UTF-8
|
Python
| false
| false
| 5,389
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LbLearnedInfo(Base):
    """This object contains the loopback learned information.

    The LbLearnedInfo class encapsulates a list of lbLearnedInfo resources that is managed by the system.
    A list of resources can be retrieved from the server using the LbLearnedInfo.find() method.

    All attributes on this class are read-only views over server state,
    fetched lazily through ``Base._get_attribute``.
    """

    __slots__ = ()
    # Name of this resource type in the server-side data model (SDM).
    _SDM_NAME = 'lbLearnedInfo'

    def __init__(self, parent):
        super(LbLearnedInfo, self).__init__(parent)

    @property
    def CVlan(self):
        """(read only) The stacked VLAN identifier for the loopback message.

        Returns:
            str
        """
        return self._get_attribute('cVlan')

    @property
    def DstMacAddress(self):
        """(read only) The destination MAC address for the loopback message.

        Returns:
            str
        """
        return self._get_attribute('dstMacAddress')

    @property
    def MdLevel(self):
        """(read only) The MD level for the loopback message.

        Returns:
            number
        """
        return self._get_attribute('mdLevel')

    @property
    def Reachability(self):
        """(read only) Indicates the status of the Ping. If true, the ping was responded to.

        Returns:
            bool
        """
        return self._get_attribute('reachability')

    @property
    def Rtt(self):
        """(read only) The round trip time for the loopback message.

        Returns:
            number
        """
        return self._get_attribute('rtt')

    @property
    def SVlan(self):
        """(read only) The single VLAN identifier for the loopback message.

        Returns:
            str
        """
        return self._get_attribute('sVlan')

    @property
    def SrcMacAddress(self):
        """(read only) The source MAC address for the loopback message.

        Returns:
            str
        """
        return self._get_attribute('srcMacAddress')

    @property
    def TransactionId(self):
        """(read only) The transaction identifier attached to the loopback message.

        Returns:
            number
        """
        return self._get_attribute('transactionId')

    def find(self, CVlan=None, DstMacAddress=None, MdLevel=None, Reachability=None, Rtt=None, SVlan=None, SrcMacAddress=None, TransactionId=None):
        """Finds and retrieves lbLearnedInfo data from the server.

        All named parameters support regex and can be used to selectively retrieve lbLearnedInfo data from the server.
        By default the find method takes no parameters and will retrieve all lbLearnedInfo data from the server.

        NOTE: the parameter names must stay exactly as-is — they are forwarded
        to the server filter via ``locals()``.

        Args:
            CVlan (str): (read only) The stacked VLAN identifier for the loopback message.
            DstMacAddress (str): (read only) The destination MAC address for the loopback message.
            MdLevel (number): (read only) The MD level for the loopback message.
            Reachability (bool): (read only) Indicates the status of the Ping. If true, the ping was responded to.
            Rtt (number): (read only) The round trip time for the loopback message.
            SVlan (str): (read only) The single VLAN identifier for the loopback message.
            SrcMacAddress (str): (read only) The source MAC address for the loopback message.
            TransactionId (number): (read only) The transaction identifier attached to the loopback message.

        Returns:
            self: This instance with matching lbLearnedInfo data retrieved from the server available through an iterator or index

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(locals())

    def read(self, href):
        """Retrieves a single instance of lbLearnedInfo data from the server.

        Args:
            href (str): An href to the instance to be retrieved

        Returns:
            self: This instance with the lbLearnedInfo data from the server available through an iterator or index

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
|
[
"srvc_cm_packages@keysight.com"
] |
srvc_cm_packages@keysight.com
|
a7e57c370cdd942ba3edee059b279d2474ce76b3
|
31c50ab5729ee999698f0683c7f893a11c994269
|
/home/migrations/0002_load_initial_data.py
|
b3b1bef16fb9c537436e22c7f4f5835b367fe7f8
|
[] |
no_license
|
crowdbotics-apps/regi-test-20062
|
e86c2eeabb0156f17c7046820452fa8249ffb8f6
|
3089a7ccf13444fbdaedb40b02b9347ed7338010
|
refs/heads/master
| 2022-12-09T00:20:00.701341
| 2020-09-06T11:46:38
| 2020-09-06T11:46:38
| 293,265,252
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,290
|
py
|
from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the home app with its initial CustomText record."""
    # Resolve the historical model through the migration registry rather than
    # importing it directly, so this migration keeps working as models evolve.
    model = apps.get_model("home", "CustomText")
    model.objects.create(title="Regi Test")
def create_homepage(apps, schema_editor):
    # Seed the home app's landing page. The historical model is resolved via
    # `apps` (migration-safe); the HTML body is stored verbatim.
    HomePage = apps.get_model("home", "HomePage")
    homepage_body = """
<h1 class="display-4 text-center">Regi Test</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
    HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites row (id=1) at this app's domain."""
    site_model = apps.get_model("sites", "Site")
    domain = "regi-test-20062.botics.co"
    defaults = {"name": "Regi Test"}
    # Only set the domain when one is configured; otherwise keep the existing value.
    if domain:
        defaults["domain"] = domain
    site_model.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    """Data migration: load the initial CustomText, HomePage, and Site rows."""

    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.