blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
836924b6936b1ef655900c7fe38af18dfa353232 | 5c58587ebfbf56192b3dc6ed6f43bc002c8e2cff | /payments/api_clients/payeer.py | f71e3b1f3c7e7413e30dbc9dc0f7bf76fb48caaf | [] | no_license | hossamelneily/nexchange | fb9a812cfc72ac00b90cf64d6669a8129c2d2d4b | 6d69274cd3808989abe2f5276feb772d1f0fa8b4 | refs/heads/release | 2022-12-13T09:20:47.297943 | 2019-02-12T08:20:34 | 2019-02-12T08:20:34 | 210,064,740 | 1 | 2 | null | 2022-12-09T00:54:01 | 2019-09-21T23:19:34 | Python | UTF-8 | Python | false | false | 2,487 | py | from payments.api_clients.base import BasePaymentApi
import requests
import json
class PayeerAPIClient(BasePaymentApi):
""" Documentation: http://docs.payeercom.apiary.io/# """
def __init__(self, account='12345', apiId='12345', apiPass='12345',
url='https://payeer.com/ajax/api/api.php'):
self.account = account
self.apiId = apiId
self.apiPass = apiPass
self.url = url
def authorization_check(self):
payload = {
'account': self.account,
'apiId': self.apiId,
'apiPass': self.apiPass
}
response = requests.post(self.url, payload)
return response
def balance_check(self):
payload = {
'account': self.account,
'apiId': self.apiId,
'apiPass': self.apiPass,
'action': 'balance'
}
response = requests.post(self.url, payload)
return response
def get_transaction_history(self, from_date=None, to_date=None,
page_size=50, sort='desc',
trans_type='incoming'):
from_date, to_date = self.get_default_ranges(from_date, to_date)
# to is removed, because it is not UTC on Payeer side.
payload = {
'account': self.account,
'apiId': self.apiId,
'apiPass': self.apiPass,
'action': 'history',
'sort': sort,
'count': page_size,
'from': from_date,
'type': trans_type
}
response = requests.post(self.url, payload)
content = json.loads(response.content.decode('utf-8'))
try:
res = content['history']
except KeyError:
res = content['errors']
return res
def transfer_funds(self, currency_in=None, currency_out=None, amount=None,
receiver=None, comment=None):
""" http://docs.payeercom.apiary.io/#reference/0/transferring-funds """
payload = {
'account': self.account,
'apiId': self.apiId,
'apiPass': self.apiPass,
'action': 'transfer',
'curIn': currency_in,
'sum': amount,
'curOut': currency_out,
'comment': comment,
'to': receiver
}
response = requests.post(self.url, payload)
content = json.loads(response.content.decode('utf-8'))
return content
| [
"oleg@nexchange.co.uk"
] | oleg@nexchange.co.uk |
a8714593d0c65c179984a8185652a1628d449660 | cf398ec3d87f334ecc9aadf23de6813b729b4633 | /src/tests/test_logging.py | 9462dbcd83175d57d3de26cd2373117f6315e65c | [
"MIT",
"MS-PL",
"LicenseRef-scancode-generic-cla"
] | permissive | fuhuifang/confidential-ml-utils | 71a385efb81eb27acd59c6152584313176165092 | 6b530a893fc650a5cf2bb68d560c2833ac1bf014 | refs/heads/main | 2023-05-05T01:46:23.576045 | 2021-03-01T16:19:14 | 2021-03-01T16:19:14 | 344,288,160 | 0 | 0 | MIT | 2021-03-03T23:05:16 | 2021-03-03T23:05:15 | null | UTF-8 | Python | false | false | 3,609 | py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import confidential_ml_utils
from confidential_ml_utils.constants import DataCategory
import io
import logging
import pytest
import re
import sys
def test_basic_config():
logging.warning("before basic config")
logging.basicConfig()
logging.warning("warning from test_basic_config")
log = logging.getLogger("foo")
log.warning("warning from foo logger")
class StreamHandlerContext:
"""
Add, then remove a stream handler with the provided format string. The
`__str__` method on this class returns the value of the internal stream.
"""
def __init__(self, log, fmt: str):
self.logger = log
self.stream = io.StringIO()
self.handler = logging.StreamHandler(self.stream)
self.handler.setLevel(log.getEffectiveLevel())
self.handler.setFormatter(logging.Formatter(fmt))
def __enter__(self):
self.logger.addHandler(self.handler)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.logger.removeHandler(self.handler)
self.handler.flush()
def __str__(self):
return self.stream.getvalue()
@pytest.mark.parametrize("level", ["debug", "info", "warning", "error", "critical"])
def test_data_category_and_log_info_works_as_expected(level):
confidential_ml_utils.enable_confidential_logging()
log = logging.getLogger()
log.setLevel(level.upper())
assert isinstance(log, confidential_ml_utils.logging.ConfidentialLogger)
with StreamHandlerContext(
log, "%(prefix)s%(levelname)s:%(name)s:%(message)s"
) as context:
func = getattr(log, level)
func("PRIVATE")
func("public", category=DataCategory.PUBLIC)
logs = str(context)
assert re.search(r"^SystemLog\:.*public$", logs, flags=re.MULTILINE)
assert not re.search(r"^SystemLog\:.*\:PRIVATE", logs, flags=re.MULTILINE)
@pytest.mark.parametrize("exec_type,message", [(ArithmeticError, "1+1 != 3")])
def test_exception_works_as_expected(exec_type, message):
confidential_ml_utils.enable_confidential_logging()
log = logging.getLogger()
assert isinstance(log, confidential_ml_utils.logging.ConfidentialLogger)
with StreamHandlerContext(
log, "%(prefix)s%(levelname)s:%(name)s:%(message)s"
) as context:
try:
raise exec_type(message)
except exec_type:
log.error("foo", category=DataCategory.PUBLIC)
logs = str(context)
assert re.search(r"^SystemLog\:.*foo$", logs, flags=re.MULTILINE)
def test_all_the_stuff():
confidential_ml_utils.enable_confidential_logging()
log = logging.getLogger("foo")
log.info("public", category=DataCategory.PUBLIC)
log.info("PRIVATE", category=DataCategory.PRIVATE)
log.info("PRIVATE2")
@pytest.mark.skipif(sys.version_info < (3, 8), reason="Requires Python >= 3.8")
def test_enable_confidential_logging_sets_force():
# Pytest adds handlers to the root logger by default.
initial_handlers = list(logging.root.handlers)
confidential_ml_utils.enable_confidential_logging()
assert len(logging.root.handlers) == 1
assert all(h not in logging.root.handlers for h in initial_handlers)
def test_warn_if_root_handlers_already_exist(capsys):
# Pytest adds handlers to the root logger by default.
confidential_ml_utils.enable_confidential_logging()
# https://docs.pytest.org/en/stable/capture.html
stderr = capsys.readouterr().err
assert "SystemLog:The root logger already has handlers set!" in stderr
| [
"noreply@github.com"
] | fuhuifang.noreply@github.com |
04521521cd080fa531cf3cecce5a57426136edae | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/9/w11.py | d34dec34a1f3c4bc9a0709f495d47f09c917783a | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'w11':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
1c3fe5065e8682d0bc0842ad949a3e5b60eae09c | 9a51a7d5ddd3103e5d6ecce0077d70cf0db81927 | /148A.py | d5794a5dbf698af3b2db9c91475cde6fe6e2e5a4 | [] | no_license | Anikcb/Codeforces | 6eb9627eee07756d04e246e78ec1ad7c7b956c99 | 9a8d620447a23afa519f0aef8df6a8be189ddf12 | refs/heads/main | 2023-06-15T05:12:57.169088 | 2021-07-12T15:21:48 | 2021-07-12T15:21:48 | 380,301,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | k = int(input())
l = int(input())
m = int (input())
n = int (input())
d = int (input())
res=0
for i in range(1,d+1):
if i%k==0 or i%l==0 or i%m==0 or i%n==0:
res=res+1
print(res)
| [
"noreply@github.com"
] | Anikcb.noreply@github.com |
a1ccf5b2c9bf663dea0c43f1ea78d0c558b05a2c | c744a0c0abb6a09144329deca0cfec542e0a147c | /trainw_keras.py | 08b3283e6097cddef24b8a68e41a2a81c355e20a | [] | no_license | pattywan234/Human-Activity-Recognition-HAR | c627421209910905be04035672e7f5158fd4a9bb | 636e0aa843350e7b61b5667b96ddcfdba009b82b | refs/heads/master | 2023-06-03T02:25:35.675593 | 2021-06-24T02:47:48 | 2021-06-24T02:47:48 | 235,273,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,245 | py | # from numpy import array
import numpy as np
from LSTM_model05 import vanilla_LSTM, stacked_LSTM, bi_LSTM
#import training and test data
X_train = np.load('wesad/S2/Normalize/label_selected/train_keras/X_train.npy')
X_test = np.load('wesad/S2/Normalize/label_selected/train_keras/X_test.npy')
y_train = np.load('wesad/S2/Normalize/label_selected/train_keras/y_train.npy')
y_test = np.load('wesad/S2/Normalize/label_selected/train_keras/y_test.npy')
# def split_sequence(sequence, n_steps):
# X, y = list(), list()
# for i in range(len(sequence)):
# # find the end of this pattern
# end_ix = i + n_steps
# # check if we are beyond the sequence
# if end_ix > len(sequence)-1:
# break
# # gather input and output parts of the pattern
# seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
# X.append(seq_x)
# y.append(seq_y)
# return array(X), array(y)
#
# raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90]
# time_step = 3
# X, y = split_sequence(raw_seq, time_step)
# n_feature = 1
# X = X.reshape((X.shape[0], X.shape[1], n_feature))
# n_hidden = 50
#
# x_input = array([70, 80, 90])
# x_input = x_input.reshape((1, time_step, n_feature))
n_hidden = 128
time_step = 200
n_feature = len(X_train[2][1])
epoch = 100
# vanilla LSTM
# training
vlstm = vanilla_LSTM(n_hidden, time_step, n_feature)
vlstm.fit(X_train, y_train, epochs=epoch, verbose=0)
# testing
vhat = vlstm.predict(X_test, verbose=0)
print('predicted result of vanilla LSTM', vhat)
# stacked LSTM
# training
slstm = stacked_LSTM(n_hidden, time_step, n_feature)
slstm.fit(X_train, y_train, epochs=100, verbose=0)
# testing
shat = slstm.predict(X_test, verbose=0)
print('predicted result of stacked LSTM', shat)
# Bidireectional LSTM
# training
blstm = bi_LSTM(n_hidden, time_step, n_feature)
blstm.fit(X_train, y_train, epochs=100, verbose=0)
# testing
bhat = blstm.predict(X_test, verbose=0)
print('predicted result of Bidireectional LSTM', bhat)
np.save('wesad/wesad/S2/Normalize/label_selected/train_keras/v_result.npy', vhat)
np.save('wesad/wesad/S2/Normalize/label_selected/train_keras/s_result.npy', shat)
np.save('wesad/wesad/S2/Normalize/label_selected/train_keras/b_result.npy', bhat)
| [
"noreply@github.com"
] | pattywan234.noreply@github.com |
08fa44a4100c48ec759dfc091669bab7cb0353f1 | 0805820a350d51e832e5a5be995a3046b0445a20 | /developer_test/migrations/0005_auto_20210513_1716.py | 9b2295dd42f55bd3f19053b990b44a588b15b724 | [] | no_license | nseetim/patricia_task | a36ffdac5c15c603c1231800815a62fc619b90ec | 85bf0b3e1d2fa2cef3d7450b7485af8efb078db1 | refs/heads/master | 2023-05-02T07:33:24.371578 | 2021-05-14T05:35:19 | 2021-05-14T05:35:19 | 367,241,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | py | # Generated by Django 3.1.5 on 2021-05-13 17:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('developer_test', '0004_auto_20210513_0013'),
]
operations = [
migrations.AlterField(
model_name='transaction',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='transactions', to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='RequestLogs',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('endpoint', models.CharField(max_length=100, null=True)),
('response_code', models.PositiveSmallIntegerField()),
('method', models.CharField(max_length=10, null=True)),
('remote_address', models.CharField(max_length=20, null=True)),
('exec_time', models.IntegerField(null=True)),
('date', models.DateTimeField(auto_now=True)),
('body_response', models.TextField()),
('body_request', models.TextField()),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"etimnseabasi@gmail.com"
] | etimnseabasi@gmail.com |
3f1b92b2238951235a205e6a99611b26200bd26b | a03fad402b5380c290737ea31d2419bad404fe26 | /enfants/urls.py | 51b80d140f9125d428db40257faa56ef29ed9bc0 | [] | no_license | DavidS1106/pfe_backend | 982986ecf52b6c9b73ea1d45f55662841d51de60 | 7a66f8f1573488fa32c643fd1a3697f2fa4a3d65 | refs/heads/master | 2023-04-13T23:04:51.432942 | 2021-05-10T21:02:41 | 2021-05-10T21:02:41 | 276,981,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | from django.urls import path, include
from . import views
from rest_framework import routers
router = routers.DefaultRouter()
router.register('enfants', views.EnfantsView, basename='enfant')
router.register('logged_enfant', views.logged_enfant, basename='logged_enfant')
router.register('non_logged_enfant', views.non_logged_enfant, basename='non_logged_enfant')
router.register('info_supplementaire', views.info_supplementaireview, basename='info_supplementaire')
router.register('personne_contact', views.personne_contactview, basename='personne_contact')
router.register('handicaps', views.HandicapsView, basename='handicap')
router.register('handicaps_enfants', views.HandicapsEnfantsView, basename='handicap_enfant')
urlpatterns = [
path('', include(router.urls)),
] | [
"david.sabo@hotmail.be"
] | david.sabo@hotmail.be |
52bece35aa3f449fd4068d45847eb3aca3b36443 | 411eff94020c192d5e5f657fa6012232ab1d051c | /game/src/coginvasion/ai/AIBaseGlobal.py | e02f38f0e5f171a4dab307e0fed79073eeab559e | [] | no_license | xMakerx/cio-src | 48c9efe7f9a1bbf619a4c95a4198aaace78b8491 | 60b2bdf2c4a24d506101fdab1f51752d0d1861f8 | refs/heads/master | 2023-02-14T03:12:51.042106 | 2021-01-15T14:02:10 | 2021-01-15T14:02:10 | 328,268,776 | 1 | 0 | null | 2021-01-15T15:15:35 | 2021-01-09T23:51:37 | Python | UTF-8 | Python | false | false | 960 | py | from AIBase import AIBase
from direct.directnotify.DirectNotifyGlobal import directNotify
from panda3d.core import RescaleNormalAttrib, NodePath, Notify
__builtins__['base'] = AIBase()
__builtins__['ostream'] = Notify.out()
__builtins__['run'] = base.run
__builtins__['taskMgr'] = base.taskMgr
__builtins__['jobMgr'] = base.jobMgr
__builtins__['eventMgr'] = base.eventMgr
__builtins__['messenger'] = base.messenger
__builtins__['bboard'] = base.bboard
__builtins__['config'] = base.config
__builtins__['directNotify'] = directNotify
render = NodePath('render')
render.setAttrib(RescaleNormalAttrib.makeDefault())
render.setTwoSided(0)
__builtins__['render'] = render
from direct.showbase import Loader
base.loader = Loader.Loader(base)
__builtins__['loader'] = base.loader
directNotify.setDconfigLevels()
def inspect(anObject):
from direct.tkpanels import Inspector
Inspector.inspect(anObject)
__builtins__['inspect'] = inspect
taskMgr.finalInit()
| [
"brianlach72@gmail.com"
] | brianlach72@gmail.com |
2c60324b3fa048f21d4ddb7e4a4d608d2f4ae9fe | a8fa4a499c44dce9a82e768edc82bdd193797128 | /ScrapePlugins/Crunchyroll/Run.py | 072c151bc74086a6fe1c380808eb0b7785a732e7 | [] | no_license | oliuz/MangaCMS | d8b2e44922955f6b9310fb6e189115f1985f2e93 | 7e2a710a56248261ab01686d3e586c36ce4a857d | refs/heads/master | 2020-12-28T19:46:41.265347 | 2016-08-27T23:37:47 | 2016-08-27T23:37:47 | 67,316,457 | 1 | 0 | null | 2016-09-03T23:36:21 | 2016-09-03T23:36:21 | null | UTF-8 | Python | false | false | 505 | py |
from .DbLoader import DbLoader
from .ContentLoader import ContentLoader
import runStatus
import ScrapePlugins.RunBase
class Runner(ScrapePlugins.RunBase.ScraperBase):
loggerPath = "Main.Manga.CrunchyRoll.Run"
pluginName = "CrunchyRoll"
def _go(self):
fl = DbLoader()
fl.go()
fl.closeDB()
if not runStatus.run:
return
cl = ContentLoader()
cl.go()
cl.closeDB()
if __name__ == "__main__":
import utilities.testBase as tb
with tb.testSetup():
run = Runner()
run.go()
| [
"something@fake-url.com"
] | something@fake-url.com |
dd901b37ae78074d1b136ce7ad9d125fb38bfa9b | 1f38af9bae11acbe20dd8f5057b374b9760e6659 | /pyscf/geomopt/geometric_solver.py | 6e63b860d5f970435b404aca3d39f5e5b97bdb6f | [
"Apache-2.0"
] | permissive | highlight0112/pyscf | d36104ef727f593d46fbfd3e5d865c6cd0316d84 | 4afbd42bad3e72db5bb94d8cacf1d5de76537bdd | refs/heads/master | 2020-03-25T01:16:59.927859 | 2019-03-06T01:11:59 | 2019-03-06T01:11:59 | 143,229,588 | 0 | 0 | Apache-2.0 | 2019-03-06T01:12:00 | 2018-08-02T02:05:59 | Python | UTF-8 | Python | false | false | 5,188 | py | #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Interface to geomeTRIC library https://github.com/leeping/geomeTRIC
'''
import tempfile
import numpy
import geometric
import geometric.molecule
#from geometric import molecule
from pyscf import lib
from pyscf.geomopt.addons import as_pyscf_method, dump_mol_geometry
from pyscf import __config__
INCLUDE_GHOST = getattr(__config__, 'geomopt_berny_solver_optimize_include_ghost', True)
ASSERT_CONV = getattr(__config__, 'geomopt_berny_solver_optimize_assert_convergence', True)
class PySCFEngine(geometric.engine.Engine):
def __init__(self, scanner):
molecule = geometric.molecule.Molecule()
mol = scanner.mol
molecule.elem = [mol.atom_symbol(i) for i in range(mol.natm)]
# Molecule is the geometry parser for a bunch of formats which use
# Angstrom for Cartesian coordinates by default.
molecule.xyzs = [mol.atom_coords()*lib.param.BOHR] # In Angstrom
super(PySCFEngine, self).__init__(molecule)
self.scanner = scanner
self.cycle = 0
def calc_new(self, coords, dirname):
scanner = self.scanner
mol = scanner.mol
lib.logger.note(scanner, '\nGeometry optimization step %d', self.cycle)
self.cycle += 1
# geomeTRIC handles coords and gradients in atomic unit
coords = coords.reshape(-1,3)
if scanner.verbose >= lib.logger.NOTE:
dump_mol_geometry(self.scanner.mol, coords*lib.param.BOHR)
mol.set_geom_(coords, unit='Bohr')
energy, gradient = scanner(mol)
if scanner.assert_convergence and not scanner.converged:
raise RuntimeError('Nuclear gradients of %s not converged' % scanner.base)
return energy, gradient.ravel()
def kernel(method, assert_convergence=ASSERT_CONV,
include_ghost=INCLUDE_GHOST, constraints=None, **kwargs):
'''Optimize geometry with geomeTRIC library for the given method.
To adjust the convergence threshold, parameters can be set in kwargs as
below:
.. code-block:: python
conv_params = { # They are default settings
'convergence_energy': 1e-6, # Eh
'convergence_grms': 3e-4, # Eh/Bohr
'convergence_gmax': 4.5e-4, # Eh/Bohr
'convergence_drms': 1.2e-3, # Angstrom
'convergence_dmax': 1.8e-3, # Angstrom
}
from pyscf import geometric_solver
geometric_solver.optimize(method, **conv_params)
'''
if isinstance(method, lib.GradScanner):
g_scanner = method
elif getattr(method, 'nuc_grad_method', None):
g_scanner = method.nuc_grad_method().as_scanner()
else:
raise NotImplementedError('Nuclear gradients of %s not available' % method)
if not include_ghost:
g_scanner.atmlst = numpy.where(method.mol.atom_charges() != 0)[0]
g_scanner.assert_convergence = assert_convergence
tmpf = tempfile.mktemp(dir=lib.param.TMPDIR)
m = geometric.optimize.run_optimizer(customengine=PySCFEngine(g_scanner),
input=tmpf, constraints=constraints,
**kwargs)
#FIXME: geomeTRIC library keeps running until converged. We need a function
# to terminate the program even not converged.
conv = True
#return conv, method.mol.copy().set_geom_(m.xyzs[-1], unit='Bohr')
return method.mol.copy().set_geom_(m.xyzs[-1], unit='Angstrom')
optimize = kernel
del(INCLUDE_GHOST, ASSERT_CONV)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf, dft, cc, mp
mol = gto.M(atom='''
C 1.1879 -0.3829 0.0000
C 0.0000 0.5526 0.0000
O -1.1867 -0.2472 0.0000
H -1.9237 0.3850 0.0000
H 2.0985 0.2306 0.0000
H 1.1184 -1.0093 0.8869
H 1.1184 -1.0093 -0.8869
H -0.0227 1.1812 0.8852
H -0.0227 1.1812 -0.8852
''',
basis='3-21g')
mf = scf.RHF(mol)
conv_params = {
'convergence_energy': 1e-4, # Eh
'convergence_grms': 3e-3, # Eh/Bohr
'convergence_gmax': 4.5e-3, # Eh/Bohr
'convergence_drms': 1.2e-2, # Angstrom
'convergence_dmax': 1.8e-2, # Angstrom
}
mol1 = optimize(mf, **conv_params)
print(mf.kernel() - -153.219208484874)
print(scf.RHF(mol1).kernel() - -153.222680852335)
mf = dft.RKS(mol)
mf.xc = 'pbe,'
mf.conv_tol = 1e-7
mol1 = optimize(mf)
mymp2 = mp.MP2(scf.RHF(mol))
mol1 = optimize(mymp2)
mycc = cc.CCSD(scf.RHF(mol))
mol1 = optimize(mycc)
| [
"osirpt.sun@gmail.com"
] | osirpt.sun@gmail.com |
efcef61de6ca47fda0549982e2032134ed07f440 | f9be26f1a01aecda9f4ded28922e1490c450558d | /plc-2.0/lib/python2.7/site-packages/pyfits-3.5-py2.7-linux-x86_64.egg/pyfits/compression.py | 8b72aebbd02c0324116a1c1f29b8765231a7a106 | [] | no_license | NinaKate/CCR | 7c71c5dc2a31af2bb76b2cc2d9e5a7c6193486ee | ba4c5ae159d97bcc78eb3570428ec33f87761ebe | refs/heads/master | 2018-11-14T01:24:27.890646 | 2018-08-27T17:08:23 | 2018-08-27T17:08:23 | 105,586,700 | 0 | 0 | null | 2017-12-03T20:20:34 | 2017-10-02T21:24:35 | C++ | UTF-8 | Python | false | false | 281 | py | def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__,'compression.so')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
| [
"ninastei@buffalo.edu"
] | ninastei@buffalo.edu |
96f689b758e78d0bdd04395f059ac2cacaa4ac36 | 256e664847bf6a24d02b98f755348272f1e73368 | /merra/grid.py | d2a454a75ff7aab1101a7311ca434a67b3c75ecb | [
"BSD-3-Clause"
] | permissive | BRIK-Engenharia/merra | c4c5f9e3be5c6e534248c331bedfb9399d507a0d | 504d167b044a2cf236b727b6d0befcb435612e6a | refs/heads/master | 2021-10-21T14:01:43.391399 | 2019-03-04T10:46:25 | 2019-03-04T10:46:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,836 | py | # The MIT License (MIT)
#
# Copyright (c) 2019, TU Wien
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
The grid module implements the asymmetrical GMAO 0.5 x 0.625 grid
used in MERRA2 as a pygeogrids BasicGrid instance.
"""
import numpy as np
from pygeogrids.grids import BasicGrid
def create_merra_cell_grid():
"""
Function creates the asymmetrical GMAO 0.5 x 0.625 grid as a
BasicGrid instance.
Returns
-------
BasicGrid instance
"""
# define horizontal and vertical resolution of asymmetrical grid
lon_res = 0.625
lat_res = 0.5
# create 361 (lat) x 576 (lon) mesh grid
lon, lat = np.meshgrid(
np.arange(-180, 180, lon_res),
np.arange(-90, 90 + lat_res / 2, lat_res)
)
return BasicGrid(lon.flatten(), lat.flatten()).to_cell_grid(cellsize=5.)
| [
"felix.zaussinger@geo.tuwien.ac.at"
] | felix.zaussinger@geo.tuwien.ac.at |
7a0349daadc974318ba22b4c139e3450f7f2d011 | b008f826a1eaccdadaf7cba6d7a757f709d41ee6 | /salt-2016.3.2/salt/modules/data.py | 7263f1316eed85e060a86740fb98a706aace7ed1 | [
"Apache-2.0"
] | permissive | stephane-martin/salt-debian-packaging | 76b3c6f53bf908c230774abc3c35c0c8dc31d5dd | 4ec73750ba67bfe35a5bc0faa110f2bdec5c6a66 | refs/heads/master | 2020-07-21T14:58:31.913198 | 2016-09-04T18:18:57 | 2016-09-04T18:18:57 | 66,794,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,972 | py | # -*- coding: utf-8 -*-
'''
Manage a local persistent data structure that can hold any arbitrary data
specific to the minion
'''
from __future__ import absolute_import
# Import python libs
import os
import ast
import logging
# Import salt libs
import salt.utils
import salt.payload
# Import 3rd-party lib
import salt.ext.six as six
log = logging.getLogger(__name__)
def clear():
'''
Clear out all of the data in the minion datastore, this function is
destructive!
CLI Example:
.. code-block:: bash
salt '*' data.clear
'''
try:
os.remove(os.path.join(__opts__['cachedir'], 'datastore'))
except (IOError, OSError):
pass
return True
def load():
'''
Return all of the data in the minion datastore
CLI Example:
.. code-block:: bash
salt '*' data.load
'''
serial = salt.payload.Serial(__opts__)
try:
datastore_path = os.path.join(__opts__['cachedir'], 'datastore')
fn_ = salt.utils.fopen(datastore_path, 'rb')
return serial.load(fn_)
except (IOError, OSError, NameError):
return {}
def dump(new_data):
'''
Replace the entire datastore with a passed data structure
CLI Example:
.. code-block:: bash
salt '*' data.dump '{'eggs': 'spam'}'
'''
if not isinstance(new_data, dict):
if isinstance(ast.literal_eval(new_data), dict):
new_data = ast.literal_eval(new_data)
else:
return False
try:
datastore_path = os.path.join(__opts__['cachedir'], 'datastore')
with salt.utils.fopen(datastore_path, 'w+b') as fn_:
serial = salt.payload.Serial(__opts__)
serial.dump(new_data, fn_)
return True
except (IOError, OSError, NameError):
return False
def update(key, value):
'''
Update a key with a value in the minion datastore
CLI Example:
.. code-block:: bash
salt '*' data.update <key> <value>
'''
store = load()
store[key] = value
dump(store)
return True
def getval(key):
'''
Get a value from the minion datastore
.. deprecated:: Carbon
Use ``get`` instead
CLI Example:
.. code-block:: bash
salt '*' data.getval <key>
'''
salt.utils.warn_until(
'Carbon',
'Support for \'getval\' has been deprecated and will be removed '
'in Salt Carbon. Please use \'get\' instead.'
)
return get(key)
def getvals(*keylist):
'''
Get values from the minion datastore
.. deprecated:: Carbon
Use ``get`` instead
CLI Example:
.. code-block:: bash
salt '*' data.getvals <key> [<key> ...]
'''
salt.utils.warn_until(
'Carbon',
'Support for \'getvals\' has been deprecated and will be removed '
'in Salt Carbon. Please use \'get\' instead.'
)
return get(keylist)
def cas(key, value, old_value):
'''
Check and set a value in the minion datastore
CLI Example:
.. code-block:: bash
salt '*' data.cas <key> <value> <old_value>
'''
store = load()
if key not in store:
return False
if store[key] != old_value:
return False
store[key] = value
dump(store)
return True
def pop(key, default=None):
'''
Pop (return & delete) a value from the minion datastore
.. versionadded:: 2015.5.2
CLI Example:
.. code-block:: bash
salt '*' data.pop <key> "there was no val"
'''
store = load()
val = store.pop(key, default)
dump(store)
return val
def get(key, default=None):
'''
Get a (list of) value(s) from the minion datastore
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' data.get <key(s)>
'''
store = load()
if isinstance(key, six.string_types):
return store.get(key, default)
elif default is None:
return [store[k] for k in key if k in store]
else:
return [store.get(k, default) for k in key]
def keys():
'''
Get all keys from the minion datastore
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' data.keys
'''
store = load()
return store.keys()
def values():
'''
Get values from the minion datastore
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' data.values
'''
store = load()
return store.values()
def items():
'''
Get items from the minion datastore
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' data.items
'''
store = load()
return store.items()
def has_key(key):
'''
Check if key is in the minion datastore
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' data.has_key <mykey>
'''
store = load()
return key in store
| [
"stephane.martin@vesperal.eu"
] | stephane.martin@vesperal.eu |
030af696a1ebdd2d98a56cc9345bfe20f5099896 | 67ceb35320d3d02867350bc6d460ae391e0324e8 | /practice/hard/0675-Cut_Trees_for_Golf_Event.py | e91dcd1441c759908435b4cb1b2766949823a97b | [] | no_license | mattjp/leetcode | fb11cf6016aef46843eaf0b55314e88ccd87c91a | 88ccd910dfdb0e6ca6a70fa2d37906c31f4b3d70 | refs/heads/master | 2023-01-22T20:40:48.104388 | 2022-12-26T22:03:02 | 2022-12-26T22:03:02 | 184,347,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,504 | py | class Solution:
def cutOffTree(self, forest: List[List[int]]) -> int:
"""
0. while there are trees to cut down
1. walk to coordinates of next tree; cut down - do BFS dummy
2. if tree is unreachable - return
"""
from collections import deque
from sortedcontainers import SortedDict
def go_to_tree(grid, i, j, tree) -> int:
queue = deque([(i, j, 0)]) # (i, j, steps)
visited = set()
while queue:
row, col, steps = queue.popleft()
if (row, col) == tree:
return steps
for r,c in [(1,0), (-1,0), (0,1), (0,-1)]:
new_row, new_col = row+r, col+c
if (
new_row < len(grid) and
new_col < len(grid[0]) and
new_row > -1 and
new_col > -1 and
(new_row, new_col) not in visited and
grid[new_row][new_col] != 0
):
if (new_row, new_col) == tree:
return steps+1
visited.add((new_row, new_col))
queue.append((new_row, new_col, steps+1))
return None
trees = SortedDict()
for i in range(len(forest)):
for j in range(len(forest[i])):
if forest[i][j] > 1:
trees[forest[i][j]] = (i,j)
total_steps = 0
i = j = 0
for h,tree in trees.items():
steps = go_to_tree(forest, i, j, tree)
if steps == None:
return -1
total_steps += steps
i,j = tree
return total_steps
| [
"noreply@github.com"
] | mattjp.noreply@github.com |
be2c9c0203597021595d503e7f6d72164b2028eb | 889b542e025a3f4857aa9faf7abcec6afebdce37 | /init.spec | c2ada7cfd4de5d92f4af1c8ed130636b9c6554cb | [
"MIT"
] | permissive | mfneirae/Sara | c685bafe6bd69378cd81ada98d011d7f18564948 | a4b4bcae05bf39e4f8ad49b47640ccf552abce03 | refs/heads/master | 2020-03-25T19:20:04.915517 | 2018-09-19T21:43:24 | 2018-09-19T21:43:24 | 144,077,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | spec | # -*- mode: python -*-
block_cipher = None
a = Analysis(['init.py'],
pathex=['C:\\Users\\Ing_99\\Desktop\\Sara'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='init',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
runtime_tmpdir=None,
console=True )
| [
"mfneirae@unal.edu.co"
] | mfneirae@unal.edu.co |
9514286077c40b1598552cdc24d2d2d31844d5fe | 34ed92a9593746ccbcb1a02630be1370e8524f98 | /lib/pints/pints/tests/test_mcmc_relativistic.py | 1fb0e2abb531defd9c0d3b86dccf543b66d3e108 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | HOLL95/Cytochrome_SV | 87b7a680ed59681230f79e1de617621680ea0fa0 | d02b3469f3ee5a4c85d756053bc87651093abea1 | refs/heads/master | 2022-08-01T05:58:16.161510 | 2021-02-01T16:09:31 | 2021-02-01T16:09:31 | 249,424,867 | 0 | 0 | null | 2022-06-22T04:09:11 | 2020-03-23T12:29:29 | Jupyter Notebook | UTF-8 | Python | false | false | 6,142 | py | #!/usr/bin/env python3
#
# Tests the basic methods of the Relativistic MCMC routine.
#
# This file is part of PINTS.
# Copyright (c) 2017-2019, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
import unittest
import numpy as np
import pints
import pints.toy
from shared import StreamCapture
class TestRelativisticMCMC(unittest.TestCase):
    """
    Tests the basic methods of the Relativistic MCMC routine.
    """

    def test_method(self):
        """Runs a short chain and checks the collected samples' shape."""
        # Create log pdf
        log_pdf = pints.toy.GaussianLogPDF([5, 5], [[4, 1], [1, 3]])

        # Create mcmc
        x0 = np.array([2, 2])
        sigma = [[3, 0], [0, 3]]
        mcmc = pints.RelativisticMCMC(x0, sigma)

        # This method needs sensitivities
        self.assertTrue(mcmc.needs_sensitivities())

        # Set number of leapfrog steps
        ifrog = 10
        mcmc.set_leapfrog_steps(ifrog)

        # Perform short run; only samples from the second half of the run
        # are kept (one sample per `ifrog` tells, per the shape check below).
        chain = []
        for i in range(100 * ifrog):
            x = mcmc.ask()
            fx, gr = log_pdf.evaluateS1(x)
            sample = mcmc.tell((fx, gr))
            if i >= 50 * ifrog and sample is not None:
                chain.append(sample)
                if np.all(sample == x):
                    # Accepted proposal: the cached log-pdf must match.
                    self.assertEqual(mcmc.current_log_pdf(), fx)

        chain = np.array(chain)
        self.assertEqual(chain.shape[0], 50)
        self.assertEqual(chain.shape[1], len(x0))

    def test_logging(self):
        """
        Test logging includes name and custom fields.
        """
        log_pdf = pints.toy.GaussianLogPDF([5, 5], [[4, 1], [1, 3]])
        x0 = [np.array([2, 2]), np.array([8, 8])]
        mcmc = pints.MCMCController(
            log_pdf, 2, x0, method=pints.RelativisticMCMC)
        mcmc.set_max_iterations(5)
        with StreamCapture() as c:
            mcmc.run()
        text = c.text()
        self.assertIn('Relativistic MCMC', text)
        self.assertIn(' Accept.', text)

    def test_flow(self):
        """Checks the ask/tell state machine raises on misuse."""
        log_pdf = pints.toy.GaussianLogPDF([5, 5], [[4, 1], [1, 3]])
        x0 = np.array([2, 2])

        # Test initial proposal is first point
        mcmc = pints.RelativisticMCMC(x0)
        self.assertTrue(np.all(mcmc.ask() == mcmc._x0))

        # Repeated asks
        self.assertRaises(RuntimeError, mcmc.ask)

        # Tell without ask
        mcmc = pints.RelativisticMCMC(x0)
        self.assertRaises(RuntimeError, mcmc.tell, 0)

        # Repeated tells should fail
        x = mcmc.ask()
        mcmc.tell(log_pdf.evaluateS1(x))
        self.assertRaises(RuntimeError, mcmc.tell, log_pdf.evaluateS1(x))

        # Bad starting point
        mcmc = pints.RelativisticMCMC(x0)
        mcmc.ask()
        self.assertRaises(
            ValueError, mcmc.tell, (float('-inf'), np.array([1, 1])))

    def test_kinetic_energy(self):
        """
        Tests kinetic energy values and derivatives
        """
        x0 = np.array([2, 2])
        model = pints.RelativisticMCMC(x0)
        model.ask()

        # kinetic energy: compare sampler value against the closed form
        # mc^2 * sqrt(|p|^2 / mc^2 + 1) with the default mass/speed.
        mc2 = 100.0
        momentum = [1.0, 2.0]
        squared = np.sum(np.array(momentum)**2)
        ke1 = mc2 * (squared / mc2 + 1.0)**0.5
        ke2 = model._kinetic_energy(momentum)
        self.assertEqual(ke1, ke2)

        # Same check with an explicit speed of light c = 1 (so mc^2 = m).
        c = 1.0
        m = 1.0
        mc2 = m * c**2
        squared = np.sum(np.array(momentum)**2)
        ke1 = mc2 * (squared / mc2 + 1.0)**0.5
        model = pints.RelativisticMCMC(x0)
        model.set_speed_of_light(c)
        model.ask()
        ke2 = model._kinetic_energy(momentum)
        self.assertEqual(ke1, ke2)

    def test_set_hyper_parameters(self):
        """
        Tests the parameter interface for this sampler.
        """
        x0 = np.array([2, 2])
        mcmc = pints.RelativisticMCMC(x0)

        # Test leapfrog parameters
        n = mcmc.leapfrog_steps()
        d = mcmc.leapfrog_step_size()
        self.assertIsInstance(n, int)
        self.assertTrue(len(d) == mcmc._n_parameters)
        mcmc.set_leapfrog_steps(n + 1)
        self.assertEqual(mcmc.leapfrog_steps(), n + 1)
        self.assertRaises(ValueError, mcmc.set_leapfrog_steps, 0)
        mcmc.set_leapfrog_step_size(0.5)
        self.assertEqual(mcmc.leapfrog_step_size()[0], 0.5)
        self.assertRaises(ValueError, mcmc.set_leapfrog_step_size, -1)
        # Hyper-parameter vector is [steps, step size, mass, speed of light].
        self.assertEqual(mcmc.n_hyper_parameters(), 4)
        mcmc.set_hyper_parameters([n + 2, 2, 0.4, 2.3])
        self.assertEqual(mcmc.leapfrog_steps(), n + 2)
        self.assertEqual(mcmc.leapfrog_step_size()[0], 2)
        self.assertEqual(mcmc.mass(), 0.4)
        self.assertEqual(mcmc.speed_of_light(), 2.3)
        mcmc.set_epsilon(0.4)
        self.assertEqual(mcmc.epsilon(), 0.4)
        self.assertRaises(ValueError, mcmc.set_epsilon, -0.1)
        # A scalar step size is broadcast to all (here 2) parameters.
        mcmc.set_leapfrog_step_size(1)
        self.assertEqual(len(mcmc.scaled_epsilon()), 2)
        self.assertEqual(mcmc.scaled_epsilon()[0], 0.4)
        self.assertEqual(len(mcmc.divergent_iterations()), 0)
        self.assertRaises(ValueError, mcmc.set_leapfrog_step_size, [1, 2, 3])
        mcmc.set_leapfrog_step_size([1.5, 3])
        self.assertEqual(mcmc.leapfrog_step_size()[0], 1.5)
        self.assertEqual(mcmc.leapfrog_step_size()[1], 3)
        c = 3.5
        mcmc.set_speed_of_light(c)
        self.assertEqual(mcmc.speed_of_light(), c)
        self.assertRaises(ValueError, mcmc.set_speed_of_light, -0.1)
        m = 2.9
        mcmc.set_mass(m)
        self.assertEqual(mcmc.mass(), m)
        self.assertRaises(ValueError, mcmc.set_mass, -1.8)
        self.assertRaises(ValueError, mcmc.set_mass, [1, 3])

    def test_other_setters(self):
        # Tests other setters and getters.
        x0 = np.array([2, 2])
        mcmc = pints.RelativisticMCMC(x0)
        self.assertRaises(ValueError, mcmc.set_hamiltonian_threshold, -0.3)
        # Default divergence threshold is 10^3.
        threshold1 = mcmc.hamiltonian_threshold()
        self.assertEqual(threshold1, 10**3)
        threshold2 = 10
        mcmc.set_hamiltonian_threshold(threshold2)
        self.assertEqual(mcmc.hamiltonian_threshold(), threshold2)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
"henney@localhost.localdomain"
] | henney@localhost.localdomain |
e0164f076b0f134294981ab8b376febecefb227c | 02f2e8b08231290656ffaea8fce5bf03b2150780 | /mall/apps/areas/serializers.py | e74483ac5c6f3463545b6b8434d75d72ba22fb25 | [
"MIT"
] | permissive | googleliyang/django-meiduo-teach | 407aa08b65d29c914281a8b759db3b1cdd25defb | afb89767b77118f38dca6f1c2e3b98db0474e48f | refs/heads/master | 2020-04-28T16:25:23.905191 | 2019-03-11T04:38:01 | 2019-03-11T04:38:01 | 175,409,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | from rest_framework import serializers
from areas.models import Area
# area
# Serializer for a province-level Area: exposes only id and name.
class AreaSerializer(serializers.ModelSerializer):
    # area_set = serializers.PrimaryKeyRelatedField(many=True,read_only=True)

    class Meta:
        model = Area
        fields = ['id', 'name']
# Serializer for a city-level Area: nests its child areas.
class SubsAreaSerializer(serializers.ModelSerializer):
    # We want all Areas whose parent is this id, serialized as full
    # dicts rather than as a list of primary keys such as
    # [1, 2, 3, 4, 5].
    # area_set = serializers.PrimaryKeyRelatedField(many=True,read_only=True)
    area_set = AreaSerializer(many=True, read_only=True)
    # subs = AreaSerializer(many=True,read_only=True)

    class Meta:
        model = Area
        # fields = ['subs','id','name']
        fields = ['area_set', 'id', 'name']
"qiruihua@itcast.cn"
] | qiruihua@itcast.cn |
d2ec3022630495244009b0dfaad6ae6fb279abda | 37dcb5cbe43eb2fd1ffe716e6bcf318dce10a275 | /RLE/R8C.py | bb7bfec73a62d8bfdc2d2f2ab76e00969219e847 | [
"MIT"
] | permissive | nesdoug/SNES_00 | 488f405d673927443dbbec05cfa5bfae6667d8c0 | c5755eb3e8d01c017b962a8ad2b5ec1b7067f18a | refs/heads/master | 2021-11-21T10:50:15.410248 | 2021-11-06T13:59:24 | 2021-11-06T13:59:24 | 237,279,311 | 12 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,049 | py | #!/usr/bin/python3
# 8 bit RLE compressor
# written by Doug Fraker 2020
# for SNES background maps (and other things)
# all 8 bit units, and 8-16 bit headers
# non-planar and planar combined version
# (planar = split all even and odd bytes)
# it tests both and outputs only the smaller
# eof byte (last of file) of F0 indicates
# non-planar.
# eof byte FF = planar, which is what we
# expect for SNES maps.
# one byte header ----
# MM CCCCCC
# M - mode, C - count (+1)
# 0 - literal, C+1 values (1-64)
# 1 - rle run, C+1 times (1-64)
# 2 - rle run, add 1 each pass, C+1 times (1-64)
# 3 - extend the value count to 2 bytes
# 00 lit, 40 rle, 80 plus, F0 special
# two byte header ----
# 11 MM CCCC (high) CCCCCCCC (low)
# M - mode (as above), C - count (+1)
# count 1-4096
# c0 lit big, d0 = rle big, e0 = plus big
# F0 - end of data, non-planar
# FF - end of data, planar
# input binary up to 32768 bytes
# note, planar expects an even # of bytes,
# and will pad a zero 00 at the end
# of an odd number of input bytes.
import sys
import os
def try_rle(out_array):
    """Try to encode a run of identical bytes starting at in_array[index-1].

    Uses the module globals index/filesize/count/index2/in_array.  `count`
    ends up holding (run length - 1), matching the C+1 header encoding
    described at the top of the file.  On success (count > 0) a 0x40
    one-byte header (run <= 64) or a 0xD0 two-byte header (run up to 4096)
    plus the repeated value are appended to out_array and index/index2
    advance.  On failure index is restored and count is left at 0 so the
    caller can try the next encoding.
    """
    global index
    global filesize
    global count
    global index2
    oldindex = index
    count = 0
    byte1 = 0
    byte2 = 0
    byte3 = 0
    # Count how many following bytes repeat the byte at index-1.
    while(index < filesize):
        if(count >= 4095):
            break  # two-byte header tops out at a 4096-byte run
        if(in_array[index-1] == in_array[index]):
            count = count + 1
            index = index + 1
        else:
            break
    if (count > 0): # zero is better, leaving it.
        #output to the out array
        if(count > 31): # 2 byte header d0 00
            byte1 = ((count >> 8) & 0x0f) + 0xd0
            byte2 = count & 0xff
            byte3 = in_array[index-1]
            out_array[index2] = byte1
            index2 = index2 + 1
            out_array[index2] = byte2
            index2 = index2 + 1
            out_array[index2] = byte3
            index2 = index2 + 1
        else: # 1 byte header 40
            byte1 = (count & 0x3f) + 0x40
            byte2 = in_array[index-1]
            out_array[index2] = byte1
            index2 = index2 + 1
            out_array[index2] = byte2
            index2 = index2 + 1
        # Step past the last byte consumed by the run.
        index = index + 1
    else:
        # No run found: rewind so another encoder can retry this position.
        count = 0
        index = oldindex
def try_plus(out_array):
    """Try to encode an ascending (+1 per byte) run starting at index-1.

    Same global/state protocol as try_rle: `count` ends up as
    (run length - 1); success emits a 0x80 one-byte header (run <= 64) or
    a 0xE0 two-byte header plus the run's starting value; failure rewinds
    index and leaves count == 0.
    """
    global index
    global filesize
    global count
    global index2
    oldindex = index
    count = 0
    start_value = in_array[index-1]
    byte1 = 0
    byte2 = 0
    byte3 = 0
    # Count how long each byte is exactly one more than its predecessor.
    while(index < filesize):
        if(count >= 255): # in the 8 bit version 4095 doesn't make sense
            break
        if(in_array[index-1] == in_array[index] - 1): #what about wrap around ?
            count = count + 1
            index = index + 1
        else:
            break
    if (count > 0): # zero is better, leaving it.
        #output to the out array
        if(count > 31): # 2 byte header e0 00
            byte1 = ((count >> 8) & 0x0f) + 0xe0
            byte2 = count & 0xff
            byte3 = start_value
            out_array[index2] = byte1
            index2 = index2 + 1
            out_array[index2] = byte2
            index2 = index2 + 1
            out_array[index2] = byte3
            index2 = index2 + 1
        else: # 1 byte header 80
            byte1 = (count & 0x3f) + 0x80
            byte2 = start_value
            out_array[index2] = byte1
            index2 = index2 + 1
            out_array[index2] = byte2
            index2 = index2 + 1
        # Step past the last byte consumed by the run.
        index = index + 1
    else:
        # No ascending run: rewind for the literal encoder.
        count = 0
        index = oldindex
def do_literal(out_array):
    """Emit a literal (uncompressed) run until a compressible pattern starts.

    Scans forward from in_array[index-1] until it sees either three equal
    bytes or three consecutive ascending bytes (which the RLE/plus encoders
    handle better), then writes a 0x00 one-byte header (<= 64 literals) or
    a 0xC0 two-byte header followed by count+1 raw bytes.  Shares the same
    index/filesize/count/index2 globals as the other encoders.
    """
    global index
    global filesize
    global count
    global index2
    byte1 = 0
    byte2 = 0
    byte3 = 0
    start_index = index-1
    count = 0
    index = index + 1
    while(index < filesize):
        if(count >= 4094): # 2 less to fix possible error
            break
        # Stop before a run of three equal bytes...
        if((in_array[index-2] == in_array[index-1]) and (in_array[index-1] == in_array[index])):
            break
        # ...or three ascending (+1) bytes.
        if(((in_array[index-2] == in_array[index-1] - 1)) and (in_array[index-1] == in_array[index] - 1)):
            break
        count = count + 1
        index = index + 1
    # back up 1, found a repeat, or repeat + 1
    count = count - 1
    index = index - 1
    nearend = filesize - index
    if (nearend < 2):
        #end of file, dump rest
        if (nearend == 1):
            count = count + 1
            index = index + 1
        count = count + 1
        index = index + 1
    if (count >= 0):
        #output to the out array; header encodes count, then count+1 bytes
        count2 = count + 1
        if(count > 31): # 2 byte header c0 00
            byte1 = ((count >> 8) & 0x0f) + 0xc0
            byte2 = count & 0xff
            out_array[index2] = byte1
            index2 = index2 + 1
            out_array[index2] = byte2
            index2 = index2 + 1
            for i in range (0,count2):
                byte3 = in_array[start_index]
                out_array[index2] = byte3
                index2 = index2 + 1
                start_index = start_index + 1
        else: # 1 byte header 00
            byte1 = (count & 0x3f)
            out_array[index2] = byte1
            index2 = index2 + 1
            for i in range (0,count2):
                byte2 = in_array[start_index]
                out_array[index2] = byte2
                index2 = index2 + 1
                start_index = start_index + 1
# --- main script -----------------------------------------------------------
# Compresses sys.argv[1] twice (non-planar, then planar: even bytes first,
# odd bytes second) and writes whichever encoding is smaller to a ".rle"
# file next to the input.  The trailing byte marks the mode: 0xF0 = normal,
# 0xFF = planar.
filename = sys.argv[1]
newname = filename[0:-4] + ".rle"
oldfile = open(filename, 'rb')
newfile = open(newname, 'wb') # warning, this may overwrite old file !
filesize = os.path.getsize(filename)
print("input filesize = " + str(filesize))
if(filesize > 32768):
    exit("error, too large. File should be <= 32768 bytes.")
if(filesize < 3):
    exit("error, file too small.")
in_array = [0] * 32768
in_array_P = [0] * 32768
out_array_nonP = [0] * 33000 # a little extra, just in case
out_array_P = [0] * 33000
#copy to array
for i in range (0, filesize):
    in_array[i] = ord(oldfile.read(1))
# first try non-planar...
index = 1 # start at 1, subtract
index2 = 0
non_pl_size = 0
count = 0
#main loop: prefer RLE, then +1 runs, else fall back to literals
while(index < filesize):
    count = 0
    try_rle(out_array_nonP)
    # returns with count > 1 if successful
    if(count == 0):
        try_plus(out_array_nonP)
        # returns with count > 1 if successful
        if(count == 0):
            do_literal(out_array_nonP)
# do final literal, last byte
if(index == filesize):
    #we need 1 more literal
    out_array_nonP[index2] = 0
    index2 = index2 + 1
    byte1 = in_array[filesize-1]
    out_array_nonP[index2] = byte1
    index2 = index2 + 1
# put a final f0 - non-planar symbol
out_array_nonP[index2] = 0xf0
index2 = index2 + 1
non_pl_size = index2
# try again with planar...
filesize_half = (filesize + 1) // 2 # round up, divide by 2
filesize = filesize_half * 2  # odd-length input gets a zero pad byte
split_array = [0] * 16384
split_array2 = [0] * 16384
# split the array
for i in range (0, filesize_half):
    j = i * 2
    k = j + 1
    split_array[i] = in_array[j] # even bytes
    split_array2[i] = in_array[k] # odd bytes
# copy them back
# (so I don't have to change the rest of the code)
for i in range (0, filesize_half):
    in_array_P[i] = split_array[i]
    j = i + filesize_half
    in_array_P[j] = split_array2[i]
#copy planar to original
for i in range (0, filesize):
    in_array[i] = in_array_P[i]
#reset and rerun planar
count = 0
index2 = 0
index = 1
#main again, planar
while(index < filesize):
    count = 0
    try_rle(out_array_P)
    # returns with count > 1 if successful
    if(count == 0):
        try_plus(out_array_P)
        # returns with count > 1 if successful
        if(count == 0):
            do_literal(out_array_P)
# do final literal, last byte
if(index == filesize):
    #we need 1 more literal
    out_array_P[index2] = 0
    index2 = index2 + 1
    byte1 = in_array[filesize-1]
    out_array_P[index2] = byte1
    index2 = index2 + 1
# put a final ff - planar symbol
out_array_P[index2] = 0xff
# note out_array_P[] is the Planar version
# non-planar is out_array_nonP[]
index2 = index2 + 1
pl_size = index2
print("planar out size = " + str(pl_size))
print("non-planar out size = " + str(non_pl_size))
a = 0
# Write whichever encoding came out smaller (ties go to non-planar).
if(non_pl_size <= pl_size):
    print("using non-planar...")
    for i in range (0, non_pl_size):
        byte1 = out_array_nonP[i]
        newfile.write(bytes([byte1]))
    a = non_pl_size
else:
    print("using planar...")
    for i in range (0, pl_size):
        byte1 = out_array_P[i]
        newfile.write(bytes([byte1]))
    a = pl_size
# output percent of original the output is (note: filesize may have been
# rounded up to an even number for the planar pass).
b = 100.0 * a / filesize
b = round(b, 2)
print(" new filesize = " + str(a))
print(" compared to orig = " + str(b) + "%")
# close the files.  BUG FIX: the original referenced .close without
# calling it, so neither file was ever actually closed/flushed.
oldfile.close()
newfile.close()
| [
"dougfraker@gmail.com"
] | dougfraker@gmail.com |
e6a758972e70604e79582b0290a099ed73535f7d | 0f880611b30941662cee4b37fb16e90b1227a3e4 | /lib/utils/log.py | 1a50c636269ed470cc8a80b1712004bcd313ad75 | [] | no_license | ColdHumour/PortfolioMonitor | 6cefa499aa6bec059e035fca7300926f84c840e9 | d7da8a8c52e494bee30b1238521fc383b2489154 | refs/heads/master | 2020-04-06T05:13:51.245235 | 2016-11-02T07:01:34 | 2016-11-02T07:01:34 | 54,858,149 | 0 | 1 | null | 2016-08-24T07:08:16 | 2016-03-28T01:59:22 | Python | UTF-8 | Python | false | false | 731 | py | # -*- coding: utf-8 -*-
"""
log.py
@author: yudi.wu
"""
import json
import logging
from . path import LOG_FILE, CONFIG_FILE
open(LOG_FILE, 'w').close()
def set_logger(name, level):
logger = logging.Logger(name)
logger.setLevel(level)
# set handler
handler = logging.FileHandler(LOG_FILE)
handler.setLevel(level)
# set formatter
formatter = logging.Formatter('%(asctime)s [%(levelname)s] File:%(filename)s Line:%(lineno)d - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
with open(CONFIG_FILE, 'r') as config_file:
config = json.load(config_file)
loglevel = config["loglevel"]
logger = set_logger("APIServer", getattr(logging, loglevel))
| [
"jasper.wuyd@gmail.com"
] | jasper.wuyd@gmail.com |
21b6f5c9313bb2beb0284929069e54b107bf6ea9 | 1d7a6d74a9a906a96aa61ab94c1edee610ccd92b | /zip_build/rvt-qgis/qrvt_dialog.py | 36b723c750e4f016c2627a61d72d8aac9017b43b | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | IsaacMrSmile/rvt-qgis | 350fc6283cfe17e021a22ec62ec98e000b2f0606 | fac15ef95534483967be691a1dfacd5b0bd9a047 | refs/heads/master | 2023-02-21T00:32:30.917054 | 2021-01-22T07:12:22 | 2021-01-22T07:12:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,105 | py | # -*- coding: utf-8 -*-
"""
/***************************************************************************
QRVTDialog
A QGIS plugin
RVT plugin lets you compute different visualizations from raster DEM.
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2020-10-12
git sha : $Format:%H$
copyright : (C) 2020 by Research Centre of the Slovenian Academy of Sciences and Arts
email : ziga.kokalj@zrc-sazu.si
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from qgis.PyQt import uic
from qgis.PyQt import QtWidgets
# This loads your .ui file so that PyQt can populate your plugin with the elements from Qt Designer
# (loadUiType returns (form class, Qt base class); the base class is unused here).
FORM_CLASS, _ = uic.loadUiType(os.path.join(
    os.path.dirname(__file__), 'qrvt_dialog_base.ui'))
class QRVTDialog(QtWidgets.QDialog, FORM_CLASS):
    """Plugin dialog whose widgets are defined by the Designer .ui form."""

    def __init__(self, parent=None):
        """Constructor."""
        super(QRVTDialog, self).__init__(parent)
        # Set up the user interface from Designer through FORM_CLASS.
        # After self.setupUi() you can access any designer object by doing
        # self.<objectname>, and you can use autoconnect slots - see
        # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html
        # #widgets-and-dialogs-with-auto-connect
        self.setupUi(self)
| [
"zm8597@student.uni-lj.si"
] | zm8597@student.uni-lj.si |
9bff11e3a8633333af71b3cc5a2bc2241e5e3ec0 | 68c182cbb167ec6870ec1a301958e71ce8f9bcbb | /test/functional/p2p_permissions.py | d59b0acadab68cf792b145888a6743bf9ce0b48e | [
"MIT"
] | permissive | megamcloud/umkoin | de10e9bbe0afbdc7210db56e41f823a0805283be | 3e0d7a48f459ff09f0b9e02c3ed30563670009c8 | refs/heads/master | 2022-05-30T00:18:10.962521 | 2020-04-26T08:21:01 | 2020-04-26T08:21:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,694 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p permission message.
Test that permissions are correctly calculated and applied
"""
from test_framework.address import ADDRESS_BCRT1_P2WSH_OP_TRUE
from test_framework.messages import (
CTransaction,
CTxInWitness,
FromHex,
)
from test_framework.mininode import P2PDataStore
from test_framework.script import (
CScript,
OP_TRUE,
)
from test_framework.test_node import ErrorMatch
from test_framework.test_framework import UmkoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
p2p_port,
wait_until,
)
class P2PPermissionsTests(UmkoinTestFramework):
    """Checks that -whitelist/-whitebind permission flags are parsed,
    merged and enforced correctly (including legacy flag interactions)."""

    def set_test_params(self):
        # Two nodes on a clean chain; node 1 is restarted with different
        # permission flags throughout the test.
        self.num_nodes = 2
        self.setup_clean_chain = True

    def run_test(self):
        """Exercises forced tx relay, then a matrix of permission configs."""
        self.check_tx_relay()

        self.checkpermission(
            # default permissions (no specific permissions)
            ["-whitelist=127.0.0.1"],
            ["relay", "noban", "mempool"],
            True)

        self.checkpermission(
            # relay permission removed (no specific permissions)
            ["-whitelist=127.0.0.1", "-whitelistrelay=0"],
            ["noban", "mempool"],
            True)

        self.checkpermission(
            # forcerelay and relay permission added
            # Legacy parameter interaction which set whitelistrelay to true
            # if whitelistforcerelay is true
            ["-whitelist=127.0.0.1", "-whitelistforcerelay"],
            ["forcerelay", "relay", "noban", "mempool"],
            True)

        # Let's make sure permissions are merged correctly
        # For this, we need to use whitebind instead of bind
        # by modifying the configuration file.
        ip_port = "127.0.0.1:{}".format(p2p_port(1))
        self.replaceinconfig(1, "bind=127.0.0.1", "whitebind=bloomfilter,forcerelay@" + ip_port)
        self.checkpermission(
            ["-whitelist=noban@127.0.0.1"],
            # Check parameter interaction forcerelay should activate relay
            ["noban", "bloomfilter", "forcerelay", "relay"],
            False)
        self.replaceinconfig(1, "whitebind=bloomfilter,forcerelay@" + ip_port, "bind=127.0.0.1")

        self.checkpermission(
            # legacy whitelistrelay should be ignored
            ["-whitelist=noban,mempool@127.0.0.1", "-whitelistrelay"],
            ["noban", "mempool"],
            False)

        self.checkpermission(
            # legacy whitelistforcerelay should be ignored
            ["-whitelist=noban,mempool@127.0.0.1", "-whitelistforcerelay"],
            ["noban", "mempool"],
            False)

        self.checkpermission(
            # missing mempool permission to be considered legacy whitelisted
            ["-whitelist=noban@127.0.0.1"],
            ["noban"],
            False)

        self.checkpermission(
            # all permission added
            ["-whitelist=all@127.0.0.1"],
            ["forcerelay", "noban", "mempool", "bloomfilter", "relay"],
            False)

        # Malformed permission strings must make node startup fail.
        self.stop_node(1)
        self.nodes[1].assert_start_raises_init_error(["-whitelist=oopsie@127.0.0.1"], "Invalid P2P permission", match=ErrorMatch.PARTIAL_REGEX)
        self.nodes[1].assert_start_raises_init_error(["-whitelist=noban@127.0.0.1:230"], "Invalid netmask specified in", match=ErrorMatch.PARTIAL_REGEX)
        self.nodes[1].assert_start_raises_init_error(["-whitebind=noban@127.0.0.1/10"], "Cannot resolve -whitebind address", match=ErrorMatch.PARTIAL_REGEX)

    def check_tx_relay(self):
        """Verifies forcerelay pushes mempool txs but never invalid ones."""
        block_op_true = self.nodes[0].getblock(self.nodes[0].generatetoaddress(100, ADDRESS_BCRT1_P2WSH_OP_TRUE)[0])
        self.sync_all()

        self.log.debug("Create a connection from a whitelisted wallet that rebroadcasts raw txs")
        # A python mininode is needed to send the raw transaction directly. If a full node was used, it could only
        # rebroadcast via the inv-getdata mechanism. However, even for whitelisted connections, a full node would
        # currently not request a txid that is already in the mempool.
        self.restart_node(1, extra_args=["-whitelist=forcerelay@127.0.0.1"])
        p2p_rebroadcast_wallet = self.nodes[1].add_p2p_connection(P2PDataStore())

        self.log.debug("Send a tx from the wallet initially")
        # Spend the coinbase of the first generated block to the anyone-can-
        # spend P2WSH address, witness satisfied with a single OP_TRUE.
        tx = FromHex(
            CTransaction(),
            self.nodes[0].createrawtransaction(
                inputs=[{
                    'txid': block_op_true['tx'][0],
                    'vout': 0,
                }], outputs=[{
                    ADDRESS_BCRT1_P2WSH_OP_TRUE: 5,
                }]),
        )
        tx.wit.vtxinwit = [CTxInWitness()]
        tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
        txid = tx.rehash()

        self.log.debug("Wait until tx is in node[1]'s mempool")
        p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])

        self.log.debug("Check that node[1] will send the tx to node[0] even though it is already in the mempool")
        connect_nodes(self.nodes[1], 0)
        with self.nodes[1].assert_debug_log(["Force relaying tx {} from whitelisted peer=0".format(txid)]):
            p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])
            wait_until(lambda: txid in self.nodes[0].getrawmempool())

        self.log.debug("Check that node[1] will not send an invalid tx to node[0]")
        # Inflate the output value so the tx spends more than its input.
        tx.vout[0].nValue += 1
        txid = tx.rehash()
        p2p_rebroadcast_wallet.send_txs_and_test(
            [tx],
            self.nodes[1],
            success=False,
            reject_reason='Not relaying non-mempool transaction {} from whitelisted peer=0'.format(txid),
        )

    def checkpermission(self, args, expectedPermissions, whitelisted):
        """Restarts node 1 with ``args`` and asserts its peer reports
        exactly ``expectedPermissions`` and the given whitelisted flag."""
        self.restart_node(1, args)
        connect_nodes(self.nodes[0], 1)
        peerinfo = self.nodes[1].getpeerinfo()[0]
        assert_equal(peerinfo['whitelisted'], whitelisted)
        assert_equal(len(expectedPermissions), len(peerinfo['permissions']))
        for p in expectedPermissions:
            if not p in peerinfo['permissions']:
                raise AssertionError("Expected permissions %r is not granted." % p)

    def replaceinconfig(self, nodeid, old, new):
        """Performs an in-place string replacement in a node's config file."""
        with open(self.nodes[nodeid].umkoinconf, encoding="utf8") as f:
            newText = f.read().replace(old, new)
        with open(self.nodes[nodeid].umkoinconf, 'w', encoding="utf8") as f:
            f.write(newText)
if __name__ == '__main__':
    # Allow running this functional test directly from the command line.
    P2PPermissionsTests().main()
| [
"vmta@yahoo.com"
] | vmta@yahoo.com |
fee35bcc21bb856c0b4260204cdaba7d991b37b3 | 8ead622826bc21b37eb6fd2048af91d510151356 | /projects/alonememo/app.py | 94e5e9ca5d90d447b61d5b72d5f09acf303d21a0 | [] | no_license | smc5720/Sparta-Coding-Basic | 5762a5e397c60fcbde5e9ea58fcb9449931d8031 | ff4271ededa516ed5c44875509e4b916f04320c5 | refs/heads/main | 2023-05-14T19:21:53.848721 | 2021-06-11T09:32:00 | 2021-06-11T09:32:00 | 374,037,883 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | from flask import Flask, render_template, jsonify, request
app = Flask(__name__)

import requests
from bs4 import BeautifulSoup

from pymongo import MongoClient

# Local MongoDB instance; memos are stored in the "dbsparta" database.
client = MongoClient('localhost', 27017)
db = client.dbsparta
## Part that serves the HTML page
@app.route('/')
def home():
    """Render the single-page front end."""
    return render_template('index.html')
@app.route('/memo', methods=['GET'])
def listing():
    """Return every saved memo (Mongo _id stripped) as JSON."""
    articles = list(db.articles.find({}, {'_id': False}))
    return jsonify({'all_articles': articles})
## Part that acts as the JSON API
@app.route('/memo', methods=['POST'])
def saving():
    """Scrape Open Graph metadata from the posted URL and store a memo."""
    url_receive = request.form['url_give']
    comment_receive = request.form['comment_give']

    # Send a browser-like User-Agent so sites serve their normal HTML.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
    data = requests.get(url_receive, headers=headers)

    soup = BeautifulSoup(data.text, 'html.parser')

    # Open Graph tags.  NOTE(review): select_one returns None for a missing
    # tag, which would raise on ['content'] - presumably all submitted
    # pages carry these tags; confirm before hardening.
    og_title = soup.select_one('meta[property="og:title"]')['content']
    og_image = soup.select_one('meta[property="og:image"]')['content']
    og_desc = soup.select_one('meta[property="og:description"]')['content']

    doc = {
        'title': og_title,
        'image': og_image,
        'desc': og_desc,
        'url': url_receive,
        'comment': comment_receive,
    }
    db.articles.insert_one(doc)

    return jsonify({'msg': '저장 완료'})
if __name__ == '__main__':
    # Bind on all interfaces; Flask debug server, local development only.
    app.run('0.0.0.0', port=5000, debug=True)
| [
"smc503@naver.com"
] | smc503@naver.com |
6e69cbf407d691845da736d0f0e4fe9b57e0019a | a65c77b44164b2c69dfe4bfa2772d18ae8e0cce2 | /test/testgen.py | 045d1ec03e086452f960b6967c1fd274939e737d | [] | no_license | dl8sd11/online-judge | 553422b55080e49e6bd9b38834ccf1076fb95395 | 5ef8e3c5390e54381683f62f88d03629e1355d1d | refs/heads/master | 2021-12-22T15:13:34.279988 | 2021-12-13T06:45:49 | 2021-12-13T06:45:49 | 111,268,306 | 1 | 6 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | #!/usr/bin/env python3
from random import randint, shuffle
import string
import random
def randomString(stringLength=10):
    """Generate a random lowercase string of the requested length."""
    alphabet = string.ascii_lowercase
    picked = []
    for _ in range(stringLength):
        picked.append(random.choice(alphabet))
    return ''.join(picked)
def randomTree(sz):
    """Return the edge list of a random labelled tree on sz nodes (1-based)."""
    # Attach each new node (node + 1) to a uniformly chosen earlier node.
    return [(node + 1, randint(0, node - 1) + 1) for node in range(1, sz)]
def get_perm(sz):
    """Return a uniformly random permutation of 1..sz as a list."""
    perm = list(range(1, sz + 1))
    shuffle(perm)
    return perm
def genInt():
    """Uniform random integer in the inclusive range [1, 10]."""
    low, high = 1, 10
    return randint(low, high)
def getChar():
    """Return a random card face as a string: A/K/Q/J or '2'..'10'.

    Face cards come up with probability 4/13, mirroring a real deck's
    distribution of the four named ranks among thirteen.
    """
    faces = "AKQJ"
    roll = randint(1, 13)
    if roll <= 4:
        return random.choice(faces)
    return str(randint(2, 10))
# Emit one random test case: a count n followed by n space-separated values.
n = randint(3, 4)
a = [str(randint(1, 10)) for x in range(n)]
print(n)
print(" ".join(a))
| [
"tmd910607@gmail.com"
] | tmd910607@gmail.com |
f7fcb553c02ffff0e4816ffbb847e1c926470726 | b55f70755712b26688b80a8ba3806a4124fbcd11 | /BinaryTree/lowest_common_ancestor.py | c5fac7a034bae171afb4a6a2bb03b6ce00e81aa2 | [] | no_license | Shanshan-IC/Algorithm_Python | a44703a0f33370c47e3e55af70aadeae08d5a1a5 | ace23976d2f1f51141498c4c4ea6bca0039b233f | refs/heads/master | 2021-09-08T07:16:59.576674 | 2018-03-08T09:24:01 | 2018-03-08T09:24:01 | 114,254,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | '''
If both nodes are in the left subtree, the LCA is in the left subtree.
If both nodes are in the right subtree, the LCA is in the right subtree.
If one node is on each side, the LCA is the current root node.
'''
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """
    @param: root: The root of the binary search tree.
    @param: A: A TreeNode in a Binary.
    @param: B: A TreeNode in a Binary.
    @return: Return the least common ancestor(LCA) of the two nodes.
    """

    def lowestCommonAncestor(self, root, A, B):
        """Divide and conquer: each subtree reports whether it holds A or B.

        When both subtrees report a hit, the current root is the split
        point and therefore the LCA; otherwise the answer bubbles up from
        whichever side found anything (None if neither did).
        """
        # Base case: empty subtree, or the root itself is one of the targets.
        if not root or root is A or root is B:
            return root
        found_left = self.lowestCommonAncestor(root.left, A, B)
        found_right = self.lowestCommonAncestor(root.right, A, B)
        # Targets split across both sides: this root is the LCA.
        if found_left and found_right:
            return root
        # Propagate whichever side found something, or None.
        return found_left or found_right
"shanshan.fu15@imperial.ac.uk"
] | shanshan.fu15@imperial.ac.uk |
80c5331bac48eab4818a62eed9690372b32be540 | 0fbc1c6f72a69f368c14f2ca9af8b03487140b6d | /contact/views.py | a057a95c4ae29a16f497435a13015110b9c9b17d | [] | no_license | ritusabu/Ecommerce-product | 1c3e95d09438ab2ff8dfb83aaf3c2771f97e3440 | b9e6f8f16c1dbf19a0f2624f5c9b1bbdd8b2d670 | refs/heads/master | 2023-03-05T05:18:02.065448 | 2021-02-16T10:30:10 | 2021-02-16T10:30:10 | 334,204,268 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | from django.shortcuts import render
from django.http import HttpResponse
from contact.models import Contact
from product.models import Offer, Category, Products
# Create your views here.
def contact(request):
    """Render the contact page and persist a submitted message.

    Reads the optional POST fields ``name``, ``mail`` and ``text``; when
    all three are non-empty a ``Contact`` row is saved.  The template
    context always carries the offers and categories used by the shared
    page chrome.
    """
    name = request.POST.get("name", "")
    email = request.POST.get("mail", "")
    message = request.POST.get("text", "")
    offers = Offer.objects.all()
    categories = Category.objects.all()
    if name != "" and email != "" and message != "":
        # Distinct local names fix the original code's reuse of `c` for
        # both the message text and the model instance.
        # (`messege` matches the misspelled field on the Contact model.)
        Contact(name=name, email=email, messege=message).save()
    context = {
        "offer": offers,
        "catagory": categories,
    }
    return render(request, 'contact/contact.html', context)
return HttpResponse("E-commerse") | [
"ritusabu12@gmail.com"
] | ritusabu12@gmail.com |
eb02dc1e6b7e567cad140a6e64b4ffcbd14c3557 | 44f3a3b7541c99554cbd94b015a8c25593f3e95e | /email/pyse_mail/public/login.py | d317bbd7ab8d43b189f8f4b3b87307630db4bbd1 | [] | no_license | reblues/study | adc2d255c4c5990fcfcd4d8691625057c95357d8 | 7ea2854c778279d40d1dd3df362f463774f5971b | refs/heads/master | 2020-12-31T05:24:13.440027 | 2016-06-01T06:28:33 | 2016-06-01T06:28:33 | 58,917,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | #!/user/bin/env python
# -*- coding:utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
import time,unittest
def Login(self):
    """Log in to the mail site using self's selenium webdriver.

    Hovers over the "normal login" tab to reveal the form, fills in the
    account and password fields, then clicks the login button.
    NOTE(review): credentials are hard-coded in plain text below -- move
    them to configuration before sharing this script.
    NOTE(review): presumably called as a method of a test class exposing
    `self.driver` -- confirm against the test harness.
    """
    driver = self.driver
    driver.maximize_window()
    # Hover over the normal-login tab so the input fields become interactable.
    target = driver.find_element_by_id("lbNormal")
    ActionChains(driver).move_to_element(target).perform()
    time.sleep(2)
    driver.find_element_by_id("idInput").send_keys("18710924623")
    driver.find_element_by_id("pwdInput").send_keys("yang9351yu")
    driver.find_element_by_id("loginBtn").click()
    time.sleep(2)
    print "登录成功"
def Logout(self):
    """Log out by clicking the link whose text is the logout label.

    Prints the link text before clicking; relies on self.driver being an
    already-logged-in selenium webdriver.
    """
    driver = self.driver
    time.sleep(2)
    # u"退出" is the "log out" link label on the page.
    aim = driver.find_element_by_link_text(u"退出")
    print aim.text
    aim.click()
| [
"534948651@qq.com"
] | 534948651@qq.com |
e1c0280fa4672db1594118ed124f9ae4c0c5f5c0 | 0acfe39cbbf843e35696c4c5e312192ac7b7159b | /renrenribao/article/migrations/0002_article_created_time.py | e974de0e9f1617a4c14c24ba56fbcc7905085d90 | [] | no_license | tiant167/renrenribao | c2d8bdaa2ecdb60cecfdfb53a17e96c940851920 | 1b0a1fdcd5ce3fa97adc515c3178402ec869f018 | refs/heads/master | 2021-01-25T07:39:45.462791 | 2014-12-17T08:50:58 | 2014-12-17T08:50:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration: add Article.created_time.

    The literal datetime default is a one-off value used only to back-fill
    existing rows (preserve_default=False); new rows get auto_now_add.
    """
    dependencies = [
        ('article', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='article',
            name='created_time',
            field=models.DateTimeField(default=datetime.datetime(2014, 12, 14, 8, 40, 24, 800832, tzinfo=utc), auto_now_add=True),
            preserve_default=False,
        ),
    ]
| [
"haotianchai@wandoujia.com"
] | haotianchai@wandoujia.com |
2c4d4ae9918a8cfb4516783ec1911a72d0c2c3c0 | ca51878f71e5b3e1958e42e45019f92c9045b02e | /ClubWebsite/settings.py | 299f6528cb142d270cc7732323a0fb9ce4ec8ea1 | [
"Apache-2.0"
] | permissive | pettta/ClubWebsite | 07d1c828fa73f8d5b35fba99bfa910ba3a610b7c | 83ed5e2c923d8c13aa1f76e564bb817e6ad81fb1 | refs/heads/main | 2023-07-16T10:43:55.841813 | 2021-07-01T20:32:46 | 2021-07-01T20:32:46 | 401,377,529 | 0 | 0 | Apache-2.0 | 2021-08-30T14:41:02 | 2021-08-30T14:41:01 | null | UTF-8 | Python | false | false | 3,280 | py | """
Django settings for ClubWebsite project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control -- rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = 'django-insecure-g7$1p+p7425l%g=$v-g22csj)wn3&e7s*7!#3=cd9_xswsm061'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): '*' accepts any Host header; restrict to real hostnames in production.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ClubWebsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ClubWebsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"vivekkhimani07@gmail.com"
] | vivekkhimani07@gmail.com |
b7787491c00166a9f9516646d4c2054fe8fe1245 | 557ca4eae50206ecb8b19639cab249cb2d376f30 | /Chapter12/Ex12_3.py | 96ad465cf0df4d21b32435eb806eb5946bf1eb75 | [] | no_license | philipdongfei/Think-python-2nd | 781846f455155245e7e82900ea002f1cf490c43f | 56e2355b8d5b34ffcee61b38fbfd200fd6d4ffaf | refs/heads/master | 2021-01-09T19:57:49.658680 | 2020-03-13T06:32:11 | 2020-03-13T06:32:11 | 242,441,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | from Ex12_2 import *
def metathesis_pairs(d):
    """Print every pair of words from the same anagram set that differ in
    exactly two letter positions (a metathesis pair).

    d maps a key to a list of mutually anagrammatic words; each unordered
    pair is reported once, in lexicographic order.
    """
    for group in d.values():
        for first in group:
            for second in group:
                if second <= first:
                    continue
                if word_distance(first, second) == 2:
                    print(first, second)
def word_distance(word1, word2):
    """Return the Hamming distance between two equal-length words."""
    assert len(word1) == len(word2)
    return sum(1 for a, b in zip(word1, word2) if a != b)
def main():
    """Entry point: build anagram sets from words.txt, print metathesis pairs."""
    sets = all_anagrams('words.txt')
    metathesis_pairs(sets)
if __name__ == '__main__':
    main()
| [
"philip.dongfei@gmail.com"
] | philip.dongfei@gmail.com |
fff66fda560450668aa9428a18225497f0cf09ee | cbb6ef5b109d262459167dc8cee8264e4b4b4487 | /pollsapi/polls/migrations/0001_initial.py | e72942e34d8f5ed5363e047f78baa698c959b1a4 | [] | no_license | TaTRaTa/Django_DRF | 2b5541eec39fbc85fa71634f80efadf80ff15a36 | 232d36058073ee74e63e13cf9f69481b51d0a3b9 | refs/heads/master | 2021-03-13T12:38:16.164786 | 2020-03-17T21:56:32 | 2020-03-17T21:56:32 | 246,681,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,910 | py | # Generated by Django 3.0.4 on 2020-03-12 22:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the polls app.

    Creates Poll (question + author), Choice (text, FK to Poll) and Vote
    (FK to Choice/Poll/user, one vote per user per poll).
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # A poll question owned by the user who created it.
        migrations.CreateModel(
            name='Poll',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question', models.CharField(max_length=100)),
                ('pub_date', models.DateTimeField(auto_now=True)),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # One answer option belonging to a poll.
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=100)),
                ('poll', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='choices', to='polls.Poll')),
            ],
        ),
        # A user's vote; unique_together prevents double-voting on a poll.
        migrations.CreateModel(
            name='Vote',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='polls.Choice')),
                ('poll', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Poll')),
                ('voted_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'unique_together': {('poll', 'voted_by')},
            },
        ),
    ]
| [
"cvetomir.defyy@gmail.com"
] | cvetomir.defyy@gmail.com |
ac572ec20daedd8a6e8d42a53fc9d042e52f7602 | 24820f2df644263d6273e7de78f0733ca81511d9 | /Robot_Simulator/pose_script.py | d28af4ff074b8220be03e92cd55a9d617ebc6258 | [] | no_license | dwhisler/robotic_arm | 1c0603d9a21710bd08d108b3397aa7147f789720 | c8eef383e1401141f5a87f485798240cfd2ff8fb | refs/heads/master | 2020-03-15T18:37:24.885097 | 2018-09-20T04:03:42 | 2018-09-20T04:03:42 | 132,288,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 19 23:18:15 2018
@author: David
"""
from ServoDriver import *
from DTRobot import *
# Serial port number for the servo driver board.
COM = 4
serv = ServoDriver(COM)
rob = DTRobot('config.ini')
#serv.setPose([0, 0, 0, 0, 0, 0])
#input("Press Enter to continue...")
# Drive the physical servos to the robot model's current joint angles.
serv.setPose(rob.robot.joints)
input("Press Enter to continue...")
# Interactive loop: translate the end-effector pose by -0.1 on the y axis
# each step, solve IK, and command the servos.
while(1):
    currentPose = rob.robot.fk(rob.robot.joints)
    # NOTE(review): translation_matrix is imported via a star-import above
    # (presumably a 4x4 homogeneous transform helper) -- confirm its source.
    trans = translation_matrix([0,-0.1,0])
    newPose = np.matmul(trans,currentPose)
    print(currentPose)
    print(trans)
    print(newPose)
    input("Press Enter to continue...")
    newJointAngles = rob.robot.ik(newPose)
    print(newJointAngles)
    input("Press Enter to continue...")
    serv.setPose(newJointAngles)
    # Keep the software model in sync with the commanded hardware pose.
    rob.robot.joints = newJointAngles
    input("Press Enter to continue...")
serv.ser.close() | [
"dwhis428@gmail.com"
] | dwhis428@gmail.com |
848df2db66999946ac0072ad26a7281ea8eebf06 | 7a110d64c3d1ca8a88623b6fb0453904011552ad | /weather_app/weather/views.py | 75ca56cb64cdd7fbcaeccffc2ff30c9f06367b9b | [] | no_license | shubhamt10/weather-app-django | a93e15ce146fd8fcbd0aaaed7166eb418d97d244 | 539a4d087f88d78621f456aec5ef1a6f261ae4f0 | refs/heads/master | 2021-03-25T11:44:52.885816 | 2020-03-16T05:09:35 | 2020-03-16T05:09:35 | 247,614,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,871 | py | from django.shortcuts import render,redirect
import requests
from .models import City
from .forms import CityForm
# Create your views here.
def index(request):
    """Render the weather dashboard and handle "add city" submissions.

    On POST: validate the submitted name, reject duplicates and cities the
    OpenWeatherMap API does not know, otherwise persist it and report success.
    Always: fetch current weather for every stored city and render the page.
    """
    # NOTE(review): the API key is hard-coded; move it to settings/env config.
    url = 'http://api.openweathermap.org/data/2.5/find?q={}&units=metric&APPID=cc40415657293a697c8f80bd1e548a4b'
    err_msg = ''
    message = ''
    message_class = ''
    if request.method == 'POST':
        form = CityForm(request.POST)
        city_saved = False
        if form.is_valid():
            new_city = form.cleaned_data['name']
            existing_city_count = City.objects.filter(name=new_city).count()
            if existing_city_count == 0:
                r = requests.get(url.format(new_city)).json()
                # NOTE(review): assumes the response always carries a 'list'
                # key -- confirm the API's error payloads before relying on it.
                if len(r['list']) == 0:
                    err_msg = 'City does not exist'
                else:
                    form.save()
                    city_saved = True
            else:
                err_msg = 'City already exists in the database!'
        if err_msg:
            message = err_msg
            message_class = 'is-danger'
        elif city_saved:
            # BUGFIX: the success message was previously shown whenever
            # err_msg was empty, including when the form failed validation
            # and nothing was saved.
            message = 'City added successfully'
            message_class = 'is-success'
    # Present a blank form after handling any submission.
    form = CityForm()
    cities = City.objects.all()
    weather_data = []
    for city in cities:
        r = requests.get(url.format(city)).json()
        data = r['list'][0]
        city_weather = {
            'city': data['name'],
            'temperature': data['main']['temp'],
            'description': data['weather'][0]['description'],
            'icon': data['weather'][0]['icon'],
        }
        weather_data.append(city_weather)
    context = {
        'weather_data': weather_data,
        'form': form,
        'message': message,
        'message_class': message_class
    }
    return render(request, 'weather/weather.html', context)
def delete_city(request,city_name):
    # Remove the stored city and return to the dashboard.
    # NOTE(review): .get() raises City.DoesNotExist (HTTP 500) for an unknown
    # name -- consider get_object_or_404.  Deletion is reachable via GET, so it
    # is not CSRF-protected; confirm this is acceptable.
    City.objects.get(name=city_name).delete()
    return redirect('home')
"tripathi.shubham509@gmail.com"
] | tripathi.shubham509@gmail.com |
f8041e3723b6c11795df50a6b0361b01f3581dbe | c81a507a4c76db54e9e29a2a457a017a8725a8c2 | /scripts/data/CIFAR10.py | 1b8661fae5a1a2e9020944f726264ac045937008 | [] | no_license | CSWater/blitz | b8e5d1f5a69a64a9dc12be7252f95ed40f2178c5 | cc5488f1623f5b3161fa334e6813d499918dcc5e | refs/heads/master | 2020-06-11T00:27:52.466242 | 2016-12-27T07:49:22 | 2016-12-27T07:49:22 | 75,832,706 | 1 | 0 | null | 2016-12-07T12:11:00 | 2016-12-07T12:10:59 | null | UTF-8 | Python | false | false | 3,580 | py | import os
import sys
import cPickle
import h5py
import numpy as np
def _valid_path_append(path, *args):
    """Expand *path*, ensure it exists as a directory, and join suffixes onto it.

    Returns the original path when no suffixes are given, a single joined
    string for one suffix, or a list of joined strings for several.
    Raises ValueError if the (expanded) path exists but is not a directory.
    """
    base = os.path.expanduser(path)
    if not os.path.exists(base):
        os.makedirs(base)
    if not os.path.isdir(base):
        raise ValueError("path: {0} is not a valid directory".format(path))
    joined = [os.path.join(base, suffix) for suffix in args]
    if not joined:
        return path
    if len(joined) == 1:
        return joined[0]
    return joined
def load_cifar10(path="./data", normalize=True, contrast_normalize=False, whiten=False):
    """Load the CIFAR-10 python batches, one-hot encode the labels, apply the
    requested preprocessing, and dump everything to HDF5.

    Returns ((X_train, y_train_onehot), (X_test, y_test_onehot), nclass).
    """
    cifar = dataset_meta['cifar-10']
    workdir, filepath = _valid_path_append(path, '', cifar['file'])
    batchdir = os.path.join(workdir, '')
    # Five training batches of 10k images each (rows of 3072 raw bytes).
    train_batches = [os.path.join(batchdir, 'data_batch_' + str(i)) for i in range(1, 6)]
    Xlist, ylist = [], []
    for batch in train_batches:
        with open(batch, 'rb') as f:
            d = cPickle.load(f)
            Xlist.append(d['data'])
            ylist.append(d['labels'])
    X_train = np.vstack(Xlist)
    y_train = np.vstack(ylist)
    with open(os.path.join(batchdir, 'test_batch'), 'rb') as f:
        d = cPickle.load(f)
        X_test, y_test = d['data'], d['labels']
    y_train = y_train.reshape(-1, 1)
    y_test = np.array(y_test).reshape(-1, 1)
    num_train = y_train.shape[0]
    num_test = y_test.shape[0]
    # One-hot encode the integer class labels into (N, 10) float matrices.
    y_train_new = np.zeros((num_train, 10))
    y_test_new = np.zeros((num_test, 10))
    for col in range(10):
        # BUGFIX: was the chained assignment "y_train_new[:, col] = y_train[:,0] = col",
        # which overwrote every label with `col` and filled the one-hot matrix
        # with the constant instead of comparing against it.
        y_train_new[:, col] = y_train[:, 0] == col
        y_test_new[:, col] = y_test[:, 0] == col
    if contrast_normalize:
        # NOTE(review): global_contrast_normalize is not defined in this
        # module -- confirm it is provided by a sibling import.
        norm_scale = 55.0
        X_train = global_contrast_normalize(X_train, scale=norm_scale)
        X_test = global_contrast_normalize(X_test, scale=norm_scale)
    if normalize:
        # Scale raw byte values into [0, 1].
        X_train = X_train / 255.
        X_test = X_test / 255.
    if whiten:
        # NOTE(review): zca_whiten is not defined in this module either.
        zca_cache = os.path.join(workdir, 'cifar-10-zca-cache.pkl')
        X_train, X_test = zca_whiten(X_train, X_test, cache=zca_cache)
    # Save the four arrays (plus sample counts) as HDF5 files.
    repo_path = os.path.expandvars(os.path.expanduser(workdir))
    save_dir = os.path.join(repo_path, 'HDF5')
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    fname = os.path.join(save_dir, 'train_data.h5')
    file_train_data = h5py.File(fname, 'w')
    fname = os.path.join(save_dir, 'train_label.h5')
    file_train_label = h5py.File(fname, 'w')
    fname = os.path.join(save_dir, 'test_data.h5')
    file_test_data = h5py.File(fname, 'w')
    fname = os.path.join(save_dir, 'test_label.h5')
    file_test_label = h5py.File(fname, 'w')
    file_train_data.create_dataset('data', data = X_train)
    file_train_data.create_dataset('sample_num', data = num_train)
    file_train_label.create_dataset('data', data = y_train_new)
    file_train_label.create_dataset('sample_num', data = num_train)
    file_test_data.create_dataset('data', data = X_test)
    file_test_data.create_dataset('sample_num', data = num_test)
    file_test_label.create_dataset('data', data = y_test_new)
    file_test_label.create_dataset('sample_num', data = num_test)
    file_train_data.close()
    file_train_label.close()
    file_test_data.close()
    file_test_label.close()
    return (X_train, y_train_new), (X_test, y_test_new), 10
# Metadata for the supported datasets; 'func' is the loader entry point and
# 'size' is the expected archive size in bytes.
dataset_meta = {
    'cifar-10': {
        'size': 170498071,
        'file': 'cifar-10-python.tar.gz',
        'url': 'http://www.cs.toronto.edu/~kriz',
        'func': load_cifar10
    }
}
if __name__ == '__main__':
    load_cifar10()
| [
"robinho364@gmail.com"
] | robinho364@gmail.com |
0aa0ebe728e983713ebd5eb41983a47e04759129 | d1f4ab60c5439c4e91025d96384acdedbcb47271 | /DFS,BFS/5-6 미로 탈출.py | 8d23a0a4c92327dfb359e8bd664531de2d099f1c | [] | no_license | ans4572/CodingTest-with-Python | 8722f07967c76073485282c4eee7506a6d75edb6 | ab5e559fd2b8bd73785435c60debd6d558b01ca7 | refs/heads/main | 2023-07-31T10:24:01.685112 | 2021-09-24T16:13:34 | 2021-09-24T16:13:34 | 372,752,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | from collections import deque
# Read the maze: N rows and M columns, then N lines of contiguous 0/1 digits
# (1 = walkable, 0 = wall).
N,M = map(int,input().split())
graph = []
for i in range(N):
    graph.append(list(map(int,input())))
# Row/column offsets for the four cardinal moves (up, right, down, left).
dx = [-1,0,1,0]
dy = [0,1,0,-1]
def BFS(x,y):
    """Breadth-first flood fill from (x, y) over the module-level maze.

    Each reachable walkable cell (value 1) is overwritten with its step
    distance from the start plus one, so graph[N-1][M-1] ends up holding
    the shortest path length to the exit.
    """
    pending = deque([(x, y)])
    while pending:
        cur_x, cur_y = pending.popleft()
        for step_x, step_y in zip(dx, dy):
            nx = cur_x + step_x
            ny = cur_y + step_y
            if 0 <= nx < N and 0 <= ny < M and graph[nx][ny] == 1:
                graph[nx][ny] = graph[cur_x][cur_y] + 1
                pending.append((nx, ny))
BFS(0,0)
print(graph[N-1][M-1]) | [
"noreply@github.com"
] | ans4572.noreply@github.com |
414e11c2131f405f7fa99aba0b23eed0f5667a6f | e6a43cb50cd36ed8bc3f70a3cd830bffb3f03d5e | /set03/q23_sectionlv.py | 4b077194b48d1e5b797178b986dcb9b80fcb16aa | [] | no_license | pizzaboi/nlp100_2_python | a74b4ab0aadffe6a5cd75cd205bca8777ebce8e5 | b36a50442b581c6547ab6f02c7c2b18038a7b420 | refs/heads/master | 2016-09-05T09:31:40.081775 | 2015-12-17T05:53:29 | 2015-12-17T05:53:29 | 34,831,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | #! /usr/bin/python
#-*- coding: utf-8 -*-
"""
USAGE: python q20_readjson.py < jawiki-country.json.gz | python q23_sectionlv.py
"""
import re
import sys
def section_lv():
    """Print each '=='-style MediaWiki heading from stdin with its level.

    A level-n heading is wrapped in 2*(n+1) '=' characters, hence
    count('=') / 2 - 1 (Python 2 integer division).
    NOTE(review): count('=') sees '=' anywhere in the line, so a title that
    itself contains '=' reports the wrong level.
    """
    for line in sys.stdin:
        if line.startswith('=='):
            print line.strip('= \n'), (line.count('=') / 2) - 1
if __name__ == '__main__':
section_lv() | [
"tophamhatt.crazy@gmail.com"
] | tophamhatt.crazy@gmail.com |
d080f213409dd5c85811969f8b2e789bd4d38370 | 78b165272ba2cb3719de3c27e83d9c5b3b6b0a85 | /scripts/python/minheap.py | 9e4ddae3174b98e471e0e7a2d94196a4efea0227 | [] | no_license | SeaTalk/Funcodes | 5fcd4727565d2ca99d226b04bd58385218bd2fbf | bd34b19fa99be42433e1305f2f8c6bef14a6bb3f | refs/heads/master | 2021-10-11T22:11:18.898476 | 2021-09-30T07:26:34 | 2021-09-30T07:26:34 | 140,306,910 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,082 | py | #!/usr/bin/python
import string
class MinHeap(object):
    """Fixed-capacity min-heap of (label, score) pairs ordered by item[1].

    Used to keep the top-`capacity` items by score: items are buffered until
    the heap is full, heapified once, and afterwards a new item replaces the
    root only when its score exceeds the current minimum.
    """
    def __init__(self, cap):
        self.data = []
        self.count = len(self.data)
        self.capacity = cap
    def size(self):
        """Number of items currently held."""
        return self.count
    def isEmpty(self):
        return self.count == 0
    def getData(self):
        """Return the backing list (heap-ordered once capacity is reached)."""
        return self.data
    def insert(self, item):
        """Offer `item`; keeps only the `capacity` largest items by item[1].

        Returns 0 while filling, build_heap()'s result when capacity is first
        reached, fix_heap(0)'s result when the root is replaced, and None when
        the item is discarded.
        """
        if self.count < self.capacity:
            self.data.append(item)
            self.count += 1
            if self.count == self.capacity:
                return self.build_heap()
            return 0
        if item[1] > self.data[0][1]:
            self.data[0] = item
            return self.fix_heap(0)
    def build_heap(self):
        """Heapify the buffered items in place (bottom-up). Returns 0, or -1 if empty.

        BUGFIX: the previous version used float division `(count-1)/2` (a
        TypeError as an index under Python 3) and made only a single
        comparison per node without sifting down, which does not establish
        the heap invariant in general.
        """
        if self.count < 1:
            return -1
        # Sift down every internal node, last parent first.
        for node in range((self.count - 2) // 2, -1, -1):
            self.fix_heap(node)
        return 0
    def fix_heap(self, ind):
        """Sift the item at index `ind` down to restore the min-heap property.

        Returns 0. BUGFIX: the previous version kept descending into a child
        even when no swap was needed instead of stopping.
        """
        index = ind
        while True:
            left = (index << 1) + 1
            right = (index + 1) << 1
            smallest = index
            if left < self.count and self.data[left][1] < self.data[smallest][1]:
                smallest = left
            if right < self.count and self.data[right][1] < self.data[smallest][1]:
                smallest = right
            if smallest == index:
                break
            self.data[index], self.data[smallest] = self.data[smallest], self.data[index]
            index = smallest
        return 0
| [
"tangjintao@jd.com"
] | tangjintao@jd.com |
5d03c3f6f21cf2a5cf29fc8907a7adfcc620d57f | 2ad41c2a31618433568c86e63f68a3ef2918d55c | /tool/Modules/cfg_scripts.py | 25ca07351b013433ffe1409fb953f7919d31d99b | [
"MIT"
] | permissive | Iemnur/megaman-zx-traducao-ptbr | 7cad0b7f7bcfd6692fe850f3c6c4e26ab2b90f63 | f2710a06052384cf93d423681e9875c6cd424f06 | refs/heads/master | 2021-12-14T20:13:48.206022 | 2020-05-26T01:53:10 | 2020-05-26T01:53:10 | 82,298,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | '''
Created on 05/03/2013
@author: diego.hahn
'''
import os.path
import sys
# Locate the interpreter's site-packages (Windows layout: <python>\Lib\site-packages).
python_path = os.path.dirname( sys.executable )
packages_path = os.path.join( python_path , r"Lib\site-packages" )
scripts_path = os.path.dirname( os.path.abspath( __file__ ) )
# Library sub-folders (relative to this script) to expose on sys.path;
# the empty entry r"" adds the script's own folder.
libs = [r"" , r"rhCompression", r"rhImages", r"pytable"]
# Write a .pth file so Python appends each existing folder to sys.path at startup.
with open( os.path.join( packages_path , "mylibs.pth" ), "w" ) as pth:
    for lib in libs:
        lib_path = os.path.join( scripts_path, lib )
        if os.path.isdir( lib_path ):
            print( ">>> Adding %s to pth file" % lib )
            pth.write( "%s\n" % lib_path )
| [
"hansen.hahn@gmail.com"
] | hansen.hahn@gmail.com |
f50377730a35ff7aa5b58fa06bcf47fcd71189ea | 033da72a51c76e5510a06be93229a547a538cf28 | /Data Engineer with Python Track/20. Introduction to Spark SQL in Python/Chapter/01. Pyspark SQL/02-Determine the column names of a table.py | a60646c8daa0abfe3fe390558fd3a17b52d8658c | [] | no_license | ikhwan1366/Datacamp | d5dcd40c1bfeb04248977014260936b1fb1d3065 | 7738614eaebec446842d89177ae2bc30ab0f2551 | refs/heads/master | 2023-03-06T13:41:06.522721 | 2021-02-17T22:41:54 | 2021-02-17T22:41:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | '''
Determine the column names of a table
The video lesson showed how to run an SQL query. It also showed how to inspect the column names of a Spark table using SQL. This is important to know because in practice relational tables are typically provided without additional documentation giving the table schema.
Don't hesitate to refer to the slides available at the right of the console if you forget how something was done in the video.
Instructions
100 XP
- Use a DESCRIBE query to determine the names and types of the columns in the table schedule.
'''
# Inspect the columns in the table df
# `spark` is the exercise-provided SparkSession; DESCRIBE prints each column's
# name, type and comment for the registered table `schedule`.
spark.sql("DESCRIBE schedule").show()
| [
"surel.chandrapratama@gmail.com"
] | surel.chandrapratama@gmail.com |
d8ec9342e39bee550628a862353198ee5b37bcd4 | cd2a90eb6df7361a2ed00312ff1be196bf686f0e | /homework/conftest.py | 389e88c0dba10d7f481700966d7f3336333c57db | [] | no_license | Liuzicheng1994/auto | 8006337b530b80fea39cd36a04bf103e2a3ba711 | e9a4069f454d0be0e5be2dd79700944623dea153 | refs/heads/master | 2023-07-30T17:59:28.558035 | 2021-09-10T01:40:00 | 2021-09-10T01:40:00 | 356,810,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | import pytest
from homework.calculator import Caculator
@pytest.fixture()
def calculate():
    """Yield a fresh Caculator instance to each test.

    The prints bracket setup ("开始计算" = start) and teardown
    ("结束计算" = done); code after `yield` runs once the test finishes.
    """
    print("开始计算")
    cal =Caculator()
    yield cal
    print("结束计算")
"569974494@qq.com"
] | 569974494@qq.com |
35365a1db34207810102ae5b1c2ee05f251e40f1 | 93c1d86cfa6893063519a89e73a647f0c2b48257 | /stronka/migrations/0033_auto_20190617_1916.py | 8e7099f79b5e85ec7f89d6a13685914ca616f883 | [] | no_license | panchrobry/ProjektSzkieletoweREPO | 24629649989a2eefe6af199e31b84e0b8d88b9b8 | a0eeb244cfa8fc829739932ce167802b54f62382 | refs/heads/master | 2022-12-10T04:51:03.773554 | 2019-06-20T11:13:09 | 2019-06-20T11:13:09 | 177,784,319 | 0 | 0 | null | 2022-11-21T21:31:28 | 2019-03-26T12:29:41 | Python | UTF-8 | Python | false | false | 453 | py | # Generated by Django 2.0.13 on 2019-06-17 17:16
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter team.Register_Date's default.

    NOTE(review): the default is a naive datetime frozen at the moment
    makemigrations ran (2019-06-17) -- every new row reuses that instant;
    auto_now_add or timezone.now was probably intended.
    """
    dependencies = [
        ('stronka', '0032_auto_20190617_1737'),
    ]
    operations = [
        migrations.AlterField(
            model_name='team',
            name='Register_Date',
            field=models.DateField(default=datetime.datetime(2019, 6, 17, 19, 16, 43, 700263)),
        ),
    ]
| [
"karolek9.10@o2.pl"
] | karolek9.10@o2.pl |
69d495704dd2fc3552f5d20c65f2c842be179e23 | 164ebfa24817cd7d4c5dbf760c1600232aebee1c | /src/filer/observers/observer.py | 086f2765aafd9ab0ef6d4cd8cfb8657ebcc293d2 | [] | no_license | adarshtri/filer | bd3806e4d8b3859b5cc68a30ed4ee2e46178342d | 985fcb9a9792ba18603771489bb6dbdae19d638d | refs/heads/master | 2020-05-19T05:49:34.631133 | 2019-05-04T06:08:05 | 2019-05-04T06:08:05 | 184,858,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | from interface import Interface
class Observer(Interface):
"""Interface to support observers for newly registered directories"""
def update(self, updateinfo: dict):
"""
:param updateinfo: dictionary containing information regarding the updation.
:return: None
"""
pass
| [
"adarsh.trivedi100@gmail.com"
] | adarsh.trivedi100@gmail.com |
7fd98a807bdf12bc76653e933673247354cf7d49 | 692a43899841bf0569c4f44bf2bba435eea71bbe | /pycloud/pycloud/network/cloudlet_dns.py | 3850491cebff1b2b95defae29992e92f2166ad82 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT",
"BSD-2-Clause"
] | permissive | SEI-TAS/pycloud | 52a4403cdbdb2eaeabf18c377a806089591aea31 | beebe297b3ed35de0bc53b75f9f2b4a2e80d892a | refs/heads/master | 2023-06-22T08:19:46.038367 | 2023-06-15T18:24:31 | 2023-06-15T18:24:31 | 30,374,770 | 5 | 4 | NOASSERTION | 2023-06-15T18:24:32 | 2015-02-05T19:46:50 | Python | UTF-8 | Python | false | false | 3,913 | py | # KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
import dynamic_dns
import os
from pycloud.pycloud.network.tsig import load_tsig_key
SVMS_ZONE_NAME = 'svm.cloudlet.local.'
CLOUDLET_HOST_NAME = 'cloudlet'
# Internal file path, relative to data folder.
KEY_FILE_PATH = 'dns/Ksvm.cloudlet.local.private'
#################################################################################################################
# Object used to manage the cloudlet DNS server.
#################################################################################################################
class CloudletDNS(object):
#################################################################################################################
# Constructor.
#################################################################################################################
def __init__(self, root_data_folder):
full_path = os.path.join(os.path.abspath(root_data_folder), KEY_FILE_PATH)
self.key = load_tsig_key(full_path)
#################################################################################################################
# Generates a FQDN in our SVM zone from a hostname.
#################################################################################################################
@staticmethod
def generate_fqdn(hostname):
return hostname + "." + SVMS_ZONE_NAME
#################################################################################################################
# Registers an SVM.
#################################################################################################################
def register_svm(self, svm_fqdn, ip_address=None):
# Depending on networking mode, we will need to register an explicit A record with an IP, or a cname to cloudlet.
if ip_address:
record_value = ip_address
record_type = 'A'
else:
record_value = CLOUDLET_HOST_NAME
record_type = 'CNAME'
if not self.key:
print "Can't register SVM: TSIG key not loaded."
return
dynamic_dns.add_dns_record(SVMS_ZONE_NAME, self.key, svm_fqdn, record_value, record_type=record_type)
#################################################################################################################
# Unregisters an SVM.
#################################################################################################################
def unregister_svm(self, svm_fqdn):
if not self.key:
print "Can't unregister SVM: TSIG key not loaded."
return
dynamic_dns.remove_dns_record(SVMS_ZONE_NAME, self.key, svm_fqdn)
| [
"sebastian.echeverria@gmail.com"
] | sebastian.echeverria@gmail.com |
f8b10ec3dbc9c3b2d9a88a5d5b82dd518963aa1a | 155b365fb459caff5f57f9e5eb55a26895a016cd | /evento_01/evento_01/wsgi.py | a69659105175ae9239e21c792c5af9e5efe7552a | [] | no_license | Claison/EVENTO | a43fa39eb3ea4fdd29c4c9ffd858b6e20be1c02b | 61b97f980f7cd77b4de92088c6601378a520ea86 | refs/heads/master | 2021-06-25T18:55:51.229559 | 2017-09-12T00:59:17 | 2017-09-12T00:59:17 | 103,141,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | """
WSGI config for evento_01 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings (unless already configured), then
# expose the WSGI callable the application server looks for.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "evento_01.settings")
application = get_wsgi_application()
| [
"30608657+Claison@users.noreply.github.com"
] | 30608657+Claison@users.noreply.github.com |
229ad85295f1114269334096a485f0e1a78646d3 | 17d7d9a35c617132965f331060e846d9fe12f129 | /lessons/16/serialization2.py | f93ed3958b09fb964dae1bb369120a7ae61b2498 | [] | no_license | loristissino/oopython | c46f3e07dc6c4829362efb43f5bbfb496053747b | 0e3ab872f8c2736a0e2d189f7d9700997a3c857a | refs/heads/master | 2016-09-05T11:33:54.287376 | 2011-12-14T17:01:45 | 2011-12-14T17:01:45 | 40,006,358 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | #!/usr/bin/env python3.1
import pickle
# Round-trip demo: pickle a tuple to a file, then load it back and print it.
# NOTE: never unpickle data from an untrusted source -- it can execute code.
data=('foo', 'bar', 'baz')
with open('mydata', 'wb') as f:
    pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
# Clear the variable to prove the value really comes from the file.
data=()
with open('mydata', 'rb') as f:
    data = pickle.load(f)
print(data)
| [
"loris.tissino@5be9ef65-fe4e-d553-c31d-209ad9341544"
] | loris.tissino@5be9ef65-fe4e-d553-c31d-209ad9341544 |
bfc85210f3b259e11c7149a04537292c0612d22b | dfb193385a1d15f0aa0898ee67a7abd837666a5f | /RPi4B/dht22.py | eb036879e6ac282710090a6af7b1b70e8719d759 | [] | no_license | OSHW-UHU-Group/SBA-IST | ed3a25e01c9efbc35262187c1b6f29f812138c9e | 3c3a2cfafea0754f6aa51151db828401a7048ab6 | refs/heads/master | 2023-06-16T08:29:26.648085 | 2021-07-09T11:36:52 | 2021-07-09T11:36:52 | 379,353,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | import time
import board
import adafruit_dht
dht = adafruit_dht.DHT22(board.D26, use_pulseio=False)
def humidity():
    """Read the DHT22 sensor, retrying up to 10 times one second apart.

    Returns (relative_humidity, temperature_celsius) on success; implicitly
    returns None after 10 consecutive failed reads -- callers must handle
    that case before unpacking.
    """
    global dht  # declaration only; `dht` is read, never reassigned here
    i = 0
    while i<10:
        try:
            temperature_c = dht.temperature
            humidity = dht.humidity
            return humidity, temperature_c
        except RuntimeError as error:
            # DHT reads intermittently fail at the protocol level; wait and retry.
            i = i + 1
            time.sleep(1.0)
            continue
#
#while 1:
# h, t= humidity()
# print(h)
# print(t)
# time.sleep(1) | [
"alvarez@uhu.es"
] | alvarez@uhu.es |
a07db48cd66c17dc6ba5bed0065585a071c5c8a4 | 8f83fb0541a9b542de4f4456bd2c14d6ed188b95 | /mysite/config/settings/local.py | d8c13717c8331dda3aae85834587525058f6ad0e | [] | no_license | klee2017/Deploy-EB-docker | fc3b8c7d577f141b0273290f6fd8716f2e423204 | b78292d6ef2f38987a1b6ad816ffaa89c4698789 | refs/heads/master | 2021-07-24T12:23:02.248951 | 2017-11-03T08:01:02 | 2017-11-03T08:01:02 | 108,818,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | import random
import string
from .base import *
ALLOWED_HOSTS = [
'localhost',
'127.0.0.1',
'.elasticbeanstalk.com',
'.locomoco.co.kr',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
SECRET_KEY = ''.join(
[random.choice(string.ascii_lowercase) for i in range(40)]
) | [
"kaythechemist@gmail.com"
] | kaythechemist@gmail.com |
0aac049c8263f7e956cea14027ed8e142b6344e5 | 0931696940fc79c4562c63db72c6cabfcb20884d | /Exercises/Regular_Expresions/furniture.py | 8a02f7b386384bfbe0d6b9fe2cf832c3b0cd53d3 | [] | no_license | ivklisurova/SoftUni_Fundamentals_module | f847b9de9955c8c5bcc057bb38d57162addd6ad8 | 69242f94977c72005f04da78243a5113e79d6c33 | refs/heads/master | 2021-12-01T01:56:22.067928 | 2021-11-08T17:07:31 | 2021-11-08T17:07:31 | 253,281,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | import re
furniture = []
total_money = 0
while True:
order = input()
if order == 'Purchase':
break
pattern = r'>{2}([a-zA-z]+)<{2}(\d+[.]\d+|\d+)!(\d+)'
matches = re.findall(pattern, order)
for i in matches:
if len(i) == 0:
break
furniture.append(i[0])
total_money += float(i[1]) * float(i[2])
print('Bought furniture:')
[print(x) for x in furniture]
print(f'Total money spend: {total_money:.2f}')
| [
"55747390+ivklisurova@users.noreply.github.com"
] | 55747390+ivklisurova@users.noreply.github.com |
e4902d2ddf6c8dfca1b465dcd01596243d1635fb | 063a45f56dab2a1ae77aaa3a96539fe25512e15c | /gazebo_turtlebot3_dqlearn.py | 1af1209067bfd6c2a3096ab19a6e0700c931d28e | [] | no_license | changpowei/ROS_Gazebo_SB | 44fe7c4312faad6b095a956e5074c26dfed25328 | 229759fd13f98d36e3802f4e4ac3ced19d4dab14 | refs/heads/master | 2023-08-23T21:34:22.277892 | 2021-10-21T05:38:49 | 2021-10-21T05:38:49 | 419,157,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,481 | py | import rospy
import roslaunch
import time
import numpy as np
import math
import random
from gazebo_msgs.srv import SpawnModel, DeleteModel, SetModelState
from gazebo_msgs.msg import ModelState
from geometry_msgs.msg import Twist
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Point
from nav_msgs.msg import Odometry
import tf
from sensor_msgs.msg import LaserScan
from std_srvs.srv import Empty
"""
There are 3 different maze map in this packet
After start one of them with launch file you have to edit this parameter
for that maze.
Options:
maze1
maze2
maze3
"""
SELECT_MAP = "maze1"
class AgentPosController():
'''
This class control robot position
We teleport our agent when environment reset
So agent start from different position in every episode
'''
def __init__(self):
self.agent_model_name = "turtlebot3_waffle"
def teleportRandom(self):
'''
Teleport agent return new x and y point
return agent posX, posY in list
'''
model_state_msg = ModelState()
model_state_msg.model_name = self.agent_model_name
"""Set the position of the begin"""
if SELECT_MAP == "maze1":
# maze 1
"""
xy_list = [
[-1.5, 0.5], [-1.5, 1.5], [-0.5, 0.5], [-0.5, 1.5],
[0.5, -0.5], [0.5, -1.5], [2.5, -0.5], [2.5, 0.5],
[5.5,-1.5], [5.5,-0.5], [5.5,0.5], [5.5,1.5]
]
"""
xy_list = [
[-1.5, 0.5], [-1.5, 1.5], [-0.5, 0.5], [-0.5, 1.5],
[0.5, -0.5], [0.5, -1.5], [2.5, -0.5], [2.5, 0.5],
[5.5,-1.5], [5.5,-0.5], [5.5,0.5], [5.5,1.5]
]
elif SELECT_MAP == "maze2":
# maze 2
xy_list = [
[-1.5,-1.5], [-0.5,-1.5], [-1.5,-0.5],
[-0.5,1.5], [1.5,0.5],
[2.5,2.5], [2.5,3.5], [1.5,3.5],
]
else:
# maze 3
xy_list = [
[0.5,0.5], [1.5,0.5], [0.5,1.5], [1.5,1.5],
[-0.5,-0.5], [-1.5,-0.5], [-1.5,-1.5],
[0.5,-0.5], [0.5,-1.5], [1.5,-1.5],
[-1.5,0.5], [-0.5,1.5], [-1.5,1.5],
]
# Get random position for agent
"""
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
"""
pose = Pose()
pose.position.x, pose.position.y = random.choice(xy_list)
model_state_msg.pose = pose
"""
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
"""
model_state_msg.twist = Twist()
"""
string reference_frame
# set pose/twist relative to the frame of this entity (Body/Model)
# leave empty or "world" or "map" defaults to world-frame
"""
model_state_msg.reference_frame = "world"
# Start teleporting in Gazebo
isTeleportSuccess = False
for i in range(5):
if not isTeleportSuccess:
try:
rospy.wait_for_service('/gazebo/set_model_state')
telep_model_prox = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
telep_model_prox(model_state_msg)
isTeleportSuccess = True
break
except Exception as e:
rospy.logfatal("Error when teleporting agent " + str(e))
else:
rospy.logwarn("Trying to teleporting agent..." + str(i))
time.sleep(2)
if not isTeleportSuccess:
rospy.logfatal("Error when teleporting agent")
return "Err", "Err"
return pose.position.x, pose.position.y
class GoalController():
"""
This class controls target model and position
"""
def __init__(self):
self.model_path = "../models/gazebo/goal_sign/model.sdf"
f = open(self.model_path, 'r')
self.model = f.read()
self.goal_position = Pose()
self.goal_position.position.x = None # Initial positions
self.goal_position.position.y = None
self.last_goal_x = self.goal_position.position.x
self.last_goal_y = self.goal_position.position.y
self.model_name = 'goal_sign'
self.check_model = False # This used to checking before spawn model if there is already a model
def respawnModel(self):
'''
Spawn model in Gazebo
'''
isSpawnSuccess = False
for i in range(5):
if not self.check_model: # This used to checking before spawn model if there is already a model
try:
rospy.wait_for_service('gazebo/spawn_sdf_model')
spawn_model_prox = rospy.ServiceProxy('gazebo/spawn_sdf_model', SpawnModel)
spawn_model_prox(self.model_name, self.model, 'robotos_name_space', self.goal_position, "world")
isSpawnSuccess = True
self.check_model = True
break
except Exception as e:
rospy.logfatal("Error when spawning the goal sign " + str(e))
else:
rospy.logwarn("Trying to spawn goal sign ..." + str(i))
time.sleep(2)
if not isSpawnSuccess:
rospy.logfatal("Error when spawning the goal sign")
def deleteModel(self):
'''
Delete model from Gazebo
'''
while True:
if self.check_model:
try:
rospy.wait_for_service('gazebo/delete_model')
del_model_prox = rospy.ServiceProxy('gazebo/delete_model', DeleteModel)
del_model_prox(self.model_name)
self.check_model = False
break
except Exception as e:
rospy.logfatal("Error when deleting the goal sign " + str(e))
else:
break
def calcTargetPoint(self):
"""
This function return a target point randomly for robot
"""
self.deleteModel()
# Wait for deleting
time.sleep(0.5)
if SELECT_MAP == "maze1":
# maze 1
"""
goal_xy_list = [
[-1.5, 0.5], [-1.5, 1.5], [-0.5, 0.5], [-0.5, 1.5],
[0.5, -0.5], [0.5, -1.5], [2.5, -0.5], [2.5, 0.5],
[5.5,-1.5], [5.5,-0.5], [5.5,0.5], [5.5,1.5]
]
"""
goal_xy_list = [
[-1.5, 0.5], [-1.5, 1.5], [-0.5, 0.5], [-0.5, 1.5],
[0.5, -0.5], [0.5, -1.5], [2.5, -0.5], [2.5, 0.5],
[5.5,-1.5], [5.5,-0.5], [5.5,0.5], [5.5,1.5]
]
elif SELECT_MAP == "maze2":
# maze 2
goal_xy_list = [
[-1.5,-1.5], [-0.5,-1.5], [-1.5,-0.5],
[-0.5,1.5], [1.5,0.5],
[2.5,2.5], [2.5,3.5], [1.5,3.5],
]
else:
# maze 3
goal_xy_list = [
[0.5,0.5], [1.5,0.5], [0.5,1.5], [1.5,1.5],
[-0.5,-0.5], [-1.5,-0.5], [-1.5,-1.5],
[0.5,-0.5], [0.5,-1.5], [1.5,-1.5],
[-1.5,0.5], [-0.5,1.5], [-1.5,1.5],
]
# Check last goal position not same with new goal
while True:
self.goal_position.position.x, self.goal_position.position.y = random.choice(goal_xy_list)
if self.last_goal_x != self.goal_position.position.x:
if self.last_goal_y != self.goal_position.position.y:
break
# Spawn goal model
self.respawnModel()
self.last_goal_x = self.goal_position.position.x
self.last_goal_y = self.goal_position.position.y
# Inform user
rospy.logwarn("New goal position : " + str(self.goal_position.position.x) + " , " + str(self.goal_position.position.y))
return self.goal_position.position.x, self.goal_position.position.y
def getTargetPoint(self):
return self.goal_position.position.x, self.goal_position.position.y
class Turtlebot3GymEnv():
'''
Main Gazebo environment class
Contains reset and step function
'''
def __init__(self):
# Initialize the node
rospy.init_node('turtlebot3_gym_env', anonymous=True)
# Connect to gazebo
self.velPub = rospy.Publisher('/cmd_vel', Twist, queue_size=5)
self.unpause = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)
self.pause = rospy.ServiceProxy('/gazebo/pause_physics', Empty)
self.reset_proxy = rospy.ServiceProxy(
'/gazebo/reset_simulation', Empty)
self.laserPointCount = 24 # 24 laser point in one time
self.minCrashRange = 0.2 # Asume crash below this distance
self.laserMinRange = 0.2 # Modify laser data and fix min range to
self.laserMaxRange = 10.0 # Modify laser data and fix max range to
self.stateSize = self.laserPointCount + 4 # Laser(arr), heading, distance, obstacleMinRange, obstacleAngle
self.actionSize = 5 # Size of the robot's actions
self.targetDistance = 0 # Distance to target
self.targetPointX = 0 # Target Pos X
self.targetPointY = 0 # Target Pos Y
# Means robot reached target point. True at beginning to calc random point in reset func
self.isTargetReached = True
self.goalCont = GoalController()
self.agentController = AgentPosController()
def pauseGazebo(self):
'''
Pause the simulation
'''
rospy.wait_for_service('/gazebo/pause_physics')
try:
self.pause()
except Exception:
print("/gazebo/pause_physics service call failed")
def unpauseGazebo(self):
'''
Unpause the simulation
'''
rospy.wait_for_service('/gazebo/unpause_physics')
try:
self.unpause()
except Exception:
print("/gazebo/unpause_physics service call failed")
def resetGazebo(self):
'''
Reset simualtion to initial phase
'''
rospy.wait_for_service('/gazebo/reset_simulation')
try:
self.reset_proxy()
except Exception:
print("/gazebo/reset_simulation service call failed")
def getLaserData(self):
'''
ROS callback function
return laser scan in 2D list
'''
try:
laserData = rospy.wait_for_message('/scan', LaserScan, timeout=5)
return laserData
except Exception as e:
rospy.logfatal("Error to get laser data " + str(e))
def getOdomData(self):
'''
ROS callback function
Modify odom data quaternion to euler
return yaw, posX, posY of robot known as Pos2D
'''
try:
odomData = rospy.wait_for_message('/odom', Odometry, timeout=5)
odomData = odomData.pose.pose
quat = odomData.orientation
quatTuple = (
quat.x,
quat.y,
quat.z,
quat.w,
)
roll, pitch, yaw = tf.transformations.euler_from_quaternion(
quatTuple)
robotX = odomData.position.x
robotY = odomData.position.y
return yaw, robotX, robotY
except Exception as e:
rospy.logfatal("Error to get odom data " + str(e))
def calcHeadingAngle(self, targetPointX, targetPointY, yaw, robotX, robotY):
'''
Calculate heading angle from robot to target
return angle in float
'''
targetAngle = math.atan2(targetPointY - robotY, targetPointX - robotX)
heading = targetAngle - yaw
if heading > math.pi:
heading -= 2 * math.pi
elif heading < -math.pi:
heading += 2 * math.pi
return round(heading, 2)
def calcDistance(self, x1, y1, x2, y2):
'''
Calculate euler distance of given two points
return distance in float
'''
return math.sqrt((x1 - x2)**2 + (y1 - y2)**2)
def calculateState(self, laserData, odomData):
'''
Modify laser data
Calculate heading angle
Calculate distance to target
Calculate min range to nearest obstacle
Calculate angle to nearest obstacle
returns state as np.array
State contains:
laserData, heading, distance, obstacleMinRange, obstacleAngle
'''
heading = self.calcHeadingAngle(
self.targetPointX, self.targetPointY, *odomData)
_, robotX, robotY = odomData
distance = self.calcDistance(
robotX, robotY, self.targetPointX, self.targetPointY)
isCrash = False # If robot hit to an obstacle
laserData = list(laserData.ranges)
for i in range(len(laserData)):
if (self.minCrashRange > laserData[i] > 0):
isCrash = True
if np.isinf(laserData[i]):
laserData[i] = self.laserMaxRange
if np.isnan(laserData[i]):
laserData[i] = 0
obstacleMinRange = round(min(laserData), 2)
obstacleAngle = np.argmin(laserData)
return laserData + [heading, distance, obstacleMinRange, obstacleAngle], isCrash
def step(self, action):
'''
Act in envrionment
After action return new state
Calculate reward
Calculate bot is crashed or not
Calculate is episode done or not
returns state as np.array
State contains:
laserData, heading, distance, obstacleMinRange, obstacleAngle, reward, done
'''
self.unpauseGazebo()
# Move
maxAngularVel = 1.5
angVel = ((self.actionSize - 1)/2 - action) * maxAngularVel / 2
velCmd = Twist()
velCmd.linear.x = 0.15
velCmd.angular.z = angVel
self.velPub.publish(velCmd)
# More basic actions
"""
if action == 0: #BRAKE LEFT
velCmd = Twist()
velCmd.linear.x = 0.17
velCmd.angular.z = 1.6
self.velPub.publish(velCmd)
elif action == 1: #LEFT
velCmd = Twist()
velCmd.linear.x = 0.17
velCmd.angular.z = 0.8
self.velPub.publish(velCmd)
elif action == 2: #FORWARD
velCmd = Twist()
velCmd.linear.x = 0.17
velCmd.angular.z = 0.0
self.velPub.publish(velCmd)
elif action == 3: #RIGHT
velCmd = Twist()
velCmd.linear.x = 0.17
velCmd.angular.z = -0.8
self.velPub.publish(velCmd)
elif action == 4: #BRAKE RIGHT
velCmd = Twist()
velCmd.linear.x = 0.17
velCmd.angular.z = -1.6
self.velPub.publish(velCmd)
"""
# Observe
laserData = self.getLaserData()
odomData = self.getOdomData()
self.pauseGazebo()
state, isCrash = self.calculateState(laserData, odomData)
done = False
if isCrash:
done = True
distanceToTarget = state[-3]
if distanceToTarget < 0.2: # Reached to target
self.isTargetReached = True
if isCrash:
reward = -150
elif self.isTargetReached:
# Reached to target
rospy.logwarn("Reached to target!")
reward = 200
# Calc new target point
self.targetPointX, self.targetPointY = self.goalCont.calcTargetPoint()
self.isTargetReached = False
else:
# Neither reached to goal nor crashed calc reward for action
yawReward = []
currentDistance = state[-3]
heading = state[-4]
# Calc reward
# reference https://emanual.robotis.com/docs/en/platform/turtlebot3/ros2_machine_learning/
for i in range(self.actionSize):
angle = -math.pi / 4 + heading + (math.pi / 8 * i) + math.pi / 2
tr = 1 - 4 * math.fabs(0.5 - math.modf(0.25 + 0.5 * angle % (2 * math.pi) / math.pi)[0])
yawReward.append(tr)
try:
distanceRate = 2 ** (currentDistance / self.targetDistance)
except Exception:
print("Overflow err CurrentDistance = ", currentDistance, " TargetDistance = ", self.targetDistance)
distanceRate = 2 ** (currentDistance // self.targetDistance)
reward = ((round(yawReward[action] * 5, 2)) * distanceRate)
return np.asarray(state), reward, done
def reset(self):
'''
Reset the envrionment
Reset bot position
returns state as np.array
State contains:
laserData, heading, distance, obstacleMinRange, obstacleAngle
'''
self.resetGazebo()
while True:
# Teleport bot to a random point
agentX, agentY = self.agentController.teleportRandom()
if self.calcDistance(self.targetPointX, self.targetPointY, agentX, agentY) > self.minCrashRange:
break
else:
rospy.logerr("Reteleporting the bot!")
time.sleep(2)
if self.isTargetReached:
while True:
self.targetPointX, self.targetPointY = self.goalCont.calcTargetPoint()
if self.calcDistance(self.targetPointX, self.targetPointY, agentX, agentY) > self.minCrashRange:
self.isTargetReached = False
break
else:
rospy.logerr("Recalculating the target point!")
time.sleep(2)
# Unpause simulation to make observation
self.unpauseGazebo()
laserData = self.getLaserData()
odomData = self.getOdomData()
self.pauseGazebo()
state, isCrash = self.calculateState(laserData, odomData)
self.targetDistance = state[-3]
self.stateSize = len(state)
return np.asarray(state) # Return state
| [
"c95cpw@ncsist.org.tw"
] | c95cpw@ncsist.org.tw |
9670e8c23fe063f1c2f789cdbab33cb5eb371392 | 543efc7b32ab55a44ba4b85029c322228ac207db | /scripts/fit_aves.py | 80a60cb3346d348755ab51ba454b7a6467e7899a | [] | no_license | hmnaik/aves | 2238627b0a73237e74479f67eb78ee198283aab1 | 92cb2c5a50fd65d7056f93f209931e721946e818 | refs/heads/main | 2023-05-05T13:20:37.315266 | 2021-05-19T17:56:55 | 2021-05-19T17:56:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,381 | py | import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
import torch
import _init_paths
from models import mesh_regressor, AVES
from optimization import base_renderer, AVES_Fitter
from utils.renderer import Renderer
from utils.cub_dataset import CUB_Dataset
parser = argparse.ArgumentParser()
parser.add_argument('--device', default='cuda', help='Device to use')
parser.add_argument('--species_id', default=2, type=int, help='Species to run reconstruction')
args = parser.parse_args()
device = args.device
species_id = args.species_id
# dataset
root = 'data/CUB'
dataset = CUB_Dataset(root, species_id=species_id)
images = dataset.images
keypoints = dataset.keypoints
segmentations = dataset.segmentations
# objects
size = 256
focal = 2167
aves = AVES(device=device, high_res=True)
renderer = Renderer(focal, (size/2, size/2), img_w=size, img_h=size, faces=aves.dd['F'])
silhouette_renderer = base_renderer(size=256, focal=2167, device=device)
regressor = mesh_regressor(device=device)
avesfit = AVES_Fitter(model=aves, prior_weight=10, mask_weight=1, beta_weight=150,
global_iters=180, pose_iters=300, mask_iters=100,
renderer=silhouette_renderer, device=device)
# Regression to initialize
print('Reconstructing', dataset.species, 'using AVES')
print('Initializing ...')
with torch.no_grad():
k = torch.tensor(keypoints).float().to(device)
k[:, [9,15], :] = 0
k = k.reshape(-1, 54)
pose, bone, tran = regressor(k)
pose = regressor.postprocess(pose)
# Optimize alignment
print('Optimizing AVES ...')
masks = torch.tensor(segmentations).clone().float().to(device)
kpts = torch.tensor(keypoints).clone().float().to(device)
pose_op, bone_op, tran_op, beta, model_mesh, model_kpts = avesfit(pose, bone, tran,
focal_length=2167, camera_center=128,
keypoints=kpts, masks=masks.squeeze(1), favor_mask=True)
# Render and save all results
print('Saving results ...')
output_dir = 'output_aves_{}'.format(species_id)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
for i in range(len(images)):
img = images[i]
img_aves, _ = renderer(model_mesh[i].detach().cpu().numpy(), np.eye(3), [0,0,0], img)
img_out = np.hstack([img, img_aves])
plt.imsave(output_dir + '/{:04d}.png'.format(i), img_out.astype(np.uint8))
| [
"yufu@seas.upenn.edu"
] | yufu@seas.upenn.edu |
dd58b30a002ad28ef364d7b282cc7d5afacbd9da | 2a115b2a0a4d3c87ae6d262ba97a673a6c9a748b | /web-py/myWeb/get_img.py | 741b804d6dfce4281c843bbc639d1882cda926de | [] | no_license | GoldenZhu/classroom | 935407db720ecdda2c0ad05973a36784a205d0ab | 26672b20477d3ae1d5aac9713302f1d3a6d1d834 | refs/heads/master | 2021-01-21T10:19:40.625219 | 2017-11-17T09:00:39 | 2017-11-17T09:00:45 | 83,407,700 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | import urllib
import time
import web
def get_poster(id, url):
pic = urllib.urlopen(url).read()
file_name = 'poster/%s.jpg' % id
f = file(file_name, "wb")
f.write(pic)
f.close()
db = web.database(dbn='sqlite', db='MovieSite.db')
movies = db.select('movie')
count = 0
for movie in movies:
get_poster(movie.id, movie.image)
count += 1
print count, movie.title
time.sleep(2) | [
"2451255133@qq.com"
] | 2451255133@qq.com |
9660216677c127738ccae93efffd11f88bb9cf9a | fa21ed4e665f00bd22236b6b224b247014ff2772 | /nodes/speed_controller.py | a964d53f56b5d404997bc813aff01b95e3d68cdf | [] | no_license | klipfel/rosGoturn | 87d77969102e22aff94ce83a07c77556d0e47062 | 2243259820a542791df5baed0eb97c16836dba46 | refs/heads/master | 2020-06-15T23:00:27.008175 | 2019-07-19T13:45:44 | 2019-07-19T13:45:44 | 195,414,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,803 | py | #!/usr/bin/env python3
import board
import adafruit_pca9685
import busio
import time
import os
class Servomotor:
def __init__(self):
i2c = busio.I2C(board.SCL, board.SDA)
self.pwm = adafruit_pca9685.PCA9685(i2c)
self.pwm.frequency = 50
# values
self.leftMax = 100
self.rightMax = 0
self.straight = 50
self.angle = None
self.set_bearing(self.straight)
print("Servomotor initialization SUCCESS")
def test(self):
accuracy = 10
for angle in range(0,100 + accuracy,accuracy):
self.set_bearing(angle)
time.sleep(1)
self.set_bearing(self.straight)
def terminal_test(self):
value = input("Angle [0-100]: ")
while value != 'q':
self.set_bearing(float(value))
value = input("Angle [0-100]: ")
print("value entered : " + value)
def set_bearing(self,angle):
self.pwm.channels[1].duty_cycle = int(3932+ angle*2620/100)
self.angle = angle
class Motor:
def __init__(self):
i2c = busio.I2C(board.SCL, board.SDA)
self.pwm = adafruit_pca9685.PCA9685(i2c)
self.pwm.frequency = 50
# values
self.off = 50
self.forwardMin = 68
# motor setup
self.setup()
print("Motor initialization SUCCESS")
def stop(self):
self.set_speed(self.off)
def setup(self):
self.set_speed(50)
time.sleep(1)
def test(self):
for speed in range(0,100,10):
self.set_speed(speed)
time.sleep(1)
self.set_speed(self.forwardMin)
time.sleep(4)
self.stop()
def terminal_test(self):
value = input("Speed [0-100]: ")
while value != 'q':
self.set_speed(float(value))
value = input("Speed [0-100]: ")
print("value entered : " + value)
def set_speed(self,speed):
self.pwm.channels[0].duty_cycle = int(3932+ speed*2620/100)
if __name__ == "__main__":
print("test")
b = Servomotor()
m = Motor()
b.test()
m.terminal_test()
| [
"arnaudklipfel@hotmail.com"
] | arnaudklipfel@hotmail.com |
eee4cab21177b67375dd31d211b70f0587198b8e | 0e014984751a44761864c79546939bc21d699752 | /edx_AI_Week9/search.py | ba613c6121722cbb407f881c34bbefb2ba79bd47 | [] | no_license | miguel-ossa/AI-Columbia-Exercises | 8c76bb0e469f8b55976b22b035867ea68041d4da | 25758f35bc7f93e8e3dece6fed5b6db7a4cf5c81 | refs/heads/master | 2020-03-29T12:14:05.010584 | 2019-06-02T07:32:29 | 2019-06-02T07:32:29 | 149,854,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,167 | py | """Search (Chapters 3-4)
The way to use this code is to subclass Problem to create a class of problems,
then create problem instances and solve them with calls to the various search
functions."""
from __future__ import generators
from utils import *
import agents
import math, random, sys, time, bisect, string
class Problem:
    """Abstract formulation of a search problem.

    Subclass this and implement `successor`, and possibly `__init__`,
    `goal_test`, and `path_cost`.  Instances of the subclass can then
    be handed to any of the search functions in this module."""

    def __init__(self, initial, goal=None):
        """Record the initial state and, when the problem has a single
        unique goal, that goal state.  Subclass constructors may take
        additional arguments."""
        self.initial = initial
        self.goal = goal

    def successor(self, state):
        """Return a sequence of (action, state) pairs reachable from
        `state`.  An iterator that yields them one at a time is fine
        (and preferable when there are many); the search framework
        only ever iterates the result."""
        #abstract
        return

    def goal_test(self, state):
        """Return True if `state` is a goal.  The default compares
        against the single `self.goal`; override when checking one
        goal state is not enough."""
        return state == self.goal

    def path_cost(self, c, state1, action, state2):
        """Return the cost of a solution path that reaches `state2`
        from `state1` via `action`, given cost `c` up to `state1`.
        The default charges 1 per step, ignoring states and action;
        override when the path or action matters."""
        return c + 1

    def value(self):
        """Value of a state, for optimization problems; maximized by
        hill-climbing and related algorithms."""
        #abstract
        return
class Node:
    """A node in a search tree: wraps a problem state together with a
    pointer to the parent node it was expanded from, the action that
    produced it, and the accumulated path cost (also known as g).
    Two different paths to one state produce two distinct nodes.
    Search algorithms may attach extra attributes such as f and h
    (see best_first_graph_search and astar_search).  You will not
    need to subclass this class."""

    def __init__(self, state, parent=None, action=None, path_cost=0):
        "Build a search tree node for `state`, derived from `parent` via `action`."
        self.state = state
        self.parent = parent
        self.action = action
        self.path_cost = path_cost
        # Root nodes sit at depth 0; a child is one level deeper than
        # its parent.
        self.depth = 0
        if parent:
            self.depth = parent.depth + 1

    def __repr__(self):
        return "<Node %s>" % (self.state,)

    def path(self):
        "Return the list of nodes from this node back up to the root."
        nodes = [self]
        here = self
        while here.parent:
            here = here.parent
            nodes.append(here)
        return nodes

    def expand(self, problem):
        "Return the list of nodes reachable from this node. [Fig. 3.8]"
        children = []
        for (act, next) in problem.successor(self.state):
            cost = problem.path_cost(self.path_cost, self.state, act, next)
            children.append(Node(next, self, act, cost))
        return children
class SimpleProblemSolvingAgent(agents.Agent):
    """Abstract framework for a problem-solving agent. [Fig. 3.1]

    Subclasses must supply update_state, formulate_goal,
    formulate_problem, and search.  The agent plans a whole action
    sequence whenever its current plan runs out, then replays that
    sequence one action per percept.
    """

    def __init__(self):
        # Was `Agent.__init__(self)`: the module is imported as
        # `agents`, so the bare name raised NameError.
        agents.Agent.__init__(self)
        # Kept on the instance rather than in closure variables: the
        # old code assigned to `seq`/`state` inside program(), which
        # made them locals there and raised UnboundLocalError on the
        # first read (Python 2 has no `nonlocal`).
        self.state = []  # accumulated world model
        self.seq = []    # remaining planned actions

        def program(percept):
            """Incorporate the percept; replan if the current action
            sequence is exhausted; return the next planned action."""
            self.state = self.update_state(self.state, percept)
            if not self.seq:
                goal = self.formulate_goal(self.state)
                problem = self.formulate_problem(self.state, goal)
                self.seq = self.search(problem)
            # Pop and return the next action of the plan.
            action = self.seq[0]
            self.seq[0:1] = []
            return action

        self.program = program
## Uninformed Search algorithms
def tree_search(problem, fringe):
    """Generic tree search [Fig. 3.8]: repeatedly pop a node from
    `fringe`, return it if it passes the goal test, otherwise push its
    children.  `fringe` must arrive as an empty queue; its queueing
    discipline determines the search strategy.  Repeated paths to a
    state are NOT detected (see graph_search for that)."""
    fringe.append(Node(problem.initial))
    while fringe:
        current = fringe.pop()
        if problem.goal_test(current.state):
            return current
        fringe.extend(current.expand(problem))
    return None
def breadth_first_tree_search(problem):
    "Tree search that expands the shallowest nodes first (FIFO fringe). [p 74]"
    fringe = FIFOQueue()
    return tree_search(problem, fringe)
def depth_first_tree_search(problem):
    "Tree search that expands the deepest nodes first (LIFO fringe). [p 74]"
    fringe = Stack()
    return tree_search(problem, fringe)
def graph_search(problem, fringe):
    """Generic graph search [Fig. 3.18]: like tree_search, but a state
    is expanded at most once, so of two paths reaching the same state
    only the first one popped from `fringe` is pursued.  `fringe` must
    arrive as an empty queue; its discipline sets the strategy."""
    visited = {}  # states already expanded
    fringe.append(Node(problem.initial))
    while fringe:
        current = fringe.pop()
        if problem.goal_test(current.state):
            return current
        if current.state in visited:
            continue  # already expanded via an earlier (better) path
        visited[current.state] = True
        fringe.extend(current.expand(problem))
    return None
def breadth_first_graph_search(problem):
    "Graph search that expands the shallowest nodes first (FIFO fringe). [p 74]"
    fringe = FIFOQueue()
    return graph_search(problem, fringe)
def depth_first_graph_search(problem):
    "Graph search that expands the deepest nodes first (LIFO fringe). [p 74]"
    fringe = Stack()
    return graph_search(problem, fringe)
def depth_limited_search(problem, limit=50):
    """Depth-first search that treats nodes at depth `limit` as if
    they had no successors. [Fig. 3.12]

    Returns a goal node on success, None on definite failure, or the
    string 'cutoff' when the depth bound was reached somewhere (so
    the failure is not conclusive and a larger limit might succeed;
    iterative_deepening_search relies on this distinction)."""
    def recursive_dls(node, problem, limit):
        cutoff_occurred = False
        if problem.goal_test(node.state):
            return node
        elif node.depth == limit:
            return 'cutoff'
        else:
            for successor in node.expand(problem):
                result = recursive_dls(successor, problem, limit)
                if result == 'cutoff':
                    cutoff_occurred = True
                # Was `result != None`: identity test is the idiom for
                # None comparisons.
                elif result is not None:
                    return result
        if cutoff_occurred:
            return 'cutoff'
        else:
            return None
    # Body of depth_limited_search:
    return recursive_dls(Node(problem.initial), problem, limit)
def iterative_deepening_search(problem):
    """Repeated depth-limited search with limits 0, 1, 2, ... until a
    result other than 'cutoff' comes back, combining BFS-like
    completeness with DFS-like memory use. [Fig. 3.13]"""
    for depth in xrange(sys.maxint):
        result = depth_limited_search(problem, depth)
        # Was `result is not 'cutoff'`: identity comparison against a
        # string literal only works by accident of CPython interning;
        # compare by value instead.
        if result != 'cutoff':
            return result
# Informed (Heuristic) Search
def best_first_graph_search(problem, f):
    """Graph search that always expands the node minimizing f(node).
    Choosing f as a heuristic estimate of distance to the goal gives
    greedy best-first search; f = node.depth gives breadth-first.
    Subtlety: `memoize(f, 'f')` caches each computed value on the node
    as attribute `f`, so after a search you can inspect the f values
    along the returned path."""
    evaluate = memoize(f, 'f')
    return graph_search(problem, PriorityQueue(min, evaluate))
greedy_best_first_graph_search = best_first_graph_search
# Greedy best-first search is accomplished by specifying f(n) = h(n);
# the alias just names that usage of best_first_graph_search.
def astar_search(problem, h=None):
    """A* search: best-first graph search with f(n) = g(n) + h(n).
    Pass the heuristic `h` when calling, or define it as problem.h.
    Applies the pathmax trick f(n) = max(f(n), g(n) + h(n)) so that f
    never decreases along a path."""
    if not h:
        h = problem.h

    def f(n):
        cached = getattr(n, 'f', -infinity)
        return max(cached, n.path_cost + h(n))

    return best_first_graph_search(problem, f)
## Other search algorithms
def recursive_best_first_search(problem, h=None):
    """RBFS: best-first search in linear space. [Fig. 4.5]

    `h` is the heuristic function on nodes; it defaults to problem.h
    (the new parameter is backward-compatible).  Returns a goal node,
    or None on failure.

    Fixes over the previous version, which could not run at all:
    - `expand(node, problem)` called an undefined free function;
      nodes expand themselves via node.expand(problem).
    - `s.h` read a nonexistent Node attribute; the heuristic is h(s).
    - RBFS must return a (node, f-limit) pair on every path, but the
      goal branch returned a bare node, which broke the tuple unpack
      at the recursive call site.
    - `min(flimit, alternative)` compared a number with a Node; the
      intended bound is the second-best successor's f value (or
      infinity when there is only one successor).
    - the outer call passed only two of RBFS's three arguments and
      never initialized the root's f value.
    """
    h = h or problem.h

    def RBFS(problem, node, flimit):
        if problem.goal_test(node.state):
            return node, 0  # the f bound no longer matters
        successors = node.expand(problem)
        if len(successors) == 0:
            return None, infinity
        for s in successors:
            # Pathmax: a child's f never drops below its parent's.
            s.f = max(s.path_cost + h(s), node.f)
        while True:
            successors.sort(lambda x, y: x.f - y.f)  # Order by lowest f value
            best = successors[0]
            if best.f > flimit:
                return None, best.f
            if len(successors) > 1:
                alternative = successors[1].f
            else:
                alternative = infinity
            result, best.f = RBFS(problem, best, min(flimit, alternative))
            if result is not None:
                return result, best.f

    root = Node(problem.initial)
    root.f = root.path_cost + h(root)
    result, bestf = RBFS(problem, root, infinity)
    return result
def hill_climbing(problem):
    """Steepest-ascent hill climbing [Fig. 4.11]: from the initial
    state, keep moving to the highest-valued neighbor; return the
    current state once no neighbor improves on it (a local maximum).

    Fixes: the old body called `expand(node, problem)` with an
    undefined name `node` and an undefined free function, and scored
    via the nonexistent `Node.value` method.
    """
    current = Node(problem.initial)
    while True:
        neighbors = current.expand(problem)
        if not neighbors:
            return current.state  # dead end: nowhere to climb
        # NOTE(review): scoring assumes subclasses implement a
        # state-valued problem.value(state); the abstract
        # Problem.value in this file takes no state argument --
        # confirm against the subclasses actually used.
        neighbor = argmax(neighbors, lambda node: problem.value(node.state))
        if problem.value(neighbor.state) <= problem.value(current.state):
            return current.state
        current = neighbor
def exp_schedule(k=20, lam=0.005, limit=100):
    """Return an exponential-decay cooling schedule for simulated
    annealing: T(t) = k * e^(-lam*t) while t < limit, and 0 after."""
    # A conditional expression replaces utils.if_, which evaluated
    # both branches eagerly (computing the exp even past the cutoff).
    return lambda t: k * math.exp(-lam * t) if t < limit else 0
def simulated_annealing(problem, schedule=exp_schedule()):
    """Simulated annealing [Fig. 4.5]: a random walk that always
    accepts improving moves and accepts worsening moves with
    probability e^(delta_e / T), where the temperature T follows the
    cooling `schedule`.  Returns the current node once T reaches 0.

    Fixes: the old body wrote `expand(node. problem)` -- a dot where
    a comma belongs, on an undefined name `node` and an undefined
    free function -- and crashed on a node with no successors."""
    current = Node(problem.initial)
    for t in xrange(sys.maxint):
        T = schedule(t)
        if T == 0:
            return current
        successors = current.expand(problem)
        if not successors:
            return current  # dead end: nothing left to try
        next = random.choice(successors)
        delta_e = next.path_cost - current.path_cost
        # Always accept an improvement; accept a worsening move with
        # probability e^(delta_e / T).
        if delta_e > 0 or probability(math.exp(delta_e / T)):
            current = next
def online_dfs_agent(a):
    "Online depth-first search agent [Fig. 4.12] -- placeholder, not yet implemented."
    pass #### TODO: implement per Fig. 4.12
def lrta_star_agent(a):
    "LRTA* agent [Fig. 4.12] -- placeholder, not yet implemented."
    pass #### TODO: implement per Fig. 4.12
# Genetic Algorithm
def genetic_search(problem, fitness_fn, ngen=1000, pmut=0.0, n=20):
    """Run genetic_algorithm on the appropriate parts of a problem.

    Requires that `problem.successor` generates reasonable states and
    `problem.path_cost` scores them.  The fitness actually used is the
    negated path cost, since the GA maximizes fitness while costs are
    minimized.  (`fitness_fn` is accepted for interface compatibility
    but, as before, is overridden by that negated-cost fitness.)
    """
    # Was `problem.initial_state`: Problem stores the start state in
    # the attribute `initial`.
    states = [s for (a, s) in problem.successor(problem.initial)[:n]]
    random.shuffle(states)
    fitness_fn = lambda s: - problem.path_cost(0, s, None, s)
    return genetic_algorithm(states, fitness_fn, ngen, pmut)
def genetic_algorithm(population, fitness_fn, ngen=1000, pmut=0.0):
    """[Fig. 4.7] Evolve `population` for `ngen` generations, breeding
    each generation by fitness-weighted parent selection plus
    single-point crossover, mutating children with probability `pmut`,
    and return the fittest individual of the final generation.

    Fixes: `for i in len(population)` was a TypeError (missing
    range); the parent selector was called under the undefined name
    `random_weighted_selections` (the function below is singular);
    and the mutation test `> pmut` was inverted, mutating with
    probability 1 - pmut instead of pmut."""
    def reproduce(p1, p2):
        # Single-point crossover at a random cut position.
        c = random.randrange(len(p1))
        return p1[:c] + p2[c:]

    for gen in range(ngen):
        new_population = []
        for _ in range(len(population)):
            p1, p2 = random_weighted_selection(population, 2, fitness_fn)
            child = reproduce(p1, p2)
            if random.uniform(0, 1) < pmut:
                # NOTE(review): `reproduce` builds children by slicing,
                # so plain lists/strings have no .mutate() -- individuals
                # must provide one for pmut > 0 to work; confirm callers.
                child.mutate()
            new_population.append(child)
        population = new_population
    return argmax(population, fitness_fn)
def random_weighted_selection(seq, n, weight_fn):
    """Pick n elements of seq, with replacement, each chosen with
    probability proportional to weight_fn(element).

    Builds the cumulative-weight table once, then uses bisect (already
    imported at module level) for an O(log n) lookup per draw instead
    of a linear scan.  Also fixes an edge case: when a draw landed
    exactly on the total weight, the old scan found no element with a
    strictly larger cumulative weight and silently returned fewer than
    n selections; the index is now clamped to the last element."""
    totals = []
    runningtotal = 0
    for item in seq:
        runningtotal += weight_fn(item)
        totals.append(runningtotal)
    selections = []
    for _ in range(n):
        r = random.uniform(0, totals[-1])
        # First index whose cumulative weight strictly exceeds r,
        # clamped into range.
        i = min(bisect.bisect_right(totals, r), len(seq) - 1)
        selections.append(seq[i])
    return selections
# The remainder of this file implements examples for the search algorithms.
# Graphs and Graph Problems
class Graph:
    """A graph connects nodes (vertices) by edges (links). Each edge can also
    have a length associated with it. The constructor call is something like:
        g = Graph({'A': {'B': 1, 'C': 2}})
    this makes a graph with 3 nodes, A, B, and C, with an edge of length 1 from
    A to B, and an edge of length 2 from A to C. You can also do:
        g = Graph({'A': {'B': 1, 'C': 2}}, directed=False)
    This makes an undirected graph, so inverse links are also added. The graph
    stays undirected; if you add more links with g.connect('B', 'C', 3), then
    inverse link is also added. You can use g.nodes() to get a list of nodes,
    g.get('A') to get a dict of links out of A, and g.get('A', 'B') to get the
    length of the link from A to B. 'Lengths' can actually be any object at
    all, and nodes can be any hashable object."""

    def __init__(self, dict=None, directed=True):
        # The parameter is deliberately named `dict` (shadowing the builtin)
        # to preserve the established keyword interface Graph(dict=...).
        self.dict = dict or {}
        self.directed = directed
        if not directed:
            self.make_undirected()

    def make_undirected(self):
        "Make a digraph into an undirected graph by adding symmetric edges."
        # Iterate over snapshots: connect1 inserts new keys into self.dict
        # (and potentially into inner dicts), which would otherwise raise
        # "dictionary changed size during iteration" on Python 3.
        for a in list(self.dict.keys()):
            for (b, distance) in list(self.dict[a].items()):
                self.connect1(b, a, distance)

    def connect(self, A, B, distance=1):
        """Add a link from A and B of given distance, and also add the inverse
        link if the graph is undirected."""
        self.connect1(A, B, distance)
        if not self.directed:
            self.connect1(B, A, distance)

    def connect1(self, A, B, distance):
        "Add a link from A to B of given distance, in one direction only."
        self.dict.setdefault(A, {})[B] = distance

    def get(self, a, b=None):
        """Return a link distance or a dict of {node: distance} entries.
        .get(a,b) returns the distance or None;
        .get(a) returns a dict of {node: distance} entries, possibly {}."""
        links = self.dict.setdefault(a, {})
        if b is None:
            return links
        else:
            return links.get(b)

    def nodes(self):
        "Return a list of nodes in the graph."
        return self.dict.keys()
def UndirectedGraph(dict=None):
    """Build a Graph in which every edge -- including ones added later --
    automatically gets its inverse link as well."""
    graph = Graph(dict=dict, directed=False)
    return graph
def RandomGraph(nodes=range(10), min_links=2, width=400, height=300,
                curvature=lambda: random.uniform(1.1, 1.5)):
    """Construct a random graph, with the specified nodes, and random links.
    The nodes are laid out randomly on a (width x height) rectangle.
    Then each node is connected to the min_links nearest neighbors.
    Because inverse links are added, some nodes will have more connections.
    The distance between nodes is the hypotenuse times curvature(),
    where curvature() defaults to a random number between 1.1 and 1.5."""
    graph = UndirectedGraph()
    graph.locations = {}
    # Scatter the nodes over the rectangle.
    for node in nodes:
        graph.locations[node] = (random.randrange(width),
                                 random.randrange(height))
    # Link each node to (at least) its min_links nearest neighbours;
    # symmetric links may push some nodes above the minimum.
    for _ in range(min_links):
        for node in nodes:
            if len(graph.get(node)) >= min_links:
                continue
            here = graph.locations[node]
            def distance_to_node(n):
                # Self and already-linked nodes are never candidates.
                if n is node or graph.get(node, n):
                    return infinity
                return distance(graph.locations[n], here)
            neighbor = argmin(nodes, distance_to_node)
            d = distance(graph.locations[neighbor], here) * curvature()
            graph.connect(node, neighbor, int(d))
    return graph
# The simplified Romania map: cities abbreviated to single letters, edge
# values are link lengths (used as path costs by GraphProblem).
romania = UndirectedGraph(Dict(
    A=Dict(Z=75, S=140, T=118),
    B=Dict(U=85, P=101, G=90, F=211),
    C=Dict(D=120, R=146, P=138),
    D=Dict(M=75),
    E=Dict(H=86),
    F=Dict(S=99),
    H=Dict(U=98),
    I=Dict(V=92, N=87),
    L=Dict(T=111, M=70),
    O=Dict(Z=71, S=151),
    P=Dict(R=97),
    R=Dict(S=80),
    U=Dict(V=142)))
# (x, y) coordinates per city: GraphProblem.h uses them for the
# straight-line-distance heuristic.
romania.locations = Dict(
    A=( 91, 492), B=(400, 327), C=(253, 288), D=(165, 299),
    E=(562, 293), F=(305, 449), G=(375, 270), H=(534, 350),
    I=(473, 506), L=(165, 379), M=(168, 339), N=(406, 537),
    O=(131, 571), P=(320, 368), R=(233, 410), S=(207, 457),
    T=( 94, 410), U=(456, 350), V=(509, 444), Z=(108, 531))
# Adjacency map of Australian states/territories; every link has length 1
# (Tasmania 'T' is isolated).
australia = UndirectedGraph(Dict(
    T=Dict(),
    SA=Dict(WA=1, NT=1, Q=1, NSW=1, V=1),
    NT=Dict(WA=1, Q=1),
    NSW=Dict(Q=1, V=1)))
# Rough (x, y) positions for the straight-line-distance heuristic / drawing.
australia.locations = Dict(WA=(120, 24), NT=(135, 20), SA=(135, 30),
                           Q=(145, 20), NSW=(145, 32), T=(145, 42), V=(145, 37))
class GraphProblem(Problem):
    "The problem of searching a graph from one node to another."

    def __init__(self, initial, goal, graph):
        Problem.__init__(self, initial, goal)
        self.graph = graph

    def successor(self, A):
        "Return a list of (action, result) pairs."
        # The action of moving to node B is labelled with B itself.
        return [(B, B) for B in self.graph.get(A).keys()]

    def path_cost(self, cost_so_far, A, action, B):
        "Add the length of the A->B link; missing links cost infinity."
        d = self.graph.get(A, B)
        # Explicit None check: the original `... or infinity` treated a
        # legitimate zero-length link as a missing one.
        return cost_so_far + (infinity if d is None else d)

    def h(self, node):
        "h function is straight-line distance from a node's state to goal."
        locs = getattr(self.graph, 'locations', None)
        if locs:
            return int(distance(locs[node.state], locs[self.goal]))
        else:
            # No coordinates available: fall back to an uninformative value.
            return infinity
#### NOTE: NQueensProblem not working properly yet.
class NQueensProblem(Problem):
    """The problem of placing N queens on an NxN board with none attacking
    each other.  A state is represented as an N-element array, where
    a value of r in the c-th entry means there is a queen at column c,
    row r, and a value of None means that the c-th column has not been
    filled in yet.  We fill in columns left to right."""

    def __init__(self, N):
        self.N = N
        self.initial = [None] * N

    def successor(self, state):
        "In the leftmost empty column, try all non-conflicting rows."
        if state[-1] is not None:
            return []  ## All columns filled; no successors
        else:
            def place(col, row):
                # Copy the state with a queen added at (row, col).
                new = state[:]
                new[col] = row
                return new
            col = state.index(None)
            return [(row, place(col, row)) for row in range(self.N)
                    if not self.conflicted(state, row, col)]

    def conflicted(self, state, row, col):
        "Would placing a queen at (row, col) conflict with anything?"
        # Check every already-filled column 0..col-1.  The original used
        # range(col-1), an off-by-one that skipped the immediately
        # preceding column and admitted attacking placements (the likely
        # cause of the "not working properly yet" note above this class).
        for c in range(col):
            if self.conflict(row, col, state[c], c):
                return True
        return False

    def conflict(self, row1, col1, row2, col2):
        "Would putting two queens in (row1, col1) and (row2, col2) conflict?"
        return (row1 == row2  ## same row
                or col1 == col2  ## same column
                or row1 - col1 == row2 - col2  ## same \ diagonal
                or row1 + col1 == row2 + col2)  ## same / diagonal

    def goal_test(self, state):
        "Check if all columns filled, no conflicts."
        if state[-1] is None:
            return False
        return not any(self.conflicted(state, state[c], c)
                       for c in range(len(state)))
## Inverse Boggle: Search for a high-scoring Boggle board. A good domain for
## iterative-repair and related search techniques, as suggested by Justin Boyan.

ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'

# Letter cubes of a 4x4 Boggle set; each string lists the six faces of one
# cube (random_boggle picks one face per cube).
cubes16 = ['FORIXB', 'MOQABJ', 'GURILW', 'SETUPL',
           'CMPDAE', 'ACITAO', 'SLCRAE', 'ROMASH',
           'NODESW', 'HEFIYE', 'ONUDTK', 'TEVIGN',
           'ANEDVZ', 'PINESH', 'ABILYT', 'GKYLEU']

def random_boggle(n=4):
    """Return a random Boggle board of size n x n.
    We represent a board as a linear list of letters."""
    cubes = [cubes16[i % 16] for i in range(n * n)]
    random.shuffle(cubes)
    # list(...) so the board is a real, indexable list: a bare map() is a
    # lazy iterator on Python 3 and breaks callers that index board[i].
    return list(map(random.choice, cubes))
## The best 5x5 board found by Boyan, with our word list this board scores
## 2274 words, for a score of 9837
boyan_best = list('RSTCSDEIAEGNLRPEATESMSSID')

def print_boggle(board):
    "Print the board in a 2-d array."
    # NOTE: Python 2 print statements -- this function will not compile on
    # Python 3.
    n2 = len(board); n = exact_sqrt(n2)
    for i in range(n2):
        if i % n == 0: print  # newline before each new row
        if board[i] == 'Q': print 'Qu',  # the 'Q' cube is rendered as 'Qu'
        else: print str(board[i]) + ' ',
    print
def boggle_neighbors(n2, cache={}):
    """Return a list of lists: entry i holds the board indexes adjacent to
    square i (including diagonals) on a board of n2 squares."""
    # Memoised per board size; the mutable default argument *is* the cache.
    cached = cache.get(n2)
    if cached:
        return cached
    n = exact_sqrt(n2)
    neighbors = [None] * n2
    for i in range(n2):
        adjacent = []
        top_row = i < n
        bottom_row = i >= n2 - n
        left_col = i % n == 0
        right_col = (i + 1) % n == 0
        if not top_row:
            adjacent.append(i - n)              # directly above
            if not left_col:
                adjacent.append(i - n - 1)      # upper-left diagonal
            if not right_col:
                adjacent.append(i - n + 1)      # upper-right diagonal
        if not bottom_row:
            adjacent.append(i + n)              # directly below
            if not left_col:
                adjacent.append(i + n - 1)      # lower-left diagonal
            if not right_col:
                adjacent.append(i + n + 1)      # lower-right diagonal
        if not left_col:
            adjacent.append(i - 1)              # left
        if not right_col:
            adjacent.append(i + 1)              # right
        neighbors[i] = adjacent
    cache[n2] = neighbors
    return neighbors

def exact_sqrt(n2):
    "If n2 is a perfect square, return its square root, else raise error."
    root = int(math.sqrt(n2))
    assert root * root == n2
    return root
class Wordlist:
    """This class holds a list of words. You can use (word in wordlist)
    to check if a word is in the list, or wordlist.lookup(prefix)
    to see if prefix starts any of the words in the list."""

    def __init__(self, filename, min_len=3):
        # Context manager closes the file deterministically (the original
        # leaked the handle).
        with open(filename) as f:
            lines = f.read().upper().split()
        self.words = [word for word in lines if len(word) >= min_len]
        self.words.sort()
        # bounds[c] = (lo, hi): the slice of self.words starting with letter c.
        self.bounds = {}
        for c in ALPHABET:
            c2 = chr(ord(c) + 1)
            self.bounds[c] = (bisect.bisect(self.words, c),
                              bisect.bisect(self.words, c2))

    def lookup(self, prefix, lo=0, hi=None):
        """See if prefix is in dictionary, as a full word or as a prefix.
        Return two values: the first is the lowest i such that
        words[i].startswith(prefix), or is None; the second is
        True iff prefix itself is in the Wordlist."""
        words = self.words
        i = bisect.bisect_left(words, prefix, lo, hi)
        if i < len(words) and words[i].startswith(prefix):
            return i, (words[i] == prefix)
        else:
            return None, False

    def __contains__(self, word):
        i = bisect.bisect_left(self.words, word)
        # Guard the index: a word sorting after every entry made the
        # original raise IndexError instead of returning False.
        return i < len(self.words) and self.words[i] == word

    def __len__(self):
        return len(self.words)
class BoggleFinder:
    """A class that allows you to find all the words in a Boggle board. """

    wordlist = None ## A class variable, holding a wordlist; loaded lazily once

    def __init__(self, board=None):
        if BoggleFinder.wordlist is None:
            # NOTE(review): relative path assumes a particular working
            # directory -- confirm before reuse.
            BoggleFinder.wordlist = Wordlist("../data/wordlist")
        self.found = {}
        if board:
            self.set_board(board)

    def set_board(self, board=None):
        "Set the board, and find all the words in it."
        if board is None:
            board = random_boggle()
        self.board = board
        self.neighbors = boggle_neighbors(len(board))
        self.found = {}
        # Start a prefix search from every square, restricted to the slice
        # of the wordlist beginning with that square's letter.
        for i in range(len(board)):
            lo, hi = self.wordlist.bounds[board[i]]
            self.find(lo, hi, i, [], '')
        return self

    def find(self, lo, hi, i, visited, prefix):
        """Looking in square i, find the words that continue the prefix,
        considering the entries in self.wordlist.words[lo:hi], and not
        revisiting the squares in visited.  Backtracks by appending to and
        popping from the shared `visited` list."""
        if i in visited:
            return
        wordpos, is_word = self.wordlist.lookup(prefix, lo, hi)
        if wordpos is not None:
            if is_word:
                self.found[prefix] = True
            visited.append(i)
            c = self.board[i]
            if c == 'Q': c = 'QU'  # the Q cube counts as 'QU'
            prefix += c
            # Narrow the search window to [wordpos, hi) as the prefix grows.
            for j in self.neighbors[i]:
                self.find(wordpos, hi, j, visited, prefix)
            visited.pop()

    def words(self):
        "The words found."
        return self.found.keys()

    # Score by word length: lengths 0-3 score 0, then 1, 2, 3, 5, and 11
    # for anything longer.
    scores = [0, 0, 0, 0, 1, 2, 3, 5] + [11] * 100

    def score(self):
        "The total score for the words found, according to the rules."
        return sum([self.scores[len(w)] for w in self.words()])

    def __len__(self):
        "The number of words found."
        return len(self.found)
def boggle_hill_climbing(board=None, ntimes=100, print_it=True):
    """Solve inverse Boggle by hill-climbing: find a high-scoring board by
    starting with a random one and changing it.  The objective maximised is
    the number of words found (len of the finder), not the point score.
    NOTE: contains a Python 2 print statement."""
    finder = BoggleFinder()
    if board is None:
        board = random_boggle()
    best = len(finder.set_board(board))
    for _ in range(ntimes):
        # Mutate one square in place; keep the change only if it strictly
        # improves the word count, otherwise restore the old letter.
        i, oldc = mutate_boggle(board)
        new = len(finder.set_board(board))
        if new > best:
            best = new
            print best, _, board
        else:
            board[i] = oldc ## Change back
    if print_it:
        print_boggle(board)
    return board, best
def mutate_boggle(board):
    """Overwrite one random square with a random face of a random cube;
    return (index, previous letter) so the caller can undo the change."""
    pos = random.randrange(len(board))
    previous = board[pos]
    cube = random.choice(cubes16)  ##random.choice(boyan_best)
    board[pos] = random.choice(cube)
    return pos, previous
## Code to compare searchers on various problems.
class InstrumentedProblem(Problem):
    """Delegates to a problem, and keeps statistics: number of successor
    expansions, goal tests, states generated, and the goal state found."""

    def __init__(self, problem):
        self.problem = problem
        self.succs = 0
        self.goal_tests = 0
        self.states = 0
        self.found = None

    def successor(self, state):
        "Return a list of (action, state) pairs reachable from this state."
        result = self.problem.successor(state)
        self.succs += 1
        self.states += len(result)
        return result

    def goal_test(self, state):
        "Return true if the state is a goal."
        self.goal_tests += 1
        result = self.problem.goal_test(state)
        if result:
            self.found = state
        return result

    def __getattr__(self, attr):
        # Counters resolve from the instance dict; anything else falls
        # through to the wrapped problem.
        if attr in ('succs', 'goal_tests', 'states'):
            return self.__dict__[attr]
        return getattr(self.problem, attr)

    def __repr__(self):
        return '<%4d/%4d/%4d/%s>' % (self.succs, self.goal_tests,
                                     self.states, str(self.found)[0:4])
def compare_searchers(problems, header, searchers=[breadth_first_tree_search,
                      breadth_first_graph_search, depth_first_graph_search,
                      iterative_deepening_search, depth_limited_search,
                      astar_search]):
    "Run each searcher on each problem and print a table of statistics."
    def run_one(searcher, problem):
        instrumented = InstrumentedProblem(problem)
        searcher(instrumented)
        return instrumented
    table = []
    for searcher in searchers:
        row = [name(searcher)]
        for problem in problems:
            row.append(run_one(searcher, problem))
        table.append(row)
    print_table(table, header)
def compare_graph_searchers():
compare_searchers(problems=[GraphProblem('A', 'B', romania),
GraphProblem('O', 'N', romania),
GraphProblem('Q', 'WA', australia)],
header=['Searcher', 'Romania(A,B)', 'Romania(O, N)', 'Australia']) | [
"miguel.ossa.abellan@gmail.com"
] | miguel.ossa.abellan@gmail.com |
1b006e7f6b992e3c1dccc846fe3689745833841a | 9c88bd2c194ccc19b4989bf844564c5857e2ea82 | /Simple_Calculations/rectangle_area.py | 379677136a28e027dd00a9629978df03cbd0f470 | [] | no_license | ectky/PythonProjects | faa1d600f6b1e3aaee19b96a1cbf384ac0dc47a2 | 5920ee1752df729c178fe088b58ba17d8585b333 | refs/heads/master | 2021-08-11T09:31:38.562054 | 2017-11-13T13:59:04 | 2017-11-13T13:59:04 | 110,552,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | import math
# Read two opposite corners (x1, y1) and (x2, y2) of an axis-aligned
# rectangle from stdin, then print its area followed by its perimeter.
x1 = float(input())
y1 = float(input())
x2 = float(input())
y2 = float(input())

width = math.fabs(x1 - x2)
height = math.fabs(y2 - y1)

area = width * height
perimeter = 2 * width + 2 * height

print(area)
print(perimeter)
| [
"noreply@github.com"
] | ectky.noreply@github.com |
e7f1ba699ec76f92212d50d4d70c0fb5f85ae38d | 97228f81e03cebd8c250c267b4943bddb3640fd1 | /gdksite/event/migrations/0011_auto_20181221_1244.py | e01af6b8008dd2e6ca92ca49f5c67484b84daf3c | [
"MIT"
] | permissive | vgrivtsov/moygdk | 88e48e234d8737e044dc6bd9c4e1794c2a25fe32 | 74d63299c7326bcae92a17b61d978ad91f0d5552 | refs/heads/master | 2022-12-09T18:38:28.731819 | 2021-02-12T10:20:45 | 2021-02-12T10:20:45 | 164,198,744 | 0 | 0 | MIT | 2022-12-08T01:34:21 | 2019-01-05T09:02:21 | JavaScript | UTF-8 | Python | false | false | 443 | py | # Generated by Django 2.1.4 on 2018-12-21 12:44
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops the `event_date` and
    # `event_time` fields from `eventpage`.  Generated code -- avoid
    # hand-editing beyond comments.
    dependencies = [
        ('event', '0010_auto_20181220_0821'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='eventpage',
            name='event_date',
        ),
        migrations.RemoveField(
            model_name='eventpage',
            name='event_time',
        ),
    ]
| [
"vgrivtsov@gmail.com"
] | vgrivtsov@gmail.com |
bdc06186b30b43623dc40611426d6798cf09e14e | 08c3f4b38204609d9f398f4b0a7a1aa6457993f0 | /archive/Decathlon-meta-deeplearning/meta.py | a88a9223b3f618d89077beeb4ad102798afaf8d7 | [
"MIT"
] | permissive | tueimage/meta-segmentation-msc-2018 | 0cb9b25c75352ab023dc307f956fc0577fee119b | d497e6ea99b89fdb54e11568452894f022269043 | refs/heads/master | 2020-04-07T13:16:01.725093 | 2019-12-13T18:02:00 | 2019-12-13T18:02:00 | 158,400,064 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,253 | py | # from meta_network import meta_learner
from data import Data, MetaData
from utils import subset_index_to_address
from utils import meta_pred_generator, historyPlot, create_data_subsets, dice_coef_loss, auc, mean_iou, dice_coef
from keras.optimizers import Adam
from meta_network import meta_learner
from networks import EncoderDecoderNetwork
from tqdm import tqdm
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def main():
    """Aggregate previously saved per-(task, feature-extractor) MetaData
    results across the Decathlon tasks, then train a VGG16-based
    meta-learner on the pooled addresses/results.

    Much of the original data-generation pipeline is left commented out
    below; only the load-and-aggregate path is active."""
    tasks_list= ['Task10_Colon','Task01_BrainTumour','Task02_Heart','Task03_Liver','Task04_Hippocampus','Task05_Prostate', 'Task06_Lung', 'Task07_Pancreas', 'Task08_HepaticVessel']
    # ,'Task09_Spleen',
    feature_extractors = ['VGG16', 'ResNet50', 'MobileNetV1']
    # Accumulator only; the 't'/'s' constructor args look like placeholders.
    meta_data = MetaData('t', 's')
    for task in tasks_list:
        # try:
        #     data = Data(task)
        #
        #     data.train_size = 1000
        #     data.val_size = 100
        #     data.imageDimensions = (224, 224)
        #     meta_subset_size = 5
        #     nr_of_meta_subsets = 5
        #
        #     data.load_training_data()
        #     data.load_valid_data()
        #     data.get_meta_subsets(nr_of_meta_subsets, meta_subset_size)
        for fe in feature_extractors:
            meta_inter = MetaData(task, fe)
            try:
                meta_inter.load()
                for x in range(5):
                    meta_data.total_addresses.append(meta_inter.addresses[x])
                    meta_data.total_results.append(meta_inter.results[x])
            # NOTE(review): bare `except:` silently swallows every error
            # (including KeyboardInterrupt); narrow to `except Exception`
            # and log the task/extractor that failed.
            except:
                print("oei")
            # meta_data = MetaData(task, fe)
            # struct = EncoderDecoderNetwork(fe, 2)
            # struct.task = task
            # struct.minibatch_size = 5
            # struct.epochs = 10
            # struct.imageDimensions = (224, 224)
            # struct.build_encoder()
            # struct.build_decoder()
            # struct.load_weights()
            # struct.model.compile(optimizer = Adam(lr = 1e-5), loss = dice_coef_loss, metrics = ['accuracy', auc, mean_iou, dice_coef])
            # for subset in range(data.meta_subsets.shape[0]):
            #     addresses = subset_index_to_address(data.meta_subsets[subset, :], data.train_data)
            #     meta_data.addresses.append(addresses)
            #     result = struct.get_meta_data(addresses)[2:]
            #     print(result)
            #     meta_data.results.append(result)
            # meta_data.save()
    print(meta_data.total_addresses)
    print(meta_data.total_results)
    print(len(meta_data.total_results))
    # except:
    #     print("MASSIVE FAIL")
    meta_model = meta_learner('VGG16')
    meta_model.build_feature_extractor()
    meta_model.build_meta_model()
    meta_model.train(meta_data.total_addresses, meta_data.total_results, (224, 224), 1)
    # historyPlot(meta_model.history, "testmeta.png")
    # meta_model.save_model()
    # meta_model.load_weights()
    # for x in range(30):
    #     pred = meta_model.model.predict_generator(meta_pred_generator(meta_data.total_addresses[x], meta_model.minibatch_size, (224,224)), steps = 1)
    #     print("pred: {}".format(pred))
    #     print("result: {}".format(meta_data.total_results[x]))
if __name__ == '__main__':
    main()
| [
"45205052+tjvsonsbeek@users.noreply.github.com"
] | 45205052+tjvsonsbeek@users.noreply.github.com |
7a5ddbcad3a15a9946b3546a3153387a9b875871 | 202e657b5c9bfcf3040146f779eafaf453d43d10 | /number-of-people.py | cd46b5b272560c65fca28b84aae50f97b45be1bd | [] | no_license | derekmcauley7/star-wars-api-college-assignment | 4695ff5c7393cefeb0d837a11c065431f3266998 | 1d080dbe7dc759de1f313fe727518e7e4feb8e6c | refs/heads/master | 2020-05-09T17:05:50.835601 | 2019-04-14T11:08:44 | 2019-04-14T11:08:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | from urllib.request import Request, urlopen
from json import loads
import ssl
# How many people are returned by the API?  Count by following the paginated
# `next` links rather than trusting the `count` attribute of the response.

# NOTE(review): hard-coded, machine-specific CA bundle path -- prefer
# ssl.create_default_context() or certifi for portability.
certsPath='/usr/local/lib/python3.7/site-packages/pip/_vendor/certifi/cacert.pem'
context=ssl.SSLContext()
context.load_verify_locations(certsPath)

url = 'http://swapi.co/api/people'
results = []
# `is not None` rather than `!= None` (PEP 8 identity comparison).
while url is not None:
    # Custom User-agent: the API rejects the default urllib agent.
    req = Request(url, None, {
        'User-agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'
    })
    data = loads(urlopen(req, context=context).read().decode("utf-8"))
    results += data['results']
    url = data['next']  # None on the last page, ending the loop

print("Number of people: " + str(len(results)))
"derekmcauley7@gmail.com"
] | derekmcauley7@gmail.com |
03d97741b6759ae1f367e5a530b53d4f599f17f5 | ec841eb43a9bdf840e227bed932900405a89fe95 | /maze.py | 0f037d255b655f43d76b449720d3ac77c2d3f91d | [] | no_license | 130e/SnakeGame | e358c85a0f80eb1740d75e93857eba8828b91aa1 | 28166ea346b7aae9728fa3f12355a1bf35733ab6 | refs/heads/master | 2022-11-17T13:02:14.806785 | 2020-07-16T18:19:19 | 2020-07-16T18:19:19 | 139,757,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,902 | py | # fernival made
# Key codes and grid-value sentinels shared by the maze solver.
KEY_EXIT = 27    # Esc
KEY_SPACE = 32
KEY_UP = 119     # 'w' (WASD movement keys)
KEY_DOWN = 115   # 's'
KEY_LEFT = 97    # 'a'
KEY_RIGHT = 100  # 'd'
KEY_LOST = KEY_DOWN
NO_ENTRY = float('-inf')   # value marking an impassable cell
ABSORB = 1.0               # value of the absorbing cell
MIN_TOLERANCE = 1e-10      # convergence threshold

class node():
    """One grid cell: a uniform policy over the four moves, the cell's
    current value, a snapshot of the four neighbours' values, and the
    cell's (y, x) position."""
    def __init__(self, y, x, value=0.0):
        moves = (KEY_UP, KEY_DOWN, KEY_LEFT, KEY_RIGHT)
        self.pi = {move: 0.25 for move in moves}
        self.value = value
        self.neighborV = {move: 0.0 for move in moves}
        self.y = y
        self.x = x
class maze():
    """Grid world solved by iterative value/policy updates.

    `map[i][j]` holds a `node`; neighbour lookups wrap around the edges
    (toroidal topology).  Coordinates passed to the public methods are
    1-based -- note the `-1` index adjustments."""
    def __init__(self, height, width, value=0.0):
        self.axis = (height, width)
        self.map = [[node(y, x, value=value) for y in range(width)] for x in range(height)]
        # (height, width) so map[height][width]
    def setAbsorb(self, cord):
        # Mark the (1-based) cell as the absorbing cell.
        self.map[cord[0]-1][cord[1]-1].value = ABSORB
    def __refreshNeighborValue(self):
        # Snapshot every cell's four neighbour values into neighborV;
        # first/last rows and columns wrap to the opposite edge.
        for j in range(self.axis[1]):
            for i in range(self.axis[0]):
                if j == 0:
                    self.map[i][j].neighborV[KEY_LEFT] = self.map[i][self.axis[1] - 1].value
                else:
                    self.map[i][j].neighborV[KEY_LEFT] = self.map[i][j - 1].value
                if j == self.axis[1] - 1:
                    self.map[i][j].neighborV[KEY_RIGHT] = self.map[i][0].value
                else:
                    self.map[i][j].neighborV[KEY_RIGHT] = self.map[i][j + 1].value
                if i == 0:
                    self.map[i][j].neighborV[KEY_UP] = self.map[self.axis[0] - 1][j].value
                else:
                    self.map[i][j].neighborV[KEY_UP] = self.map[i - 1][j].value
                if i == self.axis[0] - 1:
                    self.map[i][j].neighborV[KEY_DOWN] = self.map[0][j].value
                else:
                    self.map[i][j].neighborV[KEY_DOWN] = self.map[i + 1][j].value
    def refreshValue(self, gamma=1, instantValue=-1):
        # One sweep of value updates under the current policy `pi`.
        # Returns True when every updated cell moved by less than
        # MIN_TOLERANCE (i.e. the values have converged).
        self.__refreshNeighborValue()
        bConverge = True
        # for j in range(self.axis[1]):
        #     for i in range(self.axis[0]):
        #         nd = self.map[i][j]
        #         nd.value = nd.pi[KEY_UP] * (nd.value + nd.neighborV[KEY_UP]) + \
        #                    nd.pi[KEY_DOWN] * (nd.value + nd.neighborV[KEY_DOWN]) + \
        #                    nd.pi[KEY_LEFT] * (nd.value + nd.neighborV[KEY_LEFT]) + \
        #                    nd.pi[KEY_RIGHT] * (nd.value + nd.neighborV[KEY_RIGHT])
        for col in self.map:
            for nd in col:
                # Absorbing and blocked cells keep their sentinel values.
                if nd.value == ABSORB or nd.value == NO_ENTRY:
                    continue
                oldValue = nd.value
                nd.value = 0
                for k in nd.pi.keys():
                    if nd.pi[k] == 0:  # exclude calc with -inf neighbours
                        continue
                    nd.value += nd.pi[k] * (instantValue + gamma * nd.neighborV[k])
                if bConverge == True:  # check if converged
                    if abs(oldValue - nd.value) >= MIN_TOLERANCE:
                        bConverge = False
        return bConverge
    def refreshPi(self):
        # Greedy policy improvement: keep only moves into reachable
        # neighbours whose value is >= the cell's own, sharing probability
        # equally among them; everything else is zeroed.
        self.__refreshNeighborValue()
        for col in self.map:
            for nd in col:
                moves = 0
                for k in nd.pi.keys():
                    if nd.pi[k] != 0 and nd.neighborV[k] != NO_ENTRY and nd.neighborV[k] >= nd.value:
                        moves += 1
                    else:
                        nd.pi[k] = 0
                if moves != 0:
                    p = 1 / moves
                else:
                    p = 0
                for k in nd.pi.keys():
                    if nd.pi[k] != 0:
                        nd.pi[k] = p
                # # no choice set the biggest, if surrounded, go up and die
                # max = float('-inf')
                # key = KEY_UP
                # for k in nd.pi.keys():
                #     if nd.neighborV[k] > max:
                #         max = nd.neighborV[k]
                #         key = k
                # for k in nd.pi.keys():
                #     nd.pi[k] = 0
                # nd.pi[key] = 1
    def refreshDead(self, cords):
        # Mark each (1-based) coordinate as impassable.
        for c in cords:
            self.map[c[0]-1][c[1]-1].value = NO_ENTRY
    def getMove(self, cord):
        # Best permitted move from the (1-based) cell; defaults to
        # KEY_DOWN.  (`max` here shadows the builtin -- local only.)
        nd = self.map[cord[0]-1][cord[1]-1]
        max = nd.neighborV[KEY_DOWN]
        key = KEY_DOWN
        for k in nd.neighborV.keys():
            if nd.pi[k] != 0 and max < nd.neighborV[k]:
                max = nd.neighborV[k]
                key = k
        return key
    def show(self):
        # Print the value grid row by row (debug aid).
        for i in range(self.axis[0]):
            l = []
            for j in range(self.axis[1]):
                l.append(self.map[i][j].value)
            print(l)
"noreply@github.com"
] | 130e.noreply@github.com |
61f623bb2311199c6f90a06eafc6177b8604e7b1 | a38856315e9a35f5eb0905a10eae6840741c468a | /stix_edh/cyber_profile.py | a1b921a93c6da80b797c6892d9627ef92aadfe44 | [
"BSD-3-Clause"
] | permissive | emmanvg/stix-edh | bbf4cebb908ad8a7c7dd8728ebfc67284f17365d | b426f9785339ab741bb9fb21d356b36193791afc | refs/heads/master | 2020-04-11T23:35:44.934139 | 2018-08-01T16:16:15 | 2018-08-01T16:16:15 | 162,172,740 | 0 | 0 | NOASSERTION | 2018-12-17T18:22:40 | 2018-12-17T18:22:39 | null | UTF-8 | Python | false | false | 4,224 | py | # Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
# python-stix
import stix
from mixbox import fields
# internal bindings
from stix_edh.bindings import cyber_profile
# Declarative mixbox entities for the EDH cyber profile (urn:edm:edh:cyber:v3).
# Each TypedField descriptor maps a camelCase XML element name to a
# snake_case Python attribute via key_name.
class AccessPrivilege(stix.Entity):
    _binding = cyber_profile
    _binding_class = _binding.AccessPrivilegeType
    _namespace = 'urn:edm:edh:cyber:v3'
    privilege_action = fields.TypedField("privilegeAction", type_="stix_edh.common.NMTokens", key_name="privilege_action")
    privilege_scope = fields.TypedField("privilegeScope", type_="stix_edh.common.NMTokens", multiple=True, key_name="privilege_scope")
    rule_effect = fields.TypedField("ruleEffect", type_="stix_edh.common.NMTokens", key_name="rule_effect")
    def __init__(self):
        super(AccessPrivilege, self).__init__()
    def add_privilege_scope(self, value):
        """Wrap `value` as NMTokens and append it to privilege_scope.
        Falsy values are silently ignored."""
        # Function-scope import -- presumably to avoid a circular import
        # with stix_edh.common; confirm before moving to module level.
        from stix_edh import common
        if not value:
            return
        nmtokens = common.NMTokens(value)
        self.privilege_scope.append(nmtokens)
class ResourceDisposition(stix.Entity):
    _binding = cyber_profile
    _binding_class = _binding.ResourceDispositionType
    _namespace = 'urn:edm:edh:cyber:v3'
    disposition_date = fields.DateField("dispositionDate", key_name="disposition_date")
    disposition_process = fields.TypedField("dispositionProcess", type_="stix_edh.common.NMTokens", key_name="disposition_process")
    def __init__(self):
        super(ResourceDisposition, self).__init__()
class OriginalClassification(stix.Entity):
    _binding = cyber_profile
    _binding_class = _binding.OriginalClassificationType
    _namespace = 'urn:edm:edh:cyber:v3'
    classified_by = fields.TypedField("classifiedBy", type_="stix_edh.common.NMTokens", key_name="classified_by")
    classified_on = fields.DateField("classifiedOn", key_name="classified_on")
    classification_reason = fields.TypedField("classificationReason", type_="stix_edh.common.NMTokens", key_name="classification_reason")
    compilation_reason = fields.TypedField("compilationReason", type_="stix_edh.common.NMTokens", key_name="compilation_reason")
    def __init__(self):
        super(OriginalClassification, self).__init__()
class DerivativeClassification(stix.Entity):
    _binding = cyber_profile
    _binding_class = _binding.DerivativeClassificationType
    _namespace = 'urn:edm:edh:cyber:v3'
    classified_by = fields.TypedField("classifiedBy", type_="stix_edh.common.NMTokens", key_name="classified_by")
    classified_on = fields.DateField("classifiedOn", key_name="classified_on")
    derived_from = fields.TypedField("derivedFrom", type_="stix_edh.common.NMTokens", key_name="derived_from")
    def __init__(self):
        super(DerivativeClassification, self).__init__()
class FurtherSharing(stix.Entity):
    _binding = cyber_profile
    _binding_class = _binding.FurtherSharingType
    _namespace = "urn:edm:edh:cyber:v3"
    # NOTE(review): unlike every other ruleEffect field in this module,
    # this one has no type_= -- confirm against the EDH schema whether
    # that is intentional.
    rule_effect = fields.TypedField("ruleEffect", key_name="rule_effect")
    sharing_scope = fields.TypedField("sharingScope", type_="stix_edh.common.NMTokens", key_name="sharing_scope")
    def __init__(self):
        super(FurtherSharing, self).__init__()
class Declassification(stix.Entity):
    _binding = cyber_profile
    _binding_class = _binding.DeclassificationType
    _namespace = 'urn:edm:edh:cyber:v3'
    declass_exemption = fields.TypedField("declassExemption", type_="stix_edh.common.NMTokens", key_name="declass_exemption")
    declass_period = fields.IntegerField("declassPeriod", key_name="declass_period")
    declass_date = fields.DateField("declassDate", key_name="declass_date")
    declass_event = fields.TypedField("declassEvent", type_="stix_edh.common.NMTokens", key_name="declass_event")
    def __init__(self):
        super(Declassification, self).__init__()
class PublicRelease(stix.Entity):
    _binding = cyber_profile
    _binding_class = _binding.PublicReleaseType
    _namespace = 'urn:edm:edh:cyber:v3'
    released_by = fields.TypedField("releasedBy", type_="stix_edh.common.NMTokens", key_name="released_by")
    released_on = fields.DateField("releasedOn", key_name="released_on")
    def __init__(self):
        super(PublicRelease, self).__init__()
| [
"gback@mitre.org"
] | gback@mitre.org |
ff7fa112a3352d67dbc4074aac0cf24ea3e98617 | 7d35fcbcceb1ff4d458cef69a6eda3c5a3a5734b | /app/core/migrations/0001_initial.py | 6a57eddf491c01619605b9ae2fe151f53636b5b4 | [
"MIT"
] | permissive | hemant-mehra/UDEMY_REST_API_advance_course | 4b1ea47037633090b06c037a8abc63eea73af8a7 | c8c0298e3650e64d2fb35370fb5b8a1bae741937 | refs/heads/main | 2023-04-23T09:03:17.131445 | 2021-04-29T12:28:23 | 2021-04-29T12:28:23 | 361,781,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,710 | py | # Generated by Django 3.2 on 2021-04-27 15:24
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the custom `User` model
    # with a unique `email` as the identifier.  Generated code -- avoid
    # hand-editing beyond comments.
    initial = True
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"hemantmehra.p@gmail.com"
] | hemantmehra.p@gmail.com |
595ee2d2c462822d03e02122bed94ccf834309c7 | 5ed917ada5766c0a028914f2c15549b6a9095b53 | /pyt/bin/jupyter-labextension | 03c9c79c4c5f4cbadfdd6106350523ec20a32373 | [] | no_license | shreyanshu007/backup | eb53c3cc44b17e1bcaa42ff2f46ea9364f1bcdfc | 5a0a4c9715375ae224db8c0f427f847022a9af02 | refs/heads/master | 2023-01-12T13:33:04.174671 | 2020-06-12T06:00:53 | 2020-06-12T06:00:53 | 201,099,769 | 0 | 0 | null | 2023-01-04T06:33:29 | 2019-08-07T17:46:24 | Python | UTF-8 | Python | false | false | 250 | #!/home/shreyanshu/sem_fir/pyt/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from jupyterlab.labextensions import main
if __name__ == '__main__':
    # Setuptools-style console script: strip the "-script.pyw"/".exe"
    # suffix from argv[0], then hand off to the jupyterlab labextension CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"2016csb1060@iitrpr.ac.in"
] | 2016csb1060@iitrpr.ac.in | |
84f9eee0a4ffea06d2accb83ecb72ac8b44d7b69 | a1bd103181681e1ea0af8859585e3ce599801335 | /musictagger/handlers/numbering.py | 130631e398f3a784589e8b35021b0575309f7358 | [] | no_license | Tbsc/musictagger | 3ce82bf1cf6bd9b265d787014ce23cfb9fdd8103 | c01a3ebdb573f6a3ec5957bb876170192ecb73b8 | refs/heads/master | 2021-12-22T19:18:46.876057 | 2021-12-18T12:05:54 | 2021-12-18T12:05:54 | 99,435,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | import re
# ensure numbering always consists of at least 2 digits, adding zeros if needed

# Compiled once at import time (raw strings silence the \d escape warning)
# instead of recompiling on every call.
_CORRECT_RE = re.compile(r"\d\d.*")
_SINGLE_DIGIT_RE = re.compile(r"\d.*")


def check(filename):
    """Return `filename` with its leading track number padded to two digits
    (unchanged if it already starts with two digits or with no digit)."""
    # if numbering is correct, return the current filename unchanged
    if check_correct(filename):
        return filename
    return check_single_digit(filename)


def check_correct(filename):
    """True if `filename` already starts with at least two digits."""
    if _CORRECT_RE.match(filename):
        print("Numbering is valid")
        return True
    return False


def check_single_digit(filename):
    """Prepend a '0' when `filename` starts with a single digit;
    otherwise return it unchanged."""
    if _SINGLE_DIGIT_RE.match(filename):
        print("Numbering consists of a single digit, prepending a zero")
        return "0" + filename
    return filename
return filename
| [
"danielben60@gmail.com"
] | danielben60@gmail.com |
5de03e92e379b9d36dae4594c56cd3ed06ad61cc | 8a60ed6b07bb361cea238462689f51ff2f220e0a | /Final Project/tags.py | e14052985b3bb540146a1554a48b96f0db7fb37f | [] | no_license | joshsee/P2-Data-Wrangling-with-MongoDB | 3b4b9820e7e6d1ac58006570cf2ee3330766d759 | 658d084bfd7459349018cd1bdea11ef4b3acdd24 | refs/heads/master | 2016-09-06T00:21:19.961782 | 2015-03-25T15:34:14 | 2015-03-25T15:34:14 | 29,285,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,427 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET
import pprint
import re
"""
Your task is to explore the data a bit more.
Before you process the data and add it into MongoDB, you should
check the "k" value for each "<tag>" and see if they can be valid keys in MongoDB,
as well as see if there are any other potential problems.
We have provided you with 3 regular expressions to check for certain patterns
in the tags. As we saw in the quiz earlier, we would like to change the data model
and expand the "addr:street" type of keys to a dictionary like this:
{"address": {"street": "Some value"}}
So, we have to see if we have such tags, and if we have any tags with problematic characters.
Please complete the function 'key_type'.
"""
lower = re.compile(r'^([a-z]|_)*$')
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
def key_type(element, keys):
    """Classify the 'k' attribute of a <tag> element and bump that counter.

    BUG FIX: this function contained unresolved git merge-conflict markers
    (<<<<<<< / ======= / >>>>>>>), which made the module unparseable. The
    counting logic common to both sides is kept; the HEAD side's debug
    prints are dropped.
    """
    if element.tag == "tag":
        for name, value in element.items():
            if name == 'k':
                if lower.search(value):
                    keys["lower"] += 1
                elif lower_colon.search(value):
                    keys["lower_colon"] += 1
                elif problemchars.search(value):
                    keys["problemchars"] += 1
                else:
                    keys["other"] += 1
    return keys
def process_map(filename):
    """Tally key categories across every element of the OSM file."""
    counts = {"lower": 0, "lower_colon": 0, "problemchars": 0, "other": 0}
    for _, elem in ET.iterparse(filename):
        counts = key_type(elem, counts)
    return counts
def test():
    """Print the key-category tally for the Hong Kong OSM extract.

    BUG FIX: this function contained unresolved git merge-conflict markers;
    resolved in favour of the origin/master side (full extract filename,
    pprint enabled, quiz assertion left commented out because it does not
    hold for the full data set).
    """
    # You can use another testfile 'map.osm' to look at your solution
    # Note that the assertions will be incorrect then.
    keys = process_map('hong-kong_china.osm')
    pprint.pprint(keys)
    # assert keys == {'lower': 5, 'lower_colon': 0, 'other': 1, 'problemchars': 1}
if __name__ == "__main__":
test() | [
"ychian@gmail.com"
] | ychian@gmail.com |
a5dc301a477f897e6ebee27e6add64e1ac8c514c | 173ccb51fbf21555a4bc3e7f3f1e28c02a6c3499 | /apps/goods/views.py | dcb59e4890bee3e984309bedf473ed8fce8b1e1e | [] | no_license | lingyunds/myproject | ec9bcb9a5917bd73377ecedc112f48cac62fb246 | d8bf8c784f434680b9584d84387f4a428b69ae1c | refs/heads/master | 2023-04-22T17:45:07.249340 | 2021-05-04T09:13:34 | 2021-05-04T09:13:34 | 363,365,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,484 | py | from django.shortcuts import render,redirect
from django.urls import reverse
from django.views import View
from django_redis import get_redis_connection
from django.core.cache import cache
from django.core.paginator import Paginator
from apps.goods.models import GoodsSKU,IndexGoodsBanner,IndexPromotionBanner,GoodsType,IndexTypeGoodsBanner
from apps.order.models import OrderGoods
# Create your views here.
class Index(View):
    """Front page: cached catalogue sections plus a per-user cart count."""
    def get(self,request):
        # Try the pre-assembled page context from the cache first.
        context = cache.get('index_data')
        if context is None:
            # Goods categories shown on the front page.
            types = GoodsType.objects.all()
            # Carousel goods banners, in display order.
            goods_banners = IndexGoodsBanner.objects.all().order_by('index')
            # Carousel promotion banners, in display order.
            promotion_banners = IndexPromotionBanner.objects.all().order_by('index')
            # Per-category showcase rows (display_type 1 = image, 0 = title).
            # NOTE: the loop variable 'type' shadows the builtin.
            for type in types:
                image_banners = IndexTypeGoodsBanner.objects.filter(type=type,display_type=1).order_by('index')
                title_banners = IndexTypeGoodsBanner.objects.filter(type=type,display_type=0).order_by('index')
                type.image_banners = image_banners
                type.title_banners = title_banners
            context = {'types':types,
                       'goods_banners':goods_banners,
                       'promotion_banners':promotion_banners,
                       }
            # No cached copy existed: cache the assembled context for an hour.
            cache.set('index_data',context,3600)
        # The cart count is user-specific, so it is computed on every request.
        user = request.user
        cart_count = 0
        if user.is_authenticated:
            conn = get_redis_connection('default')
            cart_key = 'cart_%d'%user.id
            cart_count = conn.hlen(cart_key)
        context.update(cart_count=cart_count)
        return render(request,'index.html',context)
class Detail(View):
    """Goods detail page: the SKU, related SKUs, comments, and cart count."""
    def get(self,request,sku_id):
        try:
            sku = GoodsSKU.objects.get(id=sku_id)
        except GoodsSKU.DoesNotExist:
            # Unknown SKU: fall back to the index page.
            return redirect(reverse('goods:index'))
        types = GoodsType.objects.all()
        # Other SKUs of the same SPU, excluding the one being viewed.
        spu_skus = GoodsSKU.objects.filter(goods=sku.goods).exclude(id=sku_id)
        # Two newest SKUs of the same category.
        new_skus = GoodsSKU.objects.filter(type=sku.type).order_by('-create_time')[:2]
        # Only order comments that actually contain text.
        sku_comments = OrderGoods.objects.filter(sku=sku).exclude(comment='')
        user = request.user
        # BUG FIX: the counter was initialised under the wrong name
        # ('sku_count'), so anonymous users hit a NameError on 'cart_count'
        # when building the context below.
        cart_count = 0
        if user.is_authenticated:
            conn = get_redis_connection('default')
            cart_key = 'cart_%d' % user.id
            cart_count = conn.hlen(cart_key)
            # BUG FIX: browse history was recorded outside this branch, but
            # anonymous users have id None, so 'history_%d' % user.id raised.
            history_key = 'history_%d' % user.id
            conn.lrem(history_key, 0, sku_id)
            conn.lpush(history_key, sku_id)
            # Keep only the five most recent entries.
            conn.ltrim(history_key, 0, 4)
        context = {'sku':sku,
                   'types':types,
                   'spu_skus':spu_skus,
                   'new_skus':new_skus,
                   'sku_comments':sku_comments,
                   'cart_count':cart_count
                   }
        return render(request,'detail.html',context)
# /list?type_id=种类id&page=页码&sort=排序方式
# /list/种类id/页码/排序方式
# /list/种类id/页码?sort=排序方式
class List(View):
    """Category listing page with sorting and a 5-slot pagination window."""
    def get(self,request,type_id,page):
        try:
            type = GoodsType.objects.get(id=type_id)
        except GoodsType.DoesNotExist:
            return redirect(reverse('goods:index'))
        types = GoodsType.objects.all()
        # Determine the sort order requested via ?sort= .
        sort = request.GET.get('sort')
        if sort == 'price':
            order_by = 'price'
        elif sort == 'hot':
            order_by = '-sales'
        else:
            sort = 'default'
            order_by = '-id'
        skus = GoodsSKU.objects.filter(type=type).order_by('%s'%order_by)
        # Paginate the SKU list.
        # NOTE(review): one item per page looks like a leftover debug value —
        # confirm the intended page size.
        paginator = Paginator(skus,1)
        try:
            page = int(page)
        except Exception as e:
            page = 1
        if page > paginator.num_pages:
            page = 1
        # Fetch the requested page of SKUs.
        page_skus = paginator.page(page)
        # 1. fewer than 5 total pages: show every page number
        # 2. current page within the first 3: show pages 1-5
        # 3. current page within the last 3: show the last 5 pages
        # 4. otherwise: show current page +/- 2
        num_pages = paginator.num_pages
        if num_pages < 5:
            pages = range(1, num_pages + 1)
        elif page <= 3:
            pages = range(1, 6)
        elif num_pages - page <= 2:
            pages = range(num_pages - 4, num_pages + 1)
        else:
            pages = range(page - 2, page + 3)
        new_skus = GoodsSKU.objects.filter(type=type).order_by('-create_time')[:2]
        user = request.user
        cart_count = 0
        if user.is_authenticated:
            conn = get_redis_connection('default')
            cart_key = 'cart_%d' % user.id
            cart_count = conn.hlen(cart_key)
        context = {'type':type,
                   'types':types,
                   'page_skus':page_skus,
                   'new_skus': new_skus,
                   'cart_count':cart_count,
                   'sort':sort,
                   'pages':pages,
                   }
        return render(request,'list.html',context)
"200826704@qq.com"
] | 200826704@qq.com |
49db5b641d88aa13b1b34b6fcceea02798bae5a6 | cf64c598ee8ace5eb72a3992ca214b8f05383177 | /Tools/lib/cherrysoda.py | 09e40be6062bab2616daef95af54740f9d9178ca | [
"MIT"
] | permissive | brucelevis/cherrysoda-engine | 6c6efb9545a27ca8a612b56e6ffb61a8347a3923 | 64b2ff5a2a53831ded3e60ff2a28a101fa84f3fd | refs/heads/master | 2023-02-01T15:57:38.809819 | 2020-12-17T02:35:01 | 2020-12-17T02:35:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,933 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import glob
import os
import pathlib
import platform
import shutil
import subprocess
import sys
import zipfile
if sys.version_info.major == 3:
import urllib.request as urllib2
else:
import urllib2
def is_windows_system():
    """Return True when running on Windows."""
    return platform.system() == 'Windows'
def join_path(a, b):
    """Shorthand for os.path.join."""
    return os.path.join(a, b)
def abspath(a):
    """Shorthand for os.path.abspath."""
    return os.path.abspath(a)
def get_file_path(f):
    """Return the directory part of path *f*."""
    return os.path.dirname(f)
def get_file_name(f):
    """Return the final component (file name) of path *f*."""
    return os.path.basename(f)
# '.exe' suffix for tool binaries on Windows, empty elsewhere.
executable_suffix = ('', '.exe')[is_windows_system()]
# Repository root (this file lives two levels below it).
project_path = abspath(join_path(get_file_path(__file__), '../..'))
engine_path = join_path(project_path, 'Engine')
tool_path = join_path(project_path, 'Tools')
external_path = join_path(project_path, 'External')
tmp_path = join_path(project_path, 'Tmp')
bgfx_src_path = join_path(external_path, 'bgfx/bgfx/src')
# Prebuilt bgfx shader compiler shipped under Tools/bin.
shaderc = join_path(tool_path, 'bin/shaderc' + executable_suffix)
sdl2_version = '2.0.12'
sdl2_path = join_path(external_path, 'SDL2-' + sdl2_version)
def make_sure_folder_exist(f):
    """Create the parent directory of *f*, including missing ancestors."""
    parent = os.path.dirname(f)
    pathlib.Path(parent).mkdir(parents=True, exist_ok=True)
def execute_command(command):
    """Run *command* (an argv list) as a subprocess and wait for it."""
    # print('$ ' + ' '.join(command))
    subprocess.run(command)
def compile_shader(shader_source, output, platform, shader_type, include_path=None, profile=None, opt_level=None, bin2c_array=None):
    """Invoke bgfx's shaderc to compile *shader_source* into *output*.

    The optional arguments map 1:1 onto shaderc flags; *bin2c_array*
    additionally emits the binary as a C array with the given name.
    """
    command = [shaderc, '-f', shader_source, '-o', output, '--platform', platform, '--type', shader_type]
    if include_path:
        command += ['-i', include_path]
    if profile:
        command += ['--profile', profile]
    if opt_level:
        command += ['-O', str(opt_level)]
    if bin2c_array:
        command += ['--bin2c', bin2c_array]
    # Ensure the output directory exists before shaderc tries to write there.
    make_sure_folder_exist(output)
    execute_command(command)
def download_url_to(url, dest):
    """Download *url* into directory *dest* and return the local file path.

    The local name is taken from the last URL path component.
    """
    filename = url.split('/')[-1]
    print("Downloading '%s' from '%s'..." % (filename, url))
    u = urllib2.urlopen(url)
    filedest = os.path.join(dest, filename)
    make_sure_folder_exist(filedest)
    # Context manager guarantees the file is closed even if the write fails
    # (the original leaked the handle on that path).
    with open(filedest, 'wb') as f:
        f.write(u.read())
    print("Finished!")
    return filedest
def extract_zip_to(filename, dest):
    """Extract every file entry of zip archive *filename* under *dest*.

    Directory entries are skipped; parent folders are created as needed.
    """
    # Context managers close the archive and each output file even when a
    # read or write raises (the original leaked handles on errors).
    with zipfile.ZipFile(filename, 'r') as zfile:
        for ef in zfile.namelist():
            if ef.endswith('/'):
                continue
            p = os.path.join(dest, ef)
            make_sure_folder_exist(p)
            with open(p, 'wb') as f:
                f.write(zfile.read(ef))
def get_file_list_from_wildcard(wildcard):
    """Expand a glob pattern into the list of matching paths."""
    return glob.glob(wildcard)
def get_file_list_of_path(path):
    """Recursively collect the full paths of all files below *path*."""
    return [os.path.join(top, fname)
            for top, _dirs, fnames in os.walk(path)
            for fname in fnames]
def exists(path):
    """Shorthand for os.path.exists."""
    return os.path.exists(path)
def copy(src, dest):
    """Copy every file matching wildcard *src* into *dest*."""
    src_list = get_file_list_from_wildcard(src)
    for source in src_list:
        shutil.copy(source, dest)
def copytree(src, dest):
    """Recursively copy directory *src* to *dest* (dest must not exist)."""
    shutil.copytree(src, dest)
def move(src, dest):
    """Move every file matching wildcard *src* into *dest*."""
    src_list = get_file_list_from_wildcard(src)
    for source in src_list:
        shutil.move(source, dest)
def set_environment_variable(env_var, value):
    """Persistently set an environment variable via 'setx' (Windows only)."""
    command = ['setx', env_var, value]
    execute_command(command)
def write_str_file(s, dest):
    """Write string *s* to *dest*, truncating any existing file."""
    # Context manager closes the handle even if the write raises
    # (the original leaked it on that path).
    with open(dest, 'w') as f:
        f.write(s)
def read_file(file):
    """Return the entire text content of *file*."""
    # Context manager closes the handle even if the read raises.
    with open(file, 'r') as f:
        return f.read()
def replace_file_str(file, replace_list):
    """Apply (find, replacement) pairs to the file's text, in place."""
    text = read_file(file)
    for find, rep in replace_list:
        text = text.replace(find, rep)
    write_str_file(text, file)
def replace_file_name(file, replace_list):
    """Rename *file* by applying (find, replacement) pairs to its path.

    Refuses to overwrite an existing target and prints a warning instead.
    """
    s = file
    for i in replace_list:
        find, rep = i
        s = s.replace(find, rep)
    if file != s:
        if exists(s):
            print('"%s" already exists!' % (s))
            return
        move(file, s)
| [
"c.even@live.cn"
] | c.even@live.cn |
23159149d8d9130887e04ea1d4f0fd74e9ad9bd7 | 4e8ae597e7f5010c4dd7208226c3cda99b11e561 | /blog/models.py | eee192ab822d24d4e5c69c3761f3737ecc6181a8 | [] | no_license | peckzl/django_bloggy | a50999c0f527854a5af5e655155c56c1e8e051b0 | cf3e9d3b052f0504256c87c7fab8d1a199fe586a | refs/heads/master | 2021-01-10T05:28:32.091423 | 2015-11-05T00:13:44 | 2015-11-05T00:13:44 | 45,576,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | from django.db import models
from uuslug import uuslug
class Post(models.Model):
    """Blog post; the slug is (re)generated from the title on every save."""
    created_at = models.DateTimeField(auto_now_add=True)
    title = models.CharField(max_length=100)
    content = models.TextField()
    tag = models.CharField(max_length=20, blank=True, null=True)
    image = models.ImageField(upload_to="images", blank=True, null=True)
    views = models.IntegerField(default=0)  # view counter
    slug = models.CharField(max_length=100, unique=True)
    def __unicode__(self):
        return self.title
    def save(self, *args, **kwargs):
        # uuslug guarantees a unique, length-limited slug for the title.
        self.slug = uuslug(self.title, instance=self, max_length=100)
        super(Post, self).save(*args, **kwargs)
| [
"peckzl@clarkson.edu"
] | peckzl@clarkson.edu |
f3c4d9fd7986ae013086ca33b92b7e7b2de6fac1 | 831b2c2d88d03ce366c091ebc851281a20b3c567 | /clean_unsupervised/predictorUnsupervised.py | 18398a5162c9cd84ea48ea8cc360a732834bfba4 | [] | no_license | nathanieljblack/PriceRight | cb2d0ca0929f6b5791397bd1f72267da20ac0144 | c4fb112ae27f3adbd75bb88603d23db35de29ff3 | refs/heads/master | 2020-04-19T11:24:56.683210 | 2015-05-01T14:05:33 | 2015-05-01T14:05:33 | 67,873,785 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,011 | py | import pandas as pd
import re
from unicodedata import category as cat
from nltk.corpus import stopwords
from gensim import corpora, models
import operator
from collections import OrderedDict
import scipy
from scipy.cluster.vq import kmeans, vq
class Predictor(): # Note: This predictor trains the model and runs the model on the same data set
    def __init__(self, list):
        """
        :param list: a list of dictionaries as read in from a json file
        """
        # NOTE: the parameter name shadows the builtin 'list'.
        self.data = pd.DataFrame(list)
def regularize_training_corpus(self):
def clean_titles(title):
if re.search('(wtb|wanted|want|purchase|repair|buy|need|trade|replacement| \
looking|fix|cash|me|pawn|wtt|trading)', title, re.IGNORECASE):
out = "exclude" # common non-sales words
elif re.search('(ipad|ipod)', title, re.IGNORECASE):
out = "exclude" # common iPhone related items for sale
else:
out = "OK"
return out
print "\nRegularizing titles ..."
self.data['status'] = self.data.title.apply(clean_titles)
self.data = self.data[self.data.status != 'exclude']
return self.data
    def tokenize_training_corpus(self):
        """Tokenize posting titles: lower-case, strip punctuation, remove
        stopwords/sales/iPhone words and hapaxes, then drop rows whose token
        list ends up empty. Mutates and returns self.data."""
        stoplist = stopwords.words('english')
        sales_words = ['like', 'new', 'brand', 'excellent', 'condition', 'pristine', 'never', 'used', 'clean',
                       'perfect', 'great', 'sale', 'sell', 'selling', 'good', 'obo', 'warranty', 'cl', 'color',
                       'works', 'extras', 'open', 'flawless', 'bad', 'guaranteed', 'working', 'unopened'
                       ] # common sales words
        iphone_words = ['iphone', 'apple', 'icloud', 'contract', 'iphones',
                        'carrier', 'verizon', 'tmobile', 'att', 'metropcs', 'sprint', 'cricket',
                        'wireless', 'mobile', 'phone', 'smartphone', 'unlocked', 'unlock', 'locked', 'lock', 'factory',
                        'box', 'sealed', 'gsm', 'esn', '4g', 'imei', 'international', 'cracked', 'screen', 'charger',
                        '8', '16', '32', '64', '128',
                        'g', '8g', '16g', '32g', '64g', '128g',
                        'gb', '8gb', '16gb', '32gb', '64gb', '128gb',
                        'gig', '8gig', '16gig', '32gig', '64gig','128gig',
                        'gigs', '8gigs', '16gigs', '32gigs', '64gigs','128gigs',
                        'white', 'black', 'gray', 'grey', 'spacegray', 'spacegrey', 'space', 'pink', 'mint',
                        'gold', 'silver', 'blue', 'yellow', 'green', 'pink', 'whitesilver', 'blackgray', 'slate',
                        'whitegold'
                        ] # words that will appear across all iphone brands (4, 4s, 5, 5c, 5s, 6, 6+)
        custom_stoplist = sales_words + iphone_words
        # Tokenize titles
        def create_tokens(title):
            out = []
            for word in title.lower().split():
                out.append(word)
            return out
        # Remove punctuation
        def strip_punctuation(token):
            out = []
            for word in token:
                # NOTE(review): __name__ is used here as a script-vs-import
                # switch to decide whether to .decode('utf-8') first (Python 2
                # bytes handling); this breaks when the module is imported
                # under Python 3 — confirm intent.
                if __name__ == "__main__":
                    new_word = "".join(char for char in word if not cat(char).startswith('P'))
                else:
                    new_word = "".join(char for char in word.decode('utf-8') if not cat(char).startswith('P'))
                out.append(new_word)
            return out
        # Remove common words
        def remove_common_words(token):
            out = []
            for word in token:
                if word not in stoplist and word not in custom_stoplist and word != '':
                    out.append(word)
            return out
        # Remove words that appear only once
        def remove_once_words(token):
            out = []
            for word in token:
                if word not in tokens_once:
                    out.append(word)
            return out
        # Exclude postings whose tokens are empty
        def remove_empty_tokens(token):
            if token:
                out = "OK"
            else:
                out = "exclude"
            return out
        def cheat_replace(title):
            newtitle = title.replace('6 plus', '6+') # Cheat
            newtitle = newtitle.replace('6 Plus', '6+') # Cheat
            newtitle = newtitle.replace('6 PLUS', '6+') # Cheat
            newtitle = newtitle.replace('6plus', '6+') # Cheat
            newtitle = newtitle.replace('6Plus', '6+') # Cheat
            newtitle = newtitle.replace('6PLUS', '6+') # Cheat
            return newtitle
        print "Preparing Training Corpus ..."
        print " Cheating ..."
        self.data.title = self.data.title.apply(cheat_replace)
        print " Creating tokens ..."
        self.data['tokens'] = self.data.title.apply(create_tokens)
        print " Removing punctuation ..."
        self.data.tokens = self.data.tokens.apply(strip_punctuation)
        print " Removing common words ..."
        self.data.tokens = self.data.tokens.apply(remove_common_words)
        print " Removing words that appear only once ..."
        # Hapax legomena: words occurring exactly once across all titles.
        all_tokens = sum(self.data.tokens, [])
        tokens_once = set(word for word in set(all_tokens) if all_tokens.count(word) == 1)
        self.data.tokens = self.data.tokens.apply(remove_once_words)
        print " Removing empty tokens ..."
        self.data.status = self.data.tokens.apply(remove_empty_tokens)
        self.data = self.data[self.data.status != 'exclude']
        return self.data
    def train_model(self, numTopics):
        """Train TF-IDF and LSI models on the tokenized corpus.

        Returns (data, dictionary, tfidf_model, lsi_model, topics_dict);
        the dictionary and BoW corpus are also written to disk.
        """
        # Create training dictionary
        def create_dictionary(training_data):
            dictionary = corpora.Dictionary(training_data)
            dictionary.save('training_dictionary.dict') # store to disk, for later use
            return 'training_dictionary.dict'
        # Turn training corpus into sparse Bag of Words vectors
        def corpus_to_bag_of_words(training_dict, training_data):
            training_corpus_bow = [training_dict.doc2bow(token) for token in training_data]
            corpora.MmCorpus.serialize('training_corpus.mm', training_corpus_bow) # store to disk, for later use
            return 'training_corpus.mm'
        # Train TF-IDF model
        def corpus_to_tfidf(training_corp):
            tfidf_model = models.TfidfModel(training_corp)
            corpus_tfidf = tfidf_model[training_corp]
            return corpus_tfidf, tfidf_model
        # Train LSI model
        def corpus_to_lsi(dict, corpus, topics):
            lsi_model = models.LsiModel(corpus, id2word=dict, num_topics=topics) # initialize an LSI transformation
            topicWordProbMat = lsi_model.print_topics(topics)
            return lsi_model, topicWordProbMat
        # Parse gensim's printed topic strings into {topic: {rank: (weight, word)}}.
        def create_topic_dict(topic_words):
            def create_tokens(topic):
                out = []
                for word in topic.lower().split():
                    out.append(word)
                return out
            topics_list = []
            for topic in topic_words:
                topic_wds = []
                topic = topic.replace(' + ', ' ')
                topic = topic.replace('*', ' ')
                topic = topic.replace('"', '')
                topic_tokens = create_tokens(topic)
                # NOTE: relies on Python-2 integer division (len(...)/2).
                for i in range(1, len(topic_tokens)/2):
                    pair = (float(topic_tokens[2*i-2]), topic_tokens[2*i-1],)
                    topic_wds.append(pair)
                topics_list.append(topic_wds)
            for i in range(len(topics_list)):
                topics_list[i] = sorted(topics_list[i], key=operator.itemgetter(0))
            topics_dict = OrderedDict()
            i = 0
            for topic in topics_list:
                topics_dict[i] = dict(enumerate(topic))
                i += 1
            return topics_dict
        print "Training model ..."
        print " Creating training dictionary ..."
        training_dictionary_file = create_dictionary(self.data.tokens)
        training_dictionary = corpora.Dictionary.load(training_dictionary_file)
        print " Transforming training corpus into bag-of-words vectors..."
        training_corpus_file = corpus_to_bag_of_words(training_dictionary, self.data.tokens)
        training_corpus = corpora.MmCorpus(training_corpus_file)
        print " Creating TF-IDF vectors ..."
        corpus_tfidf, tfidf_model = corpus_to_tfidf (training_corpus)
        print " Training LSI model using " + str(numTopics) + " topics ..."
        lsi_model, topic_words = corpus_to_lsi(training_dictionary, corpus_tfidf, numTopics)
        print " Creating topic dictionary ..."
        topics_dict = create_topic_dict(topic_words)
        return self.data, training_dictionary, tfidf_model, lsi_model, topics_dict
    def run_model(self, training_dictionary, tfidf_model, lsi_model, num_topics, num_clusters):
        """Project every posting into LSI space and K-Means-cluster the
        vectors. Returns (data-with-'pred_bin'-column, centroids dict)."""
        # Create LSI vectors for Clustering
        def create_lsi_vectors(token, dict, tfidf_model, lsi_model):
            vec_bow = dict.doc2bow(token)
            vec_tfidf = tfidf_model[vec_bow] # convert the token to TF-IDF space
            vec_lsi = lsi_model[vec_tfidf] # convert the token to LSI space
            return vec_lsi
        # Clean LSI vectors
        def clean_lsi_vectors(lsi_vectors, tops):
            # Remove vectors if they have less than numTopics elements
            def remove_short_vectors(vec):
                if len(vec) < tops:
                    out = "exclude"
                else:
                    out = "OK"
                return out
            # Check that LSI model created vectors of proper length
            print " Checking for short LSI vectors ..."
            minLength = tops + 100
            maxLength = 0
            numTooSmall = 0
            for vector in lsi_vectors:
                if len(vector) < minLength:
                    minLength = len(vector)
                if len(vector) > maxLength:
                    maxLength = len(vector)
                if len(vector) < tops:
                    numTooSmall +=1
            print ("  MinLength = " + str(minLength) +"\n  MaxLength = " + str(maxLength))
            if numTooSmall > 0: # if lsi model fails, remove short vectors
                print " After running the LSI model, " + str(numTooSmall) + " vectors were too short."
                print " Removing short LSI vectors ..."
                self.data.status = self.data.lsiVectors.apply(remove_short_vectors)
                self.data = self.data[self.data.status != 'exclude']
            return self.data
        def cluster(numClusters):
            # Prep LSI vectors for clustering
            self.data.clusterVectors = [[x[1] for x in vector] for vector in self.data.lsiVectors]
            self.data.lsiArray = scipy.array(self.data.clusterVectors)
            # Compute K-Means
            print " Running K-Means clustering with " + str(numClusters) + " clusters ..."
            centroids, _ = kmeans(self.data.lsiArray, numClusters)
            # Assign each title to a cluster
            print " Assigning postings to their clusters ..."
            self.data['pred_bin'], _ = vq(self.data.lsiArray,centroids)
            # Save centroids
            print " Saving centroids ..."
            centroids_list = centroids.tolist()
            centroids_dict = OrderedDict()
            i = 0
            for centroid in centroids_list:
                centroids_dict[i] = dict(enumerate(centroid))
                i += 1
            for i in range(len(centroids_list)):
                centroids_dict[i] = sorted(centroids_dict[i].items(), key=operator.itemgetter(1))
            return self.data, centroids_dict
        print"Running model ..."
        print " Creating LSI vectors for clustering ..."
        self.data['lsiVectors'] = self.data.tokens.apply(create_lsi_vectors,
                                                         args=(training_dictionary, tfidf_model, lsi_model))
        print " Cleaning LSI vectors ..."
        self.data = clean_lsi_vectors(self.data.lsiVectors, num_topics)
        print" Clustering postings ..."
        self.data, centroids = cluster(num_clusters)
        return self.data, centroids
# Create predictions based on the model
    def predict(self, numTopics, numClusters):
        """Run the full pipeline: clean titles, tokenize, train TF-IDF/LSI,
        cluster, and return (data, topics_dict, centroids)."""
        self.data = self.regularize_training_corpus()
        self.data = self.tokenize_training_corpus()
        self.data, training_dictionary, tfidf_model, lsi_model, topics_dict = self.train_model(numTopics)
        self.data, centroids = self.run_model(training_dictionary, tfidf_model, lsi_model, numTopics, numClusters)
        self.data = self.data.drop(['status', 'lsiVectors'], axis=1) # keep tokens for now
        return self.data, topics_dict, centroids
"marguerite@oneto.us"
] | marguerite@oneto.us |
91e700c2b68497c13aa063de195ee61e65a3dc8e | 2552b11e55ebeab1ceb16f43ac2117e67292616d | /corpus_health/spiders/spider_medlive.py | e8e18783254833aaa1ce4082915b8919205eb8f3 | [] | no_license | bzqweiyi/corpus | 7271fc93ad0c91dcff90de9c123cd9aa90222ff3 | 69793804161361bf79944043aa786e96d3c14c9d | refs/heads/master | 2020-04-02T02:12:32.808658 | 2018-10-20T11:12:50 | 2018-10-20T11:12:50 | 153,897,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,815 | py | #!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
"""
中国好中医网:祛湿
James
"""
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from corpus_health.items import ArticlespiderItem
from scrapy_redis.spiders import RedisCrawlSpider
from scrapy_redis.spiders import Spider
from scrapy.http import Request
from scrapy.http import FormRequest
import requests
import re
from corpus_health.Util.LogHandler import LogHandler
logger = LogHandler(__name__, stream=True)
class Ask999Spider(RedisCrawlSpider):
    """Redis-fed crawler for medlive.cn news articles."""
    # Let these error statuses reach our callbacks instead of being dropped.
    handle_httpstatus_list = [404, 403, 500]
    name = 'ymt'
    allowed_domains = ['www.medlive.cn/']
    # NOTE(review): scrapy expects start_urls to be a list of URLs; a bare
    # string here is presumably unused because redis_key feeds the spider —
    # confirm.
    start_urls = "http://news.medlive.cn/all/info-progress/list.html?ver=branch"
    # "http://www.zghzyw.com/zyrm/fx/" # breast-enhancement section
    # "http://www.zghzyw.com/qushi" # dampness-removal section
    redis_key = 'ymt:start_urls'
    rules = (
        Rule(LinkExtractor(allow=r"http://www.medlive.cn/\d+/$"), callback="parse", follow=False),
        # Rule(LinkExtractor(allow=()), callback="parse_detail_mongo", follow=False),
    )
    # def start_requests(self):
    #     url = ""
    #     requests = []
    #     for i in range(0, 100):
    #         formdata = {
    #             "page": str(i),
    #             "submit_type": "ajax",
    #             "ac": "research_branch",
    #             "div_type": "all",
    #             "model_type": "info",
    #             "cat_type": "research"}
    #         request = FormRequest(url, callback=self.parse, formdata=formdata)
    #         requests.append(request)
    #     return requests
    def parse(self, response):
        """Emit detail-page requests for a hard-coded list of article URLs."""
        urls = [
            "http://news.medlive.cn/infect/info-progress/show-149976_171.html",
            "http://news.medlive.cn/heart/info-progress/show-149938_129.html"]
        # f"http://news.medlive.cn/psy/info-progress/show-149946_60.html",
        # f"http://news.medlive.cn/endocr/info-progress/show-149951_46.html",
        # f"http://news.medlive.cn/endocr/info-progress/show-149948_46.html",
        # f"http://news.medlive.cn/imm/info-progress/show-149926_166.html"]
        for url in urls:
            print(f"url, {url}")
            try:
                # meta = {'dont_redirect': False}
                yield Request(url, callback=self.parse_detail_mongo, dont_filter=True)
            except Exception as e:
                print(e)
# def parse_next(self, response):
# # nextpath = '//*[@id="main"]/div[1]/div/div[' + str(i) + "]" + "/div[1]/h3/a/@href"
# # url = response.xpath(nextpath)
# urls = response.xpath('.//div[@class="u-post"]//h3/a/@href').extract()
# for url in urls:
# newpath = "http://www.zghzyw.com" + url
# print(f"newpath,{newpath}")
# yield Request(newpath, callback=self.parse_detail_mongo, dont_filter=True)
    def parse_detail_mongo(self, response):
        """Extract article url/title/content from a detail page into an item."""
        item = ArticlespiderItem()
        try:
            # time.sleep(random.uniform(1.1, 5))
            # Article url & title
            item['url'] = response.url
            print("url: ", item['url'])
            # response.xpath('//div[@class="u-post"]').extract()
            try:
                title = response.xpath('.//div[@id="content"]//div/h1/text()').extract()
                # NOTE(review): .extract() returns a list, but
                # filter_tags_blank expects a string; this raises and title
                # falls back to "" in the except below — confirm intended.
                title = self.filter_tags_blank(title)
            except Exception as e:
                title = ""
                print("title :", e)
            try:
                # Article body content; " //*[@id="content"]/div/div[3]/text()"
                content = "".join(response.xpath('.//div[@id="content"]//div/p/span/text()').extract())
                content = self.filter_tags_blank(content)
                # position = response.xpath('//body/div/div/div/div/b/text()').extract()[0]
                # category0 = response.xpath('//body/div/div/div/div/a/text()').extract()[0]
                # category1 = response.xpath('//body/div/div/div/div/a/text()').extract()[1]
                # category = position + ": >" + category0 + ">" + category1
                # category = self.filter_tags_blank(title)
                # print("category :", category)
            except Exception as e:
                content = ""
                print("content :", e)
            item['title'] = title
            item['content'] = content
            print(f"title: ,{title}")
            print(f"descText, {content}")
            # item['category'] = category
            yield item
        except Exception as e:
            print("Error2 :", e)
            logger.info("匹配信息出错。错误原因:")
            logger.info(e)
"""
去掉html标签和空格
"""
def filter_tags_blank(self, str):
p = re.compile('<[^>]+>').sub("", str)
return "".join(p.split())
| [
"bzqweiyi@163.com"
] | bzqweiyi@163.com |
d37a57bf782e04065867f737dfe764d33b9abd38 | da54e1256c8e66c78c069331af02a55a00f36faa | /new_python/day2/ch6_1.py | 3abcbed2eba1d17b92141354232f795b56841de6 | [] | no_license | tghyyhjuujki/Study2 | 5f7642d5c65176c46cf66b6341a9cdb8cd196400 | fffa315ebbe6088cdba219515c8d5ce1b5a4b8a6 | refs/heads/master | 2023-02-10T15:51:36.757719 | 2021-01-01T19:34:54 | 2021-01-01T19:34:54 | 276,801,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | import math
#a
# math.floor rounds toward negative infinity, so this evaluates to -3.
math.floor(-2.8)
#b
# round(-4.3) gives -4; abs() then yields 4.
abs(round(-4.3))
#c
# sin(34.5 rad) is a small positive value, so the ceiling is 1.
math.ceil(math.sin(34.5))
"tghyyhju@gmail.com"
] | tghyyhju@gmail.com |
396d3be1f2a5e259471ee0be5f9b6850177f96e3 | b648a0ff402d23a6432643879b0b81ebe0bc9685 | /benchmark/tslintbasic/thrift/run.py | 6b63c1b1ffc1c7036f1224f0530a63f3d6a08ca5 | [
"Apache-2.0"
] | permissive | jviotti/binary-json-size-benchmark | 4712faca2724d47d23efef241983ce875dc71cee | 165b577884ef366348bf48042fddf54aacfe647a | refs/heads/main | 2023-04-18T01:40:26.141995 | 2022-12-19T13:25:35 | 2022-12-19T13:25:35 | 337,583,132 | 21 | 1 | Apache-2.0 | 2022-12-17T21:53:56 | 2021-02-10T01:18:05 | C++ | UTF-8 | Python | false | false | 581 | py | def encode(json, schema):
payload = schema.Main()
payload.rules = schema.Rules()
payload.rules.orderedImports = schema.OrderedImports()
payload.rules.orderedImports.options = schema.Options()
payload.rules.orderedImports.options.groupedImports = \
json['rules']['ordered-imports']['options']['grouped-imports']
return payload
def decode(payload):
    """Rebuild the tslint-style JSON dict from a Thrift Main payload."""
    grouped = payload.rules.orderedImports.options.groupedImports
    return {
        'rules': {
            'ordered-imports': {
                'options': {'grouped-imports': grouped},
            },
        },
    }
| [
"jv@jviotti.com"
] | jv@jviotti.com |
5eb3ad1011c8c6cf69038228cced51111ccd68ab | e6d24866b239362b84d4192ece761c9919f8f323 | /HashCode 2018/main.py | 386a27c316391568ed76b6639ecdaad6d858ee33 | [] | no_license | CodiceLoco/code-challenges | ef6f9d2bd1770dea21d9a419ea10c47f5ff96d86 | 4fd265d86cdaa05eac756d8a07b02c98fe88e50f | refs/heads/master | 2020-03-22T16:04:53.629023 | 2019-05-08T13:49:32 | 2019-05-08T13:49:32 | 140,301,446 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | from solution import try_to_solve, read_file
from scoring import JudgeSystem
from os import listdir
INPUT_FOLDER = 'input'
OUTPUT_FOLDER = 'output'
# Problem names derived from the input file names, without extensions.
FILES = sorted(map(lambda f: f.split('.')[0], listdir(INPUT_FOLDER)))
INPUT_FILES = list(map(lambda f: f'{INPUT_FOLDER}/{f}.in', FILES))
OUTPUT_FILES = list(map(lambda f: f'{OUTPUT_FOLDER}/{f}.out', FILES))
total = 0
# Solve every input, score the produced output, and accumulate the total.
for input_file, output_file in zip(INPUT_FILES, OUTPUT_FILES):
    params, ride = read_file(input_file)
    try_to_solve(params, ride, output_file)
    j = JudgeSystem(input_file, output_file)
    score = int(j.score)
    total += score
    print(f'Scored {score} points with {input_file}')
print(f'Total: {total} points')
| [
"marcon.niccolo@gmail.com"
] | marcon.niccolo@gmail.com |
6343515d83aa5cbb5afcc7c49c954ed62aa5cfbb | ef147caaf28199b7961ec40320df4ff080ff3146 | /tsdownloaderv1.3.py | 364353d72db960c53b5a5896b25f697354c677d3 | [] | no_license | DicksonC96/TS-video-downloader | 6891868ee42476d99413dd8cb4a563ab922512de | 6917f7a32b5c9e0cbef8d587e24e075f940b20ed | refs/heads/main | 2023-05-12T14:02:00.242037 | 2021-06-02T07:24:09 | 2021-06-02T07:24:09 | 362,239,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | import requests
import sys
def main(url, fname, mini=0, maxi=5000):
    """Concatenate numbered segments <url><i>.ts into <fname>.ts.

    Segments below *mini* are fetched unconditionally; the rest are fetched
    until a tiny (error-page-sized) response marks the end of the stream.
    """
    with open(fname+".ts", 'wb') as f:
        if not mini==0:
            # First phase: segments [0, mini) with no end-of-stream check.
            for i in range(mini):
                r = requests.get(url+str(i)+".ts", stream=True)
                sys.stdout.write("Downloading "+str(i)+".ts ...\n")
                sys.stdout.flush()
                for chunk in r.iter_content(chunk_size=None):
                    if chunk:
                        f.write(chunk)
                r.close()
        for i in range(mini, maxi):
            r = requests.get(url+str(i)+".ts", stream=True)
            # A body under 179 bytes is taken to be the server's
            # "no such segment" page, i.e. the end of the stream.
            if len(r.content) < 179:
                print("Download finished with "+str(i+1)+" iterations.")
                break
            else:
                # NOTE(review): r.content above already consumed the stream,
                # so iter_content replays the cached body — confirm this is
                # intentional given stream=True.
                sys.stdout.write("Downloading "+str(i)+".ts ...\n")
                sys.stdout.flush()
                for chunk in r.iter_content(chunk_size=None):
                    if chunk:
                        f.write(chunk)
                r.close()
# Module-level invocation: downloads segments 1300-1399 on import.
url = "https://abcd.voxzer.org/stream/608bd4cc0b8bd18237c8fc6d/1080/index"
filename = "Tom Clancy's Without Remorse"
### main(url, filename, minimum_iteration, maximum_iteration)
main(url, filename, 1300, 1400)
"66625723+Dickson96@users.noreply.github.com"
] | 66625723+Dickson96@users.noreply.github.com |
64e5d54acec6c968bbdc376004abbfcebe376ec6 | a4fba2d2aecbf299d7c32741b5572ed8cca6600c | /scrapydeme/car_scrapy/car_scrapy/spiders/autohome_spider.py | 23663243541210aee441e3f8eb8f52ace5b9fa98 | [] | no_license | rookiexjl/scrapydemo | 22a42565b1054d5f06512c7b286bb14858d0c141 | dc1a373fb583d659898a50f9e6ddf0dc0bb4c69d | refs/heads/master | 2021-01-23T06:01:15.220988 | 2017-06-12T03:18:13 | 2017-06-12T03:18:13 | 93,006,589 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,020 | py | # coding=utf-8
from scrapy import Request
from scrapy import Spider
from car_scrapy.items import CarScrapyItem
class AutohomeSpider(Spider):
    """Crawls autohome.com.cn brand index pages (one per letter A-Z)."""
    name = "autohome.new"
    # The index pages are strictly alphabetical; generate the 26 URLs instead
    # of maintaining a hand-written literal list (same URLs, same order).
    start_urls = [
        "http://www.autohome.com.cn/grade/carhtml/%s.html" % chr(c)
        for c in range(ord("A"), ord("Z") + 1)
    ]
def parse(self, response):
xPath = u'//ul[@class="rank-list-ul"]/li/h4/a/@href'
resList1 = response.xpath(xPath).extract()
for i in range(1, len(resList1) + 1):
# for i in range(1,2):
carURL = resList1[i - 1]
#print carURL
# baseItem['brand'] = carURL
# baseItem['brand'] = carURL meta={'brand': baseItem},
yield Request(carURL, callback=self.parse_detail)
def parse_detail(self,response):
baseItem = CarScrapyItem()
xPath = u'//div[@class="subnav-title-name"]/a/text()'
resList1 = response.xpath(xPath).extract()
baseItem['brand'] = resList1[0]
xPath = u'//div[@class="subnav-title-name"]/a/h1/text()'
name = response.xpath(xPath).extract()
if len(name)>0:
baseItem['name'] = name[0]
else:
xPath = u'//div[@class="subnav-title-name"]/a/text()'
name = response.xpath(xPath).extract()
name=name[0].split('-')
baseItem['name'] = name[1]
xPath = u'//div[@class="subnav-title-name"]/a/@href'
url = response.xpath(xPath).extract()
URL='m.autohome.com.cn'+url[0]
baseItem['url'] = URL
print baseItem
yield baseItem
| [
"18252005734@163.com"
] | 18252005734@163.com |
7fed42466b7f96d16dc21e46e0c0f0514aeb6977 | c199f949c04933205a25c71706c80a12d7ba20a6 | /webapp/models.py | 4b42c5d5b5c7dca25cc112842045a6af2d2bf9ac | [] | no_license | NepaliUtsab/NepaliUtsab.github.io | ea1909017782ba4da794b85b19ba41a9e41aa0ae | 7bd91c9cffc116e3daa11d77aa8b4276ed9e77e0 | refs/heads/master | 2020-03-23T02:17:01.233240 | 2018-07-14T17:55:16 | 2018-07-14T17:55:16 | 140,605,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | from django.db import models
# Create your models here.
class Student(models.Model):
firstName = models.CharField(max_length = 10)
lastName = models.CharField(max_length = 10)
stdId = models.IntegerField()
def __str__(self):
return self.firstName
| [
"utsab@Utsab.local"
] | utsab@Utsab.local |
9ca07e7be454f472e705453ea355ee2b9fe48e9d | f8cc2cbde9aafca45f1ecd7fe2e0296d536cf94f | /src/ls-files.py | 933b6d6ea1a18eaf2f8f7293bfc7369a7ca957bc | [] | no_license | minekoa/mgit | aa6dbc9c971e77ce0f5005e99c4ebd00a5f8feee | de137829be2cd0098309a8dfdab8097f190f9b80 | refs/heads/master | 2021-01-23T15:41:57.708857 | 2015-05-13T14:30:31 | 2015-05-13T14:30:31 | 35,554,477 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | #!/usr/bin/env python
#-*- coding: shift_jis -*-
from mgitlib import *
import sys
import os
import os.path
import struct
if __name__ == '__main__':
db = GitDB()
index = GitIndex()
with db.openIndexFile('rb') as rf:
index.unpack(rf)
for key, item in index.rows.items():
print item
| [
"mineko.orange@gmail.com"
] | mineko.orange@gmail.com |
2a7751d735e8753be513461b0ed0f324a9cfac53 | c067c06fca7a6d371828b75200ed8862dd2977ec | /simple_exercises/profiti/02.av3.py | 943796bf35155a2b269172b4af5042e20c864d15 | [
"MIT"
] | permissive | ilante/programming_immanuela_englander | 1e170ebbadf0de56c35529b0d02268ea96779178 | 45d51c99b09ae335a67e03ac5ea79fc775bdf0bd | refs/heads/master | 2020-11-25T22:54:06.492425 | 2020-07-14T16:14:27 | 2020-07-14T16:14:27 | 228,879,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | def av3num(a, b, c):
av = (a+b+c)/3
return av
print(av3num(1,2,3)) | [
"ila@Immanuelas-Air.station"
] | ila@Immanuelas-Air.station |
c30096f5fdbe8afa9c1d25074d1d0eebbdc72540 | c8414c48d7e2b21674be71aa2596d0df547d090a | /config.py | 916379b9a47b5023b6913bb77c385c0bd4d1e18d | [] | no_license | Tyux/PRA_Detection_TF | 8cd9882a42c7f02220c9812948f6df2fbd17ee4a | 35157a915cca446796ba0d3971e2b162e03c10a9 | refs/heads/master | 2022-08-02T06:11:46.876012 | 2020-05-30T21:35:18 | 2020-05-30T21:35:18 | 267,481,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,187 | py | # -*- coding: utf-8 -*-
# File: config.py
import numpy as np
import os
import pprint
import six
from tensorpack.utils import logger
from tensorpack.utils.gpu import get_num_gpu
__all__ = ['config', 'finalize_configs']
class AttrDict():
_freezed = False
""" Avoid accidental creation of new hierarchies. """
def __getattr__(self, name):
if self._freezed:
raise AttributeError(name)
if name.startswith('_'):
# Do not mess with internals. Otherwise copy/pickle will fail
raise AttributeError(name)
ret = AttrDict()
setattr(self, name, ret)
return ret
def __setattr__(self, name, value):
if self._freezed and name not in self.__dict__:
raise AttributeError(
"Config was freezed! Unknown config: {}".format(name))
super().__setattr__(name, value)
def __str__(self):
return pprint.pformat(self.to_dict(), indent=1, width=100, compact=True)
__repr__ = __str__
def to_dict(self):
"""Convert to a nested dict. """
return {k: v.to_dict() if isinstance(v, AttrDict) else v
for k, v in self.__dict__.items() if not k.startswith('_')}
def update_args(self, args):
"""Update from command line args. """
for cfg in args:
keys, v = cfg.split('=', maxsplit=1)
keylist = keys.split('.')
dic = self
for i, k in enumerate(keylist[:-1]):
assert k in dir(dic), "Unknown config key: {}".format(keys)
dic = getattr(dic, k)
key = keylist[-1]
oldv = getattr(dic, key)
if not isinstance(oldv, str):
v = eval(v)
setattr(dic, key, v)
def freeze(self, freezed=True):
self._freezed = freezed
for v in self.__dict__.values():
if isinstance(v, AttrDict):
v.freeze(freezed)
# avoid silent bugs
def __eq__(self, _):
raise NotImplementedError()
def __ne__(self, _):
raise NotImplementedError()
config = AttrDict()
_C = config # short alias to avoid coding
# mode flags ---------------------
_C.TRAINER = 'horovod' # options: 'horovod', 'replicated'
_C.MODE_MASK = False # Faster R-CNN or Mask R-CNN
_C.MODE_FPN = True
# dataset -----------------------
_C.DATA.BASEDIR = '/work/DeepLearning/Data/objects365/'
# All available dataset names are defined in `dataset/coco.py:register_coco`.
# All TRAIN dataset will be concatenated for training.
_C.DATA.TRAIN = ('objects365_train.json',)
# _C.DATA.TRAIN = ('coco_train2014', 'coco_valminusminival2014') # i.e. trainval35k
# Each VAL dataset will be evaluated separately (instead of concatenated)
_C.DATA.VAL = ('objects365_val.json',)
# _C.DATA.VAL = ('coco_minival2014',) # AKA minival2014
# _C.DATA.TEST = ('objects365_Tiny_Testset_images_list.json',)
_C.DATA.TEST = ('test',)
# These two configs will be populated later inside `finalize_configs`.
_C.DATA.NUM_CATEGORY = -1 # without the background class (e.g., 80 for COCO)
_C.DATA.CLASS_NAMES = [] # NUM_CLASS (NUM_CATEGORY+1) strings, the first is "BG".
# whether the coordinates in the annotations are absolute pixel values, or a relative value in [0, 1]
_C.DATA.ABSOLUTE_COORD = True
# Number of data loading workers.
# In case of horovod training, this is the number of workers per-GPU (so you may want to use a smaller number).
# Set to 0 to disable parallel data loading
_C.DATA.NUM_WORKERS = 1
# backbone ----------------------
_C.BACKBONE.WEIGHTS = './weights/ImageNet-R50-AlignPadding.npz'
# To train from scratch, set it to empty, and set FREEZE_AT to 0
# To train from ImageNet pre-trained models, use the one that matches your
# architecture from http://models.tensorpack.com under the 'FasterRCNN' section.
# To train from an existing COCO model, use the path to that file, and change
# the other configurations according to that model.
_C.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3] # for resnet50
# _C.BACKBONE.RESNET_NUM_BLOCKS = [2, 2, 2, 2]
# RESNET_NUM_BLOCKS = [3, 4, 23, 3] # for resnet101
_C.BACKBONE.FREEZE_AFFINE = False # do not train affine parameters inside norm layers
_C.BACKBONE.NORM = 'FreezeBN' # options: FreezeBN, SyncBN, GN, None
_C.BACKBONE.FREEZE_AT = 2 # options: 0, 1, 2. How many stages in backbone to freeze (not training)
# Use a base model with TF-preferred padding mode,
# which may pad more pixels on right/bottom than top/left.
# See https://github.com/tensorflow/tensorflow/issues/18213
# In tensorpack model zoo, ResNet models with TF_PAD_MODE=False are marked with "-AlignPadding".
# All other models under `ResNet/` in the model zoo are using TF_PAD_MODE=True.
# Using either one should probably give the same performance.
# We use the "AlignPadding" one just to be consistent with caffe2.
_C.BACKBONE.TF_PAD_MODE = False
_C.BACKBONE.STRIDE_1X1 = False # True for MSRA models
# schedule -----------------------
_C.TRAIN.NUM_GPUS = None # by default, will be set from code
_C.TRAIN.WEIGHT_DECAY = 1e-4
_C.TRAIN.BASE_LR = 1e-2 # defined for total batch size=8. Otherwise it will be adjusted automatically
_C.TRAIN.WARMUP = 1000 # in terms of iterations. This is not affected by #GPUs
_C.TRAIN.WARMUP_INIT_LR = 1e-2 * 0.33 # defined for total batch size=8. Otherwise it will be adjusted automatically
_C.TRAIN.STEPS_PER_EPOCH = 500
_C.TRAIN.STARTING_EPOCH = 1 # the first epoch to start with, useful to continue a training
# LR_SCHEDULE means equivalent steps when the total batch size is 8.
# When the total bs!=8, the actual iterations to decrease learning rate, and
# the base learning rate are computed from BASE_LR and LR_SCHEDULE.
# Therefore, there is *no need* to modify the config if you only change the number of GPUs.
_C.TRAIN.LR_SCHEDULE = [720000, 960000, 1080000] # "1x" schedule in detectron
_C.TRAIN.EVAL_PERIOD = 300 # period (epochs) to run evaluation
_C.TRAIN.CHECKPOINT_PERIOD = 5 # period (epochs) to save model
# preprocessing --------------------
# Alternative old (worse & faster) setting: 600
_C.PREPROC.TRAIN_SHORT_EDGE_SIZE = [800, 800] # [min, max] to sample from
_C.PREPROC.TEST_SHORT_EDGE_SIZE = 800
_C.PREPROC.MAX_SIZE = 1333
# mean and std in RGB order.
# Un-scaled version: [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
_C.PREPROC.PIXEL_MEAN = [123.675, 116.28, 103.53]
_C.PREPROC.PIXEL_STD = [58.395, 57.12, 57.375]
# anchors -------------------------
_C.RPN.ANCHOR_STRIDE = 16
_C.RPN.ANCHOR_SIZES = (32, 64, 128, 256, 512) # sqrtarea of the anchor box
_C.RPN.ANCHOR_RATIOS = (0.5, 1., 2.)
_C.RPN.POSITIVE_ANCHOR_THRESH = 0.7
_C.RPN.NEGATIVE_ANCHOR_THRESH = 0.3
# rpn training -------------------------
_C.RPN.FG_RATIO = 0.5 # fg ratio among selected RPN anchors
_C.RPN.BATCH_PER_IM = 256 # total (across FPN levels) number of anchors that are marked valid
_C.RPN.MIN_SIZE = 0
_C.RPN.PROPOSAL_NMS_THRESH = 0.7
# Anchors which overlap with a crowd box (IOA larger than threshold) will be ignored.
# Setting this to a value larger than 1.0 will disable the feature.
# It is disabled by default because Detectron does not do this.
_C.RPN.CROWD_OVERLAP_THRESH = 9.99
_C.RPN.HEAD_DIM = 1024 # used in C4 only
# RPN proposal selection -------------------------------
# for C4
_C.RPN.TRAIN_PRE_NMS_TOPK = 12000
_C.RPN.TRAIN_POST_NMS_TOPK = 2000
_C.RPN.TEST_PRE_NMS_TOPK = 6000
_C.RPN.TEST_POST_NMS_TOPK = 1000 # if you encounter OOM in inference, set this to a smaller number
# for FPN, #proposals per-level and #proposals after merging are (for now) the same
# if FPN.PROPOSAL_MODE = 'Joint', these options have no effect
_C.RPN.TRAIN_PER_LEVEL_NMS_TOPK = 2000
_C.RPN.TEST_PER_LEVEL_NMS_TOPK = 1000
# fastrcnn training ---------------------
_C.FRCNN.BATCH_PER_IM = 512
_C.FRCNN.BBOX_REG_WEIGHTS = [10., 10., 5., 5.] # Slightly better setting: 20, 20, 10, 10
_C.FRCNN.FG_THRESH = 0.5
_C.FRCNN.FG_RATIO = 0.25 # fg ratio in a ROI batch
# FPN -------------------------
_C.FPN.ANCHOR_STRIDES = (4, 8, 16, 32, 64) # strides for each FPN level. Must be the same length as ANCHOR_SIZES
_C.FPN.PROPOSAL_MODE = 'Level' # 'Level', 'Joint'
_C.FPN.NUM_CHANNEL = 256
_C.FPN.NORM = 'None' # 'None', 'GN'
# The head option is only used in FPN. For C4 models, the head is C5
_C.FPN.FRCNN_HEAD_FUNC = 'fastrcnn_2fc_head'
# choices: fastrcnn_2fc_head, fastrcnn_4conv1fc_{,gn_}head
_C.FPN.FRCNN_CONV_HEAD_DIM = 256
_C.FPN.FRCNN_FC_HEAD_DIM = 1024
_C.FPN.MRCNN_HEAD_FUNC = 'maskrcnn_up4conv_head' # choices: maskrcnn_up4conv_{,gn_}head
# Mask R-CNN
_C.MRCNN.HEAD_DIM = 256
_C.MRCNN.ACCURATE_PASTE = True # slightly more aligned results, but very slow on numpy
# Cascade R-CNN, only available in FPN mode
_C.FPN.CASCADE = False
_C.CASCADE.IOUS = [0.5, 0.6, 0.7]
_C.CASCADE.BBOX_REG_WEIGHTS = [[10., 10., 5., 5.], [20., 20., 10., 10.], [30., 30., 15., 15.]]
# testing -----------------------
_C.TEST.FRCNN_NMS_THRESH = 0.5
# Smaller threshold value gives significantly better mAP. But we use 0.05 for consistency with Detectron.
# mAP with 1e-4 threshold can be found at https://github.com/tensorpack/tensorpack/commit/26321ae58120af2568bdbf2269f32aa708d425a8#diff-61085c48abee915b584027e1085e1043 # noqa
_C.TEST.RESULT_SCORE_THRESH = 0.05
_C.TEST.RESULT_SCORE_THRESH_VIS = 0.5 # only visualize confident results
_C.TEST.RESULTS_PER_IM = 100
_C.freeze() # avoid typo / wrong config keys
def finalize_configs(is_training):
"""
Run some sanity checks, and populate some configs from others
"""
_C.freeze(False) # populate new keys now
if isinstance(_C.DATA.VAL, six.string_types): # support single string (the typical case) as well
_C.DATA.VAL = (_C.DATA.VAL, )
if isinstance(_C.DATA.TRAIN, six.string_types): # support single string
_C.DATA.TRAIN = (_C.DATA.TRAIN, )
# finalize dataset definitions ...
from dataset import DatasetRegistry
datasets = list(_C.DATA.TRAIN) + list(_C.DATA.VAL)
_C.DATA.CLASS_NAMES = DatasetRegistry.get_metadata(datasets[0], "class_names")
_C.DATA.NUM_CATEGORY = len(_C.DATA.CLASS_NAMES) - 1
assert _C.BACKBONE.NORM in ['FreezeBN', 'SyncBN', 'GN', 'None'], _C.BACKBONE.NORM
if _C.BACKBONE.NORM != 'FreezeBN':
assert not _C.BACKBONE.FREEZE_AFFINE
assert _C.BACKBONE.FREEZE_AT in [0, 1, 2]
_C.RPN.NUM_ANCHOR = len(_C.RPN.ANCHOR_SIZES) * len(_C.RPN.ANCHOR_RATIOS)
assert len(_C.FPN.ANCHOR_STRIDES) == len(_C.RPN.ANCHOR_SIZES)
# image size into the backbone has to be multiple of this number
_C.FPN.RESOLUTION_REQUIREMENT = _C.FPN.ANCHOR_STRIDES[3] # [3] because we build FPN with features r2,r3,r4,r5
if _C.MODE_FPN:
size_mult = _C.FPN.RESOLUTION_REQUIREMENT * 1.
_C.PREPROC.MAX_SIZE = np.ceil(_C.PREPROC.MAX_SIZE / size_mult) * size_mult
assert _C.FPN.PROPOSAL_MODE in ['Level', 'Joint']
assert _C.FPN.FRCNN_HEAD_FUNC.endswith('_head')
assert _C.FPN.MRCNN_HEAD_FUNC.endswith('_head')
assert _C.FPN.NORM in ['None', 'GN']
if _C.FPN.CASCADE:
# the first threshold is the proposal sampling threshold
assert _C.CASCADE.IOUS[0] == _C.FRCNN.FG_THRESH
assert len(_C.CASCADE.BBOX_REG_WEIGHTS) == len(_C.CASCADE.IOUS)
if is_training:
train_scales = _C.PREPROC.TRAIN_SHORT_EDGE_SIZE
if isinstance(train_scales, (list, tuple)) and train_scales[1] - train_scales[0] > 100:
# don't autotune if augmentation is on
os.environ['TF_CUDNN_USE_AUTOTUNE'] = '1'
os.environ['TF_AUTOTUNE_THRESHOLD'] = '0'
assert _C.TRAINER in ['horovod', 'replicated'], _C.TRAINER
lr = _C.TRAIN.LR_SCHEDULE
if isinstance(lr, six.string_types):
if lr.endswith("x"):
LR_SCHEDULE_KITER = {
"{}x".format(k):
[1080 * k - 720, 1080 * k - 240, 1080 * k]
for k in range(2, 10)}
LR_SCHEDULE_KITER["1x"] = [720, 960, 1080]
_C.TRAIN.LR_SCHEDULE = [x * 1000 for x in LR_SCHEDULE_KITER[lr]]
else:
_C.TRAIN.LR_SCHEDULE = eval(lr)
# setup NUM_GPUS
if _C.TRAINER == 'horovod':
import horovod.tensorflow as hvd
ngpu = hvd.size()
logger.info("Horovod Rank={}, Size={}, LocalRank={}".format(
hvd.rank(), hvd.size(), hvd.local_rank()))
else:
assert 'OMPI_COMM_WORLD_SIZE' not in os.environ
ngpu = get_num_gpu()
assert ngpu > 0, "Has to train with GPU!"
assert ngpu % 8 == 0 or 8 % ngpu == 0, "Can only train with 1,2,4 or >=8 GPUs, but found {} GPUs".format(ngpu)
else:
# autotune is too slow for inference
os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0'
ngpu = get_num_gpu()
if _C.TRAIN.NUM_GPUS is None:
_C.TRAIN.NUM_GPUS = ngpu
else:
if _C.TRAINER == 'horovod':
assert _C.TRAIN.NUM_GPUS == ngpu
else:
assert _C.TRAIN.NUM_GPUS <= ngpu
_C.freeze()
logger.info("Config: ------------------------------------------\n" + str(_C))
| [
"tong63377@163.com"
] | tong63377@163.com |
e128dcb7bacbd5c878d4d8dc67752ce9b8a0b7d1 | dbd7be725a36b22a99f3785bcdf2e3866b6e3cc7 | /tests/base.py | a2813c552c6c3d551bc6e9e5c9deea5c949d9492 | [] | no_license | MatthewMcGonagle/TSP_PictureMaker | 9aed2ddebfc8c36c5a835cd4fb3fd75a1650516c | b6df1b6e5ade27750e2974de1c46e46988fb16e9 | refs/heads/master | 2020-03-19T12:43:52.242516 | 2019-03-21T19:51:08 | 2019-03-21T19:51:08 | 136,536,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,405 | py | import unittest
import numpy as np
import sys
import fake_random
sys.path.append('..')
import tsp_draw.base
class TestAnnealerMethods(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
angles = np.linspace(0, 2 * np.pi, 8)[:-1]
self.vertices = [[np.cos(3 * angle), np.sin(3 * angle)] for angle in angles]
self.vertices = np.array(self.vertices)
self.params = {'n_steps' : 3, 'vertices' : self.vertices, 'temperature' : 0.001,
'temp_cool' : 0.99, 'rand_state' : fake_random.State([])}
def test_get_cycle(self):
annealer = tsp_draw.base.Annealer(**self.params)
true_cycle = np.concatenate([self.vertices, [self.vertices[0]]], axis = 0)
test_cycle = annealer.get_cycle()
np.testing.assert_equal(true_cycle, test_cycle)
def test_get_energy(self):
annealer = tsp_draw.base.Annealer(**self.params)
cycle = annealer.get_cycle()
diffs = cycle[1:] - cycle[:-1]
true_energy = np.linalg.norm(diffs, axis = 1).sum()
test_energy = annealer.get_energy()
self.assertEqual(true_energy, test_energy)
def test_update_state(self):
annealer = tsp_draw.base.Annealer(**self.params)
annealer._update_state()
self.assertEqual(annealer.temperature,
self.params['temperature'] * self.params['temp_cool'])
self.assertEqual(annealer.steps_processed, 1)
def test_run_proposal_trial(self):
uniform_results = np.linspace(0, 1.0, 10)
uniform_stack = list(np.flip(uniform_results))
params = self.params.copy()
params['rand_state'] = fake_random.State(uniform_stack)
annealer = tsp_draw.base.Annealer(**params)
energy_diff = 0.5 * annealer.temperature
critical_val = np.exp(-energy_diff / annealer.temperature)
test_trials = [annealer._run_proposal_trial(energy_diff) for _ in uniform_results]
true_trials = [prob < critical_val for prob in uniform_results]
self.assertEqual(test_trials, true_trials)
def test_do_warm_restart(self):
annealer = tsp_draw.base.Annealer(**self.params)
annealer.steps_processed = 5
annealer.do_warm_restart()
self.assertEqual(annealer.steps_processed, 0)
if __name__ == '__main__':
unittest.main()
| [
"mwmcgonagle@gmail.com"
] | mwmcgonagle@gmail.com |
62ce19f3d0f04ce110c1dd241445d520cdfc6c0c | 5e6d8b9989247801718dd1f10009f0f7f54c1eb4 | /sdk/python/pulumi_azure_native/containerservice/v20210901/private_endpoint_connection.py | 2c0b95aa60f3e4a88ea7d42d3e4f40430d813fb8 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vivimouret29/pulumi-azure-native | d238a8f91688c9bf09d745a7280b9bf2dd6d44e0 | 1cbd988bcb2aa75a83e220cb5abeb805d6484fce | refs/heads/master | 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,975 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PrivateEndpointConnectionArgs', 'PrivateEndpointConnection']
@pulumi.input_type
class PrivateEndpointConnectionArgs:
def __init__(__self__, *,
private_link_service_connection_state: pulumi.Input['PrivateLinkServiceConnectionStateArgs'],
resource_group_name: pulumi.Input[str],
resource_name: pulumi.Input[str],
private_endpoint: Optional[pulumi.Input['PrivateEndpointArgs']] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a PrivateEndpointConnection resource.
:param pulumi.Input['PrivateLinkServiceConnectionStateArgs'] private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] resource_name: The name of the managed cluster resource.
:param pulumi.Input['PrivateEndpointArgs'] private_endpoint: The resource of private endpoint.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
"""
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "resource_name", resource_name)
if private_endpoint is not None:
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_endpoint_connection_name is not None:
pulumi.set(__self__, "private_endpoint_connection_name", private_endpoint_connection_name)
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Input['PrivateLinkServiceConnectionStateArgs']:
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@private_link_service_connection_state.setter
def private_link_service_connection_state(self, value: pulumi.Input['PrivateLinkServiceConnectionStateArgs']):
pulumi.set(self, "private_link_service_connection_state", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="resourceName")
def resource_name(self) -> pulumi.Input[str]:
"""
The name of the managed cluster resource.
"""
return pulumi.get(self, "resource_name")
@resource_name.setter
def resource_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_name", value)
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional[pulumi.Input['PrivateEndpointArgs']]:
"""
The resource of private endpoint.
"""
return pulumi.get(self, "private_endpoint")
@private_endpoint.setter
def private_endpoint(self, value: Optional[pulumi.Input['PrivateEndpointArgs']]):
pulumi.set(self, "private_endpoint", value)
@property
@pulumi.getter(name="privateEndpointConnectionName")
def private_endpoint_connection_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private endpoint connection.
"""
return pulumi.get(self, "private_endpoint_connection_name")
@private_endpoint_connection_name.setter
def private_endpoint_connection_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_endpoint_connection_name", value)
class PrivateEndpointConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['PrivateEndpointArgs']] private_endpoint: The resource of private endpoint.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']] private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] resource_name_: The name of the managed cluster resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PrivateEndpointConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param PrivateEndpointConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PrivateEndpointConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
__props__.__dict__["private_endpoint"] = private_endpoint
__props__.__dict__["private_endpoint_connection_name"] = private_endpoint_connection_name
if private_link_service_connection_state is None and not opts.urn:
raise TypeError("Missing required property 'private_link_service_connection_state'")
__props__.__dict__["private_link_service_connection_state"] = private_link_service_connection_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if resource_name_ is None and not opts.urn:
raise TypeError("Missing required property 'resource_name_'")
__props__.__dict__["resource_name"] = resource_name_
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:containerservice/v20210901:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20200601:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20200601:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20200701:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20200701:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20200901:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20200901:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20201101:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20201101:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20201201:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20201201:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20210201:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20210201:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20210301:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20210301:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20210501:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20210501:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20210701:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20210701:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20210801:PrivateEndpointConnection"), 
pulumi.Alias(type_="azure-nextgen:containerservice/v20210801:PrivateEndpointConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpointConnection, __self__).__init__(
'azure-native:containerservice/v20210901:PrivateEndpointConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
"""
Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
__props__.__dict__["name"] = None
__props__.__dict__["private_endpoint"] = None
__props__.__dict__["private_link_service_connection_state"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the private endpoint connection.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointResponse']]:
"""
The resource of private endpoint.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Output['outputs.PrivateLinkServiceConnectionStateResponse']:
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The current provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The resource type.
"""
return pulumi.get(self, "type")
| [
"noreply@github.com"
] | vivimouret29.noreply@github.com |
dae76b5a56cfbe512236e47e5b204fddff746a73 | 4e382ae46cf997ea2dbdfcfa463a57d3e0e9ad97 | /sols/gray_code.py | 490b72b7d1576b6786b190e757dfced57e83460c | [] | no_license | hayeonk/leetcode | 5136824838eb17ed2e4b7004301ba5bb1037082f | 6485f8f9b5aa198e96fbb800b058d9283a28e4e2 | refs/heads/master | 2020-04-28T03:37:16.800519 | 2019-06-01T14:34:45 | 2019-06-01T14:34:45 | 174,943,756 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | class Solution(object):
def grayCode(self, n):
def getCode(n):
if n == 0:
return ["0"]
rest = getCode(n-1)
reverse = reversed(rest)
ret = [x + "0" for x in rest] + [x + "1" for x in reverse]
return ret
ret = getCode(n)
ret = [int(x, 2) for x in ret]
return ret | [
"31617695+hayeonk@users.noreply.github.com"
] | 31617695+hayeonk@users.noreply.github.com |
ad01571f6a61ceda4ceecc3b05110483c0b3596f | 99172548909dfea0095f27d590277b87b61d8bac | /Programmers/Level1/직사각형 별찍기.py | 4828430b91f5d67edafdd69237b146947bc2038e | [] | no_license | rheehot/Algorithm-53 | 3612e51aee15fa4bc6fecd6d2c18fa45f5572117 | 2651f95ca25645943a8c3fe40662092b4925f06b | refs/heads/master | 2023-06-24T05:35:46.465422 | 2021-07-22T14:45:15 | 2021-07-22T14:45:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | a, b = map(int, input().strip().split(' '))
rect = ("*" * a + '\n') * b
print(rect)
| [
"59171154+HelloJihyoung@users.noreply.github.com"
] | 59171154+HelloJihyoung@users.noreply.github.com |
df0b59323ca9a839dcf6b4c11f1da303ae237fac | ecd2aa3d12a5375498c88cfaf540e6e601b613b3 | /Facebook/Pro105. Construct Binary Tree from Preorder and Inorder Traversal.py | a39da533bff18e1cca864459d11a600e0252ce83 | [] | no_license | YoyinZyc/Leetcode_Python | abd5d90f874af5cd05dbed87f76885a1ca480173 | 9eb44afa4233fdedc2e5c72be0fdf54b25d1c45c | refs/heads/master | 2021-09-05T17:08:31.937689 | 2018-01-29T21:57:44 | 2018-01-29T21:57:44 | 103,157,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def buildTree(self, preorder, inorder):
        """Reconstruct a binary tree from its preorder and inorder traversals.

        :type preorder: List[int]
        :type inorder: List[int]
        :rtype: TreeNode
        """
        if not preorder:
            return None
        root_val = preorder[0]
        # The root's position in the inorder list separates the values of
        # the left subtree (before it) from the right subtree (after it).
        split = inorder.index(root_val)
        root = TreeNode(root_val)
        root.left = self.buildTree(preorder[1:split + 1], inorder[:split])
        root.right = self.buildTree(preorder[split + 1:], inorder[split + 1:])
        return root
| [
"yoyinzyc@gmail.com"
] | yoyinzyc@gmail.com |
184e8e9933bf4850ac425bc2697124c4c5776379 | 03c9cd5bd96874d6117fb17c37ac4d7450c15933 | /Opencv-Python/wechat/autojump.py | 540e6d96cb2fd16283ba2e25403877731481716d | [] | no_license | atiger808/opencv-tutorial | 603de35e97679d6beae104298ae355edfdd9036a | 2ea9bb3818284fb75f85697e36fde37b6479d1c6 | refs/heads/master | 2020-05-29T23:16:30.462022 | 2019-11-05T10:08:20 | 2019-11-05T10:08:20 | 189,425,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,751 | py | # _*_ coding: utf-8 _*_
# @Time : 2019/4/3 16:45
# @Author : Ole211
# @Site :
# @File : autojump.py
# @Software : PyCharm
import cv2
import numpy as np
import os
import time
import subprocess
import math
# os.chdir('d:\\img\\')
press_coefficient = 1.35
def get_center_coord(img):
    """Locate the player piece inside a BGR screenshot of the jump game.

    Searches the middle band (30%-70% of the image height) for the piece
    via an HSV colour mask and returns (hsv_img, color_mask, center_coord),
    with center_coord relative to the cropped band.
    NOTE(review): when no contour matches the mask the function falls
    through and implicitly returns None; callers that unpack three values
    would then fail -- confirm intended behaviour.
    """
    # Restrict the search to the vertical band where the piece can appear.
    region_lower = int(img.shape[0]*0.3)
    region_upper = int(img.shape[0]*0.7)
    region = img[region_lower:region_upper]
    hsv_img = cv2.cvtColor(region, cv2.COLOR_BGR2HSV)
    # Hand-tuned HSV bounds for the piece's colour.
    color_lower = np.array([105, 25, 45])
    color_upper = np.array([135, 125, 130])
    color_mask = cv2.inRange(hsv_img, color_lower, color_upper)
    # 3-value findContours return implies OpenCV 3.x -- TODO confirm version.
    _, contours, hierarchy = cv2.findContours(color_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours)>0:
        max_contour = max(contours, key=cv2.contourArea)
        rect = cv2.boundingRect(max_contour)
        x, y, w, h = rect
        cv2.rectangle(region, (x, y), (x+w, y+h), (0, 255, 0), 3)
        # The piece's base sits slightly above the bottom of its bounding box.
        center_coord = (x+int(w/2), y+h-20)
        cv2.circle(region, center_coord, 8, (0, 0, 255), -1)
        cv2.drawContours(region, max_contour, -1, (0, 0, 255), 3)
        # region = cv2.resize(region, (400, 800))
        # cv2.imshow('color_mask', color_mask)
        # cv2.imshow('region', region)
        # cv2.waitKey()
        # cv2.destroyAllWindows()
        return hsv_img, color_mask, center_coord
def get_box_center(img):
    """Estimate the centre of the next platform (box) to jump onto.

    Works on the same 30%-70% vertical band as get_center_coord() and
    returns (canny_img, region, box_center_coord), with the coordinate
    relative to the cropped band.
    """
    region_lower = int(img.shape[0] * 0.3)
    region_upper = int(img.shape[0] * 0.7)
    region = img[region_lower:region_upper]
    gray_img = cv2.cvtColor(region, cv2.COLOR_BGR2GRAY)
    # cv2.imshow('gray', gray_img)
    canny_img = cv2.Canny(gray_img, 75, 150)
    # Topmost edge pixel in the upper 400 rows marks the box's apex;
    # its x is taken as the mean of all edge pixels on that row.
    y_top = np.nonzero([max(row) for row in canny_img[:400]])[0][0]
    x_top = int(np.mean(np.nonzero(canny_img[y_top])))
    # The box bottom is assumed ~200 px below the apex -- TODO confirm
    # this offset holds across screen resolutions.
    y_bottom = y_top + 200
    # for row in range(y_bottom, 768):
    #     if canny_img[row, x_top] != 0:
    #         break
    box_center_coord = (x_top, (y_top + y_bottom)//2)
    cv2.circle(region, box_center_coord, 8, (0, 0, 255), -1)
    return canny_img, region, box_center_coord
def pullScreenshot():
    """Capture the device screen via adb and copy the image into the cwd."""
    for command in ('adb shell screencap -p /sdcard/autojump.png',
                    'adb pull /sdcard/autojump.png .'):
        os.system(command)
def jump(distance):
    """Long-press the screen for a duration proportional to *distance*.

    The press is issued as an adb swipe whose start and end points are
    identical, which the game interprets as a press of ``press_time`` ms.
    """
    # Consistency fix: reuse the module-level press_coefficient instead of
    # a duplicated magic 1.35, so tuning the coefficient affects every jump.
    press_time = int(distance * press_coefficient)
    cmd = 'adb shell input swipe 320 410 320 410 ' + str(press_time)
    print(cmd)
    os.system(cmd)
def beginJump():
    """Main loop: screenshot -> locate piece and target box -> jump.

    Runs forever; annotates each iteration's screenshot and saves it as
    region.png for debugging.
    """
    while True:
        pullScreenshot()
        time.sleep(2)
        img = cv2.imread('autojump.png')
        # NOTE(review): get_center_coord() actually returns
        # (hsv_img, color_mask, center_coord); the first two names are
        # swapped here, but only center_coord is used below.
        color_mask, hsv_img, center_coord = get_center_coord(img)
        canny_img, region, box_center_coord = get_box_center(img)
        # Euclidean pixel distance between piece base and box centre.
        distance = math.sqrt((box_center_coord[0] - center_coord[0]) ** 2 + (box_center_coord[1] - center_coord[1]) ** 2)
        w, h = region.shape[:2]
        text = 'press time: %.3f ms' %(max(1.35*distance, 200))
        cv2.putText(region, text, (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
        text0 = 'distance: %.3f pixels' % (distance)
        cv2.putText(region, text0, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
        cv2.line(region, center_coord, box_center_coord, (0, 0, 255), 3)
        print('棋子坐标:', center_coord)
        print('盒子坐标:', box_center_coord)
        print('距离:', distance)
        cv2.imwrite('region.png', region)
        # cv2.imshow('color_mask', color_mask)
        # cv2.imshow('hsv_img', hsv_img)
        # cv2.imshow('canny_img', canny_img)
        # cv2.imshow('region', region)
        # cv2.waitKey()
        # cv2.destroyAllWindows()
        jump(distance)
        time.sleep(0.2)
if __name__ == '__main__':
    # Entry point: start the endless screenshot -> measure -> jump loop.
    beginJump()
    # pullScreenshot()
# if __name__ == '__main__':
#     get_center_coord()
"atiger0614@163.com"
] | atiger0614@163.com |
cac6679df85dec7cd9809210f80e1ddda0f67e88 | 1b2aeb34086ffd631e630008ffc73b6f1c458e8d | /kfold.py | 4ed9cc4e0681f14208c53012d2e16279b953e4d0 | [] | no_license | thunderboom/tecent_ad | bdde7af24aacce2cbe86d02e9defefa9378e3a3b | 5c051ddd63848c767ecbc118841a953178b4111a | refs/heads/master | 2022-09-08T02:31:49.090314 | 2020-05-26T08:41:48 | 2020-05-26T08:41:48 | 265,536,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,198 | py | """根据切分形式,进行训练并验证"""
from torch.utils.data import DataLoader
from utils import AdvData
from sklearn.model_selection import train_test_split
from train_eval import model_train, model_evaluate
def cross_validation(config, model, train_data, test_data):
if config.pattern == 'cross_validation':
train_data, val_data = train_test_split(train_data, test_size=config.val_size, random_state=config.seed) #分训练集和验证集
train_data, val_data, test_data = \
AdvData(train_data, config), AdvData(val_data, config), AdvData(test_data, config)
train_loader = DataLoader(train_data, shuffle=True, batch_size=config.batch_size)
val_loader = DataLoader(val_data, shuffle=False, batch_size=config.batch_size)
test_loader = DataLoader(test_data, shuffle=False, batch_size=config.batch_size)
model_trained = model_train(config, model, train_loader, val_loader) #训练模型
predict_label = None
if config.test == True:
predict_label = model_evaluate(config, model_trained, test_loader, test=True) #对测试集进行输出
return model_trained, predict_label
| [
"470810634@qq.com"
] | 470810634@qq.com |
41997f3f3425416215fddea8290ac8569a86bb75 | 6aec91d3d881ccefd167c55417fae9bb3dd80747 | /myutils.py | 7ca5a4244d5204e041ea308ef2d425923a44f8e9 | [] | no_license | dxcv/spread_arbitrage | 8d3be4a3207e0c35eb3232e7cac9f9bd7037eba1 | 855638515f73d5e56852030639eff2d1f04b36aa | refs/heads/master | 2020-05-22T21:42:00.371973 | 2017-07-22T02:19:12 | 2017-07-22T02:19:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,955 | py | # -*- coding:utf-8 -*-
import logging
import sys
import json
import time
# Account selector constants understood by get_account().
_MaZhaoYang = 0
_YangRui = 1
_NanHuaQiHuo = 2
# Contract-code pairs per index-future product
# (presumably front/next month expiries -- verify against broker data).
_Instruments = {
    "IC": ["IC1707", "IC1708"],
    "IH": ["IH1707", "IH1708"],
    "IF": ["IF1707", "IF1708"]
}
def decode(text):
    # Transcode a GB2312-encoded byte string to UTF-8.
    # Python 2 semantics: str.decode() only exists on py2 byte strings.
    return text.decode('gb2312').encode('utf-8')
def get_logger(logger_name, output_file):
    """Create (or fetch) a logger that writes to *output_file* and stdout.

    Fix: repeated calls with the same ``logger_name`` previously stacked
    extra handlers onto the shared logger, duplicating every message.
    Handlers are now attached only once per logger.
    """
    logger = logging.getLogger(logger_name)
    # Shared output format for both handlers.
    formatter = logging.Formatter('%(asctime)s [%(levelname)-8s]: %(message)s')
    if not logger.handlers:
        # File handler
        file_handler = logging.FileHandler(output_file)
        file_handler.setFormatter(formatter)
        # Console handler
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.formatter = formatter
        logger.addHandler(file_handler)
        logger.addHandler(console_handler)
    # INFO is the minimum level that will be emitted (default would be WARN).
    logger.setLevel(logging.INFO)
    return logger
def jsonload():
    """Load and return the contents of ``jsonfile.json``.

    Returns an empty dict when the file is missing, unreadable, or holds
    invalid JSON.
    """
    try:
        with open('jsonfile.json', 'r') as f:
            return json.load(f)
    except (IOError, ValueError):
        # Missing/unreadable file or malformed JSON -> behave as "no data".
        # (The original bare ``except`` also swallowed KeyboardInterrupt.)
        return {}
class data_window():
    """Fixed-size sliding window of samples with a simple trend detector.

    ``total_length`` is the maximum number of retained samples and
    ``second_length`` the short-term look-back distance (in samples).
    With the defaults (600/120) behaviour is identical to the original
    hard-coded 5-minute / 1-minute windows.
    """

    def __init__(self, total_length=600, second_length=120):
        self.length = total_length
        self.second_length = second_length
        self.datawindow = []

    def add(self, data):
        """Append *data*, evicting the oldest samples beyond capacity."""
        # The while-loop is a no-op on an empty window, so no special case
        # is needed for the first sample.
        while len(self.datawindow) > self.length - 1:
            self.datawindow.pop(0)
        self.datawindow.append(data)

    def is_grow(self):
        """Return 1 for an up-trend, -1 for a down-trend, 0 otherwise.

        0 is also returned while the window is not yet full.
        """
        if len(self.datawindow) <= self.length - 1:
            return 0
        # Fix: the short-term look-back used a hard-coded 120 samples,
        # silently ignoring the configured second_length.
        growth_short = self.datawindow[-1] - self.datawindow[-self.second_length]
        growth_long = self.datawindow[-1] - self.datawindow[0]
        if growth_short > 0 and growth_long > 0:
            return 1
        elif growth_short < 0 and growth_long < 0:
            return -1
        else:
            return 0

    def clear(self):
        """Drop all buffered samples."""
        self.datawindow = []
def get_account(account):
    """Return (brokerID, userID, password, md_front, trade_front) for an account.

    :param account:
        2: NanHua futures (live endpoints, plain str credentials)
        1: YangRui SimNow demo account
        0 (or anything else): MaZhaoYang SimNow demo account
    """
    if account == 2:
        # NanHua futures live endpoints.
        return ('1008', '90095502', '222832',
                'tcp://115.238.106.253:41213', 'tcp://115.238.106.253:41205')
    # SimNow demo accounts share the broker id and server fronts
    # (credentials are byte strings here).
    broker_id = b'9999'
    md_front = b'tcp://180.168.146.187:10010'
    trade_front = b'tcp://180.168.146.187:10000'
    if account == 1:
        user_id, password = b'097138', b'285135278'
    else:
        user_id, password = b'092120', b'mzy187623'
    return broker_id, user_id, password, md_front, trade_front
if __name__ == '__main__':
    # Leftover test scaffold: TestCase is imported but never used.
    from unittest import TestCase
| [
"595403043@qq.com"
] | 595403043@qq.com |
1c732829737a797fd122d90ea40e90fd3a121ffd | 3700ac98bb8f6faf5dbcf20be2f7f5270b3429b9 | /server/test/helpers.py | a2da1815e88500c1751a8596d22b323a721663bb | [
"Apache-2.0"
] | permissive | NWCalvank/react-python-starter | 47478df0e611a04f33d08ec64e623b3bdf9f7af1 | 8bee6129f425d6284aba0a9bf1ccce7b696b837c | refs/heads/master | 2020-09-27T04:24:40.691221 | 2019-12-27T19:33:33 | 2019-12-27T19:33:33 | 226,389,820 | 0 | 0 | Apache-2.0 | 2019-12-06T21:33:35 | 2019-12-06T18:47:41 | Python | UTF-8 | Python | false | false | 282 | py | from app import db
from app.api.models.foo import Foo
# Helper function to add a sample string to Foo
def create_foo_string(foo_string):
    """Persist *foo_string* as a Foo row and return the saved instance."""
    foo_table_element = Foo(string_field=foo_string)
    db.session.add(foo_table_element)
    db.session.commit()
    return foo_table_element
| [
"noreply@github.com"
] | NWCalvank.noreply@github.com |
c96c01d4f6fed322dc79eed64cbca2c8795827f6 | d68eec2b77e934022695f8c396c2a597aa1c36be | /REST_API_v2/Users/create_user_contact_method.py | 9f5acf27bf4dd7199325b57e5ed4581d3a72fca9 | [] | no_license | drummerweed/API_Python_Examples | 088570dbef5a7ba6c053cdf4081430d2d7d55417 | 641784c53628daeceb6f6735ad0de576413c35d8 | refs/heads/master | 2020-04-04T21:13:43.391354 | 2018-08-22T17:37:53 | 2018-08-22T17:37:53 | 156,279,682 | 1 | 0 | null | 2018-11-05T20:37:49 | 2018-11-05T20:37:49 | null | UTF-8 | Python | false | false | 2,653 | py | #!/usr/bin/env python
#
# Copyright (c) 2016, PagerDuty, Inc. <info@pagerduty.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of PagerDuty Inc nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL PAGERDUTY INC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import requests
import json
# Script configuration -- these are sample placeholder values.
# Update to match your API key
API_KEY = '3c3gRvzx7uGfMYEnWKvF'
# Update to match ID of user you want to update
ID = 'P0H7Y7J'
# Update to match your chosen parameters
TYPE = 'email_contact_method' # Can be one of email_contact_method, sms_contact_method, phone_contact_method, or push_notification_contact_method # NOQA
ADDRESS = 'insert_email@here.com'
LABEL = 'Work'
def create_user_contact_method():
    """POST a new contact method for user ``ID`` via the PagerDuty v2 API.

    Python 2 script: uses print statements and the module-level
    configuration constants (API_KEY, ID, TYPE, ADDRESS, LABEL) above.
    """
    url = 'https://api.pagerduty.com/users/{id}/contact_methods'.format(id=ID)
    headers = {
        'Accept': 'application/vnd.pagerduty+json;version=2',
        'Authorization': 'Token token={token}'.format(token=API_KEY),
        'Content-type': 'application/json'
    }
    payload = {
        'contact_method': {
            'type': TYPE,
            'address': ADDRESS,
            'label': LABEL
        }
    }
    r = requests.post(url, headers=headers, data=json.dumps(payload))
    print 'Status Code: {code}'.format(code=r.status_code)
    print r.json()
if __name__ == '__main__':
    # Script entry point.
    create_user_contact_method()
| [
"lucasfepp@gmail.com"
] | lucasfepp@gmail.com |
f767561f0b3d9e8b78a4864603261931f59d4217 | 012c9396f6fd0c88769165b484f77c346b96cb56 | /software/artyS7/cmdClass.py | be3bc532207ec15d1bda90d8a099b5f7857efe07 | [] | no_license | atfienberg/artyS7 | d770209dfddc9860a57626cc8b318b4e7aba3c7c | cd29d49f6d3811e425098715f969853cfdb4872c | refs/heads/master | 2021-05-25T21:16:48.620081 | 2020-11-16T16:43:50 | 2020-11-16T16:43:50 | 253,920,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,533 | py | ################################################################
# Tyler Anderson Thu 07/25/2019_ 9:24:16.32
#
# A python script for parsing commands.
################################################################
import logging
import string
import binascii
import sys
import struct
import crcmod
class cmdClass:
    """A class for handling the command protocol.

    Packs/unpacks the big-endian 16-bit-word command frames exchanged with
    the FPGA: single reads/writes and burst reads/writes, each protected
    by a CRC16 (polynomial 0x8005, init 0xFFFF) computed over address+data.
    """

    ###########################################################
    # Data members
    # Constants
    S_ACT_DICT = {
        "swr": 0x0001,  # single write
        "srd": 0x0002,  # single read
        "bwr": 0x8001,  # burst write
        "brd": 0x8002,
    }  # burst read

    ###########################################################
    # Methods
    def __init__(self, adr_dict=None):
        # Command struct
        self.cmd = {
            "ok": True,
            "raw_cmd": "\x8f\xc7\x00\x02\x0f\xff",  # Raw command
            "raw_rsp": "",  # Raw response
            "hdr": 0x8FC7,  # header word
            "act": 0x0002,  # swr, srd, bwr, brd
            "len": 0x0001,  # packet length for burst read/write
            "adr": 0x0FFF,  # a look up for the address
            "data": 0x0000,  # data
            "crc16": 0x0000,
        }  # CRC16
        # Symbolic-name -> register-address map supplied by the caller.
        self.S_ADR_DICT = adr_dict

    def srd_raw_cmd(self):
        """Pack the single-read request (header, action, address) into raw_cmd."""
        self.cmd["raw_cmd"] = struct.pack(">H", self.cmd["hdr"])
        self.cmd["raw_cmd"] = self.cmd["raw_cmd"] + struct.pack(">H", self.cmd["act"])
        self.cmd["raw_cmd"] = self.cmd["raw_cmd"] + struct.pack(">H", self.cmd["adr"])
        # print binascii.hexlify(self.cmd['raw_cmd'])

    def srd_raw_rsp(self):
        """Split a single-read response into its data word and trailing CRC16."""
        x = int(binascii.hexlify(self.cmd["raw_rsp"]), 16)
        self.cmd["data"] = x >> 16
        self.cmd["crc16"] = x & 0xFFFF
        # print '%x' % self.cmd['data']
        # print '%x' % self.cmd['crc16']

    def single_crc16_calc(self):
        """Compute the CRC16 over the 16-bit address followed by the 16-bit data."""
        crc16 = crcmod.mkCrcFun(0x18005, rev=False, initCrc=0xFFFF, xorOut=0x0000)
        # print self.cmd['adr']
        # print type(self.cmd['adr'])
        xstr = hex(self.cmd["adr"])[2:].zfill(4)
        xstr = xstr + hex(self.cmd["data"])[2:].zfill(4)
        # print(xstr)
        checksum16 = int(hex(crc16(bytearray.fromhex(xstr)))[2:], 16)
        return checksum16

    def swr_raw_cmd(self):
        """Pack the single-write request (header, action, address, data, CRC16)."""
        self.cmd["raw_cmd"] = struct.pack(">H", self.cmd["hdr"])
        self.cmd["raw_cmd"] = self.cmd["raw_cmd"] + struct.pack(">H", self.cmd["act"])
        self.cmd["raw_cmd"] = self.cmd["raw_cmd"] + struct.pack(">H", self.cmd["adr"])
        self.cmd["raw_cmd"] = self.cmd["raw_cmd"] + struct.pack(">H", self.cmd["data"])
        self.cmd["raw_cmd"] = self.cmd["raw_cmd"] + struct.pack(">H", self.cmd["crc16"])
        # print binascii.hexlify(self.cmd['raw_cmd'])

    def single_crc16_check(self):
        """Set cmd['ok'] according to whether cmd['crc16'] matches the recomputed CRC."""
        checksum16 = self.single_crc16_calc()
        if self.cmd["crc16"] == checksum16:
            self.cmd["ok"] = True
        else:
            self.cmd["ok"] = False

    def gse_cmd_str(self):
        """Render the current command fields as a human-readable string."""
        return (
            "cmd: act = "
            + hex(self.cmd["act"])
            + ", len = "
            + hex(self.cmd["len"])
            + ", adr = "
            + hex(self.cmd["adr"])
            + ", data = "
            + hex(self.cmd["data"])
            + ", crc16 = "
            + hex(self.cmd["crc16"])
            + ", ok = "
            + str(self.cmd["ok"])
        )

    def parse_cmd(
        self,
        logging,
        s_act,
        h_act,
        act,
        s_adr,
        h_adr,
        adr,
        h_length,
        length,
        h_data,
        data,
    ):
        """Populate cmd from whichever representation of each field was given.

        Precedence per field: native int, then hex string, then symbolic
        name (looked up in S_ACT_DICT / S_ADR_DICT).  Missing address or
        action aborts the process.
        """
        # Decode the data
        if data != None:
            self.cmd["data"] = data
        elif h_data != None:
            self.cmd["data"] = int(h_data, 16)
            self.cmd["inp_h_data"] = h_data
        # print 'data is %d' % self.cmd['data']
        # Decode the length
        if length != None:
            self.cmd["len"] = length
        elif h_length != None:
            self.cmd["len"] = int(h_length, 16)
        # Decode the address
        if adr != None:
            self.cmd["adr"] = adr
        elif h_adr != None:
            self.cmd["adr"] = int(h_adr, 16)
        elif s_adr != None:
            self.cmd["adr"] = self.S_ADR_DICT[s_adr]
        else:
            logging.warning(
                "comClass: ERROR: Must specify one of s_adr, h_adr, or adr! Exiting!"
            )
            exit(-1)
        # Decode the action
        if act != None:
            self.cmd["act"] = act
        elif h_act != None:
            self.cmd["act"] = int(h_act, 16)
        elif s_act != None:
            self.cmd["act"] = self.S_ACT_DICT[s_act]
        else:
            logging.warning(
                "comClass: ERROR: Must specify either s_act or act! Exiting!"
            )
            exit(-1)

    # ATF -- burst read and write
    def bwr_raw_cmd(self):
        """Pack a burst-write request: header, action, length, address, payload, CRC16."""
        self.cmd["raw_cmd"] = struct.pack(">H", self.cmd["hdr"])
        self.cmd["raw_cmd"] = self.cmd["raw_cmd"] + struct.pack(">H", self.cmd["act"])
        self.cmd["raw_cmd"] = self.cmd["raw_cmd"] + struct.pack(">H", self.cmd["len"])
        self.cmd["raw_cmd"] = self.cmd["raw_cmd"] + struct.pack(">H", self.cmd["adr"])
        # pack in the data
        for word in self.cmd["words_array"]:
            self.cmd["raw_cmd"] = self.cmd["raw_cmd"] + struct.pack(">H", int(word, 16))
        self.cmd["raw_cmd"] = self.cmd["raw_cmd"] + struct.pack(">H", self.cmd["crc16"])

    def brd_raw_cmd(self):
        """Pack a burst-read request: header, action, length, address."""
        self.cmd["raw_cmd"] = struct.pack(">H", self.cmd["hdr"])
        self.cmd["raw_cmd"] = self.cmd["raw_cmd"] + struct.pack(">H", self.cmd["act"])
        self.cmd["raw_cmd"] = self.cmd["raw_cmd"] + struct.pack(">H", self.cmd["len"])
        self.cmd["raw_cmd"] = self.cmd["raw_cmd"] + struct.pack(">H", self.cmd["adr"])

    def brd_raw_rsp(self):
        # srd_raw_rsp works for for now,
        # but if the first register is all 0's, they will not be printed
        self.srd_raw_rsp()

    def burst_crc16_calc(self):
        """CRC16 over the address plus the whole burst payload.

        The rstrip("L") strips the Python 2 long-integer suffix from hex().
        """
        crc16 = crcmod.mkCrcFun(0x18005, rev=False, initCrc=0xFFFF, xorOut=0x0000)
        xstr = hex(self.cmd["adr"])[2:].zfill(4)
        xstr = xstr + hex(self.cmd["data"])[2:].rstrip("L").zfill(4 * self.cmd["len"])
        checksum16 = int(hex(crc16(bytearray.fromhex(xstr)))[2:], 16)
        return checksum16

    def burst_crc16_check(self):
        """Set cmd['ok'] according to whether cmd['crc16'] matches the burst CRC."""
        checksum16 = self.burst_crc16_calc()
        if self.cmd["crc16"] == checksum16:
            self.cmd["ok"] = True
        else:
            self.cmd["ok"] = False
| [
"atfienberg@psu.edu"
] | atfienberg@psu.edu |
fcd7b5300758601302ad4cce5f20949aca28de24 | 292a5ad7aa1031a00ae8b0f4b7514fd3480ab209 | /data/get_csv.py | dee2c25febe9c506dcf0ad20300fc39ff7e50463 | [] | no_license | bevarb/Autofocus-for-SPRM | d3e52140cf5bd6e5e0a4c856fa6e0449042875cc | 49ab4632b0097cd42501110da84ce2fa0e0b6285 | refs/heads/master | 2022-12-29T10:17:28.986442 | 2020-10-16T07:21:12 | 2020-10-16T07:21:12 | 268,509,896 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,635 | py | import pandas as pd
import numpy as np
import os
def get_csv(B_root, A_root, save_root, CENTER=100):
    """Index defocused frames (B) against their in-focus frame (A).

    File names are ``<seq>_..._<frame>.<ext>``.  One row is built per
    in-focus image: column ``0`` holds the A image and column ``-d``/``d``
    the B image that is ``d`` frames away from focus.  A final row counts
    the images available per defocus distance.  The table is written to
    ``<save_root>/data.csv``.

    Fix: the column labels were hard-coded to range(-100, 100) regardless
    of CENTER; they now follow the CENTER parameter.
    """
    def _seq(name):
        # Leading "<id>_" groups images belonging to the same sequence.
        return int(name.split("_")[0])

    def _frame(name):
        # Trailing "_<frame>.<ext>" is the frame number in the sequence.
        return int(name.split("_")[-1].split(".")[0])

    a_names = sorted(os.listdir(A_root), key=_seq)
    b_names = sorted(os.listdir(B_root), key=_seq)

    table = [["None"] * (CENTER * 2) for _ in a_names]
    for row, a_name in enumerate(a_names):
        table[row][CENTER] = a_name
        seq_id = _seq(a_name)
        focus_frame = _frame(a_name)
        siblings = sorted((b for b in b_names if _seq(b) == seq_id), key=_frame)
        for b_name in siblings:
            defocus = focus_frame - _frame(b_name)  # actual defocus offset
            table[row][CENTER - defocus] = b_name

    # Count images per defocus column, blanking unused cells for the CSV.
    counts = []
    for col in range(CENTER * 2):
        filled = 0
        for row in range(len(a_names)):
            if table[row][col] != "None":
                filled += 1
            else:
                table[row][col] = ""
        counts.append(filled)
    table.append(counts)

    frame = pd.DataFrame(table, columns=[str(i) for i in range(-CENTER, CENTER, 1)])
    frame.to_csv(save_root + "/" + "data.csv", index=False)
    print(frame)
def get_test_csv(root):
    """Index the 0.1-step test sequences found under ``<root>/B``.

    Row ``i`` collects the frames of sequence ``i + 1`` ordered left to
    right (sequences may have different lengths); a trailing row counts
    how many sequences supplied each frame position.  The table is
    written to ``<root>/newdata.csv``.
    """
    names = sorted(os.listdir(root + "/B"), key=lambda n: int(n.split("_")[0]))
    n_rows = int(names[-1].split("_")[0])
    grid = [["NONE"] * 100 for _ in range(n_rows)]
    for name in names:
        row = int(name.split("_")[0]) - 1
        col = int(name.split("_")[-1].split(".")[0]) - 1
        print(name, row, col)
        grid[row][col] = name
    # Per frame position, count how many sequences provided an image.
    occupancy = [sum(1 for r in range(n_rows) if grid[r][c] != "NONE")
                 for c in range(100)]
    grid.append(occupancy)
    frame = pd.DataFrame(grid, columns=[str(i) for i in range(0, 100)])
    frame.to_csv(root + "/" + "newdata.csv", index=False)
# get_test_csv("New-ROI-0.1/test1/new_for_train_2")
# Build the training index for the combined dataset (runs on import).
B_root = "New-Total-Train/for_train/B"
A_root = "New-Total-Train/for_train/A"
save_root = "New-Total-Train/for_train"
get_csv(B_root, A_root, save_root)
"49848629+bevarb@users.noreply.github.com"
] | 49848629+bevarb@users.noreply.github.com |
f8fd22a6064e8366f38550ad3d3bfbd6d473f4c1 | 0f187b981948b7bb1739c22b7b1d38f6c0fd4bb5 | /venv/bin/django-admin.py | a2eb0df4316c4287a519a1d266d93dd3836ecd12 | [] | no_license | marcosdefina/django-studies | 439680e619e50e872497c44e99d8d29e441a5741 | 1156c891954a13037efff776779ca86c0924c8de | refs/heads/master | 2020-05-18T21:24:08.371284 | 2019-05-02T22:29:54 | 2019-05-02T22:29:54 | 184,661,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | #!/media/marcosdefina/WindowsBackup/Users/marki/Documents/Estonia/Python/rp-portifolio/venv/bin/python3
from django.core import management
if __name__ == "__main__":
    # Thin venv wrapper: hand the CLI arguments to Django's dispatcher.
    management.execute_from_command_line()
| [
"marcosdefina@gmail.com"
] | marcosdefina@gmail.com |
7fb1bbcd1838101abf13096c7d71cc1156bf7060 | e3f2a0acc79f1891b93553ee6a95396edeb6fd60 | /setup.py | c9c0390cc3d9d040b7b7fc777d3544fa322b0332 | [
"Apache-2.0"
] | permissive | imaginal/openprocurement.search_plugins | 5bd23b7e17365abba9f7f33da7a5c3263c440453 | a32a5e1b54c9b02fe24fae93e2e78632f77be82a | refs/heads/master | 2020-04-11T23:30:41.704868 | 2018-12-17T18:31:07 | 2018-12-17T18:31:07 | 162,170,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | from setuptools import setup, find_packages
# Package metadata for the OpenProcurement Search plugin distribution.
setup(
    name='openprocurement.search_plugins',
    version='0.1',  # NOQA
    description="Plugin for OpenProcurement Search",
    long_description=open("README.md").read(),
    # Get more strings from
    # http://pypi.python.org/pypi?:action=list_classifiers
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
    ],
    keywords='prozorro search plugin',
    author='Volodymyr Flonts',
    author_email='flyonts@gmail.com',
    license='Apache License 2.0',
    url='https://github.com/imaginal/openprocurement.search_plugins',
    namespace_packages=['openprocurement'],
    packages=find_packages(),
    package_data={'': ['*.md', '*.txt']},
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'openprocurement.search'
    ],
    entry_points={
    }
)
| [
"flyonts@gmail.com"
] | flyonts@gmail.com |
ad09f1178489249f0bbe1ac89b8c04992d14a89e | 11d0cc8470722b737ac1d83610f227ffb98ea71a | /convert/ahf2csv.py | 806bfe4ea9ced908b9a5b9164efc7b63a5b04e65 | [] | no_license | EdoardoCarlesi/PyRCODIO | d86e5298877f08b597f92a07aac0c9b634dbfa39 | d3f84efb7aeec0032ef6bde839fe9440ee82b392 | refs/heads/master | 2021-04-03T04:58:26.219884 | 2021-03-21T12:22:19 | 2021-03-21T12:22:19 | 125,036,704 | 148 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | '''
Python Routines for COsmology and Data I/ (PyRCODIO) v0.2
Edoardo Carlesi 2020
ecarlesi83@gmail.com
ahf2csv.py: convert (and compress) AHF halo catalogs to csv files
'''
import pandas as pd
import sys
# Make the PyRCODIO helpers importable from this hard-coded location.
sys.path.insert(1, '/home/edoardo/CLUES/PyRCODIO/')
import read_files as rf
# Usage: ahf2csv.py <ahf_catalog_path> <mpi_flag>
this_ahf = sys.argv[1]
mpi = sys.argv[2]
out_file = this_ahf + '.csv'
# Read the AHF halo catalog (optionally split across MPI files) and dump as CSV.
halo_df = rf.read_ahf_halo(this_ahf, file_mpi=mpi)
halo_df.to_csv(out_file)
| [
"gatto@nanowar.it"
] | gatto@nanowar.it |
192d3de4d4523842cca9e342f1b2e292e06c8306 | 0a921dfd9e627153552b62170eba0f92a817ce5d | /tests/auger/test_configs.py | b0d656ecbe56dd70adff2abca21760be807d92de | [
"Apache-2.0"
] | permissive | mha23/a2ml | b7507729f3c54b71af8a1e4595c33563062d515e | 8fbd1be7a81be5f41564debeecb61379097f6fb8 | refs/heads/master | 2021-01-02T21:25:59.063460 | 2019-12-11T10:04:48 | 2019-12-11T10:04:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,349 | py | from .mock_rest_api import interceptor
from auger.api.cloud.experiment import AugerExperimentApi
# Canned REST payloads consumed by the mock API interceptor in the tests.
EXPERIMENT = {
    'data': {
        'name': 'iris-1.csv-experiment',
        'project_file_id': 1256,
    }
}

# Dataset statistics for the iris fixture: one categorical target plus
# four integer feature columns.
PROJECT_FILE = {
    'data': {
        'name': 'iris-1.csv',
        'id': 1256,
        'statistics': {
            'columns_count': 2, 'count': 150,
            'stat_data': [{
                'datatype': 'categorical',
                'column_name': 'species',
                'unique_values': 3
            },{
                'datatype': 'integer',
                'column_name': 'sepal_length'
            },{
                'datatype': 'integer',
                'column_name': 'sepal_width'
            },{
                'datatype': 'integer',
                'column_name': 'petal_length'
            },{
                'datatype': 'integer',
                'column_name': 'petal_width'
            }]
        },
    }
}
class TestConfigs(object):
    """Exercise AugerExperimentApi.get_experiment_settings() against mocked REST payloads."""

    def test_experiment_settings(self, project, ctx, monkeypatch):
        """Explicit auger experiment settings must flow into evaluation_options."""
        config = ctx.get_config('auger')
        config.experiment.cross_validation_folds = 55
        config.experiment.max_total_time = 606
        config.experiment.max_eval_time = 55
        config.experiment.max_n_trials = 101
        config.experiment.use_ensemble = False
        PAYLOAD = {
            'get_experiment': EXPERIMENT,
            'get_project_file': PROJECT_FILE
        }
        interceptor(PAYLOAD, monkeypatch)
        config, model_type = AugerExperimentApi(
            ctx, 'project-api', 'iris-1.csv-experiment', '1234').\
            get_experiment_settings()
        assert config['evaluation_options']['crossValidationFolds'] == 55
        assert config['evaluation_options']['max_total_time_mins'] == 606
        assert config['evaluation_options']['max_eval_time_mins'] == 55
        assert config['evaluation_options']['max_n_trials'] == 101
        assert config['evaluation_options']['use_ensemble'] == False
        # dataset-derived settings (from PROJECT_FILE statistics)
        assert config['evaluation_options']['targetFeature'] == 'species'
        assert config['evaluation_options']['featureColumns'] == \
            ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
        assert config['evaluation_options']['categoricalFeatures'] == \
            ['species']
        assert config['evaluation_options']['timeSeriesFeatures'] == []
        assert config['evaluation_options']['binaryClassification'] == False
        assert config['evaluation_options']['labelEncodingFeatures'] == []
        assert config['evaluation_options']['classification'] == True
        assert config['evaluation_options']['scoring'] == 'f1_macro'

    def test_exclude_setting(self, project, ctx, monkeypatch):
        """Columns listed in config.exclude must be dropped from featureColumns."""
        config = ctx.get_config('config')
        config.exclude = ['sepal_length']
        PAYLOAD = {
            'get_experiment': EXPERIMENT,
            'get_project_file': PROJECT_FILE
        }
        interceptor(PAYLOAD, monkeypatch)
        config, model_type = AugerExperimentApi(
            ctx, 'project-api', 'iris-1.csv-experiment', '1234').\
            get_experiment_settings()
        assert config['evaluation_options']['targetFeature'] == 'species'
        assert config['evaluation_options']['featureColumns'] == \
            ['sepal_width', 'petal_length', 'petal_width']
        assert config['evaluation_options']['categoricalFeatures'] == \
            ['species']

    def test_model_type_setting(self, project, ctx, monkeypatch):
        """model_type='regression' must disable classification and pick r2 scoring."""
        config = ctx.get_config('config')
        config.model_type = 'regression'
        config = ctx.get_config('auger')
        config.experiment.metric = None
        PAYLOAD = {
            'get_experiment': EXPERIMENT,
            'get_project_file': PROJECT_FILE
        }
        interceptor(PAYLOAD, monkeypatch)
        config, model_type = AugerExperimentApi(
            ctx, 'project-api', 'iris-1.csv-experiment', '1234').\
            get_experiment_settings()
        assert config['evaluation_options']['timeSeriesFeatures'] == []
        assert config['evaluation_options']['binaryClassification'] == False
        assert config['evaluation_options']['labelEncodingFeatures'] == []
        assert config['evaluation_options']['classification'] == False
        assert config['evaluation_options']['scoring'] == 'r2'
| [
"vlad@dplrn.com"
] | vlad@dplrn.com |
6ffd580688dd7af218faa5a26e83fabfce825f2a | 300993f8ab336b19799ad10c7520514db6d56d2f | /aafield/aafieldapp/models.py | 713013d3b46a057dd3b6beedefd197efd2d46880 | [] | no_license | pratheebapalanisami/ParkReservationSystem | 23ef1a42690f7697e8b804ab2ac1714128adcbd0 | 5bd436c1e3688b8bdcd4986135604250240c5f6e | refs/heads/master | 2022-11-30T00:34:47.598881 | 2020-08-08T20:48:09 | 2020-08-08T20:48:09 | 286,116,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,437 | py | from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils import timezone
from django.contrib.auth import get_user_model
from django.urls import reverse
from accounts.models import Profile
from aafield import settings
class Parks(models.Model):
    """A park location that hosts reservable properties."""
    Park_Name = models.CharField(max_length=50, blank=False, null=False, default=' ')
    Park_Address = models.CharField(max_length=250, blank=True, null=True, default=' ')
    County = models.CharField(max_length=50, blank=True, null=True, default=' ')
    Park_Image = models.ImageField(default='Images/park2.jpg')

    def __str__(self):
        return self.Park_Name
# Create your models here.
class Park_Properties(models.Model):
    """A bookable amenity (pool, field, hall, ...) belonging to a park."""
    Park_Name = models.ForeignKey(Parks, on_delete=models.CASCADE)
    Property_Name = models.CharField(max_length=50, default=' ', null=True, blank=True)
    Property_Description = models.CharField(max_length=50, default=' ', null=True, blank=True)
    Property_Guest_Capacity = models.IntegerField(blank=True, null=True)
    # (value, label) pairs for the location dropdown.
    Location_Choices=[('Indoor' ,'Indoor'),('Outdoor','Outdoor')]
    Property_Location= models.CharField(max_length=50,default='Indoor',choices=Location_Choices)
    Slot= models.CharField(max_length=50, default='2 hours', null=True, blank=True)
    Price = models.IntegerField(blank=True, null=True)
    Property_Image=models.ImageField(default='Images/pool.jpg')

    def __str__(self):
        return self.Property_Name
class Reservation(models.Model):
    """A customer's booking of a property for a specific date and slot."""
    Park_Name = models.ForeignKey(Parks, on_delete=models.CASCADE)
    Property_Name = models.ForeignKey(Park_Properties, on_delete=models.CASCADE)
    Customer_Name = models.ForeignKey('accounts.customuser', on_delete=models.CASCADE)
    Event_Date = models.DateField(blank=True, null=True)
    Slot = models.CharField(max_length=50, default=' ', null=True, blank=True)
    Team_Size = models.CharField(max_length=50, default=' ', null=True, blank=True)
    Status = models.CharField(max_length=50, default=' ', null=True, blank=True)

    def __str__(self):
        # Show the reserved property's name.
        return self.Property_Name.Property_Name
class Property_Status(models.Model):
    """A maintenance report filed for a property after a reservation."""
    Park_Name = models.ForeignKey(Parks, on_delete=models.CASCADE)
    Property_Name = models.ForeignKey(Park_Properties, on_delete=models.CASCADE)
    Report_TimeDate = models.DateTimeField(blank=True, null=True)
    Property_Status_Description = models.CharField(max_length=50, blank=True, null=True)
    Expenses = models.CharField(max_length=50, default=' ', null=True, blank=True)
    Maintenance_ID = models.ForeignKey('accounts.customuser', on_delete=models.CASCADE)
    Reservation_ID=models.ForeignKey(Reservation,on_delete=models.CASCADE,related_name='reservations')

    def __str__(self):
        # Show the park this report belongs to.
        return self.Park_Name.Park_Name
class Transaction(models.Model):
    """A payment made against a reservation."""
    Park_Name = models.ForeignKey(Parks, on_delete=models.CASCADE)
    Property_Name = models.ForeignKey(Park_Properties, on_delete=models.CASCADE)
    Reservation_ID = models.ForeignKey(Reservation, on_delete=models.CASCADE)
    Trans_Amount = models.CharField(max_length=50, default=' ', null=True, blank=True)
    Trans_Time_Date = models.DateTimeField(blank=True, null=True)
    Trans_Type = models.CharField(max_length=50, default=' ', null=True, blank=True)
    Transaction_Token = models.CharField(max_length=50, default=' ', null=True, blank=True)

    def __str__(self):
        # NOTE(review): Trans_Amount is nullable; __str__ would return None
        # for a null amount, which Django rejects -- confirm/guard upstream.
        return self.Trans_Amount
"54645809+pratheebapalanisami@users.noreply.github.com"
] | 54645809+pratheebapalanisami@users.noreply.github.com |
5b87225c7d50e4de10c2e589bdd51d172792eef9 | e9b3842b3d2946bb6ddc07fcb8410732ca76f613 | /mdl.py | 67f802dd819229e1e48f72eebcab1278ee6efc6d | [] | no_license | aditihaiman/final-graphics | 2187c48bf3516467996eb1264019b0d2ea393055 | 45ee0b9c2733677a45d787a56f9a0333de2ad0a7 | refs/heads/master | 2022-11-09T00:31:56.646702 | 2020-06-11T22:04:50 | 2020-06-11T22:04:50 | 265,695,784 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,997 | py | from ply import lex, yacc
# Terminal token names shared by the PLY lexer (t_* rules below) and the
# yacc grammar (p_* rules).  NOTE(review): "INT" is declared here but no
# t_INT rule exists -- plain integers are matched by t_DOUBLE instead.
tokens = (
    "STRING",
    "ID",
    "XYZ",
    "DOUBLE",
    "INT",
    "COMMENT",
    "LIGHT",
    "CONSTANTS",
    "SAVE_COORDS",
    "CAMERA",
    "AMBIENT",
    "CYLINDER",
    "PYRAMID",
    "PRISM",
    "TORUS",
    "SPHERE",
    "ELLIPSOID",
    "TUBE",
    "CONE",
    "BOX",
    "LINE",
    "MESH",
    "TEXTURE",
    "SET",
    "MOVE",
    "SCALE",
    "ROTATE",
    "BASENAME",
    "SAVE_KNOBS",
    "TWEEN",
    "FRAMES",
    "VARY",
    "PUSH",
    "POP",
    "SAVE",
    "GENERATE_RAYFILES",
    "SHADING",
    "SHADING_TYPE",
    "SET_KNOBS",
    "FOCAL",
    "DISPLAY",
    "SCREEN",
    "WEB",
    "CO"
    )
# Keyword lexeme -> token type.  t_ID consults this table so keywords
# are re-typed after matching the generic identifier pattern; note the
# token *value* keeps the lowercase source spelling.  "x"/"y"/"z" all
# map to XYZ, and the five shading modes all map to SHADING_TYPE.
reserved = {
    "x" : "XYZ",
    "y" : "XYZ",
    "z" : "XYZ",
    "screen" : "SCREEN",
    "light" : "LIGHT",
    "constants" : "CONSTANTS",
    "save_coord_system" : "SAVE_COORDS",
    "camera" : "CAMERA",
    "ambient" : "AMBIENT",
    "cylinder" : "CYLINDER",
    "pyramid" : "PYRAMID",
    "torus" : "TORUS",
    "sphere" : "SPHERE",
    "ellipsoid" : "ELLIPSOID",
    "cone" : "CONE",
    "tube" : "TUBE",
    "prism" : "PRISM",
    "box" : "BOX",
    "line" : "LINE",
    "mesh" : "MESH",
    "texture" : "TEXTURE",
    "set" : "SET",
    "move" : "MOVE",
    "scale" : "SCALE",
    "rotate" : "ROTATE",
    "basename" : "BASENAME",
    "save_knobs" : "SAVE_KNOBS",
    "tween" : "TWEEN",
    "frames" : "FRAMES",
    "vary" : "VARY",
    "push" : "PUSH",
    "pop" : "POP",
    "save" : "SAVE",
    "generate_rayfiles" : "GENERATE_RAYFILES",
    "shading" : "SHADING",
    "phong" : "SHADING_TYPE",
    "flat" : "SHADING_TYPE",
    "gouraud" : "SHADING_TYPE",
    "raytrace" : "SHADING_TYPE",
    "wireframe" : "SHADING_TYPE",
    "setknobs" : "SET_KNOBS",
    "focal" : "FOCAL",
    "display" : "DISPLAY",
    "web" : "WEB"
    }
# --- Lexer rules.  PLY gotcha: the docstring of each t_* function IS
# the token's regular expression; do not edit these strings casually.
t_ignore = " \t"
def t_ID(t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    # Re-type keywords; non-keywords stay as generic ID tokens.
    if t.value in reserved:
        t.type = reserved.get(t.value)
    return t
def t_STRING(t):
    # A dot-prefixed word, e.g. a file extension like ".png".
    r'\.[a-zA-Z_0-9]*[a-zA-Z_][a-zA-Z_0-9]*'
    return t
def t_DOUBLE(t):
    # PLY compiles rule patterns with re.VERBOSE, so the embedded
    # whitespace/newline is ignored.  The third alternative means plain
    # integers are matched here too; every numeric literal becomes a float.
    r"""\-?\d+\.\d*|\-?\.\d+ |
    \-?\d+"""
    t.value = float(t.value)
    return t
def t_COMMENT(t):
    # C++-style line comment; kept as a token (handled by p_command_comment).
    r"//.*"
    return t
def t_CO(t):
    # Colon separator used by the `mesh` command; token value is ':'.
    r":"
    return t
def t_error(t):
    # Illegal character: report and (implicitly) drop it.
    print("TOKEN ERROR: " + str(t))
# Build the lexer at import time from the rules above.
lex.lex()
#----------------------------------------------------------
# Parser-side accumulators, mutated by the p_* grammar actions and
# reset/harvested by parseFile() at the bottom of this module.
commands = []
symbols = {}
def p_input(p):
    # An input is an empty string or a command followed by more input.
    """input :
            | command input"""
    pass
def p_command_comment(p):
    # Comments parse as commands but produce nothing.
    'command : COMMENT'
    pass
def p_SYMBOL(p):
    # SYMBOL is an identifier or one of the axis keywords x/y/z.
    """SYMBOL : XYZ
            | ID"""
    p[0] = p[1]
def p_TEXT(p):
    # TEXT is a symbol or a dot-prefixed STRING (file extension).
    """TEXT : SYMBOL
            | STRING"""
    p[0] = p[1]
def p_NUMBER(p):
    # All numeric literals arrive as DOUBLE (floats) from the lexer.
    """NUMBER : DOUBLE"""
    p[0] = p[1]
def p_command_stack(p):
    # push/pop of the coordinate-system stack; no operands.
    """command : POP
               | PUSH"""
    commands.append({'op' : p[1], 'args' : None})
def p_command_screen(p):
    # `screen [w h]` -- defaults to 500x500 when no size is given.
    """command : SCREEN NUMBER NUMBER
               | SCREEN"""
    if len(p) == 2:
        commands.append({'op' : p[1], 'width' : 500, 'height': 500})
    else:
        commands.append({'op' : p[1], 'width' : p[2], 'height': p[3]})
def p_command_save(p):
    # NOTE(review): the grammar consumes a base name plus its extension
    # (a leading-dot STRING), but only p[2] is stored -- the extension
    # token p[3] is discarded.  Confirm downstream re-adds an extension.
    """command : SAVE TEXT TEXT"""
    commands.append({'op' : p[1], 'args' : [p[2]]})
def p_command_show(p):
    # `display` -- show the rendered image; no operands.
    """command : DISPLAY"""
    commands.append({'op' : p[1], 'args' : None})
# --- Shape commands.  All follow the same pattern: an optional leading
# SYMBOL names a lighting-constants entry (shifting the numeric operands
# right by one), and an optional trailing SYMBOL names a saved coordinate
# system.  The len(p)/isinstance checks disambiguate which alternative
# matched, since a bare trailing NUMBER is a float, not a str.
def p_command_cylinder(p):
    # cylinder: 5 numeric operands.
    """command : CYLINDER NUMBER NUMBER NUMBER NUMBER NUMBER
               | CYLINDER SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER
               | CYLINDER NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL
               | CYLINDER SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL"""
    cmd = {'op' : p[1], 'constants' : None, 'cs' : None, 'args':[]}
    arg_start = 2
    if isinstance(p[2], str):
        cmd['constants'] = p[2]
        arg_start = 3
    if len(p) == 8 and isinstance(p[7], str):
        cmd['cs'] = p[7]
    if len(p) == 9 and isinstance(p[8], str):
        cmd['cs'] = p[8]
    cmd['args'] = p[arg_start:arg_start+5]
    commands.append(cmd)
def p_command_tube(p):
    # tube: 6 numeric operands.
    """command : TUBE NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER
               | TUBE SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER
               | TUBE NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL
               | TUBE SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL"""
    cmd = {'op' : p[1], 'constants' : None, 'cs' : None, 'args':[]}
    arg_start = 2
    if isinstance(p[2], str):
        cmd['constants'] = p[2]
        arg_start = 3
    if len(p) == 9 and isinstance(p[8], str):
        cmd['cs'] = p[8]
    if len(p) == 10 and isinstance(p[9], str):
        cmd['cs'] = p[9]
    cmd['args'] = p[arg_start:arg_start+6]
    commands.append(cmd)
def p_command_cone(p):
    # cone: 5 numeric operands.
    """command : CONE NUMBER NUMBER NUMBER NUMBER NUMBER
               | CONE SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER
               | CONE NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL
               | CONE SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL"""
    cmd = {'op' : p[1], 'constants' : None, 'cs' : None, 'args':[]}
    arg_start = 2
    if isinstance(p[2], str):
        cmd['constants'] = p[2]
        arg_start = 3
    if len(p) == 8 and isinstance(p[7], str):
        cmd['cs'] = p[7]
    if len(p) == 9 and isinstance(p[8], str):
        cmd['cs'] = p[8]
    cmd['args'] = p[arg_start:arg_start+5]
    commands.append(cmd)
def p_command_prism(p):
    # prism: 6 numeric operands.
    """command : PRISM NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER
               | PRISM SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER
               | PRISM NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL
               | PRISM SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL"""
    cmd = {'op' : p[1], 'constants' : None, 'cs' : None, 'args':[]}
    arg_start = 2
    if isinstance(p[2], str):
        cmd['constants'] = p[2]
        arg_start = 3
    if len(p) == 9 and isinstance(p[8], str):
        cmd['cs'] = p[8]
    if len(p) == 10 and isinstance(p[9], str):
        cmd['cs'] = p[9]
    cmd['args'] = p[arg_start:arg_start+6]
    commands.append(cmd)
def p_command_pyramid(p):
    # pyramid: 8 numeric operands; same optional constants/cs pattern
    # as the other shape commands above.
    """command : PYRAMID NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER
               | PYRAMID SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER
               | PYRAMID NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL
               | PYRAMID SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL"""
    cmd = {'op' : p[1], 'constants' : None, 'cs' : None, 'args':[]}
    arg_start = 2
    if isinstance(p[2], str):
        cmd['constants'] = p[2]
        arg_start = 3
    if len(p) == 11 and isinstance(p[10], str):
        cmd['cs'] = p[10]
    if len(p) == 12 and isinstance(p[11], str):
        cmd['cs'] = p[11]
    cmd['args'] = p[arg_start:arg_start+8]
    commands.append(cmd)
def p_command_ellipsoid(p):
    # ellipsoid: 6 numeric operands.
    """command : ELLIPSOID NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER
               | ELLIPSOID SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER
               | ELLIPSOID NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL
               | ELLIPSOID SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL"""
    #print("P", p[7])
    cmd = {'op' : p[1], 'constants' : None, 'cs' : None, 'args':[]}
    arg_start = 2
    if isinstance(p[2], str):
        cmd['constants'] = p[2]
        arg_start = 3
    if len(p) == 9 and isinstance(p[8], str):
        cmd['cs'] = p[8]
    if len(p) == 10 and isinstance(p[9], str):
        cmd['cs'] = p[9]
    cmd['args'] = p[arg_start:arg_start+6]
    commands.append(cmd)
def p_command_sphere(p):
    # sphere: 4 numeric operands.
    """command : SPHERE NUMBER NUMBER NUMBER NUMBER
               | SPHERE SYMBOL NUMBER NUMBER NUMBER NUMBER
               | SPHERE NUMBER NUMBER NUMBER NUMBER SYMBOL
               | SPHERE SYMBOL NUMBER NUMBER NUMBER NUMBER SYMBOL"""
    #print("P", p[7])
    cmd = {'op' : p[1], 'constants' : None, 'cs' : None, 'args':[]}
    arg_start = 2
    if isinstance(p[2], str):
        cmd['constants'] = p[2]
        arg_start = 3
    if len(p) == 7 and isinstance(p[6], str):
        cmd['cs'] = p[6]
    if len(p) == 8 and isinstance(p[7], str):
        cmd['cs'] = p[7]
    cmd['args'] = p[arg_start:arg_start+4]
    commands.append(cmd)
def p_command_torus(p):
    # torus: 5 numeric operands.
    """command : TORUS NUMBER NUMBER NUMBER NUMBER NUMBER
               | TORUS NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL
               | TORUS SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER
               | TORUS SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL"""
    cmd = {'op' : p[1], 'constants' : None, 'cs' : None, 'args':[]}
    arg_start = 2
    if isinstance(p[2], str):
        cmd['constants'] = p[2]
        arg_start = 3
    if len(p) == 8 and isinstance(p[7], str):
        cmd['cs'] = p[7]
    if len(p) == 9 and isinstance(p[8], str):
        cmd['cs'] = p[8]
    cmd['args'] = p[arg_start:arg_start+5]
    commands.append(cmd)
def p_command_box(p):
    # box: 6 numeric operands; same optional constants/cs pattern as
    # the other shape commands.
    """command : BOX NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER
               | BOX NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL
               | BOX SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER
               | BOX SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL"""
    cmd = {'op' : p[1], 'constants' : None, 'cs' : None, 'args':[]}
    arg_start = 2
    if isinstance(p[2], str):
        cmd['constants'] = p[2]
        arg_start = 3
    if len(p) == 9 and isinstance(p[8], str):
        cmd['cs'] = p[8]
    if len(p) == 10 and isinstance(p[9], str):
        cmd['cs'] = p[9]
    cmd['args'] = p[arg_start:arg_start+6]
    commands.append(cmd)
def p_command_line(p):
    # line: two endpoints of 3 numbers each.  EACH endpoint may carry
    # its own coordinate-system SYMBOL (cs0 after the first triple,
    # cs1 at the end), and a leading SYMBOL is lighting constants.
    """command : LINE NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER
               | LINE NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL
               | LINE NUMBER NUMBER NUMBER SYMBOL NUMBER NUMBER NUMBER
               | LINE NUMBER NUMBER NUMBER SYMBOL NUMBER NUMBER NUMBER SYMBOL
               | LINE SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER
               | LINE SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL
               | LINE SYMBOL NUMBER NUMBER NUMBER SYMBOL NUMBER NUMBER NUMBER
               | LINE SYMBOL NUMBER NUMBER NUMBER SYMBOL NUMBER NUMBER NUMBER SYMBOL"""
    cmd = {'op' : p[1], 'constants' : None, 'cs0' : None, 'cs1' : None, 'args':[]}
    arg_start = 2
    if isinstance(p[2], str):
        cmd['constants'] = p[2]
        arg_start = 3
    # First endpoint, then an optional cs0 symbol, then the second endpoint.
    cmd['args'] = p[arg_start:arg_start+3]
    arg_start = arg_start+3
    if isinstance(p[arg_start], str):
        cmd['cs0'] = p[arg_start]
        arg_start+= 1
    cmd['args']+= p[arg_start:arg_start+3]
    # A trailing SYMBOL (whatever the total rule length) is cs1.
    if len(p) == 9 and isinstance(p[8], str):
        cmd['cs1'] = p[8]
    if len(p) == 10 and isinstance(p[9], str):
        cmd['cs1'] = p[9]
    if len(p) == 11 and isinstance(p[10], str):
        cmd['cs1'] = p[10]
    commands.append(cmd)
# --- Transform and animation commands.  Each optional trailing SYMBOL
# is a knob name; knobs are registered in the symbol table with the
# shape ['knob', 0] so the animation pass can fill in per-frame values.
def p_command_move(p):
    """command : MOVE NUMBER NUMBER NUMBER SYMBOL
               | MOVE NUMBER NUMBER NUMBER"""
    cmd = {'op' : p[1], 'args' : p[2:5], 'knob' : None}
    if len(p) == 6:
        cmd['knob'] = p[5]
        symbols[p[5]] = ['knob', 0]
    commands.append(cmd)
def p_command_scale(p):
    """command : SCALE NUMBER NUMBER NUMBER SYMBOL
               | SCALE NUMBER NUMBER NUMBER"""
    cmd = {'op' : p[1], 'args' : p[2:5], 'knob' : None}
    if len(p) == 6:
        cmd['knob'] = p[5]
        symbols[p[5]] = ['knob', 0]
    commands.append(cmd)
def p_command_rotate(p):
    # rotate <axis> <degrees> [knob]; the axis arrives as an XYZ token.
    """command : ROTATE XYZ NUMBER SYMBOL
               | ROTATE XYZ NUMBER"""
    cmd = {'op' : p[1], 'args' : p[2:4], 'knob' : None}
    if len(p) == 5:
        cmd['knob'] = p[4]
        symbols[p[4]] = ['knob', 0]
    commands.append(cmd)
def p_command_frames(p):
    # Total number of animation frames.
    """command : FRAMES NUMBER"""
    cmd = {'op' : p[1], 'args' : [p[2]]}
    commands.append(cmd)
def p_command_basename(p):
    # Base file name for animation frame output.
    """command : BASENAME TEXT"""
    cmd = {'op' : p[1], 'args' : [p[2]]}
    commands.append(cmd)
def p_command_vary(p):
    # vary <knob> <start_frame> <end_frame> <start_val> <end_val>
    """command : VARY SYMBOL NUMBER NUMBER NUMBER NUMBER"""
    cmd = {'op' : p[1], 'args' : p[3:], 'knob' : p[2]}
    symbols[p[2]] = ['knob', 0]
    commands.append(cmd)
def p_command_knobs(p):
    """command : SET SYMBOL NUMBER
                | SET_KNOBS NUMBER"""
    # `set <knob> <value>` sets one knob; `setknobs <value>` sets all.
    cmd = {'op' : p[1], 'args' : [], 'knob' : None}
    # BUG FIX: token values keep their lowercase source spelling (see the
    # `reserved` table), so the SET keyword arrives as 'set'.  The
    # original compared against 'SET', which never matched and sent every
    # `set` command down the setknobs branch with the knob *name* as its
    # argument.
    if p[1] == 'set':
        cmd['knob'] = p[2]
        cmd['args'].append(p[3])
        # Register the knob using the same ['knob', value] shape that
        # move/scale/rotate/vary use (the original stored the bare
        # number, inconsistent with every other knob entry).
        symbols[p[2]] = ['knob', p[3]]
    else:
        cmd['args'].append(p[2])
    commands.append(cmd)
def p_command_ambient(p):
    # ambient r g b -- stored in both the symbol table and command list.
    "command : AMBIENT NUMBER NUMBER NUMBER"
    symbols['ambient'] = ['ambient'] + p[2:]
    cmd = {'op':p[1], 'args':p[2:]}
    commands.append(cmd)
def p_command_constants(p):
    # Lighting constants: 9 numbers (Ka/Kd/Ks triples) or 12; in the
    # 12-number form the extra trailing values end up in 'blue' because
    # of the open-ended p[9:] slice.
    """command : CONSTANTS SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER
                | CONSTANTS SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER"""
    symbols[p[2]] = ['constants', {'red' : p[3:6], 'green' : p[6:9], 'blue' : p[9:]}]
    cmd = {'op':p[1], 'args' : None, 'constants' : p[2] }
    commands.append(cmd)
def p_command_light(p):
    # light <name> x y z r g b
    "command : LIGHT SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER"
    symbols[p[2]] = ['light', {'location' : p[3:6], 'color' : p[6:]}]
    cmd = {'op':p[1], 'args' : None, 'light' : p[2] }
    commands.append(cmd)
def p_command_shading(p):
    # shading <phong|flat|gouraud|raytrace|wireframe>
    "command : SHADING SHADING_TYPE"
    symbols['shading'] = ['shade_type', p[2]]
    cmd = {'op':p[1], 'args' : None, 'shade_type' : p[2] }
    commands.append(cmd)
def p_command_camera(p):
    "command : CAMERA NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER"
    # camera eye_x eye_y eye_z aim_x aim_y aim_z
    # BUG FIX: the six operands are two 3-vectors.  The original split
    # them as p[2:4] / p[4:] (2 and 4 elements), mis-shaping both the
    # eye and aim vectors stored in the symbol table.
    symbols['camera'] = ['camera', {'eye': p[2:5], 'aim': p[5:]} ]
    commands.append({'op':p[1], 'args':None})
def p_command_generate_rayfiles(p):
    # Flag command: emit ray-tracer input files; no operands.
    "command : GENERATE_RAYFILES"
    commands.append({'op':p[1], 'args':None})
def p_command_mesh(p):
    """command : MESH CO TEXT
                | MESH SYMBOL CO TEXT
                | MESH CO TEXT SYMBOL
                | MESH SYMBOL CO TEXT SYMBOL"""
    # mesh [constants] :filename [cs]
    cmd = {'op':p[1], 'args' : [], 'cs':None, 'constants':None}
    # BUG FIX: the CO token's value is the string ':', so the original
    # isinstance(p[2], str) test was true for EVERY alternative -- a
    # plain `mesh :file` stored ':' as its constants, and
    # `mesh k :file` stored ':' as the filename.  Disambiguate by where
    # the colon sits instead: p[2] means no constants, p[3] means a
    # constants symbol precedes it.
    if p[2] == ':':
        cmd['args'].append(p[3])        # filename follows the colon
        if len(p) == 5:                 # MESH CO TEXT SYMBOL
            cmd['cs'] = p[4]
    else:
        cmd['constants'] = p[2]
        cmd['args'].append(p[4])
        if len(p) == 6:                 # MESH SYMBOL CO TEXT SYMBOL
            cmd['cs'] = p[5]
    commands.append(cmd)
def p_save_knobs(p):
    # save_knobs <name>: snapshot current knob values under <name>.
    "command : SAVE_KNOBS SYMBOL"
    cmd = {'op':p[1], 'args':None, 'knob_list':p[2]}
    symbols[p[2]] = ['knob_list', []]
    commands.append(cmd)
def p_save_coords(p):
    # save_coord_system <name>: snapshot the current transform stack top.
    "command : SAVE_COORDS SYMBOL"
    cmd = {'op':p[1], 'args':None, 'cs':p[2]}
    symbols[p[2]] = ['coord_sys', []]
    commands.append(cmd)
def p_tween(p):
    # tween <start_frame> <end_frame> <knoblist0> <knoblist1>
    "command : TWEEN NUMBER NUMBER SYMBOL SYMBOL"
    cmd = {'op':p[1], 'args':p[2:4], 'knob_list0':p[4], 'knob_list1':p[5]}
    commands.append(cmd)
def p_focal(p):
    # focal <distance>
    "command : FOCAL NUMBER"
    commands.append({'op':p[1], 'args':[p[2]]})
def p_web(p):
    "command : WEB"
    commands.append({'op':p[1], 'args':None})
def p_texture(p):
    # texture <name> + 12 numbers, stored only in the symbol table.
    # NOTE(review): unlike every other command, no entry is appended to
    # `commands` here -- confirm whether that is intentional.
    "command : TEXTURE SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER"
    symbols[p[2]] = ['texture', p[3:]]
def p_error(p):
    # yacc error hook: report and discard the offending token.
    print('SYNTAX ERROR: ' + str(p))
# Build the parser at import time from the p_* rules above.
yacc.yacc()
from copy import deepcopy
def parseFile(filename):
    """Parse an MDL script and return its opcodes and symbol table.

    Returns a tuple ``(commands, symbols)`` where ``commands`` is a list
    of dicts of the form ``{'op': ..., 'args': ..., ...}`` built by the
    grammar actions above, and ``symbols`` is a dict of named entries
    (constants, knobs, lights, ...).  Returns an empty tuple ``()`` when
    the file cannot be opened -- callers must check for that.
    """
    global commands
    global symbols
    commands = []
    symbols = {}
    try:
        # `with` guarantees the file is closed even if yacc.parse raises;
        # the original closed it only on the success path (handle leak).
        with open(filename, "r") as f:
            # The grammar is line-oriented, so feed one stripped line at
            # a time; each parse appends into the module-level lists.
            for line in f:
                yacc.parse(line.strip())
        result = (commands[:], deepcopy(symbols))
        commands = []
        symbols = {}
        return result
    except IOError:
        return ()
| [
"ahaiman00@stuy.edu"
] | ahaiman00@stuy.edu |
0668482cec5cf3ee1d9fbb3ba9466c1dc9c9b89b | 3ca67ed4d6ee727023e9342aca1fb6b2b03fac04 | /coremlconverter.py | dfc9311c5cbf31381724f02600610e76b09a0d08 | [] | no_license | poommomo/pokemon-classifier | 17712679674361a85c5c0d986110820b11adee84 | 7b6724123bc2936399aed811b22c12c6d8217766 | refs/heads/master | 2020-03-29T04:43:43.379612 | 2018-08-09T08:53:12 | 2018-08-09T08:53:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,651 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 13 13:35:16 2018
@author: KaranJaisingh
"""
# import necessary packages
from keras.models import load_model
import coremltools
import argparse
import pickle
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True,
help="path to trained model model")
ap.add_argument("-l", "--labelbin", required=True,
help="path to label binarizer")
args = vars(ap.parse_args())
# arguments passed:
#1. --model: path to model
#2. --labelbin: path to class label binarizer
# load the class labels
print("[INFO] loading class labels from label binarizer")
lb = pickle.loads(open(args["labelbin"], "rb").read())
class_labels = lb.classes_.tolist()
print("[INFO] class labels: {}".format(class_labels))
# load the trained convolutional neural network
print("[INFO] loading model...")
model = load_model(args["model"])
# convert the model to coreml format
print("[INFO] converting model")
coreml_model = coremltools.converters.keras.convert(model,
input_names="image",
image_input_names="image",
image_scale=1/255.0, # very important parameter
class_labels=class_labels, # obtained from LabelBinarizer object
is_bgr=True) # extremely important - must be set to true is images trained with BGR colours
# save the model to disk
output = args["model"].rsplit(".", 1)[0] + ".mlmodel" # change the extension of model
print("[INFO] saving model as {}".format(output))
coreml_model.save(output)
# To run the script in Terminal:
# python coremlconverter.py --model <MODEL_NAME>.model --labelbin lb.pickle | [
"KaranJaisingh@karan-jaisingh.lan"
] | KaranJaisingh@karan-jaisingh.lan |
19ebe52e3014d768bb3a0b4bde461281fb6ffc50 | c04b004558beee584817c26aaf4809cb366f7fbb | /Course/Section-1/day12/2.作业.py | c45cc33e1a3d05734ee700af48c634dfd2609f76 | [] | no_license | Wuzhibin05/python-course | 4f3d3e39990a613d222b1e7bcb3bb1453b0541a0 | 50daa1405b873e9c6727598e6c752c115fe7843a | refs/heads/master | 2023-07-14T07:17:19.054495 | 2021-08-26T01:22:13 | 2021-08-26T01:22:13 | 353,526,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,575 | py | # 1.编写装饰器,为多个函数加上认证的功能(用户的账号密码来源于文件),
# 要求登录成功一次,后续的函数都无需再输入用户名和密码
# FLAG = False
# def login(func):
# def inner(*args,**kwargs):
# global FLAG
# '''登录程序'''
# if FLAG:
# ret = func(*args, **kwargs) # func是被装饰的函数
# return ret
# else:
# username = input('username : ')
# password = input('password : ')
# if username == 'boss_gold' and password == '22222':
# FLAG = True
# ret = func(*args,**kwargs) #func是被装饰的函数
# return ret
# else:
# print('登录失败')
# return inner
#
# @login
# def shoplist_add():
# print('增加一件物品')
#
# @login
# def shoplist_del():
# print('删除一件物品')
#
# shoplist_add()
# shoplist_del()
# 2.编写装饰器,为多个函数加上记录调用功能,要求每次调用函数都将被调用的函数名称写入文件
# def log(func):
# def inner(*args,**kwargs):
# with open('log','a',encoding='utf-8') as f:
# f.write(func.__name__+'\n')
# ret = func(*args,**kwargs)
# return ret
# return inner
#
# @log
# def shoplist_add():
# print('增加一件物品')
#
# @log
# def shoplist_del():
# print('删除一件物品')
# shoplist_add()
# shoplist_del()
# shoplist_del()
# shoplist_del()
# shoplist_del()
# shoplist_del()
# 进阶作业(选做):
# 1.编写下载网页内容的函数,要求功能是:用户传入一个url,函数返回下载页面的结果
# 2.为题目1编写装饰器,实现缓存网页内容的功能:
# 具体:实现下载的页面存放于文件中,如果文件内有值(文件大小不为0),就优先从文件中读取网页内容,否则,就去下载,然后存到文件中
import os
from urllib.request import urlopen
def cache(func):
    """Decorator that caches the wrapped function's byte result in the
    file 'web_cache'.

    If the cache file exists and is non-empty, its raw contents are
    returned without calling the wrapped function; otherwise the
    function runs and its result is written to the file.
    """
    def inner(*args, **kwargs):
        # BUG FIX: guard with os.path.exists() first -- the original
        # called os.path.getsize('web_cache') unconditionally, which
        # raises FileNotFoundError on the very first run, before the
        # cache file has ever been created.
        if os.path.exists('web_cache') and os.path.getsize('web_cache'):
            with open('web_cache', 'rb') as f:
                return f.read()
        ret = func(*args, **kwargs)  # e.g. get()
        with open('web_cache', 'wb') as f:
            # NOTE(review): preserved from the original homework code --
            # cached reads include this marker prefix while the first
            # (miss) call returns the bare result.
            f.write(b'*********' + ret)
        return ret
    return inner
@cache
def get(url):
    # Download *url* and return the raw response body as bytes;
    # results are cached in 'web_cache' by the decorator above.
    code = urlopen(url).read()
    return code
# {'url': "cache file name"}  (sketch of a per-URL cache-file mapping; not implemented)
# Demo: the first call downloads the page; the repeated calls below are
# served from the 'web_cache' file written by the decorator.
ret = get('http://www.baidu.com')
print(ret)
ret = get('http://www.baidu.com')
print(ret)
ret = get('http://www.baidu.com')
print(ret)
| [
"wuzb@szkingdom.com"
] | wuzb@szkingdom.com |
8f3c48e2d207660e14f0af89a3b6c1e6fa76b6dc | a6d9710e312caf4ae96b43f0290f9135bffdf8e0 | /Unit 45/45.4.1_4/calcpkg/geometry/vector.py | 6c6892b72e03d1f068f29d7872f2b5b90fd9723f | [] | no_license | gilbutITbook/006936 | 5b245cf1c6d4580eb07344bdaa254e4615109697 | b5cd6a57cdb5bb3c2ad5e3c9471a79b3fa82bc5d | refs/heads/master | 2023-01-29T07:35:46.360283 | 2023-01-18T06:20:49 | 2023-01-18T06:20:49 | 154,229,702 | 19 | 19 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | def product(a, b):
pass
def dot(a, b):
pass | [
"user@localhost"
] | user@localhost |
a9597573158cd06dab3973ee6e0512978f90458b | 229d71da5bb9fcdc34ab9c3a4ff9f75ca7ea7a19 | /bitly_app/urls.py | 81c9ebb0845bfee3a27ec09d812bed36ced4f7b6 | [] | no_license | Cunarefa/Convert | 8fd1ba5aae46915b1cde31a682d6ddd1b83bbeef | 93d366656c51affc2d17c685fcd6c93345180a49 | refs/heads/master | 2023-08-29T11:42:02.784981 | 2021-09-18T10:28:44 | 2021-09-18T10:28:44 | 407,829,331 | 0 | 0 | null | 2021-09-20T18:39:30 | 2021-09-18T10:30:09 | Python | UTF-8 | Python | false | false | 133 | py | from django.urls import path
from .views import ConvertView
urlpatterns = [
path('long', ConvertView.as_view(), name='long'),
] | [
"yevgen.yelik@gmail.com"
] | yevgen.yelik@gmail.com |
3d6a8d3932e7842aed3095ee73c35260a9beb8c0 | 40021c7443c3b512fd77df1a77855a10c0afd6df | /src/controller/command/track.py | b9796406b68396d87a5f02e48dc2e35b43675ec0 | [] | no_license | ginsm/punch | 9062cc6a18352a9a6569d2cdd3383563f012bf75 | be35587fffdb036f823b13e8df06d9509a348120 | refs/heads/master | 2021-04-10T01:39:04.296020 | 2020-03-24T23:59:08 | 2020-03-24T23:59:08 | 248,900,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | # SECTION IMPORTS - External and internal imports.
# =====================================================
# External
import re
# Internal
import view.commands.track as view
import model.db as db
# SECTION HANDLER - Job selection handlers.
# =====================================================
def handler(command, argument):
    """Select the job named *argument*, creating it on first use.

    Always returns a view message.  *command* is the CLI token that
    routed here and is otherwise unused.
    """
    if argument is None:
        return view.jobNameRequired()
    # Reject '/' in job names -- presumably because the name is used as
    # a storage key (db.write below); confirm against the db module.
    # BUG FIX: the original used re.search('\/', argument), whose '\/'
    # is an invalid escape sequence (SyntaxWarning on modern Python);
    # a plain membership test is equivalent and clearer.
    if '/' in argument:
        return view.invalidCharacter(argument)
    db.set_state({'job': argument})
    if not db.exists(argument):
        # First selection of this name: materialize it from the schema.
        schema = db.get_state('schema')
        schema['name'] = argument
        db.write(schema, argument)
    return view.newSelectedJob(argument)
| [
"matt@mgin.me"
] | matt@mgin.me |
8beec3595266a31881890de7dda1c3111fee3c79 | f0237dd96510765f1a8d4b61e5e6dca0d1142fba | /lib/tools/tools_ssh.py | f6155f4fca7f6f97a9800cfdbb16671dbf2a4638 | [
"MIT"
] | permissive | galena503/SCR | d217782dd03f22f5524666f11ccd6c9e73bec223 | d5b6581808b4f2fac775e7ff48b3eef548164ca1 | refs/heads/master | 2020-08-01T06:56:33.715559 | 2019-09-26T09:34:45 | 2019-09-26T09:34:45 | 210,895,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py |
class Tools_ssh:
    def get_other_scr_stats(self):
        """Retrieve status information about the other SCRs in the SCR
        field.  Not yet implemented; currently a stub.
        """
        # BUG FIX: the original file contained a duplicated, body-less
        # ``def get_other_scr_stats`` line here -- a syntax error that
        # prevented the module from importing.  Collapsed to one stub.
        pass
"52788168+akatuki-hi@users.noreply.github.com"
] | 52788168+akatuki-hi@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.