| blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 2–616) | content_id (stringlengths 40–40) | detected_licenses (listlengths 0–69) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5–118) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringlengths 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (stringclasses, 23 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 213 values) | src_encoding (stringclasses, 30 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2–10.3M) | extension (stringclasses, 246 values) | content (stringlengths 2–10.3M) | authors (listlengths 1–1) | author_id (stringlengths 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
190c0b7174e3ee074dcee7447dd6149444d96d20
|
9030481ef925278a174cbbf58c74bc5058e8d302
|
/contrib/testgen/base58.py
|
0b6e6e1ae339c3c25f894b09b621c4777509d655
|
[
"MIT"
] |
permissive
|
hideoussquid/aureus-13-gui
|
1b8f85f262cbc1970c3d8072b064956073bc4182
|
8865c958ba1680d4615128dabcc3cc4d47a24c51
|
refs/heads/master
| 2021-01-19T08:22:45.795165
| 2017-04-26T07:34:19
| 2017-04-26T07:34:19
| 87,622,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,999
|
py
|
# Copyright (c) 2012 The Aureus Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Aureus base58 encoding and decoding.
Based on https://aureustalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
new = hashlib.sha256
if str != bytes:
# Python 3.x
def ord(c):
return c
def chr(n):
return bytes( (n,) )
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Aureus does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if ord(c) == 0: nPad += 1  # works for both str (Python 2) and bytes (Python 3) input
else: break
return (__b58chars[0]*nPad) + result
def b58decode(v, length = None):
""" decode v into a string of len bytes
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = bytes()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def checksum(v):
"""Return 32-bit checksum based on SHA256"""
return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
def b58encode_chk(v):
"""b58encode a string, with 32-bit checksum"""
return b58encode(v + checksum(v))
def b58decode_chk(v):
"""decode a base58 string, check and remove checksum"""
result = b58decode(v)
if result is None:
return None
h3 = checksum(result[:-4])
if result[-4:] == h3:
return result[:-4]
else:
return None
def get_bcaddress_version(strAddress):
""" Returns None if strAddress is invalid. Otherwise returns integer version of address. """
addr = b58decode_chk(strAddress)
if addr is None or len(addr)!=21: return None
version = addr[0]
return ord(version)
if __name__ == '__main__':
# Test case (from http://gitorious.org/aureus/python-base58.git)
assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
_ohai = 'o hai'.encode('ascii')
_tmp = b58encode(_ohai)
assert _tmp == 'DYB3oMS'
assert b58decode(_tmp, 5) == _ohai
print("Tests passed")
|
[
"thesquid@mac.com"
] |
thesquid@mac.com
|
42242438bea8875d7471ea2ddf09291f67a15799
|
30a34b3503decf1b4516039df3106cd152631819
|
/4AL17IS050_T_K_HARSHITH_PRASAD/19_05_2020/2.py
|
90236ef15cb59e0d27deb74598351d1745cafda7
|
[] |
no_license
|
alvas-education-foundation/ISE_3rd_Year_Coding_challenge
|
8ddb6c325bf6ab63e2f73d16573fa0b6e2484136
|
b4074cab4a47aad07ed0fa426eacccbfafdef7f8
|
refs/heads/master
| 2022-11-23T20:52:19.204693
| 2020-07-23T11:28:15
| 2020-07-23T11:28:15
| 265,195,514
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
# This program adds two numbers
num1 = 1.5
num2 = 6.3
# Add two numbers
sum = float(num1) + float(num2)
# Display the sum
print('The sum of {0} and {1} is {2}'.format(num1, num2, sum))
|
[
"noreply@github.com"
] |
alvas-education-foundation.noreply@github.com
|
23360d6f5e4397081856508eee1b3434081d5f72
|
d3325fb51a99a8fb25a5bef8f61dc943333417a1
|
/neuron_models/experiments/MLI_exp_current_param_sweep.py
|
5a57bb949b4acca06601302e59cb1bab6d24e2ea
|
[] |
no_license
|
blennon/research
|
e26ccfb5a33543f72e84d96655b69f857b4ff422
|
6579a4d9636332267d0f26d8d4c8226e4fecf85d
|
refs/heads/master
| 2022-09-18T02:53:11.056480
| 2015-02-10T17:37:54
| 2015-02-10T17:37:54
| 7,845,366
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,177
|
py
|
'''
This script performs a grid search for the current parameters that best match the data
from Hausser and Clark (1997)
'''
import datetime
import os
import gc
import multiprocessing
import itertools
from brian import *
import sys
from neuron_models import *
import cPickle
import time
set_global_preferences(useweave=True, usenewpropagate=True, usecodegen=True, usecodegenweave=True)
defaultclock.dt = .25*ms
from scipy.stats import skew
def isi_mean_and_std(monitor):
'''
compute the mean and variance of interspike intervals
of a group of neurons
'''
isi = []
for n_ind, times in monitor.spiketimes.iteritems():
isi += list(diff(times)*1000)
return mean(isi), var(isi)**.5, skew(isi)
def run_net((k,theta)):
seed(os.getpid())
print os.getpid()
reinit()
reinit_default_clock()
clear(True)
gc.collect()
T = 6000
N_MLI = 1
MLI = MLIGroup(N_MLI)
@network_operation(Clock(dt=defaultclock.dt))
def random_current():
MLI.I = (k + exponential(theta,size=len(MLI))) * nA
# Monitor
MS_MLI = SpikeMonitor(MLI)
MR_MLI = PopulationRateMonitor(MLI,bin=1*ms)
MISI_MLI = ISIHistogramMonitor(MLI,bins=arange(0,162,2)*ms)
start = time.time()
run(T*msecond)
print time.time() - start
mli_mew, mli_std, mli_skew = isi_mean_and_std(MS_MLI)
return k,theta,mean(MR_MLI.rate), mli_std/mli_mew, mli_skew
if __name__ == "__main__":
pool = multiprocessing.Pool(6)
params = []
for k in linspace(.016,.018,25):
for theta in linspace(.009,.011,25):
if k+theta < .029 and k+theta > .02:
params.append((k,theta))
print len(params)
results = pool.map(run_net, params)
out_dir = '/home/bill/research/data/neuron_models/molecular_layer/mli_exp_current_param_sweep/%s/'%datetime.datetime.now().isoformat()
os.makedirs(out_dir)
with open(out_dir+'results.txt','w') as outf:
outf.write('\t'.join(['k','theta','mli_mean_firing_rate','mli_cv', 'mli_skew'])+'\n')
for r in results:
outf.write('\t'.join(map(str,r))+'\n')
|
[
"blennon86@gmail.com"
] |
blennon86@gmail.com
|
58bb40f95b996bb5aaf4c9706c5271c0c5978cc2
|
25d8bac5635ac1cc3577a3593a4512e042ea7ecd
|
/scripts/asyncore-example-2.py
|
27a4738c22e98525faf3534d4f880e283ad582e0
|
[] |
no_license
|
mtslong/demo
|
2333fa571d6d9def7bdffc90f7bcb623b15e6e4b
|
a78b74e0eea7f84df489f5c70969b9b4797a4873
|
refs/heads/master
| 2020-05-18T18:28:48.237100
| 2013-11-11T16:10:11
| 2013-11-11T16:10:11
| 4,136,487
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
import asyncore
import socket, time
# reference time
TIME1970 = 2208988800L
class TimeChannel(asyncore.dispatcher):
def handle_write(self):
t = int(time.time()) + TIME1970
t = chr(t>>24&255) + chr(t>>16&255) + chr(t>>8&255) + chr(t&255)
self.send(t)
self.close()
class TimeServer(asyncore.dispatcher):
def __init__(self, port=37):
self.port = port
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind(("", port))
self.listen(5)
print "listening on port", self.port
def handle_accept(self):
channel, addr = self.accept()
TimeChannel(channel)
server = TimeServer(8037)
asyncore.loop()
## log: adding channel <TimeServer at 8cb940>
## listening on port 8037
## log: adding channel <TimeChannel at 8b2fd0>
## log: closing channel 52:<TimeChannel connected at 8b2fd0>
|
[
"mofeng@netease.com"
] |
mofeng@netease.com
|
9e18ca0c910a39afcedd81193e4b16ecdffb726e
|
01494c3ac2e3281d71066ee220628afc452beb70
|
/Chapter IV/dostep_swobodny.py
|
24b2e875deb371d3f45b0076b3882a8bab9ddd20
|
[] |
no_license
|
PatrykDagiel/Python_Dawson
|
b3a4aab8dbb875eda54c0cd46ceed3650edc3dc7
|
d3a04a5041df5ac728e2596331521191f941f536
|
refs/heads/master
| 2020-07-17T12:58:47.390099
| 2017-10-11T21:11:37
| 2017-10-11T21:11:37
| 94,321,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
import random
word = "indeks"
high=len(word)
low=-len(word)
for i in range(10):
position = random.randrange(low, high)
print("word[", position, "]\t", word[position])
input("\nAby zakonczyc program nacisnij enter")
|
[
"patryk.dagiel@gmail.com"
] |
patryk.dagiel@gmail.com
|
bd89005298fc7cb04640f30108944021ea926373
|
862126fc5d0b920501a75913ddd563a0fb6c942a
|
/Trabajo_Final/gSLICrPy/gSLICrPy.py
|
16315f9e0d597b11c819adb08ab60e4aed3c42fa
|
[] |
no_license
|
jhuni45/TCG-Laboratorio
|
60cc5a735e98fafd6c171e92b7d0d9ae71ba5378
|
e2841e6ea30880142a47e7aa3dc4158c5c0fe4d3
|
refs/heads/master
| 2023-03-08T13:24:17.849817
| 2021-02-15T17:49:32
| 2021-02-15T17:49:32
| 257,342,390
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,585
|
py
|
import ctypes
from ctypes import POINTER
def __get_CUDA_gSLICr__(path_to_shared='./build/libDEMO.so'):
"""
:return: Callable
"""
dll = ctypes.CDLL(path_to_shared, mode=ctypes.RTLD_GLOBAL)
func = dll.CUDA_gSLICr
"""
int* CUDA_gSLICr(unsigned char* image,
int img_size_x,
int img_size_y,
int n_segs,
int spixel_size,
float coh_weight,
int n_iters,
int color_space,
int segment_color_space,
bool segment_by_size,
bool enforce_connectivity,
char* out_name)
"""
func.argtypes = [POINTER(ctypes.c_uint8),
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_float,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_bool,
ctypes.c_bool,
ctypes.c_char_p]
# POINTER(c_char) or ctypes.c_char_p ?
return func
def CUDA_gSLICr(__get_CUDA_gSLICr__,
image,
img_size_x,
img_size_y,
n_segs,
spixel_size,
coh_weight,
n_iters,
color_space,
segment_color_space,
segment_by_size,
enforce_connectivity,
out_name):
"""
:param __get_CUDA_gSLICrm__:
:param image:
:param img_size_x:
:param img_size_y:
:param n_segs:
:param spixel_size:
:param coh_weight:
:param n_iters:
:param color_space:
:param segment_color_space:
:param segment_by_size:
:param enforce_connectivity:
:param out_name:
:return:
"""
image = image.ctypes.data_as(POINTER(ctypes.c_uint8))
out_name = out_name.encode('utf-8')
return __get_CUDA_gSLICr__(image,
img_size_x,
img_size_y,
n_segs,
spixel_size,
coh_weight,
n_iters,
color_space,
segment_color_space,
segment_by_size,
enforce_connectivity,
out_name)
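A hedged calling sketch; the library path, image shape and parameter values below are illustrative assumptions (the gSLICr shared library must be built and a CUDA device available):
import numpy as np

gslicr = __get_CUDA_gSLICr__('./build/libDEMO.so')   # assumed build location
frame = np.zeros((480, 640, 3), dtype=np.uint8)      # placeholder H x W x RGB image
CUDA_gSLICr(gslicr, frame,
            img_size_x=640, img_size_y=480,
            n_segs=2000, spixel_size=16, coh_weight=0.6, n_iters=5,
            color_space=2, segment_color_space=2,
            segment_by_size=False, enforce_connectivity=True,
            out_name='segmentation.pgm')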
|
[
"jhunier12@gmail.com"
] |
jhunier12@gmail.com
|
c88104d5615bd51adb47e4cdb4bcf60a416fae65
|
4cc2ad8ff00012095980bd98f4ec26437bf02feb
|
/form.py
|
ba4fbcf24556ee808a8bd7c05c332619b489b8a9
|
[] |
no_license
|
abcelso/Web-con-Python-y-flask
|
b57d562b592b2c5ff29936eccef7aaef7dadc5db
|
9de9b51abbb86880afdd5107973a1dd11156bafd
|
refs/heads/master
| 2022-06-09T23:10:41.734142
| 2020-04-21T04:29:28
| 2020-04-21T04:29:28
| 257,466,338
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
from wtforms import Form
from wtforms import StringField, PasswordField
class LoginForm(Form):
username = StringField('username')
password = PasswordField('password')
|
[
"silvestroalejandro@gmail.com"
] |
silvestroalejandro@gmail.com
|
19f3c8b7d94aae6549e86646e36334cb826a906e
|
6e820756b82ffbe9837348937e53f1a0ce0e6cca
|
/Lib/site-packages/pandas_datareader/io/jsdmx.py
|
d602ca88beb058636aceaac714662ee2f457a6c4
|
[] |
no_license
|
AndreasPatsimas/pms_papei
|
c2afd941de6ae234dd37784d746e794183ebb8d3
|
da10220ea468304c1066bed55b8f92ba9e5ada8a
|
refs/heads/master
| 2023-02-01T23:33:39.221747
| 2020-12-19T12:17:59
| 2020-12-19T12:17:59
| 321,115,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,167
|
py
|
# pylint: disable-msg=E1101,W0613,W0603
from __future__ import unicode_literals
from collections import OrderedDict
import itertools
import re
import sys
import numpy as np
import pandas as pd
from pandas_datareader.io.util import _read_content
def read_jsdmx(path_or_buf):
"""
Convert an SDMX-JSON string to a pandas object
Parameters
----------
path_or_buf : a valid SDMX-JSON string or file-like
https://github.com/sdmx-twg/sdmx-json
Returns
-------
results : Series, DataFrame, or dictionary of Series or DataFrame.
"""
jdata = _read_content(path_or_buf)
try:
import simplejson as json
except ImportError:
if sys.version_info[:2] < (2, 7):
raise ImportError("simplejson is required in python 2.6")
import json
if isinstance(jdata, dict):
data = jdata
else:
data = json.loads(jdata, object_pairs_hook=OrderedDict)
structure = data["structure"]
index = _parse_dimensions(structure["dimensions"]["observation"])
columns = _parse_dimensions(structure["dimensions"]["series"])
dataset = data["dataSets"]
if len(dataset) != 1:
raise ValueError("length of 'dataSets' must be 1")
dataset = dataset[0]
values = _parse_values(dataset, index=index, columns=columns)
df = pd.DataFrame(values, columns=columns, index=index)
return df
def _get_indexer(index):
if index.nlevels == 1:
return [str(i) for i in range(len(index))]
else:
it = itertools.product(*[range(len(level)) for level in index.levels])
return [":".join(map(str, i)) for i in it]
def _fix_quarter_values(value):
"""Make raw quarter values Pandas-friendly (e.g. 'Q4-2018' -> '2018Q4')."""
m = re.match(r"Q([1-4])-(\d\d\d\d)", value)
if not m:
return value
quarter, year = m.groups()
value = "%sQ%s" % (quarter, year)
return value
def _parse_values(dataset, index, columns):
size = len(index)
series = dataset["series"]
values = []
# for s_key, s_value in iteritems(series):
for s_key in _get_indexer(columns):
try:
observations = series[s_key]["observations"]
observed = []
for o_key in _get_indexer(index):
try:
observed.append(observations[o_key][0])
except KeyError:
observed.append(np.nan)
except KeyError:
observed = [np.nan] * size
values.append(observed)
return np.transpose(np.array(values))
def _parse_dimensions(dimensions):
arrays = []
names = []
for key in dimensions:
values = [v["name"] for v in key["values"]]
role = key.get("role", None)
if role in ("time", "TIME_PERIOD"):
values = [_fix_quarter_values(v) for v in values]
values = pd.DatetimeIndex(values)
arrays.append(values)
names.append(key["name"])
midx = pd.MultiIndex.from_product(arrays, names=names)
if len(arrays) == 1 and isinstance(midx, pd.MultiIndex):
# Fix for pandas >= 0.21
midx = midx.levels[0]
return midx
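A hedged usage sketch; 'sample_sdmx.json' is a placeholder for a real SDMX-JSON payload, such as one returned by an OECD SDMX-JSON endpoint:
df = read_jsdmx('sample_sdmx.json')   # placeholder file name
print(df.shape)
print(df.head())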
|
[
"45208441+AndreasPatsimas@users.noreply.github.com"
] |
45208441+AndreasPatsimas@users.noreply.github.com
|
ef27d9265109ae830f5ca62402fffac9b1752587
|
f2f8b2f31859608d98ef644a6114991733adc964
|
/asposeslidescloud/models/workbook.py
|
3f1f395f402ebbf10a5ca5e5de7bef334ed4ce5d
|
[
"MIT",
"Python-2.0"
] |
permissive
|
aspose-slides-cloud/aspose-slides-cloud-python
|
ece60566bcf755d7350773b6ea46b44cde2d038a
|
0627d09c65a776d8ea138f97c7487d47fb98fbce
|
refs/heads/master
| 2023-08-05T00:19:21.417406
| 2023-07-30T13:32:16
| 2023-07-30T13:32:16
| 161,640,927
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,848
|
py
|
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose">
# Copyright (c) 2018 Aspose.Slides for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
from asposeslidescloud.models.data_source import DataSource
class Workbook(DataSource):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'type': 'str',
'worksheet_index': 'int',
'column_index': 'int',
'row_index': 'int'
}
attribute_map = {
'type': 'type',
'worksheet_index': 'worksheetIndex',
'column_index': 'columnIndex',
'row_index': 'rowIndex'
}
type_determiners = {
'type': 'Workbook',
}
def __init__(self, type='Workbook', worksheet_index=None, column_index=None, row_index=None): # noqa: E501
"""Workbook - a model defined in Swagger""" # noqa: E501
super(Workbook, self).__init__(type)
self._worksheet_index = None
self._column_index = None
self._row_index = None
self.type = 'Workbook'
self.worksheet_index = worksheet_index
self.column_index = column_index
self.row_index = row_index
@property
def worksheet_index(self):
"""Gets the worksheet_index of this Workbook. # noqa: E501
Worksheet index. # noqa: E501
:return: The worksheet_index of this Workbook. # noqa: E501
:rtype: int
"""
return self._worksheet_index
@worksheet_index.setter
def worksheet_index(self, worksheet_index):
"""Sets the worksheet_index of this Workbook.
Worksheet index. # noqa: E501
:param worksheet_index: The worksheet_index of this Workbook. # noqa: E501
:type: int
"""
self._worksheet_index = worksheet_index
@property
def column_index(self):
"""Gets the column_index of this Workbook. # noqa: E501
Column index of the first value. # noqa: E501
:return: The column_index of this Workbook. # noqa: E501
:rtype: int
"""
return self._column_index
@column_index.setter
def column_index(self, column_index):
"""Sets the column_index of this Workbook.
Column index of the first value. # noqa: E501
:param column_index: The column_index of this Workbook. # noqa: E501
:type: int
"""
self._column_index = column_index
@property
def row_index(self):
"""Gets the row_index of this Workbook. # noqa: E501
Row index of the first value. # noqa: E501
:return: The row_index of this Workbook. # noqa: E501
:rtype: int
"""
return self._row_index
@row_index.setter
def row_index(self, row_index):
"""Sets the row_index of this Workbook.
Row index of the first value. # noqa: E501
:param row_index: The row_index of this Workbook. # noqa: E501
:type: int
"""
self._row_index = row_index
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Workbook):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
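A small usage sketch, assuming the asposeslidescloud package (which provides the DataSource base class) is installed:
wb = Workbook(worksheet_index=0, column_index=1, row_index=1)
print(wb.to_dict())   # e.g. {'type': 'Workbook', 'worksheet_index': 0, 'column_index': 1, 'row_index': 1}
print(wb)             # __repr__ falls back to the pretty-printed dict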
|
[
"victor.putrov@aspose.com"
] |
victor.putrov@aspose.com
|
14ef77722f048f2df87d9027535bf38755c8fe85
|
906f230699aa5017660140e7f2032c46db75086d
|
/feature_matching.py
|
4eef2b03912cd5044833e599456915d626fce331
|
[] |
no_license
|
gubo2012/opencv_tutorial
|
b3e10d41442052eb245f91e38f407dc67ef29a47
|
e90ecb4701731e4ed19c30bd32b58d9b9fda9763
|
refs/heads/master
| 2020-03-10T03:21:07.153053
| 2018-04-17T17:26:11
| 2018-04-17T17:26:11
| 129,162,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 11 22:05:40 2018
@author: gubo
"""
import numpy as np
import cv2
import matplotlib.pyplot as plt
img1 = cv2.imread('opencv-feature-matching-template.jpg',0)
img2 = cv2.imread('opencv-feature-matching-image.jpg',0)
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1,None)
kp2, des2 = orb.detectAndCompute(img2,None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# sort them based on their distances
matches = bf.match(des1,des2)
matches = sorted(matches, key = lambda x:x.distance)
img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:5],None, flags=2)
plt.imshow(img3)
plt.show()
|
[
"gubo@boyipricing.com"
] |
gubo@boyipricing.com
|
86a20d0a802a3b77e91c16b62fb4c5702450b991
|
dc69872f21492d34d7da6eee9f0d03f7c09a8a8d
|
/libraries/edge/opensearch/granuleisoresponse.py
|
fd3ed16eb03bd91778c8ff34354a963de13a58c8
|
[
"Apache-2.0"
] |
permissive
|
isabella232/incubator-sdap-edge
|
125e9ba8cb1738d8407222f9d21f5452fc5fa840
|
c725dad1098096048faed9a42a56f3cfc5c25bc5
|
refs/heads/master
| 2022-03-19T18:49:03.752184
| 2019-12-02T23:40:12
| 2019-12-02T23:40:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,127
|
py
|
import datetime
from edge.opensearch.isoresponsebysolr import IsoResponseBySolr
class GranuleIsoResponse(IsoResponseBySolr):
def __init__(self, linkToGranule):
super(GranuleIsoResponse, self).__init__()
self.linkToGranule = linkToGranule.split(',')
def _populateChannel(self, solrResponse):
pass
def _populateItem(self, solrResponse, doc, item):
link = self._getLinkToGranule(doc)
if link is not None:
doc['link'] = link
def _getLinkToGranule(self, doc):
link = None
if 'GranuleReference-Type' in doc and len(self.linkToGranule) > 0:
granuleRefDict = dict(list(zip(doc['GranuleReference-Type'], list(zip(doc['GranuleReference-Path'], doc['GranuleReference-Status'])))))
for type in self.linkToGranule:
# check if reference type exists
if type in granuleRefDict:
# check if reference is online
if granuleRefDict[type][1] == 'ONLINE':
link = granuleRefDict[type][0]
break
return link
|
[
"lewis.mcgibbney@gmail.com"
] |
lewis.mcgibbney@gmail.com
|
deece369baf689aed3e350790563652c99e1df4c
|
ca0d710ed0469beb7f87ae53f5efdef7bac19a27
|
/MainView/migrations/0001_initial.py
|
c421c7915ab1a3ced242749c9b05288a7231a3c2
|
[
"MIT"
] |
permissive
|
CiganOliviu/wedding_invitation
|
5d441d786f742d6a4baf5ff418370c0cfbb1b81e
|
8b243b287b6577b4f5f899e33ade1fec651152f0
|
refs/heads/main
| 2023-03-03T08:12:36.345173
| 2021-02-08T15:37:04
| 2021-02-08T15:37:04
| 333,568,503
| 0
| 0
|
MIT
| 2021-02-08T15:37:05
| 2021-01-27T21:43:34
| null |
UTF-8
|
Python
| false
| false
| 646
|
py
|
# Generated by Django 3.0.8 on 2020-08-10 08:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ConfirmAnswer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('submitted', models.BooleanField(default=True)),
('answer_sent', models.DateTimeField(auto_now_add=True)),
],
),
]
|
[
"ciganoliviudavid@gmail.com"
] |
ciganoliviudavid@gmail.com
|
e06f6bad831d1fe03750409f2f19f010a7b6ddc3
|
4b23349fa42462acd842b713bbb9cb0868f2e8fe
|
/while.py
|
fa5438934e19b97c945b72b2951c6eb0069bc192
|
[] |
no_license
|
kkb1028-i-want-be-a-good-datascientist/TEST
|
663c80fefc510fb8d56f4c15ca6c781341fe5e10
|
cb918a2fd1ce7fdf935c82f29a354bf3b386cdf3
|
refs/heads/master
| 2020-07-08T02:20:16.283456
| 2019-08-21T08:24:43
| 2019-08-21T08:24:43
| 203,538,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
num, sum = 1, 0
while True:
sum+=num
if sum > 100:
break
else:
num += 1
print('Escaped the while loop when num is %d!!' % num)
|
[
"54340933+KKB1028@users.noreply.github.com"
] |
54340933+KKB1028@users.noreply.github.com
|
e2c5a124b1d605b156114ec0a8636fb103cbd5d3
|
2c9677180eeec4e1657765b2095828ba43f041ee
|
/src/python/grpcio/grpc/_runtime_protos.py
|
88863e0306dc137bd06225c46e2e2b19e0c01eca
|
[
"Apache-2.0"
] |
permissive
|
morganwu277/grpc
|
a82e4348184a27b273159808327e7f6778a6d448
|
7c4bdd9c6ba176ad65ecea323de8ea4fd6999cf9
|
refs/heads/master
| 2022-11-29T22:01:10.376948
| 2020-08-18T23:53:02
| 2020-08-18T23:53:02
| 288,634,896
| 0
| 0
|
Apache-2.0
| 2020-08-19T04:43:28
| 2020-08-19T04:40:57
| null |
UTF-8
|
Python
| false
| false
| 5,718
|
py
|
# Copyright 2020 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
def _uninstalled_protos(*args, **kwargs):
raise NotImplementedError(
"Install the grpcio-tools package to use the protos function.")
def _uninstalled_services(*args, **kwargs):
raise NotImplementedError(
"Install the grpcio-tools package to use the services function.")
def _uninstalled_protos_and_services(*args, **kwargs):
raise NotImplementedError(
"Install the grpcio-tools package to use the protos_and_services function."
)
def _interpreter_version_protos(*args, **kwargs):
raise NotImplementedError(
"The protos function is only on available on Python 3.X interpreters.")
def _interpreter_version_services(*args, **kwargs):
raise NotImplementedError(
"The services function is only on available on Python 3.X interpreters."
)
def _interpreter_version_protos_and_services(*args, **kwargs):
raise NotImplementedError(
"The protos_and_services function is only on available on Python 3.X interpreters."
)
def protos(protobuf_path): # pylint: disable=unused-argument
"""Returns a module generated by the indicated .proto file.
THIS IS AN EXPERIMENTAL API.
Use this function to retrieve classes corresponding to message
definitions in the .proto file.
To inspect the contents of the returned module, use the dir function.
For example:
```
protos = grpc.protos("foo.proto")
print(dir(protos))
```
The returned module object corresponds to the _pb2.py file generated
by protoc. The path is expected to be relative to an entry on sys.path
and all transitive dependencies of the file should also be resolveable
from an entry on sys.path.
To completely disable the machinery behind this function, set the
GRPC_PYTHON_DISABLE_DYNAMIC_STUBS environment variable to "true".
Args:
protobuf_path: The path to the .proto file on the filesystem. This path
must be resolveable from an entry on sys.path and so must all of its
transitive dependencies.
Returns:
A module object corresponding to the message code for the indicated
.proto file. Equivalent to a generated _pb2.py file.
"""
def services(protobuf_path): # pylint: disable=unused-argument
"""Returns a module generated by the indicated .proto file.
THIS IS AN EXPERIMENTAL API.
Use this function to retrieve classes and functions corresponding to
service definitions in the .proto file, including both stub and servicer
definitions.
To inspect the contents of the returned module, use the dir function.
For example:
```
services = grpc.services("foo.proto")
print(dir(services))
```
The returned module object corresponds to the _pb2_grpc.py file generated
by protoc. The path is expected to be relative to an entry on sys.path
and all transitive dependencies of the file should also be resolveable
from an entry on sys.path.
To completely disable the machinery behind this function, set the
GRPC_PYTHON_DISABLE_DYNAMIC_STUBS environment variable to "true".
Args:
protobuf_path: The path to the .proto file on the filesystem. This path
must be resolveable from an entry on sys.path and so must all of its
transitive dependencies.
Returns:
A module object corresponding to the stub/service code for the indicated
.proto file. Equivalent to a generated _pb2_grpc.py file.
"""
def protos_and_services(protobuf_path): # pylint: disable=unused-argument
"""Returns a 2-tuple of modules corresponding to protos and services.
THIS IS AN EXPERIMENTAL API.
The return value of this function is equivalent to a call to protos and a
call to services.
To completely disable the machinery behind this function, set the
GRPC_PYTHON_DISABLE_DYNAMIC_STUBS environment variable to "true".
Args:
protobuf_path: The path to the .proto file on the filesystem. This path
must be resolveable from an entry on sys.path and so must all of its
transitive dependencies.
Returns:
A 2-tuple of module objects corresponding to (protos(path), services(path)).
"""
if sys.version_info < (3, 5, 0):
protos = _interpreter_version_protos
services = _interpreter_version_services
protos_and_services = _interpreter_version_protos_and_services
else:
try:
import grpc_tools # pylint: disable=unused-import
except ImportError as e:
# NOTE: It's possible that we're encountering a transitive ImportError, so
# we check for that and re-raise if so.
if "grpc_tools" not in e.args[0]:
raise
protos = _uninstalled_protos
services = _uninstalled_services
protos_and_services = _uninstalled_protos_and_services
else:
from grpc_tools.protoc import _protos as protos # pylint: disable=unused-import
from grpc_tools.protoc import _services as services # pylint: disable=unused-import
from grpc_tools.protoc import _protos_and_services as protos_and_services # pylint: disable=unused-import
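A hedged end-to-end sketch; "helloworld.proto", GreeterStub, SayHello and HelloRequest are placeholder names for a real service definition, and grpcio-tools must be installed on a Python 3.5+ interpreter:
import grpc

protos, services = grpc.protos_and_services("helloworld.proto")   # placeholder proto file
with grpc.insecure_channel("localhost:50051") as channel:
    stub = services.GreeterStub(channel)                          # placeholder stub name
    reply = stub.SayHello(protos.HelloRequest(name="world"))
    print(reply.message)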
|
[
"j.belleville.richard@gmail.com"
] |
j.belleville.richard@gmail.com
|
8119ae09255af8a153504009ba5d56f6a35a0562
|
841e606be767cf7d6fdfa551daaa887c4400ec36
|
/branches/pgasync-branch/src/database/postgresql/pgasyncpool.py
|
e97ea8e8230c51048d5ad26ea5510701675dd5c9
|
[
"MIT"
] |
permissive
|
BackupTheBerlios/weever-svn
|
e4dfda7be1fc64c2d38b5d0420deee7daa5b462a
|
d7b9969f107cd9e38f633b1314416e7a50a95c50
|
refs/heads/master
| 2021-01-10T18:33:52.245594
| 2005-03-15T12:14:56
| 2005-03-15T12:14:56
| 40,748,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,757
|
py
|
from twisted.internet import defer
import pgasync
class ConnectionPool(object):
def __init__(self, dbadapter, dsn, *args, **kwargs):
self.params = dsn
def runOperation(self, query, args={}):
d = defer.Deferred()
conn = pgasync.connect(**self.params)
dd = conn.cursor()
dd.addCallback(self._runOperation, conn, d, query, args)
return d
def _runOperation(self, cursor, conn, d, query, args):
cursor.execute(query, **args)
dd = conn.commit()
dd.addCallback(self._finish, d, cursor)
dd.addErrback(self._finish, d, cursor)
def runQuery(self, query, args={}):
d = defer.Deferred()
conn = pgasync.connect(**self.params)
dd = conn.cursor()
dd.addCallback(self._runQuery, conn, d, query, args)
return d
def _runQuery(self, cursor, conn, d, query, args):
dx = cursor.exFetch(query, **args)
dx.addCallback(self._finish, d, cursor)
dx.addErrback(self._finish, d, cursor)
def runInteraction(self, fun, query, args={}):
d = defer.Deferred()
conn = pgasync.connect(**self.params)
dd = conn.cursor()
dd.addCallback(self._runInteraction, fun, conn, d, query, args)
return d
def _runInteraction(self, cursor, fun, conn, d, query, args):
def commit(result, conn, d, cursor):
d = conn.commit()
d.addCallback(lambda _: self._finish(result, d, cursor))
d.addErrback(lambda _: self._finish(result, d, cursor))
d = fun(cursor, query, args)
d.addCallback(commit, conn, d, cursor)
def _finish(self, result, d, cursor):
cursor.release()
d.callback(result)
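A hedged usage sketch (the adapter name, DSN keys and query below are illustrative assumptions; the pgasync package and a running PostgreSQL server are required):
from twisted.internet import reactor

# Placeholder connection parameters; pgasync.connect(**dsn) must accept these keys.
pool = ConnectionPool('pgasync', {'dbname': 'weever', 'user': 'web', 'password': 'secret', 'host': 'localhost'})

def done(rows):
    print(rows)
    reactor.stop()

pool.runQuery("SELECT id, title FROM posts").addCallback(done)
reactor.run()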
|
[
"dialtone@a440c657-b6e6-0310-a3b3-b76a39be4160"
] |
dialtone@a440c657-b6e6-0310-a3b3-b76a39be4160
|
42ac27e191cdf443e6d63d711278ad947615b5d6
|
cb5f2d4943b65d53a7c36080a48057f79dcb4c4a
|
/core/admin.py
|
56624ea61d601f822467e9b4962fdab83d5ee951
|
[] |
no_license
|
radaevalex/Portal
|
54be04808f52fdda0c7a12919f3d4d152d10d5db
|
7134336289e0bf425124c20bcd5c85e33f938591
|
refs/heads/master
| 2022-05-01T19:23:46.800454
| 2019-10-31T17:46:12
| 2019-10-31T17:46:12
| 218,718,710
| 0
| 0
| null | 2022-04-22T22:34:38
| 2019-10-31T08:28:50
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 804
|
py
|
from django.contrib import admin
from .models import Office, Indicator, Dynamic
# Register your models here.
@admin.register(Office)
class OfficesAdmin(admin.ModelAdmin):
list_display = ('id', 'department', 'city')
list_filter = ('id', 'department', 'city')
search_fields = ('id', 'department', 'city')
exclude = ('slug',)
ordering = ('id',)
@admin.register(Indicator)
class IndicatorAdmin(admin.ModelAdmin):
list_display = ('group', 'name')
list_filter = ('group', 'name')
search_fields = ('group', 'name')
ordering = ('name',)
@admin.register(Dynamic)
class DynamicAdmin(admin.ModelAdmin):
list_display = ('month', 'office', 'indicator', 'value',)
list_filter = ('month', 'value',)
search_fields = ('month', 'value',)
ordering = ('month', )
|
[
"57213368+radaevalex@users.noreply.github.com"
] |
57213368+radaevalex@users.noreply.github.com
|
ee35bcd0011f2a65f079aa4d10f48e44c32ac16b
|
44873fa0398bfb8f613f7b4e40a6c6e70aceaa9a
|
/ClassicUserAccounts/managers.py
|
479b39c1f0b475e195d44db23ae87aa0b27e88d8
|
[
"BSD-2-Clause"
] |
permissive
|
shyampathak/django-classic-user-account
|
8c6b9b9a32cfc556f9abb569f4bdc279cd302178
|
49e086de6feb2ee19fce4b8463dd8760694d03c6
|
refs/heads/master
| 2020-04-13T05:33:31.013708
| 2018-12-24T10:50:25
| 2018-12-24T10:50:25
| 162,995,683
| 1
| 0
|
BSD-2-Clause
| 2018-12-24T13:55:46
| 2018-12-24T13:55:45
| null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
from django.contrib.auth.base_user import BaseUserManager
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, email, password, **extra_fields):
"""
Creates and saves a User with the given email and password.
"""
if not email:
raise ValueError('The given email must be set')
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
extra_fields.setdefault('is_superuser', False)
return self._create_user(email, password, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
user = self.create_user(email, password=password)
user.is_admin = True
user.is_superuser = True
# user.is_staff = True
user.save(using=self._db)
return user
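A usage sketch of the usual Django wiring; the model and field names below are illustrative assumptions rather than part of the package:
from django.contrib.auth.base_user import AbstractBaseUser
from django.db import models

class User(AbstractBaseUser):
    # Minimal custom user model that plugs in the manager above (register it via AUTH_USER_MODEL).
    email = models.EmailField(unique=True)
    is_admin = models.BooleanField(default=False)
    is_superuser = models.BooleanField(default=False)

    objects = UserManager()
    USERNAME_FIELD = 'email'

# Then, e.g. in a shell:
#   User.objects.create_user('alice@example.com', 'secret-password')
#   User.objects.create_superuser('admin@example.com', 'admin-password')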
|
[
"suman.k@skysoft.net.in"
] |
suman.k@skysoft.net.in
|
2070f5d6b0de0efb6739eb2fd4df1b8420de7296
|
aeaa059ff404bbf08d94fa9d5affbcdbc8cd5e51
|
/nbp_task/models/exchange_rate.py
|
59d4b5d58084b6e2ad77de1d972d5f44d39fafd8
|
[] |
no_license
|
durejkol/nbp_pyramid
|
8391f351b32b04f17161324e3e06ecb123713756
|
70361906697c2ed27a317681404b48967852cb05
|
refs/heads/master
| 2020-03-23T06:04:34.292549
| 2018-07-16T19:55:01
| 2018-07-16T19:55:01
| 141,187,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
from nbp_task.models import Base
from sqlalchemy import Column, Float, Integer, String
class ExchangeRate(Base):
__tablename__ = 'currencies'
id = Column(Integer, primary_key=True)
currency = Column(String)
currency_code = Column(String)
exchange_rate = Column(Float)
def __init__(self, currency, currency_code, exchange_rate):
self.currency = currency
self.currency_code = currency_code
self.exchange_rate = exchange_rate
def __repr__(self):
return "{0}, {1}, {2}".format(self.currency,
self.currency_code,
self.exchange_rate)
|
[
"wendzior@gmail.com"
] |
wendzior@gmail.com
|
3173c7fc8e6eed2af73d82af36bdbc28a65b6521
|
070a6843e24c0eee6397d47495effcce5e8130df
|
/rcs/account/apps.py
|
bbbb347859ab7524a9dc981f8f9692877a61dfcf
|
[] |
no_license
|
Forrest-Z/rcs
|
5e86edd08d292adafcf9ef694ed7894ff12bf2ef
|
9dd5cd9d3693b6bae9014dff365b2968b45313b7
|
refs/heads/main
| 2023-07-06T13:39:38.938705
| 2021-08-10T08:30:35
| 2021-08-10T08:30:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
from django.apps import AppConfig
class AccountConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'rcs.account'
|
[
"pazzitul@163.com"
] |
pazzitul@163.com
|
17a54c49e8b54a1afc88c363e32b08adf3f15a77
|
239f70d1d68feec739ae309fdb1ae9432b528277
|
/flaskblog/models.py
|
0ae84f782ea263195f3a2a656e05501717eb7d2a
|
[] |
no_license
|
Acejoy/Blog-WebApp
|
5321287b396a765c832649caaa06ffb25773cfe0
|
bea38213e67f220939b40da2d2e18a11394ea9d9
|
refs/heads/main
| 2023-04-18T20:38:18.100653
| 2021-05-06T06:55:23
| 2021-05-06T06:55:23
| 360,434,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,093
|
py
|
from datetime import datetime
from flask_login import UserMixin
from flaskblog import db, login_manager
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
image_file = db.Column(db.String(20), nullable=False, default='default.jpeg')
password = db.Column(db.String(60), nullable=False)
posts = db.relationship('Post', backref='author', lazy=True)
def __repr__(self):
return f"User('{self.username}', '{self.email}', '{self.image_file}')"
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100), nullable=False)
date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
content = db.Column(db.Text, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
def __repr__(self):
return f"User('{self.title}', '{self.date_posted}')"
|
[
"srinjoym98@gmail.com"
] |
srinjoym98@gmail.com
|
e286247caef6608e64d3f83668b0e57d5c35c469
|
07e6fc323f657d1fbfc24f861a278ab57338b80a
|
/python/test_chem_reaction.py
|
a45fb01f6793461a249921c48059b569c7d781b2
|
[
"MIT"
] |
permissive
|
ProkopHapala/SimpleSimulationEngine
|
99cf2532501698ee8a03b2e40d1e4bedd9a12609
|
47543f24f106419697e82771289172d7773c7810
|
refs/heads/master
| 2022-09-05T01:02:42.820199
| 2022-08-28T10:22:41
| 2022-08-28T10:22:41
| 40,007,027
| 35
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
#!/usr/bin/python
import re
import numpy as np
import sys
from pySimE import chemistry as ch
#print ch.str2composition( sys.argv[1] )
#sides = ch.parseReaction( 'Fe+O2=Fe2O3' )
#sides = ch.parseReaction( 'C12H22O11+KNO3=H2O+CO2+K2CO3+N2' )
#print sides
#print ch.reaction2string( sides )
#print ch.balanceReactionString( 'Fe+O2=Fe2O3' )
print ch.balanceReactionString( 'C12H22O11+KNO3=H2O+CO2+K2CO3+N2' )
#print atomicBalance( reaction[0], reaction[1] )
|
[
"ProkopHapala@gmail.com"
] |
ProkopHapala@gmail.com
|
64295ef2699c0f0ee65e91e48a6268e1df7ef44b
|
3ac02ea9f521d34cc385f67ad3fe19749311a551
|
/elvis/constants.py
|
71169d8779b2590bba7790acc597aab7b6a52ce2
|
[
"MIT"
] |
permissive
|
nghenzi/elvis
|
79886bd88c3fa01e7a7b8e80be4e0c8018c81ad3
|
57cc4e83b790d9970566cdd09c5aeb056534e2b5
|
refs/heads/master
| 2022-09-06T09:35:49.993300
| 2020-05-29T09:54:02
| 2020-05-29T09:54:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
from enum import Enum
class LayoutTheme(Enum):
light = 'light'
dark = 'dark'
|
[
"lvankouwen@gmail.com"
] |
lvankouwen@gmail.com
|
4d838de8a7073096520f06dfb7f090cada807e65
|
f0364f2511721d22599eb46eda40ed3d0f3b4b00
|
/autotest_ecutest4.5/ui_Complete.py
|
ae196cd9419880f8b4a5f6114e797c7ad06eb703
|
[] |
no_license
|
KnightCpp/HIL-test-Base-On-ECU_TEST
|
19c2f4d3f0ac0e625194c8780fe5c4c15dd68372
|
4860d888e7599b3fbd7a2372bb9a3f6038c97ace
|
refs/heads/main
| 2023-03-30T01:33:45.922283
| 2021-03-25T13:24:29
| 2021-03-25T13:24:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,729
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'E:\temp\py\GUI\autotest_ecutest4.5\Complete.ui'
#
# Created: Fri Jan 04 10:23:27 2019
# by: PyQt4 UI code generator 4.10
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_CompleteDgl(object):
def setupUi(self, CompleteDgl):
CompleteDgl.setObjectName(_fromUtf8("CompleteDgl"))
CompleteDgl.resize(328, 110)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/haha/HaHaBundle.ico")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
CompleteDgl.setWindowIcon(icon)
CompleteDgl.setStyleSheet(_fromUtf8(""))
self.label = QtGui.QLabel(CompleteDgl)
self.label.setGeometry(QtCore.QRect(70, 30, 171, 41))
self.label.setStyleSheet(_fromUtf8("color: rgb(0, 255, 0);\n"
"font: 20pt \"Arial\";"))
self.label.setObjectName(_fromUtf8("label"))
self.retranslateUi(CompleteDgl)
QtCore.QMetaObject.connectSlotsByName(CompleteDgl)
def retranslateUi(self, CompleteDgl):
CompleteDgl.setWindowTitle(_translate("CompleteDgl", "Complete", None))
self.label.setText(_translate("CompleteDgl", "Complete !", None))
import haha_rc
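A hedged launcher sketch for the generated class (assumes PyQt4 and the compiled haha_rc resource module imported above):
import sys
from PyQt4 import QtGui

# Standard pattern for using a pyuic4-generated Ui_* class on a plain dialog.
app = QtGui.QApplication(sys.argv)
dialog = QtGui.QDialog()
ui = Ui_CompleteDgl()
ui.setupUi(dialog)
dialog.show()
sys.exit(app.exec_())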
|
[
"noreply@github.com"
] |
KnightCpp.noreply@github.com
|
35871975b31c4ba2ba0b34d40db9e1991e766f36
|
f662a5fb79627d22723ee91b49613b63221160ff
|
/cbv3/bin/flake8
|
c880345fa3c8189ed2ebb38bbe5f62e8f1a00130
|
[] |
no_license
|
brylie/django-concept
|
bd9bcbaf6dfe0bf2c25dcd2ff34a4d621275c619
|
4003222dc2a1fc441ac6bf21b03f1bad6a51ae23
|
refs/heads/master
| 2020-07-21T11:11:08.225802
| 2019-10-06T06:14:39
| 2019-10-06T06:14:39
| 206,844,463
| 0
| 0
| null | 2019-09-06T17:36:51
| 2019-09-06T17:36:50
| null |
UTF-8
|
Python
| false
| false
| 256
|
#!/Users/lpnotes/Desktop/django-concept/cbv3/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from flake8.main.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"lpnotes@Lindas-MacBook-Pro-2.local"
] |
lpnotes@Lindas-MacBook-Pro-2.local
|
|
730fe30426ac70c128ee92b95ce2c55d3c99b67e
|
85f3dcf42563767d55994160e50fab175d51304b
|
/resumeproject/edu/urls.py
|
67630c62865efc1cd742aebfb016ec9be7ada2c1
|
[] |
no_license
|
surya-pratap-2181/All-django-Projects
|
ac6bb3f9fa4122b618d42edc5aedf726ecff2c83
|
fd7c7be19810794bade1f61ecfd5423489801c43
|
refs/heads/main
| 2023-06-24T00:26:47.861780
| 2021-07-28T02:59:59
| 2021-07-28T02:59:59
| 390,193,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 114
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('/skill', views.skill, name="skill"),
]
|
[
"rathoresurya21@gmail.com"
] |
rathoresurya21@gmail.com
|
3b0858bb9df04478fdbea75686d088f8c57597c3
|
303f984c9668fd2a099939abf1982e79b1a70f3e
|
/roll_graph.py
|
80b0fc5bbab7cf8c6ebcc3b836f400b2cc000cc8
|
[
"MIT"
] |
permissive
|
nate-r-a/catan-receipts
|
2bbc620d887fd18c849d1d2110b59f8e97275d1d
|
37254983a9847dc13409f9f312d542293ff34f3a
|
refs/heads/master
| 2021-01-10T22:42:49.150403
| 2016-11-16T22:35:46
| 2016-11-16T22:35:46
| 69,703,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,245
|
py
|
import plotly.plotly as py
from plotly.graph_objs import Bar, Scatter, Figure, Layout
from plotly import __version__
def create_graph(actual_rolls):
#x-axis
NUMBERS = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
#relative odds of a number being rolled
ODDS = [1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1]
#calculate expected rolls
expected_rolls = []
total = 0
for i in actual_rolls:
total += i
for i in ODDS:
expected_rolls.append((total/36) * i)
#Sample values for testing
#y-axis - bar
#actual_rolls = [0, 5, 2, 5, 4, 3, 8, 9, 1, 1, 2]
#y-axis - scatter
#expected_rolls = [1.1111111111111112, 2.2222222222222223, 3.3333333333333335, 4.444444444444445, 5.555555555555555, 6.666666666666667, 5.555555555555555, 4.444444444444445, 3.3333333333333335, 2.2222222222222223, 1.1111111111111112]
trace1 = Bar(x=NUMBERS,y=actual_rolls,
name = "Actual",
marker = dict(
line = dict(
color = "rgb(0,0,0)",
width = 5),
color = "rgb(255,255,255)")
)
trace2 = Scatter(x=NUMBERS, y=expected_rolls,
name = "Expected",
marker = dict(
size = 10,
color = "rgb(0,0,0)",
symbol = "hexagon"
)
)
data = [trace1, trace2]
layout = Layout(width = 365,
height = 310,
xaxis = dict(autotick = False,
tick0 = 2,
dtick = 1,
tickfont = dict(size = 18)),
yaxis = dict(tickfont = dict(size = 18)),
margin = dict(b = 25,
l = 25,
r = 0,
t = 0),
showlegend = False)
fig = Figure(data=data,layout=layout)
# Save the figure as a png image:
py.image.save_as(fig, 'dice_rolls.png')
#Sample rolls for testing
# actual_rolls = [0, 5, 2, 5, 4, 3, 8, 9, 1, 1, 2]
# expected_rolls = [1.1111111111111112, 2.2222222222222223, 3.3333333333333335, 4.444444444444445, 5.555555555555555, 6.666666666666667, 5.555555555555555, 4.444444444444445, 3.3333333333333335, 2.2222222222222223, 1.1111111111111112]
#create_graph(actual_rolls)
# trace1 = go.Scatter(
# x=NUMBERS,
# y=expected_rolls
# )
# trace2 = go.Bar(
# x=NUMBERS,
# y=actual_rolls
# )
# data = [trace1, trace2]
# py.plot(data, filename='bar-line')
|
[
"noreply@github.com"
] |
nate-r-a.noreply@github.com
|
81286eab7404c79ae264329c873fd324031b3ce5
|
b7054c7dc39eeb79aa4aecb77a8de222400b19a7
|
/object.py
|
deee2a4715df5ac355f73bac61921bfff028351c
|
[] |
no_license
|
csuxh/python_fullstack
|
89027133c7f9585931455a6a85a24faf41792379
|
f78571976b3bef104309e95304892fdb89739d9e
|
refs/heads/master
| 2023-05-11T09:36:40.482788
| 2019-06-12T14:21:26
| 2019-06-12T14:21:26
| 145,090,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 988
|
py
|
#!/usr/bin/env python
#!-*-coding:utf-8 -*-
#!@Author : jack.xia
#!@Time : 2018/5/29 21:56
#!@File : object.py
class Stuf(object):
count = 0
__slots__ = ('name', 'id', 'position')
def __init__(self, name, id, position):
self.__name = name
self.__id = id
self.__position = position
def print_obj(self):
print('name: %s ;id: %d ;position %s ' %(self.__name, self.__id, self.__position))
class Account(Stuf):
pass
class IT(Stuf):
pass
if Stuf.count != 0:
print('Test failed!')
else:
bart = Stuf('Bart', 12, '2-4')
if Stuf.count != 1:
print('Test failed!')
Stuf.count +=1
print('%d' %(Stuf.count + 1) )
else:
lisa = Stuf('lisa', 11, '2-5')
if Stuf.count != 2:
print('Test failed!')
else:
print('Stuf:', Stuf.count)
print('Test passed!')
#stu1 = Stuf('jack', 13, '1-2')
#stu1.print_obj()
#print(stu1.id)
#print(stu1.__name)
|
[
"csuxh@foxmail.com"
] |
csuxh@foxmail.com
|
cad5f850e0c474633290f1d954bf25d14c77d53a
|
d16813727de339ec61c02c60cf1ac8bcd9636802
|
/PreProcessamento/reducao.py
|
eef04034e60b70d536c42f45c11ec40ac1944875
|
[] |
no_license
|
joaocbrito/DataMining
|
5324d9309d67fd3ce8243007458eef7efe1466b0
|
0efe85500ce55675eee85ecae31ce058f92cba41
|
refs/heads/main
| 2023-06-08T22:38:39.502623
| 2021-07-02T12:55:02
| 2021-07-02T12:55:02
| 351,207,124
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,903
|
py
|
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
def main():
# Read the input file
input_file = './Dataset/air-quality-clean.data'
df = pd.read_csv(input_file)
columns = list(df.columns)
target = 'CO(GT)'
# Separating out the columns
x = df.loc[:, columns].values
# Separating out the target
y = df.loc[:, [target]].values
# PCA projection
pca = PCA()
principalComponents = pca.fit_transform(x)
print("Explained variance per component:")
print(pca.explained_variance_ratio_.tolist())
print("\n\n")
principalDf = pd.DataFrame(data=principalComponents[:, 0:2],
columns=['principal component 1',
'principal component 2'])
finalDf = pd.concat([principalDf, df[[target]]], axis=1)
ShowInformationDataFrame(finalDf, "Dataframe PCA")
VisualizePcaProjection(finalDf, target)
def ShowInformationDataFrame(df, message=""):
print(message+"\n")
print(df.info())
print(df.describe())
print(df.head(10))
print("\n")
def VisualizePcaProjection(finalDf, targetColumn):
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel('Principal Component 1', fontsize=15)
ax.set_ylabel('Principal Component 2', fontsize=15)
ax.set_title('2 component PCA', fontsize=20)
targets = [1, 2, 3]
colors = ['r', 'g', 'b']
for target, color in zip(targets, colors):
indicesToKeep = finalDf[targetColumn] == target
ax.scatter(finalDf.loc[indicesToKeep, 'principal component 1'],
finalDf.loc[indicesToKeep, 'principal component 2'],
c=color, s=50)
ax.legend(targets)
ax.grid()
plt.show()
if __name__ == "__main__":
main()
|
[
"joao.brg@hotmail.com"
] |
joao.brg@hotmail.com
|
22cce56ad1cf624ac9db09d203ea57c2bd8a72fe
|
e34d4bf879910b8f41068c1efb90915897e53d53
|
/sprint/SquaresOfSortedArray.py
|
a58ff6bd16baa33b009ff18fbabf44af40766e9e
|
[] |
no_license
|
ZhouningMan/LeetCodePython
|
6cfc30f0b76f6162502410fef5639fde4801bd74
|
cad9585c440efb329c9321648f94c58ded198438
|
refs/heads/master
| 2020-12-10T03:53:48.824344
| 2020-01-13T02:29:02
| 2020-01-13T02:29:02
| 233,494,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
class Solution:
def sortedSquares(self, A):
size = len(A)
squares = [0] * size
for i in range(size):
squares[i] = A[i] * A[i]
copy = [0] * size
begin = 0
end = size - 1
i = size - 1
while begin <= end:
if squares[begin] > squares[end]:
copy[i] = squares[begin]
begin += 1
else:
copy[i] = squares[end]
end -= 1
i -= 1
return copy
if __name__ == '__main__':
s = Solution()
ans = s.sortedSquares([-3,-3,-2,1])
print(ans)
|
[
"linfenglee321@gmail.com"
] |
linfenglee321@gmail.com
|
52dfc7c479bffded54241d1449539d22e9a4a7ca
|
fd8dbd377277a8cd41883ee19fa01ed8285f17af
|
/casey_prototype/urls.py
|
91a6aece9fe891b009545c38c982f99f50801652
|
[] |
no_license
|
tomlouismurphy/casey_prototype
|
691c3ee06c5db23e1db9baaa1b159b3f40af692e
|
0cc03cf06e3fc7bc1eb60330d689b02393e68461
|
refs/heads/master
| 2021-08-23T17:19:19.337301
| 2017-12-05T21:27:16
| 2017-12-05T21:27:16
| 112,775,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 836
|
py
|
"""casey_prototype URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^bottom_ninth/', include('bottom_ninth.urls')),
url(r'^admin/', admin.site.urls),
]
|
[
"tom.louis.murphy@gmail.com"
] |
tom.louis.murphy@gmail.com
|
fb4d6144389ec8eb93a016186bb5908c2683cdc8
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_clattering.py
|
3893e7f6289447dca25d947171005c4f61ce3729
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
#calss header
class _CLATTERING():
def __init__(self,):
self.name = "CLATTERING"
self.definitions = ['clatter']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['clatter']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
08f4aced36fe56bcec48deaa99f0e5ad628d5792
|
b978cf7f47c5cd6295f3c0c104752d3e1e9d89d6
|
/test.py
|
f88b6b9a5b2b21a543c221161f595e2588fd53b5
|
[] |
no_license
|
sepidmnorozy/backup-crawler
|
1e4cd62d5a48b6e3bf974f89d1d513765e5d9c5b
|
73beddd2febd0dec3a0d1f5706557de073035a06
|
refs/heads/master
| 2022-11-18T19:56:43.507394
| 2020-07-22T13:11:53
| 2020-07-22T13:11:53
| 281,674,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
from pymongo import MongoClient
from rss import rss_reader
import json
if rss_reader('https://www.khabaronline.ir/rss') == 'Success':
with open("links.json", 'r') as f:
urls = json.load(f)
else:
urls = []
client = MongoClient()
db = client['newsdb_week']
articles = db.weekarticles
start_urls = []
for url in urls:
if articles.find_one({"url": url}) is None:
start_urls.append(url)
print(start_urls)
print(len(start_urls))
|
[
"alireza97hi@gmail.com"
] |
alireza97hi@gmail.com
|
be150c5153affabe985e44548c355a01e4c22fda
|
1f908c05155bd905458ef0a740f67026ec4d83ea
|
/karatsubaalgorithm.py
|
5c3820c48291831348acbec9a2108d6316ed89b2
|
[] |
no_license
|
amirali1690/algorithm-coursera
|
c3214f49a14dc1daaa93f80be4109d767e8d0411
|
19d7e2246a489e939c9e3a47a70c08e0e3df0d4a
|
refs/heads/master
| 2020-05-04T13:23:27.640717
| 2020-01-23T04:49:12
| 2020-01-23T04:49:12
| 179,158,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 7 18:18:49 2019
@author: amir
"""
import math
def karatsuba(x,y):
if x<10 and y<10:
return x*y
else:
n=math.log(x,10)
nf=n//2+1
a=int(x/10**(nf))
b=int(x%10**(nf))
c=int(y/10**(nf))
d=int(y%10**(nf))
ac=karatsuba(a,c)
bd=karatsuba(b,d)
adbc=karatsuba(a,d)+karatsuba(b,c)
prod = ac * 10**(2*nf) + (adbc * 10**nf) + bd
return int(prod)
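A quick sanity check (a sketch; note that nf is derived from x only, so the routine assumes both operands have a similar number of digits):
if __name__ == '__main__':
    # Verified against built-in multiplication; mismatched digit counts can hit log10(0).
    assert karatsuba(56, 78) == 56 * 78
    assert karatsuba(1234, 5678) == 1234 * 5678
    print(karatsuba(1234, 5678))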
|
[
"amirali1690@gmail.com"
] |
amirali1690@gmail.com
|
d5962b41aa960e1b40b3de9eb56fc4a6813c6491
|
a7ad18e70d9f46429281490af40ce7595825640f
|
/clients/services.py
|
e2a193dff132933a0de5f0388ebfd50b424cc7a0
|
[] |
no_license
|
JEBT28/Curso-de-python-CRUD
|
2f02d4450440ead037cd4ea5f65a8a45b0baf8dd
|
ab7d3aa9bcf063eeae3c858c030784053ec6a88a
|
refs/heads/master
| 2022-12-15T07:44:53.843546
| 2020-08-24T18:10:53
| 2020-08-24T18:10:53
| 287,862,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,116
|
py
|
import json
import uuid
from clients.models import Client
class ClientService:
def __init__(self,file_clients):
self.file_clients=file_clients
self.load_clients()
# print('Methods loaded')
def load_clients(self) :
file=open(self.file_clients,'r')
var = json.load(file)
clients = []
for d in var:
name = str(d.get("name"))
company = str(d.get("company"))
email = str(d.get("email"))
position = str(d.get("position"))
uid = str(d.get("uid"))
client = Client(name, company, email,position,uid)
clients.append(client)
self.clients = list(clients)
file.close()
def save_clients(self):
file=open(self.file_clients,'w')
file.write(json.dumps([c.to_dict() for c in self.clients]))
file.close()
def create_client(self,client):
if client not in self.clients:
self.clients.append(client)
else:
print('Client already exists')
self.save_clients()
def list_clients(self):
clients=[]
for client in self.clients:
if client is None:
pass
else:
clients.append(client.to_dict())
return clients
def delete_client(self, deleted_client):
clients_aux = []
for client in self.clients:
if client.uid==deleted_client.uid:
pass
else:
clients_aux.append(client)
self.clients = clients_aux
self.save_clients()
def update_client(self,updated_client):
clients_aux = []
for client in self.clients:
if client.uid==updated_client.uid:
clients_aux.append(updated_client)
else:
clients_aux.append(client)
self.clients = clients_aux
self.save_clients()
def from_str(x) -> str:
assert isinstance(x, str)
return x
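# Editor's sketch (not part of the original module): a minimal usage example,
# assuming a JSON file such as 'clients.json' exists and contains a list of
# client dicts matching the fields read in load_clients() above.
if __name__ == "__main__":
    service = ClientService('clients.json')
    for entry in service.list_clients():
        print(entry)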
|
[
"juanbal2808@gmail.com"
] |
juanbal2808@gmail.com
|
ad614d1c10782d42d169c7a5b58b58df2d9e4ac2
|
caceb60f71165772b6d6155f619e79189e7c80a9
|
/第一期/北京-沧澜/ini文件读写.py
|
a7089ca87611d7a683cc4043345514b042aed299
|
[
"Apache-2.0"
] |
permissive
|
beidou9313/deeptest
|
ff41999bb3eb5081cdc8d7523587d7bc11be5fea
|
e046cdd35bd63e9430416ea6954b1aaef4bc50d5
|
refs/heads/master
| 2021-04-26T23:06:08.890071
| 2019-04-03T02:18:44
| 2019-04-03T02:18:44
| 123,931,080
| 0
| 0
|
Apache-2.0
| 2018-03-05T14:25:54
| 2018-03-05T14:25:53
| null |
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
# -*- coding: utf-8 -*-
import configparser
if __name__ == "__main__":
config = configparser.ConfigParser()
    # First add a section
    config.add_section('中国')
    # Add key-value pairs under the new section
    config.set('中国', '河北', '石家庄')
    config.set('中国', '河南', '郑州')
    config.set('中国', '山东', '济南')
    # Add another section, but without any key-value pairs
config.add_section('民国')
with open('iniConfig.ini', 'w') as configfile:
config.write(configfile)
##########################################
    # Read the ini file back
config.read('iniConfig.ini')
    # Get all sections
sections = config.sections()
print(sections)
    # Get all options under each section
for sec in sections:
options = config.options(sec)
print(options)
    # Get the value for each section/option pair
for sec in sections:
for options in config.options(sec):
print("[%s] %s=%s" % (sec, options, config.get(sec, options)))
|
[
"15201036511@163.com"
] |
15201036511@163.com
|
2d0cf1dd1d5942321fd949a7ccdd5f5a5be62e1b
|
1fda2038157bfc9cce18edba04fd345c882851c9
|
/test scripts/tesst.py
|
4800010862a7114a180021885e9abb9d4b6fbd3e
|
[] |
no_license
|
yotovtsvetomir/Cloud-Providers-Notifier
|
6df8241f6980c044d3c8850dab02ff99206bbe0c
|
7d84f32bc51b2739f4c0e748a89908d8c62a958f
|
refs/heads/master
| 2021-06-27T01:21:33.815140
| 2017-09-13T11:57:17
| 2017-09-13T11:57:17
| 103,392,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
from pymongo import MongoClient
import datetime
client = MongoClient()
db = client.aws_us_east
posts = db.posts
#Find by region
#f = posts.find({"_id":{'$regex' : ".*-us-east"}})
#for post in f:
# print post
#Find by timeframe
#start = datetime.datetime(2016, 9, 14, 1, 31, 0, 0)
#end = datetime.datetime(2016, 9, 14, 1, 32, 0, 0)
start = 'Thu, 9 Feb 2017 19:29:00 PST'
#'Thu, 9 Feb 2017 19:29:00 PST'
d = posts.find({"published": start})
for doc in d:
    print(doc)
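# Editor's sketch (an assumption, not in the original script): the commented-out
# time-frame query above would usually be expressed as a range filter, e.g.
#
#   start = datetime.datetime(2016, 9, 14, 1, 31, 0, 0)
#   end = datetime.datetime(2016, 9, 14, 1, 32, 0, 0)
#   for post in posts.find({"published": {"$gte": start, "$lt": end}}):
#       print(post)
#
# assuming the "published" field is stored as a BSON datetime rather than the
# formatted string used in this test script.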
|
[
"noreply@github.com"
] |
yotovtsvetomir.noreply@github.com
|
ab550e443a8102657df2f405460ad1185bd0c03e
|
6d2f5ab1d568b1b44591e8cc85865b37539d6b22
|
/HumourDetection/src/util/katz_walk.py
|
053df3ddf0c9ccb9eaac458daee4718a3453db09
|
[] |
no_license
|
acattle/HumourTools
|
ed350bb36c3f4d886653f6625577fba0f020e3e1
|
a4522e55ca8003745eff9bc032a10c56b9cdd6fe
|
refs/heads/master
| 2021-03-27T19:50:25.225543
| 2019-06-30T22:34:32
| 2019-06-30T22:34:32
| 63,935,056
| 11
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,400
|
py
|
'''
Created on Jul 6, 2018
@author: Andrew Cattle <acattle@connect.ust.hk>
Utility functions related to Katz Spreading Activation.
For more information see:
Simon De Deyne, Daniel J. Navarro, Amy Perfors, Gert Storms, 2016,
'Structure at every scale: A semantic network account of the
similarities between unrelated concepts.', Journal of Experimental
Psychology: General, vol. 145, no. 9, pp. 1228-1254
These Python scripts are based on the R and matlab scripts available at:
https://github.com/SimonDeDeyne/SWOWEN-2018/tree/master/R/functions
'''
from scipy.sparse import csc_matrix, diags, identity, save_npz, load_npz
from scipy.sparse.linalg import inv
from word_associations.association_readers.igraph_readers import iGraphFromTuples
import numpy as np
from time import time  # timing helper used in generate_katz_walk()
# from numpy.linalg import inv
# from util.util_classes import IndexLookupWrapper
########## MATRIX OPERATIONS ##########
def _sum_as_array(mat, axis=0):
return np.squeeze(np.asarray(mat.sum(axis=axis)))
def _normalize(mat, norm_vec):
    #remove inf (happens if the normalization value of a row is 0)
norm_vec[np.isinf(norm_vec)] = 0
return diags(norm_vec, format=mat.format) * mat
# return np.diag(norm_vec) * mat
def l1_normalize(mat):
"""
L1 normalize a matrix
:param mat: the matrix to L1 normalize
:type mat: a scipy.sparse matrix
:returns: L1 normalized mat
:rtype: mat
"""
norm_vec = 1/_sum_as_array(mat,axis=1)
# norm_vec = 1/mat.sum(axis=1)
return _normalize(mat, norm_vec)
def l1_numpy(mat):
row_sums = mat.sum(axis=1)
# return mat / row_sums[:, np.newaxis] #np.newaxis implicitly reshapes row_sums from (n,) to (n,1)
#perform normalization row-by-row to avoid memory error
mat = np.copy(mat) #copy mat to avoid in-place normalization
for i, rs in enumerate(row_sums):
mat[i] = mat[i] / rs
mat[np.isinf(mat)] = 0 #get rid of infs if they happen
#TODO: is this needed?
return mat
def l2_normalize(mat):
"""
L2 normalize a matrix
:param mat: the matrix to L2 normalize
:type mat: a scipy.sparse matrix
:returns: L2 normalized mat
:rtype: mat
"""
norm_vec = 1/np.sqrt(_sum_as_array(mat**2,axis=1))
return _normalize(mat, norm_vec)
def ppmi(mat):
"""
Positive Pointwise Mutual Information
:param mat: the matrix to perform PPMI on
:type mat: scipy.sparse.csc_matrix
:returns: the PPMI matrix
:rtype: scipy.sparse.csc_matrix
"""
n=mat.shape[0]
d=diags(1/(_sum_as_array(mat,axis=0)/n), format=mat.format)
# d=np.diag(1/(mat.sum(axis=0)/n))
mat = mat*d #TODO: check that mat is a sparse matrix and not a numpy array
#TODO: currently we assume mat is sparse. Add check for numpy
mat.data = np.log2(mat.data) #only take the logs of the non-zero elements
    mat.data[mat.data < 0] = 0 #replace negative values with 0s. This is the POSITIVE part of PPMI
    mat.eliminate_zeros() #get rid of any 0 values we may have added
    # mat = np.log2(mat) #TODO: is this what "P@x <- log2(P@x)" does?
    # mat[mat < 0] = 0 #replace negative values with 0s. This is the POSITIVE part of PPMI
return mat
def ppmi_numpy(mat):
"""
Positive Pointwise Mutual Information
:param mat: the matrix to perform PPMI on
:type mat: scipy.sparse.csc_matrix
:returns: the PPMI matrix
:rtype: scipy.sparse.csc_matrix
"""
#pmi is log(p(x|y)/p(x))
#the values in mat are p(x|y), how is d related to p(x)?
n=mat.shape[0]
d=np.diag(n/(mat.sum(axis=0)))
mat = np.dot(mat, d)
mat[np.nonzero(mat)] = np.log2(mat[np.nonzero(mat)]) #only take the log of the non-zero elements
    mat[mat < 0] = 0 #replace negative values with 0s. This is the POSITIVE part of PPMI
return mat
def katz_walk(P, alpha=0.75):
"""
Performs the Katz Spreading Activation transformation
described in De Deyne et al. (2016)
:param P: adjacency matrix to calculate spreading activation for
:type P: scipy.sparse.csc_matrix
:param alpha: the decay weight for each path step
:type alpha: float
:returns: An adjacency matrix representing the results of the Katz walk
:rtype: scipy.sparse.csc_matrix
"""
return inv(identity(P.shape[0], format=P.format) - alpha*P)
def katz_numpy(P, alpha=0.75):
return np.linalg.inv(np.identity(P.shape[0]) - alpha*P)
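# Editor's note (illustrative sketch, not part of the original module): the Katz
# walk is the Neumann series sum_k (alpha*P)^k = (I - alpha*P)^-1, so the dense
# helper above can be checked directly on a tiny row-stochastic matrix, e.g.
#
#   P = np.array([[0.0, 1.0], [0.5, 0.5]])
#   K = katz_numpy(P, alpha=0.5)
#   approx = sum(np.linalg.matrix_power(0.5 * P, k) for k in range(50))
#   assert np.allclose(K, approx)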
########## GRAPH OPERATIONS ##########
def extract_component(G,mode="strong"):
"""
Extracts the largest strongly connected component from graph G
and converts it to a sparse adjacency matrix
:param G: the graph to extract the component from
:type G: igraph.Graph
:param mode: the clustering mode. Must be either "strong" (i.e. each node has in-degree and out-degree >= 1) or "weak" (i.e. in-degree or out-degree >= 1)
:type mode: str
:returns: the largest strongly connected component as a sparse adjacency matrix and its corresponding word->index mapping
    :rtype: Tuple[scipy.sparse.csc_matrix, Dict[str, int]]
"""
#get largest connected component only
#this reduces computational complexity
G = G.components(mode).giant()
# s=time()
# adj_mat_from_adj = np.array(G.get_adjacency(attribute="weight").data)
# print(time()-s)
# #for use converting from words to matrix indexes
# word_index = dict((n,i) for i, n in enumerate(G.vs["name"]))
# vocab_size = len(word_index)
#reorder the vocabulary to be in alphabetical order
#optional step but makes indexes easier to interpret
old_index_map = {name : i for i, name in enumerate(G.vs["name"])}
sorted_names = sorted(G.vs["name"])
new_index_map = {name : i for i, name in enumerate(sorted_names)}
old_to_new = {old_index_map[name] : new_index_map[name] for name in sorted_names}
vocab_size = len(sorted_names)
#for each edge, make an (x,y,weight) tuple.
#Then split it into separate x, y, and weight lists for constructing sparse matrix
# s=time()
xs,ys,ws = zip(*((*edge.tuple,edge["weight"]) for edge in G.es))
#update indexes
xs = [old_to_new[x] for x in xs]
ys = [old_to_new[y] for y in ys]
adj_mat = csc_matrix((ws, (xs,ys)), shape=(vocab_size, vocab_size)) #solve is more efficient for csc matrixes
# print(time()-s)
# adj_mat = adj_mat.todense()
# print(time()-s)
# s=time()
# adj_mat_from_zeros = np.zeros((vocab_size,vocab_size))
# for x,y,w in zip(xs,ys,ws):
# adj_mat_from_zeros[x,y]=w
# print(time()-s)
#
# print(adj_mat_from_adj.nbytes)
# print(adj_mat_dense.nbytes)
# adj_mat_wrapped = IndexLookupWrapper(adj_mat, new_index_map, ignore_case=True)
# return adj_mat, word_index
return adj_mat, new_index_map
def generate_katz_walk(cue_response_strengths):
#convert to iGraph
G = iGraphFromTuples(cue_response_strengths).graph
to_del = [v.index for v in G.vs if G.degree(v, mode="OUT") == 0]
G.delete_vertices(to_del)
#for compatibility with katz
G=remove_UK_words(G)
#remove self loops, multiple edges
    #TODO: should I sum multiple edges? Do they ever happen?
G.simplify(combine_edges="sum") #need to specify combine_edges or it erases the weights
    #get largest connected component and convert to adjacency matrix
P, word_index = extract_component(G)
print("starting dense")
s=time()
P_dense = P.todense()
P_dense = l1_numpy(P_dense)
P_dense = ppmi_numpy(P_dense)
P_dense = l1_numpy(P_dense)
P_dense = katz_numpy(P_dense)
P_dense = ppmi_numpy(P_dense)
P_dense = l1_numpy(P_dense)
print(f"dense took {time()-s} seconds")
# print(f"pre-katz density: {P.nnz/(P.shape[0]*P.shape[1])}")
#
# print("starting sparse")
# s=time()
# #ensure matrix values are probabilities
# P = l1_normalize(P)
# P = ppmi(P)
# P = l1_normalize(P)
# P = katz_walk(P)
# P = ppmi(P)
# P = l1_normalize(P)
#
# print(f"sparse took {time()-s} seconds")
#
# print(f"post-katz density: {P.nnz/(P.shape[0]*P.shape[1])}")
P=None
return P, word_index, P_dense
def remove_UK_words(G):
"""
    For compatibility with DeDeyne's implementation
"""
# brexit_words = set( w.upper() for w in ['aeroplane', 'arse', 'ax', 'bandana', 'bannister', 'behaviour', 'bellybutton', 'centre',
# 'cheque', 'chequered', 'chilli', 'colour', 'colours', 'corn-beef', 'cosy', 'doughnut',
# 'extravert', 'favour', 'fibre', 'hanky', 'harbour', 'highschool', 'hippy', 'honour',
# 'hotdog', 'humour', 'judgment', 'labour', 'light bulb', 'lollypop', 'neighbour',
# 'neighbourhood', 'odour', 'oldfashioned', 'organisation', 'organise', 'paperclip',
# 'parfum', 'phoney', 'plough', 'practise', 'programme', 'pyjamas',
# 'racquet', 'realise', 'recieve', 'saviour', 'seperate', 'theatre', 'tresspass',
# 'tyre', 'verandah', 'whisky', 'WIFI', 'yoghurt','tinfoil','smokey','seat belt','lawn mower',
# 'coca-cola','cell phone','breast feeding','break up','bubble gum','black out'])
brexit_words = set(['aeroplane', 'arse', 'ax', 'bandana', 'bannister', 'behaviour', 'bellybutton', 'centre',
'cheque', 'chequered', 'chilli', 'colour', 'colours', 'corn-beef', 'cosy', 'doughnut',
'extravert', 'favour', 'fibre', 'hanky', 'harbour', 'highschool', 'hippy', 'honour',
'hotdog', 'humour', 'judgment', 'labour', 'light bulb', 'lollypop', 'neighbour',
'neighbourhood', 'odour', 'oldfashioned', 'organisation', 'organise', 'paperclip',
'parfum', 'phoney', 'plough', 'practise', 'programme', 'pyjamas',
'racquet', 'realise', 'recieve', 'saviour', 'seperate', 'theatre', 'tresspass',
'tyre', 'verandah', 'whisky', 'WIFI', 'yoghurt','tinfoil','smokey','seat belt','lawn mower',
'coca-cola','cell phone','breast feeding','break up','bubble gum','black out'])
to_delete = [v.index for v in G.vs if v["name"] in brexit_words]
G.delete_vertices(to_delete)
return G
if __name__ == "__main__":
from word_associations.association_readers.xml_readers import SWoW_Dataset
# sm = csc_matrix(([0,1,2,3,4], ([0,1,2,3,3], [0,1,2,2,3])))
#
# print(sm.todense())
#
# nm = l2_normalize(sm)
# print(type(nm))
# print(nm.todense())
#make SWoW graph (as igraph.Graph)
swow_100 = SWoW_Dataset("D:/datasets/SWoW/SWOW-EN.R100.csv",complete=False, probs=False,response_types="R1").get_all_associations()
from time import time
# s=time()
katz_sparse, word_index, katz_dense = generate_katz_walk(swow_100)
# print(f"took {time()-s}s")
# save_npz("katz_r1_sparse.npz",katz_sparse)
np.save("katz_r1_dedeyne.npy", katz_dense)
import pickle
with open('word_index_r1_dedeyne.pkl', "wb") as f:
pickle.dump(word_index, f)
# xs, ys = map(array, zip(*graph.get_edgelist()))
# if not graph.is_directed():
# xs, ys = hstack((xs, ys)).T, hstack((ys, xs)).T
# else:
# xs, ys = xs.T, ys.T
# return coo_matrix((ones(xs.shape), (xs, ys)))
|
[
"acattle@connect.ust.hk"
] |
acattle@connect.ust.hk
|
56ab51ce296ac609989c894b2c80f0e70076ecf9
|
be2cd3e1696fa3b506c2fdcac4d388f886950811
|
/PY_files/file1.py
|
3c2f00669db616f8c7c29624c9f2f77d26e942a2
|
[] |
no_license
|
mhkr007/PY_Prgms
|
bc171f4e09cbc088d48336f597cdb476251794ca
|
ebda4d0728bc6751fd4abbdb049ed278277772d1
|
refs/heads/master
| 2020-06-20T12:56:46.218362
| 2019-07-23T07:19:41
| 2019-07-23T07:19:41
| 197,130,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,096
|
py
|
################################## FILE HANDLINGS ##################################################
"""These are 5 properties of files @ name,mode,readable,writable,closed"""
f=open("abc.txt",'r+')
print("File Name: ",f.name)
print("File Mode: ",f.mode)
print("Is File Readable: ",f.readable())
print("Is File Writable: ",f.writable())
print("Is File Closed : ",f.closed)
f.close()
print("Is File Closed : ",f.closed)
########################### write & writelines ############################
print("-------write----->\n")
f=open("abcde.txt",'w+') #to write
f.write("krishna\n")
f.write("Software\n")
f.write("Solutions\n")
print("Data written to the file successfully")
#f.close()
#f=open("abcd.txt",'r') #to read
pos1=f.tell()
print("current position of fptr after writing",pos1)
pos2=f.seek(0,0)
print(" position of fptr to read",pos2)
rd=f.read()
print(rd)
f.close()
############################################
print()
f=open("abcde.txt",'w')
list=["hari\n","teja\n","hema\n","mounika"]
rd=f.writelines(list) #to write no.of lines at time
print(rd)
f.close()
print("List of lines written to the file successfully")
################### read, read(n), readline() readlines() ###################
print("read----->\n")
f=open("abcd.txt","r")
a=f.read() #prints whole data
print(a,"\n**********************************************\n")
p=f.seek(0,0)
a=f.read(10) #prints 'n' chars
print(a,"\n**********************************************\n")
p=f.seek(0,0)
a=f.readline() #reads the first line when called without arguments; an int argument limits how many characters are read
b=f.readline() #reads the second line
c=f.readline(3) #with an int argument, readline returns at most that many characters; the rest of the line is returned by the next call
print(a,"\n***************************************************\n")
print(b)
print(c)
p=f.seek(0,0)
a=f.readlines() #returns all remaining lines as a list when called with no
                #argument (or zero); with a size hint it may return only the
                #first line(s) as a list
print(a,"\n***************************************************\n")
print("end of program")
|
[
"mhkr007@gmail.com"
] |
mhkr007@gmail.com
|
b0199dbaf2cbde5fff7bebea9f30687f1e16eeeb
|
6dc6287827a8b2e9bfb948624f62cc465c54fe12
|
/ch09/qos.py
|
ff5584e1ff5cb30938d1ed42cd0db0c752f3f34c
|
[
"MIT"
] |
permissive
|
AzureCloudMonk/Python-Networking-Cookbook
|
c1f8db96037e6c6a0d24cf4d9339e32d3ba513a0
|
26945c781a51fe72cc01409df6b5c5fa7df53f4c
|
refs/heads/main
| 2023-07-13T09:20:51.416840
| 2021-08-22T18:10:01
| 2021-08-22T18:10:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 879
|
py
|
import os
import sys
import requests
base_url = "https://api.meraki.com/api/v1"
key = os.environ.get("MERAKI_DASHBOARD_API_KEY", None)
if key is None:
print("Please provide an API key. Aborting.")
sys.exit(-1)
sess = requests.Session()
sess.headers.update({
"X-Cisco-Meraki-API-Key": key
})
network_id = "L_783626335162466515"
url = f"{base_url}/networks/{network_id}/switch/qosRules"
resp = sess.get(url)
if resp.status_code == 200:
rules = resp.json()
for rule in rules:
url_del = f"{base_url}/networks/{network_id}/switch/qosRules/{rule['id']}"
resp_del = sess.delete(url_del)
if resp_del.status_code == 204:
print(f"Deleted QoS rule {rule['id']}")
else:
print(f"Failed on delete request. Status: {resp_del.status_code}")
else:
print(f"Failed to retrieve rules. Status: {resp.status_code}")
|
[
"marcel.neidinger@nlogn.org"
] |
marcel.neidinger@nlogn.org
|
03bcd0f092ca2843a4d023d01e64e3e166b7e627
|
c1bb8c962e565749576cad0207ada9b80676c49c
|
/test/bitflyer.py
|
a860b613293eb7fb71e800c6d83fb693c38f877b
|
[] |
no_license
|
mayabaha/vcts
|
ad57a5b567fd850fb0d0dbe7f37569adc49ca2f2
|
37d4fde6d16f36703c96eda5a19ad8c448710f37
|
refs/heads/master
| 2021-09-06T08:44:41.433365
| 2018-01-22T14:26:27
| 2018-01-22T14:26:27
| 107,733,425
| 0
| 0
| null | 2018-01-14T09:59:46
| 2017-10-20T22:47:08
|
Python
|
UTF-8
|
Python
| false
| false
| 9,825
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import requests
import pandas as pd
import datetime
import argparse
class bitflyer:
"""
bitFlyer API module
see the following for details:
https://lightning.bitflyer.jp/docs/api?lang=ja&type=ex&_ga=2.136056049.1965297882.1509160469-180722574.1506822122#板情報
"""
PRODUCT_CODE_BTC = 0x00000001
PRODUCT_CODE_ETH = 0x00000002
PRODUCT_CODE_BCH = 0x00000004
def __init__(self, product_code_bit=0x00000000, outdir=""):
""" constructor
- product_code_bit : target product code
0x00000000 = None
0x00000001 = BTC_JPY
0x00000002 = ETH_BTC
0x00000004 = BCH_BTC
- outdir : output directory for .csv file(s)
"""
# endpoint
self.endpoint = "https://api.bitflyer.jp"
# market
self.markets = []
# set of ticker
self.tickers_btc = []
self.tickers_eth = []
self.tickers_bch = []
# set csv file for ticker
self.tickers_csv_btc = None
self.tickers_csv_eth = None
self.tickers_csv_bch = None
# open csv file
if len(outdir) > 0:
csv_btc_jpy = outdir + "/" + "ticker_btc_jpy.csv"
csv_eth_btc = outdir + "/" + "ticker_eth_btc.csv"
csv_bch_btc = outdir + "/" + "ticker_bch_btc.csv"
header = "# timestamp,product,tick_id,best_bid,best_ask,best_bid_size,best_ask_size,total_bid_depth,total_ask_depth,ltp,volume,volume_by_product\n"
try:
if product_code_bit & self.PRODUCT_CODE_BTC: # BTC_JPY
if os.path.exists(csv_btc_jpy):
self.tickers_csv_btc = open(csv_btc_jpy, "a")
else:
self.tickers_csv_btc = open(csv_btc_jpy, "w")
self.tickers_csv_btc.write(header)
if product_code_bit & self.PRODUCT_CODE_ETH: # ETH_BTC
if os.path.exists(csv_eth_btc):
self.tickers_csv_eth = open(csv_eth_btc, "a")
else:
self.tickers_csv_eth = open(csv_eth_btc, "w")
self.tickers_csv_eth.write(header)
if product_code_bit & self.PRODUCT_CODE_BCH: # BCH_BTC
if os.path.exists(csv_bch_btc):
self.tickers_csv_bch = open(csv_bch_btc, "a")
else:
self.tickers_csv_bch = open(csv_bch_btc, "w")
self.tickers_csv_bch.write(header)
except:
raise
def get(self, api):
""" invoke API to bitFlyer by GET method """
if len(api) == 0:
print("ERROR: API is not specified")
return
# invoke
url = self.endpoint + api
# print("%s: URL=%s" % (sys._getframe().f_code.co_name, url))
req = requests.get(url)
if req.status_code != 200:
print("ERROR: error occurred in invoking, errcd=%d\n" % req.status_code)
return
item = req.json()
return item
def fetchMarketStatus(self, product_code=""):
""" fetch market status
- NORMAL : active
- BUSY : busy (not at a high load)
        - VERY BUSY : at a high load
- SUPER BUSY : extremely high load
- NO ORDER : cannot accept order
- STOP : market is inactive
- FAIL : could not get market status
"""
api = "/v1/gethealth"
if product_code is not None:
api = api + "?product_code=%s" % (product_code)
# invoke
item = self.get(api)
if item is not None:
return item['status']
else:
return "FAIL"
def fetchBookStatus(self, product_code=""):
""" fetch book status """
api = "/v1/getboardstate"
if len(product_code) > 0:
api = api + "?product_code=%s" % (product_code)
item = self.get(api)
if item is not None:
return item
def fetchMarket(self):
""" fetch market list """
items = self.get("/v1/getmarkets")
if items is not None:
# clear old status
if len(self.markets) > 0:
self.markets.clear()
for item in items:
                status = self.fetchBookStatus(item['product_code'])
market = {"product_code" : item["product_code"],
"state" : status["state"],
"health" : status["health"]}
market["datetime"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.markets.append(market)
return self.markets
else:
return
def fetchTicker(self, product_code=""):
""" fetch the latest trade information
- 'product_code' : product name
- 'timestamp' : current time (UTC)
- 'tick_id' : tick ID
- 'best_bid' : the highest price of current buy order
- 'best_ask' : the lowest price of current sell order
        - 'best_bid_size' : ???
        - 'best_ask_size' : ???
        - 'total_bid_depth' : ???
        - 'total_ask_depth' : ???
- 'ltp' : last price
- 'volume' : the amount of transactions in 24hr
"""
api = "/v1/getticker"
if len(product_code) > 0:
api = api + "?product_code=%s" % (product_code)
item = self.get(api)
if item is not None:
ticker = {"timestamp" : item["timestamp"],
"product" : item["product_code"],
"tick_id" : item["tick_id"],
"best_bid" : item["best_bid"],
"best_ask" : item["best_ask"],
"best_bid_size" : item["best_bid_size"],
"best_ask_size" : item["best_ask_size"],
"total_bid_depth" : item["total_bid_depth"],
"total_ask_depth" : item["total_ask_depth"],
"ltp" : item["ltp"],
"volume" : item["volume"],
"volume_by_product" : item["volume_by_product"]}
try:
if item["product_code"] == "BTC_JPY":
self.tickers_btc.append(ticker)
self.tickers_csv_btc.write(bitflyer.ticker2str(ticker) + "\n")
elif item["product_code"] == "ETH_BTC":
self.tickers_eth.append(ticker)
self.tickers_csv_eth.write(bitflyer.ticker2str(ticker) + "\n")
elif item["product_code"] == "BCH_BTC":
self.tickers_bch.append(ticker)
self.tickers_csv_bch.write(bitflyer.ticker2str(ticker) + "\n")
else:
pass
except:
raise
return ticker
else:
return
def fetchTickerBTC(self):
""" get ticker of BTC-JPY """
try:
return self.getTicker("BTC_JPY")
except:
raise
def fetchTickerETH(self):
""" get ticker of ETH-BTC """
try:
return self.getTicker("ETH_BTC")
except:
raise
def fetchTickerBCH(self):
""" get ticker of BCH-BTC """
try:
return self.getTicker("BCH_BTC")
except:
raise
def market2str(markets):
""" convert market information to string """
header = "# date product market_status board_status\n"
line = header
for market in markets:
line = line + "%(datetime)s %(product_code)15s %(health)13s %(state)12s\n" % market
return line
def ticker2str(ticker):
""" convert ticker to string
output format:
timestamp,product,tick_id,best_bid,best_ask,best_bid_size,best_ask_size,total_bid_depth,total_ask_depth,ltp,volume,volume_by_product"
"""
line = "%(timestamp)s,%(product)s,%(tick_id)s,%(best_bid)s,%(best_ask)s,%(best_bid_size)s,%(best_ask_size)s,%(total_bid_depth)s,%(total_ask_depth)s,%(ltp)s,%(volume)s,%(volume_by_product)s" % ticker
return line
def tickers2str(tickers):
""" convert tickers to string """
line = "# timestamp,product,tick_id,best_bid,best_ask,best_bid_size,best_ask_size,total_bid_depth,total_ask_depth,ltp,volume,volume_by_product\n"
        for ticker in tickers:
            line = line + bitflyer.ticker2str(ticker) + "\n"
        return line
################################################################################
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='bitFlyer API fetch module')
parser.add_argument('-o', '--output-dir', metavar='dir', dest='outdir',
type=str, required=False, default='',
help='output directory for .csv file')
parser.add_argument('-i', '--interval', metavar='val', dest='interval',
type=int, required=False, default=1,
help='polling interval [sec]')
parser.add_argument('-c', '--count', metavar='count', dest='count',
type=int, required=False, default=-1,
help='fetch count')
parser.add_argument('-b', '--fetch-btc', dest='f_btc',
required=False, action="store_true", default=False,
help='fetch ticker of BTC_JPY')
parser.add_argument('-e', '--fetch-eth', dest='f_eth',
required=False, action="store_true", default=False,
help='fetch ticker of ETH_BTC')
parser.add_argument('-H', '--fetch-bch', dest='f_bch',
required=False, action="store_true", default=False,
help='fetch ticker of BCH_BTC')
args = parser.parse_args()
# interval check
if args.interval <= 0:
print("ERROR: interval is NOT natural number")
sys.exit(1)
# set product code bit (pcb)
pcb = 0
if args.f_btc == True:
pcb = pcb | bitflyer.PRODUCT_CODE_BTC
if args.f_eth == True:
pcb = pcb | bitflyer.PRODUCT_CODE_ETH
if args.f_bch == True:
pcb = pcb | bitflyer.PRODUCT_CODE_BCH
if pcb == 0:
print("INFO: select BTC by default")
pcb = bitflyer.PRODUCT_CODE_BTC
outdir = args.outdir
if len(outdir) == 0:
outdir = "."
# create bitflyer instance
bf = bitflyer(pcb, outdir)
print("INFO: interval=%d, count=%d, outdir=%s" % \
(args.interval, args.count, outdir))
lpcnt = args.count
while True:
try:
if args.count == -1: # infinite loop is specified
                lpcnt = 1
if lpcnt > 0:
if pcb & bitflyer.PRODUCT_CODE_BTC:
ticker = bf.fetchTickerBTC()
print(bitflyer.ticker2str(ticker))
if pcb & bitflyer.PRODUCT_CODE_ETH:
ticker = bf.fetchTickerETH()
print(bitflyer.ticker2str(ticker))
if pcb & bitflyer.PRODUCT_CODE_BCH:
ticker = bf.fetchTickerBCH()
print(bitflyer.ticker2str(ticker))
lpcnt -= 1
# print("INFO: wait for %d seconds" % args.interval)
time.sleep(args.interval)
else:
break
except KeyboardInterrupt:
break
sys.exit(0)
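# Editor's note (usage sketch, not part of the original script): typical
# invocations, assuming network access to api.bitflyer.jp --
#
#   ./bitflyer.py -b -i 5 -c 100 -o ./data      # poll BTC_JPY every 5 s, 100 times
#   ./bitflyer.py -b -e -H -o ./data            # poll all three products until Ctrl-C
#
# Tickers are appended to ticker_<product>.csv in the output directory and
# echoed to stdout in the same comma-separated format.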
|
[
"takashi@hermit"
] |
takashi@hermit
|
f41fa5c42bc1aebcc2d08cd24120b101b406944b
|
a370f6b81cbfe2a956b59db40fffc32526088b00
|
/analytical.py
|
012d1f5e47ba62858606750415e1bfbb70c99284
|
[] |
no_license
|
Mountiko/armageddon
|
10d607335e72a47078efa5f30811e607fb1fb4bc
|
0244ee604fbea2ecb1e9d54cd2e186b176b9d9a9
|
refs/heads/master
| 2020-11-24T04:13:57.740850
| 2019-12-14T18:05:52
| 2019-12-14T18:05:52
| 227,960,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,694
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def anal_sol(H_plot, radius=10, velocity=20e3, density=3000, strength=10e5, angle=45,
init_altitude=100e3, radians=False):
'''
    Solves the analytical solution for a meteoroid impact
Parameters
----------
radius : float
The radius of the asteroid in meters
velocity : float
The entery speed of the asteroid in meters/second
density : float
The density of the asteroid in kg/m^3
strength : float
The strength of the asteroid (i.e., the ram pressure above which
fragmentation and spreading occurs) in N/m^2 (Pa)
angle : float
The initial trajectory angle of the asteroid to the horizontal
By default, input is in degrees. If 'radians' is set to True, the
input should be in radians
init_altitude : float, optional
Initial altitude in m
radians : logical, optional
Whether angles should be given in degrees or radians. Default=False
Angles returned in the DataFrame will have the same units as the
input
Returns
-------
Result : DataFrame
        pandas DataFrame with columns:
altitude, velocity, dedz
'''
# define constants
Cd = 1 # drag coefficient
    H = 8000 # atmospheric scale height constant
rho = 1.2 # air density at the ground
# define initial conditions
    m = 4/3 * np.pi * radius**3 * density # mass, assuming the asteroid is a sphere
A = np.pi * radius**2 # cross-sectional area
if radians is False: # converts degrees to radians
angle = angle * (np.pi)/180
# constant in analytical solution
c = velocity/(np.exp((-Cd * A * rho * H / (2 * m * np.sin(angle))) * np.exp(-init_altitude/H)))
def v_h(h):
return c * np.exp((-Cd * A * rho * H / (2 * m * np.sin(angle))) * np.exp(-h/H))
C2 = -Cd * A * rho * H / (2 * m * np.sin(angle))
def dEdz(z):
return c * np.exp(C2 * np.exp(-z/H)) * C2 * np.exp(-z/H) * (-1/H) * m * v_h(z)
#H_plot = np.linspace(100000, 0, 200)
v_plot = v_h(H_plot)
dedz = np.zeros((len(v_plot),)) # create array to store dedz results
dedz[0] = 0 # initial dedz
for i in range(1,len(v_plot)): # loop through all rows of result
energy = ((1/2 * m * v_plot[i]**2) - (1/2 * m * v_plot[i-1]**2))/4.184e12
alt = (H_plot[i] - H_plot[i-1])/1e3
dedz[i] = energy / alt
#dEdz_plot = dedz(H_plot)
result = pd.DataFrame({'altitude':H_plot, 'velocity':v_plot, 'dedz':dedz})
#result = result.sort_values(by='altitude', ascending=False)
return result
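# Editor's sketch (not part of the original module): example usage with the
# default asteroid parameters, assuming matplotlib is available for the plot.
if __name__ == "__main__":
    H_plot = np.linspace(100e3, 0, 200)
    result = anal_sol(H_plot)
    plt.plot(result['dedz'], result['altitude'] / 1e3)
    plt.xlabel('Energy deposited per unit altitude (kt TNT / km)')
    plt.ylabel('Altitude (km)')
    plt.show()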
|
[
"56032588+acse-ncv19@users.noreply.github.com"
] |
56032588+acse-ncv19@users.noreply.github.com
|
a0c529fe9ac1114d4ea620a3a09ab644868c12c2
|
7c59bbd4ff413a95dc9d25fbfccd11c6db60202a
|
/python_stack/full_stack_django/test_orm/apps/test_orm_app/migrations/0001_initial.py
|
ff84e3ca46db76c12c5baaeb018a42283bcbe193
|
[] |
no_license
|
soikatesc/DojoAssignments
|
9a185a1164e42a985aea5e49d0ee270fd476d42a
|
c5c84bc9bd4aedd0fe6aa26bf75793e284edb248
|
refs/heads/master
| 2021-01-23T04:34:19.617679
| 2017-05-16T03:52:58
| 2017-05-16T03:52:58
| 86,211,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,310
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-04-19 00:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('blog', models.TextField(max_length=1000)),
('created_at', models.DateField(auto_now_add=True)),
('updated_at', models.DateField(auto_now=True)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.TextField(max_length=1000)),
('created_at', models.DateField(auto_now_add=True)),
('updated_at', models.DateField(auto_now=True)),
('blog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='test_orm_app.Blog')),
],
),
]
|
[
"soikatesc@gmail.com"
] |
soikatesc@gmail.com
|
e1ca4ac4ad045dfef7ec413194552a242acdc4be
|
1636f64b079a7cbb9e27bb27947eddc6c88aef61
|
/Chapter7/second/models/model.py
|
74f2a137eb79352e4745898fdc33774c888c0e1c
|
[] |
no_license
|
TJJTJJTJJ/pytorch__test
|
436a74a57f83c800b90dc063ef1976c20a3abc2b
|
e8bf4e9f6a3d23a7d577c0c78b93d9f5a7561ca5
|
refs/heads/master
| 2020-03-27T18:11:24.889333
| 2018-10-04T09:45:20
| 2018-10-04T09:45:20
| 146,904,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,287
|
py
|
# coding: utf-8
# In[1]:
from torch import nn
from .BasicModule import BasicModule
from torch import autograd
import torch
# ### Define the generator: init(), forward()
# In[2]:
class NetG(BasicModule):
"""
    Generator definition
__init__()
forward()
"""
def __init__(self,opt):
super(NetG,self).__init__()
ngf = opt.ngf
self.main = nn.Sequential(
            # Input: noise of shape 1 x nz x 1 x 1
nn.ConvTranspose2d(opt.nz,ngf*8, 4,1,0,bias=False),
nn.BatchNorm2d(ngf*8),
nn.ReLU(True),
# (ngf*8)*4*4
nn.ConvTranspose2d(ngf*8, ngf*4, 4,2,1,bias=False),
nn.BatchNorm2d(ngf*4),
nn.ReLU(True),
# (ngf*4)*8*8
nn.ConvTranspose2d(ngf*4, ngf*2, 4,2,1,bias=False),
nn.BatchNorm2d(ngf*2),
nn.ReLU(True),
# (ngf*2)*16*16
nn.ConvTranspose2d(ngf*2, ngf*1, 4,2,1,bias=False),
nn.BatchNorm2d(ngf*1),
nn.ReLU(True),
# (ngf*1)*32*32
nn.ConvTranspose2d(ngf, 3, 5,3,1,bias=False),
nn.Tanh()
# 3*96*96 range(-1,1)
)
def forward(self,x):
return self.main(x)
# ### Define the discriminator
# In[3]:
class NetD(BasicModule):
"""
    Discriminator
__init__()
forward()
"""
def __init__(self,opt):
super(NetD, self).__init__()
ndf = opt.ndf
self.main = nn.Sequential(
            # The mirror image of the generator
# 3*96*96
nn.Conv2d(3, ndf, 5, 3, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# ndf*32*32
nn.Conv2d(ndf,ndf*2,4,2,1,bias=False),
nn.BatchNorm2d(ndf*2),
nn.LeakyReLU(0.2,inplace=True),
nn.Conv2d(ndf*2,ndf*4,4,2,1,bias=False),
nn.BatchNorm2d(ndf*4),
nn.LeakyReLU(0.2,inplace=True),
nn.Conv2d(ndf*4,ndf*8,4,2,1,bias=False),
nn.BatchNorm2d(ndf*8),
nn.LeakyReLU(0.2,inplace=True),
nn.Conv2d(ndf*8,1,4,1,0,bias=False),
# batch*1*1*1
nn.Sigmoid()
)
def forward(self,x):
return self.main(x).view(-1) # batch
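# Editor's sketch (illustrative only, not part of the original module): feeding
# the generator a noise batch, assuming an `opt` object with nz/ngf/ndf fields
# matching the defaults used in this DCGAN chapter.
if __name__ == "__main__":
    from types import SimpleNamespace
    opt = SimpleNamespace(nz=100, ngf=64, ndf=64)
    netg, netd = NetG(opt), NetD(opt)
    noise = torch.randn(8, opt.nz, 1, 1)
    fake_imgs = netg(noise)    # -> shape (8, 3, 96, 96)
    scores = netd(fake_imgs)   # -> shape (8,)
    print(fake_imgs.shape, scores.shape)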
|
[
"18810906582@163.com"
] |
18810906582@163.com
|
4e716e88211d1235e4eb2d17c6c4c7d49c93d68d
|
9a9b4b88485101f23dd84f12187dcc3dd0638fb4
|
/code/chp14/mailbox.py
|
6458bd18a929b09f9ea1fc106a9b5ccc949b3a5f
|
[] |
no_license
|
hongdago/fopnpcode
|
0d8ce279a51068e4ae92fc305c11f299991f50b7
|
ef55cdff12a1f07c3f91cf80ca6745c2448de765
|
refs/heads/master
| 2021-09-03T12:46:55.009501
| 2018-01-09T07:14:24
| 2018-01-09T07:14:24
| 115,104,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 823
|
py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
FileName:mailbox.py
DESC: Use the POP3 LIST command
"""
import getpass, poplib, sys
def main():
if len(sys.argv) !=3:
print('Usage: %s hostname username ' % sys.argv[0])
exit(2)
hostname, username = sys.argv[1:]
passwd = getpass.getpass()
p = poplib.POP3_SSL(hostname)
try:
p.user(username)
p.pass_(passwd)
except poplib.error_proto as e:
print("Login failed:", e)
else:
response, listings, octet_count = p.list()
if not listings:
print('No messages')
for listing in listings:
number, size = listing.decode('ascii').split()
            print('Message %s has %s bytes ' % (number, size))
finally:
p.quit()
if __name__ == "__main__":
main()
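# Editor's note (usage sketch, not part of the original example): run as
#
#   python mailbox.py pop.example.com alice
#
# The script prompts for the password, logs in over POP3-over-SSL (port 995 by
# default) and prints one line per message with its size in bytes.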
|
[
"hongdago@yahoo.com"
] |
hongdago@yahoo.com
|
004f6ee2f5299f8fa21061fd22b3afc3270998c3
|
c5087f002963ca81c32fb1f7b801125955dd484f
|
/main.py
|
eb653007dc8acbc884353abac64969a681c84230
|
[] |
no_license
|
davidhjp/pysosj
|
a6ee12c99f14b6cf56ef975fed65f5cf740c9084
|
cb955fbae0f745ef7c1787be62c54c989d28483d
|
refs/heads/master
| 2021-06-10T21:47:42.735106
| 2017-01-24T03:39:53
| 2017-01-24T03:39:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
import pysosj
if __name__ == "__main__":
sss = pysosj.SJChannel("127.0.0.1", 1200, "127.0.0.1", 1100);
while 1:
val = sss.receive("CD2.I", "CD.I")
print "received " + val
sss.send("CD2.I2", "CD.I2", "hallo")
print "sent"
sss.close()
while(1):
pass
|
[
"hpar081@aucklanduni.ac.nz"
] |
hpar081@aucklanduni.ac.nz
|
c2137568a2e94f717e43fd034e129651b46804a3
|
a838d4bed14d5df5314000b41f8318c4ebe0974e
|
/sdk/streamanalytics/azure-mgmt-streamanalytics/azure/mgmt/streamanalytics/operations/_inputs_operations.py
|
890d33f1b8b1901067d5182d5396b9ae6a0bfef4
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
scbedd/azure-sdk-for-python
|
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
|
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
|
refs/heads/master
| 2023-09-01T08:38:56.188954
| 2021-06-17T22:52:28
| 2021-06-17T22:52:28
| 159,568,218
| 2
| 0
|
MIT
| 2019-08-11T21:16:01
| 2018-11-28T21:34:49
|
Python
|
UTF-8
|
Python
| false
| false
| 28,587
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class InputsOperations(object):
"""InputsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~stream_analytics_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def create_or_replace(
self,
resource_group_name, # type: str
job_name, # type: str
input_name, # type: str
input, # type: "models.Input"
if_match=None, # type: Optional[str]
if_none_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.Input"
"""Creates an input or replaces an already existing input under an existing streaming job.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param job_name: The name of the streaming job.
:type job_name: str
:param input_name: The name of the input.
:type input_name: str
:param input: The definition of the input that will be used to create a new input or replace
the existing one under the streaming job.
:type input: ~stream_analytics_management_client.models.Input
:param if_match: The ETag of the input. Omit this value to always overwrite the current input.
Specify the last-seen ETag value to prevent accidentally overwriting concurrent changes.
:type if_match: str
:param if_none_match: Set to '*' to allow a new input to be created, but to prevent updating an
existing input. Other values will result in a 412 Pre-condition Failed response.
:type if_none_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Input, or the result of cls(response)
:rtype: ~stream_analytics_management_client.models.Input
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Input"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_replace.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
'inputName': self._serialize.url("input_name", input_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(input, 'Input')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Input', pipeline_response)
if response.status_code == 201:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Input', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
create_or_replace.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}'} # type: ignore
def update(
self,
resource_group_name, # type: str
job_name, # type: str
input_name, # type: str
input, # type: "models.Input"
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.Input"
"""Updates an existing input under an existing streaming job. This can be used to partially update
(ie. update one or two properties) an input without affecting the rest the job or input
definition.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param job_name: The name of the streaming job.
:type job_name: str
:param input_name: The name of the input.
:type input_name: str
:param input: An Input object. The properties specified here will overwrite the corresponding
properties in the existing input (ie. Those properties will be updated). Any properties that
are set to null here will mean that the corresponding property in the existing input will
remain the same and not change as a result of this PATCH operation.
:type input: ~stream_analytics_management_client.models.Input
:param if_match: The ETag of the input. Omit this value to always overwrite the current input.
Specify the last-seen ETag value to prevent accidentally overwriting concurrent changes.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Input, or the result of cls(response)
:rtype: ~stream_analytics_management_client.models.Input
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Input"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
'inputName': self._serialize.url("input_name", input_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(input, 'Input')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Input', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
job_name, # type: str
input_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes an input from the streaming job.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param job_name: The name of the streaming job.
:type job_name: str
:param input_name: The name of the input.
:type input_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
'inputName': self._serialize.url("input_name", input_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
job_name, # type: str
input_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.Input"
"""Gets details about the specified input.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param job_name: The name of the streaming job.
:type job_name: str
:param input_name: The name of the input.
:type input_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Input, or the result of cls(response)
:rtype: ~stream_analytics_management_client.models.Input
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Input"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
'inputName': self._serialize.url("input_name", input_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Input', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}'} # type: ignore
def list_by_streaming_job(
self,
resource_group_name, # type: str
job_name, # type: str
select=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.InputListResult"]
"""Lists all of the inputs under the specified streaming job.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param job_name: The name of the streaming job.
:type job_name: str
:param select: The $select OData query parameter. This is a comma-separated list of structural
properties to include in the response, or "\ *" to include all properties. By default, all
properties are returned except diagnostics. Currently only accepts '*\ ' as a valid value.
:type select: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InputListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~stream_analytics_management_client.models.InputListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.InputListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_streaming_job.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('InputListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_streaming_job.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs'} # type: ignore
def _test_initial(
self,
resource_group_name, # type: str
job_name, # type: str
input_name, # type: str
input=None, # type: Optional["models.Input"]
**kwargs # type: Any
):
# type: (...) -> Optional["models.ResourceTestStatus"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ResourceTestStatus"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._test_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
'inputName': self._serialize.url("input_name", input_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if input is not None:
body_content = self._serialize.body(input, 'Input')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ResourceTestStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_test_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}/test'} # type: ignore
def begin_test(
self,
resource_group_name, # type: str
job_name, # type: str
input_name, # type: str
input=None, # type: Optional["models.Input"]
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ResourceTestStatus"]
"""Tests whether an input’s datasource is reachable and usable by the Azure Stream Analytics
service.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param job_name: The name of the streaming job.
:type job_name: str
:param input_name: The name of the input.
:type input_name: str
:param input: If the input specified does not already exist, this parameter must contain the
full input definition intended to be tested. If the input specified already exists, this
parameter can be left null to test the existing input as is or if specified, the properties
specified will overwrite the corresponding properties in the existing input (exactly like a
PATCH operation) and the resulting input will be tested.
:type input: ~stream_analytics_management_client.models.Input
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ResourceTestStatus or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~stream_analytics_management_client.models.ResourceTestStatus]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ResourceTestStatus"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._test_initial(
resource_group_name=resource_group_name,
job_name=job_name,
input_name=input_name,
input=input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ResourceTestStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_test.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}/test'} # type: ignore
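# Illustrative usage sketch (client and resource names below are hypothetical, not part
# of this generated module): begin_test returns an LROPoller, and .result() blocks until
# the service reports a ResourceTestStatus for the input under test.
#
#   poller = client.inputs.begin_test(
#       resource_group_name="my-rg",
#       job_name="my-streaming-job",
#       input_name="my-input",
#   )
#   test_status = poller.result()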
|
[
"noreply@github.com"
] |
scbedd.noreply@github.com
|
0bb86cc88e336ab207cb59291392165129948a10
|
c2e43c48aefa729962097d5fb470777c044c5a85
|
/venv/bin/f2py3.9
|
66a4eb1360ee8269a0e9a5ce727cd677237fc0f7
|
[] |
no_license
|
HiroshigeAoki/Optiver_Realized_Volatility_Prediction
|
57a14d374a65fe2419eb3c84ed9d9f6fc3faeef6
|
21bdbe2ec2ed193db6c28ebfa3f5313539c0a188
|
refs/heads/main
| 2023-07-11T01:47:49.631676
| 2021-08-17T06:47:02
| 2021-08-17T06:47:02
| 397,778,792
| 0
| 0
| null | 2021-08-19T01:19:57
| 2021-08-19T01:19:57
| null |
UTF-8
|
Python
| false
| false
| 289
|
9
|
#!/home/bi18056/VScode_workplaces/Optiver_Realized_Volatility_Prediction/venv/bin/python3.9
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"lqqq0528636@gmail.com"
] |
lqqq0528636@gmail.com
|
8c8ddb6eb22d55c5cb1ea8f0c1998adaff30342f
|
28811880a917a1e1ec24a844999d950dc1a5d057
|
/0x07-python-test_driven_development/4-print_square.py
|
4d81d08b600778252b9240006e576c46b7c1393d
|
[] |
no_license
|
OrangeB0lt/holbertonschool-higher_level_programming
|
b591ceb8e0710cb26c78407a266421488f325678
|
f50668f78ffb861b305e0d691c29cd1b817d9ec0
|
refs/heads/master
| 2020-05-18T03:55:07.262414
| 2019-09-26T23:22:01
| 2019-09-26T23:22:01
| 184,158,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
#!/usr/bin/python3
"""
Prints squares
based on size input
woohoo
"""
def print_square(size):
"""
print_square: prints a square based on input number for size
"""
if not isinstance(size, int):
raise TypeError("size must be an integer")
if size < 0:
raise ValueError("size must be >= 0")
for idx in range(size):
for cnt in range(size):
print("#", end="")
print()
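# Illustrative usage (guarded so importing the module stays side-effect free):
if __name__ == "__main__":
    print_square(3)
    # expected output:
    # ###
    # ###
    # ###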
|
[
"667@holbertonschool.com"
] |
667@holbertonschool.com
|
c819640f419b8ec2a2e4db92176f8a8578bd998a
|
f9483d708e9df7b38b7ae1d58726a4e186780473
|
/app/core/models.py
|
e976e6abe197bd1e740b9b98e95f5c3609ea0be1
|
[
"MIT"
] |
permissive
|
dev-tanvir/rest_api_docker
|
6db6bf616484ab29e9446795158c8facc2489fae
|
09804cfbc6332d6cfbb25b09813b36c338051ba3
|
refs/heads/main
| 2023-06-20T08:41:53.535214
| 2021-07-18T11:16:19
| 2021-07-18T11:16:19
| 367,130,087
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,099
|
py
|
import uuid
import os
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings # recommended way to import settings in django
def synthesize_image_file_path(instance, main_filename):
"""return a valid path for uploaded file with unique name"""
main_file_extension = main_filename.split('.')[-1]
new_filename = f'{uuid.uuid4()}.{main_file_extension}'
return os.path.join('uploads/synthesize/', new_filename)
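# Illustrative example (hypothetical filename): an upload named "photo.jpg" maps to
# something like 'uploads/synthesize/6f1e0d3a-....jpg' -- a fresh UUID4 plus the original
# extension, so two uploads with the same name never collide on disk.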
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
""" Creates and saves a new user"""
if not email:
            raise ValueError('Users need to pass a valid email address!')
        user = self.model(email=self.normalize_email(email), **extra_fields) # not using password here because
# password needs to be hashed and
# not saved in clear text
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
""" Creates and saves a new super user"""
user = self.create_user(email, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom User model that supports Creating a user using email in stead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = "email"
class Tag(models.Model):
"""Model for tag management for Synthesize"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self) -> str:
"""String reprensation of tag object"""
return self.name
class Chemcomp(models.Model):
"""Model for chemical components of synthesizer"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self) -> str:
return self.name
class Synthesize(models.Model):
"""Model for declaring a synthesize for life"""
title = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
time_years = models.IntegerField()
link = models.CharField(blank=True, max_length=255)
chemcomps = models.ManyToManyField('Chemcomp')
tags = models.ManyToManyField('Tag')
chance = models.DecimalField(max_digits=5, decimal_places=2)
image = models.ImageField(null=True, upload_to=synthesize_image_file_path)
def __str__(self) -> str:
return self.title
|
[
"tanvirfaisaldev@gmail.com"
] |
tanvirfaisaldev@gmail.com
|
b6cd32dd7c58e44b484925d0981c527b8eb6d61f
|
ddd09683d9cbd681db5dae4e2d036d28bd4d24c1
|
/PA3/BAL3.py
|
f82978400cd729be26ca286631abcea6caa2356a
|
[] |
no_license
|
nivedn3/DL4CV-EE6132-
|
41f9cd877a4c43db0a2f511a57df8b624fbc0a07
|
2cd97c7d2170a8e4fe36b6ccc8443c009e3d003a
|
refs/heads/master
| 2021-01-20T05:41:37.019460
| 2017-11-22T10:17:16
| 2017-11-22T10:17:16
| 101,465,640
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,240
|
py
|
import tensorflow as tf
import numpy as np
sess = tf.InteractiveSession()
def data(number,size):
a = []
b = []
out = []
for i in range(number):
a_in = np.random.choice([0,1],size)
a_in = a_in.tolist()
#a_in = [1,0,0,0,0]
b_in = np.random.choice([0,1],size)
b_in = b_in.tolist()
#b_in = [1,0,0,0,0]
a_str = ','.join(str(x) for x in a_in).replace(',','')
b_str = ','.join(str(x) for x in b_in).replace(',','')
c = bin(int(a_str,2) + int(b_str,2)).split('b')[1]
c = [int(i) for i in list(c)]
c_out = np.array(c)
if len(c_out) == size:
c_out = np.insert(c_out,0,0)
if len(c_out) < size:
while(len(c_out) != size+1):
c_out = np.insert(c_out,0,0)
test = []
for j in range(len(a_in)):
test.append(a_in[j])
test.append(b_in[j])
a.append(test)
#b.append(b_in)
out.append(c_out)
return a,out
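# Illustrative example of one generated sample (hypothetical draw, size=3):
# a_in=[1,0,1] (101 = 5) and b_in=[0,1,1] (011 = 3) give c_out=[1,0,0,0] (1000 = 8,
# padded to size+1 bits), and the input row interleaves the operand bits pairwise:
# [1,0, 0,1, 1,1].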
size = 3
hs = 5
x = tf.placeholder(tf.float32,shape = [None,size,2])
y = tf.placeholder(tf.float32,shape = [None,size+1])
w = tf.Variable(tf.random_normal([hs,size+1]))
b = tf.Variable(tf.random_normal([size+1]))
rnn_inp = tf.unstack(x,size,1)
lstm = tf.contrib.rnn.BasicRNNCell(hs)
outputs, states = tf.contrib.rnn.static_rnn(lstm, rnn_inp, dtype=tf.float32)
logits = tf.sigmoid(tf.matmul(outputs[-1], w) + b)
# Turn the sigmoid outputs into hard 0/1 predictions: shift by -0.5, ReLU, scale up, clip.
logitst = tf.add(logits,tf.scalar_mul(-0.5,tf.ones_like(logits)))
logitst = tf.nn.relu(logitst)
logitst = tf.scalar_mul(1000000,logitst)
logitst = tf.clip_by_value(logitst,0,1)
logitsc = tf.cast(logitst,tf.int32)
yc = tf.cast(y,tf.int32)
with tf.name_scope("cross_entropy"):
#cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits,labels = y))
cross_entropy = tf.losses.mean_squared_error(labels = y, predictions = logits)
tf.summary.scalar('cross entropy',cross_entropy)
with tf.name_scope("train"):
train_step = tf.train.AdamOptimizer(0.1).minimize(cross_entropy)
with tf.name_scope("accuracy"):
correct_prediction = tf.equal(logitsc,yc)
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
tf.summary.scalar('accuracy',accuracy)
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter("/home/psycholearner/projects//DL4CV-EE6132-/PA3/2035")
writer.add_graph(sess.graph)
writer2 = tf.summary.FileWriter("/home/psycholearner/projects/DL4CV-EE6132-/PA3/20351")
writer2.add_graph(sess.graph)
sess.run(tf.global_variables_initializer())
for i in range(20000):
a,batch_y = data(500,size)
batch_x = np.array(a)
batch_x = batch_x.reshape(500,size,2)
batch_x = [j[::-1] for j in batch_x]
batch_x = np.array(batch_x)
batch_x.astype(float)
batch_y = np.array(batch_y)
#batch_y.astype(float)
if i % 25 == 0:
s = sess.run(merged_summary,feed_dict = {x: batch_x,y: batch_y})
writer.add_summary(s,i)
at,batch_yt = data(500,size)
batch_xt = np.array(at)
batch_xt = batch_xt.reshape(500,size,2)
batch_xt = [j[::-1] for j in batch_xt]
batch_xt = np.array(batch_xt)
batch_xt.astype(float)
batch_yt = np.array(batch_yt)
k = sess.run(merged_summary,feed_dict = {x: batch_xt,y: batch_yt})
writer2.add_summary(k,i)
#train_accuracy = sess.run(accuracy.eval(feed_dict={x: batch[0], y: batch[1]}))
#[train_accuracy] = sess.run([cross_entropy],feed_dict = {x: batch_x, y:batch_y})
#[test] = sess.run([accuracy],feed_dict = {x: batch_x, y:batch_y})
#logits = sess.run([accuracy],feed_dict = {x: batch_x, y:batch_y})
#print('step %d, training accuracy %g %g' % (i, train_accuracy,test))
#[test_acc] = sess.run([test_accuracy],feed_dict = {x: mnist.test.images, y:mnist.test.labels})
#print('step %d, test accuracy %g' % (i, test_acc))
#saver.restore(sess, "/home/psycholearner/projects//DL4CV-EE6132-/PA2/model.ckpt")
sess.run(train_step,feed_dict = {x:batch_x,y:batch_y})
'''
test_data = mnist.test.images[:128].reshape((-1, 28, 28))
test_label = mnist.test.labels[:128]
print("Testing Accuracy:",sess.run([accuracy], feed_dict={x: test_data, y: test_label}))
'''
a,batch_y = data(500,size)
batch_x = np.array(a)
batch_x = batch_x.reshape(500,size,2)
batch_x = [j[::-1] for j in batch_x]
batch_x = np.array(batch_x)
batch_x.astype(float)
batch_y = np.array(batch_y)
print("Testing Accuracy:",sess.run([accuracy], feed_dict={x: batch_x, y: batch_y}))
|
[
"nivedn3@gmail.com"
] |
nivedn3@gmail.com
|
93a3267e51df3e2d5d2bf140872b6e2e1a563f40
|
53b2358c6089be2c51ac2768a77fc303d563550d
|
/assembly-scripts/combine_cds.py
|
3703c7cac212c41cfac0a5244748a064d01f18fd
|
[] |
no_license
|
bethsheets/Population-Genomics-via-RNAseq
|
684e7cd5a667a335f7b3e1111e1ccd6eb85533c6
|
3cb3ee912f855e8a9981874f4ff160551f8b8db3
|
refs/heads/docs
| 2020-04-12T05:42:48.038672
| 2019-07-12T17:24:37
| 2019-07-12T17:24:37
| 60,875,099
| 2
| 10
| null | 2017-04-14T15:16:23
| 2016-06-10T20:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 773
|
py
|
#usage: python combine_coding.py input_cds_grepped_from_gff_then_gff2bed_awk_prepend_strand.fa out.fa
import sys
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
coding=SeqIO.parse(sys.argv[1],'fasta',generic_dna)
combined=dict()
for cds in coding:
name=cds.id
transcript=name.split('.cds')[0]
if transcript in combined:
combined[transcript]=combined[transcript]+cds.seq
else:
cds.id=transcript
combined[transcript]=cds
final=dict()
for transcript, cds in combined.iteritems():
if transcript[0]=='-':
cds.seq=cds.seq.reverse_complement()
cds.id=str(transcript)[1:]
combined[transcript]=cds
OUT = open(sys.argv[2], "w")
SeqIO.write(combined.values(), OUT, "fasta")
OUT.close()
|
[
"bethsheets@gmail.com"
] |
bethsheets@gmail.com
|
a135986681b422aa38dc0f73eb018ec8b5e5de5b
|
ae8a2e748976c702da93be5fceb415683e26dae4
|
/sumit1136/cartoonifyRealTime.py
|
488f4c9475a76e657648fbf4469ba2a39a9a6b03
|
[] |
no_license
|
hackslash-nitp/cartoonify
|
1a19e1c57959103f61d968e773f11babdf2f699c
|
7f4ac7329de79c63855dd80727a18c0eab6be577
|
refs/heads/main
| 2023-02-17T11:11:23.705255
| 2021-01-19T23:35:03
| 2021-01-19T23:35:03
| 322,804,672
| 1
| 6
| null | 2021-01-19T23:35:04
| 2020-12-19T08:48:18
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,080
|
py
|
# import the opencv library
import cv2
import numpy as np
# define a video capture object
vid = cv2.VideoCapture(0)
while True:
# Capture the video frame
# by frame
ret, frame = vid.read()
# Display the resulting frame
cv2.imshow('frame 1', frame)
# frame=cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# cv2.imshow('frame 2', frame)
gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
# cv2.imshow('frame 2', gray)
imgBlur = cv2.medianBlur(gray, 5)
# cv2.imshow('frame 2', imgBlur)
imgEdge = cv2.adaptiveThreshold(imgBlur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)
# cv2.imshow('frame 2', imgEdge)
colored = cv2.bilateralFilter(frame, 9, 250, 250)
cartoon = cv2.bitwise_and(colored, colored, mask=imgEdge)
cv2.imshow('frame 2', cartoon)
# the 'q' button is set as the
# quitting button you may use any
# desired button of your choice
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# After the loop release the cap object
vid.release()
# Destroy all the windows
cv2.destroyAllWindows()
|
[
"sumit.sav1136@gmail.com"
] |
sumit.sav1136@gmail.com
|
f0921f29f3f682945a8f671213dc391d565db088
|
9d41570295cc05b66fd52584a90fe87f29155943
|
/src/crawler/delay.py
|
649fb6282c26a77936487a5bcd18eeda56ff6aa7
|
[
"MIT"
] |
permissive
|
diegojromerolopez/relwrac
|
ed56feeb2a5e455e0fa58f6bc130445e5a0831bd
|
23ee278ab4019b98269419c53feed2194f079c25
|
refs/heads/master
| 2022-12-11T08:06:19.888698
| 2019-11-16T12:35:34
| 2019-11-16T12:35:34
| 219,372,323
| 0
| 0
|
MIT
| 2022-12-08T06:49:05
| 2019-11-03T22:09:35
|
Python
|
UTF-8
|
Python
| false
| false
| 294
|
py
|
import random
class Delay(object):
@classmethod
def none(cls):
return None
@classmethod
def uniform(cls, lower_bound: float, upper_bound: float):
def uniform_delay_():
return random.uniform(lower_bound, upper_bound)
return uniform_delay_
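# Minimal usage sketch (illustrative; guarded so importing the module is unaffected):
if __name__ == "__main__":
    delay_fn = Delay.uniform(0.5, 2.0)
    print(delay_fn())    # a float sampled uniformly from [0.5, 2.0], new on every call
    print(Delay.none())  # None, i.e. no delay configured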
|
[
"diegojromerolopez@gmail.com"
] |
diegojromerolopez@gmail.com
|
11716fe8e719fd08d9305e4d39864d7c53af3e8a
|
268e00212b4e863e35b2113496f1a71c4b7b4a04
|
/11_对dict的list去重.py
|
7af9b4a7b79ae15743f7087432b1449da813482a
|
[] |
no_license
|
MrCat9/Python_Note
|
ee5c2fa86b0f77538e1feacdaaadfa9afec884ef
|
6b81cdf4d46a6d1f1a78170c47151ae519e087d4
|
refs/heads/master
| 2022-08-20T22:28:17.730325
| 2022-08-08T02:44:47
| 2022-08-08T02:44:47
| 146,618,466
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,677
|
py
|
# -*- coding: utf-8 -*-
# Deduplicate a list of dicts by the value of a given key; dicts with the same value are treated as duplicates
# def dict_list_duplicate(dict_list, duplicate_key):
# temp_list = []
# new_dict_list = []
# for a_dict in dict_list:
# value = a_dict[duplicate_key]
# if value not in temp_list:
# temp_list.append(value)
# new_dict_list.append(dict_)
# return new_dict_list
# def dict_list_duplicate(dict_list, duplicate_key):
# temp_set = set()
# new_dict_list = []
# for a_dict in dict_list:
# value = a_dict[duplicate_key]
# if value not in temp_set:
# temp_set.add(value)
# new_dict_list.append(dict_)
# return new_dict_list
def dict_list_duplicate(dict_list, duplicate_key):
temp_set = set()
new_dict_list = []
for a_dict in dict_list:
value = a_dict[duplicate_key]
old_set_len = len(temp_set)
temp_set.add(value)
new_set_len = len(temp_set)
if new_set_len > old_set_len:
            new_dict_list.append(a_dict)
return new_dict_list
if __name__ == '__main__':
old_dict_list = [
{"title": "title1", "name": "name1"},
{"title": "title2", "name": "name2"},
{"title": "title1", "name": "name3"},
]
print(old_dict_list)
# [{'title': 'title1', 'name': 'name1'}, {'title': 'title2', 'name': 'name2'}, {'title': 'title1', 'name': 'name3'}]
    new_dict_list = dict_list_duplicate(old_dict_list, "title")  # dedupe on the "title" key; entries with the same title count as duplicates
print(new_dict_list)
# [{'title': 'title1', 'name': 'name1'}, {'title': 'title2', 'name': 'name2'}]
|
[
"noreply@github.com"
] |
MrCat9.noreply@github.com
|
cdfc02d15189f0e0ad498bfae17a3ee08c544cc0
|
d9cf5ce593b91c63139e4bd831f3ba99a3407d05
|
/analyze_timings.py
|
486158770c30fe74fa794ad1a058012d75a1ccb6
|
[] |
no_license
|
MaxwellDeJong/parallel_mcmc
|
51c6d7794424c4adf2d3e7ed85d8d3ade07dce57
|
4d9f41e402c35a295141057d8caab0bf85d1b033
|
refs/heads/main
| 2023-02-20T05:04:55.999668
| 2021-01-10T03:13:39
| 2021-01-10T03:13:39
| 328,289,809
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,105
|
py
|
import numpy as np
import matplotlib.pyplot as plt
def plot_timings(n):
filename = 'timings_' + str(n) + '.txt'
timings = np.loadtxt(filename)
plt.scatter(timings[:, 0], timings[:, 1])
plt.xlabel('Number of Steps')
plt.ylabel('Execution Time (ms)')
plt.title('Scaling with ' + str(n) + ' Dimensions')
plt.xlim((0, max(timings[:, 0]) + 5000))
plt.show()
def plot_dim_timings():
filename = 'timings_dim.txt'
timings = np.loadtxt(filename)
plt.scatter(timings[:, 0], timings[:, 1])
plt.xlabel('Number of Dimensions')
plt.ylabel('Execution Time (ms)')
plt.title('Dimensional Scaling with 3000 Steps')
plt.xlim((0, max(timings[:, 0]) + 5))
plt.show()
def plot_data_timings():
filename = 'timings_data.txt'
timings = np.loadtxt(filename)
plt.scatter(timings[:, 0], timings[:, 1])
plt.xlabel('Number of Data Points')
plt.ylabel('Execution Time (ms)')
plt.title('Data Scaling with 10000 Steps')
plt.xlim((-100, max(timings[:, 0]) + 600))
plt.show()
plot_timings(20)
plot_dim_timings()
plot_data_timings()
|
[
"maxwelldejong@gmail.com"
] |
maxwelldejong@gmail.com
|
7c5501a793a8b3dcf3fe33d63e771c53c854e673
|
fd8eb0edf514fca4f25b885de86e1503463a8264
|
/polls/admin.py
|
3cd232b8b20f28c45a93edda83a4e2180351d1d8
|
[] |
no_license
|
ubiopen/project1
|
7f6dd1bf56721449c5e61d9c40c6c132721384e1
|
89ee8f5914bfe28627018d2b07749d17f0f63e92
|
refs/heads/master
| 2021-01-25T09:59:31.794667
| 2014-06-15T10:54:21
| 2014-06-15T10:54:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 114
|
py
|
from polls.models import *
from django.contrib import admin
admin.site.register(Poll)
admin.site.register(Choice)
|
[
"kfisac@kfisacui-MacBook-Pro.local"
] |
kfisac@kfisacui-MacBook-Pro.local
|
7730264c505a3c732597e1d232c28b694ce63cd6
|
75d41c04791c047309607ce554f0b3e72f94b4cb
|
/app.py
|
3de14fdca36e861dfd5823560a0333681018862f
|
[
"MIT"
] |
permissive
|
lyfyork/LAB2
|
6e84d18dc22c98e9b0f2a1e550d39eb7cf022fe8
|
a76adfbe0e8077e06e2c25457df5ace67e64bc10
|
refs/heads/master
| 2020-05-14T15:17:38.807117
| 2019-04-17T09:05:28
| 2019-04-17T09:05:28
| 181,841,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17
|
py
|
# This is app.py
|
[
"noreply@github.com"
] |
lyfyork.noreply@github.com
|
a981d72e03c71c1df680fd7aebeddc9f0d707d99
|
58c0c6cd1da0a0b70c14787fbbd5a5af5161ac15
|
/venv/Scripts/rst2odt.py
|
6925f5b6c183bc706e358ba95f88170286c59ba7
|
[
"MIT"
] |
permissive
|
RafaelHMachado/Cioffis_Automation
|
6454d33558a4f4b63412d1d068726ca73feddeea
|
07965ca71c3d4e78f5cee1fce4ba0bbfe2db9811
|
refs/heads/main
| 2023-06-06T07:04:11.182106
| 2021-07-03T07:39:28
| 2021-07-03T07:39:28
| 382,553,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
#!C:\Users\Eng\Documents\Project\venv\Scripts\python.exe
# $Id: rst2odt.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
A front end to the Docutils Publisher, producing OpenOffice documents.
"""
import sys
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline_to_binary, default_description
from docutils.writers.odf_odt import Writer, Reader
description = ('Generates OpenDocument/OpenOffice/ODF documents from '
'standalone reStructuredText sources. ' + default_description)
writer = Writer()
reader = Reader()
output = publish_cmdline_to_binary(reader=reader, writer=writer,
description=description)
|
[
"rafael_henriquemachado@hotmail.com"
] |
rafael_henriquemachado@hotmail.com
|
2892259f2d817721cf02d7066f632c0970f63743
|
c9317c7703f05c3dd17c29aaadf9062b60bedd37
|
/website/view.py
|
1b984a7eb87633e8ebc56c91044d081162c065c0
|
[] |
no_license
|
gliv001/appointment_webapp
|
778bd5983fad27cfcf2a159f1f72ff9efa37e989
|
bb7a003c5d25c1364e25b0c6e160e9b24b66f490
|
refs/heads/main
| 2023-05-26T12:57:59.134742
| 2021-06-14T01:13:02
| 2021-06-14T01:13:02
| 371,082,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,255
|
py
|
from website.forms import AppointmentForm, EmployeeForm, ServiceForm
from flask import Blueprint, render_template, request, redirect, flash
from flask.helpers import url_for
from .models import (
Appointments,
Employees,
LoginHistory,
Services,
ApptUsers,
UserLevel,
db,
)
from datetime import datetime
from flask_login import login_required, current_user
from werkzeug.security import generate_password_hash
import simplejson as json
view = Blueprint("view", __name__)
@view.route("/", methods=["POST", "GET"])
@view.route("/appointments", methods=["POST", "GET"])
@login_required
def appointment_home():
form = AppointmentForm()
if request.method == "POST":
if form.validate_on_submit():
client = form.client.data
service = form.service.data
employee = form.employee.data
appt_date = form.appt_date.data
appt_time = form.appt_time.data
tip = form.tip.data
total = form.total.data
if tip == "":
tip = 0
new_appt = Appointments(
client=client,
serviceid=service,
employeeid=employee,
apptdatetime=datetime.strptime(
f"{appt_date} {appt_time}", "%Y-%m-%d %H:%M:%S"
),
tips=tip,
total=total,
)
try:
db.session.add(new_appt)
db.session.commit()
except Exception as error:
print(error)
flash("There was an issue adding a new appointment", category="error")
return redirect("/appointments")
employeeList = Employees.query.all()
serviceList = Services.query.all()
form.employee.choices = [(e.id, e.uname) for e in employeeList]
form.service.choices = [(s.id, f"{s.sname} ${s.price}") for s in serviceList]
if len(serviceList) < 1:
flash("There are no services, Please add services first.", category="error")
appointments = (
db.session.query(
Appointments,
Services.sname.label("service"),
Employees.uname.label("employee"),
)
.select_from(Appointments)
.join(Services)
.join(Employees)
.filter(Appointments.employeeid == current_user.id)
.all()
)
return render_template(
"view/appointments.jinja2",
form=form,
appointments=appointments,
user=current_user,
)
@view.route("/appointments/table", methods=["GET"])
@login_required
def appointment_table():
if request.args.get("viewall", default=0, type=int) == 0:
results = (
db.session.query(
Appointments.id,
Appointments.client,
Appointments.serviceid,
Appointments.employeeid,
Appointments.apptdatetime,
Appointments.tips,
Appointments.total,
Services.sname.label("service"),
Employees.uname.label("employee"),
)
.select_from(Appointments)
.join(Services)
.join(Employees)
.filter(Appointments.employeeid == current_user.id)
.all()
)
else:
results = (
db.session.query(
Appointments.id,
Appointments.client,
Appointments.serviceid,
Appointments.employeeid,
Appointments.apptdatetime,
Appointments.tips,
Appointments.total,
Services.sname.label("service"),
Employees.uname.label("employee"),
)
.select_from(Appointments)
.join(Services)
.join(Employees)
.all()
)
appointments_dict_list = [r._asdict() for r in results]
return json.dumps(appointments_dict_list, default=str)
@view.route("/appointments/update/<int:id>", methods=["POST", "GET"])
@login_required
def appointment_update(id):
form = AppointmentForm()
select_appointment = Appointments.query.get_or_404(id)
if request.method == "POST":
if form.validate_on_submit():
            select_appointment.client = form.client.data
            select_appointment.serviceid = form.service.data    # model columns are serviceid / employeeid
            select_appointment.employeeid = form.employee.data
select_appointment.apptdatetime = datetime.strptime(
f"{form.appt_date.data} {form.appt_time.data}", "%Y-%m-%d %H:%M:%S"
)
select_appointment.tips = form.tip.data
try:
db.session.commit()
except Exception as error:
print(error)
flash("There was an issue updating an appointment", category="error")
return redirect("/appointments")
employeeList = Employees.query.all()
serviceList = Services.query.all()
form.employee.choices = [(e.id, e.uname) for e in employeeList]
form.service.choices = [(s.id, f"{s.sname} ${s.price}") for s in serviceList]
form.client.default = select_appointment.client
form.service.default = select_appointment.serviceid
form.employee.default = select_appointment.employeeid
date = select_appointment.apptdatetime.date()
time = select_appointment.apptdatetime.time()
form.appt_date.default = date
form.appt_time.default = time
form.tip.default = select_appointment.tips
form.total.default = select_appointment.total
form.process() # this is to set the default choices for services/employees
return render_template(
"view/appointment_update.jinja2",
form=form,
user=current_user,
appointment=select_appointment,
)
@view.route("/appointments/delete/<int:id>")
@login_required
def appointment_delete(id):
select_appointment = Appointments.query.get_or_404(id)
try:
db.session.delete(select_appointment)
db.session.commit()
except Exception as error:
print(error)
flash("There was an issue deleting an appointment", category="error")
return redirect("/appointments")
@view.route("/employees", methods=["POST", "GET"])
@login_required
def employee_home():
if current_user.userlevelid >= 3:
flash("Access denied: user privileges too low", category="error")
return redirect("/")
form = EmployeeForm()
if request.method == "POST":
if form.validate_on_submit():
name = form.name.data
email = form.email.data
password = form.password.data
userlevelid = form.employee_type.data
new_employee = ApptUsers(
userlevelid=userlevelid,
uname=name,
email=email,
upassword=generate_password_hash(password, "sha256"),
verified=True,
)
try:
db.session.add(new_employee)
db.session.commit()
flash("New employee created!", category="success")
return redirect("employees")
except Exception as error:
print(error)
flash("There was an issue adding a new employee", category="error")
userLevels = UserLevel.query.filter(UserLevel.ulevel >= 2)
form.employee_type.choices = [(l.ulevel, l.uname) for l in userLevels]
employee = Employees.query.order_by(Employees.id).all()
return render_template(
"view/employees.jinja2",
form=form,
employees=employee,
user=current_user,
)
@view.route("/employees/update/<int:id>", methods=["POST", "GET"])
@login_required
def employee_update(id):
form = EmployeeForm()
employee = ApptUsers.query.get_or_404(id)
if request.method == "POST":
if form.validate_on_submit():
            employee.uname = form.name.data  # ApptUsers stores the display name in "uname"
employee.email = form.email.data
employee.upassword = generate_password_hash(form.password.data, "sha256")
try:
db.session.commit()
return redirect("/employees")
except Exception as error:
print(error)
flash("There was an issue updating an employee", category="error")
return render_template(
"view/employee_update.jinja2",
form=form,
employee=employee,
user=current_user,
)
@view.route("/employees/delete/<int:id>")
@login_required
def employee_delete(id):
selected_employee = ApptUsers.query.get_or_404(id)
try:
db.session.delete(selected_employee)
db.session.commit()
except Exception as error:
print(error)
flash("There was an issue deleting an employee", category="error")
return redirect("/employees")
@view.route("/services", methods=["POST", "GET"])
@login_required
def service_home():
form = ServiceForm()
if request.method == "POST":
if form.validate_on_submit():
service_name = form.name.data
service_price = form.price.data
new_service = Services(sname=service_name, price=service_price)
try:
db.session.add(new_service)
db.session.commit()
flash("New service created!", category="success")
return redirect("services")
except Exception as error:
print(error)
flash("There was an issue adding a new service", category="error")
s = Services.query.order_by(Services.id).all()
return render_template(
"view/services.jinja2",
form=form,
services=s,
user=current_user,
)
@view.route("/services/update/<int:id>", methods=["POST", "GET"])
@login_required
def service_update(id):
form = ServiceForm()
selected_service = Services.query.get_or_404(id)
if request.method == "POST":
if form.validate_on_submit():
selected_service.sname = form.name.data
selected_service.price = form.price.data
try:
db.session.commit()
except Exception as error:
print(error)
flash("There was an issue updating an service", category="error")
return redirect("/services")
return render_template(
"view/service_update.jinja2",
form=form,
service=selected_service,
user=current_user,
)
@view.route("/services/delete/<int:id>")
@login_required
def service_delete(id):
selected_service = Services.query.get_or_404(id)
try:
db.session.delete(selected_service)
db.session.commit()
except Exception as error:
print(error)
flash("There was an issue deleting an service", category="error")
return redirect("/services")
@view.route("/loginhistory")
@login_required
def login_history():
if current_user.userlevelid > 1:
flash("Access denied: user privileges too low", category="error")
return redirect("/")
logins = LoginHistory.query.order_by(LoginHistory.id.desc()).all()
return render_template("view/loginhistory.jinja2", logins=logins, user=current_user)
|
[
""
] | |
e5ab664edb4752d26689ea1f5d8c70d0327cb7b4
|
c876cc3d3bdb9020bf906c53f0f060379cd52b09
|
/rename.py
|
5671cda9ebf8909a59997ba752c73d1a314200f8
|
[
"Apache-2.0"
] |
permissive
|
shinysuraj/python-project
|
328660cb3203cceb76ce78d974ac543891970eed
|
7d7e31898e021d8c4c05d7d6445c5645f6452b11
|
refs/heads/master
| 2020-08-27T12:50:38.855019
| 2017-09-17T22:14:20
| 2017-09-17T22:14:20
| 217,374,680
| 0
| 0
|
Apache-2.0
| 2019-10-24T19:09:20
| 2019-10-24T19:09:20
| null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
# This program renames all the files in the current directory, removing any digits from their
# names
import os
def rename_files():
#replace your own Directory address
    file_list = os.listdir(r"home/ved/hello")
saved_path = os.getcwd()
print("Current Dir is %s" %saved_path)
os.chdir(r"home/ved/hello")
for file_name in file_list:
os.rename(file_name,file_name.translate(None, "0123456789"))
os.chdir(saved_path)
rename_files()
|
[
"noreply@github.com"
] |
shinysuraj.noreply@github.com
|
5f1c2a99593a7553184a6e88dacd5cfddfa94dc2
|
11286e7989264134a8a8d610e0f609e6fbff9140
|
/ch06/ch06_6.py
|
611bb36abeda2b0457a21b95c8675ec3d9cc42ed
|
[] |
no_license
|
p2c2e/machine_learning_with_python_cookbook
|
04eeed2e00e0a3e9c0681d4b2f4125aa85485a1d
|
b176323a02f5b5722e312a579ad764a0276ec9c6
|
refs/heads/main
| 2023-01-30T06:54:34.138786
| 2020-12-13T05:02:07
| 2020-12-13T05:02:07
| 320,987,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
# Load libraries
import unicodedata
import sys
# Create text
text_data = ['Hi!!!! I. Love. This. Song....',
'10000% Agree!!!! #LoveIT',
'Right?!?!']
# Create a dictionary of punctuation characters
punctuation = dict.fromkeys(i for i in range(sys.maxunicode)
if unicodedata.category(chr(i)).startswith('P'))
# For each string, remove any punctuation characters
[string.translate(punctuation) for string in text_data]
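# Expected result of the comprehension above (illustrative): every Unicode punctuation
# character is stripped while spaces are kept, i.e.
# ['Hi I Love This Song', '10000 Agree LoveIT', 'Right']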
|
[
"sudharsan.rangarajan@publicissapient.com"
] |
sudharsan.rangarajan@publicissapient.com
|
2b59d2bc871b13882aa71629e364e5ee5cde3a00
|
186736f265fa7954e95198955546305ab1b9b981
|
/notesApi/settings.py
|
d3fd465d97e808c8f69bde9fd61320c402413ffb
|
[] |
no_license
|
nova-sangeeth/notes-api
|
6449669870dfb69a72e1aad71c8859ca9de8bfbb
|
d5d15a4df615b0b276ccf8f49efc9e21eb177b65
|
refs/heads/master
| 2022-12-22T11:38:03.065884
| 2020-09-23T19:58:14
| 2020-09-23T19:58:14
| 298,022,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,607
|
py
|
"""
Django settings for notesApi project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "v1jk=4%^w9@)42-xumnuc3ho+7!&ug#q3*^y)x^@rlu#-96o*d"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
# crispy forms
"crispy_forms",
# all auth apps
"django.contrib.sites",
"allauth",
"allauth.account",
"allauth.socialaccount",
# apps
"rest_framework",
"api_notes",
]
SITE_ID = 1
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "notesApi.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "notesApi.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
ACCOUNT_EMAIL_VERIFICATION = "required"
ACCOUNT_AUTHENTICATED_LOGIN_REDIRECTS = True
ACCOUNT_EMAIL_REQUIRED = False
|
[
"novasangeeth@outlook.com"
] |
novasangeeth@outlook.com
|
b43a8c8f46ebf074abafe2fe804dd6281bc08def
|
9506d1d978882f1310e31f05624f9123f7d1e4c4
|
/model.py
|
2572da3a20621fc9fcdb81728adc3ec4b507dbb5
|
[] |
no_license
|
mindninjaX/AI-Chatbot
|
6e9b3420cb5cde85138ace552b5cd6f22fe8c26a
|
eb839a538c0067485264f9f03a8a46176e66ebc0
|
refs/heads/master
| 2023-03-30T10:38:29.420301
| 2021-04-01T05:27:54
| 2021-04-01T05:27:54
| 329,689,986
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
import torch
import torch.nn as nn
class NeuralNet(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNet, self).__init__()
self.l1 = nn.Linear(input_size, hidden_size)
self.l2 = nn.Linear(hidden_size, hidden_size)
self.l3 = nn.Linear(hidden_size, num_classes)
self.relu = nn.ReLU()
def forward(self, x):
out = self.l1(x)
out = self.relu(out)
out = self.l2(out)
out = self.relu(out)
out = self.l3(out)
return out
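# Minimal usage sketch (hypothetical layer sizes; guarded so importing is unaffected):
if __name__ == "__main__":
    model = NeuralNet(input_size=54, hidden_size=8, num_classes=7)
    scores = model(torch.rand(1, 54))
    print(scores.shape)  # torch.Size([1, 7]) -- raw scores, no softmax applied here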
|
[
"rishabhsinghs90s@gmail.com"
] |
rishabhsinghs90s@gmail.com
|
4c92871a9b092599b369eba37b5e69ca438f451d
|
3f7240da3dc81205a0a3bf3428ee4e7ae74fb3a2
|
/src/Week4/Practice/Trace1.py
|
6db80027484d73a47f843382e033603034f1470c
|
[] |
no_license
|
theguyoverthere/CMU15-112-Spring17
|
b4ab8e29c31410b4c68d7b2c696a76b9d85ab4d8
|
b8287092b14e82d2a3aeac6c27bffbc95382eb34
|
refs/heads/master
| 2021-04-27T08:52:45.237631
| 2018-10-02T15:38:18
| 2018-10-02T15:38:18
| 107,882,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
def onesDigit(n):
return n%10
def ct1(L):
for i in range(len(L)):
L[i] += sum(L) + max(L)
# The function onesDigit is called on each element before
# making comparison.
return sorted(L, key=onesDigit)
a = [2,1,0]
print(ct1(a))
print(a)
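# Working the trace by hand (illustrative check): starting from a = [2, 1, 0], the loop
# rewrites L in place to [7, 16, 39], so ct1(a) returns the values sorted by ones digit,
# [16, 7, 39], while a itself ends up as [7, 16, 39].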
|
[
"tariqueanwer@outlook.com"
] |
tariqueanwer@outlook.com
|
ebf03cfdd8a51f8ebfe2b59fa37239f887dc4074
|
a6518cd4bdb5d8d3fde49a805208d34d6381191a
|
/server/application/models/window.py
|
33c1b4ea4f3fc57f312a3debcdabc0df9eba1e29
|
[
"MIT"
] |
permissive
|
coloration-production/basic-saas
|
c43b09515be3ec08e044d25b33d69a4482e39855
|
91656e4cb70ace6b94cd0f5f6fa54f7d996106c0
|
refs/heads/main
| 2023-06-03T06:00:32.003920
| 2021-06-16T13:03:05
| 2021-06-16T13:03:05
| 375,542,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,665
|
py
|
# encoding: utf-8
from application.models.base_entity import BaseEntity
from application import db
from sqlalchemy_serializer import SerializerMixin
from application.models.window_widget import registrations
from application.models.widget import Widget
from datetime import datetime
class Window(BaseEntity, SerializerMixin):
__tablename__ = 'windows'
name = db.Column(db.String(32))
layout = db.Column(db.String)
widgets = db.relationship(
'Widget',
secondary = registrations,
backref = db.backref('windows', lazy = 'dynamic'),
lazy = 'dynamic'
)
def __repr__(self) -> str:
return '<Window {}:{}>'.format(self.id, self.name)
@classmethod
def create_record (Entity, **kwargs):
widgets = []
if 'widgets' in kwargs:
widgets = kwargs['widgets']
del kwargs['widgets']
record = Entity(**kwargs)
if len(widgets) > 0 and isinstance(widgets, list):
record.widgets = Widget.query.filter(Widget.id.in_(widgets)).all()
pass
db.session.add(record)
db.session.commit()
return record
@classmethod
def modify_record (Entity, id, entity):
record = Entity.query_record(id = id)
widgets = []
if 'id' in entity:
del entity['id']
if 'widgets' in entity:
widgets = entity['widgets']
del entity['widgets']
for key in entity:
setattr(record, key, entity[key])
if len(widgets) > 0 and isinstance(widgets, list):
record.widgets = Widget.query.filter(Widget.id.in_(widgets)).all()
pass
setattr(record, 'lasted', datetime.utcnow())
db.session.add(record)
db.session.commit()
return record
|
[
"binyu.wang@zgcechuang.com"
] |
binyu.wang@zgcechuang.com
|
7a8c6b2fe81d9938e242e7bf859ec30576c2dab4
|
c550b7993524ef3598d7b93900d36e022ad2b16f
|
/venv/Scripts/pip3.8-script.py
|
d9a1cebc9fc049c6a56f94b005a001e14004390f
|
[] |
no_license
|
19982084685/network-simulation
|
523e3205eb94a312a6105946f31e031d660dcd52
|
ce825730742e6adf2d968bfe3e210c38b4419798
|
refs/heads/master
| 2022-12-25T07:05:05.536120
| 2020-09-11T15:21:24
| 2020-09-11T15:21:24
| 294,640,613
| 0
| 0
| null | null | null | null |
GB18030
|
Python
| false
| false
| 446
|
py
|
#!D:\Users\玉明\Documents\教学文档\网络挑战课\pyProject\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
|
[
"3010201649@qq.com"
] |
3010201649@qq.com
|
7b7065c44f46e1fc431ba62723c83f4e085bc20d
|
9801dd62f1c2a4454f104d26c2d7d9d75167a31c
|
/build-osmosis-script.py
|
305bfb15e1e3ea41596da49c3ed30e372b752c36
|
[] |
no_license
|
hholzgra/Extractotron
|
cc232f25bcc9609013ff2ecdf36dc8cd7fc2cf20
|
5c67734aa1107a93832d37317b6059fd5225ec3e
|
refs/heads/master
| 2021-01-15T23:35:22.356427
| 2011-09-24T03:57:42
| 2011-09-24T03:57:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,005
|
py
|
from sys import argv, stderr
from csv import DictReader
cities = list(DictReader(open('cities.txt'), dialect='excel-tab'))
try:
(osmosis, ) = argv[1:]
except ValueError:
    print >> stderr, 'Usage: build-osmosis-script.py <osmosis command file>'
exit(1)
osmosis = open(osmosis, 'w')
print >> osmosis, 'bunzip2 -c planet-latest.osm.bz2 | osmosis-*/bin/osmosis --rx file=- \\'
print >> osmosis, ' --log-progress interval=60 \\'
print >> osmosis, ' --tee outputCount=2 \\'
print >> osmosis, ' --tag-filter accept-ways natural=coastline --used-node \\'
print >> osmosis, ' --wx coastline.osm.bz2 \\'
print >> osmosis, ' --tee outputCount=%d \\' % len(cities)
print >> osmosis, ' \\'
for city in cities:
print >> osmosis, ' --bb top=%(top)s left=%(left)s bottom=%(bottom)s right=%(right)s \\' % city
print >> osmosis, ' --tee outputCount=2 --wx file=ex/%(slug)s.osm.bz2 --wb file=ex/%(slug)s.osm.pbf \\' % city
print >> osmosis, '> osmosis.txt 2>&1;'
osmosis.close()
|
[
"mike@stamen.com"
] |
mike@stamen.com
|
a2c679a999c8aa27009e451be46263c0baafba57
|
b60c2ce1b3f5ae8e4381cad4564d2fb189cd325b
|
/source_manipulation/nt_parser_zefania_english.py
|
00ec14cfdd8d73f344443e416c23121bfdb9ecd3
|
[] |
no_license
|
jasonamyers/SacredPy
|
840659d4e192f88dac7fe08eac3159bbb0433491
|
2ed4951619d124aaa8f1a8d183ac0b3f816302ae
|
refs/heads/master
| 2021-01-16T21:18:08.046217
| 2013-12-08T14:07:26
| 2013-12-08T14:07:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,241
|
py
|
from lxml import etree
from pprint import pprint
ons = {'o': 'http://www.bibletechnologies.net/2003/OSIS/namespace'}
class Processor(object):
def __init__(self, fname):
self.tree = etree.parse(fname)
print self.tree
self.books = list()
self.process_book()
def process_verses(self, chapter):
verses = list()
for verse in chapter.findall('o:verse', namespaces=ons):
verses.append({'id': verse.get('osisID'), 'content': verse.text,})
return verses
def process_chapters(self, book):
chapters = list()
for chapter in book.findall('o:chapter', namespaces=ons):
chapters.append({
'number': chapter.get('osisID'),
'verses': self.process_verses(chapter)
})
return chapters
def process_book(self):
for book in self.tree.findall('//o:div[@type="book"]', namespaces=ons):
self.books.append({
'name': book.get('osisID'),
'chapters': self.process_chapters(book)
})
if __name__ == '__main__':
p = Processor('nt_zefania_english.xml')
print len(p.books)
for book in p.books:
pprint(book)
|
[
"jtim.arnold@gmail.com"
] |
jtim.arnold@gmail.com
|
4ccf7450ab45e16ee470b9508c1564a341691058
|
5eb84c7ca6572b6503f94e53813bdab018cbbe2d
|
/rocket.py
|
477d44fb1fa2af8ee9499fc0b79d9cca28082cfc
|
[] |
no_license
|
AlexanderHHS/new
|
fa161a04d5a56a7aba8da322825f4143100a7ccf
|
b3ecdf6acbb6fc19892bcb1029e4a31b2e91435c
|
refs/heads/master
| 2020-05-29T17:26:47.167837
| 2019-05-30T20:52:02
| 2019-05-30T20:52:02
| 189,277,058
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,715
|
py
|
from math import sqrt
class Rocket():
# Rocket simulates a rocket ship for a game,
# or a physics simulation.
def __init__(self, name = "TBD", x=0, y=0):
# Each rocket has an (x,y) position.
self.x = x
self.y = y
self.name = name
def move_rocket(self, x_increment=0, y_increment=1):
        # Move the rocket according to the parameters given.
# Default behavior is to move the rocket up one unit.
self.x += x_increment
self.y += y_increment
def get_distance(self, other):
# Calculates the distance from this rocket to another rocket,
# and returns that value.
distance = sqrt((self.x-other.x)**2+(self.y-other.y)**2)
return distance
def get_name(self):
#simply return the name
return self.name
def introduction(self):
print("Hi, I am a rocket named", self.name, ". Nice to meet you.")
# Make two rockets, at different places.
rocket1 = Rocket() #This one will be at default x and y position
rocket2 = Rocket("Tom",10,5) # This one is at 10, 5
#Print the names
print("The names of the rockets are...")
print(rocket1.get_name())
print(rocket2.name)
# Show the distance between them.
distance = rocket1.get_distance(rocket2)
print("The rockets are %f units apart." % distance)
#Move rocket1 up a bit
rocket1.move_rocket(0,2)
#Move the rockets some more to test out the methods
distance = rocket1.get_distance(rocket2)
print("The rockets are,", distance, " units apart.")
rocket1.move_rocket(0,2)
distance = rocket1.get_distance(rocket2)
print("The rockets are %f units apart." % distance)
rocket1.introduction()
rocket2.introduction()
|
[
"alexandt@hsd.k12.or.us"
] |
alexandt@hsd.k12.or.us
|
ab8675c96b935a51728df70ac5b5869ed48e9804
|
37fd5a148523aed620426cc3f39c653e02ba2e17
|
/opencensus/trace/exceptions_status.py
|
a57bdec60434f73c96ac2a6102656e4fd033636a
|
[
"Apache-2.0"
] |
permissive
|
dineshkrishnareddy/opencensus-python
|
8ebfa74e5b487c91ec1fe5734487c9d673a77fad
|
e5e752ceab3371ec4b78cec23a717168e2ed9372
|
refs/heads/master
| 2022-02-18T08:40:07.319320
| 2019-10-01T22:41:07
| 2019-10-01T22:41:07
| 212,539,887
| 1
| 0
|
Apache-2.0
| 2019-10-03T09:16:46
| 2019-10-03T09:16:46
| null |
UTF-8
|
Python
| false
| false
| 914
|
py
|
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.rpc import code_pb2
from opencensus.trace.status import Status
CANCELLED = Status(code_pb2.CANCELLED)
INVALID_URL = Status(code_pb2.INVALID_ARGUMENT, message='invalid URL')
TIMEOUT = Status(code_pb2.DEADLINE_EXCEEDED, message='request timed out')
def unknown(exception):
return Status.from_exception(exception)
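# Illustrative usage (hypothetical caller code, commented out):
#
#   try:
#       resp = do_request()        # hypothetical call
#   except TimeoutError:
#       status = TIMEOUT           # predefined DEADLINE_EXCEEDED status
#   except Exception as exc:
#       status = unknown(exc)      # wrap any other exception via Status.from_exception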
|
[
"reyang@microsoft.com"
] |
reyang@microsoft.com
|
ddb617b3840deff9580b1979fa5f9a1accfb1906
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/you-get/2016/8/common.py
|
a5a0fbab63c9d5e6a52916b9ad5356b87ef836b7
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005
| 2021-01-28T19:40:51
| 2021-01-28T19:40:51
| 306,497,459
| 1
| 1
| null | 2020-11-24T20:56:18
| 2020-10-23T01:18:07
| null |
UTF-8
|
Python
| false
| false
| 46,179
|
py
|
#!/usr/bin/env python
SITES = {
'163' : 'netease',
'56' : 'w56',
'acfun' : 'acfun',
'archive' : 'archive',
'baidu' : 'baidu',
'bandcamp' : 'bandcamp',
'baomihua' : 'baomihua',
'bigthink' : 'bigthink',
'bilibili' : 'bilibili',
'cctv' : 'cntv',
'cntv' : 'cntv',
'cbs' : 'cbs',
'dailymotion' : 'dailymotion',
'dilidili' : 'dilidili',
'dongting' : 'dongting',
'douban' : 'douban',
'douyu' : 'douyutv',
'ehow' : 'ehow',
'facebook' : 'facebook',
'fc2' : 'fc2video',
'flickr' : 'flickr',
'freesound' : 'freesound',
'fun' : 'funshion',
'google' : 'google',
'heavy-music' : 'heavymusic',
'huaban' : 'huaban',
'iask' : 'sina',
'ifeng' : 'ifeng',
'imgur' : 'imgur',
'in' : 'alive',
'infoq' : 'infoq',
'instagram' : 'instagram',
'interest' : 'interest',
'iqilu' : 'iqilu',
'iqiyi' : 'iqiyi',
'isuntv' : 'suntv',
'joy' : 'joy',
'jpopsuki' : 'jpopsuki',
'kankanews' : 'bilibili',
'khanacademy' : 'khan',
'ku6' : 'ku6',
'kugou' : 'kugou',
'kuwo' : 'kuwo',
'le' : 'le',
'letv' : 'le',
'lizhi' : 'lizhi',
'magisto' : 'magisto',
'metacafe' : 'metacafe',
'mgtv' : 'mgtv',
'miomio' : 'miomio',
'mixcloud' : 'mixcloud',
'mtv81' : 'mtv81',
'musicplayon' : 'musicplayon',
'naver' : 'naver',
'7gogo' : 'nanagogo',
'nicovideo' : 'nicovideo',
'panda' : 'panda',
'pinterest' : 'pinterest',
'pixnet' : 'pixnet',
'pptv' : 'pptv',
'qianmo' : 'qianmo',
'qq' : 'qq',
'showroom-live' : 'showroom',
'sina' : 'sina',
'smgbb' : 'bilibili',
'sohu' : 'sohu',
'soundcloud' : 'soundcloud',
'ted' : 'ted',
'theplatform' : 'theplatform',
'thvideo' : 'thvideo',
'tucao' : 'tucao',
'tudou' : 'tudou',
'tumblr' : 'tumblr',
'twimg' : 'twitter',
'twitter' : 'twitter',
'videomega' : 'videomega',
'vidto' : 'vidto',
'vimeo' : 'vimeo',
'wanmen' : 'wanmen',
'weibo' : 'miaopai',
'veoh' : 'veoh',
'vine' : 'vine',
'vk' : 'vk',
'xiami' : 'xiami',
'xiaokaxiu' : 'yixia',
'xiaojiadianvideo' : 'fc2video',
'yinyuetai' : 'yinyuetai',
'miaopai' : 'yixia',
'youku' : 'youku',
'youtu' : 'youtube',
'youtube' : 'youtube',
'zhanqi' : 'zhanqi',
}
import getopt
import json
import locale
import logging
import os
import platform
import re
import socket
import sys
import time
from urllib import request, parse, error
from http import cookiejar
from importlib import import_module
from .version import __version__
from .util import log, term
from .util.git import get_version
from .util.strings import get_filename, unescape_html
from . import json_output as json_output_
dry_run = False
json_output = False
force = False
player = None
extractor_proxy = None
cookies = None
output_filename = None
fake_headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'UTF-8,*;q=0.5',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) Gecko/20100101 Firefox/13.0'
}
if sys.stdout.isatty():
default_encoding = sys.stdout.encoding.lower()
else:
default_encoding = locale.getpreferredencoding().lower()
def maybe_print(*s):
try: print(*s)
except: pass
def tr(s):
if default_encoding == 'utf-8':
return s
else:
return s
#return str(s.encode('utf-8'))[2:-1]
# DEPRECATED in favor of match1()
def r1(pattern, text):
m = re.search(pattern, text)
if m:
return m.group(1)
# DEPRECATED in favor of match1()
def r1_of(patterns, text):
for p in patterns:
x = r1(p, text)
if x:
return x
def match1(text, *patterns):
"""Scans through a string for substrings matched some patterns (first-subgroups only).
Args:
text: A string to be scanned.
patterns: Arbitrary number of regex patterns.
Returns:
When only one pattern is given, returns a string (None if no match found).
    When more than one pattern is given, returns a list of strings ([] if no match found).
"""
if len(patterns) == 1:
pattern = patterns[0]
match = re.search(pattern, text)
if match:
return match.group(1)
else:
return None
else:
ret = []
for pattern in patterns:
match = re.search(pattern, text)
if match:
ret.append(match.group(1))
return ret
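# Illustrative examples (not part of the original module):
#   match1('id=42&page=7', r'id=(\d+)')                 -> '42'
#   match1('id=42&page=7', r'id=(\d+)', r'page=(\d+)')  -> ['42', '7']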
def matchall(text, patterns):
"""Scans through a string for substrings matched some patterns.
Args:
text: A string to be scanned.
patterns: a list of regex pattern.
Returns:
    A list of all matches; empty if nothing matched.
"""
ret = []
for pattern in patterns:
match = re.findall(pattern, text)
ret += match
return ret
def launch_player(player, urls):
import subprocess
import shlex
subprocess.call(shlex.split(player) + list(urls))
def parse_query_param(url, param):
"""Parses the query string of a URL and returns the value of a parameter.
Args:
url: A URL.
param: A string representing the name of the parameter.
Returns:
The value of the parameter.
"""
try:
return parse.parse_qs(parse.urlparse(url).query)[param][0]
except:
return None
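# Illustrative example (hypothetical URL):
#   parse_query_param('http://example.com/watch?v=abc123&t=10', 'v') -> 'abc123'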
def unicodize(text):
return re.sub(r'\\u([0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f])', lambda x: chr(int(x.group(0)[2:], 16)), text)
# DEPRECATED in favor of util.legitimize()
def escape_file_path(path):
path = path.replace('/', '-')
path = path.replace('\\', '-')
path = path.replace('*', '-')
path = path.replace('?', '-')
return path
def ungzip(data):
"""Decompresses data for Content-Encoding: gzip.
"""
from io import BytesIO
import gzip
buffer = BytesIO(data)
f = gzip.GzipFile(fileobj=buffer)
return f.read()
def undeflate(data):
"""Decompresses data for Content-Encoding: deflate.
(the zlib compression is used.)
"""
import zlib
decompressobj = zlib.decompressobj(-zlib.MAX_WBITS)
return decompressobj.decompress(data)+decompressobj.flush()
# DEPRECATED in favor of get_content()
def get_response(url, faker = False):
# install cookies
if cookies:
opener = request.build_opener(request.HTTPCookieProcessor(cookies))
request.install_opener(opener)
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
else:
response = request.urlopen(url)
data = response.read()
if response.info().get('Content-Encoding') == 'gzip':
data = ungzip(data)
elif response.info().get('Content-Encoding') == 'deflate':
data = undeflate(data)
response.data = data
return response
# DEPRECATED in favor of get_content()
def get_html(url, encoding = None, faker = False):
content = get_response(url, faker).data
return str(content, 'utf-8', 'ignore')
# DEPRECATED in favor of get_content()
def get_decoded_html(url, faker = False):
response = get_response(url, faker)
data = response.data
charset = r1(r'charset=([\w-]+)', response.headers['content-type'])
if charset:
return data.decode(charset, 'ignore')
else:
return data
def get_location(url):
response = request.urlopen(url)
# urllib will follow redirections and it's too much code to tell urllib
# not to do that
return response.geturl()
def get_content(url, headers={}, decoded=True):
"""Gets the content of a URL via sending a HTTP GET request.
Args:
url: A URL.
headers: Request headers used by the client.
        decoded: Whether to decode the response body using UTF-8 or the charset specified in Content-Type.
Returns:
The content as a string.
"""
logging.debug('get_content: %s' % url)
req = request.Request(url, headers=headers)
if cookies:
cookies.add_cookie_header(req)
req.headers.update(req.unredirected_hdrs)
for i in range(10):
try:
response = request.urlopen(req)
break
except socket.timeout:
logging.debug('request attempt %s timeout' % str(i + 1))
data = response.read()
# Handle HTTP compression for gzip and deflate (zlib)
content_encoding = response.getheader('Content-Encoding')
if content_encoding == 'gzip':
data = ungzip(data)
elif content_encoding == 'deflate':
data = undeflate(data)
# Decode the response body
if decoded:
charset = match1(response.getheader('Content-Type'), r'charset=([\w-]+)')
if charset is not None:
data = data.decode(charset)
else:
data = data.decode('utf-8')
return data
def url_size(url, faker = False, headers = {}):
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
elif headers:
response = request.urlopen(request.Request(url, headers = headers), None)
else:
response = request.urlopen(url)
size = response.headers['content-length']
return int(size) if size!=None else float('inf')
def urls_size(urls, faker = False, headers = {}):
return sum([url_size(url, faker=faker, headers=headers) for url in urls])
def get_head(url, headers = {}):
if headers:
req = request.Request(url, headers = headers)
else:
req = request.Request(url)
req.get_method = lambda : 'HEAD'
res = request.urlopen(req)
return dict(res.headers)
def url_info(url, faker = False, headers = {}):
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
elif headers:
response = request.urlopen(request.Request(url, headers = headers), None)
else:
response = request.urlopen(request.Request(url))
headers = response.headers
type = headers['content-type']
if type == 'image/jpg; charset=UTF-8' or type == 'image/jpg' : type = 'audio/mpeg' #fix for netease
mapping = {
'video/3gpp': '3gp',
'video/f4v': 'flv',
'video/mp4': 'mp4',
'video/MP2T': 'ts',
'video/quicktime': 'mov',
'video/webm': 'webm',
'video/x-flv': 'flv',
'video/x-ms-asf': 'asf',
'audio/mp4': 'mp4',
'audio/mpeg': 'mp3',
'image/jpeg': 'jpg',
'image/png': 'png',
'image/gif': 'gif',
'application/pdf': 'pdf',
}
if type in mapping:
ext = mapping[type]
else:
type = None
if headers['content-disposition']:
try:
filename = parse.unquote(r1(r'filename="?([^"]+)"?', headers['content-disposition']))
if len(filename.split('.')) > 1:
ext = filename.split('.')[-1]
else:
ext = None
except:
ext = None
else:
ext = None
if headers['transfer-encoding'] != 'chunked':
size = headers['content-length'] and int(headers['content-length'])
else:
size = None
return type, ext, size
def url_locations(urls, faker = False, headers = {}):
locations = []
for url in urls:
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
elif headers:
response = request.urlopen(request.Request(url, headers = headers), None)
else:
response = request.urlopen(request.Request(url))
locations.append(response.url)
return locations
def url_save(url, filepath, bar, refer = None, is_part = False, faker = False, headers = {}):
file_size = url_size(url, faker = faker, headers = headers)
if os.path.exists(filepath):
if not force and file_size == os.path.getsize(filepath):
if not is_part:
if bar:
bar.done()
print('Skipping %s: file already exists' % tr(os.path.basename(filepath)))
else:
if bar:
bar.update_received(file_size)
return
else:
if not is_part:
if bar:
bar.done()
print('Overwriting %s' % tr(os.path.basename(filepath)), '...')
elif not os.path.exists(os.path.dirname(filepath)):
os.mkdir(os.path.dirname(filepath))
temp_filepath = filepath + '.download' if file_size!=float('inf') else filepath
received = 0
if not force:
open_mode = 'ab'
if os.path.exists(temp_filepath):
received += os.path.getsize(temp_filepath)
if bar:
bar.update_received(os.path.getsize(temp_filepath))
else:
open_mode = 'wb'
if received < file_size:
if faker:
headers = fake_headers
elif headers:
headers = headers
else:
headers = {}
if received:
headers['Range'] = 'bytes=' + str(received) + '-'
if refer:
headers['Referer'] = refer
response = request.urlopen(request.Request(url, headers = headers), None)
try:
range_start = int(response.headers['content-range'][6:].split('/')[0].split('-')[0])
end_length = end = int(response.headers['content-range'][6:].split('/')[1])
range_length = end_length - range_start
except:
content_length = response.headers['content-length']
range_length = int(content_length) if content_length!=None else float('inf')
if file_size != received + range_length:
received = 0
if bar:
bar.received = 0
open_mode = 'wb'
with open(temp_filepath, open_mode) as output:
while True:
buffer = response.read(1024 * 256)
if not buffer:
if received == file_size: # Download finished
break
else: # Unexpected termination. Retry request
headers['Range'] = 'bytes=' + str(received) + '-'
response = request.urlopen(request.Request(url, headers = headers), None)
output.write(buffer)
received += len(buffer)
if bar:
bar.update_received(len(buffer))
assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (received, os.path.getsize(temp_filepath), temp_filepath)
if os.access(filepath, os.W_OK):
os.remove(filepath) # on Windows rename could fail if destination filepath exists
os.rename(temp_filepath, filepath)
def url_save_chunked(url, filepath, bar, refer = None, is_part = False, faker = False, headers = {}):
if os.path.exists(filepath):
if not force:
if not is_part:
if bar:
bar.done()
print('Skipping %s: file already exists' % tr(os.path.basename(filepath)))
else:
if bar:
bar.update_received(os.path.getsize(filepath))
return
else:
if not is_part:
if bar:
bar.done()
print('Overwriting %s' % tr(os.path.basename(filepath)), '...')
elif not os.path.exists(os.path.dirname(filepath)):
os.mkdir(os.path.dirname(filepath))
temp_filepath = filepath + '.download'
received = 0
if not force:
open_mode = 'ab'
if os.path.exists(temp_filepath):
received += os.path.getsize(temp_filepath)
if bar:
bar.update_received(os.path.getsize(temp_filepath))
else:
open_mode = 'wb'
if faker:
headers = fake_headers
elif headers:
headers = headers
else:
headers = {}
if received:
headers['Range'] = 'bytes=' + str(received) + '-'
if refer:
headers['Referer'] = refer
response = request.urlopen(request.Request(url, headers = headers), None)
with open(temp_filepath, open_mode) as output:
while True:
buffer = response.read(1024 * 256)
if not buffer:
break
output.write(buffer)
received += len(buffer)
if bar:
bar.update_received(len(buffer))
assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (received, os.path.getsize(temp_filepath), temp_filepath)
if os.access(filepath, os.W_OK):
os.remove(filepath) # on Windows rename could fail if destination filepath exists
os.rename(temp_filepath, filepath)
class SimpleProgressBar:
term_size = term.get_terminal_size()[1]
def __init__(self, total_size, total_pieces = 1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
self.speed = ''
self.last_updated = time.time()
total_pieces_len = len(str(total_pieces))
# 38 is the combined width of the statically sized parts of self.bar
total_str = '%5s' % round(self.total_size / 1048576, 1)
total_str_width = max(len(total_str), 5)
self.bar_size = self.term_size - 27 - 2*total_pieces_len - 2*total_str_width
self.bar = '{:>4}%% ({:>%s}/%sMB) ├{:─<%s}┤[{:>%s}/{:>%s}] {}' % (
total_str_width, total_str, self.bar_size, total_pieces_len, total_pieces_len)
def update(self):
self.displayed = True
bar_size = self.bar_size
percent = round(self.received * 100 / self.total_size, 1)
if percent >= 100:
percent = 100
dots = bar_size * int(percent) // 100
plus = int(percent) - dots // bar_size * 100
if plus > 0.8:
plus = '█'
elif plus > 0.4:
plus = '>'
else:
plus = ''
bar = '█' * dots + plus
bar = self.bar.format(percent, round(self.received / 1048576, 1), bar, self.current_piece, self.total_pieces, self.speed)
sys.stdout.write('\r' + bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
time_diff = time.time() - self.last_updated
bytes_ps = n / time_diff if time_diff else 0
if bytes_ps >= 1024 ** 3:
self.speed = '{:4.0f} GB/s'.format(bytes_ps / 1024 ** 3)
elif bytes_ps >= 1024 ** 2:
self.speed = '{:4.0f} MB/s'.format(bytes_ps / 1024 ** 2)
elif bytes_ps >= 1024:
self.speed = '{:4.0f} kB/s'.format(bytes_ps / 1024)
else:
self.speed = '{:4.0f} B/s'.format(bytes_ps)
self.last_updated = time.time()
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print()
self.displayed = False
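# --- Added illustration (not part of the original module) ---
# Hedged sketch driving SimpleProgressBar with fake byte counts (10 pretend MiB),
# just to show the update_received()/done() call pattern; defined but never called.
def _progress_bar_demo():
    bar = SimpleProgressBar(10 * 1048576, total_pieces=1)
    for _ in range(10):
        bar.update_received(1048576)  # pretend one MiB just arrived
    bar.done()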
class PiecesProgressBar:
def __init__(self, total_size, total_pieces = 1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
def update(self):
self.displayed = True
bar = '{0:>5}%[{1:<40}] {2}/{3}'.format('', '=' * 40, self.current_piece, self.total_pieces)
sys.stdout.write('\r' + bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print()
self.displayed = False
class DummyProgressBar:
def __init__(self, *args):
pass
def update_received(self, n):
pass
def update_piece(self, n):
pass
def done(self):
pass
def get_output_filename(urls, title, ext, output_dir, merge):
# lame hack for the --output-filename option
global output_filename
if output_filename: return output_filename
merged_ext = ext
if (len(urls) > 1) and merge:
from .processor.ffmpeg import has_ffmpeg_installed
if ext in ['flv', 'f4v']:
if has_ffmpeg_installed():
merged_ext = 'mp4'
else:
merged_ext = 'flv'
elif ext == 'mp4':
merged_ext = 'mp4'
elif ext == 'ts':
if has_ffmpeg_installed():
merged_ext = 'mkv'
else:
merged_ext = 'ts'
return '%s.%s' % (title, merged_ext)
def download_urls(urls, title, ext, total_size, output_dir='.', refer=None, merge=True, faker=False, headers = {}, **kwargs):
assert urls
if json_output:
json_output_.download_urls(urls=urls, title=title, ext=ext, total_size=total_size, refer=refer)
return
if dry_run:
print('Real URLs:\n%s' % '\n'.join(urls))
return
if player:
launch_player(player, urls)
return
if not total_size:
try:
total_size = urls_size(urls, faker=faker, headers=headers)
except:
import traceback
traceback.print_exc(file=sys.stdout)
pass
title = tr(get_filename(title))
output_filename = get_output_filename(urls, title, ext, output_dir, merge)
output_filepath = os.path.join(output_dir, output_filename)
if total_size:
if not force and os.path.exists(output_filepath) and os.path.getsize(output_filepath) >= total_size * 0.9:
print('Skipping %s: file already exists' % output_filepath)
print()
return
bar = SimpleProgressBar(total_size, len(urls))
else:
bar = PiecesProgressBar(total_size, len(urls))
if len(urls) == 1:
url = urls[0]
print('Downloading %s ...' % tr(output_filename))
bar.update()
url_save(url, output_filepath, bar, refer = refer, faker = faker, headers = headers)
bar.done()
else:
parts = []
print('Downloading %s.%s ...' % (tr(title), ext))
bar.update()
for i, url in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
#print 'Downloading %s [%s/%s]...' % (tr(filename), i + 1, len(urls))
bar.update_piece(i + 1)
url_save(url, filepath, bar, refer = refer, is_part = True, faker = faker, headers = headers)
bar.done()
if not merge:
print()
return
if 'av' in kwargs and kwargs['av']:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_av
ret = ffmpeg_concat_av(parts, output_filepath, ext)
print('Merged into %s' % output_filename)
if ret == 0:
for part in parts: os.remove(part)
elif ext in ['flv', 'f4v']:
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_flv_to_mp4
ffmpeg_concat_flv_to_mp4(parts, output_filepath)
else:
from .processor.join_flv import concat_flv
concat_flv(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
elif ext == 'mp4':
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_mp4_to_mp4
ffmpeg_concat_mp4_to_mp4(parts, output_filepath)
else:
from .processor.join_mp4 import concat_mp4
concat_mp4(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
elif ext == "ts":
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_ts_to_mkv
ffmpeg_concat_ts_to_mkv(parts, output_filepath)
else:
from .processor.join_ts import concat_ts
concat_ts(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
else:
print("Can't merge %s files" % ext)
print()
def download_urls_chunked(urls, title, ext, total_size, output_dir='.', refer=None, merge=True, faker=False, headers = {}):
assert urls
if dry_run:
print('Real URLs:\n%s\n' % urls)
return
if player:
launch_player(player, urls)
return
title = tr(get_filename(title))
filename = '%s.%s' % (title, ext)
filepath = os.path.join(output_dir, filename)
if total_size and ext in ('ts',):
if not force and os.path.exists(filepath[:-3] + '.mkv'):
print('Skipping %s: file already exists' % (filepath[:-3] + '.mkv'))
print()
return
bar = SimpleProgressBar(total_size, len(urls))
else:
bar = PiecesProgressBar(total_size, len(urls))
if len(urls) == 1:
parts = []
url = urls[0]
print('Downloading %s ...' % tr(filename))
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
url_save_chunked(url, filepath, bar, refer = refer, faker = faker, headers = headers)
bar.done()
if not merge:
print()
return
if ext == 'ts':
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_convert_ts_to_mkv
if ffmpeg_convert_ts_to_mkv(parts, os.path.join(output_dir, title + '.mkv')):
for part in parts:
os.remove(part)
else:
os.remove(os.path.join(output_dir, title + '.mkv'))
else:
print('No ffmpeg is found. Conversion aborted.')
else:
print("Can't convert %s files" % ext)
else:
parts = []
print('Downloading %s.%s ...' % (tr(title), ext))
for i, url in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
#print 'Downloading %s [%s/%s]...' % (tr(filename), i + 1, len(urls))
bar.update_piece(i + 1)
url_save_chunked(url, filepath, bar, refer = refer, is_part = True, faker = faker, headers = headers)
bar.done()
if not merge:
print()
return
if ext == 'ts':
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_ts_to_mkv
if ffmpeg_concat_ts_to_mkv(parts, os.path.join(output_dir, title + '.mkv')):
for part in parts:
os.remove(part)
else:
os.remove(os.path.join(output_dir, title + '.mkv'))
else:
print('No ffmpeg is found. Merging aborted.')
else:
print("Can't merge %s files" % ext)
print()
def download_rtmp_url(url,title, ext,params={}, total_size=0, output_dir='.', refer=None, merge=True, faker=False):
assert url
if dry_run:
print('Real URL:\n%s\n' % [url])
if params.get("-y",False): #None or unset ->False
print('Real Playpath:\n%s\n' % [params.get("-y")])
return
if player:
from .processor.rtmpdump import play_rtmpdump_stream
play_rtmpdump_stream(player, url, params)
return
from .processor.rtmpdump import has_rtmpdump_installed, download_rtmpdump_stream
assert has_rtmpdump_installed(), "RTMPDump not installed."
download_rtmpdump_stream(url, title, ext,params, output_dir)
def download_url_ffmpeg(url,title, ext,params={}, total_size=0, output_dir='.', refer=None, merge=True, faker=False):
assert url
if dry_run:
print('Real URL:\n%s\n' % [url])
if params.get("-y",False): #None or unset ->False
print('Real Playpath:\n%s\n' % [params.get("-y")])
return
if player:
launch_player(player, [url])
return
from .processor.ffmpeg import has_ffmpeg_installed, ffmpeg_download_stream
assert has_ffmpeg_installed(), "FFmpeg not installed."
ffmpeg_download_stream(url, title, ext, params, output_dir)
def playlist_not_supported(name):
def f(*args, **kwargs):
raise NotImplementedError('Playlist is not supported for ' + name)
return f
def print_info(site_info, title, type, size):
if json_output:
json_output_.print_info(site_info=site_info, title=title, type=type, size=size)
return
if type:
type = type.lower()
if type in ['3gp']:
type = 'video/3gpp'
elif type in ['asf', 'wmv']:
type = 'video/x-ms-asf'
elif type in ['flv', 'f4v']:
type = 'video/x-flv'
elif type in ['mkv']:
type = 'video/x-matroska'
elif type in ['mp3']:
type = 'audio/mpeg'
elif type in ['mp4']:
type = 'video/mp4'
elif type in ['mov']:
type = 'video/quicktime'
elif type in ['ts']:
type = 'video/MP2T'
elif type in ['webm']:
type = 'video/webm'
elif type in ['jpg']:
type = 'image/jpeg'
elif type in ['png']:
type = 'image/png'
elif type in ['gif']:
type = 'image/gif'
if type in ['video/3gpp']:
type_info = "3GPP multimedia file (%s)" % type
elif type in ['video/x-flv', 'video/f4v']:
type_info = "Flash video (%s)" % type
elif type in ['video/mp4', 'video/x-m4v']:
type_info = "MPEG-4 video (%s)" % type
elif type in ['video/MP2T']:
type_info = "MPEG-2 transport stream (%s)" % type
elif type in ['video/webm']:
type_info = "WebM video (%s)" % type
#elif type in ['video/ogg']:
# type_info = "Ogg video (%s)" % type
elif type in ['video/quicktime']:
type_info = "QuickTime video (%s)" % type
elif type in ['video/x-matroska']:
type_info = "Matroska video (%s)" % type
#elif type in ['video/x-ms-wmv']:
# type_info = "Windows Media video (%s)" % type
elif type in ['video/x-ms-asf']:
type_info = "Advanced Systems Format (%s)" % type
#elif type in ['video/mpeg']:
# type_info = "MPEG video (%s)" % type
elif type in ['audio/mp4']:
type_info = "MPEG-4 audio (%s)" % type
elif type in ['audio/mpeg']:
type_info = "MP3 (%s)" % type
elif type in ['image/jpeg']:
type_info = "JPEG Image (%s)" % type
elif type in ['image/png']:
type_info = "Portable Network Graphics (%s)" % type
elif type in ['image/gif']:
type_info = "Graphics Interchange Format (%s)" % type
else:
type_info = "Unknown type (%s)" % type
maybe_print("Site: ", site_info)
maybe_print("Title: ", unescape_html(tr(title)))
print("Type: ", type_info)
print("Size: ", round(size / 1048576, 2), "MiB (" + str(size) + " Bytes)")
print()
def mime_to_container(mime):
mapping = {
'video/3gpp': '3gp',
'video/mp4': 'mp4',
'video/webm': 'webm',
'video/x-flv': 'flv',
}
if mime in mapping:
return mapping[mime]
else:
return mime.split('/')[1]
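# --- Added illustration (not part of the original module) ---
# Hedged examples of mime_to_container(); unknown MIME types fall back to the
# subtype after the slash. Defined but never called.
def _mime_to_container_demo():
    assert mime_to_container('video/mp4') == 'mp4'
    assert mime_to_container('video/x-flv') == 'flv'
    assert mime_to_container('audio/mpeg') == 'mpeg'  # not in the mapping, falls back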
def parse_host(host):
"""Parses host name and port number from a string.
"""
if re.match(r'^(\d+)$', host) is not None:
return ("0.0.0.0", int(host))
if re.match(r'^(\w+)://', host) is None:
host = "//" + host
o = parse.urlparse(host)
hostname = o.hostname or "0.0.0.0"
port = o.port or 0
return (hostname, port)
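# --- Added illustration (not part of the original module) ---
# Hedged examples of parse_host(); the addresses are placeholders. Defined but never called.
def _parse_host_demo():
    assert parse_host('8080') == ('0.0.0.0', 8080)  # a bare port number
    assert parse_host('127.0.0.1:1080') == ('127.0.0.1', 1080)
    assert parse_host('socks5://proxy.local:1080') == ('proxy.local', 1080)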
def set_proxy(proxy):
proxy_handler = request.ProxyHandler({
'http': '%s:%s' % proxy,
'https': '%s:%s' % proxy,
})
opener = request.build_opener(proxy_handler)
request.install_opener(opener)
def unset_proxy():
proxy_handler = request.ProxyHandler({})
opener = request.build_opener(proxy_handler)
request.install_opener(opener)
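# --- Added illustration (not part of the original module) ---
# Hedged sketch of the set_proxy()/unset_proxy() pair: set_proxy() expects a
# (host, port) tuple such as the one parse_host() returns. The proxy address is a
# made-up example; the function is defined but never called.
def _proxy_demo():
    set_proxy(parse_host('127.0.0.1:8118'))
    try:
        pass  # ... perform downloads through the proxy here ...
    finally:
        unset_proxy()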
# DEPRECATED in favor of set_proxy() and unset_proxy()
def set_http_proxy(proxy):
if proxy == None: # Use system default setting
proxy_support = request.ProxyHandler()
elif proxy == '': # Don't use any proxy
proxy_support = request.ProxyHandler({})
else: # Use proxy
proxy_support = request.ProxyHandler({'http': '%s' % proxy, 'https': '%s' % proxy})
opener = request.build_opener(proxy_support)
request.install_opener(opener)
def download_main(download, download_playlist, urls, playlist, **kwargs):
for url in urls:
if url.startswith('https://'):
url = url[8:]
if not url.startswith('http://'):
url = 'http://' + url
if playlist:
download_playlist(url, **kwargs)
else:
download(url, **kwargs)
def script_main(script_name, download, download_playlist, **kwargs):
def version():
log.i('version %s, a tiny downloader that scrapes the web.'
% get_version(kwargs['repo_path']
if 'repo_path' in kwargs else __version__))
logging.basicConfig(format='[%(levelname)s] %(message)s')
help = 'Usage: %s [OPTION]... [URL]...\n\n' % script_name
help += '''Startup options:
-V | --version Print version and exit.
-h | --help Print help and exit.
\n'''
help += '''Dry-run options: (no actual downloading)
-i | --info Print extracted information.
-u | --url Print extracted information with URLs.
--json Print extracted URLs in JSON format.
\n'''
help += '''Download options:
-n | --no-merge Do not merge video parts.
--no-caption Do not download captions.
(subtitles, lyrics, danmaku, ...)
-f | --force Force overwriting existing files.
-F | --format <STREAM_ID> Set video format to STREAM_ID.
-O | --output-filename <FILE> Set output filename.
-o | --output-dir <PATH> Set output directory.
-p | --player <PLAYER [OPTIONS]> Stream extracted URL to a PLAYER.
-c | --cookies <COOKIES_FILE> Load cookies.txt or cookies.sqlite.
-x | --http-proxy <HOST:PORT> Use an HTTP proxy for downloading.
-y | --extractor-proxy <HOST:PORT> Use an HTTP proxy for extracting only.
--no-proxy Never use a proxy.
-s | --socks-proxy <HOST:PORT> Use a SOCKS5 proxy for downloading.
-t | --timeout <SECONDS> Set socket timeout.
-d | --debug Show traceback and other debug info.
'''
short_opts = 'Vhfiuc:ndF:O:o:p:x:y:s:t:'
opts = ['version', 'help', 'force', 'info', 'url', 'cookies', 'no-caption', 'no-merge', 'no-proxy', 'debug', 'json', 'format=', 'stream=', 'itag=', 'output-filename=', 'output-dir=', 'player=', 'http-proxy=', 'socks-proxy=', 'extractor-proxy=', 'lang=', 'timeout=']
if download_playlist:
short_opts = 'l' + short_opts
opts = ['playlist'] + opts
try:
opts, args = getopt.getopt(sys.argv[1:], short_opts, opts)
except getopt.GetoptError as err:
log.e(err)
log.e("try 'you-get --help' for more options")
sys.exit(2)
global force
global dry_run
global json_output
global player
global extractor_proxy
global cookies
global output_filename
info_only = False
playlist = False
caption = True
merge = True
stream_id = None
lang = None
output_dir = '.'
proxy = None
socks_proxy = None
extractor_proxy = None
traceback = False
timeout = 600
for o, a in opts:
if o in ('-V', '--version'):
version()
sys.exit()
elif o in ('-h', '--help'):
version()
print(help)
sys.exit()
elif o in ('-f', '--force'):
force = True
elif o in ('-i', '--info'):
info_only = True
elif o in ('-u', '--url'):
dry_run = True
elif o in ('--json', ):
json_output = True
# to fix extractors that do not use VideoExtractor
dry_run = True
info_only = False
elif o in ('-c', '--cookies'):
try:
cookies = cookiejar.MozillaCookieJar(a)
cookies.load()
except:
import sqlite3
cookies = cookiejar.MozillaCookieJar()
con = sqlite3.connect(a)
cur = con.cursor()
try:
cur.execute("SELECT host, path, isSecure, expiry, name, value FROM moz_cookies")
for item in cur.fetchall():
c = cookiejar.Cookie(0, item[4], item[5],
None, False,
item[0],
item[0].startswith('.'),
item[0].startswith('.'),
item[1], False,
item[2],
item[3], item[3]=="",
None, None, {})
cookies.set_cookie(c)
except: pass
# TODO: Chromium Cookies
# SELECT host_key, path, secure, expires_utc, name, encrypted_value FROM cookies
# http://n8henrie.com/2013/11/use-chromes-cookies-for-easier-downloading-with-python-requests/
elif o in ('-l', '--playlist'):
playlist = True
elif o in ('--no-caption',):
caption = False
elif o in ('-n', '--no-merge'):
merge = False
elif o in ('--no-proxy',):
proxy = ''
elif o in ('-d', '--debug'):
traceback = True
# Set level of root logger to DEBUG
logging.getLogger().setLevel(logging.DEBUG)
elif o in ('-F', '--format', '--stream', '--itag'):
stream_id = a
elif o in ('-O', '--output-filename'):
output_filename = a
elif o in ('-o', '--output-dir'):
output_dir = a
elif o in ('-p', '--player'):
player = a
caption = False
elif o in ('-x', '--http-proxy'):
proxy = a
elif o in ('-s', '--socks-proxy'):
socks_proxy = a
elif o in ('-y', '--extractor-proxy'):
extractor_proxy = a
elif o in ('--lang',):
lang = a
elif o in ('-t', '--timeout'):
timeout = int(a)
else:
log.e("try 'you-get --help' for more options")
sys.exit(2)
if not args:
print(help)
sys.exit()
if (socks_proxy):
try:
import socket
import socks
socks_proxy_addrs = socks_proxy.split(':')
socks.set_default_proxy(socks.SOCKS5,
socks_proxy_addrs[0],
int(socks_proxy_addrs[1]))
socket.socket = socks.socksocket
def getaddrinfo(*args):
return [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
socket.getaddrinfo = getaddrinfo
except ImportError:
log.w('Error importing PySocks library, socks proxy ignored. '
'In order to use a socks proxy, please install PySocks.')
else:
import socket
set_http_proxy(proxy)
socket.setdefaulttimeout(timeout)
try:
if stream_id:
if not extractor_proxy:
download_main(download, download_playlist, args, playlist, stream_id=stream_id, output_dir=output_dir, merge=merge, info_only=info_only, json_output=json_output, caption=caption)
else:
download_main(download, download_playlist, args, playlist, stream_id=stream_id, extractor_proxy=extractor_proxy, output_dir=output_dir, merge=merge, info_only=info_only, json_output=json_output, caption=caption)
else:
if not extractor_proxy:
download_main(download, download_playlist, args, playlist, output_dir=output_dir, merge=merge, info_only=info_only, json_output=json_output, caption=caption)
else:
download_main(download, download_playlist, args, playlist, extractor_proxy=extractor_proxy, output_dir=output_dir, merge=merge, info_only=info_only, json_output=json_output, caption=caption)
except KeyboardInterrupt:
if traceback:
raise
else:
sys.exit(1)
except UnicodeEncodeError:
log.e('[error] oops, the current environment does not seem to support Unicode.')
log.e('please set it to a UTF-8-aware locale first,')
log.e('so as to save the video (with some Unicode characters) correctly.')
log.e('you can do it like this:')
log.e(' (Windows) % chcp 65001 ')
log.e(' (Linux) $ LC_CTYPE=en_US.UTF-8')
sys.exit(1)
except Exception:
if not traceback:
log.e('[error] oops, something went wrong.')
log.e('don\'t panic, c\'est la vie. please try the following steps:')
log.e(' (1) Rule out any network problem.')
log.e(' (2) Make sure you-get is up-to-date.')
log.e(' (3) Check if the issue is already known, on')
log.e(' https://github.com/soimort/you-get/wiki/Known-Bugs')
log.e(' https://github.com/soimort/you-get/issues')
log.e(' (4) Run the command with \'--debug\' option,')
log.e(' and report this issue with the full output.')
else:
version()
log.i(args)
raise
sys.exit(1)
def google_search(url):
keywords = r1(r'https?://(.*)', url)
url = 'https://www.google.com/search?tbm=vid&q=%s' % parse.quote(keywords)
page = get_content(url, headers=fake_headers)
videos = re.findall(r'<a href="(https?://[^"]+)" onmousedown="[^"]+">([^<]+)<', page)
vdurs = re.findall(r'<span class="vdur _dwc">([^<]+)<', page)
durs = [r1(r'(\d+:\d+)', unescape_html(dur)) for dur in vdurs]
print("Google Videos search:")
for v in zip(videos, durs):
print("- video: %s [%s]" % (unescape_html(v[0][1]),
v[1] if v[1] else '?'))
print("# you-get %s" % log.sprint(v[0][0], log.UNDERLINE))
print()
print("Best matched result:")
return(videos[0][0])
def url_to_module(url):
try:
video_host = r1(r'https?://([^/]+)/', url)
video_url = r1(r'https?://[^/]+(.*)', url)
assert video_host and video_url
except:
url = google_search(url)
video_host = r1(r'https?://([^/]+)/', url)
video_url = r1(r'https?://[^/]+(.*)', url)
if video_host.endswith('.com.cn'):
video_host = video_host[:-3]
domain = r1(r'(\.[^.]+\.[^.]+)$', video_host) or video_host
assert domain, 'unsupported url: ' + url
k = r1(r'([^.]+)', domain)
if k in SITES:
return import_module('.'.join(['you_get', 'extractors', SITES[k]])), url
else:
import http.client
conn = http.client.HTTPConnection(video_host)
conn.request("HEAD", video_url, headers=fake_headers)
res = conn.getresponse()
location = res.getheader('location')
if location and location != url and not location.startswith('/'):
return url_to_module(location)
else:
return import_module('you_get.extractors.universal'), url
def any_download(url, **kwargs):
m, url = url_to_module(url)
m.download(url, **kwargs)
def any_download_playlist(url, **kwargs):
m, url = url_to_module(url)
m.download_playlist(url, **kwargs)
def main(**kwargs):
script_main('you-get', any_download, any_download_playlist, **kwargs)
|
[
"rodrigosoaresilva@gmail.com"
] |
rodrigosoaresilva@gmail.com
|
88fa0f7981f3210b9998f330762fc45808b2a807
|
8a5444d37a9d926bd38261689f6c0e3f477961bb
|
/ExportToDWGsExportOptions.py
|
c43572875815b46df7395c5e0ab63fba423fb955
|
[
"MIT"
] |
permissive
|
tuskin40/pyDynamo
|
6d27dead162685f96a894e8fcb963c880e73f80d
|
550e105ec27c29e9055c16b46e0b8ecc0960421b
|
refs/heads/master
| 2023-08-02T11:06:28.691378
| 2021-09-29T02:34:42
| 2021-09-29T02:34:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 940
|
py
|
__author__ = 'Danny Bentley - danny_bentley@hotmail.com'
__twitter__ = '@danbentley'
__Website__ = 'http://dannybentley.tech/'
__version__ = '1.0.0'
import clr
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
clr.AddReference('RevitServices')
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
clr.AddReference("RevitAPI")
import Autodesk
from Autodesk.Revit.DB import *
doc = DocumentManager.Instance.CurrentDBDocument
options = None
#check if the dwg export setting matches this name
dwg_opt = "- SOM Struc Export"
#collect all the settings in your project.
settings = FilteredElementCollector(doc).WherePasses(ElementClassFilter(ExportDWGSettings))
for element in settings:
if element.Name == dwg_opt:
options = element.GetDWGExportOptions()
break
if options is None:
options = DWGExportOptions()
OUT = options
|
[
"danny.bentley@som.com"
] |
danny.bentley@som.com
|
4cb683ce0b6fa1bdbf35d8661b4e1bb6cf8b8627
|
18c6073d1d9e1a1e22a1c5f734377ebc5ceb4b40
|
/stock_predictor/stockprediction/models.py
|
3ed79fd2f70c2a0aaeeb453053f51b5b82c21dcf
|
[] |
no_license
|
shashankgd/mokshtech
|
734846d7a1466385c42bda36f705c32918e61e60
|
f533de0fe99a9646413a5ed48bdeeb55a11578a9
|
refs/heads/master
| 2020-04-05T21:48:23.594195
| 2018-11-05T01:21:39
| 2018-11-05T01:21:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,415
|
py
|
import numpy as np
from keras.layers.core import Dense, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from sklearn import neighbors
from sklearn import svm
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
import property as p
def buildModel(X_train, X_test, y_train, y_test, forcast_scaled, method):
"""
Build final model for predicting real testing data
"""
if method == 'RNN':
regressor, MSE, X_train, X_test, y_train, y_test, forcast_scaled= performRNNlass(X_train, X_test, y_train, y_test, forcast_scaled)
print(method,MSE)
return regressor,MSE,X_train, X_test, y_train, y_test,forcast_scaled
elif method == 'RF':
regressor, MSE =performRFR(X_train, X_test, y_train, y_test)
print(method,MSE)
return regressor,MSE,X_train, X_test, y_train, y_test,forcast_scaled
elif method == 'SVR':
regressor, MSE = performSVR(X_train, X_test, y_train, y_test)
print(method,MSE)
return regressor,MSE,X_train,X_test,y_train , y_test,forcast_scaled
elif method == 'KNN':
clf = neighbors.KNeighborsClassifier()
return
elif method == 'ADA':
clf = AdaBoostClassifier()
return
def performRFR(X_train, X_test, y_train, y_test):
print('rfr1',X_train.shape,X_test.shape, y_train.shape,y_test.shape)
seed = p.seed
num_trees = p.n_estimators
n_splits=p.n_splits
njobs=p.n_jobs
model = RandomForestRegressor(n_estimators=num_trees, n_jobs=njobs)
model.fit(X_train, y_train)
MSE = mse_error(y_test,X_test,model)
return model, MSE
def performRNNlass(X_train, X_test, y_train, y_test, forcast_scaled):
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
forcast_scaled = np.reshape(forcast_scaled, (forcast_scaled.shape[0], forcast_scaled.shape[1], 1))
regressor= Sequential()
dropoutunit=p.dropoutunit
LSTM_unit_increment = p.LSTM_unit_increment
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
regressor.add(Dropout(dropoutunit))
LSTM_units = 50
LSTM_units = LSTM_units + LSTM_unit_increment
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units=LSTM_units, return_sequences=True))
regressor.add(Dropout(dropoutunit))
# Adding a third LSTM layer and some Dropout regularisation
LSTM_units = LSTM_units + LSTM_unit_increment
regressor.add(LSTM(units=LSTM_units, return_sequences=True))
regressor.add(Dropout(dropoutunit))
# Adding a fifth LSTM layer and some Dropout regularisation
LSTM_units = LSTM_units + LSTM_unit_increment
regressor.add(LSTM(units=LSTM_units))
regressor.add(Dropout(dropoutunit))
# print(X_train.shape,y_train.shape)
# Adding the output layer
regressor.add(Dense(units=1))
# Compiling the RNN
regressor.compile(optimizer='adam', loss='mean_squared_error')
# Fitting the RNN to the Training set
regressor.fit(X_train, y_train, epochs=p.epochs, batch_size=p.batch_size)
print('rnn model build',X_test.shape)
score = regressor.evaluate(X_test, y_test, batch_size=100, verbose=0)
return regressor,score,X_train, X_test, y_train, y_test,forcast_scaled
def performSVR(X_train, X_test, y_train, y_test):
model = svm.SVR(kernel='rbf', C=1e3, gamma=0.5,epsilon=p.epsilon)
model.fit(X_train, y_train)
MSE = mse_error(y_test,X_test,model)
return model, MSE
def performKNNClass(X_train, y_train, X_test, y_test, parameters):
"""
KNN binary Classification
"""
clf = neighbors.KNeighborsClassifier(parameters[0])
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
return accuracy
def performAdaBoostClass(X_train, y_train, X_test, y_test, forcast):
"""
Ada Boosting binary Classification
"""
# n = parameters[0]
# l = parameters[1]
clf = AdaBoostClassifier()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
return accuracy
def mse_error(ytest,xtest,model):
return mean_squared_error(ytest, model.predict(xtest))
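# --- Added illustration (not part of the original project) ---
# Hedged sketch exercising mse_error() on synthetic data with a small random
# forest, just to show the (y_test, X_test, fitted model) argument order.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X, y = rng.rand(100, 5), rng.rand(100)
    demo_model = RandomForestRegressor(n_estimators=10).fit(X[:80], y[:80])
    print('MSE on synthetic data:', mse_error(y[80:], X[80:], demo_model))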
|
[
"34120423+shashankgd@users.noreply.github.com"
] |
34120423+shashankgd@users.noreply.github.com
|
0c479cef93df6818cbeead1bcb7fcd72addeaa01
|
6ce41ca757269d8dff0ebac9aa88b098b4328a4e
|
/Data_Connect/MySQL/mysql_to_redis.py
|
84e4ee2e55dd30db6ea0bf0e278b800fe058ccde
|
[] |
no_license
|
HankTsai/Smart__Fridge
|
e7abcc0017880b59e4d61c6095064c0a3ab38a9a
|
ab850517c38d06db45e8d71dcab2f849578f5ba7
|
refs/heads/master
| 2023-01-13T20:01:58.820911
| 2020-11-30T15:40:21
| 2020-11-30T15:40:21
| 312,780,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,199
|
py
|
import redis
import pymysql
from user_db_api import DataBaseConnector
'''
# This is a sample/template
# Convert each user's fridge data into a dict and write it into Redis
user_id_list = ["user1","user2","user3","user4"]
user_refri_dict = [{"蘋果":"1,顆,2020/10/23",
"香蕉":"2,根,2020/10/23",
"芭樂":"5,個,2020/10/24",
"蓮霧":"7,個,2020/10/21",
"雞蛋":"3,個,2020/10/24",
"番茄":"10,顆,2020/10/24",
"豬肉":"200,gram,2020/10/19"},
{"洋蔥":"12,顆,2020/10/23",
"茄子":"2,根,2020/10/23",
"九層塔":"5,個,2020/10/24",
'高麗菜':"7,個,2020/10/21",
"雞蛋":"3,個,2020/10/24",
'大蒜':"10,顆,2020/10/24",
"牛肉":"200,gram,2020/10/19"},
{'咖啡': '391,gram,2020/10/20',
'地瓜葉': '221,gram,2020/10/20',
'小麥胚芽粉': '142,gram,2020/10/20',
'海苔': '360,gram,2020/10/20',
'牛腩': '274,gram,2020/10/20',
'脆瓜': '133,gram,2020/10/20',
'葵瓜子': '473,gram,2020/10/20',
'蒜酥': '260,gram,2020/10/20'},
{'丸子': '179,gram,2020/10/20',
'乳酸菌飲品': '389,gram,2020/10/20',
'榴槤': '488,gram,2020/10/20',
'白菜': '455,gram,2020/10/20',
'紅茶': '366,gram,2020/10/20',
'花生': '399,gram,2020/10/20',
'蒜苗': '206,gram,2020/10/20',
'豆花': '361,gram,2020/10/20',
'豬腱肉': '353,gram,2020/10/20',
'醬瓜': '209,gram,2020/10/20'}
]
for user_id,user_refri in zip(user_id_list,user_refri_dict):
# establish user_id set
r.sadd("userid",user_id)
# establish user refrigerator hash
r.hmset(user_id,user_refri)
'''
'''
from mysql to redis
select
userid -> set
user_refrigerator -> hash
synonym word table -> hash
synonym -> 雞胸:"雞肉" (chicken breast -> "chicken")
烏骨雞:"雞肉" (silkie chicken -> "chicken")
insert
1.ingredient storage record (when kafka consumer get data)
2.user profile + line_id (when questioner completed)
3.user like recipe record (when user has ordered a recipe)
update
ingredient take out record (when user has ordered a recipe)
'''
'''
MySQL will ultimately build three basic keys in Redis, plus one hash per user keyed by that user's Line ID:
"synonym" , data type: hash
"general_ingredient" , data type: hash
"total_user_id" , data type: hash
"U429ec102b46a5332b32c4f1a8b3b04db" , data type: hash
...
'''
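# --- Added illustration (not part of the original project) ---
# Hedged sketch of the Redis layout described above, written with redis-py; the
# field names and values are made-up examples. Defined but never called.
def _redis_layout_demo(r):
    r.hset('synonym', 'chicken breast', 'chicken')  # term -> canonical ingredient
    r.hset('general_ingredient', 'chicken', 17)  # ingredient name -> ingredient ID
    r.hset('total_user_id', 'U0123456789abcdef', 'demo_user')  # Line ID -> internal user ID
    r.hset('U0123456789abcdef', 'chicken', '200,gram,2020/10/20')  # per-user fridge hash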
def ingredient_load(db):
# Fetch synonyms from the MySQL synonym table and store them in Redis (hash 'synonym': term (field) -> canonical ingredient name (value))
sql = """
SELECT s.總食材名稱,i.食材名稱 FROM synonym s JOIN ingredient i ON s.食材ID = i.食材ID;
"""
# Fetch ingredients from MySQL and store them in Redis (hash 'general_ingredient': ingredient name (field) -> ingredient ID (value))
sql2 = """
SELECT 食材ID, 食材名稱 FROM ingredient;
"""
db.cursor.execute(sql)
synonym_redis = db.cursor.fetchall()
db.cursor.execute(sql2)
ingredient_redis = db.cursor.fetchall()
meaning_dict = dict()
for each in synonym_redis:
meaning_dict[each[0]] = each[1]
db.redis.hmset('synonym', meaning_dict)
food_dict = dict()
for each in ingredient_redis:
food_dict[each[1]] = each[0]
db.redis.hmset('general_ingredient', food_dict)
def user_id_table(db):
sql = '''
SELECT `使用者ID`, `Line_ID` from recipe.user_profile;'''
db.cursor.execute(sql)
user_info = db.cursor.fetchall()
# (('Ryan', 'U429ec102b46a5332b32c4f1a8b3b04db'),)
user_table = {}
for user in user_info:
user_table[user[1]] = user[0]
db.redis.hmset("total_user_id", user_table)
def user_data_load(db):
# Fetch every refrigerator record that has not yet been taken out
sql = '''
SELECT us.Line_ID, ing.食材名稱, re.食材重量, re.食材單位, re.食材存放日, re.食材到期日
FROM refrigerator_record re JOIN ingredient ing JOIN user_profile us
ON re.食材ID = ing.食材ID AND re.使用者ID = us.使用者ID
WHERE (re.食材取用日 is null);
'''
db.cursor.execute(sql)
refrigerator_record = db.cursor.fetchall()
if refrigerator_record:
for row in refrigerator_record:
ingredient_name = row[1]
ingredient_info = str(row[2]) + "," + row[3] + "," + str(row[4])
db.redis.hset(row[0], ingredient_name, ingredient_info)
def main():
db = DataBaseConnector()
try:
ingredient_load(db)
except Exception as e:
print(f"ingredient_load failed, Error: {e}")
try:
user_id_table(db)
except Exception as e:
print(f"user_id_table failed, Error: {e}")
try:
user_data_load(db)
except Exception as e:
print(f"user_data_load failed, Error: {e}")
db.cursor.close()
db.mysql.close()
print("MySQL loaded data to Redis successfully.")
if __name__ == '__main__':
main()
|
[
"hank_black@icloud.com"
] |
hank_black@icloud.com
|
67c8f6e68f42cf14fa5dda19c602fbd7976c47fc
|
b61efe2686feb44c5b0d2fb3094dd2ea94e6ca93
|
/src/control_decision_4.py
|
be6dc49f088a3f399c8bf5df9b0a6c7de0b509ca
|
[] |
no_license
|
idrissahil/bat_wifi_exploration
|
888f0f7243cc4bedeba6fe8d702762e6e2ad5da9
|
5a1bc74c1b35360d21d01e5e2a721b38fb380ac8
|
refs/heads/master
| 2020-05-31T16:38:49.118742
| 2019-06-29T14:03:28
| 2019-06-29T14:03:28
| 190,386,321
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,239
|
py
|
#! /usr/bin/env python
import rospy
import math
from sensor_msgs.msg import BatteryState
from geometry_msgs.msg import Twist, PoseArray, Pose, PoseStamped
rospy.init_node('control_decision_drone')
control_decision_pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=1)
state=1
curr_pos = [0,0,0]
rrt_list=[]
index=0
def callback_gps(gps):
global curr_pos
global rrt_list
global state
global index
curr_pos[0] = gps.pose.position.x
curr_pos[1] = gps.pose.position.y
curr_pos[2] = gps.pose.position.z
if state==1:
print(state)
#curr_pos[0]=gps.pose.position.x
#curr_pos[1]=gps.pose.position.y
#curr_pos[2]=gps.pose.position.z
if len(rrt_list)>1:
state=2
print(state)
dist_point = math.sqrt(math.pow(rrt_list[index].position.x - curr_pos[0], 2)+math.pow(rrt_list[index].position.y - curr_pos[1], 2)+math.pow(rrt_list[index].position.z - curr_pos[2], 2))
if dist_point<0.3:
index=index+1
if index==len(rrt_list):
index=index-1
curr_position=PoseStamped()
#hold_position.pose.position.x= 0
#hold_position.pose.position.y = 14
#hold_position.pose.position.z= 1
curr_position.pose.position.x= rrt_list[index].position.x
curr_position.pose.position.y= rrt_list[index].position.y
curr_position.pose.position.z= rrt_list[index].position.z
curr_position.header.frame_id = "map"
control_decision_pub.publish(curr_position)
def callback_battery(rrt):
global state
global curr_pos
global rrt_list
rrt_list=rrt.poses
def callback_exploration(explore):
global state
global exploration_point_x
exploration_point_x = explore.pose.position.x
print(state)
if state ==1:
control_decision_pub.publish(explore)
def main():
exploration_sub = rospy.Subscriber('/mavros/setpoint_position/local1', PoseStamped, callback_exploration)
battery_sub = rospy.Subscriber('visual_marker_rrt', PoseArray, callback_battery)
gps_sub = rospy.Subscriber('/mavros/local_position/pose', PoseStamped, callback_gps)
rospy.spin()
if __name__ == '__main__':
main()
|
[
"idrissahil3@gmail.com"
] |
idrissahil3@gmail.com
|
7459710f51cc4fc67c81c661b1cdbb49a98825ab
|
4e4171d9e94dd44b98b7010d86fd31b8a3c8c33e
|
/bb8/template.py
|
43fd985415ae80cd75cb7d1068de791347c6026e
|
[] |
no_license
|
thongdong7/bb8
|
1582d4f4bde06f17b95410c7ae67647189036744
|
2b771bd12584250456b1fc5b27ceeb37c55bbb14
|
refs/heads/master
| 2021-06-17T10:25:30.520708
| 2016-11-08T08:08:54
| 2016-11-08T08:08:54
| 59,622,849
| 0
| 0
| null | 2021-03-25T21:27:51
| 2016-05-25T02:01:40
|
Python
|
UTF-8
|
Python
| false
| false
| 261
|
py
|
from jinja2 import Template
class TemplateEngine(object):
def __init__(self):
self.params = {}
def load_params(self, params):
self.params.update(params)
def render(self, text):
return Template(text).render(**self.params)
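# --- Added illustration (not part of the original module) ---
# Hedged usage sketch: load parameters once, then render any Jinja2 template text.
if __name__ == '__main__':
    engine = TemplateEngine()
    engine.load_params({'name': 'world'})
    print(engine.render('Hello {{ name }}!'))  # -> Hello world!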
|
[
"thongdong7@gmail.com"
] |
thongdong7@gmail.com
|
20b930f94ee43cd25a74f1887b8c7cef39c6b5ef
|
bc51fcd3fbea140dd7c47da83881aee5dbeb607a
|
/awscli/style.py
|
4ad34b34667ea227f795aa70ae7f0ee249838959
|
[
"Apache-2.0"
] |
permissive
|
hiroakis/aws-cli
|
46a3fce37e2e5dd86d807856a457f6a8643e0c4d
|
2f44552beb48ba02f2e7e1f410ee264bfe94a04c
|
refs/heads/master
| 2021-01-23T22:30:18.951608
| 2013-02-22T00:33:52
| 2013-02-22T00:33:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,732
|
py
|
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import textwrap
from six.moves import cStringIO
class BaseStyle(object):
def __init__(self, doc, indent_width=4, **kwargs):
self.doc = doc
self.indent_width = indent_width
self.kwargs = kwargs
self.keep_data = True
def spaces(self, indent):
return ' ' * (indent * self.indent_width)
def start_bold(self, attrs=None):
return ''
def end_bold(self):
return ''
def bold(self, s):
return self.start_bold() + s + self.end_bold()
def h2(self, s):
return self.bold(s)
def h3(self, s):
return self.bold(s)
def start_underline(self, attrs=None):
return ''
def end_underline(self):
return ''
def underline(self, s):
return self.start_underline() + s + self.end_underline()
def start_italics(self, attrs=None):
return ''
def end_italics(self):
return ''
def italics(self, s):
return self.start_italics() + s + self.end_italics()
def start_p(self, attrs=None):
self.doc.add_paragraph()
def end_p(self):
return ''
def start_code(self, attrs=None):
self.doc.do_translation = True
return self.start_bold(attrs)
def end_code(self):
self.doc.do_translation = False
return self.end_bold()
def start_a(self, attrs=None):
self.doc.do_translation = True
return self.start_underline()
def end_a(self):
self.doc.do_translation = False
return self.end_underline()
def start_i(self, attrs=None):
self.doc.do_translation = True
return self.start_italics()
def end_i(self):
self.doc.do_translation = False
return self.end_italics()
def start_li(self, attrs):
return ''
def end_li(self):
return ''
def start_examples(self, attrs):
self.doc.keep_data = False
def end_examples(self):
self.doc.keep_data = True
class CLIStyle(BaseStyle):
def start_bold(self, attrs=None):
if self.kwargs.get('do_ansi', False):
return '\033[1m'
return ''
def end_bold(self):
if self.kwargs.get('do_ansi', False):
return '\033[0m'
return ''
def start_underline(self, attrs=None):
if self.kwargs.get('do_ansi', False):
return '\033[4m'
return ''
def end_underline(self):
if self.kwargs.get('do_ansi', False):
return '\033[0m'
return ''
def start_italics(self, attrs=None):
if self.kwargs.get('do_ansi', False):
return '\033[3m'
return ''
def end_italics(self):
if self.kwargs.get('do_ansi', False):
return '\033[0m'
return ''
def start_li(self, attrs=None):
para = self.doc.add_paragraph()
para.subsequent_indent = para.initial_indent + 1
para.write(' * ')
def h2(self, s):
para = self.doc.get_current_paragraph()
para.lines_before = 1
return self.bold(s)
def end_p(self):
para = self.doc.get_current_paragraph()
para.lines_after = 2
|
[
"mitch@garnaat.com"
] |
mitch@garnaat.com
|
8b151a74b1e4f2022e1042b3a62cfa99f382ba1b
|
f7f45300cb7ae8a2bb1c9db79f835a402d11da33
|
/orig_py_files/ex_generators.py
|
34b437f25bad8daa10c2ee0eadde5683b957cfac
|
[] |
no_license
|
rob-kistner/modern-python
|
d025eb6c26a0c5c16846086f59625867c92909f6
|
d23f7d2fb00f27255a11290deda8759346117a04
|
refs/heads/master
| 2021-05-25T22:50:40.063110
| 2020-09-02T20:06:08
| 2020-09-02T20:06:08
| 253,953,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 770
|
py
|
from printutils import *
big_banner("""
Exercise: Week Generator
------------------------
""")
# creates generator when run
def week():
weekdays = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday')
for day in weekdays:
yield day
weekgen = week()
# The commented-out lines below call next() on the function `week` rather than on
# the generator object `weekgen`, which is why they raise
# "'function' object is not an iterator"; next() must be given weekgen instead
# (see the working sketch after this block).
# print(weekgen)
# print(next(week))
# print(next(week))
# print(next(week))
# print(next(week))
# print(next(week))
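# --- Added working sketch ---
# Calling next() on the generator object returned by week() works as intended;
# weekgen_demo is a fresh generator, so the weekgen created above is left untouched.
weekgen_demo = week()
print(weekgen_demo)        # <generator object week at 0x...>
print(next(weekgen_demo))  # Monday
print(next(weekgen_demo))  # Tuesday
print(next(weekgen_demo))  # Wednesday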
# this one does work though
def yes_or_no():
answer = "yes"
while True:
yield answer
answer = "no" if answer == "yes" else "yes"
yn = yes_or_no()
print(yn)
print(next(yn))
print(next(yn))
print(next(yn))
print(next(yn))
|
[
"robkistner@mac.com"
] |
robkistner@mac.com
|
72883fcb5f9f5eef71f870b19b65d9612ea0ebf7
|
35fa64dbeb1dae686c703a5a96c33cc9df0dcf57
|
/djangogirls/djangogirls/settings.py
|
67c544a46171e2547d577016ad4049f0c17bc547
|
[] |
no_license
|
hanadevnyc/my-first-blog
|
acb545e0f01bc80880d95dcd794db8119c51a51b
|
680b68c4e692fe2508f0e1a31b892c3f8a3ab7cf
|
refs/heads/master
| 2021-01-10T11:35:18.417905
| 2016-02-27T20:37:06
| 2016-02-27T20:37:06
| 52,678,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,247
|
py
|
"""
Django settings for djangogirls project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h^61k8+ppx*-l#jhvgpe%8jdp#*m)8a4v#o5-43tnrc$kerbx8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangogirls.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangogirls.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'US/Eastern'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"hdickman@credibly.com"
] |
hdickman@credibly.com
|
1e47b7d55a3b5b0dd7d1bb4083416b058727c5c6
|
2b40312991aee831532551de4524e9a6182ad2dd
|
/auto_otc_confirm_chk.py
|
90344acd9554491c54c5542538618c0b02c4faf7
|
[] |
no_license
|
PyBack/AutoExcelData
|
58d86b49eb8a9708bf455336741c479e6b31b437
|
c1e33ffd6895ca610f3591672ca9c548a16f4a30
|
refs/heads/master
| 2021-06-06T10:57:40.514629
| 2020-07-01T06:37:06
| 2020-07-01T06:37:06
| 146,445,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,609
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import time
import datetime as dt
import logging
import logging.handlers
import getpass
import pandas as pd
import clipboard
import auto_helper as helper
import xlwings as xw
from handler_hnet import handle_hnet
from read_data_file import read_otc_termination_file
logger = logging.getLogger('AutoOTC.Termination')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
# fh = logging.FileHandler('AutoReport.log')
fh = logging.handlers.RotatingFileHandler('AutoOTC.log', maxBytes=104857, backupCount=3)
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(name)s %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handler to logger
logger.addHandler(fh)
# logger.addHandler(ch)
def get_confirm_isin_list_from_hnet(window_hnet=None):
if window_hnet is None:
logger.info('no handle of hnet')
return
if not window_hnet.Exists():
logger.info('no handle of hnet')
return
window_hnet.SetFocus()
# 30192: derivative-linked securities risk disclosure screen (파생결합증권위험고지)
sub_window_title = u'32802 파생결합증권상품정보'
sub_window = window_hnet[sub_window_title]
if not sub_window.Exists():
window_hnet.ClickInput(coords=(70, 70)) # Editor (# of sub_window)
clipboard.copy('30192')
helper.paste()
helper.press('enter')
time.sleep(0.5)
sub_window.Maximize()
sub_window.Restore()
sub_window.SetFocus()
msg = '== START of get_confirm_isin_code_list_from_hnet ==='
logger.info(msg)
sub_window.ClickInput(coords=(90, 15)) # 업무구분 (task category field)
for i in xrange(6):
helper.press('up_arrow')
for i in xrange(3):
helper.press('down_arrow')
helper.press('enter')
time.sleep(0.5)
helper.press('enter')
sub_window.RightClickInput(coords=(90, 140))
helper.press('up_arrow')
time.sleep(0.5)
helper.press('up_arrow')
time.sleep(0.5)
helper.press('enter')
time.sleep(0.5)
data_table = clipboard.paste()
data_table_rows = data_table.split("\n")
isin_code_list = list()
for row in data_table_rows:
column_list = row.split("\t")
if column_list[0] != u"상품코드" and len(column_list[0]) >= 10:
isin_code_list.append(column_list[0])
# print(column_list[0])
logger.info("data load->isin_code cnt: %d" % len(isin_code_list))
sub_window.Close()
msg = "== END of get_confirm_isin_code_list_from_hnet ==="
logger.info(msg)
return isin_code_list
def get_total_settle_list_from_hnet(window_hent=None, strdate=None):
if window_hent is None:
logger.info('no handle of hent...')
return
if not window_hent.Exists():
logger.info('no handle of hent...')
return
window_hent.SetFocus()
# 66305: consolidated schedule details 1 (통합스케쥴내역1)
sub_window_title = u'66305 통합스케쥴내역1'
sub_window = window_hent[sub_window_title]
if sub_window.Exists():
sub_window.Close()
window_hent.ClickInput(coords=(70, 70)) # Editor (# of sub_window)
clipboard.copy('66305')
helper.paste()
helper.press('enter')
time.sleep(0.5)
sub_window.Maximize()
sub_window.Restore()
sub_window.SetFocus()
msg = '== START of get_total_settle_list_from_hnet ==='
logger.info(msg)
sub_window.DoubleClickInput(coords=(90, 15)) # 조회기간 (query period)
for i in xrange(2):
for date_digit in strdate:
helper.press(date_digit)
sub_window.DoubleClickInput(coords=(90, 55)) # 종목종류 (product type)
for i in xrange(5):
helper.press('down_arrow')
for i in xrange(3):
helper.press('up_arrow')
helper.press('enter')
sub_window.DoubleClickInput(coords=(700, 55)) # 일괄조회 (batch query)
helper.press('enter')
time.sleep(15)
sub_window.ClickInput(coords=(90, 120)) # 자료 복사 (copy data)
helper.press('up_arrow')
time.sleep(1)
helper.press('up_arrow')
time.sleep(1)
helper.press('enter')
time.sleep(1)
data = clipboard.paste()
data = data.split("\r\n")
new_data_lst = []
for row in data:
row_lst = row.split('\t')
new_data_lst.append(row_lst)
df_data = pd.DataFrame(new_data_lst)
headers = df_data.iloc[0]
df_data = pd.DataFrame(df_data.values[1:], columns=headers)
# df_data.index = df_data[u'딜코드']
df_data.index = df_data[df_data.columns[5]]
sub_window.Close()
msg = '=== END of get_total_settle_list_from_hnet ==='
logger.info(msg)
return df_data
def get_target_product_data(excel_file_name='', strdate='', term='상환'):
if excel_file_name == '':
excel_file_name = u'OTC상환리스트.xlsx'
df = read_otc_termination_file(excel_file_name, strdate[:4] + "." + strdate[4:6])
if not strdate in df.index:
target_df = pd.DataFrame()
# target_df = df.iloc[-2:].copy()
return target_df
df = df.loc[strdate]
if len(df) == 0:
return df
elif isinstance(df, pd.Series):
df_new = pd.DataFrame(df)
df_new = df_new.transpose()
df = df_new.copy()
if term == '상환':
target_df = df[(df[u'구분'] != 'ELT') & (df[u'구분'] != 'DLT')]
target_df = target_df[(target_df[u'상환여부'] == u'만기상환') | (target_df[u'상환여부'] == u'조기상환')]
return target_df
elif term == '상환_ALL':
target_df = df.copy()
target_df = target_df[(target_df[u'상환여부'] == u'만기상환') | (target_df[u'상환여부'] == u'조기상환')]
return target_df
elif term == '미상환':
target_df = df.copy()
target_df = target_df[(target_df[u'상환여부'] == u'미상환')]
return target_df
else:
df = pd.DataFrame()
return df
pass
def chk_in_isin_list(target_df, isin_code_list):
# H.Net screen 30192 (derivative-linked securities risk disclosure, 파생결합증권위험고지), section 4: profit confirmation (수익확정)
msg = '=== START chk_in_isin_list %d ===' % (len(target_df))
logger.info(msg)
chk_in_list = []
chk_in_count = 0
for i in range(len(target_df)):
exp_date = target_df.iloc[i][u'상환예정일']
str_exp_date = u"%d-%02d-%02d" % (exp_date.year, exp_date.month, exp_date.day)
msg = u"%s %s %s %s %s " % (target_df.iloc[i][u'종목코드'],
target_df.iloc[i][u'구분'],
target_df.iloc[i][u'상환여부'],
str_exp_date,
target_df.iloc[i][u'수익구조'],
)
if target_df.iloc[i][u'종목코드'] in isin_code_list:
chk_in_count += 1
msg = msg + u" CHK_IN"
chk_in_list.append(target_df.iloc[i][u'종목코드'])
else:
msg = msg + u"CHK_OUT"
logger.info(msg)
for isin in chk_in_list:
isin_code_list.remove(isin)
msg = 'CHK_IN: %s CHK_OUT: %s' % (chk_in_count, len(target_df) - chk_in_count)
logger.info(msg)
if chk_in_count > 0:
msg = 'Must generate %d sms for termination H.Net #30192' % chk_in_count
logger.warning(msg)
msg = '=== END chk_in_isin_list ===='
logger.info(msg)
return isin_code_list
pass
def chk_isin_in_salesteam(window_hnet, isin_code_list, df_data):
# 32802: derivative-linked securities product information (파생결합증권상품정보)
sub_windwow_title = u'32802 파생결합증권상품정보'
sub_windwow = window_hnet[sub_windwow_title]
if not sub_windwow.Exists():
window_hnet.ClickInput(coords=(70, 70)) # Editor (# of sub_window)
clipboard.copy('32802')
helper.paste()
helper.press('enter')
time.sleep(0.5)
sub_windwow.Maximize()
sub_windwow.Restore()
sub_windwow.SetFocus()
msg = '== START of chk_isin_in_saleteam ==='
logger.info(msg)
df_data_sub = df_data[(df_data[u'Sales부서'] == u'PB') & (df_data[u'결제상태'] == u'최종확정')]
df_data_early = df_data_sub[(df_data_sub[u'일자구분'] == 'OBS') & (df_data_sub[u'Sched.Type'] == u'의무중도')]
df_data_mat = df_data_sub[(df_data_sub[u'일자구분'] == 'MAT')]
df_data_sub = df_data[(df_data[u'Sales부서'] == u'PB') & (df_data[u'결제상태'] == u'미입력')]
df_data_delay = df_data_sub[(df_data_sub[u'일자구분'] == 'OBS') & (df_data_sub[u'Sched.Type'] == u'의무중도')]
logger.info('Sched Early Term-> %d' % len(df_data_early))
logger.info('Sched Delay Term-> %d' % len(df_data_delay))
logger.info('Sched MAT Term-> %d' % len(df_data_mat))
for isin_code in isin_code_list:
sub_windwow.ClickInput(coords=(90, 35)) # 종목코드 (ISIN code field)
clipboard.copy(isin_code[2:])
helper.paste()
# helper.press('enter')
sub_windwow.ClickInput(coords=(775, 35)) # 조회 (query)
time.sleep(0.5)
sub_windwow.RightClickInput(coords=(110, 80)) # 딜코드
helper.press('up_arrow')
helper.press('up_arrow')
helper.press('enter')
deal_code = clipboard.paste()
if deal_code in list(df_data.index):
if len(df_data.loc[deal_code][u'Sales부서']) > 0:
sales_team = df_data.loc[deal_code][u'Sales부서'][0]
settle_state = df_data.loc[deal_code][u'결제상태'][0]
sched_type = df_data.loc[deal_code][u'Sched.Type'][0]
else:
sales_team = df_data.loc[deal_code][u'Sales부서']
settle_state = df_data.loc[deal_code][u'결제상태']
sched_type = df_data.loc[deal_code][u'Sched.Type']
msg = u"%s %s %s %s" % (isin_code, sales_team, settle_state, sched_type)
logger.info(msg)
else:
logger.info("%s not in list" % deal_code)
msg = '=== END of chk_isin_in_salesteam ==='
logger.info(msg)
def chk_isin_in_schedul_list(window_hnet, target_df, df_data):
# 32802 파생결합증권상품정보
sub_window_title = u'32802 파생결합증권상품정보'
sub_window = window_hnet[sub_window_title]
if not sub_window.Exists():
window_hnet.ClickInput(coords=(70, 70)) # Editor (# of sub_window)
clipboard.copy('32802')
helper.paste()
helper.press('enter')
time.sleep(0.5)
sub_window.Maximize()
sub_window.Restore()
sub_window.SetFocus()
msg = '== START of chk_isin_in_schedule_list ==='
logger.info(msg)
def main():
import argparse
now_dt = dt.datetime.now()
strdate = now_dt.strftime("%Y%m%d")
parser = argparse.ArgumentParser()
parser.add_argument('date',
type=lambda s: dt.datetime.strptime(s, "%Y%m%d").strftime("%Y%m%d"),
default=strdate,
help="Target Date",
nargs='?'
)
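# the lambda type round-trips the argument through strptime, so an invalid
# YYYYMMDD string fails argument parsing; e.g. (hypothetical invocation)
# `python this_script.py 20180503` sets args.date to '20180503'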
args = parser.parse_args()
logger.info("Target Date: %s" % args.date)
# pw = getpass.getpass("PWD: ")
# date_lst = ['20180212',
# '20180213',
# ]
# date_rng = pd.bdate_range('2018-05-17', '2018-07-01')
# date_lst = [d.strftime('%Y%m%d') for d in date_rng]
excel_file_name = u'OTC상환리스트.xlsx'
window_hnet = handle_hnet()
isin_code_list = get_confirm_isin_list_from_hnet(window_hnet)
target_df = get_target_product_data(excel_file_name, args.date)
if len(target_df) > 0:
target_df = target_df[[u'종목코드', u'상품명', u'구분', u'수익구조', u'상환여부', u'상환예정일']]
isin_code_list = chk_in_isin_list(target_df, isin_code_list)
if len(isin_code_list) == 0:
isin_code_list = list(target_df[u'종목코드'])
print(isin_code_list)
if len(isin_code_list) > 0:
df_data = get_total_settle_list_from_hnet(window_hnet, args.date)
chk_isin_in_salesteam(window_hnet, isin_code_list, df_data)
pass
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
PyBack.noreply@github.com
|
6ba85c1726abfdec9f1ff343711d9becab558a89
|
c45ddd5464d8e9415b41543cbe6fb2d1593c3b23
|
/exercicios_em_sala/exe05/exe05_client.py
|
9a943859703d364324ff13589bea641d51d84610
|
[] |
no_license
|
msouto/20172-redes-2v-programacao-redes
|
38eb59f269840ea7062af48816720e262e91d880
|
5da51989270ff1de35019c014f99ba0b007ef939
|
refs/heads/master
| 2021-01-19T16:02:39.679034
| 2017-12-18T20:27:58
| 2017-12-18T20:27:58
| 100,985,051
| 21
| 25
| null | 2017-11-20T20:14:49
| 2017-08-21T19:23:51
|
Python
|
UTF-8
|
Python
| false
| false
| 280
|
py
|
from jsonsocket import Client, Server
host = '127.0.0.1'
port = '8001'
# Client code:
client = Client()
client.connect(host, int(port)).send({'some_list': [123, 456]})
response = client.recv()
print(response)
# response now is {'data': {'some_list': [123, 456]}}
client.close()
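# Server-side counterpart (sketch only, not part of the exercise; assumes the
# jsonsocket Server API mirrors Client with accept/recv/send/close):
# server = Server(host, int(port))
# server.accept()
# data = server.recv()
# server.send({'data': data})
# server.close()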
|
[
"moises.souto@gmail.com"
] |
moises.souto@gmail.com
|
6bb7357e4c3c78a71da4398592fc78ff38a7ab5c
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/gaussiana/ch3_2020_09_14_14_36_41_642784.py
|
986bff292e3d397ff9a597fd31a1ee3912e49175
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
import math
def calcula_gaussiana (x,mu,sigma) :
f1 = 1/(sigma*math.sqrt(2*math.pi))
f2 = math.exp(-0.5*((x-mu)/sigma)**2)
y = f1*f2
return y
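# quick sanity check (illustrative, not part of the original exercise): the
# standard normal density at its mean equals 1/sqrt(2*pi) ~= 0.3989
# print(calcula_gaussiana(0, 0, 1))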
|
[
"you@example.com"
] |
you@example.com
|
e813294ef6a1fd27fd5b6a35d25c3055e06eb8fd
|
309963b86e666efceb3a816ca19ced70447d3d82
|
/crawl/test_url.py
|
6d6b805d036044512b3bc0976675029cc62ff533
|
[] |
no_license
|
player7450/ml-py
|
fd42cfa0248437ca8883702b3bd48df1771f36cc
|
0e10736498f0fe42431399ffd6980b9b5e1609c9
|
refs/heads/master
| 2021-09-03T08:16:42.011010
| 2018-01-07T12:26:52
| 2018-01-07T12:26:52
| 109,000,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
import urllib2
__author__ = 'liuzheng'
def test_url():
r = urllib2.urlopen("http://www.baidu.com");
url_text = r.read()
print url_text
def foo():
a = [1,2,3,4]
print a
a.append(5)
print a
b = tuple(a)
print b
c = (1,2,3,4)
if __name__=='__main__':
print 'hehe'
# test_url()
foo()
|
[
"lz109550@alibaba-inc.com"
] |
lz109550@alibaba-inc.com
|
91b306ecb2af69f0d6d781d57251266678f159f2
|
f8d3f814067415485bb439d7fe92dc2bbe22a048
|
/models/research/syntaxnet/dragnn/python/file_diff_test.py
|
9e9f1daa40a64ff9595724e30dbc95591ae299c2
|
[
"Apache-2.0"
] |
permissive
|
gmonkman/python
|
2f9ab8f159c01f6235c86cb0cd52062cd3fdedd3
|
9123aa6baf538b662143b9098d963d55165e8409
|
refs/heads/master
| 2023-04-09T15:53:29.746676
| 2022-11-26T20:35:21
| 2022-11-26T20:35:21
| 60,254,898
| 0
| 2
| null | 2023-03-24T22:58:39
| 2016-06-02T10:25:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Diff test that compares two files are identical."""
from absl import flags
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_string('actual_file', None, 'File to test.')
flags.DEFINE_string('expected_file', None, 'File with expected contents.')
class DiffTest(tf.test.TestCase):
def testEqualFiles(self):
content_actual = None
content_expected = None
try:
with open(FLAGS.actual_file) as actual:
content_actual = actual.read()
except IOError as e:
self.fail("Error opening '%s': %s" % (FLAGS.actual_file, e.strerror))
try:
with open(FLAGS.expected_file) as expected:
content_expected = expected.read()
except IOError as e:
self.fail("Error opening '%s': %s" % (FLAGS.expected_file, e.strerror))
self.assertTrue(content_actual == content_expected)
if __name__ == '__main__':
tf.test.main()
|
[
"gmonkman@mistymountains.biz"
] |
gmonkman@mistymountains.biz
|
73be6a781970e67e35078a012c9ece685a4f9bb3
|
36671c0625da3599bd2a6b3750bc837f159ac045
|
/Tools/NEOREC/EMGdecode.py
|
797624344376fd5453b25b56bb7ae64f1cdbbe91
|
[] |
no_license
|
KKaplun/MyoSynse
|
dcda7a9ca57359f332a5db6d007db533016b5041
|
b4225aeb6646631ce09954c966165d911959fcc5
|
refs/heads/master
| 2020-12-15T08:28:11.131237
| 2020-01-21T10:01:28
| 2020-01-21T10:01:28
| 235,046,231
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,369
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 26 21:15:26 2018
@author: Александр
"""
import scipy.io
import numpy as np
import h5py
import PNinterpolate
from EMGfilter import envelopeFilter
import tqdm
from matplotlib import pyplot as plt
class EMGDecoder:
def __init__(self):
self.emgFilter=envelopeFilter()
self.fitted=False
def loadParams(self, path=''):
self.Tail = 0
self.emg_buffer = 0
self.WienerCoordsBuffer = 0
self.KalmanCoordsBuffer = 0
self.emg_buffer_size = 500
if path:
filterParams = scipy.io.loadmat(path)
else:
filterParams = scipy.io.loadmat('filterParams.mat')
self.lag = int(filterParams['lag'])
self.forward = int(filterParams['forward'])
self.downsample = int(filterParams['downsample'])
self.A = filterParams['A']
self.W = filterParams['W']
self.Ex = filterParams['Ex']
self.Ez = filterParams['Ez']
self.P_after = np.copy(self.Ex)
self.P_before = np.empty((self.Ez.shape[0], self.Ez.shape[0]))
self.Kalman_estimate = np.empty((self.W.shape[0],1))
self.Wiener_Estimate = np.empty((self.W.shape[0],1))
self.fitted=True
def fit(self, X=None,Y=None,file='experiment_data.h5',numCh=64,offCh=64,Pn=[59,77,101,125],lag=2,forward=0,downsample=0):
self.numCh=numCh
self.offCh=offCh
self.Pn=Pn
self.lag=lag
self.forward=forward
self.downsample=downsample
if type(X)==type(None) or type(Y)==type(None):
#try and read file then
with h5py.File(file,'r+') as f1:
raw_data = np.array(f1['protocol1']['raw_data'])
Y=raw_data[:,[p+self.offCh for p in Pn]]
X=raw_data[:,:self.numCh]
del raw_data
#get the envelope of EMG data and interpolate PN to EMG samplerate
X=self.emgFilter.filterEMG(X)
Y=PNinterpolate.interpolatePN(Y)
def offset(data,lag,leftOffset,rightOffset=0):
return data[leftOffset+lag:data.shape[0]-rightOffset]
emg_lag=np.empty((X.shape[0]-2-self.lag-self.forward,numCh*(self.lag+1)));
for l in range(self.lag+1):
emg_lag[:,l*numCh:(l+1)*numCh]=X[(2+l):(X.shape[0]-self.lag-self.forward+l),:]
Coord=np.copy(Y)
Vel=np.apply_along_axis(np.diff,0,Coord)
Acc=np.apply_along_axis(np.diff,0,Vel)
Coords=np.hstack((np.apply_along_axis(offset,0,Coord,self.lag,2),np.apply_along_axis(offset,0,Vel,self.lag,1),np.apply_along_axis(offset,0,Acc,self.lag,0)))
EMG_signals=np.hstack((np.ones((emg_lag.shape[0],1)),emg_lag));
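# Wiener step: least-squares (pseudo-inverse) mapping from the lagged EMG
# features (plus a bias column) to [position, velocity, acceleration]; the
# residual covariance computed below is reused as the Kalman measurement noise Ez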
self.W = np.linalg.pinv(EMG_signals)
self.W = self.W @ Coords
Measurement_Error=Coords-EMG_signals @ self.W;
Measurement_Error_Covar=np.cov(Measurement_Error.T);
self.W = self.W.T;
Now=np.hstack((np.apply_along_axis(offset,0,Coord,self.lag,3),np.apply_along_axis(offset,0,Vel,self.lag,2),np.apply_along_axis(offset,0,Acc,self.lag,1)))
Lag=np.hstack((np.apply_along_axis(offset,0,Coord,self.lag,2,1),np.apply_along_axis(offset,0,Vel,self.lag,1,1),np.apply_along_axis(offset,0,Acc,self.lag,0,1)))
self.A=np.linalg.pinv(Lag) @ Now
State_Trans_Error=Now-Lag @ self.A
State_Trans_Covar=np.cov(State_Trans_Error.T)
self.A=self.A.T
self.Ex = State_Trans_Covar; # process noise
self.Ez = Measurement_Error_Covar; # measurement noise
self.P_after = np.copy(self.Ex)
self.P_before = np.empty((self.Ez.shape[0], self.Ez.shape[0]))
self.Kalman_estimate = np.empty((self.W.shape[0],1))
self.Wiener_Estimate = np.empty((self.W.shape[0],1))
self.fitted=True
scipy.io.savemat('filterParams.mat', mdict={'W': self.W, 'A':self.A, 'Ex': self.Ex, 'Ez':self.Ez,'lag':self.lag,'forward':self.forward,'downsample':self.downsample})
self.loadParams()
def evaluate(self,X=None,Y=None,file='experiment_data.h5', numCh=None,offCh=None,Pn=None,lag=None,forward=None,downsample=None):
numCh = self.numCh if type(numCh)==type(None) else numCh
offCh = self.offCh if type(offCh)==type(None) else offCh
Pn = self.Pn if type(Pn)==type(None) else Pn
lag = self.lag if type(lag)==type(None) else lag
forward = self.forward if type(forward)==type(None) else forward
downsample = self.downsample if type(downsample)==type(None) else downsample
if type(X)==type(None) or type(Y)==type(None):
#try and read file then
with h5py.File(file,'r+') as f1:
raw_data = np.array(f1['protocol1']['raw_data'])
Y=raw_data[:,[p+offCh for p in Pn]]
X=raw_data[:,:numCh]
del raw_data
#get the envelope of EMG data and interpolate PN to EMG samplerate
X=self.emgFilter.filterEMG(X)
Y=PNinterpolate.interpolatePN(Y)
emg_lag=np.empty((X.shape[0]-2-lag-forward,numCh*(lag+1)));
for l in range(lag+1):
emg_lag[:,l*numCh:(l+1)*numCh]=X[(2+l):(X.shape[0]-lag-forward+l),:]
Coord=np.copy(Y)
EMG_signals=np.hstack((np.ones((emg_lag.shape[0],1)),emg_lag))
WienerCoords=np.empty((EMG_signals.shape[0],self.Ex.shape[1]))
KalmanCoords=np.empty((EMG_signals.shape[0],self.Ex.shape[1]))
for t in tqdm.tqdm(range(EMG_signals.shape[0])):
#Predict coordinate by state measurement equation
X_measurement_estimate=self.W @ EMG_signals[t,:][:,None];
#Store Wiener Estimate
WienerCoords[t,:]=X_measurement_estimate.T;
#Kalman
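# time update: propagate the previous estimate and covariance through the
# state model A (adding process noise Ex), then fuse the prediction with the
# Wiener measurement estimate in information (inverse-covariance) form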
X_state_estimate = self.A @ self.Kalman_estimate
self.P_before = self.A @ self.P_after @ self.A.T + self.Ex
self.P_after=np.linalg.pinv(np.linalg.pinv(self.P_before)+np.linalg.pinv(self.Ez))
self.Kalman_estimate=self.P_after @ (np.linalg.pinv(self.P_before) @ X_state_estimate+np.linalg.pinv(self.Ez) @ X_measurement_estimate)
KalmanCoords[t,:] = self.Kalman_estimate.T
Kc=KalmanCoords[:,:Coord.shape[1]]
Tc=Coord[lag+2:,:]
kalmanStabilizationOffset=round(Tc.shape[0]*0.05)
plt.figure()
for i in range(Kc.shape[1]):
plt.subplot(Kc.shape[1]*100+11+i)
plt.plot(Kc[kalmanStabilizationOffset:,i])
plt.plot(Tc[kalmanStabilizationOffset:,i])
for i in range(Kc.shape[1]):
print(np.corrcoef(Kc[2000:,i].T,Tc[2000:,i].T)[0,1])
def fitEvaluate(self,X=None,Y=None,file='experiment_data.h5',testRatio=1/2,numCh=64,offCh=64,Pn=[59,77,101,125],lag=2,forward=0,downsample=0):
self.numCh=numCh
self.offCh=offCh
self.Pn=Pn
self.lag=lag
self.forward=forward
self.downsample=downsample
if type(X)==type(None) or type(Y)==type(None):
with h5py.File(file,'r+') as f1:
raw_data = np.array(f1['protocol1']['raw_data'])
Y=raw_data[:,[p+offCh for p in Pn]]
X=raw_data[:,:numCh]
del raw_data
split=round(X.shape[0]*(1-testRatio))
self.fit(X[:split,:],Y[:split,:])
self.evaluate(X[split:,:],Y[split:,:])
def transform(self,EMGchunk):
if not self.fitted:
self.loadParams()
chunkSize=EMGchunk.shape[0]
numCh=EMGchunk.shape[1]
if(np.isscalar(self.Tail)):
# first chunk: allocate the persistent lag buffer, output buffers and tail
self.emg_buffer=np.empty((self.emg_buffer_size,1+numCh*(self.lag+1)))
self.emg_buffer[:,0]=1
self.WienerCoordsBuffer=np.empty((self.emg_buffer_size,self.W.shape[0]))
self.KalmanCoordsBuffer=np.empty((self.emg_buffer_size,self.W.shape[0]))
self.Tail=np.zeros((self.lag+1,numCh*3))
emg_lag=self.emg_buffer[:chunkSize-self.lag-self.forward,1:]
for l in range(self.lag+1):
emg_lag[:,l*numCh:(l+1)*numCh]=EMGchunk[(l):(chunkSize-self.lag-self.forward+l),:]
self.Tail[:self.lag-l,l*numCh:(l+1)*numCh]=EMGchunk[chunkSize-self.lag+l:chunkSize,:]
emg_lag=self.emg_buffer[:chunkSize-self.lag-self.forward,:]
else:
# later chunks: reuse the stored buffer and prepend the tail kept from the previous chunk
emg_lag=self.emg_buffer[:chunkSize-self.forward,1:]
for l in range(self.lag+1):
emg_lag[self.lag-l:chunkSize,l*numCh:(l+1)*numCh]=EMGchunk[0:(chunkSize-self.lag-self.forward+l),:]
emg_lag[0:self.lag-l,l*numCh:(l+1)*numCh]=self.Tail[:self.lag-l,l*numCh:(l+1)*numCh]
self.Tail[:self.lag-l,l*numCh:(l+1)*numCh]=EMGchunk[chunkSize-self.lag+l:chunkSize,:]
emg_lag=self.emg_buffer[:chunkSize-self.forward,:]
WienerCoords=self.WienerCoordsBuffer[:emg_lag.shape[0],:]
KalmanCoords=self.KalmanCoordsBuffer[:emg_lag.shape[0],:]
for t in range(emg_lag.shape[0]):
#Predict coordinate by state measurement equation
X_measurement_estimate=self.W @ emg_lag[t,:][:,None];
#Store Wiener Estimate
WienerCoords[t,:]=X_measurement_estimate.T;
#Kalman
X_state_estimate = self.A @ self.Kalman_estimate
self.P_before = self.A @ self.P_after @ self.A.T + self.Ex
self.P_after=np.linalg.pinv(np.linalg.pinv(self.P_before)+np.linalg.pinv(self.Ez))
self.Kalman_estimate=self.P_after @ (np.linalg.pinv(self.P_before) @ X_state_estimate+np.linalg.pinv(self.Ez) @ X_measurement_estimate)
KalmanCoords[t,:] = self.Kalman_estimate.T
return WienerCoords, KalmanCoords
|
[
"k.kaplun@gmail.com"
] |
k.kaplun@gmail.com
|
59944bb8fa971396a0f7e49931ba6f9bf8ed1091
|
4b29c3e3c8a2cad5071a3fb2ea674253c6f0ef21
|
/pycharm/digiin/case/TestLogin.py
|
70e3880684b38a0a5d5a1bb7b50cd59768931663
|
[] |
no_license
|
yz9527-1/1YZ
|
a0303b00fd1c7f782b7e4219c52f9589dd3b27b7
|
5f843531d413202f4f4e48ed0c3d510db21f4396
|
refs/heads/master
| 2022-11-30T23:50:56.682852
| 2020-08-10T02:11:13
| 2020-08-10T02:11:13
| 286,354,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,586
|
py
|
#coding=utf-8
from ddt import ddt, data, unpack
from common.ExcelUtil import ExcelUtil
import time
import unittest
from selenium import webdriver
def self(args):
pass
class Case(object):
def __init__(self):
pass
def get_case(self):
"""
获取数据
得到有用的数据,并且使数据以邮箱地址、密码、预期结果定位、预期结果的顺序返回
:return:
"""
#获取Excel中的文件数据
sheet='Login'
file=ExcelUtil(sheet_name=sheet)
data=file.get_data()
# find each required column's index in the header row, then read the values in that order
email_index=data[0].index("邮箱地址")
password_index=data[0].index("密码")
expected_element_index=data[0].index("预期结果定位")
expected_index=data[0].index("预期结果")
data_length=data.__len__()
all_cass=[]
# skip the header row and any other unused rows
for i in range(1,data_length):
case=[]
case.append(data[i][email_index])
case.append(data[i][password_index])
case.append(data[i][expected_element_index])
case.append(data[i][expected_index])
all_cass.append(case)
return all_cass
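# a returned case row looks like, e.g. (illustrative values only; real rows come
# from the 'Login' sheet): ['user@example.com', 'secret', 'password error', '...']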
class Login(object):
def __init__(self,driver):
self.driver=driver
def login(self,email,password):
"""登录步骤"""
#driver=webdriver.Chrome()
#self.driver=driver
#邮箱地址、密码、点击登录按钮操作
time.sleep(1)
if email!=None:
email_element=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[1]/input')
email_element.send_keys(email)
time.sleep(1)
if password!=None:
password_element=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[2]/input')
password_element.send_keys(password)
time.sleep(1)
login_btn=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[3]/input')
login_btn.click()
def login_assert(self,assert_type,assert_message):
"""登录断言"""
time.sleep(1)
if assert_type=='email error':
email_message=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[1]/input').text
assert email_message==assert_message
elif assert_type=='password error':
password_message=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[2]/input').text
assert password_message==assert_message
elif assert_type=='login sucess' or assert_type=='login fail':
login_message=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[3]/input').text
assert login_message==assert_message
else:
print("输入的断言类型不正确")
@ddt
class TextLogin(unittest.TestCase):
"""测试登录"""
def setUp(self):
self.driver=webdriver.Chrome()
url="http://192.168.0.30:18069"
self.driver.implicitly_wait(20)
self.driver.maximize_window()
self.driver.get(url=url)
def tearDown(self):
self.driver.quit()
case=Case().get_case()
@data(*case)
@unpack
def test_login(self,email,password,assert_type,assert_message):
login=Login(driver=self.driver)
login.login(email=email,password=password)
login.login_assert(assert_type=assert_type,assert_message=assert_message)
if __name__=='__main__':
unittest.main()
|
[
"2447025220@qq.com"
] |
2447025220@qq.com
|
8bd8bd551bfb95270ba2e3e1d5d5e796dbca4b01
|
897d4052dcb89233ed20872ef14f0e41fdbbf6ae
|
/wallet/bin/versioneer.py
|
ec182a73c746d71bf471ac4b758a967ec044d482
|
[
"MIT"
] |
permissive
|
jvkatzman/tigoctm
|
0f83f1eb7ddb9d107943f708651778b6cdf66b0b
|
047e66b570ecc09bd2198229546cd9c605b20632
|
refs/heads/master
| 2020-04-02T01:29:48.856298
| 2018-10-20T01:22:05
| 2018-10-20T01:22:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,804
|
py
|
#!/home/chriss/Sites/guld/guldWallet/wallet/bin/python3
"""versioneer.py
(like a rocketeer, but for versions)
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Version: 0.7+
This file helps distutils-based projects manage their version number by just
creating version-control tags.
For developers who work from a VCS-generated tree (e.g. 'git clone' etc),
each 'setup.py version', 'setup.py build', 'setup.py sdist' will compute a
version number by asking your version-control tool about the current
checkout. The version number will be written into a generated _version.py
file of your choosing, where it can be included by your __init__.py
For users who work from a VCS-generated tarball (e.g. 'git archive'), it will
compute a version number by looking at the name of the directory created when
the tarball is unpacked. This conventionally includes both the name of the
project and a version number.
For users who work from a tarball built by 'setup.py sdist', it will get a
version number from a previously-generated _version.py file.
As a result, loading code directly from the source tree will not result in a
real version. If you want real versions from VCS trees (where you frequently
update from the upstream repository, or do new development), you will need to
do a 'setup.py version' after each update, and load code from the build/
directory.
You need to provide this code with a few configuration values:
versionfile_source:
A project-relative pathname into which the generated version strings
should be written. This is usually a _version.py next to your project's
main __init__.py file. If your project uses src/myproject/__init__.py,
this should be 'src/myproject/_version.py'. This file should be checked
in to your VCS as usual: the copy created below by 'setup.py
update_files' will include code that parses expanded VCS keywords in
generated tarballs. The 'build' and 'sdist' commands will replace it with
a copy that has just the calculated version string.
versionfile_build:
Like versionfile_source, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have package_dir={'myproject': 'src/myproject'},
then you will probably have versionfile_build='myproject/_version.py' and
versionfile_source='src/myproject/_version.py'.
tag_prefix: a string, like 'PROJECTNAME-', which appears at the start of all
VCS tags. If your tags look like 'myproject-1.2.0', then you
should use tag_prefix='myproject-'. If you use unprefixed tags
like '1.2.0', this should be an empty string.
parentdir_prefix: a string, frequently the same as tag_prefix, which
appears at the start of all unpacked tarball filenames. If
your tarball unpacks into 'myproject-1.2.0', this should
be 'myproject-'.
To use it:
1: include this file in the top level of your project
2: make the following changes to the top of your setup.py:
import versioneer
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
4: run 'setup.py update_files', which will create _version.py, and will
append the following to your __init__.py:
from _version import __version__
5: modify your MANIFEST.in to include versioneer.py
6: add both versioneer.py and the generated _version.py to your VCS
"""
import os, sys, re
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None
VCS = "git"
IN_LONG_VERSION_PY = False
LONG_VERSION_PY = '''
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.7+ (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
import subprocess
import sys
def run_command(args, cwd=None, verbose=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
for line in open(versionfile_source,"r").readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
for ref in list(refs):
if not re.search(r'\d', ref):
if verbose:
print("discarding '%%s', no digits" %% ref)
refs.discard(ref)
# Assume all version tags have a digit. git's %%d expansion
# behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us
# distinguish between branches and tags. By ignoring refnames
# without digits, we filter out many common branch names like
# "release" and "stabilization", as well as "HEAD" and "master".
if verbose:
print("remaining refs: %%s" %% ",".join(sorted(refs)))
for ref in sorted(refs):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.abspath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
root = os.path.dirname(here)
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.abspath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.abspath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if not ver:
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if not ver:
ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
verbose)
if not ver:
ver = default
return ver
'''
import subprocess
import sys
def run_command(args, cwd=None, verbose=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
for line in open(versionfile_source,"r").readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
for ref in list(refs):
if not re.search(r'\d', ref):
if verbose:
print("discarding '%s', no digits" % ref)
refs.discard(ref)
# Assume all version tags have a digit. git's %d expansion
# behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us
# distinguish between branches and tags. By ignoring refnames
# without digits, we filter out many common branch names like
# "release" and "stabilization", as well as "HEAD" and "master".
if verbose:
print("remaining refs: %s" % ",".join(sorted(refs)))
for ref in sorted(refs):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.abspath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
root = os.path.dirname(here)
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.abspath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.abspath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
import sys
def do_vcs_install(versionfile_source, ipy):
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
run_command([GIT, "add", "versioneer.py"])
run_command([GIT, "add", versionfile_source])
run_command([GIT, "add", ipy])
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
run_command([GIT, "add", ".gitattributes"])
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.7+) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
versions = {}
try:
f = open(filename)
except EnvironmentError:
return versions
for line in f.readlines():
mo = re.match("version_version = '([^']+)'", line)
if mo:
versions["version"] = mo.group(1)
mo = re.match("version_full = '([^']+)'", line)
if mo:
versions["full"] = mo.group(1)
return versions
def write_to_version_file(filename, versions):
f = open(filename, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
print("set %s to '%s'" % (filename, versions["version"]))
def get_best_versions(versionfile, tag_prefix, parentdir_prefix,
default=DEFAULT, verbose=False):
# returns dict with two keys: 'version' and 'full'
#
# extract version from first of _version.py, 'git describe', parentdir.
# This is meant to work for developers using a source checkout, for users
# of a tarball created by 'setup.py sdist', and for users of a
# tarball/zipball created by 'git archive' or github's download-from-tag
# feature.
variables = get_expanded_variables(versionfile_source)
if variables:
ver = versions_from_expanded_variables(variables, tag_prefix)
if ver:
if verbose: print("got version from expanded variable %s" % ver)
return ver
ver = versions_from_file(versionfile)
if ver:
if verbose: print("got version from file %s %s" % (versionfile, ver))
return ver
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if ver:
if verbose: print("got version from git %s" % ver)
return ver
ver = versions_from_parentdir(parentdir_prefix, versionfile_source, verbose)
if ver:
if verbose: print("got version from parentdir %s" % ver)
return ver
if verbose: print("got version from default %s" % ver)
return default
def get_versions(default=DEFAULT, verbose=False):
assert versionfile_source is not None, "please set versioneer.versionfile_source"
assert tag_prefix is not None, "please set versioneer.tag_prefix"
assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
return get_best_versions(versionfile_source, tag_prefix, parentdir_prefix,
default=default, verbose=verbose)
def get_version(verbose=False):
return get_versions(verbose=verbose)["version"]
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ver = get_version(verbose=True)
print("Version is currently: %s" % ver)
class cmd_build(_build):
def run(self):
versions = get_versions(verbose=True)
_build.run(self)
# now locate _version.py in the new build/ directory and replace it
# with an updated value
target_versionfile = os.path.join(self.build_lib, versionfile_build)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
class cmd_sdist(_sdist):
def run(self):
versions = get_versions(verbose=True)
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory (remembering
# that it may be a hardlink) and replace it with an updated value
target_versionfile = os.path.join(base_dir, versionfile_source)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
f.close()
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
description = "modify __init__.py and create _version.py"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
print(" creating %s" % versionfile_source)
f = open(versionfile_source, "w")
f.write(LONG_VERSION_PY % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
f.close()
try:
old = open(ipy, "r").read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
f = open(ipy, "a")
f.write(INIT_PY_SNIPPET)
f.close()
else:
print(" %s unmodified" % ipy)
do_vcs_install(versionfile_source, ipy)
def get_cmdclass():
return {'version': cmd_version,
'update_files': cmd_update_files,
'build': cmd_build,
'sdist': cmd_sdist,
}
|
[
"jkatzman@infsoftware.com"
] |
jkatzman@infsoftware.com
|
2d24c2b1849fbb578426985672e634ca4e13e282
|
ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3
|
/python/baiduads-sdk-auto/baiduads/keyword/api/__init__.py
|
d86d7640ef2ab230105e5b576757bc5d81a011fe
|
[
"Apache-2.0"
] |
permissive
|
baidu/baiduads-sdk
|
24c36b5cf3da9362ec5c8ecd417ff280421198ff
|
176363de5e8a4e98aaca039e4300703c3964c1c7
|
refs/heads/main
| 2023-06-08T15:40:24.787863
| 2023-05-20T03:40:51
| 2023-05-20T03:40:51
| 446,718,177
| 16
| 11
|
Apache-2.0
| 2023-06-02T05:19:40
| 2022-01-11T07:23:17
|
Python
|
UTF-8
|
Python
| false
| false
| 151
|
py
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from baiduads.keyword.api.keyword_service import KeywordService
|
[
"jiangyuan04@baidu.com"
] |
jiangyuan04@baidu.com
|
ca9362d170a5e072bbd92f1841f0cc91721cf3e2
|
69954cf777a73db48a7efabe8ef8cf655e5c2864
|
/NetworkLib.py
|
83838037506da9a84f389a1140b094679cbc19e5
|
[] |
no_license
|
batuhannzorlu/Network-Project
|
e215dc6bf65c668b79af69d591427bd3a7a5191b
|
ec23b0f101528b00c2d953fa7ae1a33a3ee5a75f
|
refs/heads/main
| 2023-07-09T11:54:21.394920
| 2021-08-08T17:32:56
| 2021-08-08T17:32:56
| 394,027,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,578
|
py
|
import paramiko
import time
import threading
class Switch:
def __init__( self,IP, EnableSecret='-1',SshHname='admin',SshPsswd='admin'):
self.IP = IP
self.EnableSecret = EnableSecret
self.SshHname = SshHname
self.SshPsswd = SshPsswd
class Router:
def __init__(self, IP, EnableSecret='-1',SshHname='admin',SshPsswd='admin'):
self.IP = IP
self.EnableSecret = EnableSecret
self.SshHname = SshHname
self.SshPsswd = SshPsswd
def ConnectViaSSH(device:Router):
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(device.IP,'22',device.SshHname,device.SshPsswd,
look_for_keys=False, allow_agent=False)
print('Connected Successfully!')
return ssh_client
def ConnectViaTELNET(device:Switch):
pass
def SendCommand(shell,Command):
print('sent')
shell.send(Command+'\n')
time.sleep(1)
return shell
def PrintOutput(shell):
output = shell.recv(10000)
output = output.decode('utf-8')
print(output)
def RIPV2SUB24Conf(shell,device:Router,Subnet=24):
# if (device.EnableSecret != '-1'):
SendCommand(shell,device.EnableSecret)
#SendCommand(shell,'show run | include (interface | ip address)')
SendCommand(shell,'show ip int bri')
output = shell.recv(10000)
output = output.decode('utf-8')
output_list = output.splitlines()
SendCommand(shell, ' en')
SendCommand(shell, 'admin')
SendCommand(shell, 'conf terminal')
SendCommand(shell, 'router rip')
SendCommand(shell, 'version 2')
for line in output_list:
if( line.__contains__('up')):
s = str(line)
IntIp = s.split()
if IntIp[1] != 'unassigned' :
SIntIp = IntIp[1].split('.')
IntIp[1] = SIntIp[0]+'.'+SIntIp[1]+'.'+SIntIp[2]+'.'+'0'
SendCommand(shell,'network'+' '+IntIp[1])
PrintOutput(shell)
client.close()
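# Example of what RIPV2SUB24Conf derives (illustrative output line, not captured
# from a real device): a 'show ip int bri' row such as
# 'FastEthernet0/0 10.1.1.3 YES manual up up'
# yields the RIP statement 'network 10.1.1.0' (the address is truncated to /24).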
#User must create the vlan and give it ip address.
def EtherChannel(interface_list,vlan_num,port_channel_num, mode):
SendCommand(shell, ' en')
SendCommand(shell, 'admin')
SendCommand(shell, 'conf terminal')
for interface in interface_list:
SendCommand(shell,f'int {interface}')
SendCommand(shell,'switchport mode access')
SendCommand(shell,f'switchport access vlan {vlan_num}')
SendCommand(shell,f'channel-group {port_channel_num} mode {mode}')
SendCommand(shell,f'interface port-channel {port_channel_num}')
SendCommand(shell,'switchport trunk encapsulation dot1q')
SendCommand(shell,'switchport mode trunk')
SendCommand(shell,f'switchport trunk allowed vlan {vlan_num}')
PrintOutput(shell)
def DhcpConf(shell,device,network,SubnetMask,DefaultRouter,poolname):
SendCommand(shell,'')
def BackUp():
pass
def MultiThreading(DeviceList,TargetFunction):
threads =list()
for device in DeviceList:
th=threading.Thread(target=TargetFunction,args=(device,))
threads.append(th)
for th in threads:
th.start()
for th in threads:
th.join()
router2 = Router('10.1.1.3','admin')
client = ConnectViaSSH(router2)
shell = client.invoke_shell()
#interfaces=['e 1/0','e 1/1']
#EtherChannel(interfaces,12,1,'on')
#ripV2Conf(shell,router2)
#SendCommand(shell,'en')
#SendCommand(shell,'admin')
#SendCommand(shell,'sh run')
#PrintOutput(shell)
|
[
"noreply@github.com"
] |
batuhannzorlu.noreply@github.com
|
17989c3088b3000ff5653aa61f6730d7a718bb06
|
75258c8efa8e756234f7d32f729f1089e1667594
|
/DawgCTF 2020/Coding/Miracle Mile/client0.py
|
aff5a4e950aab4fac76367a1705d9fa58bcc478f
|
[] |
no_license
|
darkvoid32/CTF-writeups
|
7c9452a74930d63c26246311fc9de89a568c65f1
|
ea19afefa93b4cfb08f3d655bbf1065bb94cd6ac
|
refs/heads/master
| 2021-04-05T05:53:43.055056
| 2021-03-07T15:18:52
| 2021-03-07T15:18:52
| 248,516,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,314
|
py
|
# -*- coding: utf-8 -*-
"""
Created for Spring 2020 CTF
Cryptography 0
10 Points
Welcome to my sanity check. You'll find this to be fairly easy.
The oracle is found at umbccd.io:13370, and your methods are:
flg - returns the flag
tst - returns the message after the : in "tst:..."
@author: pleoxconfusa
"""
import socket
import math
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('ctf.umbccd.io', 5300)
sock.connect(server_address)
#sock.sendall(msg.encode())
data = sock.recv(1024)
while 1:
data = sock.recv(1024)
print(data.decode())
no = data.decode().split('I ran ')[1].split(' ')[0]
time = data.decode().split(' in ')[1].split(' ')[0]
print(no)
print(time)
curr_sec = time.split(':')[2]
curr_min = time.split(':')[1]
curr_hour = time.split(':')[0]
new_min = int(curr_min) + int(curr_hour) * 60
new_sec = int(curr_sec) + int(new_min) * 60
new_sec_div = new_sec / int(no)
print(new_sec_div)
print(math.floor(new_sec_div))
sec = math.floor(new_sec_div) % 60
print(sec)
minute = (math.floor(new_sec_div) - sec) / 60
print(str(int(minute)) + ':' + str(sec) + ' minutes/mile')
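# worked example (illustrative): "I ran 5 miles in 00:40:00" -> 2400 total seconds,
# 2400 / 5 = 480 s per mile, so the string sent back is "8:0 minutes/mile"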
sock.sendall(str.encode(str(int(minute)) + ':' + str(sec) + ' minutes/mile'))
#sock.sendall()
sock.close()
|
[
"tangyetong666@gmail.com"
] |
tangyetong666@gmail.com
|
22e70becf6b691016982f2b828b13d8eeaf45564
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02571/s663642129.py
|
60a84cc30f58c36b037db16bb95f49473b02d187
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
def main():
s = input()
t = input()
min = int(1e9)
for i in range(len(s)-len(t)+1):
cnt = 0
for j in range(len(t)):
if s[i+j] != t[j]:
cnt += 1
if min > cnt:
min = cnt
print(min)
if __name__ == "__main__":
main()
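# illustrative check: with s = "abcde" and t = "bd" the best window is "bc" or "cd",
# each differing from t in exactly one position, so the program prints 1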
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
7a54c06f19a3583c531d27b86a0c4953aa5d59fd
|
147ad0231450e0b2ad14a8da8cc515a52f6a425a
|
/venv/bin/pyrsa-encrypt
|
4beb1da65ee6918a981d2ce17c2ef9186a773101
|
[] |
no_license
|
ayush-patel/PriceDrop-backend
|
4950956a801c8172ba6b05effd4415e50c4d9cf1
|
8721fafd3061f15f606db3f642693dd195b309dc
|
refs/heads/master
| 2021-01-19T08:13:19.704727
| 2017-04-08T06:15:09
| 2017-04-08T06:15:09
| 87,612,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
#!/Users/ayush/Documents/Development/PriceChange/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import encrypt
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(encrypt())
|
[
"ayushp3435@gmail.com"
] |
ayushp3435@gmail.com
|
|
dac0cd123134ee900137c144d2a10237f3321f84
|
75b85d55fd3e88813e04d5798fc69d4f42b4b7b0
|
/deprecated_nasa_r2_common/r2_control/nodes/r2_ready_pose_high.py
|
13f64a6a301d90869675813d122e3875b9a33952
|
[] |
no_license
|
hspy/rosit
|
9d4560f0cbf4f286c6e0cbc0a1555912dd74e9e1
|
c5ba04b7e870ce807da61d4e23ba80a9404e5f2c
|
refs/heads/main
| 2023-01-19T17:23:02.197874
| 2020-12-02T07:42:21
| 2020-12-02T07:42:21
| 309,916,632
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,308
|
py
|
#!/usr/bin/env python
import roslib; roslib.load_manifest('r2_control')
import rospy
import actionlib
import math
import random
from control_msgs.msg import *
from trajectory_msgs.msg import *
from sensor_msgs.msg import JointState
from copy import copy, deepcopy
TORAD = math.pi/180.0
TODEG = 1.0/TORAD
class r2ReadyPose :
def __init__(self, N, wp, arm):
self.arm = arm
self.currentData = None
self.desiredData = None
self.deadlineData = None
self.currentState = JointState()
self.currentState.position = [0]*N
self.currentState.velocity = [0]*N
self.currentState.effort = [0]*N
self.numJoints = N
self.waypoints = wp
self.fingers = [("index",4),("middle",4),("ring",3),("little",3),("thumb",4)]
rospy.Subscriber("r2/joint_states", JointState, self.jointStateCallback)
if self.arm=="left" :
self.trajPublisher = rospy.Publisher('/r2/l_arm_controller/command', JointTrajectory)
self.trajClient = actionlib.SimpleActionClient('r2/l_arm_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
elif self.arm=="right" :
self.trajPublisher = rospy.Publisher('/r2/r_arm_controller/command', JointTrajectory)
self.trajClient = actionlib.SimpleActionClient('r2/r_arm_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
elif self.arm=="left_hand" :
self.trajPublisher = rospy.Publisher('/r2/l_hand_controller/command', JointTrajectory)
self.trajClient = actionlib.SimpleActionClient('r2/l_hand_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
elif self.arm=="right_hand" :
self.trajPublisher = rospy.Publisher('/r2/r_hand_controller/command', JointTrajectory)
self.trajClient = actionlib.SimpleActionClient('r2/r_hand_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
elif self.arm=="neck" :
self.trajPublisher = rospy.Publisher('/r2/neck_controller/command', JointTrajectory)
self.trajClient = actionlib.SimpleActionClient('r2/neck_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
else :
rospy.logerr("r2ReadyPose::r2ReadyPose() -- unknown arm")
self.trajClient.wait_for_server()
self.actionGoal = FollowJointTrajectoryGoal()
def getNumJoints(self) :
return self.numJoints
def jointStateCallback(self, data):
self.currentState = data
def computeTrajectory(self, desiredData, deadline):
jointTraj = JointTrajectory()
currentState = copy(self.currentState)
desiredState = copy(desiredData)
# create simple lists of both current and desired positions, based on provided desired names
rospy.loginfo("r2ReadyPose::computeTrajectory() -- finding necessary joints")
desiredPositions = []
currentPositions = []
for desIndex in range(len(desiredState.name)) :
for curIndex in range(len(currentState.name)) :
if ( desiredState.name[desIndex] == currentState.name[curIndex] ) :
desiredPositions.append(desiredState.position[desIndex])
currentPositions.append(currentState.position[curIndex])
rospy.loginfo("r2ReadyPose::computeTrajectory() -- creating trajectory")
jointTraj.joint_names = desiredState.name
jointTraj.points = list()
for j in range(self.waypoints) :
trajPoint = JointTrajectoryPoint()
t = (deadline / self.waypoints) * (j + 1)
trajPoint.time_from_start = rospy.Duration(t)
trajPoint.positions = list()
for i in range(len(desiredPositions)) :
trajPoint.positions.append( self.minJerk(currentPositions[i], desiredPositions[i], deadline, t) )
jointTraj.points.append(trajPoint)
rospy.loginfo("r2ReadyPose::moveToGoal() -- using tolerances")
return jointTraj
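# minJerk blends between start and end with the classic minimum-jerk profile
# s(tau) = 10*tau^3 - 15*tau^4 + 6*tau^5 (tau = t/duration), which gives zero
# velocity and acceleration at both endpoints of the trajectory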
def minJerk(self, start, end, duration, t):
tOverD = float(t) / float(duration)
return start + (end - start)*( 10*math.pow(tOverD,3) - 15*math.pow(tOverD,4) + 6*math.pow(tOverD,5) )
def moveToGoal(self, jointGoal, deadline, useTolerances) :
self.actionGoal.trajectory = self.computeTrajectory(jointGoal, deadline)
offset = 0
if useTolerances :
rospy.loginfo("r2ReadyPose::moveToGoal() -- using tolerances")
self.actionGoal.path_tolerance = []
self.actionGoal.goal_tolerance = []
if self.arm == "left_hand" :
for k in range(len(self.fingers)):
for j in range(self.fingers[k][1]):
tol = JointTolerance()
tol.name = "r2/left_arm/hand/" + self.fingers[k][0] + "/joint" + str(j+offset)
tol.position = 0.2
tol.velocity = 1
tol.acceleration = 10
self.actionGoal.path_tolerance.append(tol)
self.actionGoal.goal_tolerance.append(tol)
elif self.arm == "right_hand" :
for k in range(len(self.fingers)):
for j in range(self.fingers[k][1]):
tol = JointTolerance()
tol.name = "r2/right_arm/hand/" + self.fingers[k][0] + "/joint" + str(j+offset)
print tol.name
tol.position = 0.2
tol.velocity = 1
tol.acceleration = 10
self.actionGoal.path_tolerance.append(tol)
self.actionGoal.goal_tolerance.append(tol)
else :
for i in range(self.numJoints):
tol = JointTolerance()
if self.arm == "left" or self.arm == "right" :
tol.name = "r2/" + self.arm + "_arm/joint" + str(i+offset)
elif self.arm == "neck" :
tol.name = "r2/" + self.arm + "/joint" + str(i+offset)
tol.position = 0.2
tol.velocity = 1
tol.acceleration = 10
self.actionGoal.path_tolerance.append(tol)
self.actionGoal.goal_tolerance.append(tol)
else :
rospy.loginfo("r2ReadyPose::moveToGoal() -- not using tolerances")
self.actionGoal.goal_time_tolerance = rospy.Duration(10.0)
# send goal and monitor response
self.trajClient.send_goal(self.actionGoal)
rospy.loginfo("r2ReadyPose::moveToGoal() -- returned state: %s", str(self.trajClient.get_state()))
rospy.loginfo("r2ReadyPose::moveToGoal() -- returned result: %s", str(self.trajClient.get_result()))
return
def formatJointStateMsg(self, j, offset) :
if not (len(j) == self.numJoints) :
rospy.logerr("r2ReadyPose::formatJointStateMsg() -- incorrectly sized joint message")
return None
js = JointState()
js.header.seq = 0
js.header.stamp = rospy.Time.now()
js.header.frame_id = ""
js.name = []
js.position = []
if self.arm == "left" or self.arm == "right" :
for i in range(self.numJoints):
js.name.append("r2/" + self.arm + "_arm/joint" + str(i+offset))
js.position.append(j[i])
if self.arm == "left_hand" :
for k in range(len(self.fingers)):
for i in range(self.fingers[k][1]):
js.name.append("r2/left_arm/hand/" + self.fingers[k][0] + "/joint" + str(i+offset))
js.position.append(j[i])
if self.arm == "right_hand" :
for k in range(len(self.fingers)):
for i in range(self.fingers[k][1]):
js.name.append("r2/right_arm/hand/" + self.fingers[k][0] + "/joint" + str(i+offset))
js.position.append(j[i])
elif self.arm == "neck" :
for i in range(self.numJoints):
js.name.append("r2/" + self.arm + "/joint" + str(i+offset))
js.position.append(j[i])
return js
if __name__ == '__main__':
rospy.init_node('r2_ready_pose_high')
try:
r2TrajectoryGeneratorLeft = r2ReadyPose(7, 500, "left")
r2TrajectoryGeneratorRight = r2ReadyPose(7, 500, "right")
r2TrajectoryGeneratorNeck = r2ReadyPose(3, 500, "neck")
r2TrajectoryGeneratorLeftHand = r2ReadyPose(15, 10, "left_hand")
r2TrajectoryGeneratorRightHand = r2ReadyPose(15, 10, "right_hand")
rospy.sleep(2)
lhrp = [0]*15
rhrp = [0]*15
lrp1 = [50.0*TORAD, -80.0*TORAD, -105.0*TORAD, -140.0*TORAD, 80.0*TORAD, 0.0*TORAD, 0.0*TORAD]
rrp1 = [-50.0*TORAD, -80.0*TORAD, 105.0*TORAD, -140.0*TORAD, -80.0*TORAD, 0.0*TORAD, 0.0*TORAD]
rrp2 = [ 0.4, -0.5, 1.57, -2.0, -0.7, 0.3, 0.6]
lrp2 = [-0.4, -0.5, -1.57, -2.0, 0.7, 0.3, -0.6]
nrp = [-20.0*TORAD, 0.0*TORAD, -15.0*TORAD]
print "r2ReadyPose() -- moving to ready pose"
jointGoalNeck = r2TrajectoryGeneratorNeck.formatJointStateMsg(nrp, 0)
jointGoalLeftHand = r2TrajectoryGeneratorLeftHand.formatJointStateMsg(lhrp, 0)
jointGoalRightHand = r2TrajectoryGeneratorRightHand.formatJointStateMsg(rhrp, 0)
r2TrajectoryGeneratorLeftHand.moveToGoal(jointGoalLeftHand, 0.1, False)
r2TrajectoryGeneratorRightHand.moveToGoal(jointGoalRightHand, 0.1, False)
r2TrajectoryGeneratorNeck.moveToGoal(jointGoalNeck, 0.5, False)
jointGoalLeft = r2TrajectoryGeneratorLeft.formatJointStateMsg(lrp1, 0)
jointGoalRight = r2TrajectoryGeneratorRight.formatJointStateMsg(rrp1, 0)
r2TrajectoryGeneratorLeft.moveToGoal(jointGoalLeft, 0.5, False)
r2TrajectoryGeneratorRight.moveToGoal(jointGoalRight, 0.5, False)
rospy.sleep(3)
jointGoalLeft = r2TrajectoryGeneratorLeft.formatJointStateMsg(lrp2, 0)
jointGoalRight = r2TrajectoryGeneratorRight.formatJointStateMsg(rrp2, 0)
r2TrajectoryGeneratorLeft.moveToGoal(jointGoalLeft, 0.5, False)
r2TrajectoryGeneratorRight.moveToGoal(jointGoalRight, 0.5, False)
except rospy.ROSInterruptException:
pass
|
[
"xkqrpdla@gmail.com"
] |
xkqrpdla@gmail.com
|
554dca6916ae0978656c3e216f1079c587f4d1ee
|
6029273b26f70d408752adc4502bfb9dba662e67
|
/switch/predictor.py
|
bf9a8557e50e967117722a5de1af1b1005d6f4f4
|
[] |
no_license
|
valzam/switch
|
05cbb367dfb0efee112fb86f21eb6ed819768f5a
|
92aa6e4f198001f7128d275a68be3e76984a7cae
|
refs/heads/master
| 2022-12-25T16:33:23.928896
| 2019-10-21T21:54:28
| 2019-10-21T21:54:28
| 159,875,640
| 0
| 0
| null | 2022-12-08T01:19:44
| 2018-11-30T20:33:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,212
|
py
|
from .pb import machine_failure
from . import logging, get_timestamp
import datetime
import time
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class MachineFailurePredictor:
def __init__(self, *args, **kwargs):
self.a = 0.5
self.b = 0.5
self.c = 1
def predict(self, datapoint_binary):
datapoint = machine_failure.MachineDataPoint()
datapoint.ParseFromString(datapoint_binary)
y = self._predict(datapoint)
response = self._build_response(y)
logger.info(
f"Calculated prediction based on data from {datapoint.sender}: {y}") # pylint: disable=E1101
return response
def _predict(self, datapoint: machine_failure.MachineDataPoint):
""" Actual algorithm to facilitate easier testing
"""
y = self.a * datapoint.temperature + self.b * datapoint.velocity + self.c  # linear model a*T + b*v + c  # pylint: disable=E1101
return y
def _build_response(self, y: float):
response = machine_failure.MachineFailurePrediction()
response.failureChance = y
response.timestamp = get_timestamp()
return response
def __str__(self):
return f"MachineFailure"
|
[
"valentin.zambelli@gmail.com"
] |
valentin.zambelli@gmail.com
|
92103479da3362929c2772f8d99d3221ed2b97d9
|
5c786fbc05076b9286669c1e844d50f4d474d899
|
/Chapter_2/ReturnKthToLast.py
|
9c6ab3a010024c6a5db10f05dddc9e735ef69afa
|
[] |
no_license
|
LaloGarcia91/CrackingTheCodingInterview
|
2e25b06153f4248a3e377b2a912b2052d7634d0d
|
cff401ff8a5bcb14c65e7f030d63dd80a22ac14e
|
refs/heads/master
| 2022-11-24T09:26:43.917068
| 2020-07-26T04:01:51
| 2020-07-26T04:01:51
| 282,564,217
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
from Chapter_2.LinkedList import *
llist = LinkedList()
llist.append(1)
llist.append(22)
llist.append(3)
llist.append(6)
llist.append(3)
llist.append(3)
llist.append(45)
llist.append(4)
llist.append(5)
llist.append(9)
llist.printKthToLast(llist, 4)
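# Worked expectation (the exact output format of printKthToLast is an assumption):
# counting from the tail of 1 -> 22 -> 3 -> 6 -> 3 -> 3 -> 45 -> 4 -> 5 -> 9 and
# taking k=1 to mean the last node, the 4th-to-last element is 45, so the call
# above should report 45 (or 45 onward, if it prints from that node).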
|
[
"lalo.garcia.g91@gmail.com"
] |
lalo.garcia.g91@gmail.com
|
d88d6f3bdc2be3adcfbba3b7fc2622208a6b2bb7
|
68db6583160de733d950ef0ff087c0f7e826cbd1
|
/blog/blog/urls.py
|
26461cb704b50676a605ade9a8210fa98f54d82a
|
[] |
no_license
|
alllenmartin/django-tdd-project
|
636975b9d1842554f81e7bcb41b5ff4bdac644b2
|
f37f65c70f7b6e89b1d20598330259c57f05f0a0
|
refs/heads/master
| 2023-08-07T12:21:47.698974
| 2021-06-09T13:12:13
| 2021-06-09T13:12:13
| 270,918,823
| 1
| 0
| null | 2021-09-22T19:11:34
| 2020-06-09T06:14:54
|
Python
|
UTF-8
|
Python
| false
| false
| 746
|
py
|
"""blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
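# Illustrative sketch of the "Including another URLconf" steps from the
# docstring above (assumption: a 'blog' app providing blog/urls.py exists):
#
#   from django.urls import include, path
#   urlpatterns += [path('blog/', include('blog.urls'))]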
|
[
"allen@localhost.com"
] |
allen@localhost.com
|
04a44d12e2e82743006cd0f44c9a3e073dccfc6d
|
8f564f322398fa989a54844197d344e1a8ce1576
|
/surfex/cli.py
|
a78cbcb5a58d4a3c9a82c9c07cfdac00b56d1bce
|
[] |
no_license
|
lisbethb-met-no/pysurfex
|
2031f3fd75fee021bf33463d4be44f031681c904
|
fb671fca4b2b9a69ae8ae4eb28e74ee1d481fd53
|
refs/heads/master
| 2023-01-06T12:33:25.282940
| 2020-11-05T13:47:44
| 2020-11-05T13:47:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 55,406
|
py
|
import sys
import surfex
from argparse import ArgumentParser, Action
from datetime import datetime
import json
import os
import yaml
import toml
import numpy as np
class LoadFromFile(Action):
def __call__(self, parser, namespace, values, option_string=None):
surfex.util.error("Reading options from file is not supported yet")
with values as f:
contents = f.read()
data = parser.parse_args(contents.split())
for k, v in vars(data).items():
if v and k != option_string.lstrip('-'):
setattr(namespace, k, v)
def parse_args_create_forcing(argv):
# print argv
parser = ArgumentParser(description="Create offline forcing")
parser.add_argument('dtg_start', type=str, help="Start DTG", nargs="?")
parser.add_argument('dtg_stop', type=str, help="Stop DTG", nargs="?")
parser.add_argument('area', type=str, help="Configuration file describing the points or locations", nargs="?")
parser.add_argument('-fb', type=str, help="First base time unless equal to dtg_start", default=None)
parser.add_argument('--options', type=open, action=LoadFromFile)
parser.add_argument('-c', '--config', dest="user_config", type=str,
help="Configuration file in yaml format describing customized variable setup",
default=None, nargs="?")
parser.add_argument('-t', '--timestep', type=int, help="Surfex time step", default=3600, nargs="?")
parser.add_argument('-ci', '--cache_interval', type=int, help="clear cached fields after..", default=3600,
nargs="?")
parser.add_argument('-i', '--input_format', type=str, help="Default input file format", default="netcdf",
choices=["netcdf", "grib1", "grib2", "surfex"])
parser.add_argument('-o', '--output_format', type=str, help="Output file format", default="netcdf", nargs="?")
parser.add_argument('-of', type=str, help="Output file name", default=None, nargs="?")
parser.add_argument('-p', '--pattern', type=str, help="Filepattern", default=None, nargs="?")
parser.add_argument('--zref', type=str, help="Temperature/humidity reference height", default="ml",
choices=["ml", "screen"])
parser.add_argument('--uref', type=str, help="Wind reference height: screen/ml/", default="ml",
choices=["ml", "screen"])
parser.add_argument('--debug', help="Show debug information", action="store_true")
parser.add_argument('--version', action="version", version=surfex.__version__)
group_ta = parser.add_argument_group('TA', description="Air temperature [K]")
group_ta.add_argument("--ta", type=str, help="Input format", default="default",
choices=["default", "netcdf", "grib1", "grib2", "surfex"])
group_ta.add_argument("--ta_converter", type=str, help="Converter function to air temperature",
default="none", choices=["none"])
group_qa = parser.add_argument_group('QA', description="Specific humidity")
group_qa.add_argument("--qa", type=str, help="Input format", default="default",
choices=["default", "netcdf", "grib1", "grib2", "surfex"])
group_qa.add_argument("--qa_converter", type=str, help="Converter function to specific humidity",
default="none", choices=["none", "rh2q"])
group_ps = parser.add_argument_group('PS', description="Surface air pressure [Pa]")
group_ps.add_argument('--ps', type=str, help="Surface air pressure input format",
default="default", choices=["default", "netcdf", "grib1", "grib2", "surfex", "constant"])
group_ps.add_argument("--ps_converter", type=str, help="Converter function to surface air pressure",
default="none", choices=["none"])
group_dir_sw = parser.add_argument_group('DIR_SW', description="Direct shortwave radiation")
group_dir_sw.add_argument('--dir_sw', type=str, help="Direct short wave radiation input format",
default="default", choices=["default", "netcdf", "grib1", "grib2", "surfex", "constant"])
group_dir_sw.add_argument("--dir_sw_converter", type=str,
help="Converter function to direct short wave radiation",
default="none", choices=["none"])
group_sca_sw = parser.add_argument_group('SCA_SW', description="Scattered short wave radiation flux")
group_sca_sw.add_argument('--sca_sw', type=str, help="Scattered short wave radiation input format",
default="default", choices=["netcdf", "grib1", "grib2", "surfex", "constant"])
group_sca_sw.add_argument("--sca_sw_converter", type=str,
help="Converter function to scattered shortwave radiation flux",
default="none", choices=["none"])
group_lw = parser.add_argument_group('LW', description="Long wave radiation flux")
group_lw.add_argument('--lw', type=str, help="Long wave radiation input format", default="default",
choices=["netcdf", "grib1", "grib2", "surfex", "constant"])
group_lw.add_argument("--lw_converter", type=str, help="Converter function to long wave radiation flux",
default="none", choices=["none"])
group_rain = parser.add_argument_group('RAIN', description="Rainfall rate")
group_rain.add_argument("--rain", type=str, help="Input format", default="default",
choices=["default", "netcdf", "grib1", "grib2", "surfex"])
group_rain.add_argument("--rain_converter", type=str, help="Converter function to rainfall rate",
default="totalprec", choices=["none", "totalprec", "calcrain"])
group_snow = parser.add_argument_group('SNOW', description="Snowfall rate")
group_snow.add_argument("--snow", type=str, help="Input format", default="default",
choices=["default", "netcdf", "grib1", "grib2", "surfex"])
group_snow.add_argument("--snow_converter", type=str, help="Converter function to snowfall rate", default="none",
choices=["none", "calcsnow"])
group_wind = parser.add_argument_group('WIND', description="Wind speed")
group_wind.add_argument("--wind", type=str, help="Input format", default="default",
choices=["default", "netcdf", "grib1", "grib2", "surfex"])
group_wind.add_argument("--wind_converter", type=str, help="Converter function to windspeed",
default="windspeed", choices=["none", "windspeed"])
group_wind_dir = parser.add_argument_group('WIND_DIR', description="Wind direction")
group_wind_dir.add_argument("--wind_dir", type=str, help="Input format", default="default",
choices=["default", "netcdf", "grib1", "grib2", "surfex"])
group_wind_dir.add_argument("--wind_dir_converter", type=str, help="Converter function to wind direction",
default="winddir", choices=["none", "winddir"])
group_co2 = parser.add_argument_group('CO2', description="Carbon dioxide")
group_co2.add_argument('--co2', type=str, help="CO2 input format", default="default",
choices=["netcdf", "grib1", "constant", "grib2", "surfex"])
group_co2.add_argument("--co2_converter", type=str, help="Converter function to carbon dioxide", default="none",
choices=["none"])
group_zs = parser.add_argument_group('ZS', description="Surface geopotential")
group_zs.add_argument('--zsoro', type=str, help="ZS input format", default="default",
choices=["netcdf", "grib1", "grib2", "surfex", "constant"])
group_zs.add_argument("--zsoro_converter", type=str, help="Converter function to ZS", default="none",
choices=["none", "phi2m"])
group_zval = parser.add_argument_group('ZREF', description="Reference height for temperature and humidity")
group_zval.add_argument('--zval', type=str, help="ZREF input format", default="default",
choices=["netcdf", "grib1", "grib2", "surfex", "constant"])
group_zval.add_argument("--zval_converter", type=str, help="Converter function to ZREF", default="none",
choices=["none"])
group_uval = parser.add_argument_group('UREF', description="Reference height for wind")
group_uval.add_argument('--uval', type=str, help="UREF input format", default="default",
choices=["netcdf", "grib1", "grib2", "surfex", "constant"])
group_uval.add_argument("--uval_converter", type=str, help="Converter function to UREF", default="none",
choices=["none"])
if len(argv) < 4:
parser.print_help()
sys.exit(1)
args = parser.parse_args(argv)
kwargs = {}
for arg in vars(args):
# print
# arg, getattr(args, arg)
kwargs.update({arg: getattr(args, arg)})
user_config = {}
if "user_config" in kwargs and kwargs["user_config"] is not None:
user_config = yaml.load(open(kwargs["user_config"])) or {}
kwargs.update({"user_config": user_config})
# Read point/domain config
if "area" in kwargs:
geo_out = surfex.geo.get_geo_object(json.load(open(kwargs["area"], "r")))
else:
raise Exception("You must provide an json area file")
kwargs.update({"geo_out": geo_out})
# Find name of global config file
root = __file__
if os.path.islink(root):
root = os.path.realpath(root)
base = os.path.dirname(os.path.abspath(root))
yaml_config = base + "/cfg/config.yml"
default_conf = yaml.load(open(yaml_config)) or sys.exit(1)
kwargs.update({"config": default_conf})
return kwargs
def run_create_forcing(**kwargs):
options, var_objs, att_objs = surfex.forcing.set_forcing_config(**kwargs)
surfex.forcing.run_time_loop(options, var_objs, att_objs)
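# Minimal wiring sketch for the two functions above (illustrative only; the real
# console entry point lives elsewhere in the package, so the name is an assumption):
#
#   def create_forcing_cli(argv=None):
#       kwargs = parse_args_create_forcing(argv if argv is not None else sys.argv[1:])
#       run_create_forcing(**kwargs)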
def parse_args_qc2obsmon(argv):
parser = ArgumentParser("Create SQLite data base for obsmon")
parser.add_argument('dtg', type=str, help="YYYYMMDDHH")
parser.add_argument('varname', type=str, help="Variable name")
parser.add_argument('qc', type=str, help="QC dataset JSONfile")
parser.add_argument('--operator', type=str, help="Obs operator", choices=["bilinear", "nearest"],
default="bilinear", required=False)
parser.add_argument('--fg_file', type=str, help="First guess file", required=True)
parser.add_argument('--an_file', type=str, help="Analysis file", required=True)
parser.add_argument('--file_var', type=str, help="File variable", required=True)
parser.add_argument('-o', dest="output", type=str, help="output file", default="ecma.db")
parser.add_argument('--version', action='version', version=surfex.__version__)
args = parser.parse_args(argv)
kwargs = {}
for arg in vars(args):
kwargs.update({arg: getattr(args, arg)})
return kwargs
def parse_args_create_surfex_json_namelist(argv):
"""Parse the command line input arguments."""
parser = ArgumentParser("Creating the namelists in JSON format to be able to run SURFEX")
parser.add_argument('--version', action='version', version='surfex {0}'.format(surfex.__version__))
parser.add_argument('--config', '-c', type=str, nargs="?", required=True, help="Input TOML file")
parser.add_argument('--path', '-p', type=str, nargs="?", required=True, help="Path to input settings")
parser.add_argument('--indent', required=False, default=2, type=int, help="Indented output")
parser.add_argument('--namelist', '-n', required=False, default="options.json", nargs='?', help="")
parser.add_argument('--prep.file', dest="prep_file", type=str, nargs="?", required=False, default=None,
help="Input file for PREP")
parser.add_argument('--prep.filetype', dest="prep_filetype", type=str, nargs="?", required=False, default=None,
help="Input file format for PREP", choices=["GRIB", "FA", "ASCII", "LFI", "NC", "json"])
parser.add_argument('--prep.pgdfile', dest="prep_pgdfile", type=str, nargs="?", required=False, default=None,
help="Input PGD file for PREP input file")
parser.add_argument('--prep.pgdfiletype', dest="prep_pgdfiletype", type=str, nargs="?", required=False,
default=None,
help="Fileformat for PGD file provided as --prep.pgdfile", choices=["FA", "ASCII", "LFI", "NC"])
parser.add_argument('--dtg', dest="dtg", type=str, nargs="?", required=False, default=None,
help="DTG (YYYYMMDDHH)")
parser.add_argument('--forc_zs', action="store_true", help="Set surfex orography to forcing height")
parser.add_argument('program', help="For which program you should create the JSON file",
choices=["pgd", "prep", "offline", "soda"])
if len(argv) == 0:
parser.print_help()
sys.exit()
args = parser.parse_args(argv)
kwargs = {}
for arg in vars(args):
kwargs.update({arg: getattr(args, arg)})
return kwargs
def create_surfex_json_namelist(**kwargs):
program = kwargs["program"]
input_path = kwargs["path"]
indent = kwargs["indent"]
name_of_namelist = kwargs["namelist"]
args = {
"forc_zs": kwargs["forc_zs"],
"prep_file": kwargs["prep_file"],
"prep_filetype": kwargs["prep_filetype"],
"prep_pgdfile": kwargs["prep_pgdfile"],
"prep_pgdfiletype": kwargs["prep_pgdfiletype"]
}
args.update({"dtg": kwargs["dtg"]})
if kwargs["dtg"] is not None:
args.update({"dtg": datetime.strptime(kwargs["dtg"], "%Y%m%d%H")})
settings_file = kwargs["config"]
if os.path.exists(settings_file):
print("Read toml settings from " + settings_file)
settings = surfex.toml_load(settings_file)
# print(settings)
else:
raise FileNotFoundError("Input file does not exist: " + settings_file)
# kwargs.update({"settings": settings})
config = surfex.Configuration(settings, {})
namelist = surfex.BaseNamelist(program, config, input_path, **args)
merged_json_settings = namelist.get_namelist()
# Namelist settings
print("\nNamelist: ")
for key in merged_json_settings:
print(key, ":", merged_json_settings[key])
# Dump namelist as json
namelist.nml2ascii(merged_json_settings, name_of_namelist, indent=indent)
def parse_args_create_surfex_json_input(argv):
"""Parse the command line input arguments."""
parser = ArgumentParser("Creating the namelists in JSON format to be able to run SURFEX")
parser.add_argument('--version', action='version', version='surfex {0}'.format(surfex.__version__))
parser.add_argument('--config', '-c', type=str, nargs="?", required=True, help="Input TOML file")
parser.add_argument('--indent', required=False, default=2, type=int, help="Indented output")
parser.add_argument('--system', '-s', required=True, default="system.json", nargs='?', help="")
parser.add_argument('--files', '-f', type=str, nargs="?", required=False, default="surfex_input_files.json",
help="Input json file for SURFEX binaries")
parser.add_argument('--prep.file', dest="prep_file", type=str, nargs="?", required=False, default=None,
help="Input file for PREP")
parser.add_argument('--prep.filetype', dest="prep_filetype", type=str, nargs="?", required=False, default=None,
help="Input file format for PREP", choices=["GRIB", "FA", "ASCII", "LFI", "NC", "json"])
parser.add_argument('--prep.pgdfile', dest="prep_pgdfile", type=str, nargs="?", required=False, default=None,
help="Input PGD file for PREP input file")
parser.add_argument('--prep.pgdfiletype', dest="prep_pgdfiletype", type=str, nargs="?", required=False,
default=None,
help="Fileformat for PGD file provided as --prep.pgdfile", choices=["FA", "ASCII", "LFI", "NC"])
parser.add_argument('--dtg', dest="dtg", type=str, nargs="?", required=False, default=None,
help="DTG (YYYYMMDDHH)")
parser.add_argument('program', help="For which program you should create the JSON file",
choices=["pgd", "prep", "offline", "soda"])
parser.add_argument('--sfx_first_guess', type=str, nargs="?", required=False, default=None,
help="")
parser.add_argument('--ua_first_guess', type=str, nargs="?", required=False, default=None,
help="")
parser.add_argument('--perturbed_runs', type=str, nargs="*", required=False, default=None,
help="")
parser.add_argument('--lsmfile', type=str, nargs="?", required=False, default=None,
help="")
parser.add_argument('--climfile', type=str, nargs="?", required=False, default=None,
help="")
parser.add_argument('--ascatfile', type=str, nargs="?", required=False, default=None,
help="")
if len(argv) == 0:
parser.print_help()
sys.exit()
args = parser.parse_args(argv)
kwargs = {}
for arg in vars(args):
kwargs.update({arg: getattr(args, arg)})
return kwargs
def create_surfex_json_input(**kwargs):
program = kwargs["program"]
indent = kwargs["indent"]
del(kwargs["indent"])
system_settings = kwargs["system"]
name_of_input_files = kwargs["files"]
dtg = None
if kwargs["dtg"] is not None:
dtg = datetime.strptime(kwargs["dtg"], "%Y%m%d%H")
kwargs.update({"dtg": dtg})
settings_file = kwargs["config"]
if os.path.exists(settings_file):
print("Read toml settings from " + settings_file)
settings = surfex.toml_load(settings_file)
# print(settings)
else:
raise FileNotFoundError("Input file does not exist: " + settings_file)
kwargs.update({"config": surfex.Configuration(settings, {})})
if os.path.exists(system_settings):
kwargs.update({"system_file_paths": surfex.SystemFilePathsFromFile(system_settings)})
else:
raise FileNotFoundError("System settings not found " + system_settings)
config = kwargs["config"]
system_file_paths = kwargs["system_file_paths"]
del(kwargs["config"])
del(kwargs["system_file_paths"])
if program == "pgd":
input_for_surfex_json = surfex.PgdInputData(config, system_file_paths, **kwargs)
elif program == "prep":
input_for_surfex_json = surfex.PrepInputData(config, system_file_paths, **kwargs)
elif program == "offline":
input_for_surfex_json = surfex.OfflineInputData(config, system_file_paths, **kwargs)
elif program == "soda":
input_for_surfex_json = surfex.SodaInputData(config, system_file_paths, **kwargs)
else:
raise NotImplementedError
# Input files for SURFEX binary
print("\nInput files: ", input_for_surfex_json.data)
json.dump(input_for_surfex_json.data, open(name_of_input_files, "w"), indent=indent)
def parse_args_first_guess_for_oi(argv):
parser = ArgumentParser(description="Create first guess file for gridpp")
parser.add_argument('-dtg', dest="dtg", type=str, help="Date (YYYYMMDDHH)", required=True)
parser.add_argument('-i', "--inputfile", type=str, default=None, help="Default input file", nargs="?")
parser.add_argument('-if', dest="inputformat", type=str, help="output file", default="grib2")
parser.add_argument('-d', dest="domain", type=str, help="Domain", required=True)
parser.add_argument('-t2m_file', type=str, default=None, help="File with T2M", nargs="?")
parser.add_argument('-t2m_format', type=str, default=None, help="File format for file with T2M", nargs="?",
choices=["grib1", "grib2", "netcdf", "surfex"])
parser.add_argument('-t2m_converter', type=str, default="none", help="Converter for T2M", nargs="?",
choices=["none"])
parser.add_argument('-rh2m_file', type=str, default=None, help="File with RH2M", nargs="?")
parser.add_argument('-rh2m_format', type=str, default=None, help="File format for file with RH2M", nargs="?",
choices=["grib1", "grib2", "netcdf", "surfex"])
parser.add_argument('-rh2m_converter', type=str, default="none", help="Converter for RH2M", nargs="?",
choices=["none"])
parser.add_argument('-sd_file', type=str, default=None, help="Snow depth file", nargs="?")
parser.add_argument('-sd_format', type=str, default=None, help="Snow depth file format", nargs="?",
choices=["grib1", "grib2", "netcdf", "surfex"])
parser.add_argument('--sd_converter', type=str, default="none", help="", nargs="?",
choices=["none", "sweclim", "swe2sd"])
parser.add_argument('-laf_file', type=str, default=None, help="Land area fraction grib file", nargs="?")
parser.add_argument('-laf_format', type=str, default=None, help="Snow depth file format", nargs="?",
choices=["grib1", "grib2", "netcdf", "surfex"])
parser.add_argument('--laf_converter', type=str, default="sea2land", help="", nargs="?",
choices=["none", "sea2land"])
parser.add_argument('-altitude_file', type=str, default=None, help="SURFEX grib file", nargs="?")
parser.add_argument('-altitude_format', type=str, default=None, help="Snow depth file format", nargs="?",
choices=["grib1", "grib2", "netcdf", "surfex"])
parser.add_argument('--altitude_converter', type=str, default="phi2m", help="", nargs="?",
choices=["none", "phi2m"])
parser.add_argument('-o', dest="output", type=str, help="Output file", default="raw.nc")
parser.add_argument('--config', '-c', dest="config", type=str, help="YAML config file",
default="first_guess.yml", nargs="?")
parser.add_argument('variables', nargs="+", choices=["air_temperature_2m", "relative_humidity_2m",
"surface_snow_thickness"],
help="Variables to create first guess for")
parser.add_argument('--version', action='version', version=surfex.__version__)
if len(argv) == 0:
parser.print_help()
sys.exit(1)
return parser.parse_args(argv)
def first_guess_for_oi(args):
if not os.path.exists(args.config):
raise FileNotFoundError(args.config)
if os.path.exists(args.domain):
geo = surfex.geo.get_geo_object(json.load(open(args.domain, "r")))
else:
raise FileNotFoundError(args.domain)
validtime = datetime.strptime(args.dtg, "%Y%m%d%H")
variables = args.variables
variables = variables + ["altitude", "land_area_fraction"]
cache = surfex.cache.Cache(True, 3600)
fg = None
for var in variables:
inputfile = args.inputfile
fileformat = args.inputformat
converter = "none"
if var == "air_temperature_2m":
if args.t2m_file is not None:
inputfile = args.t2m_file
if args.t2m_format is not None:
fileformat = args.t2m_format
if args.t2m_converter is not None:
converter = args.t2m_converter
elif var == "relative_humidity_2m":
if args.rh2m_file is not None:
inputfile = args.rh2m_file
if args.rh2m_format is not None:
fileformat = args.rh2m_format
if args.rh2m_converter is not None:
converter = args.rh2m_converter
elif var == "surface_snow_thickness":
if args.sd_file is not None:
inputfile = args.sd_file
if args.sd_format is not None:
fileformat = args.sd_format
if args.sd_converter is not None:
converter = args.sd_converter
elif var == "altitude":
if args.altitude_file is not None:
inputfile = args.altitude_file
if args.altitude_format is not None:
fileformat = args.altitude_format
if args.altitude_converter is not None:
converter = args.altitude_converter
elif var == "land_area_fraction":
if args.laf_file is not None:
inputfile = args.laf_file
if args.laf_format is not None:
fileformat = args.laf_format
if args.laf_converter is not None:
converter = args.laf_converter
else:
raise NotImplementedError("Variable not implemented " + var)
if inputfile is None:
raise Exception("You must set input file")
if fileformat is None:
raise Exception("You must set file format")
config = yaml.load(open(args.config, "r"))
defs = config[fileformat]
defs.update({"filepattern": inputfile})
print(var, fileformat)
converter_conf = config[var][fileformat]["converter"]
if converter not in config[var][fileformat]["converter"]:
raise Exception("No converter " + converter + " definition found in " + args.config + "!")
converter = surfex.read.Converter(converter, validtime, defs, converter_conf, fileformat, validtime)
field = surfex.read.ConvertedInput(geo, var, converter).read_time_step(validtime, cache)
field = np.reshape(field, [geo.nlons, geo.nlats])
# Create file
if fg is None:
nx = geo.nlons
ny = geo.nlats
fg = surfex.create_netcdf_first_guess_template(variables, nx, ny, args.output)
fg.variables["time"][:] = float(validtime.strftime("%s"))
fg.variables["longitude"][:] = np.transpose(geo.lons)
fg.variables["latitude"][:] = np.transpose(geo.lats)
fg.variables["x"][:] = [i for i in range(0, nx)]
fg.variables["y"][:] = [i for i in range(0, ny)]
if var == "altitude":
field[field < 0] = 0
fg.variables[var][:] = np.transpose(field)
if fg is not None:
fg.close()
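# Sketch of the config structure first_guess_for_oi expects (reconstructed from
# the lookups above; the concrete keys and values shown are assumptions):
#
#   config[fileformat]                               -> reader defaults merged into `defs`
#   config[var][fileformat]["converter"][converter]  -> converter definition
#
# e.g. expressed as a Python dict:
#   {
#       "netcdf": {"fcint": 6, "offset": 0},
#       "air_temperature_2m": {
#           "netcdf": {"converter": {"none": {"name": "air_temperature_2m"}}}
#       },
#   }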
def parse_args_masterodb(argv):
"""Parse the command line input arguments."""
parser = ArgumentParser(description="SURFEX for MASTERRODB")
parser.add_argument('--version', action='version', version='surfex {0}'.format(surfex.__version__))
parser.add_argument('--wrapper', '-w', type=str, default="", help="Execution wrapper command")
parser.add_argument('--harmonie', action="store_true", default=False,
help="Surfex configuration created from Harmonie environment")
parser.add_argument('--pgd', type=str, nargs="?", required=True, help="Name of the PGD file")
parser.add_argument('--prep', type=str, nargs="?", required=True, help="Name of the PREP file")
parser.add_argument('--force', '-f', action="store_true", default=False, help="Force re-creation")
parser.add_argument('--rte', '-r', required=True, nargs='?')
parser.add_argument('--config', '-c', required=False, nargs='?')
parser.add_argument('--system_file_paths', '-s', required=True, nargs='?', help="Input file paths on your system")
parser.add_argument('--namelist_path', '-n', required=True, nargs='?')
parser.add_argument('--domain', type=str, required=False, help="JSON file with domain")
parser.add_argument('--dtg', type=str, required=False, default=None)
parser.add_argument('--output', '-o', type=str, required=False, default=None)
parser.add_argument('--only_archive', action="store_true", default=False, help="Only call archiving")
parser.add_argument('--print_namelist', action="store_true", default=False, help="Print namelist used")
parser.add_argument('--mode', '-m', type=str, required=True, choices=["forecast", "canari"])
parser.add_argument('--archive', '-a', required=False, default=None, nargs='?',
help="JSON file with archive output")
parser.add_argument('--binary', '-b', required=False, default=None, nargs='?',
help="Full path of MASTERODB binary")
if len(argv) == 0:
parser.print_help()
sys.exit(1)
args = parser.parse_args(argv)
kwargs = {}
for arg in vars(args):
kwargs.update({arg: getattr(args, arg)})
return kwargs
def run_masterodb(**kwargs):
print("ARGS: ", kwargs)
if "harmonie" in kwargs and kwargs["harmonie"]:
config_exp = None
if "config" in kwargs:
if kwargs["config"] is not None:
config_exp = kwargs["config"]
if config_exp is None:
config_exp = surfex.__path__[0] + "/../scheduler/config/config_exp_surfex.toml"
print("Using default config from: " + config_exp)
input_data = toml.load(open(config_exp, "r"))
config = surfex.ConfigurationFromHarmonie(os.environ, input_data)
else:
if "domain" not in kwargs:
raise Exception("Missing domain definition")
if "config" not in kwargs:
raise Exception("Missing config")
domain = kwargs["domain"]
if os.path.exists(domain):
geo = surfex.geo.get_geo_object(json.load(open(domain, "r")))
else:
raise FileNotFoundError("File not found: " + domain)
config = kwargs["config"]
if os.path.exists(config):
input_data = toml.load(open(config, "r"))
config = surfex.Configuration(input_data, {}, geo=geo)
else:
raise FileNotFoundError("File not found: " + config)
if "config" in kwargs:
del(kwargs["config"])
system_file_paths = kwargs["system_file_paths"]
if os.path.exists(system_file_paths):
system_file_paths = surfex.SystemFilePathsFromFile(system_file_paths)
else:
raise FileNotFoundError("File not found: " + system_file_paths)
del(kwargs["system_file_paths"])
my_geo = config.get_setting("GEOMETRY#GEO")
binary = kwargs["binary"]
rte = kwargs["rte"]
wrapper = kwargs["wrapper"]
namelist_path = kwargs["namelist_path"]
force = kwargs["force"]
mode = kwargs["mode"]
output = kwargs["output"]
archive = kwargs["archive"]
only_archive = kwargs["only_archive"]
print_namelist = kwargs["print_namelist"]
if "dtg" in kwargs:
if kwargs["dtg"] is not None and isinstance(kwargs["dtg"], str):
dtg = datetime.strptime(kwargs["dtg"], "%Y%m%d%H")
kwargs.update({"dtg": dtg})
pgd_file_path = kwargs["pgd"]
prep_file_path = kwargs["prep"]
if os.path.exists(rte):
my_batch = surfex.BatchJob(json.load(open(rte, "r")), wrapper=wrapper)
else:
raise FileNotFoundError
my_archive = None
if archive is not None:
if os.path.exists(archive):
my_archive = surfex.JsonOutputDataFromFile(archive)
else:
raise FileNotFoundError
if mode == "forecast":
input_data = surfex.InlineForecastInputData(config, system_file_paths, **kwargs)
mode = "offline"
elif mode == "canari":
input_data = surfex.SodaInputData(config, system_file_paths, **kwargs)
mode = "soda"
else:
raise NotImplementedError(mode + " is not implemented!")
my_settings = surfex.BaseNamelist(mode, config, namelist_path, **kwargs).get_namelist()
my_geo.update_namelist(my_settings)
# Create input
my_format = my_settings["nam_io_offline"]["csurf_filetype"]
my_pgdfile = my_settings["nam_io_offline"]["cpgdfile"]
my_prepfile = my_settings["nam_io_offline"]["cprepfile"]
my_surffile = my_settings["nam_io_offline"]["csurffile"]
lfagmap = False
if "lfagmap" in my_settings["nam_io_offline"]:
lfagmap = my_settings["nam_io_offline"]["lfagmap"]
print(my_pgdfile, lfagmap)
# Only run the binary if we are not just archiving
masterodb = None
if not only_archive:
# Normal dry or wet run
exists = False
if output is not None:
exists = os.path.exists(output)
if not exists or force:
if binary is None:
my_batch = None
my_pgdfile = surfex.file.PGDFile(my_format, my_pgdfile, my_geo, input_file=pgd_file_path, lfagmap=lfagmap,
masterodb=True)
my_prepfile = surfex.PREPFile(my_format, my_prepfile, my_geo, input_file=prep_file_path, lfagmap=lfagmap,
masterodb=True)
surffile = surfex.SURFFile(my_format, my_surffile, my_geo, archive_file=output, lfagmap=lfagmap,
masterodb=True)
masterodb = surfex.Masterodb(my_pgdfile, my_prepfile, surffile, my_settings, input_data, binary=binary,
print_namelist=print_namelist, batch=my_batch, archive_data=my_archive)
else:
print(output + " already exists!")
if archive is not None:
if masterodb is not None:
masterodb.archive_output()
else:
print("Masterodb is None")
def parse_args_surfex_binary(argv, mode):
"""Parse the command line input arguments."""
pert = False
need_pgd = True
need_prep = True
if mode == "pgd":
need_pgd = False
need_prep = False
desc = "Create physiography for SURFEX (PGD)"
elif mode == "prep":
need_prep = False
desc = "Prepare initial conditions for SURFEX"
elif mode == "offline":
desc = "Run Offline SURFEX"
elif mode == "soda":
desc = "Run SURFEX data assimilation (SODA)"
elif mode == "perturbed":
pert = True
desc = "Run perturbed Offline SURFEX"
else:
raise NotImplementedError(mode + " is not implemented!")
parser = ArgumentParser(description=desc)
parser.add_argument('--version', action='version', version=surfex.__version__)
parser.add_argument('--wrapper', '-w', type=str, default="", help="Execution wrapper command")
if need_pgd:
parser.add_argument('--pgd', type=str, nargs="?", required=True, help="Name of the PGD file")
if need_prep:
parser.add_argument('--prep', type=str, nargs="?", required=True, help="Name of the PREP file")
if mode == "prep":
parser.add_argument('--prep_file', required=False, default=None, nargs='?')
parser.add_argument('--prep_filetype', required=False, default=None, nargs='?')
parser.add_argument('--prep_pgdfile', required=False, default=None, nargs='?')
parser.add_argument('--prep_pgdfiletype', required=False, default=None, nargs='?')
if mode == "offline" or mode == "perturbed":
parser.add_argument('--forc_zs', action="store_true", default=False, help="Set model ZS to forcing ZS")
parser.add_argument('--forcing_dir', required=False, default=None, nargs='?')
parser.add_argument('--force', '-f', action="store_true", help="Force re-creation")
parser.add_argument('--harmonie', action="store_true", default=False,
help="Surfex configuration created from Harmonie environment")
parser.add_argument('--print_namelist', action="store_true", default=False, help="Print namelist used")
parser.add_argument('--masterodb', action="store_true", default=False, help="Input file written by masterodb")
parser.add_argument('--rte', '-r', required=True, nargs='?')
parser.add_argument('--config', '-c', required=False, nargs='?')
parser.add_argument('--system_file_paths', '-s', required=True, nargs='?', help="Input file paths on your system")
parser.add_argument('--namelist_path', '-n', required=True, nargs='?')
parser.add_argument('--domain', type=str, required=False, help="JSON file with domain")
parser.add_argument('--output', '-o', type=str, required=True)
parser.add_argument('--dtg', type=str, required=False, default=None)
if pert:
parser.add_argument('--pert', '-p', type=int, required=False, default=None)
parser.add_argument('--archive', '-a', type=str, required=False, default=None, nargs='?',
help="JSON file with archive output")
parser.add_argument('binary', type=str, help="Command to run")
if len(argv) == 0:
parser.print_help()
sys.exit(1)
args = parser.parse_args(argv)
kwargs = {}
for arg in vars(args):
kwargs.update({arg: getattr(args, arg)})
return kwargs
def run_surfex_binary(mode, **kwargs):
print("ARGS: ", kwargs)
if "harmonie" in kwargs and kwargs["harmonie"]:
config_exp = None
if "config" in kwargs:
if kwargs["config"] is not None:
config_exp = kwargs["config"]
if config_exp is None:
config_exp = surfex.__path__[0] + "/../scheduler/config/config_exp_surfex.toml"
print("Using default config from: " + config_exp)
input_data = toml.load(open(config_exp, "r"))
config = surfex.ConfigurationFromHarmonie(os.environ, input_data)
else:
if "domain" not in kwargs:
raise Exception("Missing domain definition")
if "config" not in kwargs:
raise Exception("Missing config")
domain = kwargs["domain"]
if os.path.exists(domain):
geo = surfex.geo.get_geo_object(json.load(open(domain, "r")))
else:
raise FileNotFoundError("File not found: " + domain)
config = kwargs["config"]
if os.path.exists(config):
input_data = toml.load(open(config, "r"))
config = surfex.Configuration(input_data, {}, geo=geo)
else:
raise FileNotFoundError("File not found: " + config)
if "config" in kwargs:
del(kwargs["config"])
system_file_paths = kwargs["system_file_paths"]
if os.path.exists(system_file_paths):
system_file_paths = surfex.SystemFilePathsFromFile(system_file_paths)
else:
raise FileNotFoundError("File not found: " + system_file_paths)
del(kwargs["system_file_paths"])
my_geo = config.get_setting("GEOMETRY#GEO")
if "forcing_dir" in kwargs:
system_file_paths.add_system_file_path("forcing_dir", kwargs["forcing_dir"])
pgd = False
prep = False
perturbed = False
need_pgd = True
need_prep = True
if mode == "pgd":
pgd = True
need_pgd = False
need_prep = False
input_data = surfex.PgdInputData(config, system_file_paths, **kwargs)
elif mode == "prep":
prep = True
need_prep = False
input_data = surfex.PrepInputData(config, system_file_paths, **kwargs)
elif mode == "offline":
input_data = surfex.OfflineInputData(config, system_file_paths, **kwargs)
elif mode == "soda":
input_data = surfex.SodaInputData(config, system_file_paths, **kwargs)
elif mode == "perturbed":
perturbed = True
input_data = surfex.OfflineInputData(config, system_file_paths, **kwargs)
else:
raise NotImplementedError(mode + " is not implemented!")
binary = kwargs["binary"]
rte = kwargs["rte"]
wrapper = kwargs["wrapper"]
namelist_path = kwargs["namelist_path"]
force = kwargs["force"]
output = kwargs["output"]
# domain = kwargs["domain"]
archive = kwargs["archive"]
print_namelist = kwargs["print_namelist"]
masterodb = kwargs["masterodb"]
print("masterodb ", masterodb)
if "dtg" in kwargs:
if kwargs["dtg"] is not None and isinstance(kwargs["dtg"], str):
dtg = datetime.strptime(kwargs["dtg"], "%Y%m%d%H")
kwargs.update({"dtg": dtg})
pgd_file_path = None
if need_pgd:
pgd_file_path = kwargs["pgd"]
prep_file_path = None
if need_prep:
prep_file_path = kwargs["prep"]
pert = None
if perturbed:
pert = kwargs["pert"]
if os.path.exists(rte):
my_batch = surfex.BatchJob(json.load(open(rte, "r")), wrapper=wrapper)
else:
raise FileNotFoundError("File not found: " + rte)
my_archive = None
if archive is not None:
if os.path.exists(archive):
my_archive = surfex.JsonOutputDataFromFile(archive)
else:
raise FileNotFoundError("File not found: " + archive)
if not os.path.exists(output) or force:
my_settings = surfex.BaseNamelist(mode, config, namelist_path, **kwargs).get_namelist()
my_geo.update_namelist(my_settings)
# Create input
my_format = my_settings["nam_io_offline"]["csurf_filetype"]
my_pgdfile = my_settings["nam_io_offline"]["cpgdfile"]
my_prepfile = my_settings["nam_io_offline"]["cprepfile"]
my_surffile = my_settings["nam_io_offline"]["csurffile"]
lfagmap = False
if "lfagmap" in my_settings["nam_io_offline"]:
lfagmap = my_settings["nam_io_offline"]["lfagmap"]
print(my_pgdfile, lfagmap)
if need_pgd:
my_pgdfile = surfex.file.PGDFile(my_format, my_pgdfile, my_geo, input_file=pgd_file_path, lfagmap=lfagmap,
masterodb=masterodb)
if need_prep:
my_prepfile = surfex.PREPFile(my_format, my_prepfile, my_geo, input_file=prep_file_path, lfagmap=lfagmap,
masterodb=masterodb)
surffile = None
if need_prep and need_pgd:
surffile = surfex.SURFFile(my_format, my_surffile, my_geo, archive_file=output, lfagmap=lfagmap,
masterodb=masterodb)
if perturbed:
surfex.PerturbedOffline(binary, my_batch, my_prepfile, pert, my_settings, input_data,
pgdfile=my_pgdfile, surfout=surffile, archive_data=my_archive,
print_namelist=print_namelist)
elif pgd:
my_pgdfile = surfex.file.PGDFile(my_format, my_pgdfile, my_geo, input_file=pgd_file_path,
archive_file=output, lfagmap=lfagmap, masterodb=masterodb)
surfex.SURFEXBinary(binary, my_batch, my_pgdfile, my_settings, input_data,
archive_data=my_archive, print_namelist=print_namelist)
elif prep:
my_prepfile = surfex.PREPFile(my_format, my_prepfile, my_geo, archive_file=output, lfagmap=lfagmap,
masterodb=masterodb)
surfex.SURFEXBinary(binary, my_batch, my_prepfile, my_settings, input_data, pgdfile=my_pgdfile,
archive_data=my_archive, print_namelist=print_namelist)
else:
surfex.SURFEXBinary(binary, my_batch, my_prepfile, my_settings, input_data, pgdfile=my_pgdfile,
surfout=surffile, archive_data=my_archive,
print_namelist=print_namelist)
else:
print(output + " already exists!")
def parse_args_gridpp(argv):
parser = ArgumentParser(description="Create horisontal OI analysis")
parser.add_argument('-i', '--input_file', type=str, help="Input NetCDF file with all variables", required=True)
parser.add_argument('-obs', '--obs_file', type=str, help="Input JSON file with QC observations", required=True)
parser.add_argument('-o', '--output_file', type=str, help="Output NetCDF file with all variables", required=True)
parser.add_argument('-v', '--var', type=str, help="Variable", required=True)
parser.add_argument('-hor', dest='hlength', type=float, required=True)
parser.add_argument('-vert', dest='vlength', type=float, default=100000, required=False)
parser.add_argument('--wlength', dest='wlength', type=float, default=0., required=False)
parser.add_argument('--landOnly', dest='land_only', action="store_true", default=False)
parser.add_argument('--maxLocations', dest='max_locations', type=int, default=20, required=False)
parser.add_argument('--elevGradient', dest='elev_gradient', type=float, default=-0.0065, required=False,
choices=[0, -0.0065])
parser.add_argument('--epsilon', dest='epsilon', type=float, default=0.5, required=False)
parser.add_argument('--minvalue', dest='minvalue', type=float, default=None, required=False)
parser.add_argument('--maxvalue', dest='maxvalue', type=float, default=None, required=False)
parser.add_argument('--version', action='version', version=surfex.__version__)
if len(argv) == 0:
parser.print_help()
sys.exit(1)
return parser.parse_args(argv)
def run_gridpp(args):
var = args.var
input_file = args.input_file
output_file = args.output_file
hlength = args.hlength
vlength = args.vlength
wlength = args.wlength
land_only = args.land_only
max_locations = args.max_locations
elev_gradient = args.elev_gradient
epsilon = args.epsilon
minvalue = args.minvalue
maxvalue = args.maxvalue
# Get input fields
geo, validtime, background, glafs, gelevs = surfex.read_first_guess_netcdf_file(input_file, var)
an_time = validtime
# Read OK observations
observations = surfex.dataset_from_file(an_time, args.obs_file, qc_flag=0)
field = surfex.horizontal_oi(geo, background, observations, gelevs=gelevs, glafs=glafs, hlength=hlength,
vlength=vlength, wlength=wlength, structure_function="Barnes",
land_only=land_only, max_locations=max_locations, elev_gradient=elev_gradient,
epsilon=epsilon, minvalue=minvalue, maxvalue=maxvalue, interpol="bilinear")
surfex.write_analysis_netcdf_file(output_file, field, var, validtime, gelevs, glafs, new_file=True, geo=geo)
def parse_args_titan(argv):
parser = ArgumentParser(description="Do quality control of observations")
parser.add_argument('-i', '--input_file', type=str, help="Input json file with observation sets and test settings",
required=True)
parser.add_argument('-o', '--output_file', type=str, help="Output json file with quality checked observations",
required=False, default="qc_obs.json")
parser.add_argument('-v', '--variable', type=str, required=True, help="Observation variable")
parser.add_argument('--indent', type=int, default=None, help="Indent")
parser.add_argument('-dtg', type=str, help="Date time group YYYYMMDDHH", required=True)
parser.add_argument('--harmonie', action="store_true", default=False,
help="Surfex configuration created from Harmonie environment")
parser.add_argument('tests', nargs='+', type=str, help="Which tests to run and order to run")
parser.add_argument('--blacklist', type=str, required=False, default=None, help="JSON file with blacklist")
parser.add_argument('--domain', type=str, required=False, default=None, help="JSON file with domain")
parser.add_argument('--version', action='version', version=surfex.__version__)
if len(argv) == 0:
parser.print_help()
sys.exit(1)
args = parser.parse_args(argv)
kwargs = {}
for arg in vars(args):
kwargs.update({arg: getattr(args, arg)})
return kwargs
def run_titan(**kwargs):
domain_geo = None
if "harmonie" in kwargs and kwargs["harmonie"]:
config_exp = None
if "config" in kwargs:
if kwargs["config"] is not None:
config_exp = kwargs["config"]
if config_exp is None:
config_exp = surfex.__path__[0] + "/../scheduler/config/config_exp_surfex.toml"
print("Using default config from: " + config_exp)
input_data = toml.load(open(config_exp, "r"))
config = surfex.ConfigurationFromHarmonie(os.environ, input_data)
domain_geo = config.get_setting("GEOMETRY#GEO")
elif "domain" in kwargs:
if kwargs["domain"] is not None:
domain_geo = surfex.get_geo_object(json.load(open(kwargs["domain"], "r")))
del(kwargs["domain"])
# Set domain geo if set
kwargs.update({"domain_geo": domain_geo})
blacklist = None
if "blacklist" in kwargs:
if kwargs["blacklist"] is not None:
blacklist = json.load(open(kwargs["blacklist"], "r"))
kwargs.update({"blacklist": blacklist})
input_file = kwargs["input_file"]
if os.path.exists(input_file):
settings = json.load(open(input_file, "r"))
else:
raise FileNotFoundError("Could not find input file " + input_file)
tests = kwargs["tests"]
output_file = kwargs["output_file"]
indent = kwargs["indent"]
an_time = kwargs["dtg"]
if isinstance(an_time, str):
an_time = datetime.strptime(an_time, "%Y%m%d%H")
kwargs.update({"an_time": an_time})
var = kwargs["variable"]
tests = surfex.titan.define_quality_control(tests, settings[var], an_time, domain_geo=domain_geo,
blacklist=blacklist)
# print(settings)
datasources = surfex.obs.get_datasources(an_time, settings[var]["sets"])
data_set = surfex.TitanDataSet(var, settings[var], tests, datasources, an_time, debug=True)
data_set.perform_tests()
data_set.write_output(output_file, indent=indent)
def parse_args_oi2soda(argv):
parser = ArgumentParser(description="Create ASCII input for SODA from gridPP files")
parser.add_argument('--t2m_file', type=str, help="NetCDF file for T2M", required=False, default=None)
parser.add_argument('--t2m_var', type=str, help="NetCDF variable name for T2M", required=False,
default="air_temperature_2m")
parser.add_argument('--rh2m_file', type=str, help="NetCDF file for RH2M", required=False, default=None)
parser.add_argument('--rh2m_var', type=str, help="NetCDF variable name for RH2M", required=False,
default="relative_humidity_2m")
parser.add_argument('--sd_file', type=str, help="NetCDF file for SD", required=False, default=None)
parser.add_argument('--sd_var', type=str, help="NetCDF variable name for SD", required=False,
default="surface_snow_thickness")
parser.add_argument('dtg', nargs="?", type=str, help="DTG", default=None)
parser.add_argument("-o", dest="output", type=str, help="Output file", default=None)
parser.add_argument('--version', action='version', version=surfex.__version__)
if len(argv) < 3:
parser.print_help()
sys.exit(1)
return parser.parse_args(argv)
def run_oi2soda(args):
t2m_file = args.t2m_file
rh2m_file = args.rh2m_file
sd_file = args.sd_file
output = args.output
t2m = None
if t2m_file is not None:
t2m = {"file": t2m_file, "var": args.t2m_var}
rh2m = None
if rh2m_file is not None:
rh2m = {"file": rh2m_file, "var": args.rh2m_var}
sd = None
if sd_file is not None:
sd = {"file": sd_file, "var": args.sd_var}
dtg = datetime.strptime(args.dtg, "%Y%m%d%H")
surfex.oi2soda(dtg, t2m=t2m, rh2m=rh2m, sd=sd, output=output)
def parse_lsm_file_assim(argv):
parser = ArgumentParser(description="Create ASCII LSM input for SODA")
parser.add_argument('--file', type=str, help="Input file name", required=True)
parser.add_argument('--fileformat', type=str, help="Input fileformat", required=True)
parser.add_argument('--var', type=str, help="Variable in input file", required=False,
default="air_temperature_2m")
parser.add_argument('--converter', type=str, help="Converter for variable", required=False, default="none")
parser.add_argument('--dtg', type=str, help="DTG", default=None, required=False)
parser.add_argument('--domain', type=str, help="Domain", required=True)
parser.add_argument("-o", dest="output", type=str, help="Output file", default=None)
parser.add_argument('--version', action='version', version=surfex.__version__)
if len(argv) < 3:
parser.print_help()
sys.exit(1)
args = parser.parse_args(argv)
kwargs = {}
for arg in vars(args):
kwargs.update({arg: getattr(args, arg)})
domain = kwargs["domain"]
print(domain)
if os.path.exists(domain):
domain_json = json.load(open(domain, "r"))
kwargs.update({"geo": surfex.get_geo_object(domain_json)})
else:
raise FileNotFoundError(domain)
dtg = kwargs["dtg"]
if dtg is not None:
kwargs.update({"dtg": datetime.strptime(dtg, "%Y%m%d%H")})
return kwargs
def lsm_file_assim(**kwargs):
validtime = kwargs["dtg"]
cache = surfex.cache.Cache(True, 3600)
geo = kwargs["geo"]
inputfile = kwargs["file"]
fileformat = kwargs["fileformat"]
converter = kwargs["converter"]
output = kwargs["output"]
var = kwargs["var"]
defs = {
"filepattern": inputfile,
"fileformat": fileformat,
"fcint": 3,
"offset": 0,
"file_inc": 1
}
print(var, fileformat)
converter_conf = {
"none": {
"name": var
}
}
var = "LSM"
converter = surfex.read.Converter(converter, validtime, defs, converter_conf, fileformat, validtime)
field = surfex.read.ConvertedInput(geo, var, converter).read_time_step(validtime, cache)
field = np.reshape(field, [geo.nlons, geo.nlats])
field = np.transpose(field)
fh = open(output, "w")
for lat in range(0, geo.nlats):
for lon in range(0, geo.nlons):
# print(field[lat, lon])
fh.write(str(field[lat, lon]) + "\n")
fh.close()
def hm2pysurfex(**kwargs):
# surfex.Configuration
wd = "/home/trygveasp/sfx_home/new_ana/"
config = surfex.toml_load(wd + "/config/config.toml")
print(config)
config_files = {}
for f in config["config_files"]:
# EPS settings already in environment in HM
if f != "config_exp_eps.toml":
toml_dict = surfex.toml_load(wd + "/config/" + f)
config_files.update({f: {"toml": toml_dict, "blocks": config[f]["blocks"]}})
all_merged_settings = surfex.merge_toml_env_from_config_dicts(config_files)
# merged_config, member_merged_config = surfex.process_merged_settings(all_merged_settings)
system_file_paths = json.load(open(wd + "/Env_input_paths", "r"))
# Create configuration
config = surfex.ConfigurationFromHarmonie(os.environ, all_merged_settings)
namelist = surfex.BaseNamelist("pgd", config, wd + "/nam").get_namelist()
# namelist, eco, inp = surfex.set_json_namelist_from_toml_env("pgd", config, wd + "/nam", system_file_paths)
print(namelist)
inp = surfex.PgdInputData(config=config, system_file_paths=system_file_paths)
print(inp.data)
|
[
"trygveasp@met.no"
] |
trygveasp@met.no
|
3999cde4262817329bdd68fd5ae82079cf8e5078
|
1b382fa35424074f6e93d5efa26412057507ef7e
|
/brax/experimental/composer/composer.py
|
4a850f3ccb8b2b5020d9be7537077256b6e02021
|
[
"Apache-2.0"
] |
permissive
|
LARS12llt/brax
|
91f2914f78480308930dc83435f076de8a55b470
|
8cf936d60a393f586daa145e8f378c7aa4bafce6
|
refs/heads/main
| 2023-07-27T22:49:59.609896
| 2021-09-17T11:16:49
| 2021-09-17T15:06:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,125
|
py
|
# Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Composer for environments.
ComponentEnv composes a scene from descriptions of the form below:
composer = Composer(
components=dict(
ant1=dict(component='ant', pos=(0, 1, 0)),
ant2=dict(component='ant', pos=(0, -1, 0)),
),
edges=dict(ant1__ant2=dict(collide_type='full'),),
)
env = ComponentEnv(composer=composer)
(more examples available in experimental/composer/env_descs.py)
During loading, it:
- creates components: loads and pieces together brax.Config()
components defined in experimental/composer/components/
such as ant.py or ground.py
- support multiple instances of the same component through suffixes
- each component requires: ROOT=root body, SYS_CONFIG=config in string form,
TERM_FN=termination function of this component, COLLIDES=bodies that
are allowed to collide, DEFAULT_OBSERVERS=a list of observers (
see experimental/composer/observers.py for references)
- creates edges: automatically create necessary edge information
between components, such as collide_include's in brax.Config()
- optionally edge information can be supplied,
e.g. `collide_type`={'full', 'root', None} specifying full collisions,
collision only between roots, or no collision between two components
- sets termination as any(termination_fn of each component)
- sets observation to concatenation of observations of each component defined
by each component's `observers` argument
"""
import collections
import copy
import functools
import itertools
from typing import Dict, Any, Callable, Tuple
import brax
from brax import envs
from brax.envs import Env
from brax.envs import State
from brax.experimental.braxlines.common import sim_utils
from brax.experimental.composer import component_editor
from brax.experimental.composer import env_descs
from brax.experimental.composer import observers
import jax
from jax import numpy as jnp
MetaData = collections.namedtuple('MetaData', [
'components', 'edges', 'global_options', 'config_str', 'config_json',
'extra_observers'
])
class Composer(object):
"""Compose a brax system."""
def __init__(self,
components: Dict[str, Dict[str, Any]],
edges: Dict[str, Dict[str, Any]] = None,
extra_observers: Tuple[observers.Observer] = (),
add_ground: bool = True,
global_options: Dict[str, Any] = None):
components = copy.deepcopy(components)
edges = copy.deepcopy(edges or {})
# load components
if add_ground:
components['ground'] = dict(component='ground')
components = {
name: component_editor.load_component(**value)
for name, value in components.items()
}
component_keys = sorted(components.keys())
components_ = collections.OrderedDict([
(k, components[k]) for k in component_keys
])
# set global
v = dict(
json=component_editor.json_global_options(**(global_options or {})))
v['message_str'] = component_editor.json2message_str(v['json'])
global_options_ = v
for k, v in components_.items():
# convert to json format for easy editing
v['json'] = component_editor.message_str2json(v['message_str'])
# add suffixes
suffix = v.get('suffix', k)
if suffix:
rename_fn = functools.partial(
component_editor.json_add_suffix, suffix=suffix)
v['json'] = rename_fn(v['json'])
v['collides'] = rename_fn(v['collides'], force_add=True)
v['root'] = rename_fn(v['root'], force_add=True)
v['bodies'] = [b['name'] for b in v['json'].get('bodies', [])]
v['joints'] = [b['name'] for b in v['json'].get('joints', [])]
v['suffix'] = suffix
# convert back to str
v['message_str'] = component_editor.json2message_str(v['json'])
# set transform or not
if 'pos' in v or 'quat' in v:
v['transform'] = True
v['pos'] = jnp.array(v.get('pos', [0, 0, 0]), dtype='float')
v['quat_origin'] = jnp.array(
v.get('quat_origin', [0, 0, 0]), dtype='float')
v['quat'] = jnp.array(v.get('quat', [1., 0., 0., 0.]), dtype='float')
else:
v['transform'] = False
edges_ = {}
for k1, k2 in itertools.combinations(list(components_.keys()), 2):
if k1 == k2:
continue
k1, k2 = sorted([k1, k2]) # ensure the name is always sorted in order
edge_name = f'{k1}__{k2}'
v, new_v = edges.pop(edge_name, {}), {}
v1, v2 = [components_[k] for k in [k1, k2]]
collide_type = v.pop('collide_type', 'full')
v_json = {}
# add colliders
if collide_type == 'full':
v_json.update(
component_editor.json_collides(v1['collides'], v2['collides']))
elif collide_type == 'root':
v_json.update(
component_editor.json_collides([v1['root']], [v2['root']]))
else:
assert not collide_type, collide_type
if v_json:
# convert back to str
new_v['message_str'] = component_editor.json2message_str(v_json)
else:
new_v['message_str'] = ''
new_v['json'] = v_json
assert not v, f'unused edges[{edge_name}]: {v}'
edges_[edge_name] = new_v
assert not edges, f'unused edges: {edges}'
edge_keys = sorted(edges_.keys())
edges_ = collections.OrderedDict([(k, edges_[k]) for k in edge_keys])
# merge all message strs
message_str = ''
for _, v in sorted(components_.items()):
message_str += v.get('message_str', '')
for _, v in sorted(edges_.items()):
message_str += v.get('message_str', '')
message_str += global_options_.get('message_str', '')
config_str = message_str
config_json = component_editor.message_str2json(message_str)
metadata = MetaData(
components=components_,
edges=edges_,
global_options=global_options_,
config_str=config_str,
config_json=config_json,
extra_observers=extra_observers,
)
config = component_editor.message_str2message(message_str)
self.config, self.metadata = config, metadata
def reset_fn(self, sys, qp: brax.QP):
"""Reset state."""
# apply translations and rotations
for _, v in sorted(self.metadata.components.items()):
if v['transform']:
_, _, mask = sim_utils.names2indices(sys.config, v['bodies'], 'body')
qp = sim_utils.transform_qp(qp, mask[..., None], v['quat'],
v['quat_origin'], v['pos'])
return qp
def term_fn(self, done: jnp.ndarray, sys, qp: brax.QP, info: brax.Info):
"""Termination."""
for k, v in self.metadata.components.items():
term_fn = v['term_fn']
if term_fn:
done = term_fn(done, sys, qp, info, k)
return done
def obs_fn(self, sys, qp: brax.QP, info: brax.Info):
"""Return observation as OrderedDict."""
cached_obs_dict = {}
obs_dict = collections.OrderedDict()
for _, v in self.metadata.components.items():
for observer in v['observers']:
obs_dict_ = observers.get_obs_dict(sys, qp, info, observer,
cached_obs_dict, v)
obs_dict = collections.OrderedDict(
list(obs_dict.items()) + list(obs_dict_.items()))
for observer in self.metadata.extra_observers:
obs_dict_ = observers.get_obs_dict(sys, qp, info, observer,
cached_obs_dict, None)
obs_dict = collections.OrderedDict(
list(obs_dict.items()) + list(obs_dict_.items()))
return obs_dict
class ComponentEnv(Env):
"""Make a brax Env fromc config/metadata for training and inference."""
def __init__(self, composer: Composer, *args, **kwargs):
self.observer_shapes = None
self.composer = composer
super().__init__(
*args, config=self.composer.metadata.config_str, **kwargs)
def reset(self, rng: jnp.ndarray) -> State:
"""Resets the environment to an initial state."""
qp = self.sys.default_qp()
qp = self.composer.reset_fn(self.sys, qp)
info = self.sys.info(qp)
obs = self._get_obs(qp, info)
reward, done = jnp.zeros(2)
metrics = {}
return State(qp, obs, reward, done, metrics, info)
def step(self, state: State, action: jnp.ndarray) -> State:
"""Run one timestep of the environment's dynamics."""
qp, info = self.sys.step(state.qp, action)
obs = self._get_obs(qp, info)
reward = 0.0
done = False
done = self.composer.term_fn(done, self.sys, qp, info)
metrics = {}
return State(qp, obs, reward, done, metrics, info)
def _get_obs(
self,
qp: brax.QP,
info: brax.Info,
) -> jnp.ndarray:
"""Observe."""
obs_dict = self.composer.obs_fn(self.sys, qp, info)
if self.observer_shapes is None:
self.observer_shapes = observers.get_obs_dict_shape(obs_dict)
return jnp.concatenate(list(obs_dict.values()))
def get_env_obs_dict_shape(env: Env):
"""Gets an Env's observation shape(s)."""
if isinstance(env, ComponentEnv):
assert env.observation_size # ensure env.observer_shapes is set
return env.observer_shapes
else:
return (env.observation_size,)
def create(env_name: str = None,
components: Dict[str, Dict[str, Any]] = None,
edges: Dict[str, Dict[str, Any]] = None,
add_ground: bool = True,
global_options: Dict[str, Any] = None,
**kwargs) -> Env:
"""Creates an Env with a specified brax system."""
if env_name in env_descs.ENV_DESCS:
composer = Composer(
add_ground=add_ground,
global_options=global_options,
**env_descs.ENV_DESCS[env_name])
return ComponentEnv(composer=composer, **kwargs)
elif components:
composer = Composer(
components=components,
edges=edges,
add_ground=add_ground,
global_options=global_options)
return ComponentEnv(composer=composer, **kwargs)
else:
return envs.create(env_name, **kwargs)
def create_fn(env_name: str = None,
components: Dict[str, Dict[str, Any]] = None,
edges: Dict[str, Dict[str, Any]] = None,
add_ground: bool = True,
global_options: Dict[str, Any] = None,
**kwargs) -> Callable[..., Env]:
"""Returns a function that when called, creates an Env."""
return functools.partial(
create,
env_name=env_name,
components=components,
edges=edges,
add_ground=add_ground,
global_options=global_options,
**kwargs)
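# Example (a hypothetical sketch, not part of the original module): `create_fn`
# returns a factory, the form training loops typically expect. The env name
# 'ant_chase' is an assumed key in env_descs.ENV_DESCS, not a guaranteed one.
#
#   env_fn = create_fn(env_name='ant_chase')
#   env = env_fn()
#   state = env.reset(rng=jax.random.PRNGKey(0))
#   state = env.step(state, jnp.zeros(env.action_size))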
|
[
"erikfrey@google.com"
] |
erikfrey@google.com
|
56c1035dc9a2ff3dd0e77b2fe3db2a127c3c1dbb
|
59c5820be32dd498b6cda019b268c05db90a9ab3
|
/soundscapes/soundscape_splitter.py
|
e49c6f52a7a5f781bf3e63a009ad153df941d0c9
|
[
"Apache-2.0"
] |
permissive
|
thesteve0/birdclef21
|
1d881035e9e90f95536e1382b796f25c11326438
|
9c8748edbd6febe88191736406d838787e3c7a71
|
refs/heads/main
| 2023-05-02T21:52:35.733043
| 2021-05-21T16:28:24
| 2021-05-21T16:28:24
| 357,412,620
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,199
|
py
|
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import math
import os
def audioToSlicedSpecto(input_file, output_stub):
chunk_length_sec = 5
# Set some of the values we use for the Spectrogram
n_fft = 2048
n_mels = 256
    hop_length = 256  # number of samples the analysis window advances between frames (sets the spectrogram's time resolution)
y, sample_rate = librosa.load(input_file, sr=None)
# Trim the silent edges from the file
sound_array, _ = librosa.effects.trim(y)
sound_array_median = np.median(sound_array)
print('loaded file: ' + input_file)
# sample rate is samples per second so the length of the array divided by the sample rate tells us the seconds in the total track
track_length = math.floor(librosa.get_duration(sound_array, sr=sample_rate))
    # build a list of chunk boundaries in seconds, stepping by chunk_length_sec from 0 up to the track length
time_steps = np.arange(0, track_length + 1, chunk_length_sec).tolist()
    # TODO we need to add 0 padding to the array to make it divisible by 5 seconds
    # and add the new last 5-second segment time to the time_steps array
    # if time_steps[-1] < track_length:
    #     time_steps.append(track_length)
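    # One possible sketch for the TODO above (an assumption, not the original author's
    # implementation): zero-pad the signal to a multiple of chunk_length_sec seconds and
    # extend time_steps so the final partial chunk is also sliced.
    #
    #   chunk_samples = chunk_length_sec * sample_rate
    #   remainder = len(sound_array) % chunk_samples
    #   if remainder:
    #       sound_array = np.pad(sound_array, (0, chunk_samples - remainder))
    #       time_steps.append(time_steps[-1] + chunk_length_sec)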
# make two lists out of all the time steps we care about
# time steps = [0,5,7]
# starts = [0,5]
# stops = [5,7]
start_times = time_steps[:-1]
stop_times = time_steps[1:]
start_samples = list(map(lambda x: x * sample_rate, start_times))
stop_samples = list(map(lambda x: x * sample_rate, stop_times))
    for i, (start, stop) in enumerate(zip(start_times, stop_times)):
        # create a fresh figure per chunk (plt.close() below discards it) so every saved image keeps the intended size
        plt.figure(figsize=(60.48, 15.60), edgecolor='black', facecolor='black')
out_filename = ''
# slice the original signal list
audio = sound_array[start_samples[i]:stop_samples[i]]
        # output_stub already has the .ogg extension stripped (see __main__ below)
        out_filename = ''.join((output_stub, '_', str(start), '_', str(stop), '.png'))
mel = librosa.feature.melspectrogram
S = mel(audio, sr=sample_rate, n_fft=n_fft, hop_length=hop_length, n_mels=n_mels,
fmin=1600.0, fmax=11000)
        # amin is the amplitude minimum (related to dB) that is considered greater than 0. The higher you make the number, the more noise you remove,
        # but you may actually start to remove the information you want.
        # ref is the value against which all the values are standardized. Possible choices are mean, median, max.
        # We actually ended up using the median of the entire audio clip to rescale the audio values in each individual clip.
p_to_d = librosa.power_to_db
S_DB = p_to_d(S, ref=sound_array_median, amin=0.0015)
spshow = librosa.display.specshow
spshow(S_DB, sr=sample_rate, hop_length=hop_length)
# Remove the black color using the method here in the article and save to disk
# https://www.delftstack.com/howto/matplotlib/hide-axis-borders-and-white-spaces-in-matplotlib/
plt.savefig(out_filename, bbox_inches='tight', pad_inches=0)
plt.close()
if __name__ == '__main__':
# Input directory should be a single directory with every species in its own sub-directory and no directories below that
input_directory = r'C:\Users\steve\data\six_species'
# This script will make this directory
output_directory = r'C:\Users\steve\data\six_species\_output'
# get all the folders and files using os.walk
# https://stackoverflow.com/questions/9727673/list-directory-tree-structure-in-python
for root, dirs, files in os.walk(input_directory):
# make all the output directories
for name in dirs:
output_path = os.path.join(output_directory, name)
if not os.path.exists(output_path):
os.makedirs(output_path)
for name in files:
out_file_prepped = os.path.join(output_directory, os.path.basename(root), os.path.splitext(name)[0])
print()
audioToSlicedSpecto(input_file=os.path.join(root, name), output_stub = out_file_prepped)
print("Done")
|
[
"steve.pousty@gmail.com"
] |
steve.pousty@gmail.com
|
e7139650e259002e87422337eb2ff9d60b902915
|
be21b5e1615d0dfe4918798f0c87ff0693978fc0
|
/src/memo/api/urls.py
|
b965f6d5737c634b1569493ceb8c7e6dfd091fda
|
[] |
no_license
|
jian9ang-git/SuperMemo
|
60f47619e8cbdbc292abc63c08b9ad00f0df06c1
|
4bda2993d3451b87cf1cf214625c50b94d3fafef
|
refs/heads/master
| 2023-08-25T02:50:46.011352
| 2021-09-30T13:41:00
| 2021-09-30T13:41:00
| 380,762,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 92
|
py
|
from django.urls import path
# urlpatterns = [
# path('', .as_view(), name='home'),
# ]
|
[
"jian9ang1@ya.ru"
] |
jian9ang1@ya.ru
|