blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
caeb2ddd5c391da1f8353bb97a4ac5a0196418b6 | 6f33ad84b82fbc730e1ebfa38ea6acfeb1434706 | /utils/confusion_matrix.py | 77424e4a0df2c90055648fd258bb239b78ea74e1 | [] | no_license | jedrzejkozal/ecg_oversampling | 7dfe22a11ad934dcd2085f13103702e69c8faf1d | 9c59f73c75f4ac60c1ddbc15c7c945e6e70c1c3b | refs/heads/master | 2020-06-21T23:09:51.754020 | 2019-08-06T11:00:38 | 2019-08-06T11:00:38 | 197,574,357 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | import itertools
import matplotlib.pyplot as plt
import numpy as np
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
| [
"jedrzej.kozal@gmail.com"
] | jedrzej.kozal@gmail.com |
f3a5d780b21c4f4176e4ec807d686482a6a22ea8 | 80a7eb9eb2b9c86e5c278464d9ee3c875a11a1bf | /src/scene_analyzer/msg/_stamped_string.py | 03d058a94c7c532109397b5226eaf55299a4785e | [] | no_license | stefanschilling/scene_analyzer | c348fd93127323a421e527549a5a323a87a69e84 | 5df78d82bafbd031e09b51cdf2d11d1a6d88e2a6 | refs/heads/master | 2020-05-30T11:53:37.057281 | 2016-06-05T19:16:21 | 2016-06-05T19:16:21 | 60,476,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,186 | py | """autogenerated by genpy from scene_analyzer/stamped_string.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class stamped_string(genpy.Message):
_md5sum = "c99a9440709e4d4a9716d55b8270d5e7"
_type = "scene_analyzer/stamped_string"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
string data
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
__slots__ = ['header','data']
_slot_types = ['std_msgs/Header','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,data
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(stamped_string, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = ''
else:
self.header = std_msgs.msg.Header()
self.data = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.data
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.data = str[start:end].decode('utf-8')
else:
self.data = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.data
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.data = str[start:end].decode('utf-8')
else:
self.data = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_3I = struct.Struct("<3I")
| [
"stefan@stefan-laptop.(none)"
] | stefan@stefan-laptop.(none) |
7b285f649b01ce696d22c4fbd5cb8dae8b8d1286 | e63a36870512edb7fd947b809631cf153b028997 | /doc/source/conf.py | eed13ab7549cb2d023a203b106150682f25abb0b | [
"Apache-2.0"
] | permissive | titilambert/surveil | 632c7e65d10e03c675d78f278822015346f5c47a | 8feeb64e40ca2bd95ebd60506074192ecdf627b6 | refs/heads/master | 2020-05-25T13:36:59.708227 | 2015-06-29T14:07:07 | 2015-06-29T14:07:07 | 38,249,530 | 1 | 0 | null | 2015-06-29T13:38:04 | 2015-06-29T13:38:03 | null | UTF-8 | Python | false | false | 2,531 | py | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinxcontrib.autohttp.flask',
'sphinxcontrib.pecanwsme.rest',
# 'oslosphinx',
'wsmeext.sphinxext',
]
wsme_protocols = ['restjson', 'restxml']
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'surveil'
copyright = u'2014-2015, Surveil Contributors'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# -- Options for manual page output -------------------------------------------
# If true, show URL addresses after external links.
man_show_urls = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| [
"alexandre.viau@savoirfairelinux.com"
] | alexandre.viau@savoirfairelinux.com |
9a55432ef8540a48bacea70b50892d1ab7974747 | 8e2c00e57e8a97d0051dfa1f3ba7c42109288702 | /exercise05/lettercount.py | 8ace748d6d7bc037815e32268652f5c3482339a3 | [] | no_license | camilleanne/hackbright_archive | 75dedb01ea9e4ff60f5d52b999b7ca7080e23ac4 | c67bb4aac8be21c5d7e64b8f7cfd910d198810bb | refs/heads/master | 2021-01-20T04:55:16.781229 | 2017-08-25T10:42:14 | 2017-08-25T10:42:14 | 101,394,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | from sys import argv
script, filename = argv
text = open(filename)
def main():
firstletter = text.read()
lower_case = firstletter.lower()
alphabet_count = [0 for i in range(0,26)]
for char in lower_case:
if ord(char) <= 122 and ord(char) >= 97:
alphabet_count[ord(char)-97] += 1
for item in alphabet_count:
print item
main () | [
"cteicheira@gmail.com"
] | cteicheira@gmail.com |
d9227d4c777e0654770a44acf9b7b89e63a3f247 | 14165029c01863b3f1568d42111e449fd805e8eb | /broken_1_4.py | 3e2972b465d9e25ce184d4f4b547867622c764db | [] | no_license | afrokoder/csv-merge | de9e1cd23acc55017e7417f241cdb2d37b8a44cd | 79b7ff6215b41cbc5ebb98b1fdadbadc537dda09 | refs/heads/master | 2020-06-15T23:28:47.986300 | 2019-07-05T14:18:28 | 2019-07-05T14:18:28 | 195,420,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | #Number half pyramid
#Makes a half pyramid of numbers up to 9
#1
#22
#333
#4444
#55555
outer_loop = 0
while outer_loop <= 10:
inner_loop = 1
#print ("Outer Loop is:", outer_loop)
while inner_loop < outer_loop + 4:
#print ("Inner Loop is:", inner_loop)
print (outer_loop, end="")
inner_loop = inner_loop + 1
print()
outer_loop = outer_loop + 2
| [
"knuchegbu@spscommerce.com"
] | knuchegbu@spscommerce.com |
66622e95d80095bcfb4410474b5a119066cb4c95 | 077407bd60cbf14b1f6de50759d8b02417cea007 | /OnlyBuy/users/models.py | 955f878145f7b6dc10c385147ea9a00cb8a4d866 | [] | no_license | zsx2541577860/chat | b7623f126055401d369f79f521daf6bcf3952063 | f7e08d07a39467c252451e4f032e374597178410 | refs/heads/master | 2020-05-16T18:26:03.334237 | 2019-07-15T12:00:23 | 2019-07-15T12:00:23 | 183,223,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your views here.
#自定义用户类型
#创建时显示的是后面的字符串,传递值时使用的是前面的数字
USERTYPE = (
(0,'管理员'),
(1,'卖家'),
(2,'买家')
)
GENDER = (
('1','男'),
('0','女')
)
class User(AbstractUser):
nickname = models.CharField('昵称',max_length=30,null=True,blank=True)
phone = models.CharField('手机号',max_length=30,null=True,unique=True)
gender = models.CharField('性别',max_length=10,null=True,blank=True,choices=GENDER,default='1')
is_delete = models.BooleanField('是否禁用',default=False)
usertype = models.IntegerField('用户类型',choices=USERTYPE,default=2)
def __str__(self):
return self.username
class Meta:
db_table = 'users'
| [
"2541577860@qq.com"
] | 2541577860@qq.com |
77a92c64c538638d8ebdaca85dbdefca126a1333 | 40b38b4ed3c6c85bcd58cc206db1efdce67bb66e | /snakeoil2015/untitled/fitnessplot.py | 93b151548cd36b57e4944d72e64991250aeb23c5 | [] | no_license | alexshade15/ComputazioneNaturale | 968244c5494af53472f6b45db5f6241a682dcf0d | 127b93911091304c8d58e66d7d99b0d5b036cb30 | refs/heads/master | 2022-01-21T08:03:29.909094 | 2019-07-19T06:31:23 | 2019-07-19T06:31:23 | 188,016,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,586 | py | import matplotlib.pyplot as plt
import logs as log
import numpy as np
#DA evaluation_0_1 a evaluation_0_3015
id = 1
list1 = []
fit = []
i = 0
for j in range(1,3016):
app = 'evaluation_'+i.__str__()+'_'+j.__str__()
list1.append(getattr(log, app))
for elem in list1:
fit.append(elem['fitness'])
mean = []
index = 0
var = []
var1 = []
var2 = []
k = 0
while index<3015:
print (fit[index:index+15])
mean.append(sum(fit[index:index+15])/15)
var.append(np.var(fit[index:index+15]))
var1.append(mean[k] + var[k])
var2.append(mean[k] - var[k])
k = k+1
index = index+15
print('var1' , var1)
print("vettore delle medie", mean)
print('var2', var2)
epoch = range(len(mean))
plt.plot(epoch,var1)
plt.plot(epoch,mean)
plt.plot(epoch,var2)
plt.xlabel('Epochs')
plt.ylabel('Mean Fitness')
plt.show()
#DA evaluation_1_1 a evaluation_1_3015
'''id = 1
list1 = []
fit = []
i = 1
for j in range(1,3016):
app = 'evaluation_'+i.__str__()+'_'+j.__str__()
list1.append(getattr(log, app))
for elem in list1:
fit.append(elem['fitness'])
mean = []
index = 0
var = []
var1 = []
var2 = []
k = 0
while index<3015:
mean.append(sum(fit[index:index+15])/15)
var.append(np.var(fit[index:index+15]))
var1.append(mean[k] + var[k])
var2.append(mean[k] - var[k])
k = k+1
index = index+15
print('var1' , var1)
print("vettore delle medie", mean)
print('var2', var2)
epoch = range(len(mean))
p = plt.plot(epoch,var1)
plt.plot(epoch,mean)
plt.plot(epoch,var2)
plt.xlabel('Epochs')
plt.ylabel('Mean Fitness')
plt.show()'''
| [
"32539407+alexshade15@users.noreply.github.com"
] | 32539407+alexshade15@users.noreply.github.com |
54916cd6aef8b96949a3900348ef5e689648aa2c | 1ed4e96c20da03fbd3aa4f18d4b004a59d8f89e5 | /Repo/venv/Lib/site-packages/torch/utils/file_baton.py | d474bfb4a810ea042d978407b1239dea9dd3f8b9 | [] | no_license | donhatkha/CS2225.CH1501 | eebc854864dc6fe72a3650f640787de11d4e82b7 | 19d4dd3b11f8c9560d0d0a93882298637cacdc80 | refs/heads/master | 2023-07-19T13:27:17.862158 | 2021-02-08T07:19:05 | 2021-02-08T07:19:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,625 | py | import os
import sys
import time
if sys.version < '3.3':
# Note(jiayq): in Python 2, FileExistsError is not defined and the
# error manifests it as OSError.
FileExistsError = OSError
class FileBaton:
'''A primitive, file-based synchronization utility.'''
def __init__(self, lock_file_path, wait_seconds=0.1):
'''
Creates a new :class:`FileBaton`.
Args:
lock_file_path: The path to the file used for locking.
wait_seconds: The seconds to periorically sleep (spin) when
calling ``wait()``.
'''
self.lock_file_path = lock_file_path
self.wait_seconds = wait_seconds
self.fd = None
def try_acquire(self):
'''
Tries to atomically create a file under exclusive access.
Returns:
True if the file could be created, else False.
'''
try:
self.fd = os.open(self.lock_file_path, os.O_CREAT | os.O_EXCL)
return True
except FileExistsError:
return False
def wait(self):
'''
Periodically sleeps for a certain amount until the baton is released.
The amount of time slept depends on the ``wait_seconds`` parameter
passed to the constructor.
'''
while os.path.exists(self.lock_file_path):
time.sleep(self.wait_seconds)
def release(self):
'''Releases the baton and removes its file.'''
if self.fd is not None:
os.close(self.fd)
os.remove(self.lock_file_path)
| [
"59596379+khado2359@users.noreply.github.com"
] | 59596379+khado2359@users.noreply.github.com |
316dfa1bedfcab2282dcbf533be4bdaa3d68e8b2 | 43cd95bcdef9d06bde0bc2e5e318bada6ae587fc | /basic/range_function.py | 3424c943384bc3e10f997866f421496d7171784b | [] | no_license | suchana172/My_python_beginner_level_all_code | 00eddb7aac87e559812dcb6781257e4e83dbbdce | 94a312b2da39017d8f93ba185562f82da77c9b55 | refs/heads/master | 2022-12-31T15:06:00.448433 | 2020-10-19T17:18:30 | 2020-10-19T17:18:30 | 305,456,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | for value in range(1,10) :
print(value)
| [
"khairunnahar.suchana@northsouth.edu"
] | khairunnahar.suchana@northsouth.edu |
cfc1119d27d112c18ef8a4cf62a55b777689fd7e | 57775b4c245723078fd43abc35320cb16f0d4cb6 | /Leetcode/hash-table/find-words-that-can-be-formed-by-characters.py | 5d7dfafa4509bc65009744a92a838b98562774db | [] | no_license | farhapartex/code-ninja | 1757a7292ac4cdcf1386fe31235d315a4895f072 | 168fdc915a4e3d3e4d6f051c798dee6ee64ea290 | refs/heads/master | 2020-07-31T16:10:43.329468 | 2020-06-18T07:00:34 | 2020-06-18T07:00:34 | 210,668,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | class Solution:
def countCharacters(self, words: List[str], chars: str) -> int:
n = 0
for word in words:
y = chars
flag = False
for w in word:
if w in y:
y = y.replace(w,"",1)
else:
flag = True
break
if not flag:
n += len(word)
return n
| [
"farhapartex@gmail.com"
] | farhapartex@gmail.com |
25d1fac9d7159f6e7e778967782d95636279a25c | 3863bea43fbc7ce2c7845cd2bad8ae50b67359b2 | /Tempermonkey-vue3-tfjs/torch/nmt_example/infer.py | dfb59886301d75382e707fd62c7156bddf4b07cd | [
"MIT"
] | permissive | flashlin/Samples | 3aea6a2dadf3051394f1535d00de9baeb7c904cb | c360e81624296c9243fd662dea618042164e0aa7 | refs/heads/master | 2023-09-03T06:32:53.857040 | 2023-09-02T15:55:51 | 2023-09-02T15:55:51 | 182,355,099 | 3 | 0 | MIT | 2023-03-03T00:03:37 | 2019-04-20T03:28:25 | C# | UTF-8 | Python | false | false | 6,462 | py | from nltk.translate import bleu_score
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import utils
import torch
import os
from nmt_model import NMTModel
from nmt_dataset import NMTDataset
import nmt_dataset
chencherry = bleu_score.SmoothingFunction()
args = utils.get_args()
class NMTSampler:
def __init__(self, vectorizer, model):
self.vectorizer = vectorizer
self.model = model
def apply_to_batch(self, batch_dict):
self._last_batch = batch_dict
y_pred = self.model(x_source=batch_dict['x_source'],
x_source_lengths=batch_dict['x_source_length'],
target_sequence=batch_dict['x_target'])
self._last_batch['y_pred'] = y_pred
attention_batched = np.stack(self.model.decoder._cached_p_attn).transpose(1, 0, 2)
self._last_batch['attention'] = attention_batched
def _get_source_sentence(self, index, return_string=True):
indices = self._last_batch['x_source'][index].cpu().detach().numpy()
vocab = self.vectorizer.source_vocab
return utils.sentence_from_indices(indices, vocab, return_string=return_string)
def _get_reference_sentence(self, index, return_string=True):
indices = self._last_batch['y_target'][index].cpu().detach().numpy()
vocab = self.vectorizer.target_vocab
return utils.sentence_from_indices(indices, vocab, return_string=return_string)
def _get_sampled_sentence(self, index, return_string=True):
_, all_indices = torch.max(self._last_batch['y_pred'], dim=2)
sentence_indices = all_indices[index].cpu().detach().numpy()
vocab = self.vectorizer.target_vocab
return utils.sentence_from_indices(sentence_indices, vocab, return_string=return_string)
def get_ith_item(self, index, return_string=True):
output = {"source": self._get_source_sentence(index, return_string=return_string),
"reference": self._get_reference_sentence(index, return_string=return_string),
"sampled": self._get_sampled_sentence(index, return_string=return_string),
"attention": self._last_batch['attention'][index]}
reference = output['reference']
hypothesis = output['sampled']
if not return_string:
reference = " ".join(reference)
hypothesis = " ".join(hypothesis)
output['bleu-4'] = bleu_score.sentence_bleu(references=[reference],
hypothesis=hypothesis,
smoothing_function=chencherry.method1)
return output
dataset = NMTDataset.load_dataset_and_load_vectorizer(args.dataset_csv,
args.vectorizer_file)
vectorizer = dataset.get_vectorizer()
# create model
model = NMTModel(source_vocab_size=len(vectorizer.source_vocab),
source_embedding_size=args.source_embedding_size,
target_vocab_size=len(vectorizer.target_vocab),
target_embedding_size=args.target_embedding_size,
encoding_size=args.encoding_size,
target_bos_index=vectorizer.target_vocab.begin_seq_index)
# load from checkpoint or create new one
# if args.reload_from_files and os.path.exists(args.model_state_file):
model.load_state_dict(torch.load(args.model_state_file))
# print("Reloaded model")
# else:
# print("New model")
model = model.eval().to(args.device)
sampler = NMTSampler(vectorizer, model)
dataset.set_split('test')
batch_generator = nmt_dataset.generate_nmt_batches(dataset,
batch_size=args.batch_size,
device=args.device)
test_results = []
for batch_dict in batch_generator:
sampler.apply_to_batch(batch_dict)
for i in range(args.batch_size):
test_results.append(sampler.get_ith_item(i, False))
plt.hist([r['bleu-4'] for r in test_results], bins=100)
print(np.mean([r['bleu-4'] for r in test_results]), np.median([r['bleu-4'] for r in test_results]))
plt.show()
all_results = []
for i in range(args.batch_size):
all_results.append(sampler.get_ith_item(i, False))
top_results = [x for x in all_results if x['bleu-4'] > 0.5]
for sample in top_results:
plt.figure()
target_len = len(sample['sampled'])
source_len = len(sample['source'])
attention_matrix = sample['attention'][:target_len, :source_len + 2].transpose() # [::-1]
ax = sns.heatmap(attention_matrix, center=0.0)
ylabs = ["<BOS>"] + sample['source'] + ["<EOS>"]
# ylabs = sample['source']
# ylabs = ylabs[::-1]
ax.set_yticklabels(ylabs, rotation=0)
ax.set_xticklabels(sample['sampled'], rotation=90)
ax.set_xlabel("Target Sentence")
ax.set_ylabel("Source Sentence\n\n")
plt.show()
def get_source_sentence(vectorizer, batch_dict, index):
indices = batch_dict['x_source'][index].cpu().words.numpy()
vocab = vectorizer.source_vocab
return sentence_from_indices(indices, vocab)
def get_true_sentence(vectorizer, batch_dict, index):
return sentence_from_indices(batch_dict['y_target'].cpu().words.numpy()[index], vectorizer.target_vocab)
def get_sampled_sentence(vectorizer, batch_dict, index):
y_pred = model(x_source=batch_dict['x_source'],
x_source_lengths=batch_dict['x_source_length'],
target_sequence=batch_dict['x_target'],
sample_probability=1.0)
return sentence_from_indices(torch.max(y_pred, dim=2)[1].cpu().words.numpy()[index], vectorizer.target_vocab)
def get_all_sentences(vectorizer, batch_dict, index):
return {"source": get_source_sentence(vectorizer, batch_dict, index),
"truth": get_true_sentence(vectorizer, batch_dict, index),
"sampled": get_sampled_sentence(vectorizer, batch_dict, index)}
def sentence_from_indices(indices, vocab, strict=True):
ignore_indices = set([vocab.mask_index, vocab.begin_seq_index, vocab.end_seq_index])
out = []
for index in indices:
if index == vocab.begin_seq_index and strict:
continue
elif index == vocab.end_seq_index and strict:
return " ".join(out)
else:
out.append(vocab.lookup_index(index))
return " ".join(out)
results = get_all_sentences(vectorizer, batch_dict, 1)
print(results)
| [
"flash.lin@gmail.com"
] | flash.lin@gmail.com |
2d73f2c0101f0e46503190c763079eb50774cdfb | ad56f922a9ce352d4761a220e2fa0d3b7c934d58 | /scripts/watch_v2.py | 065034b01218fdde24c15faa5216a6fe5bc5503e | [
"MIT"
] | permissive | isajediknight/Sleep-Is-Overrated | 19118e8c503d59c6576ff9866326720ff7b6b6d0 | 0a9e364a5d1507199f9674cf67ff18a8c22cdad7 | refs/heads/main | 2023-03-14T13:04:12.511973 | 2021-03-05T07:12:54 | 2021-03-05T07:12:54 | 334,002,303 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,971 | py | import datetime,time,os,sys
if(sys.platform.lower().startswith('linux')):
OS_TYPE = 'linux'
elif(sys.platform.lower().startswith('mac')):
OS_TYPE = 'macintosh'
elif(sys.platform.lower().startswith('win')):
OS_TYPE = 'windows'
else:
OS_TYPE = 'invalid'
# Get our current directory
OUTPUT_FILE_DIRECTORY = os.getcwd()
def find_all(a_str, sub):
"""
Returns the indexes of {sub} where they were found in {a_str}. The values
returned from this function should be made into a list() before they can
be easily used.
Last Update: 03/01/2017
By: LB023593
"""
start = 0
while True:
start = a_str.find(sub, start)
if start == -1: return
yield start
start += 1
# Create variables for all the paths
if((OS_TYPE == 'windows')):
# Clear Screen Windows
os.system('cls')
directories = list(find_all(OUTPUT_FILE_DIRECTORY,'\\'))
OUTPUTS_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '\\outputs\\'
INPUTS_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '\\inputs\\'
SCRIPTS_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '\\scripts\\'
MODULES_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '\\modules\\'
MODULES_GITHUB_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '\\modules\\github\\'
CLASSES_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '\\classes\\'
elif((OS_TYPE == 'linux') or (OS_TYPE == 'macintosh')):
# Clear Screen Linux / Mac
os.system('clear')
directories = list(find_all(OUTPUT_FILE_DIRECTORY,'/'))
OUTPUTS_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '/outputs/'
INPUTS_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '/inputs/'
SCRIPTS_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '/scripts/'
MODULES_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '/modules/'
MODULES_GITHUB_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '/modules/github/'
CLASSES_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '/classes/'
# OS Compatibility for importing Class Files
if((OS_TYPE == 'linux') or (OS_TYPE == 'macintosh')):
sys.path.insert(0,'../classes/')
sys.path.insert(0,MODULES_DIR)
elif((OS_TYPE == 'windows')):
sys.path.insert(0,'..\\classes\\')
sys.path.insert(0,MODULES_DIR)
# < --- Begin Custom Classes Import --- >
# Custom Colors for printing to the screen
from custom_colors import *
from benchmark import *
from crypto_pairs import *
from command_line_arguments import *
from pseudothreading import *
# < --- End Custom Classes Import --- >
# Time all the things!
runtime = Benchmark()
# Text Coloration
cc = ColoredText(['exchange'],['38;5;214m'])
# Get parameters from commandline
parameters = Parse()
# Define what we're expecting to be passed in
parameters.add_expectation('-crypto-main', 'string', True, False)
parameters.add_expectation('-crypto-alt', 'string', True, False)
# Assign passed in values
parameters.parse_commandline()
# Check expectations were met
parameters.validate_requirements()
# World Reserve Crypto
main = parameters.get_parameter('-crypto-main').value
# Poor wanna be Crypto
alt = parameters.get_parameter('-crypto-alt').value
# Define threads to run
#'order book'
thread1 = thread('kraken',main,alt,'ticker')
thread2 = thread('binance',main,alt,'ticker');
thread3 = thread('bittrex',main,alt,'ticker');
thread4 = thread('tradeogre',main,alt,'ticker');
# Run the threads!
thread1.start()
thread2.start()
thread3.start()
thread4.start()
# Wait for all threads to finish
thread1.join()
thread2.join()
thread3.join()
thread4.join()
print(cc.cc("Kraken:",'exchange'))
print(str(thread1.get_thread_results())+"\n")
print(cc.cc("Binance:",'exchange'))
print(str(thread2.get_thread_results())+"\n")
print(cc.cc("Bittrex:",'exchange'))
print(str(thread3.get_thread_results())+"\n")
print(cc.cc("TradeOgre:",'exchange'))
print(str(thread4.get_thread_results())+"\n")
runtime.stop()
print(" Program Runtime: " + runtime.human_readable_string()) | [
"luke@isajediknight.com"
] | luke@isajediknight.com |
eec538cfdf617e4159ea9589da4655975097b6f0 | 24f21e7d62fc6c02cabf5f442effafd4bf712b93 | /ast_edits.py | bc6fe08eaab52b460b1aec7ad39e5fb454137fa4 | [
"MIT"
] | permissive | robur66/MLDogBreedClass | d44f0a8cf0ba9dc8b38df32870b468e8b4d864f8 | 97e44e6ca1e67a48140648beb2fcb207333e57ad | refs/heads/master | 2021-08-24T06:54:34.314579 | 2017-12-08T13:56:58 | 2017-12-08T13:56:58 | 113,493,513 | 0 | 0 | null | 2017-12-07T20:05:56 | 2017-12-07T20:05:56 | null | UTF-8 | Python | false | false | 18,962 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts according to an API change specification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import collections
import os
import shutil
import sys
import tempfile
import traceback
class APIChangeSpec(object):
  """This class defines the transformations that need to happen.

  Subclasses describe a concrete API migration; instances are consumed by
  `_ASTCallVisitor` and `ASTCodeUpgrader` below.

  This class must provide the following fields:

  * `function_keyword_renames`: maps function names to a map of old -> new
    argument names
  * `function_renames`: maps function names to new function names
  * `change_to_function`: a set of function names that have changed (for
    notifications)
  * `function_reorders`: maps functions whose argument order has changed to the
    list of arguments in the new order
  * `function_handle`: maps function names to custom handlers for the function

  For an example, see `TFAPIChangeSpec`.
  """
class _FileEditTuple(collections.namedtuple(
    "_FileEditTuple", ["comment", "line", "start", "old", "new"])):
  """Each edit that is recorded by a _FileEditRecorder.

  Immutable value object (namedtuple) describing one textual substitution.

  Fields:
    comment: A description of the edit and why it was made.
    line: The line number in the file where the edit occurs (1-indexed).
    start: The column offset in the line where the edit occurs (0-indexed).
    old: text string to remove (this must match what was in file).
    new: text string to add in place of `old`.
  """

  # No extra instance attributes beyond the namedtuple fields.
  __slots__ = ()
class _FileEditRecorder(object):
  """Record changes that need to be done to the file.

  Edits are accumulated per line via `add()` and applied in one pass by
  `process()`.
  """

  def __init__(self, filename):
    # all edits are lists of chars
    self._filename = filename
    # Maps 1-indexed line number -> list of _FileEditTuple for that line.
    self._line_to_edit = collections.defaultdict(list)
    self._errors = []

  def process(self, text):
    """Process a list of strings, each corresponding to the recorded changes.

    Args:
      text: A list of lines of text (assumed to contain newlines)

    Returns:
      A 3-tuple of (modified text joined into one string, a textual report
      of what was done, the list of accumulated error strings).

    Raises:
      ValueError: if substitution source location does not have expected text.
    """
    change_report = ""

    # Iterate of each line
    for line, edits in self._line_to_edit.items():
      offset = 0
      # sort by column so that edits are processed in order in order to make
      # indexing adjustments cumulative for changes that change the string
      # length
      edits.sort(key=lambda x: x.start)

      # Extract each line to a list of characters, because mutable lists
      # are editable, unlike immutable strings.
      char_array = list(text[line - 1])

      # Record a description of the change
      change_report += "%r Line %d\n" % (self._filename, line)
      change_report += "-" * 80 + "\n\n"
      for e in edits:
        change_report += "%s\n" % e.comment
      change_report += "\n    Old: %s" % (text[line - 1])

      # Make underscore buffers for underlining where in the line the edit was
      change_list = [" "] * len(text[line - 1])
      change_list_new = [" "] * len(text[line - 1])

      # Iterate for each edit
      for e in edits:
        # Create effective start, end by accounting for change in length due
        # to previous edits
        start_eff = e.start + offset
        end_eff = start_eff + len(e.old)

        # Make sure the edit is changing what it should be changing
        old_actual = "".join(char_array[start_eff:end_eff])
        if old_actual != e.old:
          raise ValueError("Expected text %r but got %r" %
                           ("".join(e.old), "".join(old_actual)))
        # Make the edit
        char_array[start_eff:end_eff] = list(e.new)

        # Create the underline highlighting of the before and after
        change_list[e.start:e.start + len(e.old)] = "~" * len(e.old)
        change_list_new[start_eff:end_eff] = "~" * len(e.new)

        # Keep track of how to generate effective ranges
        offset += len(e.new) - len(e.old)

      # Finish the report comment
      change_report += "         %s\n" % "".join(change_list)
      text[line - 1] = "".join(char_array)
      change_report += "    New: %s" % (text[line - 1])
      change_report += "         %s\n\n" % "".join(change_list_new)
    return "".join(text), change_report, self._errors

  def add(self, comment, line, start, old, new, error=None):
    """Add a new change that is needed.

    Args:
      comment: A description of what was changed
      line: Line number (1 indexed)
      start: Column offset (0 indexed)
      old: old text
      new: new text
      error: this "edit" is something that cannot be fixed automatically

    Returns:
      None
    """
    self._line_to_edit[line].append(
        _FileEditTuple(comment, line, start, old, new))
    if error:
      self._errors.append("%s:%d: %s" % (self._filename, line, error))
class _ASTCallVisitor(ast.NodeVisitor):
"""AST Visitor that processes function calls.
Updates function calls from old API version to new API version using a given
change spec.
"""
def __init__(self, filename, lines, api_change_spec):
self._filename = filename
self._file_edit = _FileEditRecorder(filename)
self._lines = lines
self._api_change_spec = api_change_spec
def process(self, lines):
return self._file_edit.process(lines)
def generic_visit(self, node):
ast.NodeVisitor.generic_visit(self, node)
def _rename_functions(self, node, full_name):
function_renames = self._api_change_spec.function_renames
try:
new_name = function_renames[full_name]
self._file_edit.add("Renamed function %r to %r" % (full_name,
new_name),
node.lineno, node.col_offset, full_name, new_name)
except KeyError:
pass
def _get_attribute_full_path(self, node):
"""Traverse an attribute to generate a full name e.g. tf.foo.bar.
Args:
node: A Node of type Attribute.
Returns:
a '.'-delimited full-name or None if the tree was not a simple form.
i.e. `foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
"""
curr = node
items = []
while not isinstance(curr, ast.Name):
if not isinstance(curr, ast.Attribute):
return None
items.append(curr.attr)
curr = curr.value
items.append(curr.id)
return ".".join(reversed(items))
def _find_true_position(self, node):
"""Return correct line number and column offset for a given node.
This is necessary mainly because ListComp's location reporting reports
the next token after the list comprehension list opening.
Args:
node: Node for which we wish to know the lineno and col_offset
"""
import re
find_open = re.compile("^\s*(\\[).*$")
find_string_chars = re.compile("['\"]")
if isinstance(node, ast.ListComp):
# Strangely, ast.ListComp returns the col_offset of the first token
# after the '[' token which appears to be a bug. Workaround by
# explicitly finding the real start of the list comprehension.
line = node.lineno
col = node.col_offset
# loop over lines
while 1:
# Reverse the text to and regular expression search for whitespace
text = self._lines[line-1]
reversed_preceding_text = text[:col][::-1]
# First find if a [ can be found with only whitespace between it and
# col.
m = find_open.match(reversed_preceding_text)
if m:
new_col_offset = col - m.start(1) - 1
return line, new_col_offset
else:
if (reversed_preceding_text=="" or
reversed_preceding_text.isspace()):
line = line - 1
prev_line = self._lines[line - 1]
# TODO(aselle):
# this is poor comment detection, but it is good enough for
# cases where the comment does not contain string literal starting/
# ending characters. If ast gave us start and end locations of the
# ast nodes rather than just start, we could use string literal
# node ranges to filter out spurious #'s that appear in string
# literals.
comment_start = prev_line.find("#")
if comment_start == -1:
col = len(prev_line) -1
elif find_string_chars.search(prev_line[comment_start:]) is None:
col = comment_start
else:
return None, None
else:
return None, None
# Most other nodes return proper locations (with notably does not), but
# it is not possible to use that in an argument.
return node.lineno, node.col_offset
def visit_Call(self, node): # pylint: disable=invalid-name
"""Handle visiting a call node in the AST.
Args:
node: Current Node
"""
# Find a simple attribute name path e.g. "tf.foo.bar"
full_name = self._get_attribute_full_path(node.func)
# Make sure the func is marked as being part of a call
node.func.is_function_for_call = True
if full_name:
# Call special handlers
function_handles = self._api_change_spec.function_handle
if full_name in function_handles:
function_handles[full_name](self._file_edit, node)
# Examine any non-keyword argument and make it into a keyword argument
# if reordering required.
function_reorders = self._api_change_spec.function_reorders
function_keyword_renames = (
self._api_change_spec.function_keyword_renames)
if full_name in function_reorders:
reordered = function_reorders[full_name]
for idx, arg in enumerate(node.args):
lineno, col_offset = self._find_true_position(arg)
if lineno is None or col_offset is None:
self._file_edit.add(
"Failed to add keyword %r to reordered function %r"
% (reordered[idx], full_name), arg.lineno, arg.col_offset,
"", "",
error="A necessary keyword argument failed to be inserted.")
else:
keyword_arg = reordered[idx]
if (full_name in function_keyword_renames and
keyword_arg in function_keyword_renames[full_name]):
keyword_arg = function_keyword_renames[full_name][keyword_arg]
self._file_edit.add("Added keyword %r to reordered function %r"
% (reordered[idx], full_name), lineno,
col_offset, "", keyword_arg + "=")
# Examine each keyword argument and convert it to the final renamed form
renamed_keywords = ({} if full_name not in function_keyword_renames else
function_keyword_renames[full_name])
for keyword in node.keywords:
argkey = keyword.arg
argval = keyword.value
if argkey in renamed_keywords:
argval_lineno, argval_col_offset = self._find_true_position(argval)
if argval_lineno is not None and argval_col_offset is not None:
# TODO(aselle): We should scan backward to find the start of the
# keyword key. Unfortunately ast does not give you the location of
# keyword keys, so we are forced to infer it from the keyword arg
# value.
key_start = argval_col_offset - len(argkey) - 1
key_end = key_start + len(argkey) + 1
if (self._lines[argval_lineno - 1][key_start:key_end] ==
argkey + "="):
self._file_edit.add("Renamed keyword argument from %r to %r" %
(argkey, renamed_keywords[argkey]),
argval_lineno,
argval_col_offset - len(argkey) - 1,
argkey + "=", renamed_keywords[argkey] + "=")
continue
self._file_edit.add(
"Failed to rename keyword argument from %r to %r" %
(argkey, renamed_keywords[argkey]),
argval.lineno,
argval.col_offset - len(argkey) - 1,
"", "",
error="Failed to find keyword lexographically. Fix manually.")
ast.NodeVisitor.generic_visit(self, node)
def visit_Attribute(self, node): # pylint: disable=invalid-name
"""Handle bare Attributes i.e. [tf.foo, tf.bar].
Args:
node: Node that is of type ast.Attribute
"""
full_name = self._get_attribute_full_path(node)
if full_name:
self._rename_functions(node, full_name)
if full_name in self._api_change_spec.change_to_function:
if not hasattr(node, "is_function_for_call"):
new_text = full_name + "()"
self._file_edit.add("Changed %r to %r"%(full_name, new_text),
node.lineno, node.col_offset, full_name, new_text)
ast.NodeVisitor.generic_visit(self, node)
class ASTCodeUpgrader(object):
  """Handles upgrading a set of Python files using a given API change spec."""

  def __init__(self, api_change_spec):
    if not isinstance(api_change_spec, APIChangeSpec):
      raise TypeError("Must pass APIChangeSpec to ASTCodeUpgrader, got %s" %
                      type(api_change_spec))
    self._api_change_spec = api_change_spec

  def process_file(self, in_filename, out_filename):
    """Process the given python file for incompatible changes.

    Args:
      in_filename: filename to parse
      out_filename: output file to write to

    Returns:
      A tuple representing number of files processed, log of actions, errors
    """
    # Write to a temporary file, just in case we are doing an implace modify.
    with open(in_filename, "r") as in_file, \
        tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
      ret = self.process_opened_file(
          in_filename, in_file, out_filename, temp_file)

    # Only replace the destination once the whole file processed cleanly.
    shutil.move(temp_file.name, out_filename)
    return ret

  # Broad exceptions are required here because ast throws whatever it wants.
  # pylint: disable=broad-except
  def process_opened_file(self, in_filename, in_file, out_filename, out_file):
    """Process the given python file for incompatible changes.

    This function is split out to facilitate StringIO testing from
    tf_upgrade_test.py.

    Args:
      in_filename: filename to parse
      in_file: opened file (or StringIO)
      out_filename: output file to write to
      out_file: opened file (or StringIO)

    Returns:
      A tuple representing number of files processed, log of actions, errors
    """
    process_errors = []
    text = "-" * 80 + "\n"
    text += "Processing file %r\n outputting to %r\n" % (in_filename,
                                                         out_filename)
    text += "-" * 80 + "\n\n"

    parsed_ast = None
    lines = in_file.readlines()
    try:
      parsed_ast = ast.parse("".join(lines))
    except Exception:
      # Unparseable input: report the traceback but keep going so that a
      # whole-tree run is not aborted by one bad file.
      text += "Failed to parse %r\n\n" % in_filename
      text += traceback.format_exc()
    if parsed_ast:
      visitor = _ASTCallVisitor(in_filename, lines, self._api_change_spec)
      visitor.visit(parsed_ast)
      out_text, new_text, process_errors = visitor.process(lines)
      text += new_text
      if out_file:
        out_file.write(out_text)
    text += "\n"
    return 1, text, process_errors
  # pylint: enable=broad-except

  def process_tree(self, root_directory, output_root_directory,
                   copy_other_files):
    """Processes upgrades on an entire tree of python files in place.

    Note that only Python files. If you have custom code in other languages,
    you will need to manually upgrade those.

    Args:
      root_directory: Directory to walk and process.
      output_root_directory: Directory to use as base.
      copy_other_files: Copy files that are not touched by this converter.

    Returns:
      A tuple of files processed, the report string ofr all files, and errors
    """
    # make sure output directory doesn't exist
    if output_root_directory and os.path.exists(output_root_directory):
      print("Output directory %r must not already exist." % (
          output_root_directory))
      sys.exit(1)

    # make sure output directory does not overlap with root_directory
    norm_root = os.path.split(os.path.normpath(root_directory))
    norm_output = os.path.split(os.path.normpath(output_root_directory))
    if norm_root == norm_output:
      print("Output directory %r same as input directory %r" % (
          root_directory, output_root_directory))
      sys.exit(1)

    # Collect list of files to process (we do this to correctly handle if the
    # user puts the output directory in some sub directory of the input dir)
    files_to_process = []
    files_to_copy = []
    for dir_name, _, file_list in os.walk(root_directory):
      py_files = [f for f in file_list if f.endswith(".py")]
      copy_files = [f for f in file_list if not f.endswith(".py")]
      for filename in py_files:
        fullpath = os.path.join(dir_name, filename)
        fullpath_output = os.path.join(
            output_root_directory, os.path.relpath(fullpath, root_directory))
        files_to_process.append((fullpath, fullpath_output))
      if copy_other_files:
        for filename in copy_files:
          fullpath = os.path.join(dir_name, filename)
          fullpath_output = os.path.join(
              output_root_directory, os.path.relpath(fullpath, root_directory))
          files_to_copy.append((fullpath, fullpath_output))

    file_count = 0
    tree_errors = []
    report = ""
    report += ("=" * 80) + "\n"
    report += "Input tree: %r\n" % root_directory
    report += ("=" * 80) + "\n"

    for input_path, output_path in files_to_process:
      output_directory = os.path.dirname(output_path)
      if not os.path.isdir(output_directory):
        os.makedirs(output_directory)
      file_count += 1
      _, l_report, l_errors = self.process_file(input_path, output_path)
      tree_errors += l_errors
      report += l_report
    for input_path, output_path in files_to_copy:
      output_directory = os.path.dirname(output_path)
      if not os.path.isdir(output_directory):
        os.makedirs(output_directory)
      shutil.copy(input_path, output_path)
    return file_count, report, tree_errors
| [
"gapp1@localhost"
] | gapp1@localhost |
d675945d9dcc0f99396a02cefb496fc58d518c2b | edb906c10790abc1eba4047bca557aa173616f10 | /business/serializer.py | 9c6d69a222cb016743cecfb85d7e3c1b365aab12 | [] | no_license | Wakarende/neighbourhood | 743d26ee76a79018865a15c523f390c35812b73c | 29003acc8f760046a33f1b3313b5a016a007890d | refs/heads/master | 2023-05-13T12:43:53.257053 | 2021-06-08T06:59:09 | 2021-06-08T06:59:09 | 373,812,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | from rest_framework import serializers
from .models import BusinessModel
from django.db import models
class BusinessSerializer(serializers.ModelSerializer):
    """DRF model serializer exposing every field of BusinessModel."""

    class Meta:
        model=BusinessModel
        # Serialize all model fields; tighten to an explicit list if the
        # model ever grows sensitive columns.
        fields='__all__'
| [
"joykirii@gmail.com"
] | joykirii@gmail.com |
6ee7436f2894be4f9a4be7048a46db2c4143bcab | fde7b32ee0296b32efa1da20c19755319e12c3c0 | /func.py | 01487189af838a03899edc4ee7d3ded29037b7dd | [] | no_license | rajila/courserapython | 78728ff6ade601f23cfb39d7c0fbcbe4adc8bf99 | a739b79a8b8a1a46d90c00deb52c383e42e5b87a | refs/heads/main | 2023-04-13T08:06:57.203332 | 2021-04-27T18:32:49 | 2021-04-27T18:32:49 | 308,402,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | name = 'Ronald'
# Demonstration of Python scoping rules (global reads, local shadowing,
# `global` rebinding, default-argument binding time) and tuple dict keys.


def thing():
    # 'name' and 'apellido' are looked up in module globals at call time.
    print("Hello", name, apellido)


def surname():
    apellido = 'NA'  # local variable; shadows the module-level 'apellido'
    print(name, apellido)


def pmensaje():
    global mensaje  # rebind the module-level 'mensaje'
    mensaje = 'Cambio desde pmensaje()'
    print(mensaje)


apellido = 'Ajila'
thing()  # Hello Ronald Ajila
surname()  # local 'NA' does not touch the global
print(apellido)  # still 'Ajila'

mensaje = 'Hola desde exterior'
print('mensaje -> ', mensaje)  # message before the global rebind
pmensaje()  # rebinds the global 'mensaje'
print('mensaje -> ', mensaje)  # message after the global rebind

d = {('valencia','madrid'):20}
# Tuple keys are order-sensitive: ('madrid','valencia') != ('valencia','madrid')
print(('madrid','valencia') in d)  # False
print(('valencia','madrid') in d)  # True

nameR = 'RonaldRonald'


def testData(name=nameR):
    # The default is bound when 'def' executes, so it stays 'RonaldRonald'
    # even though nameR is rebound below.
    print('name: {}'.format(name))


nameR = 'DanielDaniel'
testData()  # prints 'name: RonaldRonald'
| [
"rdajila@gmail.com"
] | rdajila@gmail.com |
a8cacab94132932232ab8809d2cae3e6a22d1030 | 8d2e122add59cb8b9c0ea7e05b4b6b17afb6bf36 | /ds/stack/problems/balanced_or_not.py | c91cfe140fb9b746ffd999d27001088b41806117 | [] | no_license | thilakarajk/Algorithms-Data-Structures | 1ce1fcc8d69a033e32422736cd3d2cad57079d35 | 51682f015836302c3795b5206e936ae7e42c9787 | refs/heads/master | 2023-01-23T03:43:21.881439 | 2020-12-12T06:10:44 | 2020-12-12T06:10:44 | 317,902,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | from ds.stack.implementation import Stack
# Token pair used throughout this module: TOKENS[0] opens, TOKENS[1] closes.
TOKENS = ['<', '>']


def is_open_tag(char):
    """Return True when *char* is the opening token."""
    return char == TOKENS[0]


def is_matching(open_tag, close_tag):
    """Return True when *open_tag*/*close_tag* form the canonical pair."""
    return (open_tag, close_tag) == (TOKENS[0], TOKENS[1])
def compare(expression, replacement):
    """Return 1 if `expression` can be balanced with at most `replacement`
    fixes of unmatched close tags, else 0.

    BUG FIX: the previous version returned 0 as soon as `replacement` was 0
    on *any* close tag -- even one that matched an open tag on the stack --
    so a perfectly balanced string such as "<>" with a budget of 0 was
    wrongly rejected.  A replacement is now consumed only for a close tag
    that has no open tag to match.

    NOTE(review): as in the original, leftover open tags at the end make the
    expression unbalanced regardless of any remaining budget; confirm this
    matches the intended problem statement.
    """
    stack = Stack()
    for char in list(expression):
        if is_open_tag(char):
            # Defer matching: remember the open tag.
            stack.push(char)
        elif not stack.is_empty():
            # Only open tags are ever pushed, so the popped tag always
            # matches this close tag.
            stack.pop()
        elif replacement > 0:
            # Unmatched close tag: spend one replacement on it.
            replacement -= 1
        else:
            return 0
    return 1 if stack.size == 0 else 0
def balancedOrNot(expressions, maxReplacements):
    """Evaluate each expression against its replacement budget via compare().

    Returns a list of 0/1 flags, one per (expression, budget) pair.
    """
    return [compare(expr, budget)
            for expr, budget in zip(expressions, maxReplacements)]
# Demo run: print one 0/1 result per expression, one per line.
result = balancedOrNot(["<<>>>","<>>>>"],[0,2])
result = map(str, result)  # map -> iterator of "0"/"1" strings
print("\n".join(result))
"thilakaraj.kamaraj@astrazeneca.com"
] | thilakaraj.kamaraj@astrazeneca.com |
9acaba0b3609be4738ffb29eb198c1ce973a908b | 90033f709a8ea7fb1a0d8c7883ce79fd118fa188 | /proyecto_jmfc.py | 781a2b27a5612d1aa7e8843196782928044fe62a | [] | no_license | josemfc/recopilador_noticias | 73835ae7f46a99369ff878e127870caa4d1e6fbd | 8993780b97db01fae205fbf343b2aace99f994d7 | refs/heads/master | 2021-01-10T09:20:51.821190 | 2015-10-24T11:17:20 | 2015-10-24T11:17:20 | 44,862,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,826 | py | # -*- coding: utf-8 -*-
######
# Programa realizado en ubuntu con python 2.7.6
# Autor: José María Fernández Campos
######
import MySQLdb
from gi.repository import Gtk
import os
import time
# Establish the database connection (module-level globals shared by Handler).
# NOTE(review): credentials are hard-coded here; consider moving them to a
# config file or environment variables.
Conexion = MySQLdb.connect(host='localhost', user='conan', passwd='crom', db='Noticias')
micursor = Conexion.cursor(MySQLdb.cursors.DictCursor)
class Handler:
    """GTK signal handler / controller for the news-scraping application.

    Loads the UI from ``noticias.glade`` and talks to the module-level MySQL
    connection (``Conexion``/``micursor``).  User-facing strings are Spanish
    and are part of the UI contract; do not translate them.
    """

    builder=None

    def __init__(self):
        # Start GtkBuilder and load the UI definition from the glade file
        self.builder = Gtk.Builder()
        self.builder.add_from_file("noticias.glade")
        self.handlers = {
            "onRecopilarActivate": self.onRecopilarActivate,  # Menu options
            "onConsultarActivate": self.onConsultarActivate,
            "on_btn_recopilar_clicked": self.on_btn_recopilar_clicked,  # Button clicks
            "on_btn_consultar_clicked": self.on_btn_consultar_clicked,
            "onSelectAboutDialog": self.onSelectAboutDialog,  # About dialog
            "onCloseAboutDialog": self.onCloseAboutDialog,
            "onDeleteWindow": self.onDeleteWindow  # Close
        }
        # Connect the signals and start the application
        self.builder.connect_signals(self.handlers)
        self.window = self.builder.get_object("window")
        self.about = self.builder.get_object("aboutdialog")
        self.btn_recopilar = self.builder.get_object("btn_recopilar")  # Buttons
        self.btn_consultar = self.builder.get_object("btn_consultar")
        self.enlace1 = self.builder.get_object("enlace1")  # News texts and links
        self.enlace2 = self.builder.get_object("enlace2")
        self.enlace3 = self.builder.get_object("enlace3")
        self.enlace4 = self.builder.get_object("enlace4")
        self.enlace5 = self.builder.get_object("enlace5")
        self.mensaje = self.builder.get_object("mensaje")  # Status message for the user
        self.comboboxtext_fecha = self.builder.get_object("comboboxtext_fecha")  # Date selection
        self.sel_fecha = self.builder.get_object("sel_fecha")
        self.spinner = self.builder.get_object("spinner")

        self.fechas = []  # If earlier dates exist, populate the combobox with them
        query = "SELECT DISTINCT Fecha FROM Noticias;"
        micursor.execute(query)
        registros = micursor.fetchall()
        for r in registros:
            self.fechas.append(r['Fecha'])
            self.comboboxtext_fecha.append_text(r['Fecha'])

        # Show the window, then hide every widget that is only shown on demand.
        self.window.show_all()
        self.btn_recopilar.hide()
        self.btn_consultar.hide()
        self.enlace1.hide()
        self.enlace2.hide()
        self.enlace3.hide()
        self.enlace4.hide()
        self.enlace5.hide()
        self.sel_fecha.hide()
        self.spinner.hide()
        self.comboboxtext_fecha.hide()
        self.window.resize(1000,200)

    # When a menu option is selected, show only the widgets for that mode.
    def onRecopilarActivate(self, *args):
        self.btn_recopilar.show()
        self.btn_consultar.hide()
        self.mensaje.hide()
        self.sel_fecha.hide()
        self.comboboxtext_fecha.hide()

    def onConsultarActivate(self, *args):
        self.btn_recopilar.hide()
        self.btn_consultar.show()
        self.mensaje.hide()
        self.sel_fecha.show()
        self.comboboxtext_fecha.show()

    # --- Button click handlers ---
    # COLLECT
    def on_btn_recopilar_clicked(self, *args):
        fecha_hoy = ''
        # If there are already news items for today, delete them first.
        # NOTE(review): queries are built by string concatenation throughout
        # this class; prefer parameterized queries (cursor.execute with
        # placeholders) to avoid SQL injection / quoting issues.
        now = time.strftime("%d.%m.%Y")
        query = "DELETE FROM Noticias WHERE Fecha = '"+now+"';"
        micursor.execute(query)
        Conexion.commit()
        # Run the scraper spider (blocking call; spinner shown meanwhile)
        self.spinner.show()
        self.spinner.start()
        os.system("scrapy crawl NoticiasSpider")
        self.spinner.stop()
        self.spinner.hide()
        # scrapy should have stored today's news in the DB by now
        query = "SELECT * FROM Noticias WHERE Fecha LIKE '" +now+ "%';"
        micursor.execute(query)
        noticia1 = micursor.fetchone()
        if noticia1 is not None:  # usually there are more than 5 items, but just in case
            fecha_hoy = noticia1['Fecha']  # remember the date to add it to the combobox below
            self.enlace1.set_label(noticia1['Titulo'])
            self.enlace1.set_uri(noticia1['Enlace'])
            self.enlace1.show()
            noticia = micursor.fetchone()
            if noticia is not None:
                self.enlace2.set_label(noticia['Titulo'])
                self.enlace2.set_uri(noticia['Enlace'])
                self.enlace2.show()
            noticia = micursor.fetchone()
            if noticia is not None:
                self.enlace3.set_label(noticia['Titulo'])
                self.enlace3.set_uri(noticia['Enlace'])
                self.enlace3.show()
            noticia = micursor.fetchone()
            if noticia is not None:
                self.enlace4.set_label(noticia['Titulo'])
                self.enlace4.set_uri(noticia['Enlace'])
                self.enlace4.show()
            noticia = micursor.fetchone()
            if noticia is not None:
                self.enlace5.set_label(noticia['Titulo'])
                self.enlace5.set_uri(noticia['Enlace'])
                self.enlace5.show()
        # If today's date is not yet in the combobox, add it.
        # NOTE(review): "is not ''" tests object identity, not equality; this
        # should be "fecha_hoy != ''" (SyntaxWarning on modern Pythons).
        if fecha_hoy is not '' and fecha_hoy not in self.fechas:
            self.fechas.append(noticia1['Fecha'])
            self.comboboxtext_fecha.append_text(fecha_hoy)
        self.mensaje.set_text("Noticias existentes recogidas satisfactoriamente.")
        self.mensaje.show()

    # QUERY
    def on_btn_consultar_clicked(self, *args):
        fecha_seleccionada = self.comboboxtext_fecha.get_active_text()
        if fecha_seleccionada is not None:  # a date was selected: run the query and show results
            query = "SELECT * FROM Noticias WHERE Fecha=\""+fecha_seleccionada+"\";"
            micursor.execute(query)
            noticia1 = micursor.fetchone()
            if noticia1 is not None:
                fecha_hoy = noticia1['Fecha']
                self.enlace1.set_label(noticia1['Titulo'])
                self.enlace1.set_uri(noticia1['Enlace'])
                self.enlace1.show()
                noticia = micursor.fetchone()
                if noticia is not None:
                    self.enlace2.set_label(noticia['Titulo'])
                    self.enlace2.set_uri(noticia['Enlace'])
                    self.enlace2.show()
                noticia = micursor.fetchone()
                if noticia is not None:
                    self.enlace3.set_label(noticia['Titulo'])
                    self.enlace3.set_uri(noticia['Enlace'])
                    self.enlace3.show()
                noticia = micursor.fetchone()
                if noticia is not None:
                    self.enlace4.set_label(noticia['Titulo'])
                    self.enlace4.set_uri(noticia['Enlace'])
                    self.enlace4.show()
                noticia = micursor.fetchone()
                if noticia is not None:
                    self.enlace5.set_label(noticia['Titulo'])
                    self.enlace5.set_uri(noticia['Enlace'])
                    self.enlace5.show()
            self.mensaje.set_text("Noticias existentes mostradas satisfactoriamente.")
        else:
            self.mensaje.set_text("Debe seleccionar una fecha.")
        self.mensaje.show()

    def onDeleteWindow(self, *args):
        # Optionally wipe the database contents on exit (intentionally disabled):
        #query = "DELETE FROM Noticias WHERE 1;"
        #micursor.execute(query)
        #Conexion.commit()
        # Close the DB resources before quitting GTK
        micursor.close()
        Conexion.close()
        Gtk.main_quit(*args)

    def onSelectAboutDialog(self, *args):
        self.about.show()

    def onCloseAboutDialog(self, window, data=None):
        self.about.hide()
def main():
    """Build the Handler (which shows the window) and run the GTK main loop."""
    window = Handler()
    Gtk.main()
    return 0

if __name__ == '__main__':
    main()
| [
"josemaria_f_c@hotmail.com"
] | josemaria_f_c@hotmail.com |
d7f1386462f4acaaa3180b34123f6c040074cdc6 | 2770d7e78b88cc08291abd3381a2b578bbb566f0 | /www/migrations/0007_order_confirmation_no.py | e8baa4af9ee808ad4c9b1c4e760f5bad82a2f666 | [] | no_license | rileonard15/grabdeals | 8bb6d58ac7ba9265a57eebdcde15c4f8cf01235c | e13d8bd1a0b4e7b4ccca7ae91556af256c3456b1 | refs/heads/master | 2021-01-01T05:54:22.469007 | 2017-07-16T02:03:05 | 2017-07-16T02:03:05 | 97,300,703 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-15 08:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable ``confirmation_no`` char field to ``www.Order``."""

    # Must run after the migration that added Order.transaction_id.
    dependencies = [
        ('www', '0006_order_transaction_id'),
    ]

    operations = [
        # Nullable so existing Order rows need no default value.
        migrations.AddField(
            model_name='order',
            name='confirmation_no',
            field=models.CharField(max_length=100, null=True),
        ),
    ]
| [
"ldimayuga@phixer.net"
] | ldimayuga@phixer.net |
894489a6d159e040d5ca697e4bb1fadf471b887c | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /lib/python2.7/site-packages/_pytest/recwarn.py | 753bfd18742651b338e79169aab68ed417785218 | [
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 7,173 | py | """ recording warnings during test function execution. """
import inspect
import py
import sys
import warnings
import pytest
@pytest.yield_fixture
def recwarn(request):
    """Return a WarningsRecorder instance that provides these methods:

    * ``pop(category=None)``: return last warning matching the category.
    * ``clear()``: clear list of warnings

    See http://docs.python.org/library/warnings.html for information
    on warning categories.
    """
    wrec = WarningsRecorder()
    # Enter the recorder for the duration of the test; its __exit__ restores
    # the original warnings filters and showwarning hook afterwards.
    with wrec:
        warnings.simplefilter('default')
        yield wrec
def pytest_namespace():
    """Expose the helpers of this module under the ``pytest`` namespace."""
    return dict(deprecated_call=deprecated_call, warns=warns)
def deprecated_call(func, *args, **kwargs):
    """ assert that calling ``func(*args, **kwargs)`` triggers a
    ``DeprecationWarning`` or ``PendingDeprecationWarning``.

    Note: we cannot use WarningsRecorder here because it is still subject
    to the mechanism that prevents warnings of the same type from being
    triggered twice for the same module. See #1190.

    """
    # Collect the categories of every warning raised while func runs.
    categories = []

    def warn_explicit(message, category, *args, **kwargs):
        categories.append(category)
        old_warn_explicit(message, category, *args, **kwargs)

    def warn(message, category=None, *args, **kwargs):
        # warnings.warn accepts either a message string + category, or a
        # Warning instance whose class is the category.
        if isinstance(message, Warning):
            categories.append(message.__class__)
        else:
            categories.append(category)
        old_warn(message, category, *args, **kwargs)

    # Temporarily monkeypatch both entry points of the warnings machinery.
    old_warn = warnings.warn
    old_warn_explicit = warnings.warn_explicit
    warnings.warn_explicit = warn_explicit
    warnings.warn = warn
    try:
        ret = func(*args, **kwargs)
    finally:
        # Always restore the originals, even if func raised.
        warnings.warn_explicit = old_warn_explicit
        warnings.warn = old_warn
    deprecation_categories = (DeprecationWarning, PendingDeprecationWarning)
    if not any(issubclass(c, deprecation_categories) for c in categories):
        __tracebackhide__ = True
        raise AssertionError("%r did not produce DeprecationWarning" % (func,))
    return ret
def warns(expected_warning, *args, **kwargs):
    """Assert that code raises a particular class of warning.

    Specifically, the input @expected_warning can be a warning class or
    tuple of warning classes, and the code must return that warning
    (if a single class) or one of those warnings (if a tuple).

    This helper produces a list of ``warnings.WarningMessage`` objects,
    one for each warning raised.

    This function can be used as a context manager, or any of the other ways
    ``pytest.raises`` can be used::

        >>> with warns(RuntimeWarning):
        ...    warnings.warn("my warning", RuntimeWarning)
    """
    wcheck = WarningsChecker(expected_warning)
    if not args:
        # No callable/code given: act as a context manager.
        return wcheck
    elif isinstance(args[0], str):
        # String form: compile and exec the code in the caller's frame.
        code, = args
        assert isinstance(code, str)
        frame = sys._getframe(1)
        loc = frame.f_locals.copy()
        loc.update(kwargs)

        with wcheck:
            code = py.code.Source(code).compile()
            py.builtin.exec_(code, frame.f_globals, loc)
    else:
        # Callable form: invoke it under the checker.
        func = args[0]
        with wcheck:
            return func(*args[1:], **kwargs)
class RecordedWarning(object):
    """Value object for one captured warning.

    Mirrors the attribute names of ``warnings.WarningMessage`` so callers
    can treat the two interchangeably.
    """

    def __init__(self, message, category, filename, lineno, file, line):
        values = (message, category, filename, lineno, file, line)
        names = ("message", "category", "filename", "lineno", "file", "line")
        for attr, value in zip(names, values):
            setattr(self, attr, value)
class WarningsRecorder(object):
    """A context manager to record raised warnings.

    Adapted from `warnings.catch_warnings`.
    """

    def __init__(self, module=None):
        # The (possibly stubbed) warnings module whose hooks we patch.
        self._module = sys.modules['warnings'] if module is None else module
        self._entered = False
        self._list = []

    @property
    def list(self):
        """The list of recorded warnings."""
        return self._list

    def __getitem__(self, i):
        """Get a recorded warning by index."""
        return self._list[i]

    def __iter__(self):
        """Iterate through the recorded warnings."""
        return iter(self._list)

    def __len__(self):
        """The number of recorded warnings."""
        return len(self._list)

    def pop(self, cls=Warning):
        """Pop the first recorded warning, raise exception if not exists."""
        for i, w in enumerate(self._list):
            if issubclass(w.category, cls):
                return self._list.pop(i)
        __tracebackhide__ = True
        raise AssertionError("%r not found in warning list" % cls)

    def clear(self):
        """Clear the list of recorded warnings."""
        self._list[:] = []

    def __enter__(self):
        # Re-entering would clobber the saved filters/hook; forbid it.
        if self._entered:
            __tracebackhide__ = True
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # Save and copy the filter list so __exit__ can restore it.
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning

        def showwarning(message, category, filename, lineno,
                        file=None, line=None):
            self._list.append(RecordedWarning(
                message, category, filename, lineno, file, line))

            # still perform old showwarning functionality
            self._showwarning(
                message, category, filename, lineno, file=file, line=line)

        self._module.showwarning = showwarning

        # allow the same warning to be raised more than once
        self._module.simplefilter('always', append=True)

        return self

    def __exit__(self, *exc_info):
        if not self._entered:
            __tracebackhide__ = True
            raise RuntimeError("Cannot exit %r without entering first" % self)
        # Restore the saved filters and showwarning hook.
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
class WarningsChecker(WarningsRecorder):
    """A WarningsRecorder that additionally fails the test on exit if none
    of the expected warning categories were recorded.
    """

    def __init__(self, expected_warning=None, module=None):
        super(WarningsChecker, self).__init__(module=module)

        msg = ("exceptions must be old-style classes or "
               "derived from Warning, not %s")
        # Normalize expected_warning to a tuple of classes (or None).
        if isinstance(expected_warning, tuple):
            for exc in expected_warning:
                if not inspect.isclass(exc):
                    raise TypeError(msg % type(exc))
        elif inspect.isclass(expected_warning):
            expected_warning = (expected_warning,)
        elif expected_warning is not None:
            raise TypeError(msg % type(expected_warning))

        self.expected_warning = expected_warning

    def __exit__(self, *exc_info):
        super(WarningsChecker, self).__exit__(*exc_info)

        # only check if we're not currently handling an exception
        if all(a is None for a in exc_info):
            if self.expected_warning is not None:
                if not any(r.category in self.expected_warning for r in self):
                    __tracebackhide__ = True
                    pytest.fail("DID NOT WARN")
| [
"wgyumg@mgail.com"
] | wgyumg@mgail.com |
7d8446981ac589f181e469108253fd61652a8b5a | cc117dd17f6cb0d69745dc85473396b4af5ce237 | /library/urls.py | 2b15a60fbfdfceb9810c2704f46ad1361a08b9c6 | [] | no_license | ewelinabuturla/library | d7cea875a050b1778307cfef96d98ca8643d04f7 | 1cf21a654276a69207a3be123924a5761e578f4a | refs/heads/master | 2023-01-07T01:25:58.048606 | 2020-11-04T09:12:24 | 2020-11-04T09:23:19 | 309,950,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | """library URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path("admin/", admin.site.urls),
path("api/", include("library_apps.book.urls")),
]
| [
"ewelina.buturla@monterail.com"
] | ewelina.buturla@monterail.com |
60b4dec8fdb07501aa7c4bef54bac369e4012a14 | e7a2f530e4440a330c1b15ab6c3c3b65cdd88829 | /alloy_site/alloy_site/views.py | 6d1c4e50cf8520152cc80b477d2df767aa1a6208 | [] | no_license | veyorokon/AlloyCoinWebsite | e7038bf75c8f1cb898d7f226cf12ecd369620db3 | b695f83575a7d08b183d950081824ae766161ef1 | refs/heads/master | 2023-03-20T12:03:16.072149 | 2017-06-15T23:42:48 | 2017-06-15T23:42:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
def index(request):
template = loader.get_template('alloy_site/index.html')
context = {} # doing nothing dynamic now
return HttpResponse(template.render(context, request))
| [
"dustin.td@gmail.com"
] | dustin.td@gmail.com |
656a0129b1473860ace3f1d7cc53e6e32919808c | 904acd1ae84bdd11f34dc33683cc4de6ce21df5b | /algorithm/test/test_random_forest_gini_sample_smote400.py | cee089925308291a2972e00bfff2cd7ff4b3faf6 | [] | no_license | imxtyler/MachineLearning | 5840cd7f2db2bfadc3c64e5478441e00fbcaece0 | 2562815472bcf0568c8b157d28db59285527835d | refs/heads/master | 2021-09-15T21:47:02.943104 | 2018-06-11T13:43:50 | 2018-06-11T13:43:50 | 91,756,092 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,424 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import random
import pandas
import numpy
import multiprocessing
import matplotlib.pyplot as plt
from pandas import DataFrame,Series
from preprocessing import DataPreprocessing
from gini_index import GiniIndex
from sklearn import metrics
from sklearn import model_selection
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from ChicagoBoothML_Helpy.EvaluationMetrics import bin_classif_eval
from nearmiss1 import NearMiss
#from smote1 import Smote
from smote import Smote
from ensemble import Ensemble
if __name__ == "__main__":
pandas.set_option('display.max_rows', None)
#pprint.pprint(sys.path)
#file_fullpath = '/home/login01/Workspaces/python/dataset/module_data_app_hl_calls_stg1/app_hl_stg1.csv'
#file_fullpath = '/home/login01/Workspaces/python/dataset/module_data_stg2/md_data_stg2.csv'
#file_fullpath = '/home/login01/Workspaces/python/dataset/module_data_stg2/md_data_stg2_tmp.csv'
#train_fullpath = '/home/login01/Workspaces/python/dataset/module_data_stg2/md_data_stg2_tmp.csv'
train_fullpath = '/home/login01/Workspaces/python/dataset/module_data_stg2/tmp_train.csv'
test_fullpath = '/home/login01/Workspaces/python/dataset/module_data_stg2/tmp_test.csv'
#test_fullpath = '/home/login01/Workspaces/python/dataset/module_data_stg2/tmp_train.csv'
#attributes=[
# "id",
# "method",
# #"object_id",
# #"true_name",
# #"id_card_no",
# #"phone",
# "status",
# #"create_time",
# #"update_time",
# #"real_name",
# #"idcard",
# "sex",
# "age",
# "in_net_time",
# #"gmt_create",
# #"reg_time",
# "allavgamt",
# "12avgamt",
# "9avgamt",
# "6avgamt",
# "3avgamt",
# "planavgamt",
# #"phone_balance",
# "12zhuavgcount",
# "12zhutime",
# "9zhuavgcount",
# "9zhutime",
# "6zhuavgcount",
# "6zhutime",
# "3zhuavgcount",
# "3zhutime",
# "12beiavgcount",
# "12beitime",
# "9beiavgcount",
# "9beitime",
# "6beiavgcount",
# "6beitime",
# "3beiavgcount",
# "3beitime",
# "12hutongavgcount",
# "12hutongtime",
# "9hutongavgcount",
# "9hutongtime",
# "6hutongavgcount",
# "6hutongtime",
# "3hutongavgcount",
# "3hutongtime",
# "12receiveavg",
# "9receiveavg",
# "6receiveavg",
# "3receiveavg",
# "12sendavg",
# "9sendavg",
# "6sendavg",
# "3sendavg",
# "12avgflow",
# "9avgflow",
# "6avgflow",
# "3avgflow",
# "12avgnettime",
# "9avgnettime",
# "6avgnettime",
# "3avgnettime",
# "12contactavgcount",
# "12contacttime",
# "9contactavgcount",
# "9contacttime",
# "6contactavgcount",
# "6contacttime",
# "3contactavgcount",
# "3contacttime",
# "12zhuplace",
# "9zhuplace",
# "6zhuplace",
# "3zhuplace",
# #"user_own_overdue", #y, target
# #"user_own_overdue_yet",
# #"user_own_fpd_overdue_order",
# #"user_own_ninety_overdue_order",
# #"user_own_sixty_overdue_order",
# #"user_own_thirty_overdue_order",
# #"user_own_ninety_overdue_num",
# #"user_own_sixty_overdue_num",
# #"user_own_thirty_overdue_num",
# #"user_credit_ninety_overdue",
# "1zhuplace"
#]
#target_key="user_own_overdue"
attributes=[
#"create_date",
#"user_name",
#"user_phone",
"user_age",
"user_sex",
#"user_id_card",
"user_live_province",
"user_live_city",
"user_live_address",
#"user_regi_address",
#"user_mailbox",
"user_marriage",
"user_rela_name",
"user_relation",
"user_rela_phone",
"user_high_edu",
#"user_indu_type",
"user_company_name",
#"user_company_phone",
#"user_work_time",
#"user_work_phone",
"user_income_range",
"user_last_consume",
"user_ave_six_consume",
"user_ave_twelve_consume",
"user_house_mortgage",
"user_car_mortgage",
"user_base_fund",
"user_credit_limit",
"user_other_overdue",
#"user_own_overdue", #y, target
#"user_other_overdue_yet",
"user_own_overdue_yet",
"user_own_fpd_overdue_order",
#"user_own_ninety_overdue_order", #optional y, target
"user_own_sixty_overdue_order",
"user_own_thirty_overdue_order",
"user_own_ninety_overdue_num",
"user_own_sixty_overdue_num",
"user_own_thirty_overdue_num",
"user_credit_ninety_overdue",
"user_loan_pass",
"user_loan_amount",
"user_four_ident",
"user_face_ident",
"user_base_fund_ident",
"user_center_ident",
"user_card_ident",
"user_loan_ident"
]
target_key="user_own_overdue"
#target_key="user_own_ninety_overdue_order"
RANDOM_SEED = 99
##############################################################################################################
## Way one, using train_test_split spliting the source data set into train and test
#df = pandas.read_csv(file_fullpath,sep=',',na_values='NA',low_memory=False)
#df.convert_objects(convert_numeric=True)
#X = df[attributes]
#y = df[target_key]
#validation_size = 0.20
#X_train,X_validation,y_train,y_validation = model_selection.train_test_split(X,y,test_size=validation_size,random_state=RANDOM_SEED)
#train_datapreprocessing = DataPreprocessing(pandas.concat([X_train,y_train],axis=1),attributes,target_key)
##train_datapreprocessing.data_summary()
#binary_transform_attrs = ['user_live_address','user_rela_name','user_relation','user_rela_phone','user_high_edu','user_company_name']
#X_train = train_datapreprocessing.transform_x_to_binary(binary_transform_attrs)
#X_train = train_datapreprocessing.transform_x_dtype(binary_transform_attrs,d_type=[int],uniform_type=True)
#area_attrs = ['user_live_province','user_live_city']
#resource_dir = '../resources'
#X_train = train_datapreprocessing.china_area_number_mapping(area_attrs,resource_dir)
#X_train = train_datapreprocessing.transform_x_dtype(area_attrs,d_type=[int],uniform_type=True)
#X_train = train_datapreprocessing.x_dummies_and_fillna()
##X_train.info()
##print(X_train.head(5))
##Gini_DF = pandas.concat([X_train,y_train],axis=1)
###gini_attrs = Gini_DF.axes[1]
##gini_attrs = list(Gini_DF.columns.values)
##gini = GiniIndex(Gini_DF,gini_attrs,target_key,Gini_DF[target_key])
##gini_index_dict = gini.gini_index()
##gini_list = sorted(gini_index_dict.items(),key=lambda item:item[1])
##for item in gini_list:
## print(item)
#B = 400
#rf_model = \
# RandomForestClassifier(
# n_estimators=B,
# criterion='entropy',
# max_depth=None, # expand until all leaves are pure or contain < MIN_SAMPLES_SPLIT samples
# min_samples_split=200,
# min_samples_leaf=100,
# min_weight_fraction_leaf=0.0,
# max_features=None,
# # number of features to consider when looking for the best split; None: max_features=n_features
# max_leaf_nodes=None, # None: unlimited number of leaf nodes
# bootstrap=True,
# oob_score=True, # estimate Out-of-Bag Cross Entropy
# n_jobs=multiprocessing.cpu_count() - 4, # paralellize over all CPU cores but 2
# class_weight=None, # our classes are skewed, but but too skewed
# random_state=RANDOM_SEED,
# verbose=0,
# warm_start=False)
#rf_model.fit(
# X=X_train,
# y=y_train)
#validation_datapreprocessing = DataPreprocessing(pandas.concat([X_validation,y_validation],axis=1),attributes,target_key)
##validation_datapreprocessing.data_summary()
##X_validation = validation_datapreprocessing.transform_x_to_binary(binary_transform_attrs)
##X_validation = validation_datapreprocessing.transform_x_dtype(binary_transform_attrs,d_type=[int],uniform_type=True)
#X_validation = validation_datapreprocessing.china_area_number_mapping(area_attrs,resource_dir)
#X_validation = validation_datapreprocessing.transform_x_dtype(area_attrs,d_type=[int],uniform_type=True)
#X_validation = validation_datapreprocessing.x_dummies_and_fillna()
#rf_pred_probs = rf_model.predict_proba(X=X_train)
##rf_pred_probs = rf_model.predict_log_proba(X=X_train)
##result_probs = numpy.hstack((rf_pred_probs,y_train.as_matrix()))
#result_probs = numpy.column_stack((rf_pred_probs,y_train.as_matrix()))
##for item in result_probs:
## print(item)
#print(metrics.confusion_matrix(y_validation, rf_pred_probs))
#print(metrics.accuracy_score(y_validation, rf_pred_probs))
#print(metrics.precision_score(y_validation, rf_pred_probs))
#print(metrics.f1_score(y_validation, rf_pred_probs))
#print(metrics.classification_report(y_validation, rf_pred_probs))
##############################################################################################################
# Way two, cross-validation, using KFold spliting the source data set into train and test, repeat k times, the default evaluation
train_df = pandas.read_csv(train_fullpath,sep=',',na_values='NA',low_memory=False)
#for item in train_df.columns.values:
# pandas.to_numeric(train_df[item])
X_train = train_df[attributes]
y_train = train_df[target_key]
train_datapreprocessing = DataPreprocessing(pandas.concat([X_train,y_train],axis=1),attributes,target_key)
#train_datapreprocessing.data_summary()
binary_transform_attrs = ['user_live_address','user_rela_name','user_relation','user_rela_phone','user_high_edu','user_company_name']
X_train = train_datapreprocessing.transform_x_to_binary(binary_transform_attrs)
X_train = train_datapreprocessing.transform_x_dtype(binary_transform_attrs,d_type=[int],uniform_type=True)
area_attrs = ['user_live_province','user_live_city']
resource_dir = '../resources'
X_train = train_datapreprocessing.china_area_number_mapping(area_attrs,resource_dir)
X_train = train_datapreprocessing.transform_x_dtype(area_attrs,d_type=[int],uniform_type=True)
X_train = train_datapreprocessing.x_dummies_and_fillna()
#train_datapreprocessing.data_summary()
Gini_DF = pandas.concat([X_train,y_train],axis=1)
#gini_attrs = Gini_DF.axes[1]
gini_attrs = list(X_train.columns.values)
gini = GiniIndex(Gini_DF,gini_attrs,target_key,Gini_DF[target_key])
gini_index_dict = gini.gini_index()
gini_list = sorted(gini_index_dict.items(),key=lambda item:item[1])
new_attributes = []
new_attribues_num = 32
#new_attribues_num = len(X_train.columns.values)
i = 0
for item in gini_list:
#print(type(item))
#print(item)
if i < new_attribues_num:
new_attributes.append(str(item[0]))
i = i+1
X_train = X_train[new_attributes]
#print('-----------------nnnnnnnnnnnnnnnnnnnnnnnnnn-----------------gini:', new_attribues_num)
#print(X_train.info())
# Begin: smote
new_train_df = pandas.concat([X_train,y_train],axis=1)
smote_processor = Smote(new_train_df[new_train_df[target_key]==1],N=400,k=5)
train_df_sample = smote_processor.over_sampling()
#X_sample,y_sample = smote_processor.over_sampling()
sample = DataFrame(train_df_sample,columns=new_train_df.columns.values)
#sample_datapreprocessing = DataPreprocessing(sample,sample.drop(target_key,axis=1,inplace=False).columns.values,target_key)
#sample_datapreprocessing.data_summary()
X_train = pandas.concat([X_train,sample[X_train.columns.values]],axis=0)
y_train = pandas.concat([y_train.to_frame().rename(columns={0:target_key}),sample[target_key].to_frame().rename(columns={0:target_key})],axis=0)[target_key]
X_train = X_train.reset_index(drop=True)
y_train = y_train.reset_index(drop=True)
#merged_train_datapreprocessing = DataPreprocessing(pandas.concat([X_train,y_train],axis=1),attributes,target_key)
#merged_train_datapreprocessing.data_summary()
# End: smote
## Begin: nearmiss
#nearmiss_processor = NearMiss(random_state=RANDOM_SEED,n_neighbors=5)
#X_sample,y_sample = nearmiss_processor.sample(X_train.as_matrix(),y_train.as_matrix())
#sample = pandas.concat([DataFrame(X_sample,columns=X_train.columns.values),Series(y_sample).to_frame().rename(columns={0:target_key})],axis=1)
##sample_datapreprocessing = DataPreprocessing(sample,sample.drop(target_key,axis=1,inplace=False).columns.values,target_key)
##sample_datapreprocessing.data_summary()
#X_train = pandas.concat([X_train,DataFrame(X_sample,columns=X_train.columns.values)])
#y_train = pandas.concat([y_train.to_frame(),sample[target_key].to_frame()])[target_key]
#X_train = X_train.reset_index(drop=True)
#y_train = y_train.reset_index(drop=True)
#merged_train_datapreprocessing = DataPreprocessing(pandas.concat([X_train,y_train],axis=1),attributes,target_key)
#merged_train_datapreprocessing.data_summary()
## End: nearmiss
## Begin: smote1
#smote_processor = Smote(random_seed=RANDOM_SEED,n_neighbors=5,m_neighbors=5)
#X_sample,y_sample = smote_processor.sample(X_train.as_matrix(),y_train.as_matrix())
#sample = pandas.concat([DataFrame(X_sample,columns=X_train.columns.values),Series(y_sample).to_frame().rename(columns={0:target_key})],axis=1)
##sample_datapreprocessing = DataPreprocessing(sample,sample.drop(target_key,axis=1,inplace=False).columns.values,target_key)
##sample_datapreprocessing.data_summary()
#X_train = pandas.concat([X_train,DataFrame(X_sample,columns=X_train.columns.values)])
#y_train = pandas.concat([y_train.to_frame(),sample[target_key].to_frame()])[target_key]
#X_train = X_train.reset_index(drop=True)
#y_train = y_train.reset_index(drop=True)
#merged_train_datapreprocessing = DataPreprocessing(pandas.concat([X_train,y_train],axis=1),attributes,target_key)
#merged_train_datapreprocessing.data_summary()
## End: smote1
## Begin: ensemble
#ensemble_processor = Ensemble(random_seed=RANDOM_SEED,n_subset=10,n_tree=12)
#X_sample,y_sample = ensemble_processor.sample(X_train.as_matrix(),y_train.as_matrix())
#sample = pandas.concat([DataFrame(X_sample,columns=X_train.columns.values),Series(y_sample).to_frame().rename(columns={0:target_key})],axis=1)
##sample_datapreprocessing = DataPreprocessing(sample,sample.drop(target_key,axis=1,inplace=False).columns.values,target_key)
##sample_datapreprocessing.data_summary()
#X_train = pandas.concat([X_train,DataFrame(X_sample,columns=X_train.columns.values)])
#y_train = pandas.concat([y_train.to_frame(),sample[target_key].to_frame()])[target_key]
#X_train = X_train.reset_index(drop=True)
#y_train = y_train.reset_index(drop=True)
#merged_train_datapreprocessing = DataPreprocessing(pandas.concat([X_train,y_train],axis=1),attributes,target_key)
#merged_train_datapreprocessing.data_summary()
## End: ensemble
#X_train.describe()
#X_train.info()
#print(X_train.head(5))
#-----------------------------Find the best parameters' combination of the model------------------------------
#param_test1 = {'n_estimators': range(20, 600, 20)}
#gsearch1 = GridSearchCV(estimator=RandomForestClassifier(min_samples_split=200,
# min_samples_leaf=2, max_depth=5, max_features='sqrt',
# random_state=19),
# param_grid=param_test1, scoring='roc_auc', cv=5)
#gsearch1.fit(X_train,y_train)
#for item in gsearch1.grid_scores_:
# print(item)
#print(gsearch1.best_params_)
#print(gsearch1.best_score_)
#print(gsearch1.grid_scores_, gsearch1.best_params_, gsearch2.best_score_,'\n')
#print('-----------------------------------------------------------------------------------------------------')
#param_test2 = {'max_depth': range(2, 16, 2), 'min_samples_split': range(20, 200, 20)}
#gsearch2 = GridSearchCV(estimator=RandomForestClassifier(n_estimators=100,
# min_samples_leaf=2, max_features='sqrt', oob_score=True,
# random_state=19),
# param_grid=param_test2, scoring='roc_auc', iid=False, cv=5)
#gsearch2.fit(X_train,y_train)
#for item in gsearch2.grid_scores_:
# print(item)
#print(gsearch2.best_params_)
#print(gsearch2.best_score_)
#print(gsearch2.cv_results_, gsearch2.best_params_, gsearch2.best_score_,'\n')
##-----------------------------Find the best parameters' combination of the model------------------------------
B = 100
model = \
RandomForestClassifier(
n_estimators=B,
#criterion='entropy',
criterion='gini',
#max_depth=None, # expand until all leaves are pure or contain < MIN_SAMPLES_SPLIT samples
max_depth=12,
min_samples_split=180,
min_samples_leaf=2,
min_weight_fraction_leaf=0.0,
#max_features=None, # number of features to consider when looking for the best split; None: max_features=n_features
max_features="sqrt",
max_leaf_nodes=None, # None: unlimited number of leaf nodes
bootstrap=True,
oob_score=True, # estimate Out-of-Bag Cross Entropy
n_jobs=multiprocessing.cpu_count() - 4, # paralellize over all CPU cores minus 4
class_weight=None, # our classes are skewed, but but too skewed
random_state=RANDOM_SEED,
verbose=0,
warm_start=False)
kfold = model_selection.KFold(n_splits=5,random_state=RANDOM_SEED)
eval_standard = ['accuracy','recall_macro','precision_macro','f1_macro']
results = []
for scoring in eval_standard:
cv_results = model_selection.cross_val_score(model,X_train,y_train,scoring=scoring,cv=kfold)
results.append(cv_results)
msg = "%s: %f (%f)" % (scoring,cv_results.mean(),cv_results.std())
print(msg)
# Make predictions on validation dataset
test_df = pandas.read_csv(test_fullpath,sep=',',na_values='NA',low_memory=False)
#for item in test_df.columns.values:
# pandas.to_numeric(test_df[item])
X_validation = test_df[attributes]
y_validation = test_df[target_key]
validation_datapreprocessing = DataPreprocessing(pandas.concat([X_validation,y_validation],axis=1),attributes,target_key)
#validation_datapreprocessing.data_summary()
X_validation = validation_datapreprocessing.transform_x_to_binary(binary_transform_attrs)
X_validation = validation_datapreprocessing.transform_x_dtype(binary_transform_attrs,d_type=[int],uniform_type=True)
X_validation = validation_datapreprocessing.china_area_number_mapping(area_attrs,resource_dir)
X_validation = validation_datapreprocessing.transform_x_dtype(area_attrs,d_type=[int],uniform_type=True)
X_validation = validation_datapreprocessing.x_dummies_and_fillna(allnull=True,nullvalue=random.randint(0,2))
#validation_datapreprocessing.data_summary()
model.fit(X_train,y_train)
print('oob_score: %f' % (model.oob_score_))
#default evaluation way
print('-------------------default evaluation----------------------')
X_validation = X_validation[new_attributes]
#rf_pred_probs = model.predict_proba(X=X_validation)
rf_pred_probs = model.predict(X=X_validation)
result_probs = numpy.column_stack((rf_pred_probs,y_validation.as_matrix()))
#for item in result_probs:
# print(item)
print(metrics.confusion_matrix(y_validation, rf_pred_probs))
print(metrics.accuracy_score(y_validation, rf_pred_probs))
print(metrics.precision_score(y_validation, rf_pred_probs))
print(metrics.f1_score(y_validation, rf_pred_probs))
print(metrics.classification_report(y_validation, rf_pred_probs))
#self-defined evaluation way
print('-------------------self-defined evaluation----------------------')
low_prob = 1e-6
high_prob = 1 - low_prob
log_low_prob = numpy.log(low_prob)
g_low_prob = numpy.log(low_prob)
log_high_prob = numpy.log(high_prob)
log_prob_thresholds = numpy.linspace(start=log_low_prob,stop=log_high_prob,num=100)
prob_thresholds = numpy.exp(log_prob_thresholds)
rf_pred_probs = model.predict_proba(X=X_validation)
#result_probs = numpy.column_stack((rf_pred_probs,y_validation))
#for item in result_probs:
# print(item)
#for item in rf_pred_probs[:,1]:
# print(item)
## histogram of predicted probabilities
##n,bins,patches = plt.hist(rf_pred_probs[:1],10,normed=1,facecolor='g',alpha=0.75)
##plt.xlabel('Predicted probability of diabetes')
##plt.ylabel('Frequency')
##plt.title('Histogram of predicted probabilities')
###plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
##plt.axis([0,1,0,1])
##plt.grid(True)
#print(type(rf_pred_probs))
#print(type(rf_pred_probs[:,1]))
#print(rf_pred_probs[:,1])
#fig = plt.figure()
#ax = fig.add_subplot(111)
#ax.hist(rf_pred_probs[:,1], bins=20)
#plt.xlim(0,1)
#plt.title('Histogram of predicted probabilities')
#plt.xlabel('Predicted probability of diabetes')
#plt.ylabel('Frequency')
#plt.show()
model_oos_performance = bin_classif_eval(rf_pred_probs[:,1],y_validation,pos_cat=1,thresholds=prob_thresholds)
#print(type(model_oos_performance))
#for item in model_oos_performance.recall:
# print(item)
recall_threshold = .74
idx = next(i for i in range(100) if model_oos_performance.recall[i] <= recall_threshold) - 1
print("idx = %d" % idx)
selected_prob_threshold = prob_thresholds[idx]
print("selected_prob_threshold:", selected_prob_threshold)
print(model_oos_performance.iloc[idx,:])
| [
"liuxiaobing_09@126.com"
] | liuxiaobing_09@126.com |
a4ad96d39cbd7bc79a58e3f487cf55a97f4253f2 | 9590e9b1f60fdfa573049699d10f1939929d2598 | /p2_D4PG_agent.py | 6ee87dcbbc4de92b12f69c23f5ec3c29c9d531fe | [] | no_license | kelvin84hk/DRLND_P2_Continuous_Control | 463d783b6a05c53fdd116e0d87bf1022e5f5ef66 | d1fd28b321c3d0ad4dbcb2364f9d48f6a63807c3 | refs/heads/master | 2020-04-05T14:45:59.346382 | 2018-11-23T17:43:48 | 2018-11-23T17:43:48 | 156,939,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,359 | py | import numpy as np
import random
import copy
from collections import namedtuple, deque
from p2_model import Actor,CriticD4PG,Critic
from prioritized_memory import Memory
import torch
import torch.nn.functional as F
import torch.optim as optim
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
Vmax = 5
Vmin = 0
N_ATOMS = 51
DELTA_Z = (Vmax - Vmin) / (N_ATOMS - 1)
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, seed,BUFFER_SIZE = int(1e5),BATCH_SIZE = 64,GAMMA = 0.99,TAU = 1e-3,LR_ACTOR = 1e-4,LR_CRITIC = 1e-4,WEIGHT_DECAY = 0.0001,UPDATE_EVERY = 4,IsPR=False,N_step=1,IsD4PG_Cat=False):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.BUFFER_SIZE=BUFFER_SIZE
self.BATCH_SIZE=BATCH_SIZE
self.GAMMA=GAMMA
self.TAU=TAU
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
self.UPDATE_EVERY=UPDATE_EVERY
self.N_step=N_step
self.IsD4PG_Cat=IsD4PG_Cat
self.rewards_queue=deque(maxlen=N_step)
self.states_queue=deque(maxlen=N_step)
# Actor Network (w/ Target Network)
self.actor_local = Actor(state_size, action_size, seed).to(device)
self.actor_target = Actor(state_size, action_size, seed).to(device)
self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
# Critic Network (w/ Target Network)
if IsD4PG_Cat:
self.critic_local = CriticD4PG(state_size, action_size, seed,n_atoms=N_ATOMS,v_min=Vmin,v_max=Vmax).to(device)
self.critic_target = CriticD4PG(state_size, action_size, seed,n_atoms=N_ATOMS,v_min=Vmin,v_max=Vmax).to(device)
else:
self.critic_local = Critic(state_size, action_size, seed).to(device)
self.critic_target = Critic(state_size, action_size, seed).to(device)
self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
# Replay memory
self.BATCH_SIZE=BATCH_SIZE
self.IsPR=IsPR
if IsPR:
self.memory = Memory(BUFFER_SIZE) # prioritized experienc replay
else:
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, self.seed)
# Noise process
self.noise = OUNoise(action_size, self.seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
self.train_start = 2000
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
if self.IsPR:
self.states_queue.appendleft([state,action])
self.rewards_queue.appendleft(reward*self.GAMMA**self.N_step)
for i in range(len(self.rewards_queue)):
self.rewards_queue[i] = self.rewards_queue[i]/self.GAMMA
if len(self.rewards_queue)>=self.N_step: # N-steps return: r= r1+gamma*r2+..+gamma^(t-1)*rt
temps=self.states_queue.pop()
state = torch.tensor(temps[0]).float().to(device)
next_state = torch.tensor(next_state).float().to(device)
action = torch.tensor(temps[1]).float().unsqueeze(0).to(device)
if self.IsD4PG_Cat:
self.critic_local.eval()
with torch.no_grad():
Q_expected = self.critic_local(state, action)
self.critic_local.train()
self.actor_target.eval()
with torch.no_grad():
action_next = self.actor_target(next_state)
self.actor_target.train()
self.critic_target.eval()
with torch.no_grad():
Q_target_next = self.critic_target(next_state, action_next)
Q_target_next =F.softmax(Q_target_next, dim=1)
self.critic_target.train()
sum_reward=torch.tensor(sum(self.rewards_queue)).float().unsqueeze(0).to(device)
done_temp=torch.tensor(done).float().to(device)
Q_target_next=self.distr_projection(Q_target_next,sum_reward,done_temp,self.GAMMA**self.N_step)
Q_target_next = -F.log_softmax(Q_expected, dim=1) * Q_target_next
error = Q_target_next.sum(dim=1).mean().cpu().data
else:
self.critic_local.eval()
with torch.no_grad():
Q_expected = self.critic_local(state, action).cpu().data
self.critic_local.train()
action_next = self.actor_target(next_state)
Q_target_next = self.critic_target(next_state, action_next).squeeze(0).cpu().data
Q_target = sum(self.rewards_queue) + ((self.GAMMA**self.N_step)* Q_target_next * (1 - done))
error = abs(Q_target-Q_expected)
state=state.cpu().data.numpy()
next_state=next_state.cpu().data.numpy()
action=action.squeeze(0).cpu().data.numpy()
self.memory.add(error, (state, action, sum(self.rewards_queue), next_state, done))
self.rewards_queue.pop()
if done:
self.states_queue.clear()
self.rewards_queue.clear()
self.t_step = (self.t_step + 1) % self.UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if self.memory.tree.n_entries > self.train_start:
batch_not_ok=True
while batch_not_ok:
mini_batch, idxs, is_weights = self.memory.sample(self.BATCH_SIZE)
mini_batch = np.array(mini_batch).transpose()
if mini_batch.shape==(5,self.BATCH_SIZE):
batch_not_ok=False
else:
print(mini_batch.shape)
try:
states = np.vstack([m for m in mini_batch[0] if m is not None])
except:
print('states not same dim')
pass
try:
actions = np.vstack([m for m in mini_batch[1] if m is not None])
except:
print('actions not same dim')
pass
try:
rewards = np.vstack([m for m in mini_batch[2] if m is not None])
except:
print('rewars not same dim')
pass
try:
next_states = np.vstack([m for m in mini_batch[3] if m is not None])
except:
print('next states not same dim')
pass
try:
dones = np.vstack([m for m in mini_batch[4] if m is not None])
except:
print('dones not same dim')
pass
# bool to binary
dones = dones.astype(int)
states = torch.from_numpy(states).float().to(device)
actions = torch.from_numpy(actions).float().to(device)
rewards = torch.from_numpy(rewards).float().to(device)
next_states = torch.from_numpy(next_states).float().to(device)
dones = torch.from_numpy(dones).float().to(device)
experiences=(states, actions, rewards, next_states, dones)
self.learn(experiences, self.GAMMA, idxs)
else :
self.states_queue.appendleft([state,action])
self.rewards_queue.appendleft(reward*self.GAMMA**self.N_step)
for i in range(len(self.rewards_queue)):
self.rewards_queue[i] = self.rewards_queue[i]/self.GAMMA
if len(self.rewards_queue)>=self.N_step: # N-steps return: r= r1+gamma*r2+..+gamma^(t-1)*rt
temps=self.states_queue.pop()
self.memory.add(temps[0], temps[1], sum(self.rewards_queue), next_state, done)
self.rewards_queue.pop()
if done:
self.states_queue.clear()
self.rewards_queue.clear()
# If enough samples are available in memory, get random subset and learn
self.t_step = (self.t_step + 1) % self.UPDATE_EVERY
if self.t_step == 0:
if len(self.memory) >self.BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, self.GAMMA)
def act(self, state, add_noise=False):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
#state = torch.tensor(np.moveaxis(state,3,1)).float().to(device)
state = torch.tensor(state).float().to(device)
self.actor_local.eval()
with torch.no_grad():
action = self.actor_local(state).cpu().data.numpy()
self.actor_local.train()
if add_noise:
action += self.noise.sample()
return np.squeeze(np.clip(action,-1.0,1.0))
# borrow from https://github.com/PacktPublishing/Deep-Reinforcement-Learning-Hands-On/tree/master/Chapter14
def distr_projection(self,next_distr_v, rewards_v, dones_mask_t, gamma):
next_distr = next_distr_v.data.cpu().numpy()
rewards = rewards_v.data.cpu().numpy()
dones_mask = dones_mask_t.cpu().numpy().astype(np.bool)
batch_size = len(rewards)
proj_distr = np.zeros((batch_size, N_ATOMS), dtype=np.float32)
dones_mask=np.squeeze(dones_mask)
rewards = rewards.reshape(-1)
for atom in range(N_ATOMS):
tz_j = np.minimum(Vmax, np.maximum(Vmin, rewards + (Vmin + atom * DELTA_Z) * gamma))
b_j = (tz_j - Vmin) / DELTA_Z
l = np.floor(b_j).astype(np.int64)
u = np.ceil(b_j).astype(np.int64)
eq_mask = u == l
proj_distr[eq_mask, l[eq_mask]] += next_distr[eq_mask, atom]
ne_mask = u != l
proj_distr[ne_mask, l[ne_mask]] += next_distr[ne_mask, atom] * (u - b_j)[ne_mask]
proj_distr[ne_mask, u[ne_mask]] += next_distr[ne_mask, atom] * (b_j - l)[ne_mask]
if dones_mask.any():
proj_distr[dones_mask] = 0.0
tz_j = np.minimum(Vmax, np.maximum(Vmin, rewards[dones_mask]))
b_j = (tz_j - Vmin) / DELTA_Z
l = np.floor(b_j).astype(np.int64)
u = np.ceil(b_j).astype(np.int64)
eq_mask = u == l
if dones_mask.shape==():
if dones_mask:
proj_distr[0, l] = 1.0
else:
ne_mask = u != l
proj_distr[0, l] = (u - b_j)[ne_mask]
proj_distr[0, u] = (b_j - l)[ne_mask]
else:
eq_dones = dones_mask.copy()
eq_dones[dones_mask] = eq_mask
if eq_dones.any():
proj_distr[eq_dones, l[eq_mask]] = 1.0
ne_mask = u != l
ne_dones = dones_mask.copy()
ne_dones[dones_mask] = ne_mask
if ne_dones.any():
proj_distr[ne_dones, l[ne_mask]] = (u - b_j)[ne_mask]
proj_distr[ne_dones, u[ne_mask]] = (b_j - l)[ne_mask]
return torch.FloatTensor(proj_distr).to(device)
    def learn(self, experiences, gamma, idxs=None):
        """Update policy and value parameters using given batch of experience tuples.
        Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
        where:
            actor_target(state) -> action
            critic_target(state, action) -> Q-value
        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
            idxs: replay-tree indices of the sampled transitions; only used when
                prioritized replay (self.IsPR) is enabled, to write back new priorities
        """
        states, actions, rewards, next_states, dones = experiences
        # ---------------------------- update critic ---------------------------- #
        # Get predicted next-state actions and Q values from target models
        # Compute critic loss
        Q_expected = self.critic_local(states, actions)
        actions_next = self.actor_target(next_states)
        Q_targets_next = self.critic_target(next_states, actions_next)
        if self.IsD4PG_Cat:
            # categorical (C51-style) critic: project the target distribution onto
            # the fixed support, then use cross-entropy between projected target
            # and the local critic's predicted distribution as the critic loss
            Q_targets_next = F.softmax(Q_targets_next, dim=1)
            # gamma**self.N_step because rewards are N-step returns
            Q_targets_next = self.distr_projection(Q_targets_next, rewards, dones, gamma**self.N_step)
            Q_targets_next = -F.log_softmax(Q_expected, dim=1) * Q_targets_next
            critic_loss = Q_targets_next.sum(dim=1).mean()
        else:
            # Compute Q targets for current states (y_i); (1 - dones) zeroes the
            # bootstrap term for terminal transitions
            Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
            critic_loss = F.mse_loss(Q_expected, Q_targets)
        if self.IsPR:
            # prioritized replay: recompute per-sample TD errors to refresh priorities
            if self.IsD4PG_Cat:
                self.critic_local.eval()
                with torch.no_grad():
                    # per-sample cross-entropy serves as the priority signal
                    errors = Q_targets_next.sum(dim=1).cpu().data.numpy()
                self.critic_local.train()
            else:
                errors = torch.abs(Q_expected - Q_targets).squeeze(0).cpu().data.numpy()
            # update priority
            for i in range(self.BATCH_SIZE):
                idx = idxs[i]
                self.memory.update(idx, errors[i])
        # Minimize the loss
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()
        # ---------------------------- update actor ---------------------------- #
        # Compute actor loss (deterministic policy gradient: maximize critic value)
        actions_pred = self.actor_local(states)
        if self.IsD4PG_Cat:
            # convert the predicted distribution back to a scalar Q before averaging
            crt_distr_v = self.critic_local(states, actions_pred)
            actor_loss = -self.critic_local.distr_to_q(crt_distr_v)
            actor_loss = actor_loss.mean()
        else:
            actor_loss = -self.critic_local(states, actions_pred).mean()
        # Minimize the loss
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()
        # ----------------------- update target networks ----------------------- #
        # Polyak averaging towards the local networks with rate self.TAU
        self.soft_update(self.critic_local, self.critic_target, self.TAU)
        self.soft_update(self.actor_local, self.actor_target, self.TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class OUNoise:
    """Ornstein-Uhlenbeck process for temporally correlated exploration noise."""

    def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.2):
        """Initialize parameters and noise process.

        :param size: dimensionality of the noise vector (= action size)
        :param seed: seed for Python's `random` module
        :param mu: long-running mean the process reverts to
        :param theta: mean-reversion rate
        :param sigma: scale of the random perturbation
        """
        self.mu = mu * np.ones(size)
        self.theta = theta
        self.sigma = sigma
        self.seed = random.seed(seed)
        self.reset()

    def reset(self):
        """Reset the internal state (= noise) to mean (mu)."""
        self.state = copy.copy(self.mu)

    def sample(self):
        """Update internal state and return it as a noise sample."""
        x = self.state
        # Bug fix: the perturbation must be zero-mean. random.random() draws
        # uniformly from [0, 1) (mean 0.5), which biases every noise sample
        # upwards; use standard-normal increments instead, as in DDPG.
        dx = self.theta * (self.mu - x) + self.sigma * np.random.standard_normal(len(x))
        self.state = x + dx
        return self.state
class ReplayBuffer:
    """Fixed-size FIFO buffer holding experience tuples for off-policy learning."""

    def __init__(self, action_size, buffer_size, batch_size, seed):
        """Initialize a ReplayBuffer object.

        Params
        ======
            action_size (int): dimension of each action
            buffer_size (int): maximum size of buffer
            batch_size (int): size of each training batch
            seed (int): random seed
        """
        self.action_size = action_size
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple(
            "Experience",
            field_names=["state", "action", "reward", "next_state", "done"])
        self.seed = random.seed(seed)

    def add(self, state, action, reward, next_state, done):
        """Store one transition; the oldest entry is evicted when full."""
        self.memory.append(self.experience(state, action, reward, next_state, done))

    def sample(self):
        """Draw a uniform random batch and convert each field to a device tensor."""
        drawn = random.sample(self.memory, k=self.batch_size)
        valid = [e for e in drawn if e is not None]

        def stack(rows):
            return torch.from_numpy(np.vstack(rows)).float().to(device)

        states = stack([e.state for e in valid])
        actions = stack([e.action for e in valid])
        rewards = stack([e.reward for e in valid])
        next_states = stack([e.next_state for e in valid])
        dones = torch.from_numpy(
            np.vstack([e.done for e in valid]).astype(np.uint8)).float().to(device)
        return (states, actions, rewards, next_states, dones)

    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)
| [
"kelvin84hk@gmail.com"
] | kelvin84hk@gmail.com |
b891c1b843660a251c2fc198054adab99dfba8ab | 289bc4207b1c3efe3b99ac637d1ddfb88e28a5be | /Section05/debug_example.py | cdd21ee0a121d1d8cc04721135fa4e495be7ebd1 | [
"MIT"
] | permissive | PacktPublishing/-Hands-on-Reinforcement-Learning-with-TensorFlow | f5e41ed9e218f721b179e0b1d9aaa3c27957d38a | 6de9980db2bfc761524c27606e6495c093ddf516 | refs/heads/master | 2021-06-20T19:31:45.442884 | 2021-01-15T08:59:53 | 2021-01-15T08:59:53 | 145,985,316 | 11 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,935 | py | import numpy as np
import tensorflow as tf
from tensorflow.python import debug as tf_debug

# hyper-parameters for plain gradient descent on a 1-D linear regression
learning_rate = 0.01
num_epochs = 1000

# toy regression data (17 scalar samples)
train_X = np.asarray(
    [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
     7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
train_Y = np.asarray(
    [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
     2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])
n_samples = train_X.shape[0]

# placeholders fed with one sample (training) or the full arrays (evaluation)
input_x = tf.placeholder("float")
actual_y = tf.placeholder("float")

# Simple linear regression tries to find W and b such that
# y = Wx + b
W = tf.Variable(np.random.randn(), name="weight")
b = tf.Variable(np.random.randn(), name="bias")

prediction = tf.add(tf.multiply(input_x, W), b)
# per-sample squared error (a vector when fed the whole dataset)
loss = tf.squared_difference(actual_y, prediction)
# loss = tf.Print(loss, [loss], 'Loss: ', summarize=n_samples)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    # uncomment one of these wrappers to attach the TF CLI / TensorBoard debugger
    # sess = tf_debug.LocalCLIDebugWrapperSession(sess)
    # sess = tf_debug.TensorBoardDebugWrapperSession(
    #     sess, 'localhost:6064')
    sess.run(init)
    initial_loss = sess.run(loss, feed_dict={
        input_x: train_X,
        actual_y: train_Y
    })
    print("Initial loss", initial_loss)
    # stochastic gradient descent: one update per (x, y) sample, per epoch
    for epoch in range(num_epochs):
        for x, y in zip(train_X, train_Y):
            _, c_loss = sess.run([optimizer, loss], feed_dict={
                input_x: x,
                actual_y: y
            })
    # runtime assertions on the final loss; tf.group evaluates them together
    tf.add_to_collection("Asserts", tf.assert_less(loss, 2.0, [loss]))
    tf.add_to_collection("Asserts", tf.assert_positive(loss, [loss]))
    assert_op = tf.group(*tf.get_collection('Asserts'))
    final_loss, _ = sess.run([loss, assert_op], feed_dict={
        input_x: train_X,
        actual_y: train_Y
    })
    print("Final Loss: {}\n W:{}, b:{}".format(
        final_loss, sess.run(W), sess.run(b)))
| [
"noreply@github.com"
] | noreply@github.com |
181c8c1bc9b19a8196b4811b28f403b224160e94 | 608a3139f45e0aa073680223277d7d8116c144dc | /Tactics/equipment.py | bebb134171e72949b66b1dfc66ad1abe03bfd883 | [] | no_license | orez-/Orez-Summer-2012 | 540e7ff167dc54973a960acdf4589ab29ca761bd | eea27473515a7c1d45ad2752d7653f72c83f7ba9 | refs/heads/master | 2016-09-06T07:05:01.771267 | 2012-12-30T16:56:48 | 2012-12-30T16:56:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,569 | py | import unittest
class EquipmentItem:
    """A single piece of equippable gear (currently just an example holder)."""

    def __init__(self, slot="head", itype="hat", name="Stylish Beret"):
        """Record which slot the item occupies, its type, and its display name."""
        self.equip_slot, self.item_type, self.name = slot, itype, name
class EquipmentSet:
    """The gear a character currently wears, indexed by equipment slot.

    ``mainhand``/``offhand`` each hold one item; a two-handed item occupies
    ``mainhand`` and marks ``offhand`` with the sentinel ``True``.
    ``accessories`` is a list; at most 2 rings and 1 of any other accessory
    type may be worn at once (see ``equip``).
    """

    def __init__(self):
        self.equipment = {
            "head": None,
            "armor": None,
            "mainhand": None,
            "offhand": None,
            "accessories": [],
            "twohand": None
        }

    def equip(self, item, slot=None):
        """Equip ``item``.

        ``slot`` is only needed to pick "mainhand" vs "offhand" for one-handed
        weapons, or to overwrite a specific accessory index.
        Returns False (after printing a message) when the item cannot be
        equipped; returns None on success.
        """
        if item.equip_slot not in self.equipment:
            # note: print() with a single argument is valid in both py2 and py3
            print("No such slot")
            return False
        if item.equip_slot == "accessories":  # special case.
            # count already-worn accessories of the same type (ignoring the
            # slot being overwritten, if any)
            alike = 0
            for i, it in enumerate(self.equipment["accessories"]):
                if i != slot and it.item_type == item.item_type:
                    alike += 1
            if alike >= (2 if item.item_type == "ring" else 1):
                print("Too many of that accessory")
                return False  # ^ not the best
            if (slot is not None and
                    slot < len(self.equipment["accessories"])):  # overwriting old accessory
                self._unequip_accessory(slot)
                self.equipment["accessories"][slot] = item
            else:  # adding new accessory
                slot = len(self.equipment["accessories"])
                self.equipment["accessories"].append(item)
        else:  # not an accessory
            if item.equip_slot in ("mainhand", "offhand"):  # slot is important
                if slot not in ("mainhand", "offhand"):
                    print("Must specify which hand.")
                    return False
            elif item.equip_slot == "twohand":  # slot not important: takes both hands
                slot = "mainhand"
                self.unequip("mainhand")
                self.unequip("offhand")
                self.equipment["offhand"] = True
                self.equipment["mainhand"] = item
                # TODO: recalculate stats (based on item)
                return
            else:
                slot = item.equip_slot
            self.unequip(slot)
            self.equipment[slot] = item
            # TODO: recalculate stats (based on item)

    def _unequip_accessory(self, index):
        """ Unequips the accessory at slot 'index', but does not shift
            later elements back """
        item = self.equipment["accessories"][index]
        if item is not None:
            # TODO: put back in your inventory
            self.equipment["accessories"][index] = None
            # TODO: recalculate stats (based on item)

    def unequip(self, slot, acc_slot=None):
        """Remove the item in ``slot``; accessories also need ``acc_slot`` (index)."""
        if slot in self.equipment:
            if slot == "accessories":
                self._unequip_accessory(acc_slot)
                # bug fix: the dict key is "accessories" (plural); the old
                # `self.equipment["accessory"]` always raised KeyError here
                del self.equipment["accessories"][acc_slot]
                return
            if slot in ("mainhand", "offhand"):
                if self.equipment["offhand"] is True:  # currently a two-hander
                    self.equipment["offhand"] = None
                    self.unequip("mainhand")
            item = self.equipment[slot]
            if item is not None:
                # TODO: put it back in your inventory
                self.equipment[slot] = None
                # TODO: recalculate stats (based on item)

    def get_all(self):
        """Return every equipped item as a flat list."""
        # bug fix: exclude the `True` sentinel the offhand slot carries while a
        # two-handed item is equipped -- it is a marker, not an item
        return [v for k, v in self.equipment.items() if k != "accessories"
                and v is not None and v is not True] + self.equipment["accessories"]
| [
"bshaginaw120@gmail.com"
] | bshaginaw120@gmail.com |
755a2aaad8acce2a42387f5c5739b2381f4c4253 | 8fe6a6790013eed7ca470414c398ea4848e798c4 | /src/datasets.py | c9a59f9a12256e6e0655c82d503fc40b155f989a | [
"MIT"
] | permissive | sovit-123/SSD300-VGG11-on-Pascal-VOC-2005-Data | d7aef30277076561c46d5f8a3d07985e09b9f13c | cb21c4c3e762a0184611b1a1659e7e730ef31932 | refs/heads/master | 2022-12-02T11:51:59.256715 | 2020-08-06T16:08:24 | 2020-08-06T16:08:24 | 284,756,098 | 3 | 0 | null | 2020-08-06T05:40:31 | 2020-08-03T16:53:09 | Python | UTF-8 | Python | false | false | 2,345 | py | import torch
import json
import os
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from utils import transform
class PascalVOCDataset(Dataset):
    """Custom dataset serving PascalVOC samples as (image, boxes, labels)."""

    def __init__(self, data_folder, split):
        """
        :param data_folder: folder path of the prepared json data files
        :param split: either `TRAIN` or `TEST` (case-insensitive)
        """
        self.split = split.upper()
        assert self.split in {'TRAIN', 'TEST'}
        self.data_folder = data_folder

        # image paths and per-image object annotations live in parallel json files
        images_file = os.path.join(data_folder, self.split + '_images.json')
        objects_file = os.path.join(data_folder, self.split + '_objects.json')
        with open(images_file, 'r') as handle:
            self.images = json.load(handle)
        with open(objects_file, 'r') as handle:
            self.objects = json.load(handle)
        assert len(self.images) == len(self.objects)

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        # load the image from disk and force 3 channels
        image = Image.open(self.images[index]).convert('RGB')

        # corresponding annotations for this image
        annotation = self.objects[index]
        boxes = torch.FloatTensor(annotation['boxes'])   # (n_objects, 4)
        labels = torch.LongTensor(annotation['labels'])  # (n_objects)

        # split-dependent augmentation / normalization; returns (image, boxes, labels)
        return transform(image, boxes, labels, split=self.split)

    def collate_fn(self, batch):
        """Collate samples with a varying number of objects per image.

        Passed to the DataLoader: images are stacked into one tensor, while
        boxes and labels stay as lists of per-image tensors of varying size.

        :param batch: iterable of __getitem__ results, length = batch size
        :return: a (N, 3, 300, 300) tensor of images and two lists of N tensors
        """
        imgs, box_lists, label_lists = [], [], []
        for img, bxs, lbls in batch:
            imgs.append(img)
            box_lists.append(bxs)
            label_lists.append(lbls)
        return torch.stack(imgs, dim=0), box_lists, label_lists
"sovitrath5@gmail.com"
] | sovitrath5@gmail.com |
fe83eb6dfc9ba7a814faffa49e7c4d90c5063bb2 | b5cc154ecff7f8cf4c0d221a45f54839049ed755 | /upload_all_new.py | 85b3522d95aa92eaf0a8b88b933197d408105bb3 | [] | no_license | RobinL/daily | ba60bfda2135bad2138f21902b506e9041266eec | 99ac4dc47887f9b1c43cb0346c2c01b0a249ba18 | refs/heads/master | 2021-01-16T18:29:48.837206 | 2015-09-01T19:47:21 | 2015-09-01T19:47:21 | 41,631,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,461 | py | import os
import sys
import logging
logger = logging.getLogger(__name__)
from azure.storage import BlobService
def upload_all_new_azure(local_folder, azure_container, account_name, account_key):
    """Upload every local csv/zip file that is not yet present in the Azure container.

    Retries each failed file until the total number of failures exceeds the
    initial upload count, then logs an error and exits the process.

    :param local_folder: directory scanned for candidate files
    :param azure_container: destination blob container name
    :param account_name: Azure storage account name
    :param account_key: Azure storage account access key
    """
    # bug fix: use the credentials passed as parameters; the old code ignored
    # them and read the ACC_NAME / ACCESS_KEY environment variables instead
    blob_service = BlobService(account_name=account_name, account_key=account_key)

    blob_list = blob_service.list_blobs(azure_container)
    blob_name_set = {b.name for b in blob_list.blobs}

    # candidate local files: visible csv/zip files without editor backup markers
    localfiles = os.listdir(local_folder)
    localfiles = [f for f in localfiles if "~" not in f]
    localfiles = [f for f in localfiles if f[0] != "."]
    localfiles = [f for f in localfiles if (".zip" in f or ".csv" in f)]
    localfiles = set(localfiles)

    # only upload what the container does not already have
    files_to_upload = localfiles - blob_name_set

    orig_len = len(files_to_upload)
    error_counter = 0
    while len(files_to_upload) > 0:
        if error_counter > orig_len:
            logger.error("too many upload failures, exiting")
            sys.exit()
        filename = files_to_upload.pop()
        try:
            # bug fix: upload into the requested container; the old code
            # hard-coded the 'csvs' container regardless of azure_container
            blob_service.put_block_blob_from_path(
                azure_container,
                filename,
                os.path.join(local_folder, filename)
            )
        except Exception:
            error_counter += 1
            # use the module logger consistently (was logging.error before)
            logger.error(filename + " failed to upload")
            # re-queue the file for another attempt
            files_to_upload.add(filename)
| [
"robinlinacre@hotmail.com"
] | robinlinacre@hotmail.com |
0d17243da1091d61acb70238bbf2523aab2d4797 | 0a99c6f4c1336aec02e8a63789ddf2a37f18912a | /movement.py | 9001a3577b6dac19932c25cb400a7a80ea7aae97 | [] | no_license | timothyheyer/micropython-tutorials | ac5e5ddb6295ab6bdd1d7061f020c618aa5093a8 | 21b2043b160dbce6608a5c9c6d74d1c4448fdfe7 | refs/heads/master | 2020-04-26T21:07:57.326967 | 2019-03-08T23:30:10 | 2019-03-08T23:30:10 | 173,832,911 | 0 | 0 | null | 2019-03-04T22:40:18 | 2019-03-04T22:40:17 | null | UTF-8 | Python | false | false | 333 | py | from microbit import *
# Poll the accelerometer forever and show a letter for the dominant tilt.
while True:
    # x axis controls left/right, y axis controls up/down
    reading = accelerometer.get_x()
    tilt = accelerometer.get_y()
    if reading > 20:
        display.show("R")
    elif reading < -20:
        display.show("L")
    elif tilt > 500:
        display.show("UP")
    elif tilt < 5:
        # NOTE(review): threshold 5 (not a symmetric negative value) means even a
        # nearly level board reads as "DOWN" -- confirm this asymmetry is intended
        display.show("DOWN")
    else:
        # no direction dominates
        display.show("-")
"theyer1@msudenver.edu"
] | theyer1@msudenver.edu |
4ec79ec6e331c470cdf1d2a503b1e4f5f4ede85d | 4b2bd13df43345924417311987f4b49b461b9062 | /Strings/Length_of_Last_Word.py | 3d9e1818a601df315fa6ce5ebfa26386986d3912 | [] | no_license | EladAssia/InterviewBit | 89ced6c3b555ef6d9886f00686b9ad989509e4e5 | c4099c60aead54204bac8919ab9ffd2268977c0b | refs/heads/master | 2020-05-26T13:44:27.059301 | 2019-06-01T09:30:51 | 2019-06-01T09:30:51 | 188,251,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | # Given a string s consists of upper/lower-case alphabets and empty space characters ' ', return the length of last word in the string.
# If the last word does not exist, return 0.
# Note: A word is defined as a character sequence consists of non-space characters only.
# Example:
# Given s = "Hello World",
# return 5 as length("World") = 5.
# Please make sure you try to solve this problem without using library functions. Make sure you only traverse the string once.
##########################################################################################################################################
class Solution:
    # @param A : string
    # @return an integer
    def lengthOfLastWord(self, A):
        """Return the length of the last space-free word in A (0 if none).

        Scans backwards once: first skip trailing spaces, then measure the
        run of non-space characters -- no splitting, single traversal.
        """
        end = len(A) - 1
        while end >= 0 and A[end] == ' ':
            end -= 1
        start = end
        while start >= 0 and A[start] != ' ':
            start -= 1
        return end - start
##########################################################################################################################################
| [
"noreply@github.com"
] | noreply@github.com |
175217a2e8693dc6abc2fb06bf86ba8bf7c9fb06 | 8f807e00570227aff15cadb9fb9cfbd46d5c5ab1 | /uninas/tasks/abstract.py | 45d13cc9cf1959a227e08f67a526d0632340edd4 | [
"MIT"
] | permissive | MLDL/uninas | cbe704e6e992af9f1c683f36cdbeab5919289c9b | 06729b9cf517ec416fb798ae387c5bd9c3a278ac | refs/heads/main | 2023-04-19T06:49:23.213220 | 2021-04-26T12:20:04 | 2021-04-26T12:20:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,040 | py | import os
import shutil
import logging
import random
from typing import Union, List
import numpy as np
import torch
from uninas.utils.args import ArgsInterface, Argument, MetaArgument, Namespace, sanitize, save_as_json
from uninas.utils.misc import split
from uninas.utils.loggers.python import LoggerManager, log_headline, log_in_columns, log_args
from uninas.utils.paths import get_task_config_path
from uninas.utils.system import dump_system_info
from uninas.methods.abstract import AbstractMethod
from uninas.methods.strategies.manager import StrategyManager
from uninas.register import Register
# alias for command-line-argument values: a string, a list, or absent
cla_type = Union[str, List, None]
class AbstractTask(ArgsInterface):
    """Base class for runnable tasks: seeds all RNGs, prepares the save dir and logging."""

    def __init__(self, args: Namespace, wildcards: dict, descriptions: dict = None):
        """
        :param args: parsed task arguments
        :param wildcards: wildcard replacements, stored alongside the saved run config
        :param descriptions: optional argument descriptions, only used for logging
        """
        super().__init__()

        # args, seed
        self.args = args
        self.save_dir = self._parsed_argument('save_dir', args)
        self.is_test_run = self._parsed_argument('is_test_run', args)
        self.seed = self._parsed_argument('seed', args)
        self.is_deterministic = self._parsed_argument('is_deterministic', args)
        # seed every RNG source used downstream
        random.seed(self.seed)
        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
        if self.is_deterministic:
            # see https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
            os.environ.setdefault("CUBLAS_WORKSPACE_CONFIG", ":4096:8")
            torch.set_deterministic(self.is_deterministic)

        # maybe delete old dir, note arguments, save run_config
        if self._parsed_argument('save_del_old', args):
            shutil.rmtree(self.save_dir, ignore_errors=True)
        os.makedirs(self.save_dir, exist_ok=True)
        save_as_json(args, get_task_config_path(self.save_dir), wildcards)
        dump_system_info(self.save_dir + 'sysinfo.txt')

        # logging
        self.log_file = '%slog_task.txt' % self.save_dir
        LoggerManager().set_logging(default_save_file=self.log_file)
        self.logger = self.new_logger(index=None)
        log_args(self.logger, None, self.args, add_git_hash=True, descriptions=descriptions)
        Register.log_all(self.logger)

        # reset weight strategies so that consecutive tasks do not conflict with each other
        StrategyManager().reset()
        self.methods = []

    @classmethod
    def args_to_add(cls, index=None) -> [Argument]:
        """ list arguments to add to argparse when this class (or a child class) is chosen """
        return super().args_to_add(index) + [
            Argument('is_test_run', default='False', type=str, help='test runs stop epochs early', is_bool=True),
            Argument('seed', default=0, type=int, help='random seed for the experiment'),
            Argument('is_deterministic', default='False', type=str, help='use deterministic operations', is_bool=True),
            Argument('note', default='note', type=str, help='just to take notes'),
            # saving
            Argument('save_dir', default='{path_tmp}', type=str, help='where to save', is_path=True),
            Argument('save_del_old', default='True', type=str, help='wipe the save dir before starting', is_bool=True),
        ]

    @classmethod
    def _add_meta_from_argsfile_to_args(cls, all_args: [str], meta_keys: [str], args_in_file: dict, overwrite=True):
        """ copy all meta arguments in 'meta_keys' and their respective arguments to the 'all_args' list """
        already_added = set()
        if not overwrite:
            # remember arguments that are already present ("--name=value" -> "name")
            for s in all_args:
                already_added.add(s.split('=')[0][2:])
        for key_meta in meta_keys:
            value_meta = args_in_file.get(key_meta)
            value_splits = split(sanitize(value_meta))
            for key_cls in value_splits:
                # copy every argument belonging to either the meta key or the chosen class
                for k, v in args_in_file.items():
                    if k in already_added:
                        continue
                    if key_meta in k or key_cls in k:
                        all_args.append('--%s=%s' % (k, v))
                        already_added.add(k)
                        if key_meta == k:
                            print('\t\tusing "%s" as %s, copying arguments' % (v, key_meta))

    def get_method(self) -> AbstractMethod:
        """ get the only existing method """
        assert len(self.methods) == 1, "Must have exactly one method, but %d exist" % len(self.methods)
        return self.methods[0]

    def checkpoint_dir(self, save_dir: str = None) -> str:
        """ directory used for checkpoints; defaults to the task's save dir """
        return save_dir if save_dir is not None else self.save_dir

    def new_logger(self, index: int = None):
        """ create a (possibly index-named) logger that writes to the shared task log file """
        return LoggerManager().get_logger(
            name=index if index is None else str(index),
            default_level=logging.DEBUG if self.is_test_run else logging.INFO,
            save_file=self.log_file)

    def load(self, checkpoint_dir: str = None) -> 'AbstractTask':
        """ load """
        log_headline(self.logger, 'Loading')
        checkpoint_dir = self.checkpoint_dir(checkpoint_dir)
        try:
            if not self._load(checkpoint_dir):
                self.logger.info('Did not load, maybe nothing to do: %s' % checkpoint_dir)
        except Exception as e:
            # a failed load is logged but does not abort the task
            self.logger.error('Failed loading from checkpoint dir: "%s"' % checkpoint_dir, exc_info=e)
        return self

    def _load(self, checkpoint_dir: str) -> bool:
        """ load; returns True if something was loaded (overridden by subclasses) """
        return False

    def run(self) -> 'AbstractTask':
        """ execute the task """
        try:
            self._run()
            for method in self.methods:
                method.flush_logging()
            self.logger.info("Done!")
            return self
        except Exception as e:
            # NOTE(review): 'raise e' restarts the traceback here; a bare 'raise'
            # would preserve the original traceback
            raise e
        finally:
            # always release logging resources, even on failure
            LoggerManager().cleanup()

    def _run(self):
        """ execute the task """
        raise NotImplementedError
class AbstractNetTask(AbstractTask):
    """Task base that additionally owns device allocation, method(s) and trainer(s)."""

    def __init__(self, args: Namespace, *args_, **kwargs):
        AbstractTask.__init__(self, args, *args_, **kwargs)

        # device handling
        cls_dev_handler = self._parsed_meta_argument(Register.devices_managers, 'cls_device', args, None)
        self.devices_handler = cls_dev_handler.from_args(self.seed, self.is_deterministic, args, index=None)

        # classes (instantiated later in add_method / add_trainer)
        self.cls_method = self._parsed_meta_argument(Register.methods, 'cls_method', args, None)
        self.cls_trainer = self._parsed_meta_argument(Register.trainers, 'cls_trainer', args, None)

        # methods and trainers
        self.trainer = []

    @classmethod
    def meta_args_to_add(cls) -> [MetaArgument]:
        """
        list meta arguments to add to argparse for when this class is chosen,
        classes specified in meta arguments may have their own respective arguments
        """
        kwargs = Register.get_my_kwargs(cls)
        # restrict the selectable methods to those matching this task's search mode
        methods = Register.methods.filter_match_all(search=kwargs.get('search'))
        return super().meta_args_to_add() + [
            MetaArgument('cls_device', Register.devices_managers, help_name='device manager', allowed_num=1),
            MetaArgument('cls_trainer', Register.trainers, help_name='trainer', allowed_num=1),
            MetaArgument('cls_method', methods, help_name='method', allowed_num=1),
        ]

    def add_method(self):
        """ adds a new method (lightning module) """
        # never try loading from checkpoint, since custom checkpoints are used
        # if checkpoint_file is not None and os.path.isfile(checkpoint_file):
        #     self.logger.info('Loading Lightning module from checkpoint "%s"' % checkpoint_file)
        #     return self.cls_method.load_from_checkpoint(checkpoint_file)
        method = self.cls_method(self.args)
        self.methods.append(method)

    def add_trainer(self, method: AbstractMethod, save_dir: str, num_devices=-1):
        """ adds a new trainer which saves to 'save_dir' and uses 'num_gpus' gpus """
        mover = self.devices_handler.allocate_devices(num_devices)
        # reuse the main logger when all devices are taken, otherwise create an indexed one
        logger = self.logger if self.devices_handler.get_num_free() == 0 else self.new_logger(len(self.trainer))
        trainer = self.cls_trainer(method=method,
                                   args=self.args,
                                   mover=mover,
                                   save_dir=save_dir,
                                   logger=logger,
                                   is_test_run=self.is_test_run)
        self.trainer.append(trainer)

    def log_detailed(self):
        # log some things
        log_headline(self.logger, 'Trainer, Method, Data, ...')
        rows = [('Trainer', '')]
        for i, trainer in enumerate(self.trainer):
            rows.append((' (%d)' % i, trainer.str()))
        log_in_columns(self.logger, rows)
        for i, method in enumerate(self.methods):
            log_headline(self.logger, "Method %d/%d" % (i+1, len(self.methods)), target_len=80)
            method.log_detailed(self.logger)
        StrategyManager().log_detailed(self.logger)

    def _run(self):
        """ execute the task """
        raise NotImplementedError
| [
"kevin.laube@uni-tuebingen.de"
] | kevin.laube@uni-tuebingen.de |
3c54d18e1ddff0980eaddc81064f2886f30343da | 80052e0cbfe0214e4878d28eb52009ff3054fe58 | /e2yun_addons/extra-addons/merp_picking_wave/wizard/message_wizard.py | 0e5a598da22cd2017115952c6a2839e8fa5675d9 | [] | no_license | xAlphaOmega/filelib | b022c86f9035106c24ba806e6ece5ea6e14f0e3a | af4d4b079041f279a74e786c1540ea8df2d6b2ac | refs/heads/master | 2021-01-26T06:40:06.218774 | 2020-02-26T14:25:11 | 2020-02-26T14:25:11 | 243,349,887 | 0 | 2 | null | 2020-02-26T19:39:32 | 2020-02-26T19:39:31 | null | UTF-8 | Python | false | false | 977 | py | # Copyright 2019 VentorTech OU
# Part of Ventor modules. See LICENSE file for full copyright and licensing details.
from odoo import models, fields as oe_fields, api, _
class MessageWizard(models.TransientModel):
    """Transient wizard that shows a plain text message to the user."""
    _name = 'message.wizard'

    message = oe_fields.Text()

    @api.model
    def default_get(self, fields):
        # pre-fill the message from the calling action's context
        values = super(MessageWizard, self).default_get(fields)
        values['message'] = self.env.context.get('message')
        return values

    @api.multi
    def wizard_view(self):
        # open this wizard's form view in a modal dialog
        form = self.env.ref('merp_picking_wave.view_message_wizard')
        action = {
            'name': _('Message'),
            'type': 'ir.actions.act_window',
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'message.wizard',
            'views': [(form.id, 'form')],
            'view_id': form.id,
            'target': 'new',
            'context': self.env.context,
        }
        return action
| [
"joytao.zhu@icloud.com"
] | joytao.zhu@icloud.com |
8f53dfcc7f2f3305bc737e1491065fa5815c5aa6 | 660328cb139ce1f90da70dbe640df62bf79bcc61 | /infra/src/stages/train_stage_base.py | 05241ecd3f6f9cd28a02c5b6c310f1a38a27c433 | [
"MIT-0"
] | permissive | cyrilhamidechi/amazon-frauddetector-mlops-multiaccount-cdk | 0801f4b844bd9b8e80776748c1056db83c9023fb | 379def0a571452b7920a9aaa56bccc2bfb39c523 | refs/heads/main | 2023-04-23T13:49:37.413348 | 2021-05-10T18:37:41 | 2021-05-10T18:37:41 | 366,139,181 | 0 | 0 | NOASSERTION | 2021-05-10T18:25:59 | 2021-05-10T18:25:58 | null | UTF-8 | Python | false | false | 2,432 | py | # ***************************************************************************************
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. *
# *
# Permission is hereby granted, free of charge, to any person obtaining a copy of this *
# software and associated documentation files (the "Software"), to deal in the Software *
# without restriction, including without limitation the rights to use, copy, modify, *
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to *
# permit persons to whom the Software is furnished to do so. *
# *
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, *
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A *
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT *
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION *
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE *
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *
# ***************************************************************************************
from typing import List, Dict
from aws_cdk import core
from aws_cdk.aws_codebuild import BuildEnvironmentVariable
from aws_cdk.aws_codepipeline import Artifact
from aws_cdk.aws_codepipeline_actions import Action
# type aliases describing what one pipeline stage contributes
StageActionList = List[Action]
OutputArtifacts = List[Artifact]
OutputVariables = Dict[str, BuildEnvironmentVariable]
VariableNamespace = str
class TrainStageBase:
    """Abstract base for a training pipeline stage; subclasses supply the actions."""

    @property
    def name(self):
        """Stage name shown in the pipeline; must be provided by subclasses."""
        # bug fix: the old code *returned* NotImplementedError instead of
        # raising it, silently handing callers the exception class
        raise NotImplementedError

    @property
    def output_variables(self):
        """Variables this stage exposes to later stages; must be provided by subclasses."""
        # bug fix: raise instead of returning the exception class
        raise NotImplementedError

    def get_stage_actions(self, scope: core.Construct, env: str, stage_name: str,
                          source_artifacts: List[Artifact]) -> (StageActionList, VariableNamespace):
        """
        Creates stage actions and returns the actions, the output artifacts and output variables
        :param env:
        :param scope:
        :param stage_name:
        :param source_artifacts:
        :return:
        """
        raise NotImplementedError
| [
"aeg@amazon.com"
] | aeg@amazon.com |
428358c36899815231cc8dc187e189a856eab801 | faa83048d2bb62c27f030942f3f038f87637c293 | /indico/migrations/versions/20200331_1251_3c5462aef0b7_review_conditions_editable_types.py | 310ac5a8b664dcc1d5e0151b411446e0f3dfcefa | [
"MIT"
] | permissive | janschill/indico | f79536db43afaf631449fef5119069af2938e76d | 068a947446eb624308d6264e34a4061807e6ff12 | refs/heads/master | 2023-06-08T07:32:33.007683 | 2021-06-18T12:42:03 | 2021-06-18T12:42:03 | 339,700,154 | 0 | 0 | MIT | 2021-06-18T12:42:04 | 2021-02-17T11:22:48 | Python | UTF-8 | Python | false | false | 845 | py | """Associate review-conditions with editable types
Revision ID: 3c5462aef0b7
Revises: 6444c893a21f
Create Date: 2020-03-31 12:51:40.822239
"""
from alembic import op
# revision identifiers, used by Alembic.
# this migration's id and its direct predecessor in the revision graph
revision = '3c5462aef0b7'
down_revision = '6444c893a21f'
branch_labels = None
depends_on = None
def upgrade():
    # rename the generic editing review-conditions setting to the paper-specific name
    op.execute("""
        UPDATE events.settings
        SET name = 'paper_review_conditions'
        WHERE module = 'editing' AND name = 'review_conditions'
    """)
def downgrade():
    # restore the generic setting name used before this revision
    op.execute("""
        UPDATE events.settings
        SET name = 'review_conditions'
        WHERE module = 'editing' AND name = 'paper_review_conditions'
    """)
    # drop the per-editable-type settings that only exist after this revision
    op.execute("""
        DELETE FROM events.settings
        WHERE module = 'editing' AND name IN ('slides_review_conditions', 'poster_review_conditions')
    """)
| [
"lea.tschiersch@cern.ch"
] | lea.tschiersch@cern.ch |
f0343c7a7e630a11740b9a725c504c151176edf9 | a0decbed06d9e3fc7c6d0e93cf481eb25d79e6e7 | /list.py | b0870146f3dcc17bca4a32d249c7e1ebdaddaae9 | [] | no_license | TriCodeIT/Review-DataCamp-learnpython-Exercise | ab48e4c099cbeaf3cfceb58b4e2f7b11d937b076 | 4d6532377a31204db8218761ef45a651b773408e | refs/heads/master | 2022-12-15T03:27:55.474623 | 2020-09-17T02:39:37 | 2020-09-17T02:39:37 | 295,577,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | numbers = []
# build the lists up from empty, as the exercise requires
numbers = []
strings = []
names = ["John", "Eric", "Jessica"]

for value in (7, 8, 9):
    numbers.append(value)
for text in ('That is', 'easy'):
    strings.append(text)

# write your code here
second_name = names[2]

# this code should write out the filled arrays and the second name in the names list (Eric).
print(numbers)
print(strings)
print("The second name on the names list is %s" % second_name)
"bayu165kusuma@gmail.com"
] | bayu165kusuma@gmail.com |
6de769645c56f7e2a167c942a0f3aee7d46c27ef | 0cff6b75ed2f951c7f4ec40141ea0e9d0b7791de | /assignment1/cs231n/classifiers/linear_svm.py | 67f39adf6f7db99689532d4ca7c4cbe5abefaedf | [] | no_license | Raiszo/cs231n | 825e9e3c3dc20aa252bfad284bc09ef33910fa55 | 46fdd99649156285106279420a2392c09b371e02 | refs/heads/master | 2021-05-08T14:04:54.504987 | 2018-04-07T00:11:20 | 2018-04-07T00:11:20 | 120,059,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,352 | py | import numpy as np
from random import shuffle
from past.builtins import xrange
def svm_loss_naive(W, X, y, reg):
"""
Structured SVM loss function, naive implementation (with loops).
Inputs have dimension D, there are C classes, and we operate on minibatches
of N examples.
Inputs:
- W: A numpy array of shape (D, C) containing weights.
- X: A numpy array of shape (N, D) containing a minibatch of data.
- y: A numpy array of shape (N,) containing training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- reg: (float) regularization strength
Returns a tuple of:
- loss as single float
- gradient with respect to weights W; an array of same shape as W
"""
dW = np.zeros(W.shape) # initialize the gradient as zero
# compute the loss and the gradient
num_classes = W.shape[1]
num_train = X.shape[0]
loss = 0.0
### Raizo
# For the cost function, we go for each label that is not the correct one, and compute the
# difference between it and the correct one. Then using the formula max(0, s_non - s_correct + 1),
# thus: if the s_correct is greater than the other labels in at least 1, it will produce a cost
# of 0, implying that the loss function will lead the right label value to +infinity and the other ones
# if it's found a W that produces a Loss=0, then 2W produces a Loss=0
# to -inifity
for i in xrange(num_train):
scores = X[i].dot(W)
correct_class_score = scores[y[i]]
for j in xrange(num_classes):
# Remember to calculate the derivative of W'(j).Xi and -W'(y[i]).Xi
if j == y[i]:
continue
margin = scores[j] - correct_class_score + 1 # note delta = 1
if margin > 0:
loss += margin
# derivative of W'(j).Xi
dW[:,j] += X[i] # No need to use X[i].T broadcasting does it for you
# derivative of -W'(y[i]).Xi
dW[:,y[i]] -= X[i]
# Right now the loss is a sum over all training examples, but we want it
# to be an average instead so we divide by num_train.
loss /= num_train
dW /= num_train
# Add regularization to the loss.
loss += reg * np.sum(W * W)
dW += reg * 2 * np.sum(W)
#############################################################################
# TODO: #
# Compute the gradient of the loss function and store it dW. #
# Rather that first computing the loss and then computing the derivative, #
# it may be simpler to compute the derivative at the same time that the #
# loss is being computed. As a result you may need to modify some of the #
# code above to compute the gradient. #
#############################################################################
return loss, dW
def svm_loss_vectorized(W, X, y, reg):
"""
Structured SVM loss function, vectorized implementation.
Inputs and outputs are the same as svm_loss_naive.
"""
loss = 0.0
dW = np.zeros(W.shape) # initialize the gradient as zero (D,C)
num_train = X.shape[0]
#############################################################################
# TODO: #
# Implement a vectorized version of the structured SVM loss, storing the #
# result in loss. #
#############################################################################
scores = X.dot(W) # (N, C)
correct = scores[xrange(num_train), y]
# new axis add a dimension, can transfor an array to a vector or a column
margin = np.maximum(0, scores - correct[:,np.newaxis] + 1)
margin[range(num_train), y] = 0
loss = 1/num_train * np.sum(margin.sum(axis=1))
loss += reg * np.sum(W * W)
#############################################################################
# END OF YOUR CODE #
#############################################################################
dmargin = (margin > 0) * 1
counter = np.sum(dmargin, axis=1) # array
# advance indexing should always be done usig arrays
# dW is incremented with 1 Xi for i!=yi, and -count() Xj for i==yi and j!=yj
dmargin[range(num_train), y] = -counter
dW = X.T.dot(dmargin)
dW /= num_train
dW += reg * 2 * W
#############################################################################
# TODO: #
# Implement a vectorized version of the gradient for the structured SVM #
# loss, storing the result in dW. #
# #
# Hint: Instead of computing the gradient from scratch, it may be easier #
# to reuse some of the intermediate values that you used to compute the #
# loss. #
#############################################################################
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, dW
| [
"E05.sek@gmail.com"
] | E05.sek@gmail.com |
3b7bc05b6cd8113f61a51701e14268638b535a1c | da958642910dc99cadb49a8e48319b7d871679fc | /command.py | 70b3b70e25978b9f131db141f116d721d81043fb | [
"MIT"
] | permissive | llou/jmvolume | 9e5b84910e334ddfd4753e2ea69e5058167f09c3 | ccaa165e65c09c1aa1ff514a29b0a071188bcab4 | refs/heads/master | 2022-06-01T08:57:15.983293 | 2020-05-01T11:05:24 | 2020-05-01T11:05:24 | 237,469,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | #! /usr/bin/env python
import sys
if len(sys.argv) > 1:
code = int(sys.argv[1])
else:
code = 0
text = sys.stdin.read()
sys.stdout.write(text)
sys.stderr.write(text)
sys.exit(code)
| [
"yo@llou.net"
] | yo@llou.net |
bf09d40eff5348178b19c86dd584dfa48920972e | fc640c89f22ea1768c2617c7099695246efa4733 | /ImportDashboard_ifactory_1.0.2/ImportDashboard_1_2_0.spec | 42590faaa5073da3a734c511b29b2e6848f52abf | [] | no_license | Atm999/metalwork_install | edc1c8e97aebd72a30c25807455ea0218e435353 | 232348d32033933e2bf9aa6da821106620cbe796 | refs/heads/master | 2022-11-25T12:29:16.596222 | 2020-07-31T02:20:59 | 2020-07-31T02:20:59 | 283,922,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | spec | # -*- mode: python -*-
block_cipher = None
a = Analysis(['ImportDashboard_1_2_0.py'],
pathex=['C:\\Users\\jimcj.lin\\Desktop\\ImportDashboard\\ImportDashboard'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='ImportDashboard_1_2_0',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
runtime_tmpdir=None,
console=True )
| [
"904552105@qq.com"
] | 904552105@qq.com |
fce9296bd1f4ccff5f24c822acf2e8a54e17c148 | e9ddef73bdd204b0bcd39061c7e74292596db53f | /kindle/spiders/rv2.py | 2edf018b935d9be3d414c79c9227151e6fef116c | [] | no_license | tsavko/amazon_reviews_sa | 10553d68d2b871a0758609f16a662a5a61d1e346 | 244677a89e99d524abc4ac687cd15b34daeba7fe | refs/heads/master | 2021-01-19T03:45:24.806450 | 2016-07-12T09:11:09 | 2016-07-12T09:11:09 | 62,804,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,843 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.contrib.linkextractors.lxmlhtml import LxmlLinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from bs4 import BeautifulSoup
from kindle.items import KindleItem
import string
class Rv2Spider(CrawlSpider):
"""
To start extracting data:
* cd to kindle_reviews/kindle
* scrapy crawl -o FILENAME.csv -t csv rv2
"""
DOWNLOADER_MIDDLEWARES = {
'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 300,
'myspider.comm.random_proxy.RandomProxyMiddleware': 200,
}
RETRY_TIMES = 250
name = 'rv2'
allowed_domains = ['amazon.com']
start_urls = ['https://www.amazon.com/Amazon-Kindle-6-Inch-4GB-eReader/product-reviews/B00I15SB16/ref=cm_cr_getr_d_show_all/188-9790737-3604954?ie=UTF8&showViewpoints=1&sortBy=helpful&pageNumber=1']
rules = (
Rule(LxmlLinkExtractor(restrict_xpaths=('//div[@id="cm_cr-pagination_bar"]')), callback='parse_item', follow=True),
)
def parse_item(self, response):
i = KindleItem()
review_list = response.xpath('//div[@id="cm_cr-review_list"]')
review_list = review_list.xpath('//div[@class="a-section review"]')
for rev in range(len(review_list)):
soup = BeautifulSoup(review_list[rev].extract(), 'html.parser')
i['Rating'] = float(soup.find('span', { "class" : "a-icon-alt" }).get_text()[:3])
title_raw = soup.find('a', { "class" : "a-size-base a-link-normal review-title a-color-base a-text-bold" }).get_text().lower()
exclude = set(string.punctuation)
try:
title_raw = ''.join(ch for ch in title_raw if ch not in exclude).replace(' ', ' ').decode('unicode_escape').encode('ascii','ignore')
except UnicodeError:
title_raw = ''.join(ch for ch in title_raw if ch not in exclude).replace(' ', ' ').replace(u'\\u2019', '')
i['Title'] = title_raw
review_raw = soup.find('span', { "class" : "a-size-base review-text" }).get_text().lower()#.replace('\\', '')#.encode('utf-8').replace('\\', '')
try:
review_raw = ''.join(ch for ch in review_raw if ch not in exclude).replace(' ', ' ').decode('unicode_escape').encode('ascii','ignore')
except UnicodeError:
review_raw = ''.join(ch for ch in review_raw if ch not in exclude).replace(' ', ' ').replace(u'\\u2019', '')
i['Review'] = review_raw
print '----------------------------------------------------------------------------------------------------------------'
yield i
print '================================================================================================================'
| [
"tsavko@malkosua.com"
] | tsavko@malkosua.com |
cab5927ad3771cc324b6ece7a42f131306e8212f | c0029a349e70ccb3fd8cfa43d27d7ec8585d4620 | /Python/tests/test_Parser.py | 7a1595ec229a03a781156c4677908569e5da651c | [] | no_license | AshFromNowOn/Hansard_sentiment | 3dda7911fa730bec52d56f52262823d8e9778a1a | e055414c397bc189c6c29998a827bcfb6e72ef2b | refs/heads/master | 2022-06-05T12:09:30.294803 | 2018-05-04T12:52:46 | 2018-05-04T12:52:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | import unittest
import Speech_Parser_BS
import os
from lxml.doctestcompare import LXMLOutputChecker
import lxml
from doctest import Example
class SpeechParserTest(unittest.TestCase):
def setUp(self):
self.Parser = Speech_Parser_BS.SpeechParser("./tests/Test Data/Source Data/Test File")
self.Parser.find_files()
def tearDown(self):
directory = "./tests/Test Data/Parsed Speech/Test File"
for file in os.listdir(directory):
os.remove(os.path.join(directory,file))
os.rmdir(directory)
def test_parser_find_files(self):
expected_array = ["test_file.xml"]
self.assertEqual(expected_array, self.Parser.files)
@unittest.skip("Unable to find way to compare XML trees")
def test_parser_parse_files(self):
self.Parser.parse_files()
self.assertEqual()
| [
"adn2@aber.ac.uk"
] | adn2@aber.ac.uk |
2622e858d939ef2b19c0df7e2dd80c6ce51c5ae5 | 635a80be150a936392b91f6208c3077b71dceec6 | /src/train21y.py | 66228acb7b06ede61443b23996605ef013edb574 | [] | no_license | alegorov/recommendation-system | 005695547ffe47c108ce45a2edd96fcd02da2495 | 4925e800234cb7c08cde696651c3ff37d724a94e | refs/heads/master | 2022-11-23T15:47:14.620162 | 2020-07-30T19:52:42 | 2020-07-30T19:52:42 | 283,861,633 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,985 | py | import os
import pickle
from utils import *
from catboost import CatBoostClassifier, Pool
SRC_DIR = 'data0'
ITERATION_COUNT = 2721
FEATURE_COUNT = 100
# TREE_DEPTH = 1
# BORDER_COUNT = 256
# RANDOM_STRENGTH = 1060.
ETA = 0.1
ALG_NAME = str(os.path.splitext(os.path.basename(__file__))[0].split('-')[0])
OUT_DIR = SRC_DIR + '-out'
train = open_csv(SRC_DIR + '/train.csv')
test = open_csv(SRC_DIR + '/test.csv')
item_count = get_item_count(train)
publisher_count = get_publisher_count(train)
user_count = get_user_count(train)
topic_count = get_topic_count(train)
if item_count != get_item_count(test):
raise Exception('item_count != get_item_count(test)')
if publisher_count != get_publisher_count(test):
raise Exception('publisher_count != get_publisher_count(test)')
if user_count != get_user_count(test):
raise Exception('user_count != get_user_count(test)')
if topic_count != get_topic_count(test):
raise Exception('topic_count != get_topic_count(test)')
print('item_count =', item_count, flush=True)
print('publisher_count =', publisher_count, flush=True)
print('user_count =', user_count, flush=True)
print('topic_count =', topic_count, flush=True)
with open(OUT_DIR + '/__' + ALG_NAME[:-1] + 'x__aa.pickle', 'rb') as pf:
aa_if = np.transpose(pickle.load(pf)[:FEATURE_COUNT, :], (1, 0))
with open(OUT_DIR + '/__' + ALG_NAME[:-1] + 'x__bb.pickle', 'rb') as pf:
bb_uf = np.transpose(pickle.load(pf)[:FEATURE_COUNT, :], (1, 0))
def v2data_(v):
i = v[f_item]
u = v[f_user]
aa = aa_if[i]
bb = bb_uf[u]
return [aa[-1], bb[-1]] + (aa * bb).tolist()
def csv2data():
train_data = [[]] * len(train)
test_data = [[]] * len(test)
for pos, v in enumerate(train):
train_data[pos] = v2data_(v)
for pos, v in enumerate(test):
test_data[pos] = v2data_(v)
return train_data, test_data
def save_result(test_probas):
with open(OUT_DIR + '/' + ALG_NAME + '.csv', 'w') as f:
f.write('sample_id,target\n')
for pos, v in enumerate(test):
r = test_probas[pos][1]
f.write('%s,%s\n' % (v[f_sample_id], r))
def main():
test_has_target = f_target < len(test[0])
train_data, test_data = csv2data()
train_labels = list(map(lambda v: 1 if v[f_target] else -1, train))
train_data = Pool(data=train_data, label=train_labels)
if test_has_target:
test_labels = list(map(lambda v: 1 if v[f_target] else -1, test))
test_data = Pool(data=test_data, label=test_labels)
else:
test_data = Pool(data=test_data)
model = CatBoostClassifier(
# depth=TREE_DEPTH,
# border_count=BORDER_COUNT,
# random_strength=RANDOM_STRENGTH,
iterations=ITERATION_COUNT,
learning_rate=ETA
)
if test_has_target:
model.fit(train_data, eval_set=test_data)
else:
model.fit(train_data)
test_probas = model.predict_proba(test_data)
save_result(test_probas)
main()
| [
"alegorov@mail.ru"
] | alegorov@mail.ru |
da8f8da092d23de7bbd0a3f08e2882be55ad0909 | ae6d415523bbcebec3c77a5622b3d4028dcbccb8 | /config/config.py | 554bafbc997f254e7703f51db37d710c6d2450d3 | [] | no_license | manzub/newzz.com | c017347b10e729fa9079d1dfb501bbd6d65c7410 | 22eefad9ffad7cab6d47864dbab10989420c95e1 | refs/heads/main | 2022-12-19T14:55:21.942580 | 2020-10-06T14:44:13 | 2020-10-06T14:44:13 | 301,212,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | class DevelopmentConfigs(object):
SECRET_KEY = 'mybuzzbreakapp'
SQLALCHEMY_DATABASE_URI = "postgres://postgres:Jeddac401@127.0.0.1:5432/buzzbreak"
SQLALCHEMY_TRACK_MODIFICATIONS = False
pool_size = 32
max_overflow = 64
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USE_SSL = False
MAIL_USERNAME = 'hadipartiv21@gmail.com'
MAIL_PASSWORD = 'lkgamqggscqkitnn' | [
"41767916+manzub@users.noreply.github.com"
] | 41767916+manzub@users.noreply.github.com |
418a1731b4494372d8177bdc9e701a72264b73da | 17d39b5f72ebc6339ad72ba3395c619dfa469c05 | /profiles_api/permissions.py | c3d3444daaaea28b8c6242043642af05cf11bafb | [
"MIT"
] | permissive | Princeshaw/userprofiles-rest-api | ca512f9378a2d8a18f979bd90f049f89d1760cc7 | cea40e5b77f0131350d963ddb0a9dc13f2c720d5 | refs/heads/master | 2022-12-03T18:46:09.387880 | 2020-08-16T16:44:57 | 2020-08-16T16:44:57 | 287,275,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | from rest_framework import permissions
class UpdateOwnProfile(permissions.BasePermission):
"""Allow user to edit their own profile"""
def has_object_permission(self,request,view,obj):
"""Check user is trying to edit their own profile"""
if request.method in permissions.SAFE_METHODS:
print('auth')
return True
return obj.id==request.user.id
class UpdateOwnStatus(permissions.BasePermission):
"""Allow users to update their own status"""
def has_object_permission(self, request, view, obj):
"""Check the user is trying to update their own status"""
if request.method in permissions.SAFE_METHODS:
return True
return obj.user_profile.id == request.user.id | [
"prince54shaw@gmail.com"
] | prince54shaw@gmail.com |
704f23f3ef1050e2aa4ca2a458d509acd2dd6475 | 2887be7e41fd0f5c619e320152c2eb6e4d8e03e1 | /plotcounts/src/d1state/system_state.py | 8255fff10ac99c19922046584b5b3ffd3fa5ee29 | [
"Apache-2.0"
] | permissive | DataONEorg/d1_environment_status | b586e077674a782aa84230304bd1a51dd72638ec | 65368e21e649d060e085df2e844964f74ac0f985 | refs/heads/master | 2021-01-10T18:01:36.013081 | 2016-01-29T15:48:14 | 2016-01-29T15:48:14 | 50,665,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,995 | py | '''
Created on Feb 27, 2014
@author: vieglais
Generates a JSON object that provides a high level description of the state of
a DataONE environment at the time.
The resulting JSON can be processed with Javascript and HTML to provide a
state view, or can be loaded back into the Python structure for additional
processing.
'''
import logging
import pprint
import datetime
import json
import socket
import httplib
import math
import dns.resolver
import d1_common.types.exceptions
from d1_client import cnclient, cnclient_1_1
from d1_client import mnclient
def getNow(asDate=False):
ctime = datetime.datetime.utcnow()
return ctime
def getNowString(ctime=None):
if ctime is None:
ctime = getNow()
return ctime.strftime("%Y-%m-%d %H:%M:%S.0+00:00")
def dateTimeToListObjectsTime(dt):
'''Return a string representation of a datetime that can be used in toDate or
fromDate in a listObject API call.
%Y-%m-%dT%H:%M:%S
'''
return dt.strftime("%Y-%m-%dT%H:%M:%S")
def dateTimeToSOLRTime(dt):
'''Return a string representation of a datetime that can be used in SOLR
queries against dates such as dateUploaded
fromDate in a listObject API call.
'''
return dt.strftime("%Y-%m-%dT%H:%M:%S.000Z")
def escapeQueryTerm(term):
'''
+ - && || ! ( ) { } [ ] ^ " ~ * ? : \
'''
reserved = ['+','-','&','|','!','(',')','{','}','[',']','^','"','~','*','?',':',]
term = term.replace(u'\\',u'\\\\')
for c in reserved:
term = term.replace(c,u"\%s" % c)
return term
class NodeState(object):
def __init__(self, baseURL):
self.log = logging.getLogger(str(self.__class__.__name__))
self.baseurl = baseURL
self.clientv1 = mnclient.MemberNodeClient( self.baseurl )
def count(self):
'''
Return the number of objects on the node as reported by listObjects
Exceptions.NotAuthorized – (errorCode=401, detailCode=1520)
Exceptions.InvalidRequest – (errorCode=400, detailCode=1540)
Exceptions.NotImplemented –
(errorCode=501, detailCode=1560)
Raised if some functionality requested is not implemented. In the case of an optional request parameter not being supported, the errorCode should be 400. If the requested format (through HTTP Accept headers) is not supported, then the standard HTTP 406 error code should be returned.
Exceptions.ServiceFailure – (errorCode=500, detailCode=1580)
Exceptions.InvalidToken – (errorCode=401, detailCode=1530)
exception httplib.HTTPException
exception httplib.NotConnected -10
exception httplib.InvalidURL -11
exception httplib.UnknownProtocol -12
exception httplib.UnknownTransferEncoding -13
exception httplib.UnimplementedFileMode -14
exception httplib.IncompleteRead -15
exception httplib.ImproperConnectionState -16
exception httplib.CannotSendRequest -17
exception httplib.CannotSendHeader -18
exception httplib.ResponseNotReady -19
exception httplib.BadStatusLine -20
'''
try:
res = self.clientv1.listObjects(start=0, count=0)
return res.total
except d1_common.types.exceptions.NotAuthorized as e:
self.log.error(e)
return -401
except d1_common.types.exceptions.InvalidRequest as e:
self.log.error(e)
return -400
except d1_common.types.exceptions.NotImplemented as e:
self.log.error(e)
return -501
except d1_common.types.exceptions.ServiceFailure as e:
self.log.error(e)
return -500
except d1_common.types.exceptions.InvalidToken as e:
self.log.error(e)
return -401
except httplib.NotConnected as e:
self.log.error(e)
return -10
except httplib.InvalidURL as e:
self.log.error(e)
return -11
except httplib.UnknownProtocol as e:
self.log.error(e)
return -12
except httplib.UnknownTransferEncoding as e:
self.log.error(e)
return -13
except httplib.UnimplementedFileMode as e:
self.log.error(e)
return -14
except httplib.IncompleteRead as e:
self.log.error(e)
return -15
except httplib.ImproperConnectionState as e:
self.log.error(e)
return -16
except httplib.CannotSendRequest as e:
self.log.error(e)
return -17
except httplib.CannotSendHeader as e:
self.log.error(e)
return -18
except httplib.ResponseNotReady as e:
self.log.error(e)
return -19
except httplib.BadStatusLine as e:
self.log.error(e)
return -20
except socket.error as e:
self.log.error(e)
#See notes.md for a list of error codes
if hasattr(e, 'errno'):
if e.errno is not None:
return -1000 - e.errno
return -21
except Exception as e:
'''Something else. Need to examine the client connection object
'''
self.log.error("Error not trapped by standard exception.")
self.log.error(e)
return -1
class EnvironmentState(object):
#increment the version flag if there's a change to the generated data structure
VERSION = "18"
COUNT_PUBLIC = None
COUNT_PUBLIC_CURRENT = "-obsoletedBy:[* TO *]"
TIMESTAMP_FORMAT = "%Y-%m-%d %H:%M:%S.0+00:00"
JS_VARIABLE_STATE = "var env_state = "
JS_VARIABLE_INDEX = "var env_state_index = "
JS_VARIABLE_NODES = "var node_state_index = "
#TODO: These IP addresses are specific to the production environment and
#include changes to UCSB and ORC
CN_IP_ADDRESSES = ['160.36.134.71',
'128.111.220.46',
'128.111.54.80',
'128.111.36.80',
'160.36.13.150',
'64.106.40.6',
#'128.219.49.14', #This is a proxy server at ORNL
'128.111.220.51', #UCSB Nagios
'128.111.84.5', ] #UCSB Nagios
LOG_EVENTS = [['create','Created using DataONE API'],
['read', 'Content downloaded'],
['read.ext', 'Content downloaded by entities other than CNs'],
['update', 'Updated'],
['delete', 'Deleted'],
['replicate', 'Content retrieved by replication process'],
['synchronization_failed', 'Attempt to synchronize failed'],
['replication_failed', 'Attempt to replicate failed'],
]
def __init__(self, baseurl, cert_path=None):
self.log = logging.getLogger(str(self.__class__.__name__))
self.log.debug("Initializing...")
self.baseurl = baseurl
self.state = {'meta':None,
'formats':None,
'nodes':None,
'counts':None,
'summary': None,
'dns': None,
'logs': None,
}
self.clientv1 = cnclient.CoordinatingNodeClient( self.baseurl,
cert_path=cert_path )
self.clientv11 = cnclient_1_1.CoordinatingNodeClient( self.baseurl,
cert_path=cert_path )
def __str__(self):
return pprint.pformat( self.state )
def populateState(self):
'''Populates self.state with current environment status
'''
self.tstamp = getNow()
meta = {'tstamp': getNowString(self.tstamp),
'baseurl': self.baseurl,
'version': EnvironmentState.VERSION,
'count_meta': {0:'ALL',
1:EnvironmentState.COUNT_PUBLIC,
2:EnvironmentState.COUNT_PUBLIC_CURRENT}
}
self.state['meta'] = meta
self.state['formats'] = self.getFormats()
self.state['nodes'] = self.getNodes()
self.state['dns'] = self.getDNSInfo()
self.state['logs'] = self.getLogSummary()
self.state['counts'] = self.getCounts()
self.state['summary'] = self.summarizeCounts()
self.state['summary']['sizes'] = self.getObjectTypeSizeHistogram()
def retrieveLogResponse(self, q, fq=None):
self.clientv1.connection.close()
url = self.clientv1._rest_url('log')
query = {'q': q}
if not fq is None:
query['fq'] = fq
#logging.info("URL = %s" % url)
response = self.clientv1.GET(url, query)
logrecs = self.clientv1._read_dataone_type_response(response)
return logrecs.total
def getLogSummary(self):
periods = [['Day', 'dateLogged:[NOW-1DAY TO NOW]', 'Past day'],
['Week', 'dateLogged:[NOW-7DAY TO NOW]', 'Past week'],
['Month', 'dateLogged:[NOW-1MONTH TO NOW]', 'Past month'],
['Year', 'dateLogged:[NOW-1YEAR TO NOW]', 'Past year'],
['All', 'dateLogged:[2012-07-01T00:00:00.000Z TO NOW]', 'Since July 1, 2012'],
]
res = {'events': EnvironmentState.LOG_EVENTS,
'periods': map(lambda p: [p[0], p[2]], periods),
'data': {}}
exclude_cns = "-ipAddress:({0})"\
.format( " OR ".join(EnvironmentState.CN_IP_ADDRESSES))
for event in EnvironmentState.LOG_EVENTS:
res['data'][event[0]] = {}
for period in periods:
self.log.info('Log for {0} over {1}'.format(event[0], period[0]))
if event[0].endswith('.ext'):
ev = event[0].split(".")[0]
q = "event:{0} AND {1}".format(ev, exclude_cns)
else:
q = "event:{0}".format(event[0])
fq = period[1]
nrecords = self.retrieveLogResponse(q, fq=fq)
res['data'][event[0]][period[0]] = nrecords
return res
def getDNSInfo(self):
#TODO: Make this responsive to the CNode specified in the constructor
res = {'cn-ucsb-1.dataone.org':{},
'cn-unm-1.dataone.org':{},
'cn-orc-1.dataone.org':{},
'cn.dataone.org':{}
}
for k in res.keys():
info = dns.resolver.query(k)
res[k]['address'] = []
for ip in info:
res[k]['address'].append(ip.to_text())
return res;
def getCountsToDate(self, to_date, exclude_listObjects=False):
self.tstamp = getNow()
meta = {'tstamp': getNowString(self.tstamp),
'baseurl': self.baseurl,
'version': EnvironmentState.VERSION,
'count_meta': {0:'ALL',
1:EnvironmentState.COUNT_PUBLIC,
2:EnvironmentState.COUNT_PUBLIC_CURRENT}
}
self.state['meta'] = meta
self.state['formats'] = self.getFormats()
self.state['counts'] = self.getCounts(as_of_date = to_date,
exclude_listObjects=exclude_listObjects)
self.state['summary'] = self.summarizeCounts()
def getNodes(self):
'''Returns a dictionary of node information, keyed by nodeId
'''
def syncschedule_array(s):
if s is None:
return {}
# hour mday min mon sec wday year
# year, mon, mday, wday, hour, min, sec
return [s.year, s.mon, s.mday, s.wday, s.hour, s.min, s.sec]
res = {}
nodes = self.clientv1.listNodes()
for node in nodes.node:
entry = {'name' : node.name,
'description' : node.description,
'baseurl' : node.baseURL,
'type' : node.type,
'state': node.state,
'objectcount': -1,
}
sync = node.synchronization
if not sync is None:
entry['sync.schedule'] = syncschedule_array(sync.schedule)
entry['sync.lastHarvested'] = sync.lastHarvested.strftime("%Y-%m-%d %H:%M:%S.0%z")
entry['sync.lastCompleteHarvest'] = sync.lastCompleteHarvest.strftime("%Y-%m-%d %H:%M:%S.0%z")
#Call list objects to get a count
self.log.info("Attempting node count on {0}".format(node.baseURL))
ns = NodeState(node.baseURL)
entry['objectcount'] = ns.count()
res[node.identifier.value()] = entry
return res
def getFormats(self):
res = {}
formats = self.clientv1.listFormats()
for format in formats.objectFormat:
res[format.formatId] = {'name' : format.formatName,
'type' : format.formatType}
return res
def _countAll(self, counts, as_of_date=None):
'''Returns object counts by formatId using listObjects
Requires that self.state['formats'] has been populated
'''
to_date = None
if not as_of_date is None:
to_date = dateTimeToListObjectsTime(as_of_date)
for formatId in self.state['formats'].keys():
res = self.clientv11.listObjects(count=0,
formatId=formatId,
toDate=to_date)
self.log.info("{0:s} : {1:d}".format(formatId, res.total))
self.state['counts'][formatId][0] = res.total
def _countSOLR(self, counts, col=1, fq=None, as_of_date=None):
'''Populates counts
'''
queryEngine = "solr"
query='/'
maxrecords = 0
fields = 'id'
date_restriction = ''
if not as_of_date is None:
date_restriction = " AND dateUploaded:[* TO {0:s}]".format(dateTimeToSOLRTime(as_of_date))
for formatId in self.state['formats'].keys():
q = "formatId:\"{0:s}\"".format(escapeQueryTerm(formatId))
q = q + date_restriction
ntries = 0
while ntries < 4:
try:
ntries += 1
results = eval(self.clientv1.query(queryEngine, query=query,
q=q,
fq=fq,
wt='python',
fl=fields,
rows=maxrecords).read())
break
except httplib.BadStatusLine as e:
self.log.warn(e)
nHits = results['response']['numFound']
self.state['counts'][formatId][col] = nHits
self.log.info("{0:s} : {1:d}".format(formatId, nHits))
def getCounts(self, as_of_date=None, exclude_listObjects=False):
'''return object counts, optionally as of the specified date (datetime)
'''
#initialize the storage space
counts = {}
for formatId in self.state['formats'].keys():
counts[formatId] = [0, 0, 0]
self.state['counts'] = counts
#populate the number of all objects
for k in self.state['meta']['count_meta'].keys():
if k == 0:
if not exclude_listObjects:
self._countAll(counts, as_of_date=as_of_date)
else:
self._countSOLR(counts,
col=k,
fq=self.state['meta']['count_meta'][k],
as_of_date=as_of_date)
return counts
def getObjectSizeHistogram(self, q="*:*", nbins=10):
'''Returns a list of [size_low, size_high, count] for objects that match
the specified query.
To find minimum value:
https://cn.dataone.org/cn/v1/query/solr/?fl=size&sort=size%20asc&q=*:*&rows=1
to find maximum value:
https://cn.dataone.org/cn/v1/query/solr/?fl=size&sort=size%20desc&q=*:*&rows=1
'''
def getSOLRResponse(q, maxrecords, fields, rsort, fq=None):
ntries = 0
while ntries < 4:
try:
ntries += 1
results = eval(self.clientv1.query("solr", query="/",
q=q,
fq=fq,
wt='python',
fl=fields,
sort=rsort,
rows=maxrecords).read())
return results
except httplib.BadStatusLine as e:
self.log.warn(e)
return None
minval = getSOLRResponse(q, 1, 'size', "size asc")['response']['docs'][0]['size']
maxval = getSOLRResponse(q, 1, 'size', "size desc")['response']['docs'][0]['size']
if minval <1:
minval = 1
lminval = math.log10(minval)
lmaxval = math.log10(maxval)
binsize = (lmaxval - lminval) / (nbins*1.0)
res = []
for i in xrange(0, nbins):
row = [math.pow(10, lminval + i*binsize),
math.pow(10, lminval + (i+1)*binsize),
0]
res.append(row)
for i in xrange(0, nbins):
row = res[i]
if i == 0:
fq = "size:[{0:d} TO {1:d}]".format(math.trunc(row[0]), math.trunc(row[1]))
elif i == nbins-1:
fq = "size:[{0:d} TO {1:d}]".format(math.trunc(row[0]), math.trunc(row[1])+1)
else:
fq = "size:[{0:d} TO {1:d}]".format(math.trunc(row[0])+1, math.trunc(row[1]))
n = getSOLRResponse(q, 0, 'size', 'size asc', fq=fq)['response']['numFound']
res[i][2] = n
return {"minimum": minval,
"maximum": maxval,
"histogram": res}
def getObjectTypeSizeHistogram(self):
res = {'data':[],
'metadata':[],
'resource':[]}
res['data'] = self.getObjectSizeHistogram(q="formatType:DATA")
res['metadata'] = self.getObjectSizeHistogram(q="formatType:METADATA")
res['resource'] = self.getObjectSizeHistogram(q="formatType:RESOURCE")
return res
def summarizeCounts(self):
'''Computes summary totals for DATA, METADATA, and RESOURCE objects
'''
totalcols = ['data', 'meta', 'resource']
summary = {'all': {'data':0, 'meta': 0, 'resource': 0, 'total': 0},
'public': {'data':0, 'meta': 0, 'resource': 0, 'total': 0},
'public_notobsolete': {'data':0, 'meta': 0, 'resource': 0, 'total': 0}
}
for fmt in self.state['formats'].keys():
if self.state['formats'][fmt]['type'] == 'DATA':
summary['all']['data'] = summary['all']['data'] + self.state['counts'][fmt][0]
summary['public']['data'] = summary['public']['data'] + self.state['counts'][fmt][1]
summary['public_notobsolete']['data'] = summary['public_notobsolete']['data'] + self.state['counts'][fmt][2]
elif self.state['formats'][fmt]['type'] == 'METADATA':
summary['all']['meta'] = summary['all']['meta'] + self.state['counts'][fmt][0]
summary['public']['meta'] = summary['public']['meta'] + self.state['counts'][fmt][1]
summary['public_notobsolete']['meta'] = summary['public_notobsolete']['meta'] + self.state['counts'][fmt][2]
elif self.state['formats'][fmt]['type'] == 'RESOURCE':
summary['all']['resource'] = summary['all']['resource'] + self.state['counts'][fmt][0]
summary['public']['resource'] = summary['public']['resource'] + self.state['counts'][fmt][1]
summary['public_notobsolete']['resource'] = summary['public_notobsolete']['resource'] + self.state['counts'][fmt][2]
for ctype in summary.keys():
summary[ctype]['total'] = summary[ctype]['data']
summary[ctype]['total'] = summary[ctype]['total'] + summary[ctype]['meta']
summary[ctype]['total'] = summary[ctype]['total'] + summary[ctype]['resource']
self.state['summary'] = {'counts' : summary}
return summary
def asJSON(self, outStream):
outStream.write(EnvironmentState.JS_VARIABLE_STATE)
json.dump(self.state, outStream, indent=2)
def fromJSON(self, inStream):
jbuffer = inStream.read()
self.state = json.loads(jbuffer[len(EnvironmentState.JS_VARIABLE_STATE):])
self.tstamp = datetime.datetime().strptime(self.state['meta']['tstamp'],"%Y-%m-%d %H:%M:%S.0+00:00")
def getTStamp(self):
return self.state['meta']['tstamp']
#===============================================================================
def test1(baseurl="https://cn.dataone.org/cn"):
    """Smoke test: build an EnvironmentState and pretty-print its node list."""
    env_state = EnvironmentState(baseurl)
    pprint.pprint(env_state.getNodes())
def test2(baseurl="https://cn.dataone.org/cn"):
    """Smoke test: build an EnvironmentState and pretty-print its format list."""
    env_state = EnvironmentState(baseurl)
    pprint.pprint(env_state.getFormats())
def test3(baseurl="https://knb.ecoinformatics.org/knb/d1/mn"):
    """Smoke test: report the object count for one member node."""
    node_state = NodeState(baseurl)
    total = node_state.count()
    # Parenthesized single-expression print works identically under
    # Python 2 (statement) and Python 3 (function).
    print("{0} : {1}".format(baseurl, total))
def main(baseurl="https://cn.dataone.org/cn"):
    """Build and fully populate the environment state, then print it."""
    env_state = EnvironmentState(baseurl)
    env_state.populateState()
    print(env_state)
#===============================================================================
if __name__ == "__main__":
    # DEBUG level makes the HTTP traffic of the state classes visible.
    logging.basicConfig(level=logging.DEBUG)
    # Only the quick single-node count check runs by default; the slower
    # environment-wide walks stay commented out.
    test3()
    #test1()
    #test2()
    #main()
| [
"dave.vieglais@gmail.com"
] | dave.vieglais@gmail.com |
903a77f4a02718a688e108e05b286348b1c99a65 | eef243e450cea7e91bac2f71f0bfd45a00c6f12c | /.history/worker_master_20210128031009.py | 16168c2cf8a83eb1dfb82220fd383d845e05ab9a | [] | no_license | hoaf13/nlp-chatbot-lol | 910ab2ea3b62d5219901050271fc1a1340e46a2f | 18cb64efa9d6b4cafe1015f1cd94f4409271ef56 | refs/heads/master | 2023-05-08T04:17:19.450718 | 2021-02-02T02:37:38 | 2021-02-02T02:37:38 | 332,535,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | import redis
# Connection used both to poll the "new product" flag and to fetch the
# product payload itself.
red = redis.StrictRedis(host='localhost',port=6379,db=0)
# In-memory backlog of products consumed from worker1's channel.
queue = list()
def str_to_bool(value):
    """Convert a Redis flag value to a boolean.

    Args:
        value: raw value returned by redis (bytes), a plain str, or None
               when the key does not exist.

    Returns:
        True / False for recognised flag values, otherwise None.

    Fixes: the original parameter was named ``str``, shadowing the builtin;
    it also only accepted bytes — plain strings are now handled too.
    """
    if isinstance(value, bytes):
        value = value.decode('utf-8', 'replace')
    if value == 'False':
        return False
    if value == 'True':
        return True
    return None
while True:
    # check supplier product status
    is_new = str_to_bool(red.get("new_product_worker1"))
    if is_new:
        taken_product = red.get('product_worker1')
        queue.append(taken_product)
        # Clear the flag so the producer knows the item was consumed.
        red.set("new_product_worker1", str(False))
    # NOTE(review): busy-wait with no sleep — this pins a CPU core; a short
    # time.sleep() or Redis pub/sub would avoid that. Confirm intent.
    # NOTE(review): the flag GET and the payload GET are not atomic, so a
    # product published between the two reads can be lost or duplicated.
| [
"samartcall@gmail.com"
] | samartcall@gmail.com |
a582db00f0dd2b56511ec2be5cc34a6afe61582a | 03b2e2b6e28545a738e2cf2bd396e5d8518eef1d | /BalancingHelper/migrations/0010_auto_20190712_1605.py | 9558603dc8475fe926646b59da90dd914c8b6929 | [] | no_license | ChasingCarrots/GoodCompanyGamedesignDB | a1ffe4ae6138d921630ff68603b200a878748140 | 4663c394a56e64bcb2e0629bfa09c5070267e666 | refs/heads/master | 2022-05-06T05:40:48.307200 | 2022-03-24T11:59:34 | 2022-03-24T11:59:34 | 132,142,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,909 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2019-07-12 14:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration.

    Adds feature-weighting scalar fields (main / positive / negative) and
    positive/negative ManyToMany feature relations to ``criticalmodulepath``,
    plus the matching scalar fields on ``historicalcriticalmodulepath``
    (apparently a history shadow model; the M2M relations are not mirrored
    there — confirm that the history tracker intentionally skips them).
    """
    dependencies = [
        ('Production', '0043_auto_20190709_1321'),
        ('BalancingHelper', '0009_auto_20190712_1408'),
    ]
    operations = [
        migrations.AddField(
            model_name='criticalmodulepath',
            name='MainFeatureValue',
            field=models.FloatField(default=1),
        ),
        migrations.AddField(
            model_name='criticalmodulepath',
            name='NegativeFeatureValue',
            field=models.FloatField(default=0.2),
        ),
        migrations.AddField(
            model_name='criticalmodulepath',
            name='NegativeFeatures',
            field=models.ManyToManyField(blank=True, related_name='NegativeOnPath', to='Production.ProductFeature'),
        ),
        migrations.AddField(
            model_name='criticalmodulepath',
            name='PositiveFeatureValue',
            field=models.FloatField(default=0.2),
        ),
        migrations.AddField(
            model_name='criticalmodulepath',
            name='PositiveFeatures',
            field=models.ManyToManyField(blank=True, related_name='PositiveOnPath', to='Production.ProductFeature'),
        ),
        migrations.AddField(
            model_name='historicalcriticalmodulepath',
            name='MainFeatureValue',
            field=models.FloatField(default=1),
        ),
        migrations.AddField(
            model_name='historicalcriticalmodulepath',
            name='NegativeFeatureValue',
            field=models.FloatField(default=0.2),
        ),
        migrations.AddField(
            model_name='historicalcriticalmodulepath',
            name='PositiveFeatureValue',
            field=models.FloatField(default=0.2),
        ),
    ]
| [
"marc@chasing-carrots.com"
] | marc@chasing-carrots.com |
57b68e4a74604876334affc613d1d972667cfbe0 | c6431cdf572dd10f0f4d45839e6081124b246f90 | /code/lc3.py | 2ec290d3bc538c94abd8a7e80c92e02c5ff01e14 | [] | no_license | bendanwwww/myleetcode | 1ec0285ea19a213bc629e0e12fb8748146e26d3d | 427846d2ad1578135ef92fd6549235f104f68998 | refs/heads/master | 2021-09-27T19:36:40.111456 | 2021-09-24T03:11:32 | 2021-09-24T03:11:32 | 232,493,899 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | """
给定一个字符串,请你找出其中不含有重复字符的 最长子串 的长度。
示例 1:
输入: "abcabcbb"
输出: 3
解释: 因为无重复字符的最长子串是 "abc",所以其长度为 3。
示例 2:
输入: "bbbbb"
输出: 1
解释: 因为无重复字符的最长子串是 "b",所以其长度为 1。
示例 3:
输入: "pwwkew"
输出: 3
解释: 因为无重复字符的最长子串是 "wke",所以其长度为 3。
请注意,你的答案必须是 子串 的长度,"pwke" 是一个子序列,不是子串。
"""
class Solution(object):
    def lengthOfLongestSubstring(self, s):
        """Return the length of the longest substring of ``s`` that
        contains no repeating characters (classic sliding window)."""
        last_seen = {}      # char -> index of its most recent occurrence
        window_start = 0    # left edge of the current repeat-free window
        longest = 0
        for index, char in enumerate(s):
            # A repeat inside the window forces the window to restart just
            # past the previous occurrence; repeats left of the window are
            # harmless.
            if char in last_seen and last_seen[char] >= window_start:
                window_start = last_seen[char] + 1
            last_seen[char] = index
            longest = max(longest, index - window_start + 1)
        return longest
# Ad-hoc smoke test: for 'abba' the longest repeat-free substrings are
# 'ab' / 'ba', so the printed result below should be 2.
s = Solution()
res = s.lengthOfLongestSubstring('abba')
print(res) | [
"461806307@qq.com"
] | 461806307@qq.com |
d65784b7ec0cdad3d8e2ac0b3c31ebe3e21c263e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_embassy.py | 8c0f88ca692520c38a075b1fcc908e831280b60a | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py |
#calss header
class _EMBASSY():
def __init__(self,):
self.name = "EMBASSY"
self.definitions = [u'the group of people who represent their country in a foreign country: ', u'the building that these people work in: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
bf7758fdfb1ae881bf670bfce99ebcafd8b320d2 | 04f4b5c2e6b283d9ef7198425c3bf926623554d2 | /Heliocentric/heliocentric.py | 8ecd6f948d455061e7e2096966d986b5dbbded88 | [] | no_license | gabisala/Kattis | a00e96aab4dbe8033e0e110f5224170b8ad473a3 | 686817fb90b39e0126b4c8b0280f8a1f10c294ee | refs/heads/master | 2021-07-07T11:53:30.931347 | 2017-10-03T15:40:07 | 2017-10-03T15:40:07 | 105,471,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py |
# -*- coding:utf-8 -*-
import sys
# Read data
# Each stdin line carries two integers: the current orbit day for Earth
# (0-364) and for Mars (0-686).
data = []
for line in sys.stdin:
    data.append(map(int, line.split()))  # Python 2: map() returns a list here
def simultanously(earth, mars):
    """
    Count how long it will take until both planets are on day 0 of their orbits simultanously.
    :param earth: int, day earth orbit (0-364)
    :param mars: int, day mars orbit (0-686)
    :return: int, the smallest number of days until the two planets will both be on day 0 of their orbits
    """
    EARTH_YEAR = 365  # Earth orbit days run 0..364
    MARS_YEAR = 687   # Mars orbit days run 0..686
    # Days until Earth next reaches day 0 (0 if it is there already).
    to_earth_day0 = (EARTH_YEAR - earth) % EARTH_YEAR
    # Earth is on day 0 exactly at to_earth_day0 + k * EARTH_YEAR; scan those
    # instants for one where Mars is also on day 0.  365 and 687 are coprime
    # (365 = 5*73, 687 = 3*229), so a solution always exists within 687
    # Earth years.  This replaces the original day-by-day simulation (up to
    # ~250k iterations) with at most 687 modular checks, with identical
    # results for every valid input.
    for k in range(MARS_YEAR):
        days = to_earth_day0 + k * EARTH_YEAR
        if (mars + days) % MARS_YEAR == 0:
            return days
# For each case, display the case number followed by the smallest number of days until the two planets will both be on
# day 0 of their orbits. Follow the format of the sample output.
for case_number, orbit_days in enumerate(data):
    current_earth = orbit_days[0]
    current_mars = orbit_days[1]
    days_needed = simultanously(current_earth, current_mars)
    # Parenthesized single-expression print is valid Python 2 as well.
    print('Case {}: {}'.format(case_number + 1, days_needed))
| [
"noreply@github.com"
] | noreply@github.com |
7430485eeff81f0ac00f6ea969fd08c817c9590f | 7c4b911a93e00f5e86f2ad077948a2bb0d7230e2 | /newShravan.py | e79e65944f044676fdf99fdf7058dc8ff23b77de | [] | no_license | ShravanKumar-Technology/personalPython | adfe781803cc3571e9f02921bcbce45ce9ef74ce | db48a11ae18b63865af74ad33b8f7c8a7ffad671 | refs/heads/master | 2021-06-30T15:28:14.633003 | 2017-09-20T17:22:59 | 2017-09-20T17:22:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | #!/usr/bin/python3
import random
# 100 lines of "<index>.<random integer 0-10000>" — same text as the
# original string concatenation, built with str.format instead.
for index in range(100):
    print("{}.{}".format(index, random.randint(0, 10000)))
print("shravan")
# 100 lines simply counting 0-99.
for index in range(100):
    print(str(index))
# Tuple-swap demo: print both values before and after swapping.
new = 44
old = 55
print(str(new))
print(str(old))
new, old = old, new
print(str(new))
print(str(old))
| [
"shravan.theobserver@gmail.com"
] | shravan.theobserver@gmail.com |
05075495165fc4ee51bdc62ea55beb4c40e7ae87 | d69e59155c7eb8b692feb927bf12d13260d50ebb | /Admin_app/models.py | d6a0f3b2b7e70f67a2faebc42d4ecd1e3cfafd5e | [] | no_license | susmitha009/Admin-Panel | 8ee905150c84c6a197d77c0ddb03d3c7c81c3de4 | 4eddcfe60f95c07e79d57be7de2fb1ae2f155cc3 | refs/heads/master | 2023-06-22T01:16:14.085588 | 2021-07-17T10:42:54 | 2021-07-17T10:42:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,031 | py | from django.db import models
from django.utils import timezone
from datetime import datetime,date
# Create your models here.
class rider(models.Model):
    """A passenger account; ``email`` is the unique natural key."""
    first_name = models.CharField(max_length=128)
    last_name = models.CharField(max_length=128)
    email = models.EmailField(max_length=254,unique=True)
    # timezone.now is passed uncalled so each row gets its creation time.
    createdAt = models.DateTimeField(default=timezone.now)
    def __str__(self):
        return self.first_name + ' ' + self.last_name
class driver(models.Model):
    """A driver account keyed by unique email; records date of birth."""
    first_name = models.CharField(max_length=128)
    last_name = models.CharField(max_length=128)
    email = models.EmailField(max_length=254,unique=True)
    # date.today is passed uncalled; evaluated when a row is created.
    DateOfBirth = models.DateField(default=date.today)
    def __str__(self):
        return self.first_name + ' ' + self.last_name
class ride(models.Model):
    """A ride record: links a rider to a ride URL with a timestamp.

    NOTE(review): the FK is named ``first_name`` yet references a rider
    row, and ``default=1`` on a URLField is an int default for a text
    column — both look unintended; fixing either requires a migration.
    """
    first_name = models.ForeignKey(rider,default=1,on_delete = models.SET_DEFAULT)
    url = models.URLField(unique=True,default=1)
    date = models.DateTimeField(default=timezone.now)
    # def __str__(self):
    #     return self.Rider
| [
"gurramsusmitha09@gmail.com"
] | gurramsusmitha09@gmail.com |
c84d0e45b5c85963ecda682b917c1efbf1ab79c8 | e3b2451c7693b4cf7d9511094272c3d4f47dedc7 | /BBMS_WEBSITE/Blood_bank/urls.py | 129963318736152c16ed13fa8ae54a76d78b3e0a | [] | no_license | iamyoshita/blood-bank-management-system | 31f25690371ab1a2817a362ce5b8ae68e23dedea | 36b01a3fa26ac7331895a8bfa8eae259bf7dc027 | refs/heads/master | 2020-04-17T13:37:01.889330 | 2019-01-20T04:29:32 | 2019-01-20T04:29:32 | 166,622,906 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | from django.conf.urls import url
from .import views
# Route table for the blood-bank app: donor and receiver list pages.
urlpatterns = [
    #/music/
    url(r'^donor/',views.all_donors,name='index'),
    url(r'^receiver/',views.all_receivers,name='ind'),
    #music/74/
    #url(r'^(?P<album_id>[0-9]+)/$',views.detail,name='detail'),
] | [
"noreply@github.com"
] | noreply@github.com |
465a1a5a3d2c313b84710d1d8fab3bf3f7245d39 | f33d315a8d4cf5e0c62795e48917384bf6542bf7 | /Market/models.py | cfab823031a255bae6a8fd4c864cc4380e052f49 | [] | no_license | Heexi/demo | 858ea81051600154f53f3ac77b129009db39bec7 | 50209fa8bfdffa55b6b9e990861e29aaca93d198 | refs/heads/master | 2020-03-17T10:00:15.215098 | 2018-05-15T09:31:51 | 2018-05-15T09:31:51 | 133,496,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | from django.db import models
# Create your models here.
class Producet(models.Model):
    """Empty placeholder model with no fields defined yet.

    NOTE(review): the name looks like a typo for "Product"; renaming it
    would require a migration, so it is flagged rather than changed.
    """
    pass
| [
"canhuayin@gmail.com"
] | canhuayin@gmail.com |
83503bae694f4bdf6c82b15e366f28e0066e3537 | 93e9bbcdd981a6ec08644e76ee914e42709579af | /depth-first-search/323_Number_of_Connected_Components_in_an_Undirected_Graph.py | 4009e47f32f8f461cc7dbd9fe5c9d094309c835b | [] | no_license | vsdrun/lc_public | 57aa418a8349629494782f1a009c1a8751ffe81d | 6350568d16b0f8c49a020f055bb6d72e2705ea56 | refs/heads/master | 2020-05-31T11:23:28.448602 | 2019-10-02T21:00:57 | 2019-10-02T21:00:57 | 190,259,739 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,112 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
https://leetcode.com/problems/number-of-connected-components-in-an-undirected-graph/description/
Given n nodes labeled from 0 to n - 1 and a list of undirected edges
(each edge is a pair of nodes)
write a function to find the number of
connected components in an undirected graph.
Example 1:
0 3
| |
1 --- 2 4
Given n = 5 and edges = [[0, 1], [1, 2], [3, 4]], return 2.
Example 2:
0 4
| |
1 --- 2 --- 3
Given n = 5 and edges = [[0, 1], [1, 2], [2, 3], [3, 4]], return 1.
Note:
You can assume that no duplicate edges will appear in edges.
Since all edges are undirected,
[0, 1] is the same as [1, 0] and thus will not appear together in edges.
"""
class Solution(object):
    def countComponents(self, n, edges):
        """
        :type n: int
        :type edges: List[List[int]]
        :rtype: int

        Count connected components via DFS: pick any remaining node,
        recursively pop its entire component out of the adjacency map,
        and repeat until the map is empty; each sweep is one component.

        Bug fix: the original used Python-2-only constructs
        (``from __builtin__ import xrange`` and ``dict.keys()[0]``) that
        raise under Python 3; ``range`` and ``next(iter(...))`` behave
        identically on both versions.
        """
        graph = {i: [] for i in range(n)}
        # Build the undirected adjacency map.
        for a, b in edges:
            graph[a].append(b)
            graph[b].append(a)
        def dfs(node):
            # Popping doubles as the "visited" mark and shrinks the map.
            for neighbor in graph.pop(node, []):
                dfs(neighbor)
        components = 0
        while graph:
            dfs(next(iter(graph)))
            components += 1
        return components
    def rewrite(self, n, edges):
        """
        :type n: int
        :type edges: List[List[int]]
        :rtype: int

        Alternate phrasing of countComponents kept for parity with the
        original file; same algorithm and the same Python-3 fixes.
        """
        dmap = {i: [] for i in range(n)}
        for e in edges:
            dmap[e[0]].append(e[1])
            dmap[e[1]].append(e[0])
        def dfs(node):
            for child in dmap.pop(node, []):
                dfs(child)
        cnt = 0
        while dmap:
            cnt += 1
            dfs(next(iter(dmap)))
        return cnt
def build():
    """Return the sample (n, edges) fixture used by the __main__ demo."""
    sample_edges = [[0, 1], [1, 2], [3, 4]]
    return 5, sample_edges
if __name__ == "__main__":
    # Run both implementations against the same fixture; each prints 2.
    solver = Solution()
    print(solver.countComponents(*build()))
    print(solver.rewrite(*build()))
| [
"vsdmars@gmail.com"
] | vsdmars@gmail.com |
01d1c1b4d4b921bf5423193b3bfd373e832d9242 | 9df4063297cfe437bf51d7789b2169820a328c45 | /REQUEST/tycloudrds/request/ModifySQLCollectorPolicyRequest.py | 580bc18945aec15467d91da6c2e6c2ae1c50cef9 | [] | no_license | jiangchengzi/DBRDS | 4b869a32910ca78d4e88530c62f646308e3e2173 | 385287a2caca9a3eaab35b2c138214a6cee82c99 | refs/heads/master | 2021-01-11T12:02:54.878532 | 2017-03-06T02:44:52 | 2017-03-06T02:44:52 | 76,580,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,298 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ModifySQLCollectorPolicyRequest(RpcRequest):
    """RPC wrapper for the RDS ``ModifySQLCollectorPolicy`` API
    (product 'Rds', version 2014-08-15).

    Every method pair below is a plain getter/setter over one query
    parameter; no validation happens here.
    """
    def __init__(self):
        RpcRequest.__init__(self, 'Rds', '2014-08-15', 'ModifySQLCollectorPolicy')
    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')
    def set_OwnerId(self,OwnerId):
        self.add_query_param('OwnerId',OwnerId)
    def get_ResourceOwnerAccount(self):
        return self.get_query_params().get('ResourceOwnerAccount')
    def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')
    def set_ResourceOwnerId(self,ResourceOwnerId):
        self.add_query_param('ResourceOwnerId',ResourceOwnerId)
    def get_ClientToken(self):
        return self.get_query_params().get('ClientToken')
    def set_ClientToken(self,ClientToken):
        self.add_query_param('ClientToken',ClientToken)
    def get_DBInstanceId(self):
        return self.get_query_params().get('DBInstanceId')
    def set_DBInstanceId(self,DBInstanceId):
        self.add_query_param('DBInstanceId',DBInstanceId)
    def get_SQLCollectorStatus(self):
        return self.get_query_params().get('SQLCollectorStatus')
    def set_SQLCollectorStatus(self,SQLCollectorStatus):
        self.add_query_param('SQLCollectorStatus',SQLCollectorStatus)
    def get_OwnerAccount(self):
        return self.get_query_params().get('OwnerAccount')
    def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount) | [
"1025416045@qq.com"
] | 1025416045@qq.com |
1491d9df3950a219fbb0c85065a746b7a79d485b | ecf89f03148a8661d5235d65660820116fab76dc | /src/record.py | f389fe20d355e109bddcd0f423e328fbe15f80f6 | [] | no_license | hvvka/weather-ztd | 7f2d67a61bc5cc11cbc369402601d0f5febe40f8 | 7c1b29f1dceea8428a47cb28c48716d1925cef73 | refs/heads/master | 2020-03-12T10:51:13.005172 | 2018-11-03T19:12:26 | 2018-11-03T19:12:26 | 130,582,673 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | import math
class Record:
    """One meteorological observation used to compute the Zenith Total
    Delay (ZTD).  Field values are stored as given (typically strings)
    and coerced to float where the formulas need numbers."""
    def __init__(self, date, time, station_id, pressure_marker, lat, lon, x_coord, y_coord, aerial_height, wrf_height,
                 temperature, humidity_relative, pressure):
        self.date = date
        self.time = time
        self.station_id = station_id
        self.pressure_marker = pressure_marker  # 2m/interp
        self.lat = lat
        self.lon = lon
        self.x_coord = x_coord
        self.y_coord = y_coord
        self.aerial_height = aerial_height
        self.wrf_height = wrf_height
        self.temperature = temperature
        self.humidity_relative = humidity_relative
        self.pressure = pressure
    def count_ztd(self):
        # Saturation vapour pressure [hPa]; assumes self.temperature is in
        # Kelvin (the -273.15 conversions below) — TODO confirm units.
        e_sat = 6.112 * math.exp(
            (17.67 * (float(self.temperature) - 273.15) / ((float(self.temperature) - 273.15) + 243.5)))
        r = 8.31432  # [N*m/mol*K]
        gamma = 0.0065  # temperature gradient
        # Partial water-vapour pressure from relative humidity (%).
        e = float(self.humidity_relative) * e_sat / 100
        # Gravity corrected for mean height and latitude.
        g = 9.8063 * (1 - pow(10, -7) * (float(self.wrf_height) + float(self.aerial_height)) / 2 * (
                1 - 0.0026373 * math.cos(2 * math.radians(float(self.lat)))) + 5.9 * pow(10, -6) * pow(
            math.cos(2 * math.radians(float(self.lat))), 2))
        m = 0.0289644  # [kg/mol]
        if self.pressure_marker == "2m":
            # Reduce the 2 m pressure to the model height (barometric
            # formula); the "interp" branch uses the value unchanged.
            p = float(self.pressure) * pow((float(self.temperature) - gamma * (
                    float(self.aerial_height) - float(self.wrf_height)) / float(self.temperature)),
                                           g * m / r * gamma)
        else:
            p = float(self.pressure)
        # Delay estimate from pressure and water-vapour terms; the 0.002277
        # constant suggests a Saastamoinen-style model — confirm.
        zdt = 0.002277 * (p + (1255 / float(self.temperature) + 0.05) * e)
        return zdt
    def get_date(self):
        # Concatenates the stored date and time strings with a space.
        return str(self.date + " " + self.time)
| [
"hania.grodzicka@gmail.com"
] | hania.grodzicka@gmail.com |
3b147decd179451541596ab1434b617eb835b122 | 263b1997190f39b4547530ce05e889699a77a922 | /Problems/Birdbox/main.py | 79f91f52053873c2da34409ca1f7e26616faf1b0 | [] | no_license | IgnatIvanov/To-Do_List_JetBrainsAcademy | fa593a29143bf388f085d4ba95713540cd89eeca | 2bc4ed360c41ece09634e72e705dbc257e686958 | refs/heads/master | 2023-03-08T08:25:11.022569 | 2021-02-20T19:28:47 | 2021-02-20T19:28:47 | 339,089,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | # create you classes here
class Animal:
    """Base class of the taxonomy; every animal carries a name."""
    def __init__(self, name):
        self.name = name
class Bird(Animal):
    """Intermediate taxonomy level: any bird."""
    pass
class Pigeon(Bird):
    """Concrete bird species."""
    pass
class Sparrow(Bird):
    """Concrete bird species."""
    pass
| [
"ignativanov1996@mail.ru"
] | ignativanov1996@mail.ru |
dff696a42a1813155fc8fb54de635c1f8f93b539 | 774616d654af798cefdfba85662c98b79d938474 | /Rekog/venv/bin/rst2s5.py | a81547e9cb92f492dcb595235195231532dceb3f | [] | no_license | GreCar05/python | c3a1585940f5d6c8af11172441e7be3b73d19332 | cd5ee27421aff8659f5f02d4c0cce382d3cdb4b5 | refs/heads/main | 2023-03-03T09:47:11.031012 | 2021-02-16T12:12:13 | 2021-02-16T12:12:13 | 339,151,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | #!/home/oficina/PycharmProjects/Rekog/venv/bin/python
# $Id: rst2s5.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Chris Liechti <cliechti@gmx.net>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""
try:
    import locale
    # Best-effort: adopt the user's default locale so output formatting
    # follows it.
    locale.setlocale(locale.LC_ALL, '')
except:
    # Deliberately silent: a missing or broken locale must not stop this
    # command-line front end (stock docutils pattern).
    pass
from docutils.core import publish_cmdline, default_description
description = ('Generates S5 (X)HTML slideshow documents from standalone '
               'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='s5', description=description)
| [
"gregoricar05@gmail.com"
] | gregoricar05@gmail.com |
2fd547723d832790323016a9974bfa1bfc32a049 | 0edf3192cffd37fb4fdbef735d1557b023ac9d8c | /src/collective/recipe/htpasswd/__init__.py | 06bd92693d0d74e734d9459dab39fb65b4e088ea | [] | no_license | nueces/collective.recipe.htpasswd | 4027da5b0274f6c65bc977e990871f3123c92e60 | af48f049557e2c7c411fd77eb5dbd5b0f69de6b8 | refs/heads/master | 2021-01-01T06:54:54.285805 | 2013-01-30T07:45:45 | 2013-01-30T07:45:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,749 | py | # -*- coding: utf-8 -*-
""" collective.recipe.htpasswd
"""
import crypt
import logging
import os
import random
import string
import zc.buildout
class Recipe(object):
    """ This recipe should not be used to update an existing htpasswd file
    because it overwritte the htpasswd file in every update.
    """
    def __init__(self, buildout, name, options):
        self.buildout = buildout
        self.name = name
        self.options = options
        self.logger = logging.getLogger(self.name)
        # Validate the hashing algorithm option (default: crypt).
        supported_algorithms = ('crypt', 'plain')
        if 'algorithm' in options:
            if options['algorithm'].lower() not in supported_algorithms:
                raise zc.buildout.UserError("Currently the only supported "
                                            "method are 'crypt' and 'plain'.")
            else:
                self.algorithm = options['algorithm'].lower()
        else:
            self.algorithm = 'crypt'
        # Validate the output path option.
        if 'output' not in options:
            raise zc.buildout.UserError('No output file specified.')
        elif os.path.isdir(options['output']):
            raise zc.buildout.UserError('The output file specified is an '
                                        'existing directory.')
        elif os.path.isfile(options['output']):
            self.logger.warning('The output file specified exist and is going '
                                'to be overwritten.')
        self.output = options['output']
        # Parse newline-separated "username:password" pairs.
        if 'credentials' not in options:
            raise zc.buildout.UserError('You must specified at lest one pair '
                                        'of credentials.')
        else:
            self.credentials = []
            for credentials in options['credentials'].split('\n'):
                if not credentials:
                    continue
                try:
                    (username, password) = credentials.split(':', 1)
                except ValueError:
                    raise zc.buildout.UserError('Every pair credentials must '
                                                'be separated be a colon.')
                else:
                    self.credentials.append((username, password))
            if not self.credentials:
                raise zc.buildout.UserError('You must specified at lest one '
                                            'pair of credentials.')
        # Optional octal file mode for the generated htpasswd file.
        if 'mode' in options:
            self.mode = int(options['mode'], 8)
        else:
            self.mode = None
    def install(self):
        """ Create the htpasswd file.
        """
        self.mkdir(os.path.dirname(self.output))
        with open(self.output, 'w+') as pwfile:
            for (username, password) in self.credentials:
                pwfile.write("%s:%s\n" % (username, self.mkhash(password)))
        if self.mode is not None:
            os.chmod(self.output, self.mode)
        self.options.created(self.output)
        return self.options.created()
    def update(self):
        """ Every time that the update method is called the htpasswd file is
        overrided.
        """
        return self.install()
    def mkdir(self, path):
        """ Create the path of directories recursively.
        """
        parent = os.path.dirname(path)
        if not os.path.exists(path) and parent != path:
            self.mkdir(parent)
            os.mkdir(path)
            self.options.created(path)
    def salt(self):
        """ Returns a two-character string chosen from the set [a–zA–Z0–9./].
        """
        #FIXME: This method only works for the salt requiered for the crypt
        # algorithm.
        # NOTE(review): uses the non-cryptographic `random` module for the
        # salt; conventional for crypt(3) salts, but worth confirming.
        characters = string.ascii_letters + string.digits + './'
        return random.choice(characters) + random.choice(characters)
    def mkhash(self, password):
        """ Returns a the hashed password as a string.
        """
        # TODO: Add support for MD5 and SHA1 algorithms.
        if self.algorithm == 'crypt':
            if len(password) > 8:
                self.logger.warning((
                    'Only the first 8 characters of the password are '
                    'used to form the password. The extra characters '
                    'will be discarded.'))
            return crypt.crypt(password, self.salt())
        elif self.algorithm == 'md5':
            raise NotImplementedError(
                'The MD5 algorithm has not been implemented yet.')
        elif self.algorithm == 'plain':
            return password
        elif self.algorithm == 'sha1':
            raise NotImplementedError(
                'The SHA1 algorithm has not been implemented yet.')
        else:
            raise ValueError(
                "The algorithm '%s' is not supported." % self.algorithm)
| [
"juan@linux.org.ar"
] | juan@linux.org.ar |
1dedcdb0e23a3b98af243d75c56342783d85e41a | c187465dcc0004e2931de90764cf8ea4657d4355 | /build/ros_controllers/imu_sensor_controller/catkin_generated/pkg.develspace.context.pc.py | 414c3e3e2472fe78aa7e3ab09310110069a6f4e8 | [
"MIT"
] | permissive | team-auto-z/IGVC2019 | 4550e4a6c31dd6f4f7877618ae2fedc3daac9ea7 | 047e3eea7a2bd70f2505844ccc72ae1a2aaa6f2d | refs/heads/master | 2020-04-22T05:13:59.939647 | 2019-08-29T16:40:41 | 2019-08-29T16:40:41 | 170,152,781 | 1 | 3 | MIT | 2019-09-26T17:20:22 | 2019-02-11T15:30:16 | Makefile | UTF-8 | Python | false | false | 660 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Machine-generated catkin package context (see the template path in the
# header comment); all values are baked in at CMake configure time — edit
# the template/CMake configuration, not this file.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ajinkya/catkin_ws/src/ros_controllers/imu_sensor_controller/include".split(';') if "/home/ajinkya/catkin_ws/src/ros_controllers/imu_sensor_controller/include" != "" else []
PROJECT_CATKIN_DEPENDS = "controller_interface;hardware_interface;pluginlib;realtime_tools;roscpp;sensor_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-limu_sensor_controller".split(';') if "-limu_sensor_controller" != "" else []
PROJECT_NAME = "imu_sensor_controller"
PROJECT_SPACE_DIR = "/home/ajinkya/catkin_ws/devel"
PROJECT_VERSION = "0.14.2"
| [
"ajinkyaprabhu97@gmail.com"
] | ajinkyaprabhu97@gmail.com |
20afedb1b001619332e9d7e143861e7ec13ba57a | 45e97bd0c32042504052342bc1ae4e66a30d4d9a | /corepy/chapter13/demo5-trackInstance.py | 34e5d0f1d27f10ca12c701e54330defcbeef7adc | [] | no_license | vonzhou/py-learn | acf20c5183bff9788fcae9e36abdcd6f9bc553da | f0794164105dddbdffe082dfc90520f8778cbec3 | refs/heads/master | 2016-09-10T01:29:30.551541 | 2015-12-08T08:53:46 | 2015-12-08T08:53:46 | 27,669,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py |
''' P343'''
class InsCnt(object):
    """Tracks how many live instances of this class currently exist."""
    count = 0  # count is a class attribute, shared by all instances
    def __init__(self):
        InsCnt.count += 1
    def __del__(self):
        # NOTE: decrements when an instance is destroyed; prompt updates
        # presumably rely on CPython's reference counting — confirm.
        InsCnt.count -= 1
    def howMany(self):
        return InsCnt.count
c1 = InsCnt()
print c1.howMany()
c2 = c1
print c2.howMany()
c3 = InsCnt()
# NOTE(review): the three bare `print howMany()` calls below raise
# NameError — howMany is a method and needs a receiver (e.g. c3.howMany();
# after `del c3` only InsCnt.count remains accessible).  Left unchanged
# pending confirmation of the intended demo output (book example, P343).
print howMany()
del c1
del c2
print howMany()
del c3
print howMany()
# Keep the console window open (Python 2 raw_input).
raw_input()
raw_input()
| [
"vonzhou@163.com"
] | vonzhou@163.com |
509601af0ae5337e7f8b9fc2f49be25dda28dc54 | 4acc08d2c165b5d88119df6bb4081bcfaca684f7 | /PythonPrograms/python_program/multiple_matrix.py | c88486026ddb4fe56be352d8cd4c3a355b0923f6 | [] | no_license | xiaotuzixuedaima/PythonProgramDucat | 9059648f070db7304f9aaa45657c8d3df75f3cc2 | 90c6947e6dfa8ebb6c8758735960379a81d88ae3 | refs/heads/master | 2022-01-16T04:13:17.849130 | 2019-02-22T15:43:18 | 2019-02-22T15:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # Python Program to Multiply Two Matrices ????
# 3x3 matrix product: prints each row of l*m on one line.
l = [[1, 2, 3],
     [2, 3, 4],
     [3, 4, 5]]
m = [[3, 4, 5],
     [5, 6, 7],
     [6, 7, 8]]
for row in range(3):
    for col in range(len(l)):
        # Accumulate the dot product of l's row with m's column.
        cell = 0
        for k in range(len(m)):
            cell += l[row][k] * m[k][col]
        print(cell, end=" ")
    print()
'''
output ==
l = [[1,2,3],
[2,3,4],
[3,4,5]]
m = [[3,4,5],
[5,6,7],
[6,7,8]]
output ==== ***********
l*m = 31 37 43 *
45 54 63 *
59 71 83 *
==== ***********
''' | [
"ss7838094755@gmail.com"
] | ss7838094755@gmail.com |
9da728a81b1289dd5b1b883deefb81f5cce0ee88 | f1932b5fb8f629015b4deeb4dd76255ef3ffc609 | /Scheme Interpreter/TestCases.py | b692923b4fcec15a5abd5a5ddfb5453ab8cd8733 | [
"MIT"
] | permissive | VladiMio/VladiMio_Toys | a54754319866693f99b0c0df0e5a06c477cf5592 | 8c383f6ccd2628c109ca884a51e74a348d565cc8 | refs/heads/master | 2021-01-24T00:02:34.793213 | 2018-02-28T13:40:40 | 2018-02-28T13:40:40 | 122,745,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | import Scheme
# Expected-result table: each Scheme source string maps to the value the
# interpreter should produce (None for definitions, which return nothing).
cases = {
    "(+ 1 2 3 4 5)": 1 + 2 + 3 + 4 + 5,
    "(- 1 2 3 4 5)": 1 - 2 - 3 - 4 - 5,
    "(* 1 2 3 4 5)": 1 * 2 * 3 * 4 * 5,
    "(/ 1 2 3 4 5)": 1 / 2 / 3 / 4 / 5,
    "(define mkact (lambda (balance) (lambda (amt) (begin (set! balance (+ balance amt)) balance))))": None,
    "(define act1 (mkact 100.00))": None,
    "(act1 -20.00)": 80.0,
}
# The closure cases are stateful: mkact/act1 must be defined before
# (act1 -20.00) runs, so insertion order of the dict matters here.
casesList = list(cases.items())
isPass = True
for case in casesList:
    # case is a (source, expected) tuple: case[0] is the program text,
    # case[-1] the expected result.
    res = Scheme.scheme_eval(Scheme.parse(case[0]))
    if res != case[-1]:
        print("case %s failed, output %s" % (case[0], res))
        isPass &= False
    else:
        isPass &= True  # no-op; kept for symmetry with the failure branch
if isPass:
    print("Congratulations! All Test Cases Passed! ")
| [
"jasper1992ws@hotmail.com"
] | jasper1992ws@hotmail.com |
4b6649d161b7f8d7d13610000ae29e0eb231be22 | 13d3ee90ee7ae4cb0e2e7bb3787d448c2ab8335e | /secondPython.py | cd1d06864596390d6010ce72ed051cdab3d1a29f | [] | no_license | kevf92/Test_Repository | ffe3f77fed76ffd4c4af97621c3efc4783c5419e | c61d2cc34ab798e887a2a5bb35fb6448dfe7fd71 | refs/heads/master | 2022-12-02T17:32:13.211413 | 2020-08-20T19:39:17 | 2020-08-20T19:39:17 | 289,077,315 | 0 | 0 | null | 2020-08-20T18:27:42 | 2020-08-20T18:14:57 | Python | UTF-8 | Python | false | false | 22 | py | print("Child Python")
| [
"noreply@github.com"
] | noreply@github.com |
2b3e3404a0d221b47e9e46a5d715af0f6a0449be | 25b6a0a9d5e9c17fcf9164444bfbde411a313e8e | /pendu.py | 4eeffcb8616257a5b6ae90ea2b0ad55272a89dde | [] | no_license | kamelh1972/Pendu.Kamel | 95dacbd147b538c352deff352a722247112bfc80 | e608e3c9eddac8fadbe4548eff3002a5ada3da57 | refs/heads/master | 2020-09-16T13:18:07.934177 | 2019-12-10T14:02:41 | 2019-12-10T14:02:41 | 223,782,121 | 0 | 0 | null | 2019-12-10T14:02:43 | 2019-11-24T17:23:37 | null | UTF-8 | Python | false | false | 710 | py | from fonction import *
import donnee
# Hangman game driver.  The French UI strings are part of the program's
# output and are therefore left untranslated.
nom = input("Entrez votre nom:\n")
# 'global' at module level is a no-op; these names are module globals anyway.
global gamePlayer
global gameScore
gamePlayer = nom
gameScore = 0
win = False
check_players(nom)
# hide_word() apparently returns a mapping of letter -> guessed flag;
# confirm against fonction.py.
word = choose_word()
hidden_word = hide_word(word)
tentative = donnee.chance;
while tentative > 0 :
    show_hidden_word(hidden_word)
    lettre = input_lettre()
    # Reveal every occurrence of the guessed letter.
    for key in hidden_word.keys():
        if lettre == key :
            hidden_word[key] = True
    # NOTE(review): this loop declares a win (and prints once per revealed
    # letter, every turn) as soon as ANY letter is True, instead of checking
    # that ALL letters are revealed; an all(...) check plus a break would
    # match the expected hangman rules.  Left unchanged pending confirmation.
    for value in hidden_word.values():
        if value == False :
            continue
        else:
            print("Gagné! Votre score: {}".format(tentative))
            win = True
    tentative -= 1
if win == False :
    print("Perdu! Le mot était {}".format(word))
| [
"kamelhammiche44@gmail.com"
] | kamelhammiche44@gmail.com |
4f2721593ab30e86c52f68296de1168474b86e3c | 767b70f3e9ba11751a459b31547b1c6e5a50081f | /authenticationSystem/migrations/0008_auto_20210518_2324.py | 38246bb0eb9c834f0d6f70c287dc47555f1f3a77 | [] | no_license | RidwanShihab/marketms | 7dd226e29a05c8363d2dbb772567707ba4f3e01a | 83949018bfa01509416dbd41a3b37e8769401f71 | refs/heads/master | 2023-04-26T09:39:20.539793 | 2021-05-18T19:03:18 | 2021-05-18T19:03:18 | 358,003,644 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | # Generated by Django 3.1.7 on 2021-05-18 17:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: repoints ``bill.biller`` at the active
    user model (``settings.AUTH_USER_MODEL``) with CASCADE on delete."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('authenticationSystem', '0007_bill'),
    ]
    operations = [
        migrations.AlterField(
            model_name='bill',
            name='biller',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"ridwanshihab14466@gmail.com"
] | ridwanshihab14466@gmail.com |
a7722afc93933b786c47f0b3f10cfced51515f8b | 13bc12cadea3f2b34e9710e773f77fcc6cf96927 | /password_generator/settings.py | fe470eb8f0fc34eba3ed766f754ca41d7ec5256f | [] | no_license | henryyeh802/django-password-generator | 161e45c6d1c0271918fc81f3acfb7b3dd991f78e | 6399e98342c329769002dcd14e23d6065a73d4ba | refs/heads/master | 2022-04-23T23:17:20.793177 | 2020-04-29T19:51:40 | 2020-04-29T19:51:40 | 260,029,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,141 | py | """
Django settings for password_generator project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded and committed; for any deployment it
# should be read from the environment instead.
SECRET_KEY = 'krc=&35wailj68f!wvo7gssb5g_p*a#)7i1p!0hom%5vu6&1p@'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project app containing the password generator views.
    'generator',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'password_generator.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        # Templates are discovered inside each installed app's templates/ dir.
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'password_generator.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_URL = '/static/'
| [
"henryyeh802@gmail.com"
] | henryyeh802@gmail.com |
b6734e8581b07bdd28ff0446fc0acb52c83c0fa7 | e103af3b6d42ef6782ed34526bdeffa36d56776b | /solver.py | 1ff78419b5dc07b7fc35220dc54c48740277f928 | [] | no_license | Alva-2020/mipae | b9b294f1cf91583f7645c80e5d72db0f103fbdc3 | b0dd410305ab1162193f2e80a3c7d77665080e5d | refs/heads/master | 2022-12-30T06:41:28.803783 | 2020-10-15T13:40:21 | 2020-10-15T13:40:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,461 | py | import os
import glob
import math
import sys
import itertools
import random
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as dist
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import models
import resnet_64
import resnet_128
import vgg_128
import vgg_64
from lstm import lstm
from moving_mnist import MovingMNIST
from moving_dsprites import MovingDSprites
from mpi3d_toy import Mpi3dReal
import utils
import critics
import mi_estimators
class Solver(object):
    """Base training harness.

    Owns the networks, optimizers, data generators, checkpointing and
    TensorBoard writers.  Subclasses must implement ``train_step`` and
    ``eval_step``.
    """

    def __init__(self, nets, optims, args, estimators=None, extra_keys=None):
        """
        nets / optims / estimators: dicts mapping name -> module/optimizer.
        args: an argparse-style namespace; every attribute is copied onto self.
        extra_keys: dict of attribute-name -> default value, persisted in
        checkpoints.
        """
        # Fix: the original used mutable default arguments ({}), which are
        # shared across all instances; use None sentinels instead.
        estimators = {} if estimators is None else estimators
        extra_keys = {} if extra_keys is None else extra_keys
        torch.set_default_dtype(torch.float32)
        self.cpu = torch.device('cpu')
        # Mirror every CLI argument as an instance attribute (self.dataset, ...).
        args = args.__dict__
        for key in args:
            setattr(self, key, args[key])
        self.nets = nets
        self.estimators = estimators
        self.optims = optims
        self.extra_keys = extra_keys
        # Must happen before DataParallel wrapping: checkpoints store the raw
        # module state dicts.
        self.load_checkpoint_or_initialize(extra_keys)
        for name in self.nets:
            self.nets[name] = nn.DataParallel(self.nets[name])
        if self.dataset == "mnist":
            train_data = MovingMNIST(True, self.data_root, seq_len = self.input_frames + self.target_frames, color = self.color, deterministic = self.deterministic)
            test_data = MovingMNIST(False, self.data_root, seq_len = self.input_frames + self.target_frames, color = self.color, deterministic = self.deterministic)
        elif self.dataset == "dsprites":
            train_data = MovingDSprites(True, self.data_root, seq_len = self.input_frames + self.target_frames, color = self.color, rotate = self.rotate_sprites, deterministic = self.deterministic)
            test_data = MovingDSprites(False, self.data_root, seq_len = self.input_frames + self.target_frames, color = self.color, rotate = self.rotate_sprites, deterministic = self.deterministic)
        elif self.dataset == "mpi3d_real":
            train_data = Mpi3dReal(True, self.data_root, seq_len = self.input_frames + self.target_frames, deterministic = self.deterministic)
            test_data = Mpi3dReal(False, self.data_root, seq_len = self.input_frames + self.target_frames, deterministic = self.deterministic)
        else:
            raise NotImplementedError()
        self.dataset_len = len(train_data)
        train_loader = DataLoader(train_data, batch_size = self.batch_size, shuffle = True, num_workers = 5, drop_last=True)
        test_loader = DataLoader(test_data, batch_size = self.batch_size, shuffle = True, num_workers = 5, drop_last=True)

        def get_training_batch():
            # Endless generator over the loader; the in-place transposes move
            # the channel axis in front of the spatial axes for each sequence.
            while True:
                for sequence in train_loader:
                    sequence.transpose_(3, 4).transpose_(2, 3)
                    yield sequence

        def get_test_batch():
            while True:
                for sequence in test_loader:
                    sequence.transpose_(3, 4).transpose_(2, 3)
                    yield sequence

        self.train_generator = get_training_batch()
        self.test_generator = get_test_batch()
        eval_dir = os.path.join(self.log_dir, "eval")
        self.train_summary_writer = SummaryWriter(log_dir = self.log_dir)
        self.test_summary_writer = SummaryWriter(log_dir = eval_dir)
        # Write a hyperparameter summary so each run is self-describing.
        for name in args:
            self.train_summary_writer.add_text("Hyperparameters/" + name, str(args[name]))

    def set_mode(self, mode):
        """Switch every network to train() or eval() mode."""
        if mode == "train":
            for net in self.nets:
                self.nets[net].train()
        else:
            for net in self.nets:
                self.nets[net].eval()

    def load_checkpoint_or_initialize(self, extra_keys):
        """Resume from the newest checkpoint if one exists, else init weights.

        extra_keys is a dict of attribute-name -> default value.
        """
        chkp_files = sorted(glob.glob(self.log_dir + "/" + self.name + r"-*.pth"), key = os.path.getmtime, reverse = True)
        checkpoint = None
        if chkp_files:
            # Newest file first; load onto CPU, DataParallel moves it later.
            checkpoint = torch.load(chkp_files[0], map_location = self.cpu)
        if checkpoint:
            for name in self.nets:
                self.nets[name].load_state_dict(checkpoint[name])
            for name in self.estimators:
                self.estimators[name].load_state_dict(checkpoint[name])
            for name in self.optims:
                self.optims[name].load_state_dict(checkpoint[name])
            for name in extra_keys:
                setattr(self, name, checkpoint[name])
            self.global_itr = checkpoint["global_itr"]
        else:
            for name in extra_keys:
                setattr(self, name, extra_keys[name])
            for name in self.nets:
                self.nets[name].apply(utils.init_weights)
            self.global_itr = 0

    def save_checkpoint(self, extra_keys=()):
        """Serialize nets, estimators, optimizers and extra attributes.

        Keeps at most ``self.max_checkpoints`` files, pruning the oldest.
        """
        checkpoint = {"global_itr": self.global_itr}
        for name in self.nets:
            # .module strips the DataParallel wrapper.
            checkpoint[name] = self.nets[name].module.state_dict()
        for name in self.estimators:
            checkpoint[name] = self.estimators[name].state_dict()
        for name in self.optims:
            checkpoint[name] = self.optims[name].state_dict()
        for name in extra_keys:
            checkpoint[name] = getattr(self, name)
        chkp_files = sorted(glob.glob(self.log_dir + "/" + self.name + r"-*.pth"), key = os.path.getmtime, reverse = True)
        # Fix: use >= instead of == so pruning still happens if the directory
        # already holds more files than the limit.
        if len(chkp_files) >= self.max_checkpoints:
            os.remove(chkp_files[-1])
        chkp_path = self.log_dir + "/" + self.name + "-" + str(self.global_itr) + ".pth"
        torch.save(checkpoint, chkp_path)

    def train(self):
        """Main loop: epochs of train_step, then eval_step and checkpointing."""
        while self.global_itr < self.niters:
            self.set_mode("train")
            for i in tqdm(range(self.epoch_size), desc = "[" + str(self.global_itr) + "/" + str(self.niters) + "]"):
                # Emit TensorBoard summaries only on the first batch of the
                # epoch, every summary_freq epochs.
                self.train_step(summary = (i == 0 and self.global_itr % self.summary_freq == 0))
            self.set_mode("eval")
            self.eval_step()
            if self.global_itr % self.checkpoint_freq == 0:
                self.save_checkpoint(self.extra_keys)
            self.global_itr += 1
        self.save_checkpoint(self.extra_keys)

    def train_step(self, summary=False):
        """One optimization step; must be implemented by subclasses."""
        raise NotImplementedError()

    def eval_step(self):
        """One evaluation pass; must be implemented by subclasses."""
        raise NotImplementedError()
class SolverAutoencoder(Solver):
    """Disentangled content/pose autoencoder (DrNet-style) trainer.

    Trains a content encoder, a pose encoder and a decoder jointly with a
    scene discriminator that penalizes pose features carrying content
    information.
    """

    def __init__(self, args):
        # Autoencoder training always uses deterministic sequences.
        args.deterministic = True
        # Pick encoder/decoder architectures per dataset.
        if args.dataset in ["mnist", "dsprites"]:
            content_encoder = models.content_encoder(args.g_dims, nc = args.num_channels).cuda()
            position_encoder = models.pose_encoder(args.z_dims, nc = args.num_channels, normalize = args.normalize_position).cuda()
        else:
            content_encoder = vgg_64.encoder(args.g_dims, nc = args.num_channels).cuda()
            position_encoder = resnet_64.pose_encoder(args.z_dims, nc = args.num_channels).cuda()
        if args.dataset == "mpi3d_real":
            decoder = vgg_64.drnet_decoder(args.g_dims, args.z_dims, nc = args.num_channels).cuda()
        else:
            decoder = models.decoder(args.g_dims, args.z_dims, nc = args.num_channels, skips = args.skips).cuda()
        # Number of frames fed to the content encoder (1, or the whole input
        # window when an LSTM aggregates content over time).
        self.content_frames = 1
        if args.content_lstm:
            content_encoder = models.content_encoder_lstm(args.g_dims, content_encoder, args.batch_size)
            self.content_frames = args.input_frames
        discriminator = models.scene_discriminator(args.z_dims).cuda()
        nets = {
            "content_encoder": content_encoder,
            "position_encoder": position_encoder,
            "decoder": decoder,
            "discriminator": discriminator,
        }
        # Encoders and decoder share one optimizer; the discriminator has its
        # own (adversarial setup).
        self.encoder_decoder_parameters = itertools.chain(*[
            content_encoder.parameters(),
            position_encoder.parameters(),
            decoder.parameters(),
        ])
        encoder_decoder_optim = torch.optim.Adam(
            self.encoder_decoder_parameters,
            lr = args.lr,
            betas = (args.beta1, 0.999)
        )
        discriminator_optim = torch.optim.Adam(
            discriminator.parameters(),
            lr = args.lr,
            betas = (args.beta1, 0.999)
        )
        optims = {
            "encoder_decoder_optim": encoder_decoder_optim,
            "discriminator_optim": discriminator_optim,
        }
        super().__init__(nets, optims, args)

    def train_step(self, summary=False):
        Ec = self.nets["content_encoder"]
        Ep = self.nets["position_encoder"]
        D = self.nets["decoder"]
        C = self.nets["discriminator"]
        encoder_decoder_optim = self.optims["encoder_decoder_optim"]
        discriminator_optim = self.optims["discriminator_optim"]
        # ---- Phase 1: train the discriminator on detached pose codes. ----
        x = next(self.train_generator).cuda().transpose(0, 1)
        h_p1 = Ep(x[random.randint(0, self.input_frames + self.target_frames - 1)]).detach()
        h_p2 = Ep(x[random.randint(0, self.input_frames + self.target_frames - 1)]).detach()
        # Shuffled pairs act as the "different scene" negatives.
        rp = torch.randperm(self.batch_size).cuda()
        h_p2_perm = h_p2[rp]
        out_true = C([h_p1, h_p2])
        out_false = C([h_p1, h_p2_perm])
        if self.sd_loss == "emily":
            disc_loss = mi_estimators.discriminator_loss(out_true, out_false)
        elif self.sd_loss == "js":
            disc_loss = -1 * mi_estimators.js_fgan_lower_bound(out_true, out_false)
        elif self.sd_loss == "smile":
            disc_loss = -1 * mi_estimators.smile_lower_bound(out_true, out_false)
        else:
            raise NotImplementedError()
        discriminator_optim.zero_grad()
        disc_loss.backward()
        if summary:
            utils.log_gradients(C, self.train_summary_writer, global_step = self.global_itr)
        discriminator_optim.step()
        if summary:
            self.train_summary_writer.add_scalar("discriminator_loss", disc_loss, global_step = self.global_itr)
        # ---- Phase 2: train encoders + decoder on a fresh batch. ----
        k = random.randint(self.content_frames, self.input_frames + self.target_frames - self.content_frames)
        x = next(self.train_generator).cuda().transpose(0, 1)
        if self.dataset != "lpc":
            x_c1 = x[0:self.content_frames].squeeze(0)
            x_c2 = x[k:(k + self.content_frames)].squeeze(0)
        else:
            x_c1 = x[k:(k + self.content_frames)].squeeze(0)
            x_c2 = x[0:self.content_frames].squeeze(0)
        x_p1 = x[k]
        x_p2 = x[random.randint(0, self.input_frames + self.target_frames - 1)]
        h_content, skips = Ec(x_c1)
        # Second content code is detached: similarity loss only pulls the
        # first towards it.
        h_content_1 = Ec(x_c2)[0].detach()
        h_position = Ep(x_p1)
        h_position_1 = Ep(x_p2).detach()
        # Content codes from different frames of the same clip should match.
        sim_loss = F.mse_loss(h_content, h_content_1)
        x_rec = D([[h_content, skips], h_position])
        if self.recon_loss_type == "mse":
            rec_loss = F.mse_loss(x_rec, x_p1)
        elif self.recon_loss_type == "l1":
            rec_loss = F.l1_loss(x_rec, x_p1)
        # Adversarial term: the pose encoder tries to defeat the scene
        # discriminator so pose codes carry no content information.
        if self.sd_loss == "emily":
            out = C([h_position, h_position_1])
            sd_loss = mi_estimators.emily_sd_loss(out)
        else:
            rp = torch.randperm(self.batch_size).cuda()
            h_p2_perm = h_position_1[rp]
            out_true = C([h_position, h_position_1])
            out_false = C([h_position, h_p2_perm])
            if self.sd_loss == "js":
                sd_loss = mi_estimators.js_mi_lower_bound(out_true, out_false)
            elif self.sd_loss == "smile":
                sd_loss = mi_estimators.smile_mi_lower_bound(out_true, out_false)
            else:
                raise NotImplementedError()
        loss = self.sim_weight * sim_loss + rec_loss + self.sd_weight * sd_loss
        encoder_decoder_optim.zero_grad()
        loss.backward()
        if summary:
            utils.log_gradients(Ec, self.train_summary_writer, global_step = self.global_itr)
            utils.log_gradients(Ep, self.train_summary_writer, global_step = self.global_itr)
            utils.log_gradients(D, self.train_summary_writer, global_step = self.global_itr)
        encoder_decoder_optim.step()
        if summary:
            self.train_summary_writer.add_images("predicted_image", x_rec[:10], global_step = self.global_itr)
            self.train_summary_writer.add_images("target_image", x_p1[:10], global_step = self.global_itr)
            self.train_summary_writer.add_scalar("sim_loss", sim_loss, global_step = self.global_itr)
            self.train_summary_writer.add_scalar("sd_loss", sd_loss, global_step = self.global_itr)
            self.train_summary_writer.add_scalar("recon_loss", rec_loss, global_step = self.global_itr)
            self.train_summary_writer.add_scalar("total_loss", loss, global_step = self.global_itr)

    def eval_step(self):
        """Log a content/pose analogy grid and a reconstruction strip."""
        Ec = self.nets["content_encoder"]
        Ep = self.nets["position_encoder"]
        D = self.nets["decoder"]
        with torch.autograd.no_grad():
            # Checking disentanglement: content from one clip, pose from another.
            x_source = next(self.test_generator).cuda()[:10].transpose(0, 1)
            x_target = next(self.test_generator).cuda()[:10].transpose(0, 1)
            h_c = Ec(x_target[0:self.content_frames].squeeze(0))
            position_list = []
            for i in range(self.input_frames, self.input_frames + self.target_frames):
                h_p = Ep(x_source[i])
                # Pose of the first source sample only, with a new batch axis.
                position_list.append(h_p[0][None])
            x_source = x_source[:, 0][:, None]
            generated_images = []
            for h_p in position_list:
                # Same pose replicated across the 10 content codes.
                x_pred = D([h_c, torch.cat([h_p] * 10, 0)])
                generated_images.append(x_pred)
            generated_images = torch.cat([x_target[self.input_frames - 1]] + generated_images, dim = 3)
            generated_images = list(map(lambda x: x.squeeze(0), generated_images.split(1, 0)))
            generated_images = torch.cat(generated_images, dim = 1)
            source_images = list(map(lambda x: x.squeeze(0), x_source[self.input_frames:(self.input_frames + self.target_frames), 0].split(1, 0)))
            source_images = torch.cat([torch.zeros_like(x_source[0, 0])] + source_images, dim = 2)
            analogy_image = torch.cat([source_images, generated_images], dim = 1)
            self.test_summary_writer.add_image("analogy_test", analogy_image, global_step = self.global_itr)
            # Checking reconstruction of two frames from the same clip.
            x = next(self.test_generator).cuda()[:10].transpose(0, 1)
            k = random.randint(1, self.input_frames + self.target_frames - 1)
            h_c = Ec(x[0:self.content_frames].squeeze(0))
            h_p_1 = Ep(x[1])
            h_p_2 = Ep(x[k])
            x_pred_1 = D([h_c, h_p_1])
            x_pred_2 = D([h_c, h_p_2])
            rec_image = torch.cat([x[0], x_pred_1, x_pred_2], 3)
            self.test_summary_writer.add_images("rec_test", rec_image, global_step = self.global_itr)
class SolverLstm(Solver):
    """Trains an LSTM to predict future pose codes.

    The content/pose encoders and the decoder are loaded frozen-in-spirit
    from a pretrained autoencoder checkpoint; only the LSTM is optimized.
    """

    def __init__(self, args):
        args.deterministic = True
        encoder_checkpoint = torch.load(args.encoder_checkpoint)
        # Architectures must match the ones used when the checkpoint was made.
        if args.dataset in ["mnist", "dsprites"]:
            Ec = models.content_encoder(args.g_dims, nc = args.num_channels).cuda()
            Ep = models.pose_encoder(args.z_dims, nc = args.num_channels).cuda()
        else:
            Ec = vgg_64.encoder(args.g_dims, nc = args.num_channels).cuda()
            Ep = resnet_64.pose_encoder(args.z_dims, nc = args.num_channels).cuda()
        if args.dataset == "mpi3d_real":
            D = vgg_64.drnet_decoder(args.g_dims, args.z_dims, nc = args.num_channels).cuda()
        else:
            D = models.decoder(args.g_dims, args.z_dims, nc = args.num_channels, skips = args.skips).cuda()
        Ep.load_state_dict(encoder_checkpoint["position_encoder"])
        Ec.load_state_dict(encoder_checkpoint["content_encoder"])
        D.load_state_dict(encoder_checkpoint["decoder"])
        # Kept outside self.nets so the base class does not re-initialize,
        # checkpoint, or optimize them.
        self.Ep = nn.DataParallel(Ep)
        self.Ec = nn.DataParallel(Ec)
        self.D = nn.DataParallel(D)
        self.Ep.train()
        self.Ec.train()
        self.D.train()
        lstm_model = lstm(args.g_dims + args.z_dims, args.z_dims, args.rnn_size, args.rnn_layers, args.batch_size).cuda()
        nets = {"lstm": lstm_model}
        lstm_optim = torch.optim.Adam(
            lstm_model.parameters(),
            lr = args.lr,
            betas = (args.beta1, 0.999)
        )
        optims = {"lstm_optim": lstm_optim}
        super().__init__(nets, optims, args)

    def train_step(self, summary=False):
        lstm_model = self.nets["lstm"]
        lstm_optim = self.optims["lstm_optim"]
        hidden = lstm_model.module.init_hidden()
        x = next(self.train_generator).cuda().transpose(0, 1)
        # Encoder outputs are detached: only the LSTM receives gradients.
        h_c = self.Ec(x[self.input_frames - 1])[0].detach()
        h_p = [self.Ep(x[i]).detach() for i in range(self.input_frames + self.target_frames)]
        mse = 0
        # Teacher forcing: feed the true previous pose at every step.
        for i in range(1, self.input_frames + self.target_frames):
            pose_pred, hidden = lstm_model(torch.cat([h_p[i - 1], h_c], 1), hidden)
            mse += F.mse_loss(pose_pred, h_p[i])
        lstm_optim.zero_grad()
        mse.backward()
        if summary:
            utils.log_gradients(lstm_model, self.train_summary_writer, global_step = self.global_itr)
        lstm_optim.step()
        if summary:
            self.train_summary_writer.add_scalar("mse_loss", mse, global_step = self.global_itr)

    def eval_step(self):
        """Decode one-step pose predictions to video and log target vs. prediction."""
        x = next(self.test_generator).cuda().transpose(0, 1)
        lstm_model = self.nets["lstm"]
        with torch.autograd.no_grad():
            hidden = lstm_model.module.init_hidden()
            h_c = self.Ec(x[self.input_frames - 1])
            h_p = [self.Ep(x[i]) for i in range(self.input_frames + self.target_frames - 1)]
            # h_p_pred = [lstm_model(torch.cat([pose,h_c[0]],1)) for pose in h_p]
            h_p_pred = []
            for pose in h_p:
                pose_pred, hidden = lstm_model(torch.cat([pose, h_c[0]], 1), hidden)
                h_p_pred.append(pose_pred)
            x_pred = [self.D([h_c, pose]) for pose in h_p_pred]
            x_pred = torch.stack(x_pred, 0)
            # Only the first 5 sequences are logged to keep the video small.
            self.test_summary_writer.add_video("target_video", x[1:, :5].transpose(0, 1), global_step = self.global_itr)
            self.test_summary_writer.add_video("predicted_video", x_pred[:, :5].transpose(0, 1), global_step = self.global_itr)
| [
"paditya.sreekar@research.iiit.ac.in"
] | paditya.sreekar@research.iiit.ac.in |
943dcb14ae7fae3a2b249ac14791e8426eeb8f08 | 3fde00076201d4c0ca3e31681b050a9de053bb5c | /2016/task_1/process_zips.py | 1f494a68a967ba8cf8d172da316906d8248f48a8 | [] | no_license | atuchak/interviews | 93e8b7b1786b4c13c222d3984bf85ef7e4c6fdd5 | 31f52d3e5b7ef9f6e2e3dd9b804ded58665f3598 | refs/heads/master | 2020-05-25T17:14:40.219658 | 2018-10-05T15:55:13 | 2018-10-05T15:55:13 | 84,949,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,625 | py | import csv
import os
import logging
import multiprocessing
import queue
from multiprocessing import Lock, Value
from lxml import etree
from multiprocessing import Queue
from multiprocessing import Process
from zipfile import ZipFile
# Directory that holds the input *.zip archives and receives the CSV output.
OUTPUT_DIR = '/tmp/ngenix'
# One worker process per CPU core.
CONCURRENCY = multiprocessing.cpu_count()
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
def writer1(writer1_queue, num_of_active_workers):
    """Single consumer process: writes (id, level) rows to OUTPUT_DIR/1.csv.

    Runs until the queue is drained and every worker has decremented
    ``num_of_active_workers`` to zero.
    """
    # newline='' is required by the csv module to avoid blank lines on Windows.
    with open(os.path.join(OUTPUT_DIR, '1.csv'), 'w', newline='') as f:
        csv_writer = csv.writer(f)
        while True:
            try:
                # Fix: the original spun on get_nowait(), pegging a CPU core
                # for the whole run.  A short blocking get waits cheaply.
                data = writer1_queue.get(timeout=0.1)
            except queue.Empty:
                # Nothing arrived within the timeout and all workers are
                # done, so the queue is drained.
                if num_of_active_workers.value == 0:
                    break
            else:
                csv_writer.writerow([data['id'], data['level']])
    log.info('Writer1 is tearing down')
def writer2(writer2_queue, num_of_active_workers):
    """Single consumer process: writes one (id, object name) row per object
    to OUTPUT_DIR/2.csv.

    Runs until the queue is drained and every worker has decremented
    ``num_of_active_workers`` to zero.
    """
    # newline='' is required by the csv module to avoid blank lines on Windows.
    with open(os.path.join(OUTPUT_DIR, '2.csv'), 'w', newline='') as f:
        csv_writer = csv.writer(f)
        while True:
            try:
                # Fix: the original spun on get_nowait(), pegging a CPU core
                # for the whole run.  A short blocking get waits cheaply.
                data = writer2_queue.get(timeout=0.1)
            except queue.Empty:
                if num_of_active_workers.value == 0:
                    break
            else:
                for name in data['object_names']:
                    csv_writer.writerow([data['id'], name])
    log.info('Writer2 is tearing down')
def parse_xml(content):
    """Extract the id, level and object names from one XML document.

    Returns a dict {'id', 'level', 'object_names'} or None when the
    expected vars/attributes are missing.
    """
    root = etree.fromstring(content)
    try:
        # First //var element carrying each name; missing element -> IndexError,
        # missing attribute -> KeyError.
        doc_id = root.xpath("//var[@name='id']")[0].attrib['value']
        doc_level = root.xpath("//var[@name='level']")[0].attrib['value']
        object_names = [obj.attrib['name'] for obj in root.xpath("//objects/*")]
    except (IndexError, KeyError):
        log.error('var tag with value not found')
        return None
    return {'id': doc_id, 'level': doc_level, 'object_names': object_names}
def worker(zips_queue, writer1_queue, writer2_queue, num_of_active_workers, lock):
    """Worker process: pulls zip file names, parses every XML inside, and
    forwards each parsed record to both writer queues.

    Exits when the (pre-filled) zips queue is empty.
    """
    try:
        while True:
            try:
                file = zips_queue.get_nowait()
            except queue.Empty:
                # The queue is filled once before the workers start, so
                # Empty means there is no more work.
                break
            log.info('Processing {}'.format(file))
            with ZipFile(os.path.join(OUTPUT_DIR, file), 'r') as myzip:
                for xml_file in myzip.filelist:
                    content = myzip.read(xml_file)
                    res = parse_xml(content)
                    if res:
                        writer1_queue.put_nowait(res)
                        writer2_queue.put_nowait(res)
    finally:
        # Fix: the original only decremented on queue.Empty, so any error
        # while reading a zip left the counter high and both writer
        # processes looping forever.  Always decrement on exit.
        with lock:
            num_of_active_workers.value -= 1
    log.info('Worker is tearing down')
def main():
    """Fan out zip processing: N worker processes feed two CSV writers."""
    writer1_queue = Queue()
    writer2_queue = Queue()
    zips_queue = Queue()
    lock = Lock()
    # Shared countdown of live workers; the writers stop when it hits 0.
    num_of_active_workers = Value('i', CONCURRENCY)
    # The work queue is fully loaded before any worker starts, so an empty
    # queue unambiguously means "done" to the workers.
    files = [f for f in os.listdir(OUTPUT_DIR) if f.endswith('.zip')]
    for f in files:
        zips_queue.put(f)
    writer1_process = Process(target=writer1, args=(writer1_queue, num_of_active_workers,))
    writer1_process.start()
    writer2_process = Process(target=writer2, args=(writer2_queue, num_of_active_workers,))
    writer2_process.start()
    workers_list = []
    for _ in range(CONCURRENCY):
        p = Process(target=worker, args=(zips_queue, writer1_queue, writer2_queue, num_of_active_workers, lock,))
        p.start()
        workers_list.append(p)
    # Writers exit only after the worker counter reaches zero, so joining
    # them first is safe; then reap the workers.
    writer1_process.join()
    writer2_process.join()
    for p in workers_list:
        p.join()
    writer1_queue.close()
    writer2_queue.close()
    zips_queue.close()
# Standard script entry point guard (also required for multiprocessing on
# platforms that spawn rather than fork).
if __name__ == '__main__':
    main()
| [
"anton.tuchak@gmail.com"
] | anton.tuchak@gmail.com |
8497d01d1f5416935e74935fb46785eb6e1d41b2 | 572efedf46f32fd65802433e9464f18713a8c06b | /AutoComplete/wsgi.py | c404762a04b1adeffdabdf90afbd2d0afbed34a5 | [] | no_license | kumarshubham77/AutoComplete | e17dca3346f9a668570931e7e48f54cf6afb3355 | 884ed4cdcd996cee3f3564b734811e36e91896c7 | refs/heads/master | 2020-05-02T19:19:09.959715 | 2019-03-28T14:09:15 | 2019-03-28T14:09:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | """
WSGI config for AutoComplete project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module (unless already set) before
# building the WSGI callable that servers import.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AutoComplete.settings')
application = get_wsgi_application()
| [
"noreply@github.com"
] | noreply@github.com |
fb0b82ee3ca997fabc248162d255f49ad5f9061c | 34294f487c203fcbc730b27567ed76f85d7b26d1 | /public_html/betterchecks_app/migrations/0006_auto_20201102_1644.py | 2955e8921bb89b3077cc0acbdc5ecf7083326d3a | [] | no_license | KeitaForCode/betachecks | 917293cbbf42b168a994233dc3b9abb4ef87e3a5 | f626ac5050718b591ade5008c35f3fbf33fa05fe | refs/heads/main | 2023-02-06T10:31:24.441075 | 2020-12-29T22:13:52 | 2020-12-29T22:13:52 | 325,387,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | # Generated by Django 3.0.3 on 2020-11-02 15:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: switches three phone fields to PositiveIntegerField.

    NOTE(review): storing phone numbers as integers drops leading zeros and
    rejects '+'/extension formats — confirm this is intentional.
    """

    dependencies = [
        ('betterchecks_app', '0005_auto_20201101_2134'),
    ]
    operations = [
        migrations.AlterField(
            model_name='contacts',
            name='phone',
            field=models.PositiveIntegerField(),
        ),
        migrations.AlterField(
            model_name='courseenquiry',
            name='phone',
            field=models.PositiveIntegerField(),
        ),
        migrations.AlterField(
            model_name='enquiry',
            name='phone',
            field=models.PositiveIntegerField(),
        ),
    ]
| [
"betachecksafrica@gmail.com"
] | betachecksafrica@gmail.com |
3b3b3bf3b3597fb177e2f34793128b23a711841e | 25210643406339fcd3383ecc355ed6c05fb0ad93 | /python/sparktestingbase/test/helloworld_test.py | 8d0b014ea20fa9b2dc2874eaaf41021813e58a24 | [
"Apache-2.0"
] | permissive | juanrh/spark-testing-base | f685e8beb3626f39459457de95ff295ac3348615 | afaf6fe11f22b7af9492a202dbfd2d62208f1995 | refs/heads/master | 2021-04-26T07:46:03.614212 | 2015-06-23T13:51:32 | 2015-06-23T13:51:32 | 31,416,073 | 0 | 0 | null | 2015-02-27T11:17:53 | 2015-02-27T11:17:52 | null | UTF-8 | Python | false | false | 1,132 | py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple test example"""
from sparktestingbase.testcase import SparkTestingBaseTestCase
class HelloWorldTest(SparkTestingBaseTestCase):
def test_basic(self):
input = ["hello world"]
rdd = self.sc.parallelize(input)
result = rdd.collect()
assert result == input
if __name__ == "__main__":
unittest2.main()
| [
"holden@pigscanfly.ca"
] | holden@pigscanfly.ca |
a9d04750663a66935108a98236d9113d6c7fc08d | fcf225fba6352a7492acc416718c3f1baad923d7 | /BeerGraphsForAllData.py | 6a9829ace70b4ff43b9185d4bc320d0cf0a08285 | [] | no_license | sleavor/Beer-Project | 45797123bcacc7d48de64f7037da9c78612e2329 | 512297a3870b14734ecca099e5e351e83d1eab0d | refs/heads/master | 2022-07-04T17:17:58.401598 | 2020-05-09T23:31:17 | 2020-05-09T23:31:17 | 262,675,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 24 22:48:26 2020
@author: Shawn Leavor
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def _abv_values(abv_series):
    """Parse '5.5%'-style strings into floats, skipping entries without '%'."""
    # Fix: the original wrote abv.split('%'[0]) — '%'[0] is just '%' — and
    # duplicated this parsing loop in two places.
    return [float(abv.split('%')[0]) for abv in abv_series if "%" in abv]


df = pd.read_csv('Project\Beer\data.csv', na_values=['', '0'])
df = df.dropna(how='any', axis=0)
# Keep only rows whose ABV column actually contains a percentage.
df = df[df.abv.str.contains("%")]
# Drop beers with fewer than 25 ratings.  The comparison is against str(num)
# because numRatings holds strings here — presumably a mixed-type CSV column;
# TODO confirm the column dtype.
for num in range(1, 25):
    df = df[df.numRatings != str(num)]

# Plot all beers: rating vs. ABV.
x = _abv_values(df['abv'])
y = df['rating']
plt.scatter(x, y, s=1)
plt.xlim([0, 25])
plt.show()

# One scatter series per style on a shared axis.
# dict.fromkeys deduplicates while preserving first-seen order (the original
# used an O(n^2) membership-test loop).
allStyles = list(dict.fromkeys(df['style']))
for style in allStyles:
    # regex=False: style names may contain regex metacharacters.
    newdf = df[df['style'].str.contains(style, regex=False)]
    plt.scatter(_abv_values(newdf['abv']), newdf['rating'], label=style, s=1)
plt.title("Beer Ratings by ABV and Style")
plt.xlim([0, 25])
plt.xlabel('abv')
plt.ylim([1, 5])
plt.ylabel('average rating')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
"sleavor@yahoo.com"
] | sleavor@yahoo.com |
3f9d6d3b36322800bf1fdd90291314003b1c58f4 | af42c50f185478d59fbb66ff84dddbbaab449c39 | /tests/visualize/test_show.py | d623d28d8210a84846cac139f0068504ffe31ce1 | [
"MIT"
] | permissive | keisuke-umezawa/chutil | 04eac71610d4c7711da75dabf670e93790e9f4df | df60440983c38a6dbbe4710019bcec5e83331904 | refs/heads/master | 2020-04-05T17:23:24.342601 | 2018-11-11T14:54:49 | 2018-11-11T14:54:49 | 157,058,035 | 1 | 0 | MIT | 2018-11-11T13:44:22 | 2018-11-11T07:41:29 | Python | UTF-8 | Python | false | false | 163 | py | import chutil.visualize.show as module
def test_show_test_peformance():
    # TODO: placeholder — no assertions implemented yet.
    # NOTE(review): "peformance" looks like a typo for "performance"; renaming
    # would change the collected test id, so it is left as-is here.
    pass
def test_show_graph():
    # TODO: placeholder — no assertions implemented yet.
    pass
def test_show_loss_and_accuracy():
    # TODO: placeholder — no assertions implemented yet.
    pass
| [
"noreply@github.com"
] | noreply@github.com |
060eac28c26b41125b17d80a73f320465fa80cf3 | 2a6934acac4ec8bb29ad51e525ad2ed839a18587 | /sleekocmd.py | 68f49aecbefe4e0f260eb97fa74fe5cb08374f80 | [] | no_license | alexschlueter/arlecksbot | d9ca769a00bf0458163b397ebce314d510066af4 | 1730f5123b10bc638906f6206ea6b5b08460bfac | refs/heads/master | 2021-01-10T21:39:51.643735 | 2013-01-16T11:59:43 | 2013-01-16T11:59:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,261 | py | #!/usr/bin/env python2
# Name: SleekoCommander
# Author: Nick Caplinger (SleekoNiko)
# Dependencies: numpy, pypng
# Ideas:
# control the midfield with gankers
#1. Ambush flag carriers by predicting their path to the flag stand and whether or not they can intercept
#2. Camp the enemy spawn
#3. Actively search around points of interest to gain map awareness
# Import AI Sandbox API:
from api import Commander
from api import commands
from api import Vector2
# Import other modules
import random
#import png # for writing debug pngs
import networkx as nx # for graphs
import itertools
import math
#TODO Make bots more aggressive when time is running out and losing
#TODO Make bots more defensive when time is running out and winning
class SleekoCommander(Commander):
"""
Runners are responsible for infiltrating the enemy's defenses by flanking.
Defenders watch the flag stand for intruders and flankers by positioning themselves accordingly.
Midfielders try to provide map control by ganking and performing midfield objectives such as escorting and interception. They may fall into other roles when needed.
"""
    def initialize(self):
        """
        Assign each bot a role. Runners and defenders should default to 40%, and midfielders should default to 20%.
        Role counts should adapt throughout the game depending on how aggressive or defensive the enemy commander is.
        """
        self.verbose = True # display the command descriptions next to the bot labels
        self.lastEventCount = 0
        self.numAllies = len(self.game.team.members)
        self.botDeathLocations = [] # stores a list of Vector2 objects of where bots died
        self.makeRunnerGraph()
        self.runners = [] # 40%
        self.defenders = [] # 40%
        self.midfielders = [] # 20%
        ourSpawn = self.game.team.botSpawnArea[0]
        theirSpawn = self.game.enemyTeam.botSpawnArea[0]
        # if their spawn is closer to our flag than ours is
        # attacking will probably be easy, so get more defenders
        if distTo(theirSpawn, self.game.team.flag.position) < distTo(ourSpawn, self.game.team.flag.position):
            # roughly half attackers/defenders
            self.desiredRunners = math.ceil(self.numAllies * .5)
            self.desiredDefenders = math.ceil(self.numAllies * .5)
        else:
            # Few defenders and the rest are attackers
            defPercent = .20
            self.desiredDefenders = math.ceil(self.numAllies * defPercent)
            self.desiredRunners = math.ceil(self.numAllies * (1 - defPercent))
        # Assign roles: fill the runner quota first, everyone else defends.
        for bot in self.game.team.members:
            if len(self.runners) < self.desiredRunners:
                self.runners.append(bot)
            else:
                self.defenders.append(bot)
        # TODO calculate for more than 2 flags
        self.midPoint = (self.game.team.botSpawnArea[0] + self.game.enemyTeam.flag.position) / 2.0
        dirToFlag = (self.game.enemyTeam.flag.position - self.game.team.flag.position)
        # Unit vectors toward the enemy flag and perpendicular to that line
        # (used to pick flanking approach directions).
        self.frontFlank = Vector2(dirToFlag.x, dirToFlag.y).normalized()
        self.leftFlank = Vector2(dirToFlag.y, -dirToFlag.x).normalized()
        self.rightFlank = Vector2(-dirToFlag.y, dirToFlag.x).normalized()
        # Create behavior tree: one Sequence branch per role; each branch
        # picks the first applicable action via nested Selectors.
        self.behaviorTree = BotBehaviorTree(
            Selector([
                Sequence([
                    BotIsRunner(),
                    Selector([
                        Sequence([
                            BotHasFlag(),
                            RunToScoreZone()
                        ]),
                        Sequence([
                            AllyHasFlag(),
                            SecureEnemyFlagObjective()
                        ]),
                        Sequence([
                            Inverter(TeamHasEnemyFlag()),
                            #SmartApproachFlag()
                            Selector([
                                Sequence([
                                    NearEnemyFlag(),
                                    Selector([
                                        Sequence([
                                            EnemiesAreAlive(),
                                            AttackFlag()
                                        ]),
                                        ChargeFlag()
                                    ])
                                ]),
                                ChargeToFlagFlank()
                            ])
                        ])
                    ])
                ]),
                Sequence([
                    BotIsDefender(),
                    Selector([
                        Sequence([
                            BotHasFlag(),
                            RunToScoreZone()
                        ]),
                        Sequence([
                            OurFlagIsInBase(),
                            SecureOurFlagStand()
                        ]),
                        Sequence([
                            OurFlagIsOnOurHalf(),
                            SecureOurFlag()
                        ]),
                        Sequence([
                            SecureOurFlagStand()
                        ])
                    ])
                ])
            ])
        )
        # Set some blackboard data shared by all tree nodes.
        self.behaviorTree.root.blackboard = {}
        self.behaviorTree.root.blackboard['commander'] = self
        # I was using a png file for output
        #bt = getVonNeumannNeighborhood((int(self.game.team.flagSpawnLocation.x), int(self.game.team.flagSpawnLocation.y)), self.level.blockHeights, int(self.level.firingDistance))
        #createPngFromBlockTuples(bt, (self.level.width, self.level.height))
        #createPngFromMatrix(bt, (self.level.width, self.level.height))
        # Determine safest positions for flag defense (ours and theirs).
        self.secureFlagDefenseLocs = self.getMostSecurePositions(Vector2(self.game.team.flagSpawnLocation.x, self.game.team.flagSpawnLocation.y))
        self.secureEnemyFlagLocs = self.getMostSecurePositions(Vector2(self.game.enemyTeam.flagSpawnLocation.x, self.game.enemyTeam.flagSpawnLocation.y))
    def tick(self):
        """
        Listen for events and run the bot's behavior tree.

        Called once per game update: first record new combat events
        (tracking where our own bots died), then run the shared behavior
        tree once for every living bot.
        """
        # listen for events
        if len(self.game.match.combatEvents) > self.lastEventCount:
            # Only the most recent event is inspected.  NOTE(review): if
            # several events arrive in one tick the earlier ones are never
            # examined, so some deaths may be missed -- confirm intent.
            lastCombatEvent = self.game.match.combatEvents[-1]
            #self.log.info('event:'+str(lastCombatEvent.type))
            # if lastCombatEvent.instigator is not None:
            #     print "event:%d %f %s %s" % (lastCombatEvent.type,lastCombatEvent.time,lastCombatEvent.instigator.name,lastCombatEvent.subject.name)
            # else:
            #     print "event:%d %f" % (lastCombatEvent.type,lastCombatEvent.time)
            if lastCombatEvent.type == lastCombatEvent.TYPE_KILLED:
                if lastCombatEvent.subject in self.game.team.members:
                    # Remember where one of our bots died (previously fed
                    # updateRunnerGraph, whose call is commented out below).
                    self.botDeathLocations.append(lastCombatEvent.subject.position)
                    #self.updateRunnerGraph()
            self.lastEventCount = len(self.game.match.combatEvents)
        # run behavior tree
        for bot in self.game.bots_alive:
            # The tree reads the current bot from the shared blackboard.
            self.behaviorTree.root.blackboard['bot'] = bot
            self.behaviorTree.run()
def shutdown(self):
scoreDict = self.game.match.scores
myScore = scoreDict[self.game.team.name]
theirScore = scoreDict[self.game.enemyTeam.name]
if myScore < theirScore:
self.log.info("We lost! Final score: " + str(myScore) + "-" + str(theirScore))
"""
Returns most secure positions by using von Neumann neighborhood where r = firingDistance + 2
"""
    def getMostSecurePositions(self,secLoc):
        """
        Return defensible cells near *secLoc* (e.g. a flag stand).

        Scans the von Neumann neighborhood of radius firingDistance+2,
        keeps cells that are walkable, have at least one adjacent cover
        block or map wall, and have raycast line-of-sight to *secLoc*.
        The result is sorted best-first by a cover/distance score.
        potPosits is a scratch scoring grid (also usable for PNG debugging).
        """
        levelSize = (self.level.width, self.level.height)
        width, height = levelSize
        potPosits = [[0 for y in xrange(height)] for x in xrange(width)]
        neighbors = getVonNeumannNeighborhood((int(secLoc.x), int(secLoc.y)), self.level.blockHeights, int(self.level.firingDistance)+2)
        securePositions = []
        for n in neighbors:
            # use raycasting to test whether or not this position can see the flag
            # if it can't, automatically set it to 0
            x,y = n
            if self.level.blockHeights[x][y] >= 2:
                potPosits[x][y] = 50    # wall-height block: not standable
            else:
                potPosits[x][y] = 255   # candidate until a check fails
            if potPosits[x][y] == 255:
                # Require at least some adjacent cover (blocks or map edge).
                numWallCells = numAdjCoverBlocks(n, self.level.blockHeights)
                numWallCells += numAdjMapWalls(n, levelSize)
                #print numWallCells
                if numWallCells == 0:
                    potPosits[x][y] = 128
            if potPosits[x][y] == 255:
                # make sure they have LOS with the flag
                goodLOS = True
                lookVec = Vector2(x+0.5,y+0.5) - (secLoc + Vector2(.5,.5))
                lookVecNorm = lookVec.normalized()
                vecInc = .1
                # March along the ray in 0.1 steps; any wall-height block
                # in between kills the line of sight.
                while vecInc < lookVec.length():
                    testPos = secLoc + lookVecNorm * vecInc
                    #print str(testPos)
                    if self.level.blockHeights[int(testPos.x)][int(testPos.y)] >= 2:
                        goodLOS = False
                        break
                    vecInc += .1
                if not goodLOS:
                    potPosits[x][y] = 128
                else:
                    securePositions.append(n)
        #createPngFromMatrix(potPosits, levelSize)
        # Best positions first: heavily weight map walls, then cover blocks
        # weighted by enemy-spawn distance, then distance from secLoc.
        return sorted(securePositions, key = lambda p: numAdjMapWalls(p, levelSize)*4 + numAdjCoverBlocksWeighted(p, self) + distTo(Vector2(p[0],p[1]), secLoc)/self.level.firingDistance, reverse = True)
def getFlankingPosition(self, bot, target):
flanks = [target + f * self.level.firingDistance for f in [self.leftFlank, self.rightFlank]]
options = map(lambda f: self.level.findNearestFreePosition(f), flanks)
#return sorted(options, key = lambda p: (bot.position - p).length())[0]
return random.choice(options)
# return number of living enemies
def numAliveEnemies(self):
livingEnemies = 0
for bot in self.game.enemyTeam.members:
if bot.health != None and bot.health > 0:
livingEnemies += 1
return livingEnemies
    def makeRunnerGraph(self):
        """
        Build an undirected grid graph of walkable cells for runner pathing.

        Nodes are numbered i + j*width (row-major); self.terrain mirrors the
        grid as terrain[j][i] -> node id or None for blocked cells, and
        self.positions maps node id -> cell-center Vector2.  4-neighbor
        edges all start with weight 1.0 (reweighted by updateRunnerGraph).
        """
        blocks = self.level.blockHeights
        width, height = len(blocks), len(blocks[0])
        g = nx.Graph(directed=False, map_height = height, map_width = width)
        #self.positions = g.new_vertex_property('vector<float>')
        #self.weights = g.new_edge_property('float')
        #g.vertex_properties['pos'] = self.positions
        #g.edge_properties['weight'] = self.weights
        self.terrain = []
        self.positions = {}
        for j in range(0, height):
            row = []
            for i in range(0,width):
                if blocks[i][j] == 0:
                    # Walkable cell: node keyed by row-major index.
                    g.add_node(i+j*width, position = (float(i)+0.5, float(j)+0.5) )
                    self.positions[i+j*width] = Vector2(float(i) + 0.5, float(j) + 0.5)
                    row.append(i+j*width)
                else:
                    row.append(None)
            self.terrain.append(row)
        for i, j in itertools.product(range(0, width), range(0, height)):
            p = self.terrain[j][i]
            # NOTE(review): node id 0 (cell 0,0) is falsy, so `if not p`
            # also skips it and cell (0,0) never gets edges -- likely should
            # be `if p is None`.  Same applies to the q/r truthiness below.
            if not p: continue
            if i < width-1:
                q = self.terrain[j][i+1]
                if q:
                    e = g.add_edge(p, q, weight = 1.0)
            if j < height-1:
                r = self.terrain[j+1][i]
                if r:
                    e = g.add_edge(p, r, weight = 1.0)
        self.runnerGraph = g
    def updateRunnerGraph(self):
        """
        Reweight grid edges so paths avoid areas where our bots died.

        Each edge weight is max(255 - 4*(dist[a] + dist[b]), 0), i.e. edges
        near recorded danger become cheap... NOTE(review): this writes to
        self.graph but makeRunnerGraph stores the graph in self.runnerGraph,
        and self.distances is never assigned in this chunk -- as written this
        would raise AttributeError.  Its only call site (in tick) is
        commented out; confirm before re-enabling.
        """
        blocks = self.level.blockHeights
        width, height = len(blocks), len(blocks[0])
        # update the weights based on the distance
        for j in range(0, height):
            for i in range(0, width -1):
                # Horizontal edges (a -- b to the right).
                a = self.terrain[j][i]
                b = self.terrain[j][i+1]
                if a and b:
                    w = max(255 - 4*(self.distances[a] + self.distances[b]), 0)
                    self.graph[a][b]['weight'] = w
        for j in range(0, height-1):
            for i in range(0, width):
                # Vertical edges (a -- b below).
                a = self.terrain[j][i]
                b = self.terrain[j+1][i]
                if a and b:
                    w = max(255 - 4*(self.distances[a] + self.distances[b]), 0)
                    self.graph[a][b]['weight'] = w
def getNodeIndex(self, position):
i = int(position.x)
j = int(position.y)
width = self.runnerGraph.graph["map_width"]
return i+j*width
# Helper functions
def distTo(pos1, pos2):
    """Return the Euclidean distance between two Vector2-like points."""
    delta = pos1 - pos2
    return delta.length()
# used for intercepting enemy flag runners
def canInterceptTarget(bot, target, targetGoal):
    """True when *bot* is closer to *targetGoal* than *target* is, i.e. it
    can reach the goal first and cut the target off."""
    ourDist = distTo(bot, targetGoal)
    theirDist = distTo(target, targetGoal)
    return ourDist < theirDist
# Returns number of blocks that are adjacent that can be used as cover at a given position
def numAdjCoverBlocks(cell, blockHeights):
    """Count cover-height (>= 2) blocks in the radius-1 von Neumann
    neighborhood of *cell* (the cell itself included)."""
    neighborhood = getVonNeumannNeighborhood(cell, blockHeights, 1)
    return sum(1 for (x, y) in neighborhood if blockHeights[x][y] >= 2)
# prioritize cells that have cover from their spawn
# prioritize cells that have cover from their spawn
def numAdjCoverBlocksWeighted(cell, cmdr):
    """
    Score *cell* by adjacent cover blocks, weighting each block by how far
    it is from the enemy spawn: cover on the enemy-facing side (farther
    down the sorted list, higher index) contributes more to the score.
    """
    adjCells = getVonNeumannNeighborhood(cell, cmdr.level.blockHeights, 1)
    # get distances of cells to their spawn
    spawnPoint = cmdr.game.enemyTeam.botSpawnArea[0]
    cellDistances = [distTo(spawnPoint, Vector2(x[0] + .5, x[1] + .5)) for x in adjCells]
    # Farthest-from-spawn first, so the enumeration index below grows as
    # cells get closer to the enemy spawn.
    cellDistData = sorted(zip(adjCells, cellDistances), key = lambda x: x[1], reverse = True)
    wallScore = 0
    for i, aCell in enumerate([x[0] for x in cellDistData]):
        if not aCell == cell:
            aCellX, aCellY = aCell
            if cmdr.level.blockHeights[aCellX][aCellY] >= 2:
                wallScore += i
    return wallScore
# Tests to see approx. how far we can go in a direction until hitting a wall
def unblockedDistInDir(startPos, direction, commander):
    """
    March from *startPos* in half-*direction* increments until a wall-height
    block (>= 2) or the level edge is hit; return the distance covered.
    """
    bounds = (commander.level.width, commander.level.height)
    probe = startPos
    while withinLevelBounds(probe, bounds):
        if commander.level.blockHeights[int(probe.x)][int(probe.y)] >= 2:
            break
        probe = probe + direction / 2
    return distTo(startPos, probe)
# Returns true if the cell position is within level bounds, false otherwise
def withinLevelBounds(pos, levelSize):
    """True when *pos* lies inside the level rectangle [0, w) x [0, h)."""
    width, height = levelSize
    return 0 <= pos.x < width and 0 <= pos.y < height
# Returns the number of adjacent map walls
def numAdjMapWalls(cell, mapSize):
    """Return how many outer map walls (0-2) the cell touches; corners
    touch two, edge cells one, interior cells none."""
    x, y = cell
    width, height = mapSize
    onVerticalEdge = x in (0, width - 1)
    onHorizontalEdge = y in (0, height - 1)
    return int(onVerticalEdge) + int(onHorizontalEdge)
# Returns the von Neumann Neighborhood of the cell of specified range as a list of tuples (x,y)
# http://mathworld.wolfram.com/vonNeumannNeighborhood.html
def getVonNeumannNeighborhood(cell, cells, r): # where cell is a tuple, cells is a 2D list, and r is the range
    """
    Return every (x, y) coordinate of the grid *cells* whose Manhattan
    distance to *cell* is at most *r* (the cell itself included).
    http://mathworld.wolfram.com/vonNeumannNeighborhood.html
    """
    cx, cy = cell
    return [(x, y)
            for x, column in enumerate(cells)
            for y, _ in enumerate(column)
            if abs(x - cx) + abs(y - cy) <= r]
def createPngFromBlockTuples(tupleList, levelSize, name='pngtest.png'): # where tupleList is a list of block position tuples, levelSize is a tuple of x,y level size
    """
    Write a grayscale PNG with the given block cells painted white (255).

    tupleList -- iterable of (x, y) cell coordinates
    levelSize -- (width, height) of the level

    Bug fix: the buffer was allocated width-major ([width][height]) but
    then indexed as pngList[y][x], which only worked for square maps and
    produced an IndexError / transposed image otherwise.  PNG rows are
    y-major with *width* pixels each, so allocate height-major.
    range()/print() replace the Python 2 only xrange/print statement.
    """
    width, height = levelSize
    pngRows = [[0 for x in range(width)] for y in range(height)]
    for t in tupleList:
        print(str(t))
        x, y = t
        pngRows[y][x] = 255
    image = png.from_array(pngRows, mode='L') # grayscale
    image.save(name)
def createPngFromMatrix(matrix, levelSize, name='pngtest.png'):
    """
    Write an x-major grid (matrix[x][y]) as a grayscale PNG.

    The grid is transposed into y-major rows because png.from_array
    expects one sequence per image row.  range() replaces the Python 2
    only xrange() (identical iteration behavior).
    """
    width, height = levelSize
    transposedMatrix = [[row[i] for row in matrix] for i in range(height)]
    image = png.from_array(transposedMatrix, mode='L')
    image.save(name)
# Base class for bot behavior tree
class BotBehaviorTree:
    """Thin wrapper holding the root task of a bot's behavior tree."""
    def __init__(self, child=None):
        # The root task; run() simply delegates to it each tick.
        self.root = child
    def run(self):
        self.root.run()
# Base task classes
class Task:
    """
    Base node of the behavior tree.

    A task may hold child tasks and a *blackboard* dict.  getData() looks a
    key up on this node first and then walks up through the parent chain,
    so data set on the root (e.g. 'commander', 'bot') is visible everywhere.
    """
    def __init__(self, children=None, parent=None, blackboard=None):
        # holds the children of task; each child is back-linked to us so
        # its getData() can search upward through this node
        self.children = children
        self.blackboard = blackboard
        self.parent = parent
        if self.children is not None:
            for c in self.children:
                c.parent = self
    # returns True for success and False for failure
    def run(self):
        raise NotImplementedError("Can't call Task.run() without defining behavior.")
    def getData(self, name):
        """
        Return the blackboard value for *name*, searching this node and then
        its ancestors; None when no blackboard in the chain has the key.

        Bug fix: the original referenced the bare name ``blackboard`` (a
        NameError at runtime) instead of ``self.blackboard``.
        """
        if self.blackboard is not None and name in self.blackboard:
            return self.blackboard[name]
        ancestor = self.parent
        while ancestor is not None:
            if ancestor.blackboard is not None and name in ancestor.blackboard:
                return ancestor.blackboard[name]
            ancestor = ancestor.parent
        # We went through the parents and didn't find anything.
        return None
class Selector(Task):
    """Composite: succeed on the first child that succeeds; fail if none do.
    Children after the first success are not run (short-circuit)."""
    def run(self):
        return any(child.run() for child in self.children)
class Sequence(Task):
    """Composite: run children in order; fail on the first failure.
    Children after the first failure are not run (short-circuit)."""
    def run(self):
        return all(child.run() for child in self.children)
# Decorators
class Decorator(Task):
    """Task wrapping exactly one child; subclasses modify the child's result."""
    def __init__(self, child=None, parent=None, blackboard=None):
        self.blackboard = blackboard
        self.parent = parent
        self.child = child
        # Back-link so the child's getData() can search through us.
        self.child.parent = self
class Inverter(Decorator):
    """Decorator that negates its child's result (success <-> failure)."""
    def run(self):
        result = self.child.run()
        return not result
# Now onto tasks specific to our program:
class BotIsRunner(Task):
    """Condition: the current bot was assigned to the runner squad."""
    def run(self):
        commander = self.getData('commander')
        return self.getData('bot') in commander.runners
class BotIsDefender(Task):
    """Condition: the current bot was assigned to the defender squad."""
    def run(self):
        commander = self.getData('commander')
        return self.getData('bot') in commander.defenders
class TeamHasEnemyFlag(Task):
    """Condition: the enemy flag is currently being carried (by one of ours)."""
    def run(self):
        flag = self.getData('commander').game.enemyTeam.flag
        return flag.carrier is not None
class BotHasFlag(Task):
    """Condition: the current bot itself is carrying the enemy flag."""
    def run(self):
        carrier = self.getData('commander').game.enemyTeam.flag.carrier
        return self.getData('bot') == carrier
class LookRandom(Task):
    """Action: order the bot to defend facing a uniformly random direction."""
    def run(self):
        commander = self.getData('commander')
        facing = Vector2(random.random() * 2 - 1, random.random() * 2 - 1)
        commander.issue(commands.Defend, self.getData('bot'), facing, description = 'Looking in random direction')
        return True
class ChargeFlag(Task):
    """Action: charge straight at the enemy flag unless the bot is already
    busy (shooting, mid-charge, or still taking orders)."""
    def run(self):
        bot = self.getData('bot')
        commander = self.getData('commander')
        busy = (bot.STATE_SHOOTING, bot.STATE_CHARGING, bot.STATE_TAKINGORDERS)
        if bot.state not in busy:
            commander.issue(commands.Charge, bot, commander.game.enemyTeam.flag.position, description = 'Rushing enemy flag')
        return True
class SmartApproachFlag(Task):
    # Action: approach the enemy flag along the runner graph's weighted
    # shortest path instead of charging straight.  Currently disabled in
    # the behavior tree (its only use is commented out).
    def run(self):
        """Issue a Charge order along the weighted shortest path to the flag."""
        bot = self.getData('bot')
        cmdr = self.getData('commander')
        if bot.state != bot.STATE_SHOOTING and bot.state != bot.STATE_CHARGING and bot.state != bot.STATE_TAKINGORDERS:
            dst = cmdr.game.enemyTeam.flag.position
            message = "Intelligently approaching flag?"
            # calculate the shortest path between the bot and the target using our weights
            srcIndex = cmdr.getNodeIndex(bot.position)
            dstIndex = cmdr.getNodeIndex(dst)
            pathNodes = nx.shortest_path(cmdr.runnerGraph, srcIndex, dstIndex, 'weight')
            pathLength = len(pathNodes)
            if pathLength > 0:
                # Keep only nodes with a known position, then thin the path.
                path = [cmdr.positions[p] for p in pathNodes if cmdr.positions[p]]
                if len(path) > 0:
                    orderPath = path[::10]
                    orderPath.append(path[-1]) # take every 10th point including last point
                    cmdr.issue(commands.Charge, bot, orderPath, description = message)
        # NOTE(review): unlike the sibling action tasks this one returns
        # None (falsy) rather than True -- confirm before re-enabling it
        # inside a Selector/Sequence.
class ChargeToFlagFlank(Task):
    """Action: charge toward a flanking position beside the enemy flag
    unless the bot is already busy."""
    def run(self):
        bot = self.getData('bot')
        commander = self.getData('commander')
        busy = (bot.STATE_SHOOTING, bot.STATE_CHARGING, bot.STATE_TAKINGORDERS)
        if bot.state not in busy:
            flankPos = commander.getFlankingPosition(bot, commander.game.enemyTeam.flag.position)
            commander.issue(commands.Charge, bot, flankPos, description = 'Rushing enemy flag via flank')
        return True
class AttackFlag(Task):
    """Action: cautiously attack toward the enemy flag position unless the
    bot is already shooting, attacking, or taking orders."""
    def run(self):
        bot = self.getData('bot')
        cmdr = self.getData('commander')
        busy = (bot.STATE_SHOOTING, bot.STATE_ATTACKING, bot.STATE_TAKINGORDERS)
        if bot.state not in busy:
            cmdr.issue(commands.Attack, bot, cmdr.game.enemyTeam.flag.position, description = 'Attacking enemy flag')
        return True
class WithinShootingDistance(Task):
    """
    Condition: the bot is within firing distance of the blackboard key
    'targetPos'.

    Bug fix: the old __init__ called self.getData() before Task.__init__
    had run, so self.blackboard/self.parent did not exist yet and merely
    constructing the task raised AttributeError.  The firing distance is
    now read lazily in run(); construction uses the inherited Task.__init__.
    """
    def run(self):
        shootingDistance = self.getData('commander').level.firingDistance
        return distTo(self.getData('bot').position, self.getData('targetPos')) < shootingDistance
class RunToScoreZone(Task):
    """Action: carry the captured flag back to our scoring zone unless the
    bot is already busy."""
    def run(self):
        bot = self.getData('bot')
        cmdr = self.getData('commander')
        busy = (bot.STATE_SHOOTING, bot.STATE_CHARGING, bot.STATE_TAKINGORDERS)
        if bot.state not in busy:
            cmdr.issue(commands.Charge, bot, cmdr.game.team.flagScoreLocation, description = 'Taking their flag home')
        return True
class AllyHasFlag(Task):
    """Condition: one of our living bots is carrying the enemy flag."""
    def run(self):
        cmdr = self.getData('commander')
        carrier = cmdr.game.enemyTeam.flag.carrier
        return any(b == carrier for b in cmdr.game.bots_alive)
class SecureEnemyFlagObjective(Task):
    # Action used while an ally carries the enemy flag: guard whichever of
    # the enemy flag spawn / enemy score zone is closer to this bot, so a
    # dropped or respawned flag can be re-secured quickly.
    def run(self):
        """Move to (or hold) the nearer enemy flag objective; always succeeds."""
        bot = self.getData('bot')
        cmdr = self.getData('commander')
        flagSpawnLoc = cmdr.game.enemyTeam.flagSpawnLocation
        flagScoreLoc = cmdr.game.enemyTeam.flagScoreLocation
        # secure their flag spawn or their flag capture zone; whichever is closer
        flagSpawnDist = distTo(bot.position, flagSpawnLoc)
        capZoneDist = distTo(bot.position, flagScoreLoc)
        secureLoc = None
        secureDist = flagSpawnDist
        if flagSpawnDist < capZoneDist:
            secureLoc = flagSpawnLoc
            secureDist = flagSpawnDist
        else:
            secureLoc = flagScoreLoc
            secureDist = capZoneDist
        if secureDist < 2:
            # Close enough: hold position, scanning a rough arc toward the
            # midpoint of the map (with a little randomness).
            if bot.state != bot.STATE_SHOOTING and bot.state != bot.STATE_DEFENDING and bot.state != bot.STATE_TAKINGORDERS:
                # TODO face direction(s) that the attackers will most likely come from
                direction = (cmdr.midPoint - bot.position).normalized() + (random.random() - 0.5)
                dirLeft = Vector2(-direction.y, direction.x)
                dirRight = Vector2(direction.y, -direction.x)
                cmdr.issue(commands.Defend, bot, [(direction, 1.0), (dirLeft, 1.0), (direction, 1.0), (dirRight, 1.0)], description = 'Keeping flag objective secure')
        else:
            # Still traveling: move cautiously (Attack) while enemies live,
            # otherwise sprint (Charge) since nothing can shoot us.
            enemiesAlive = False
            for b in cmdr.game.enemyTeam.members:
                if b.health != None and b.health > 0:
                    enemiesAlive = True
                    break
            if enemiesAlive:
                if bot.state != bot.STATE_SHOOTING and bot.state != bot.STATE_ATTACKING and bot.state != bot.STATE_TAKINGORDERS:
                    cmdr.issue(commands.Attack, bot, secureLoc, description = 'Moving to secure enemy flag objective')
            else:
                if bot.state != bot.STATE_SHOOTING and bot.state != bot.STATE_CHARGING and bot.state != bot.STATE_TAKINGORDERS:
                    cmdr.issue(commands.Charge, bot, secureLoc, description = 'Charging to secure enemy flag objective')
        return True
class NearEnemyFlag(Task):
    """Condition: the bot is within 1.5x firing distance of the enemy flag."""
    def run(self):
        cmdr = self.getData('commander')
        flagDist = distTo(self.getData('bot').position, cmdr.game.enemyTeam.flag.position)
        return flagDist < cmdr.level.firingDistance * 1.5
class EnemiesAreAlive(Task):
    """Condition: at least one enemy bot still has positive health."""
    def run(self):
        members = self.getData('commander').game.enemyTeam.members
        return any(b.health is not None and b.health > 0 for b in members)
# Defender bot code
class OurFlagIsInBase(Task):
    """Condition: our flag sits within 3 units of its spawn point."""
    def run(self):
        team = self.getData('commander').game.team
        return distTo(team.flag.position, team.flagSpawnLocation) < 3
class OurFlagIsOnOurHalf(Task):
    """Condition: our flag is closer to its own spawn than to the enemy's
    score zone, i.e. it has not been carried deep into enemy territory."""
    def run(self):
        cmdr = self.getData('commander')
        flagPos = cmdr.game.team.flag.position
        toOurSpawn = distTo(flagPos, cmdr.game.team.flagSpawnLocation)
        toTheirScore = distTo(flagPos, cmdr.game.enemyTeam.flagScoreLocation)
        return toOurSpawn < toTheirScore
class SecureOurFlag(Task):
    # Action: defender converges on our flag's CURRENT position (e.g. after
    # a carrier was killed mid-map) and holds there once within 2 units.
    def run(self):
        """Move to / defend our flag's current position; always succeeds."""
        cmdr = self.getData('commander')
        bot = self.getData('bot')
        secureLoc = cmdr.game.team.flag.position
        secureDist = distTo(bot.position, secureLoc)
        if secureDist < 2:
            # Close enough: hold, scanning an arc toward the map midpoint.
            if bot.state != bot.STATE_SHOOTING and bot.state != bot.STATE_DEFENDING and bot.state != bot.STATE_TAKINGORDERS:
                # TODO face direction(s) that the attackers will most likely come from
                direction = (cmdr.midPoint - bot.position).normalized() + (random.random() - 0.5)
                dirLeft = Vector2(-direction.y, direction.x)
                dirRight = Vector2(direction.y, -direction.x)
                cmdr.issue(commands.Defend, bot, [(direction, 1.0), (dirLeft, 1.0), (direction, 1.0), (dirRight, 1.0)], description = 'Keeping our flag secure')
        else:
            # Travel cautiously (Attack) while enemies live, else sprint.
            enemiesAlive = False
            for b in cmdr.game.enemyTeam.members:
                if b.health != None and b.health > 0:
                    enemiesAlive = True
                    break
            if enemiesAlive:
                if bot.state != bot.STATE_SHOOTING and bot.state != bot.STATE_ATTACKING and bot.state != bot.STATE_TAKINGORDERS:
                    cmdr.issue(commands.Attack, bot, secureLoc, description = 'Moving to secure our flag')
            else:
                if bot.state != bot.STATE_SHOOTING and bot.state != bot.STATE_CHARGING and bot.state != bot.STATE_TAKINGORDERS:
                    cmdr.issue(commands.Charge, bot, secureLoc, description = 'Charging to secure our flag')
        return True
class SecureOurFlagStand(Task):
    # Action: defender takes up a precomputed secure position near our flag
    # spawn (cmdr.secureFlagDefenseLocs, best first) and faces the open
    # approach directions once in place.
    def run(self):
        """Move to / hold the best defensible cell by our flag stand; always succeeds."""
        cmdr = self.getData('commander')
        bot = self.getData('bot')
        safeLocs = cmdr.secureFlagDefenseLocs
        secureLoc = None
        secureDist = None
        chosenLoc = None
        if len(safeLocs) == 0:
            secureLoc = cmdr.game.team.flagSpawnLocation
        else:
            #double check to make sure we have a good position; note that this shouldn't really be done here
            # Take the first (best-scored) cell still within firing range of
            # the flag spawn.
            for i, sLoc in enumerate(safeLocs):
                if distTo(Vector2(sLoc[0] + .5, sLoc[1] + .5), cmdr.game.team.flagSpawnLocation + Vector2(.5,.5)) <= cmdr.level.firingDistance - 1:
                    chosenLoc = safeLocs[i]
                    break
        if chosenLoc == None:
            # Give up
            # NOTE(review): in the non-empty safeLocs branch secureLoc is
            # still None here, so chosenLoc becomes None and the Vector2
            # construction below would fail; in the empty branch this relies
            # on flagSpawnLocation being indexable like a tuple -- confirm.
            chosenLoc = secureLoc
        secureLoc = Vector2(chosenLoc[0] + 0.5, chosenLoc[1] + 0.5)
        secureDist = distTo(bot.position, secureLoc)
        if secureDist < .5:
            if bot.state != bot.STATE_SHOOTING and bot.state != bot.STATE_DEFENDING and bot.state != bot.STATE_TAKINGORDERS:
                # face away from adjacent walls
                directions = []
                secureLocCell = (int(secureLoc.x), int(secureLoc.y))
                for aCell in getVonNeumannNeighborhood(secureLocCell, cmdr.level.blockHeights, 1):
                    if aCell != secureLocCell:
                        if cmdr.level.blockHeights[aCell[0]][aCell[1]] <= 1:
                            # Open neighbor: consider facing that way if the
                            # sight line is long enough to matter.
                            aimDir = Vector2(aCell[0], aCell[1]) - Vector2(secureLocCell[0], secureLocCell[1])
                            aimDist = unblockedDistInDir(secureLoc, aimDir, cmdr)
                            if aimDist > cmdr.level.firingDistance / 3:
                                directions.append(aimDir.normalized())
                if len(directions) > 0:
                    cmdr.issue(commands.Defend, bot, directions, description = 'Keeping our flag stand secure')
                else:
                    cmdr.issue(commands.Defend, bot, (cmdr.game.team.flagSpawnLocation - bot.position).normalized(), description = 'Keeping our flag stand secure')
        else:
            # Travel cautiously (Attack) while enemies live, else sprint.
            enemiesAlive = False
            for b in cmdr.game.enemyTeam.members:
                if b.health != None and b.health > 0:
                    enemiesAlive = True
                    break
            if enemiesAlive:
                if bot.state != bot.STATE_SHOOTING and bot.state != bot.STATE_ATTACKING and bot.state != bot.STATE_TAKINGORDERS:
                    cmdr.issue(commands.Attack, bot, secureLoc, description = 'Moving to secure our flag stand')
            else:
                if bot.state != bot.STATE_SHOOTING and bot.state != bot.STATE_CHARGING and bot.state != bot.STATE_TAKINGORDERS:
                    cmdr.issue(commands.Charge, bot, secureLoc, description = 'Charging to secure our flag stand')
        return True
| [
"arleckshunt@googlemail.com"
] | arleckshunt@googlemail.com |
2c6c3a09d95945c7a9f020b9df2ee127ebe4414a | 00e29479dc7c45a9e019f96c90a69a49af618ccf | /src/api-engine/src/api/routes/user/views.py | 42300c55ed400a24f4c2f80abe33262434251b0a | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | yunchaozou/cello | 8dd081db2ce5d9b8975d553d4491d329790588ef | 68158f572c688f1710813c4df47fad28c3d4276c | refs/heads/master | 2020-04-22T08:40:53.157301 | 2019-02-08T22:35:35 | 2019-02-08T22:35:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,549 | py | #
# SPDX-License-Identifier: Apache-2.0
#
import logging
from rest_framework import viewsets, status
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from drf_yasg.utils import swagger_auto_schema
from api.routes.network.serializers import NetworkListResponse
from api.utils.common import with_common_response
from api.routes.company.serializers import (
NodeOperationSerializer,
CompanyQuery,
CompanyCreateBody,
CompanyIDSerializer,
)
from api.auth import CustomAuthenticate
LOG = logging.getLogger(__name__)
class UserViewSet(viewsets.ViewSet):
    # REST endpoints for user management.  All handlers require an
    # authenticated caller; the swagger_auto_schema decorators drive the
    # generated OpenAPI docs (docstrings below are parsed by drf_yasg, so
    # they are left untouched).
    authentication_classes = (CustomAuthenticate,)
    permission_classes = (IsAuthenticated,)
    @swagger_auto_schema(
        query_serializer=CompanyQuery,
        responses=with_common_response(
            with_common_response({status.HTTP_200_OK: NetworkListResponse})
        ),
    )
    def list(self, request, *args, **kwargs):
        """
        List Users
        List user through query parameter
        """
        LOG.info("user %s", request.user.role)
        # Placeholder: querying/filtering is not implemented yet, so an
        # empty list is always returned.
        return Response(data=[], status=status.HTTP_200_OK)
    @swagger_auto_schema(
        request_body=CompanyCreateBody,
        responses=with_common_response(
            {status.HTTP_201_CREATED: CompanyIDSerializer}
        ),
    )
    def create(self, request):
        """
        Create User
        Create new user
        """
        # Not implemented yet; endpoint exists for API schema generation.
        pass
    @swagger_auto_schema(
        responses=with_common_response(
            {status.HTTP_204_NO_CONTENT: "No Content"}
        )
    )
    def destroy(self, request, pk=None):
        """
        Delete User
        Delete user
        """
        # Not implemented yet; endpoint exists for API schema generation.
        pass
    @action(
        methods=["get", "post", "put", "delete"],
        detail=True,
        url_path="attributes",
    )
    def attributes(self, request, pk=None):
        """
        get:
        Get User Attributes
        Get attributes of user
        post:
        Create Attributes
        Create attribute for user
        put:
        Update Attribute
        Update attribute of user
        delete:
        Delete Attribute
        Delete attribute of user
        """
        # Single routed sub-resource handling all four verbs for
        # /users/{pk}/attributes; not implemented yet.
        pass
    @swagger_auto_schema(method="post", responses=with_common_response())
    @action(methods=["post"], detail=True, url_path="password")
    def password(self, request, pk=None):
        """
        post:
        Update/Reset Password
        Update/Reset password for user
        """
        # Not implemented yet; endpoint exists for API schema generation.
        pass
| [
"hightall@me.com"
] | hightall@me.com |
0e587c315617d75d7578f397cb8112c8653d43bd | 4f7e258073b2688a2dd067d9f6d0947f0b5330f9 | /Project5/Python/get.py | 6b248c0c82135ba8c2d3d494e336ad2051661434 | [] | no_license | ajcletus500/College-Projects | 5ebc1c7f409dc2e757c7f4ca7fe20537681191ef | de7a73e13b203bc0cf93845f2b39d6be8ff5eac0 | refs/heads/master | 2021-01-01T16:40:57.565566 | 2017-07-21T01:40:25 | 2017-07-21T01:40:25 | 97,882,859 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | import sys
import os
import glob
import re
a=1
dir_path = "/mnt/c/Users/hp/OneDrive/GMU_Notes/Sem4/611/Project/two_core/Results/"
file_list = glob.glob(dir_path+'*') #use standard *NIX wildcards to get your file names, in this case, all the files with a .txt extension
file_list.sort()
#print(file_list)
b=1
#c=filename.split('Results/')[1]
m=0
for filename in file_list:
c=filename.split('Results/')[1]+".csv"
if re.match("(.*).A0", filename) or re.match("(.*).A1", filename):
continue
#elif (re.match("(*).A1", filename)):
# continue
print(filename)
with open(filename, 'r') as in_file:
#print in_file.readline(10)
print(c)
with open(c, 'w') as out_file:
b=b+1
c="test"+str(b)+".csv"
print(b)
if a:
out_file.write(filename.split('Results/')[1]+'\n \n')
else:
out_file.write(filename.split('Results/')[1]+'\n \n')
for line in in_file:
if re.match("(.*), ipc(.*)", line):
#print line,
out_file.write(line.split('ipc :')[1])
a=0
m=m+1
if m >1000:
break
out_file.close()
m=0
| [
"ajcletus500@gmail.com"
] | ajcletus500@gmail.com |
8eb17eeafb990dd19724e7110b8af45955b7f221 | aa37b7aec635fd62707c90a7b536926e20cddaef | /test/functional/test_runner.py | 59b3e989a6b2fc8b11611cac66ce1fcd22c7d4aa | [
"MIT"
] | permissive | planbcoin/planbcoin | d85b9345998c9a2221ea0b44ed0dec86c7d3dc1e | 7d132eebdce94f34ca2e74278b5ca09dc012d164 | refs/heads/master | 2020-12-02T20:58:31.167685 | 2017-08-06T17:57:51 | 2017-08-06T17:57:51 | 96,237,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,194 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The PlanBcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:PlanbcoinTestFramework.main`.
"""
import argparse
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
# Each color is a (reset, set) pair of ANSI escapes, used as COLOR[1] to
# turn the style on and COLOR[0] to reset it.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
    # Make sure python thinks it can write unicode to its stdout
    "\u2713".encode("utf_8").decode(sys.stdout.encoding)
    TICK = "✓ "
    CROSS = "✖ "
    CIRCLE = "○ "
except UnicodeDecodeError:
    # Fall back to plain ASCII glyphs on terminals that cannot round-trip
    # the unicode symbols.
    TICK = "P "
    CROSS = "x "
    CIRCLE = "o "
if os.name == 'posix':
    # primitive formatting on supported
    # terminal via ANSI escape sequences:
    BOLD = ('\033[0m', '\033[1m')
    BLUE = ('\033[0m', '\033[0;34m')
    RED = ('\033[0m', '\033[0;31m')
    GREY = ('\033[0m', '\033[1;30m')
# Exit codes used by the individual functional test scripts.
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS= [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet-hd.py',
'walletbackup.py',
# vv Tests less than 5m vv
'p2p-fullblocktest.py',
'fundrawtransaction.py',
'p2p-compactblocks.py',
'segwit.py',
# vv Tests less than 2m vv
'wallet.py',
'wallet-accounts.py',
'p2p-segwit.py',
'wallet-dump.py',
'listtransactions.py',
# vv Tests less than 60s vv
'sendheaders.py',
'zapwallettxes.py',
'importmulti.py',
'mempool_limit.py',
'merkle_blocks.py',
'receivedby.py',
'abandonconflict.py',
'bip68-112-113-p2p.py',
'rawtransactions.py',
'reindex.py',
# vv Tests less than 30s vv
'zmq_test.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'httpbasics.py',
'multi_rpc.py',
'proxy_test.py',
'signrawtransactions.py',
'disconnect_ban.py',
'decodescript.py',
'blockchain.py',
'disablewallet.py',
'net.py',
'keypool.py',
'p2p-mempool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'p2p-versionbits-warning.py',
'preciousblock.py',
'importprunedfunds.py',
'signmessages.py',
'nulldummy.py',
'import-rescan.py',
'mining.py',
'bumpfee.py',
'rpcnamedargs.py',
'listsinceblock.py',
'p2p-leaktests.py',
'wallet-encryption.py',
'uptime.py',
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'pruning.py',
# vv Tests less than 20m vv
'smartfees.py',
# vv Tests less than 5m vv
'maxuploadtarget.py',
'mempool_packages.py',
'dbcrash.py',
# vv Tests less than 2m vv
'bip68-sequence.py',
'getblocktemplate_longpoll.py',
'p2p-timeouts.py',
# vv Tests less than 60s vv
'bip9-softforks.py',
'p2p-feefilter.py',
'rpcbind_test.py',
# vv Tests less than 30s vv
'assumevalid.py',
'bip65-cltv.py',
'bip65-cltv-p2p.py',
'bipdersig-p2p.py',
'bipdersig.py',
'example_test.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
'p2p-acceptblock.py',
'replace-by-fee.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
    # These are python files that live in the functional tests directory, but are not test scripts.
    # check_script_list() uses this to flag unlisted .py files.
    "combine_logs.py",
    "create_cache.py",
    "test_runner.py",
]
def main():
    """Parse runner arguments, build the test list, and dispatch run_tests().

    Unrecognised ``--`` options are forwarded to each test script; bare
    positional args select individual tests from ALL_SCRIPTS.
    """
    # Parse arguments and pass through unrecognised args
    parser = argparse.ArgumentParser(add_help=False,
                                     usage='%(prog)s [test_runner.py options] [script options] [scripts]',
                                     description=__doc__,
                                     epilog='''
    Help text and arguments for individual test script:''',
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
    parser.add_argument('--exclude', '-x', help='specify a comma-seperated-list of scripts to exclude.')
    parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
    parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
    parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
    parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
    parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
    parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
    parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
    args, unknown_args = parser.parse_known_args()
    # args to be passed on always start with two dashes; tests are the remaining unknown args
    tests = [arg for arg in unknown_args if arg[:2] != "--"]
    passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
    # Read config generated by configure.
    config = configparser.ConfigParser()
    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
    config.read_file(open(configfile))
    passon_args.append("--configfile=%s" % configfile)
    # Set up logging
    logging_level = logging.INFO if args.quiet else logging.DEBUG
    logging.basicConfig(format='%(message)s', level=logging_level)
    # Create base test directory
    tmpdir = "%s/planbcoin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
    os.makedirs(tmpdir)
    logging.debug("Temporary test directory at %s" % tmpdir)
    enable_wallet = config["components"].getboolean("ENABLE_WALLET")
    enable_utils = config["components"].getboolean("ENABLE_UTILS")
    enable_planbcoind = config["components"].getboolean("ENABLE_BITCOIND")
    if config["environment"]["EXEEXT"] == ".exe" and not args.force:
        # https://github.com/planbcoin/planbcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
        # https://github.com/planbcoin/planbcoin/pull/5677#issuecomment-136646964
        print("Tests currently disabled on Windows by default. Use --force option to enable")
        sys.exit(0)
    if not (enable_wallet and enable_utils and enable_planbcoind):
        print("No functional tests to run. Wallet, utils, and planbcoind must all be enabled")
        print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
        sys.exit(0)
    # Build list of tests
    if tests:
        # Individual tests have been specified. Run specified tests that exist
        # in the ALL_SCRIPTS list. Accept the name with or without .py extension.
        tests = [re.sub("\.py$", "", t) + ".py" for t in tests]
        test_list = []
        for t in tests:
            if t in ALL_SCRIPTS:
                test_list.append(t)
            else:
                print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t))
    else:
        # No individual tests have been specified.
        # Run all base tests, and optionally run extended tests.
        test_list = BASE_SCRIPTS
        if args.extended:
            # place the EXTENDED_SCRIPTS first since the three longest ones
            # are there and the list is shorter
            test_list = EXTENDED_SCRIPTS + test_list
    # Remove the test cases that the user has explicitly asked to exclude.
    if args.exclude:
        tests_excl = [re.sub("\.py$", "", t) + ".py" for t in args.exclude.split(',')]
        for exclude_test in tests_excl:
            if exclude_test in test_list:
                test_list.remove(exclude_test)
            else:
                print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
    if not test_list:
        print("No valid test scripts specified. Check that your test is in one "
              "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
        sys.exit(0)
    if args.help:
        # Print help for test_runner.py, then print help of the first script (with args removed) and exit.
        parser.print_help()
        subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
        sys.exit(0)
    check_script_list(config["environment"]["SRCDIR"])
    if not args.keepcache:
        # Fresh cache by default so stale chainstate cannot skew results.
        shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
    run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args)
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=None):
    """Run every script in test_list, print a summary, and exit.

    Exits the process with status 0 if all tests passed (or were skipped),
    1 otherwise.  `args` is a list of extra flags passed through to each
    test script.
    """
    # Fix: a mutable default argument ([]) is shared between calls; use a
    # None sentinel instead and normalize it here.
    args = [] if args is None else args
    # Warn if planbcoind is already running (unix only)
    try:
        if subprocess.check_output(["pidof", "planbcoind"]) is not None:
            print("%sWARNING!%s There is already a planbcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
    except (OSError, subprocess.SubprocessError):
        pass
    # Warn if there is a cache directory
    cache_dir = "%s/test/cache" % build_dir
    if os.path.isdir(cache_dir):
        print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
    # Set env vars
    if "BITCOIND" not in os.environ:
        os.environ["BITCOIND"] = build_dir + '/src/planbcoind' + exeext
    tests_dir = src_dir + '/test/functional/'
    flags = ["--srcdir={}/src".format(build_dir)] + args
    flags.append("--cachedir=%s" % cache_dir)
    if enable_coverage:
        coverage = RPCCoverage()
        flags.append(coverage.flag)
        logging.debug("Initializing coverage directory at %s" % coverage.dir)
    else:
        coverage = None
    if len(test_list) > 1 and jobs > 1:
        # Populate cache once up front so parallel jobs don't race to build it
        subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
    # Run tests
    job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
    time0 = time.time()
    test_results = []
    max_len_name = len(max(test_list, key=len))
    for _ in range(len(test_list)):
        test_result, stdout, stderr = job_queue.get_next()
        test_results.append(test_result)
        if test_result.status == "Passed":
            logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
        elif test_result.status == "Skipped":
            logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
        else:
            # Failed: dump the captured output so the failure is diagnosable
            print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
    print_results(test_results, max_len_name, (int(time.time() - time0)))
    if coverage:
        coverage.report_rpc_coverage()
        logging.debug("Cleaning up coverage data")
        coverage.cleanup()
    # Clear up the temp directory if all subdirectories are gone
    if not os.listdir(tmpdir):
        os.rmdir(tmpdir)
    all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
    sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
    """Print a colored summary table of all results plus an aggregate row.

    Sorts `test_results` in place by name and sets each result's padding
    so that rows line up in one column-aligned table.
    """
    test_results.sort(key=lambda result: result.name.lower())
    header = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
    rows = []
    all_passed = True
    time_sum = 0
    for outcome in test_results:
        all_passed = all_passed and outcome.was_successful
        time_sum += outcome.time
        outcome.padding = max_len_name
        rows.append(str(outcome))
    status = TICK + "Passed" if all_passed else CROSS + "Failed"
    footer = BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
    print(header + "".join(rows) + footer + "Runtime: %s s\n" % (runtime))
class TestHandler:
    """
    Runs the test scripts passed in via `test_list`, at most `num_jobs`
    at a time, as child processes.  Call get_next() repeatedly to collect
    one finished result at a time.
    """
    def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
        assert(num_tests_parallel >= 1)
        self.num_jobs = num_tests_parallel
        self.tests_dir = tests_dir
        self.tmpdir = tmpdir
        self.test_list = test_list
        self.flags = flags
        # Number of child processes currently running.
        self.num_running = 0
        # In case there is a graveyard of zombie planbcoinds, we can apply a
        # pseudorandom offset to hopefully jump over them.
        # (625 is PORT_RANGE/MAX_NODES)
        self.portseed_offset = int(time.time() * 1000) % 625
        # Each job is (name, start_time, Popen, stdout_file, stderr_file).
        self.jobs = []
    def get_next(self):
        """Start jobs up to the parallelism limit, then block until one
        finishes and return (TestResult, stdout_text, stderr_text).

        Raises IndexError if called when no tests remain and none are running.
        """
        while self.num_running < self.num_jobs and self.test_list:
            # Add tests
            self.num_running += 1
            t = self.test_list.pop(0)
            portseed = len(self.test_list) + self.portseed_offset
            portseed_arg = ["--portseed={}".format(portseed)]
            # Spooled files keep small logs in memory, spilling to disk past 64 KiB.
            log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
            log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
            test_argv = t.split()
            tmpdir = ["--tmpdir=%s/%s_%s" % (self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)]
            self.jobs.append((t,
                              time.time(),
                              subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir,
                                               universal_newlines=True,
                                               stdout=log_stdout,
                                               stderr=log_stderr),
                              log_stdout,
                              log_stderr))
        if not self.jobs:
            raise IndexError('pop from empty list')
        while True:
            # Return first proc that finishes; poll every 0.5 s.
            time.sleep(.5)
            for j in self.jobs:
                (name, time0, proc, log_out, log_err) = j
                if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
                    # In travis, timeout individual tests after 20 minutes (to stop tests hanging and not
                    # providing useful output.
                    proc.send_signal(signal.SIGINT)
                if proc.poll() is not None:
                    log_out.seek(0), log_err.seek(0)
                    [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
                    log_out.close(), log_err.close()
                    # Any output on stderr counts as a failure even if the exit code is 0.
                    if proc.returncode == TEST_EXIT_PASSED and stderr == "":
                        status = "Passed"
                    elif proc.returncode == TEST_EXIT_SKIPPED:
                        status = "Skipped"
                    else:
                        status = "Failed"
                    self.num_running -= 1
                    self.jobs.remove(j)
                    return TestResult(name, status, int(time.time() - time0)), stdout, stderr
            # Progress indicator printed once per polling sweep with no finisher.
            print('.', end='', flush=True)
class TestResult():
    """Outcome record for one functional test script run."""
    def __init__(self, name, status, time):
        self.name = name
        self.status = status  # "Passed", "Skipped" or "Failed"
        self.time = time      # wall-clock duration in seconds
        self.padding = 0      # column width; set later by print_results()
    def __repr__(self):
        # Pick the color/glyph pair matching the status.
        if self.status == "Passed":
            color, glyph = BLUE, TICK
        elif self.status == "Failed":
            color, glyph = RED, CROSS
        elif self.status == "Skipped":
            color, glyph = GREY, CIRCLE
        row = "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time)
        return color[1] + row + color[0]
    @property
    def was_successful(self):
        """True unless the test failed (skipped counts as successful)."""
        return self.status != "Failed"
def check_script_list(src_dir):
    """Check scripts directory.

    Warn about any *.py file in the functional tests directory that is
    missing from ALL_SCRIPTS/NON_SCRIPTS; on Travis, treat that as fatal.
    """
    script_dir = src_dir + '/test/functional/'
    on_disk = {name for name in os.listdir(script_dir) if name[-3:] == ".py"}
    listed = {entry.split()[0] for entry in ALL_SCRIPTS + NON_SCRIPTS}
    missed_tests = list(on_disk - listed)
    if missed_tests:
        print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
        if os.getenv('TRAVIS') == 'true':
            # On travis this warning is an error to prevent merging incomplete commits into master
            sys.exit(1)
class RPCCoverage(object):
    """
    Coverage reporting utilities for test_runner.
    Each test script subprocess writes `coverage.*` files into a shared
    directory, listing the RPC commands it invoked; `rpc_interface.txt`
    in the same directory is the complete command list.  After the run,
    the union of invoked commands is diffed against the full list to
    report uncovered RPC commands.
    See also: test/functional/test_framework/coverage.py
    """
    def __init__(self):
        # Temp directory shared with test subprocesses via self.flag.
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = '--coveragedir=%s' % self.dir
    def report_rpc_coverage(self):
        """Print out RPC commands that were unexercised by tests."""
        uncovered = self._get_uncovered_rpc_commands()
        if not uncovered:
            print("All RPC commands covered.")
            return
        print("Uncovered RPC commands:")
        print("".join(" - %s\n" % cmd for cmd in sorted(uncovered)))
    def cleanup(self):
        """Remove the coverage directory and everything in it."""
        return shutil.rmtree(self.dir)
    def _get_uncovered_rpc_commands(self):
        """Return the set of RPC commands no test invoked.

        Raises RuntimeError when the reference command list is missing.
        """
        # Filenames shared with `test/functional/test-framework/coverage.py`.
        reference = os.path.join(self.dir, 'rpc_interface.txt')
        if not os.path.isfile(reference):
            raise RuntimeError("No coverage reference found")
        with open(reference, 'r') as ref_file:
            all_cmds = {line.strip() for line in ref_file}
        covered_cmds = set()
        for root, _, files in os.walk(self.dir):
            for name in files:
                if not name.startswith('coverage.'):
                    continue
                with open(os.path.join(root, name), 'r') as cov_file:
                    covered_cmds.update(line.strip() for line in cov_file)
        return all_cmds - covered_cmds
# Run the test runner only when executed directly, not when imported.
if __name__ == '__main__':
    main()
| [
"ysoheil@gmail.com"
] | ysoheil@gmail.com |
6da3f1090aaf84be9df93d0303e20872eb003fb0 | be2f1b5a43379d70e800e0aa2f25c70bbfeca86b | /dosirak/migrations/0014_auto_20160504_0430.py | dab458b4dd732fa493f831230ca91cd9ffff47e5 | [] | no_license | drexly/tongin | 2aab20d49d84662a82e7f9023194490af6b5b06e | d2dcb571c2100e260d93294710997fea3c73ecb3 | refs/heads/master | 2021-06-28T21:23:21.056502 | 2017-01-17T03:00:57 | 2017-01-17T03:00:57 | 58,129,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,781 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-05-04 04:30
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('dosirak', '0013_auto_20160504_0312'),
]
operations = [
migrations.CreateModel(
name='air',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text4', models.CharField(default=b'\xeb\xb6\x84\xec\x9c\x84\xea\xb8\xb0', max_length=20)),
('votes4', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='clean',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text5', models.CharField(default=b'\xec\xb2\xad\xea\xb2\xb0\xeb\x8f\x84', max_length=20)),
('votes5', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='price',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text2', models.CharField(default=b'\xea\xb0\x80\xea\xb2\xa9', max_length=20)),
('votes2', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='service',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text3', models.CharField(default=b'\xec\x84\x9c\xeb\xb9\x84\xec\x8a\xa4', max_length=20)),
('votes3', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='taste',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text1', models.CharField(default=b'\xeb\xa7\x9b', max_length=20)),
('votes1', models.IntegerField(default=0)),
],
),
migrations.RemoveField(
model_name='choice',
name='question',
),
migrations.AlterField(
model_name='question',
name='pub_date',
field=models.DateTimeField(default=datetime.datetime(2016, 5, 4, 4, 30, 48, 990544, tzinfo=utc), verbose_name=b'Published Date'),
),
migrations.DeleteModel(
name='Choice',
),
migrations.AddField(
model_name='taste',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dosirak.Question'),
),
migrations.AddField(
model_name='service',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dosirak.Question'),
),
migrations.AddField(
model_name='price',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dosirak.Question'),
),
migrations.AddField(
model_name='clean',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dosirak.Question'),
),
migrations.AddField(
model_name='air',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dosirak.Question'),
),
]
| [
"KimJuhyeon@gimjuhyeon-ui-Mac-Pro.local"
] | KimJuhyeon@gimjuhyeon-ui-Mac-Pro.local |
05cd410ea1feb9e5ce69d50837df1634de6f31ed | d9031bfff990457e53470d8e578314980024cfe8 | /fluent_python_eg/2、数据结构/2.2.1列表推导.py | 616a616d2ccf9968eb7f7347d69d328069632f6b | [] | no_license | L316645200/_Python | f2d69772b2769b73061d3c5c8e957214a4ad9dfd | ca08b86da4e97df8e0f6e704c287f4fdb22f8675 | refs/heads/master | 2023-06-25T07:34:07.629789 | 2021-06-30T03:28:02 | 2021-06-30T03:28:02 | 249,311,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | symbols = '$¢£¥€¤'
# List comprehension: code point of every character in *symbols* (defined above).
codes = [ord(_) for _ in symbols]
print(codes)
# Same, but keeping only non-ASCII code points (> 127).
codes = [ord(_) for _ in symbols if ord(_) > 127]
print(codes)
# Equivalent result built with map/filter instead of a list comprehension.
beyond_ascii = list(filter(lambda c: c > 127, map(ord, symbols)))
print(beyond_ascii)
x = 'ABC'
# The comprehension's loop variable has its own scope, so the outer x survives.
dummy = [ord(x) for x in 'ABC']
print(x)
storage_type = (('10', '容量型本地盘'),
                ('11', '容量型云盘'),
                ('20', '性能型本地盘'),
                ('21', '性能型云盘'),
                ('22', 'LVM云盘'))
# Build a list of {'value': ..., 'text': ...} dicts from the (code, label) pairs:
# [{'value': 10, 'text': '容量型本地盘'}, {}, {}, {}, {}]
s = [{'value': _[0], 'text': _[-1]} for _ in storage_type]
print(s)
"linjq@kaopuyun.com"
] | linjq@kaopuyun.com |
6cf12299dfeaefae34ec2dbc281245a61fb76952 | 099f8b113bdf2a6f657e9b800aa657bb29bb94fa | /Python_Foundation/regularexp2.py | d114107672043e18c89e9face44d4c0602a13e97 | [] | no_license | MayurKasture/Mayur_Python_Foundation_Cource | a02d1442aa5120f6ae3f840f74b394cd4864cf58 | 9ff42a4406456897d3a0c9671e28a2d5fd441213 | refs/heads/master | 2020-09-26T16:31:42.789450 | 2019-12-06T09:28:43 | 2019-12-06T09:28:43 | 226,291,743 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | import re
str="5349 124 8494 8 823"
#1 get list of nnumbers
lst=re.findall("\d+",str)
print(lst)
lst=re.split("\s+",str)
print(lst)
#2 get list of all 3 digit numbers
lst= re.findall(r"\b\d{3}\b",str)
print(lst)
# 3. remove single digit from list
lst= re.sub(r"\b\d{1}\b","",str)
print(lst)
#4 search string contain 4 digit followed by 3 digit
lst= re.search("\d{4} \d{3}",str)
print(lst.group()) | [
"noreply@github.com"
] | noreply@github.com |
a22053a94d32942f8b2f31f59b1e33414a43912f | bdaff7d03ad5278bdb57c4894cf2678e4db7082d | /arquivos.py | e61677c903fdd66e5c540720fe9535b06c9d1f53 | [] | no_license | Dsblima/python_e_mysql | aff68d8b401135fd28cbf3be676982fc9dc312fd | 83b0d6436f469d39da6166295e2277dd7f6e331b | refs/heads/master | 2020-06-30T06:21:59.317363 | 2019-08-24T22:23:59 | 2019-08-24T22:23:59 | 200,754,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | """
- PARA ABRIR UM ARQUIVO arquivo = open(nomedoarquivo.txt,modo)
- O MODO PODE SER (r,a,w,a+,w+,r+)
- PARA LER O CONTEÚDO DE UM ARQUIVO PODEMOS USAR AS FUNÇÕES:
read() - lê o arquivo inteiro
readline() - lê uma linha do arquivo
readlines() - lê o conteúdo inteiro arquivo e retorna em um array de strings
"""
meuArquivo = open("arquivo.txt","a")
meuArquivo.write("Testando inserção\n")
meuArquivo.close()
meuArquivo= open("arquivo.txt","r")
linhas = meuArquivo.readlines()
for linha in linhas:
print(linha)
meuArquivo.close() | [
"dsbl@ecomp.poli.br"
] | dsbl@ecomp.poli.br |
441133699c06db56ac4b763ddf70f219012d95bb | ba88e2c659b3ca58deb97e78c69b1c0c1cb73bb6 | /example_jks/generate_examples.py | 047447acff5afb46d5238b664118b2662b061589 | [
"LicenseRef-scancode-mit-old-style",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | floyd-fuh/JKS-private-key-cracker-hashcat | 349ff59954c608dbaa24555efc9cab421bd8f103 | 6b2b557ec81745d0b43f8466f1619ef4f2c7019d | refs/heads/master | 2021-04-12T10:53:00.449891 | 2020-08-10T12:48:13 | 2020-08-10T12:48:13 | 94,535,141 | 189 | 22 | null | 2020-09-21T12:32:56 | 2017-06-16T10:50:29 | Java | UTF-8 | Python | false | false | 2,031 | py | #!/usr/bin/env pypy
#By floyd https://www.floyd.ch @floyd_ch
#modzero AG https://www.modzero.ch @mod0
import os
def executeInShell(command):
    """Run *command* through the system shell and block until it finishes.

    NOTE: shell=True is only safe here because every command string is
    built from constants within this script.
    """
    import subprocess
    subprocess.Popen(command, shell=True).wait()
# Generate example keystores for several numeric passwords; each store uses
# the password both as alias and as store/key password, so the filenames
# encode algorithm, key size and password.
for passw in ["123456", "1234567", "12345678", "123456789", "1234567890"]:
    # RSA - can be pretty much arbitrary
    for keysize in ["512", "777", "1024", "2048", "4096", "8192"]:
        executeInShell("keytool -genkey -dname 'CN=test, OU=test, O=test, L=test, S=test, C=CH' -noprompt -alias "+passw+" -keysize "+keysize+" -keyalg RSA -keystore rsa_"+keysize+"_"+passw+".jks -storepass "+passw+ " -keypass "+passw)
    # DSA - so far only these two sizes worked for me
    for keysize in ["512", "1024"]:
        executeInShell("keytool -genkey -dname 'CN=test, OU=test, O=test, L=test, S=test, C=CH' -noprompt -alias "+passw+" -keysize "+keysize+" -keyalg DSA -keystore dsa_"+keysize+"_"+passw+".jks -storepass "+passw + " -keypass "+passw)
    # EC - these are all that work in my version of keytool
    for curve in ["256", "283", "359", "384", "409", "431", "521"]: #[str(x) for x in range(256, 571)]:
        executeInShell("keytool -genkey -dname 'CN=test, OU=test, O=test, L=test, S=test, C=CH' -noprompt -alias "+passw+" -keysize "+curve+" -keyalg EC -keystore ec_"+curve+"_"+passw+".jks -storepass "+passw + " -keypass "+passw)
#Now one example KeyStore that has two keys in it...
# Create the store with the first key, generate the second key in a
# temporary store, import it, then delete the temporary store.
executeInShell("keytool -genkey -dname 'CN=test, OU=test, O=test, L=test, S=test, C=CH' -noprompt -alias first -keysize 2048 -keyalg RSA -keystore twokeys_123456.jks -storepass 123456 -keypass 123456")
executeInShell("keytool -genkey -dname 'CN=test, OU=test, O=test, L=test, S=test, C=CH' -noprompt -alias second -keysize 4096 -keyalg RSA -keystore second.jks -storepass 123456 -keypass 222222")
executeInShell("keytool -importkeystore -srckeystore second.jks -destkeystore twokeys_123456.jks -srcstorepass 123456 -deststorepass 123456 -srckeypass 222222 -srcalias second")
os.remove("second.jks")
"tobias@modzero.ch"
] | tobias@modzero.ch |
c67609483dcf9c2fa41c88e4b9d12dd853df14eb | 00aa9875275ac00f978b388fa32bf241aee4b458 | /samples/vsphere/contentlibrary/contentupdate/content_update.py | d65ee6ec054e2e8481823eab6b00f26463c91225 | [
"MIT"
] | permissive | Onebooming/vsphere-automation-sdk-python | 3898404cc10b5bec284933d2beda9e18052346fc | 7576b3abb76cc03947eb1e13c6015b35574a704f | refs/heads/master | 2020-09-21T09:15:50.511280 | 2019-11-25T09:56:05 | 2019-11-25T09:56:05 | 224,751,817 | 1 | 0 | MIT | 2019-11-29T01:01:25 | 2019-11-29T01:01:25 | null | UTF-8 | Python | false | false | 8,086 | py | #!/usr/bin/env python
"""
* *******************************************************
* Copyright VMware, Inc. 2016. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
__copyright__ = 'Copyright 2016 VMware, Inc. All rights reserved.'
__vcenter_version__ = '6.0+'
try:
import urllib2
except ImportError:
import urllib.request as urllib2
from com.vmware.content.library.item_client import UpdateSessionModel
from samples.vsphere.common.id_generator import generate_random_uuid
from samples.vsphere.common.sample_base import SampleBase
from samples.vsphere.contentlibrary.lib.cls_api_client import ClsApiClient
from samples.vsphere.contentlibrary.lib.cls_api_helper import ClsApiHelper
class ContentUpdate(SampleBase):
    """
    Demonstrates the workflow of updating a content library item.
    Note: the workflow needs an existing datastore (of type vmfs) with available storage.
    """
    # Filenames used for the ISO replace scenario; the second file replaces
    # the first under the same session file name.
    ISO_FILE_1 = 'test.iso'
    ISO_FILE_2 = 'test-2.iso'
    ISO_ITEM_NAME = 'test'
    def __init__(self):
        SampleBase.__init__(self, self.__doc__)
        self.servicemanager = None
        self.client = None
        self.helper = None
        self.datastore_name = None
        self.lib_name = "demo-lib"
        self.local_library = None
    def _options(self):
        # Extra CLI argument consumed by _setup().
        self.argparser.add_argument('-datastorename', '--datastorename',
                                    help='The name of the datastore where '
                                         'the library will be created.')
    def _setup(self):
        self.datastore_name = self.args.datastorename
        assert self.datastore_name is not None
        self.servicemanager = self.get_service_manager()
        self.client = ClsApiClient(self.servicemanager)
        self.helper = ClsApiHelper(self.client, self.skip_verification)
    def _execute(self):
        # Create a local library backed by the given datastore, then run
        # both content-update scenarios against it.
        storage_backings = self.helper.create_storage_backings(self.servicemanager,
                                                               self.datastore_name)
        library_id = self.helper.create_local_library(storage_backings, self.lib_name)
        self.local_library = self.client.local_library_service.get(library_id)
        self.delete_and_upload_scenario(library_id)
        self.replace_scenario(library_id)
    def replace_scenario(self, library_id):
        """
        :param library_id: the Iso item will be created, and then replaced in this library
        :return: None
        Content update scenario 2:
        Update ISO library item by creating an update session for the
        item, then adding the new ISO file using the same session file
        name into the update session, which will replace the existing
        ISO file upon session complete.
        """
        iso_item_id = self.helper.create_library_item(library_id=library_id,
                                                      item_name=self.ISO_ITEM_NAME,
                                                      item_description='Sample iso file',
                                                      item_type='iso')
        print('ISO Library item version (on creation) {0}:'.format(
            self.get_item_version(iso_item_id)))
        iso_files_map = self.helper.get_iso_file_map(item_filename=self.ISO_FILE_1,
                                                     disk_filename=self.ISO_FILE_1)
        self.helper.upload_files(library_item_id=iso_item_id, files_map=iso_files_map)
        original_version = self.get_item_version(iso_item_id)
        print('ISO Library item version (on original content upload) {0}:'.format(
            original_version))
        session_id = self.client.upload_service.create(
            create_spec=UpdateSessionModel(library_item_id=iso_item_id),
            client_token=generate_random_uuid())
        # Use the same item filename (update endpoint, as it's a replace scenario)
        iso_files_map = self.helper.get_iso_file_map(item_filename=self.ISO_FILE_1,
                                                     disk_filename=self.ISO_FILE_2)
        self.helper.upload_files_in_session(iso_files_map, session_id)
        self.client.upload_service.complete(session_id)
        self.client.upload_service.delete(session_id)
        updated_version = self.get_item_version(iso_item_id)
        print('ISO Library item version (after content update): {0}'.format(
            updated_version))
        assert updated_version > original_version, 'content update should increase the version'
    def delete_and_upload_scenario(self, library_id):
        """
        :param library_id: the OVF item will be created and updated in this library
        :return: None
        Content update scenario 1:
        Update OVF library item by creating an update session for the
        OVF item, removing all existing files in the session, then
        adding all new files into the same update session, and completing
        the session to finish the content update.
        """
        # Create a new library item in the content library for uploading the files
        ovf_item_id = self.helper.create_library_item(library_id=library_id,
                                                      item_name='demo-ovf-item',
                                                      item_description='Sample simple VM template',
                                                      item_type='ovf')
        assert ovf_item_id is not None
        print('Library item created id: {0}'.format(ovf_item_id))
        print('OVF Library item version (at creation) {0}:'.format(
            self.get_item_version(ovf_item_id)))
        # Upload a VM template to the CL
        ovf_files_map = self.helper.get_ovf_files_map(ClsApiHelper.SIMPLE_OVF_RELATIVE_DIR)
        self.helper.upload_files(library_item_id=ovf_item_id, files_map=ovf_files_map)
        print('Uploaded ovf and vmdk files to library item {0}'.format(ovf_item_id))
        original_version = self.get_item_version(ovf_item_id)
        print('OVF Library item version (on original content upload): {0}'.format(
            original_version))
        # Create a new session and perform content update
        session_id = self.client.upload_service.create(
            create_spec=UpdateSessionModel(library_item_id=ovf_item_id),
            client_token=generate_random_uuid())
        # Remove every file currently attached to the item...
        existing_files = self.client.upload_file_service.list(session_id)
        for file in existing_files:
            print('deleting {0}'.format(file.name))
            self.client.upload_file_service.remove(session_id, file.name)
        # ...then upload the replacement OVF template in the same session.
        ovf_files_map = self.helper.get_ovf_files_map(
            ovf_location=ClsApiHelper.PLAIN_OVF_RELATIVE_DIR)
        self.helper.upload_files_in_session(ovf_files_map, session_id)
        self.client.upload_service.complete(session_id)
        self.client.upload_service.delete(session_id)
        updated_version = self.get_item_version(ovf_item_id)
        print('OVF Library item version (after content update): {0}'.format(
            updated_version))
        assert updated_version > original_version, 'content update should increase the version'
    def get_item_version(self, item_id):
        """Return the current content version string of a library item."""
        ovf_item_model = self.client.library_item_service.get(item_id)
        pre_update_version = ovf_item_model.content_version
        return pre_update_version
    def _cleanup(self):
        # Delete the demo library (and its items) if it was created.
        if self.local_library:
            self.client.local_library_service.delete(library_id=self.local_library.id)
            print('Deleted Library Id: {0}'.format(self.local_library.id))
def main():
    """Build the ContentUpdate sample and run its standard workflow."""
    ContentUpdate().main()
# Allow the sample to be executed directly as a script.
if __name__ == '__main__':
    main()
| [
"het@vmware.com"
] | het@vmware.com |
277b796133b417f3f03e8d037d77c96b7187c638 | 5d451964cdab06369ae86e1b9594ebc6bfec789e | /Exercise/concat.py | 05af00724625624bf169a27b3e3a5bdee35b4605 | [] | no_license | Donishal/My-Project | 308d5e07a9285530a12244c66a75b89b8a01912d | 9ee4f1d354efc79f785e40b8b9652ecb01cc4694 | refs/heads/master | 2020-09-03T12:32:48.931940 | 2019-11-04T09:31:31 | 2019-11-04T09:31:31 | 219,463,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | dic1={1:10, 2:20}
dic2 = {3: 30, 4: 40}
dic3 = {5: 50, 6: 60}
# Fold both extra dicts into dic1 (defined above) in place, then show it.
for extra in (dic2, dic3):
    dic1.update(extra)
print(dic1)
"doniisabel7@gmail.com"
] | doniisabel7@gmail.com |
d0d5a0a378ff8e2e830f3b70dd51e7701ecdd2f4 | b243ce8a8e4ed5eb299adaa6e95ef8f268ea1744 | /advent/solver/day_6/task_1/__init__.py | 0c777935f4c9de1d73992dc3aa250a1470b5f750 | [] | no_license | LukaszSac/AdventOfCode | b62d26ab62c303abaab26f6b83f591fe9c4beca4 | d8c0438a9a7a47febabb04133800e0abcea35a5f | refs/heads/master | 2023-01-30T00:15:17.206813 | 2020-12-09T21:25:03 | 2020-12-09T21:25:03 | 318,300,208 | 0 | 0 | null | 2020-12-08T19:08:33 | 2020-12-03T19:37:59 | Python | UTF-8 | Python | false | false | 39 | py | from .solver import DaySixTaskOneSolver | [
"sacewiczlukasz@gmail.com"
] | sacewiczlukasz@gmail.com |
8f6544b242c2b325c60dfe4ba718e842a1bd5da5 | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-2/dn4 - krajevne funkcije/M-17221-2470.py | 10d80965dddb932bac3c85dd6f23dabfac539c8c | [] | no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,073 | py | # Tu pišite svoje funkcije:
from math import *
def koordinate(ime, kraji):
    """Return the (x, y) tuple of the place named *ime*, or None if absent.

    *kraji* is a sequence of (name, x, y) records.
    """
    for zapis in kraji:
        if zapis[0] != ime:
            continue
        return (zapis[1], zapis[2])
    return None
def razdalja_koordinat(x1, y1, x2, y2):
    """Return the Euclidean distance between points (x1, y1) and (x2, y2)."""
    dx = x2 - x1
    dy = y2 - y1
    return sqrt(dx * dx + dy * dy)
def razdalja(ime1, ime2, kraji):
    """Return the distance between the places named *ime1* and *ime2*."""
    # Exactly two koordinate() lookups and one razdalja_koordinat() call —
    # the accompanying tests count these invocations.
    x1, y1 = koordinate(ime1, kraji)
    x2, y2 = koordinate(ime2, kraji)
    return razdalja_koordinat(x1, y1, x2, y2)
def v_dometu(ime, domet, kraji):
    """Return the names of all other places within *domet* of *ime*."""
    return [kraj[0] for kraj in kraji
            if kraj[0] != ime and razdalja(ime, kraj[0], kraji) <= domet]
def najbolj_oddaljeni(ime, imena, kraji):
    """Return the name in *imena* farthest from *ime*, or None if empty.

    Ties keep the first name encountered, matching the original scan.
    """
    return max(imena, key=lambda drugi: razdalja(ime, drugi, kraji), default=None)
def zalijemo(ime, domet, kraji):
    """Return the farthest place from *ime* that is still within *domet*."""
    dosegljivi = v_dometu(ime, domet, kraji)
    return najbolj_oddaljeni(ime, dosegljivi, kraji)
def presek(s1, s2):
    """Return the elements of *s1* that also appear in *s2*, in s1 order.

    Uses a set for O(1) membership tests instead of the original O(n*m)
    nested-loop scan; this also appends each matching element of s1 only
    once even when it occurs several times in s2 (the nested loops would
    duplicate it per occurrence, which was a defect).
    """
    druga = set(s2)
    return [kraj for kraj in s1 if kraj in druga]
def skupno_zalivanje(ime1, ime2, domet, kraji):
    """Return names of places strictly within *domet* of BOTH ime1 and ime2.

    Note the strict `<` comparison here (v_dometu uses `<=`), preserved
    from the original.
    """
    return [kraj[0] for kraj in kraji
            if razdalja(ime1, kraj[0], kraji) < domet
            and razdalja(ime2, kraj[0], kraji) < domet]
import unittest
class TestKraji(unittest.TestCase):
vsi_kraji = [
('Brežice', 68.66, 7.04),
('Lenart', 85.20, 78.75),
('Rateče', -65.04, 70.04),
('Ljutomer', 111.26, 71.82),
('Rogaška Slatina', 71.00, 42.00),
('Ribnica', 7.10, -10.50),
('Dutovlje', -56.80, -6.93),
('Lokve', -57.94, 19.32),
('Vinica', 43.81, -38.43),
('Brtonigla', -71.00, -47.25),
('Kanal', -71.00, 26.25),
('Črnomelj', 39.05, -27.93),
('Trbovlje', 29.61, 35.07),
('Beltinci', 114.81, 80.54),
('Domžale', -2.34, 31.50),
('Hodoš', 120.70, 105.00),
('Škofja Loka', -23.64, 35.07),
('Velike Lašče', 0.00, 0.00),
('Velenje', 33.16, 54.29),
('Šoštanj', 29.61, 57.75),
('Laško', 42.60, 33.29),
('Postojna', -29.54, -5.25),
('Ilirska Bistrica', -27.19, -27.93),
('Radenci', 100.61, 84.00),
('Črna', 15.41, 66.57),
('Radeče', 39.05, 24.57),
('Vitanje', 47.36, 57.75),
('Bled', -37.84, 56.07),
('Tolmin', -63.90, 36.75),
('Miren', -72.14, 7.04),
('Ptuj', 87.61, 61.32),
('Gornja Radgona', 97.06, 89.25),
('Plave', -73.34, 21.00),
('Novo mesto', 37.91, -3.47),
('Bovec', -76.89, 52.50),
('Nova Gorica', -69.79, 12.29),
('Krško', 60.35, 14.07),
('Cerknica', -18.89, -3.47),
('Slovenska Bistrica', 66.31, 57.75),
('Anhovo', -72.14, 22.78),
('Ormož', 107.71, 61.32),
('Škofije', -59.14, -27.93),
('Čepovan', -60.35, 22.78),
('Murska Sobota', 108.91, 87.57),
('Ljubljana', -8.24, 22.78),
('Idrija', -43.74, 17.54),
('Radlje ob Dravi', 41.46, 82.32),
('Žalec', 37.91, 43.79),
('Mojstrana', -49.70, 64.79),
('Log pod Mangartom', -73.34, 59.54),
('Podkoren', -62.69, 70.04),
('Kočevje', 16.61, -21.00),
('Soča', -69.79, 52.50),
('Ajdovščina', -53.25, 5.25),
('Bohinjska Bistrica', -48.49, 47.25),
('Tržič', -22.44, 56.07),
('Piran', -75.69, -31.50),
('Kranj', -20.09, 43.79),
('Kranjska Gora', -60.35, 68.25),
('Izola', -68.59, -31.50),
('Radovljica', -31.95, 54.29),
('Gornji Grad', 13.06, 49.03),
('Šentjur', 54.46, 40.32),
('Koper', -63.90, -29.72),
('Celje', 45.01, 42.00),
('Mislinja', 42.60, 66.57),
('Metlika', 48.56, -19.21),
('Žaga', -81.65, 49.03),
('Komen', -63.90, -1.68),
('Žužemberk', 21.30, 0.00),
('Pesnica', 74.55, 80.54),
('Vrhnika', -23.64, 14.07),
('Dravograd', 28.40, 78.75),
('Kamnik', -1.14, 40.32),
('Jesenice', -40.19, 64.79),
('Kobarid', -74.55, 43.79),
('Portorož', -73.34, -33.18),
('Muta', 37.91, 82.32),
('Sežana', -54.39, -13.96),
('Vipava', -47.29, 1.79),
('Maribor', 72.21, 75.28),
('Slovenj Gradec', 31.95, 71.82),
('Litija', 14.20, 22.78),
('Na Logu', -62.69, 57.75),
('Stara Fužina', -52.04, 47.25),
('Motovun', -56.80, -52.50),
('Pragersko', 73.41, 57.75),
('Most na Soči', -63.90, 33.29),
('Brestanica', 60.35, 15.75),
('Savudrija', -80.44, -34.96),
('Sodražica', 0.00, -6.93),
]
class CountCalls:
def __init__(self, f):
self.f = f
self.call_count = 0
def __call__(self, *args, **kwargs):
self.call_count += 1
return self.f(*args, **kwargs)
@classmethod
def setUpClass(cls):
global koordinate, razdalja_koordinat
try:
koordinate = cls.CountCalls(koordinate)
except:
pass
try:
razdalja_koordinat = cls.CountCalls(razdalja_koordinat)
except:
pass
def test_1_koordinate(self):
    """koordinate(name, places) returns the (x, y) pair, or None if absent."""
    kraji = [
        ('Brežice', 68.66, 7.04),
        ('Lenart', 85.20, 78.75),
        ('Rateče', -65.04, 70.04),
        ('Ljutomer', 111.26, 71.82)
    ]
    self.assertEqual(koordinate("Brežice", kraji), (68.66, 7.04))
    self.assertEqual(koordinate("Lenart", kraji), (85.20, 78.75))
    self.assertEqual(koordinate("Rateče", kraji), (-65.04, 70.04))
    self.assertEqual(koordinate("Ljutomer", kraji), (111.26, 71.82))
    self.assertIsNone(koordinate("Ljubljana", kraji))
    # Degenerate inputs: single-element and empty lists.
    kraji = [('Brežice', 68.66, 7.04)]
    self.assertEqual(koordinate("Brežice", kraji), (68.66, 7.04))
    self.assertIsNone(koordinate("Lenart", kraji))
    kraji = []
    self.assertIsNone(koordinate("Brežice", kraji))
def test_1_range_len(self):
    """Forces iteration without indexing: __getitem__ raises on purpose."""
    class NoGetItem(list):
        def __getitem__(*x):
            raise IndexError("Nauči se (pravilno) uporabljati zanko for!")
    kraji = NoGetItem([('Brežice', 68.66, 7.04), ('Lenart', 85.20, 78.75),
                       ('Rateče', -65.04, 70.04)])
    self.assertEqual(koordinate("Brežice", kraji), (68.66, 7.04))
    self.assertEqual(koordinate("Lenart", kraji), (85.20, 78.75))
    self.assertEqual(koordinate("Rateče", kraji), (-65.04, 70.04))
    self.assertIsNone(koordinate("Ljubljana", kraji))
def test_2_razdalja_koordinat(self):
    """Euclidean distance between two coordinate pairs, all quadrants."""
    self.assertEqual(razdalja_koordinat(0, 0, 1, 0), 1)
    self.assertEqual(razdalja_koordinat(0, 0, 0, 1), 1)
    self.assertEqual(razdalja_koordinat(0, 0, -1, 0), 1)
    self.assertEqual(razdalja_koordinat(0, 0, 0, -1), 1)
    self.assertEqual(razdalja_koordinat(1, 0, 0, 0), 1)
    self.assertEqual(razdalja_koordinat(0, 1, 0, 0), 1)
    self.assertEqual(razdalja_koordinat(-1, 0, 0, 0), 1)
    self.assertEqual(razdalja_koordinat(0, -1, 0, 0), 1)
    # 3-4-5 triangles in every orientation.
    self.assertEqual(razdalja_koordinat(1, 2, 4, 6), 5)
    self.assertEqual(razdalja_koordinat(1, 2, -2, 6), 5)
    self.assertEqual(razdalja_koordinat(1, 2, 4, -2), 5)
    self.assertEqual(razdalja_koordinat(1, 2, -2, -2), 5)
    from math import sqrt
    self.assertAlmostEqual(razdalja_koordinat(1, 2, 0, 1), sqrt(2))
def test_3_razdalja_krajev(self):
    """razdalja(a, b, places) is symmetric and must delegate to the helpers."""
    kraji = [
        ('Brežice', 10, 20),
        ('Lenart', 13, 24),
        ('Rateče', 17, 20),
        ('Ljutomer', 8, 36)
    ]
    from math import sqrt
    self.assertEqual(razdalja("Brežice", "Lenart", kraji), 5)
    self.assertEqual(razdalja("Lenart", "Brežice", kraji), 5)
    self.assertEqual(razdalja("Brežice", "Rateče", kraji), 7)
    self.assertAlmostEqual(razdalja("Lenart", "Rateče", kraji), sqrt(32))
    self.assertEqual(razdalja("Lenart", "Ljutomer", kraji), 13)
    # Delegation check via the CountCalls wrappers installed in setUpClass.
    koordinate.call_count = razdalja_koordinat.call_count = 0
    razdalja("Brežice", "Lenart", kraji)
    self.assertEqual(
        koordinate.call_count, 2,
        "Funkcija `razdalja` mora dvakrat poklicati `koordinate`")
    self.assertEqual(
        razdalja_koordinat.call_count, 1,
        "Funkcija `razdalja` mora enkrat poklicati `razdalja`")
def test_4_v_dometu(self):
    """v_dometu(center, r, places): names strictly-or-inclusively in range r."""
    kraji = [
        ('Lenart', 13, 24),
        ('Brežice', 10, 20),  # Lenart <-> Brežice = 5
        ('Rateče', 17, 20),   # Lenart <-> Rateče = 5.66
        ('Ljutomer', 8, 36)   # Lenart <-> Ljutomer = 13
    ]
    self.assertEqual(v_dometu("Lenart", 5, kraji), ["Brežice"])
    self.assertEqual(v_dometu("Lenart", 3, kraji), [])
    self.assertEqual(set(v_dometu("Lenart", 6, kraji)), {"Brežice", "Rateče"})
    kraji = self.vsi_kraji
    self.assertEqual(set(v_dometu("Ljubljana", 20, kraji)), {'Vrhnika', 'Domžale', 'Kamnik', 'Škofja Loka'})
def test_5_najbolj_oddaljeni(self):
    """najbolj_oddaljeni picks, among candidates, the place farthest away."""
    kraji = [
        ('Lenart', 13, 24),
        ('Brežice', 10, 20),  # Lenart <-> Brežice = 5
        ('Rateče', 17, 20),   # Lenart <-> Rateče = 5.66
        ('Ljutomer', 8, 36)   # Lenart <-> Ljutomer = 13
    ]
    self.assertEqual(najbolj_oddaljeni("Lenart", ["Brežice", "Rateče"], kraji), "Rateče")
    self.assertEqual(najbolj_oddaljeni("Lenart", ["Brežice"], kraji), "Brežice")
    kraji = self.vsi_kraji
    self.assertEqual(najbolj_oddaljeni("Ljubljana", ["Domžale", "Kranj", "Maribor", "Vrhnika"], kraji), "Maribor")
def test_6_zalijemo(self):
    """zalijemo: farthest place within the given radius of the center."""
    self.assertEqual(zalijemo("Ljubljana", 30, self.vsi_kraji), "Cerknica")
def test_7_presek(self):
    """presek: intersection of two lists, preserving values not order."""
    self.assertEqual(presek([1, 5, 2], [3, 1, 4]), [1])
    self.assertEqual(presek([1, 5, 2], [3, 0, 4]), [])
    self.assertEqual(presek([1, 5, 2], []), [])
    self.assertEqual(presek([], [3, 0, 4]), [])
    self.assertEqual(presek([], []), [])
    self.assertEqual(set(presek([1, 5, 2], [2, 0, 5])), {2, 5})
    self.assertEqual(presek(["Ana", "Berta", "Cilka"], ["Cilka", "Dani", "Ema"]), ["Cilka"])
def test_8_skupno_zalivanje(self):
    """skupno_zalivanje: places reachable (within r) from both centers."""
    self.assertEqual(set(skupno_zalivanje("Bled", "Ljubljana", 30, self.vsi_kraji)),
                     {"Kranj", "Škofja Loka"})
if __name__ == "__main__":
unittest.main()
| [
"benjamin.fele@gmail.com"
] | benjamin.fele@gmail.com |
bd7bff855a8e80aaf2827d004bd3fd6132dde759 | 0d3f7956ccbb273ca4fa8afcd55a789fa22e6ba5 | /preprocessing.py | 27f746c260e188dee2b66c28b16a26d1f224f396 | [
"MIT"
] | permissive | cltl-students/hamersma-agression-causes | 9017fea1fb77e3ca278cd3752c2c74f9a2f25925 | 11cbfd94031a0a3c84a27afa20d8a539acdab609 | refs/heads/master | 2023-04-15T07:46:58.447952 | 2021-05-03T15:27:35 | 2021-05-03T15:27:35 | 351,095,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,259 | py | import pandas as pd
import spacy
import nltk
import re
import pickle
import numpy as np
from transformers import BertTokenizer, BertModel
from utils.bert_embeddings import get_BERT_embedding
from collections import defaultdict
import torch
from collections import Counter
import os
dirname = os.path.dirname(__file__)
#nlp = spacy.load('nl_core_news_lg')
bertje = 'wietsedv/bert-base-dutch-cased'
bertje_tokenizer = BertTokenizer.from_pretrained(bertje)
bertje_model = BertModel.from_pretrained(bertje, output_hidden_states=True)
bertje_model.eval()
def callback( str ):
''''Removes dots from string eg. mister A.B. becomes mister AB
:param str: string
:returns: string without dot'''
return str.replace('.', '')
def change_abbreviations(text):
'''Processes text by lowercasing, removing dots from name abbreviations and replaces most common abbreviations by
full word.
:param text: string
:returns: pre-processed string'''
text = re.sub(r"(?:[A-Z]\.)+", lambda m: callback(m.group()), text) #meneer A.B.
text = text.lower()
text = text.replace('cliënt', 'client').replace('patiënt', 'patient').replace(';', ':').replace('vos.', 'alarm').replace('pt.', 'client')
text = text.replace('mw.', 'mevrouw').replace('mr.', 'meneer').replace('dhr.', 'meneer').replace('vzo.', 'zorgondersteuner').replace('v.z.o.', 'zorgondersteuner')
text = text.replace('mvr.', 'mevrouw').replace('mnr.', 'meneer').replace('mevr.', 'mevrouw').replace('og.', 'ondergetekende').replace('pte.', 'client')
text = text.replace('vpk.', 'verpleegkundige').replace('bgl.', 'begeleiding').replace('collega\'s', 'collega').replace('pat.', 'client')
text = text.replace('og.', 'begeleider').replace('o.g.', 'begeleider').replace('o.g', 'begeleider').replace('dda.', 'dienstdoende arts')
text = text.replace('vzo.', 'verzorging').replace('medecl.', 'medeclient').replace('cl.', 'client').replace('o.g.', 'ondergetekende')
#text = text.replace('ivm.', 'in verband met').replace('i.v.m.', 'in verband met').replace('bijv.', 'bijvoorbeeld').replace('d.w.z.', 'dat wil zeggen').replace('dwz.', 'dat wil zeggen')
#text = text.replace('ipv.', 'in plaats van').replace('i.p.v.', 'in plaats van').replace('o.a.', 'onder andere').replace('oa.', 'onder andere').replace('n.a.v.', 'naar aanleiding van')
#text = text.replace('m.b.t.', 'met betrekking tot').replace('mbt.', 'met betrekking tot').replace('t/m', 'tot en met')
text = re.sub(r'(?<!\w)([a-z])\.', r'\1', text) # o.a. naar oa, nodig voor sent splitting
text = text.replace('\xa0', ' ')#.decode("utf-8")
return text
def make_predoutput_file(output):
''''Creates csv file with clauses and identifiers.
:param output: list of lists containing id, sent_identifier, chunk_identifier, chunk
:returns: dataframe'''
df = pd.DataFrame(output, columns=['VIM id', 'Sentence identifier', 'Chunk identifier', 'Chunk'])
file = dirname + '/output/preprocessed_clauses.csv'
df.to_csv(file, sep='|', index=False, encoding='utf-8')
return df
def detect_clauses(sent):
''''Splits sentence into clauses by grouping children of the heads.
:param sent: string
:returns: list of tuples of id and clause'''
seen = set() # keep track of covered words
chunks = []
heads = [cc for cc in sent.root.children if cc.dep_ == 'conj']
for head in heads:
words = [ww for ww in head.subtree]
for word in words:
seen.add(word)
chunk = (' '.join([ww.text.strip(' ') for ww in words]))
chunks.append((head.i, chunk))
unseen = [ww for ww in sent if ww not in seen]
chunk = ' '.join([ww.text.strip(' ') for ww in unseen])
chunks.append((sent.root.i, chunk))
chunks = sorted(chunks, key=lambda x: x[0])
return chunks
def dd():
''''Defaultfunction for defaultdict.
:returns: array'''
return np.array([0] * 768)
def preprocess(inputfile):
'''Reads in file as dict, loops through all vims, pre-processes, divides into sentences and clauses and generates a
new file containing the pre-processed clauses. Token, clause and sentence embeddings are stored as a dict for later
usage.
:param inputfile: inputfile as xls or xlsx
:prints: tokens that no embedding is found for'''
data = pd.read_excel(dirname + '/input/' + inputfile, index_col=0).T.to_dict()
output = []
unknown = []
chunk_embeddings = defaultdict(dd) #if i make this defaultdict never keyerror but a specific return
sent_embeddings = defaultdict(dd)
for id, vim in data.items():
text = change_abbreviations(vim['tekst'])
sents = nltk.tokenize.sent_tokenize(text)
sent_i = 0
for sent in sents:
chunk_id = 0
sent_i += 1
sent_embedding, word_embeddings = get_BERT_embedding(sent, bertje_model, bertje_tokenizer) #word_embedding is type dict word:vector
sent_identifier = str(id) + '-' + str(sent_i)
sent_embeddings[sent_identifier] = sent_embedding
split_sent = sent.split(',')
for part in split_sent:
part = part.lstrip(' ').rstrip(' ')
doc = nlp(part)
for sentence in doc.sents:
chunks = detect_clauses(sentence)
for i, chunk in chunks:
chunk_id += 1
chunk = chunk.rstrip(' ').lstrip(' ')
if chunk != chunk or chunk == '': # chunk == nan, nothing left after stripping
continue
chunk_embeds = []
chunk_vecs = []
for word in chunk.split(' '):
vector = word_embeddings.get(word) #does not get all words because of difference tokenizers BERT and NLTK
if vector == None:
unknown.append(word)
else:
chunk_vecs.append(vector)
if chunk_vecs:
word_stack = torch.stack(chunk_vecs, dim=0)
chunk_embedding = torch.mean(word_stack, dim=0)
chunk_embeds.append(np.array(chunk_embedding))
chunk_identifier = sent_identifier + '-' + str(chunk_id)
chunk_embeddings[chunk_identifier] = chunk_embeds
else:
chunk_identifier = sent_identifier + '-' + str(chunk_id)
row = [id, sent_identifier, chunk_identifier, chunk]
output.append(row)
make_predoutput_file(output)
pickle.dump(chunk_embeddings, open(dirname + '/models/clause_embeddings_all.pickle', 'wb'))
pickle.dump(word_embeddings, open(dirname + '/models/token_embeddings_all.pickle', 'wb'))
pickle.dump(sent_embeddings, open(dirname + '/models/sent_embeddings_all.pickle', 'wb'))
print('Words that could not be matched to an embedding:',Counter(unknown)) | [
"noreply@github.com"
] | noreply@github.com |
59c085b313e80a35b2b866517725e5e2b2ab268d | 2c5b7aa3ada684688a5a1556cf6c7934217b7dcd | /movie_analysis/__init__.py | 56f610126ef8054383c7532f5556c19f78520717 | [
"MIT"
] | permissive | rlfranz/movie-gender-sentiment-analysis | 385aafacd8d2f0f964d3f2467cf40a8fd74b6914 | ff8dd6393a4b6224c95e388cfc70a428a001bd41 | refs/heads/master | 2020-04-10T17:06:41.234053 | 2018-12-10T11:30:13 | 2018-12-10T11:30:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from .version import __version__ # noqa
from .movie_analysis import * # noqa
from .get_sentiment_score import * # noqa
from .analyze_comments_tblob import * # noqa
| [
"rchlfranz@gmail.com"
] | rchlfranz@gmail.com |
e778973800b1393a64cfbffed12451da655f8b6e | be0046a5476db35e21325b1d7a009b95c13b2c34 | /analysis/museumstation/usage_stats/usage_stats/get_usage_short.py | 03ec8d28ba5b99f669294a736ccee4af090a1d65 | [] | no_license | brialorelle/kiddraw | 089605e1a20aa38521dcbd411f4781f62f738618 | 78db57e46d8d4eafe49a8edec5a86499abdcb332 | refs/heads/master | 2022-10-03T10:56:26.842485 | 2022-09-09T17:23:42 | 2022-09-09T17:23:42 | 106,886,186 | 18 | 7 | null | 2018-09-04T23:28:09 | 2017-10-14T00:47:07 | Jupyter Notebook | UTF-8 | Python | false | false | 1,270 | py |
## libraries
# Python 2 script (uses `print` statements): reports how many valid drawing
# sessions and final images were collected in the museumstation MongoDB
# over the last `num_hours` hours.
import pandas as pd
import time
import pymongo as pm
import os
# set input parameters
iterationName = 'cdm_run_v8'
num_hours = 40000
# set up connections
auth = pd.read_csv('../../auth.txt', header = None) # this auth.txt file contains the password for the sketchloop user
pswd = auth.values[0][0]
conn = pm.MongoClient('mongodb://stanford:' + pswd + '@127.0.0.1')
db = conn['kiddraw']
coll = db[iterationName]
# helper: current Unix time in milliseconds (Mongo stores ms timestamps)
current_milli_time = lambda: int(round(time.time() * 1000))
x_hours_ago = current_milli_time() - (num_hours*60*60*1000)
# get image recs and sessions since certain date
image_recs = coll.find({'$and': [{'dataType':'finalImage'}, {'endTrialTime': {"$gt": x_hours_ago}}]})
valid_sessions = coll.find({'endTrialTime': {"$gt": x_hours_ago}}).distinct('sessionId')
# get count and label for number of images
numImages = image_recs.count()
# NOTE(review): this picks the 10th-from-last record, yet is reported below as
# the time of "the last drawing" — confirm whether the -10 offset is intended.
lastImage = image_recs[numImages - 10]
## get date from most recent image
lastestDate = lastImage['date']
# fiveImagesAgo = image_recs[numImages - 5]
# recentDate = fiveImagesAgo['date']
print 'In the past {} hours, we have {} valid sessions from {} with {} drawings.'.format(num_hours, len(valid_sessions), iterationName, numImages)
print 'The last drawing was made at {}.'.format(lastestDate)
| [
"brialorelle@gmail.com"
] | brialorelle@gmail.com |
63231052babc52cc55eefc689d6deb696b58d106 | c9fd6ede5e3f8626f0e581aed9db1729ed2466ad | /browserHistory.py | 981120d322062f94c02bd0733c8c82b44b4a8673 | [] | no_license | jcgaza/leekcodes | 6de2657cc58c5680661d2495b4d996f9d6e89428 | 27fa50185adc347c6b2fe56bec7c81db13265dbc | refs/heads/main | 2023-05-03T07:54:10.573245 | 2021-05-25T12:48:20 | 2021-05-25T12:48:20 | 363,787,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,837 | py | class BrowserHistory:
def __init__(self, homepage: str):
self.currentIndex = 0
self.urls = [homepage]
def visit(self, url: str) -> None:
self.urls = self.urls[:self.currentIndex+1]
self.urls.append(url)
self.currentIndex = len(self.urls)-1
def back(self, steps: int) -> str:
print("current:", self.currentIndex)
if steps > self.currentIndex:
self.currentIndex = 0
else:
self.currentIndex -= steps
return self.urls[self.currentIndex]
def forward(self, steps: int) -> str:
print(steps, self.currentIndex)
if steps > len(self.urls)-self.currentIndex-1:
self.currentIndex = len(self.urls)-1
else:
self.currentIndex += steps
return self.urls[self.currentIndex]
# Ad-hoc driver replaying one LeetCode test case against BrowserHistory.
# (Comments corrected: the originals were stale copies from the problem
# statement and did not describe these calls.)
ans = []
browserHistory = BrowserHistory("zav.com")
ans.append(None)
ans.append(browserHistory.visit("kni.com"))  # visit returns None; history: zav, kni
ans.append(browserHistory.back(7))  # clamps at the homepage -> "zav.com"
ans.append(browserHistory.back(7))  # already at index 0 -> "zav.com"
ans.append(browserHistory.forward(5))  # clamps at the newest page -> "kni.com"
ans.append(browserHistory.forward(1))  # already at the end -> "kni.com"
ans.append(browserHistory.visit("pwrrbnw.com"))  # history: zav, kni, pwrrbnw
ans.append(browserHistory.visit("mosohif.com"))  # history: zav, kni, pwrrbnw, mosohif
ans.append(browserHistory.back(9))  # clamps back to the homepage -> "zav.com"
print(ans)
"jcgaza@up.edu.ph"
] | jcgaza@up.edu.ph |
9265912218e15a8cb1439ef7f525286b3276040d | c532eea91e84f58f4ba57c27c6c24046498bde22 | /HelloPython/day02/Myfunc04.py | 2b34c220385a7c4d1454354f91fe57294038825a | [] | no_license | seaweedy/python | b9010f3277da09311a6d128d7f992874137c7c82 | 3f966223e19011318ed45308e89afe7d217e6ea4 | refs/heads/master | 2023-01-01T17:53:58.845336 | 2020-10-21T08:10:30 | 2020-10-21T08:10:30 | 301,929,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | def increase(a):
a +=1
def increaseRef(a):
a[0] +=1
a = 1
b = [3]
print(a)
print(b[0])
increase(a)
increaseRef(b)
print(a)
print(b[0])
| [
"ismh5279@gmail.com"
] | ismh5279@gmail.com |
ac605461478c38b29888febfb314e4ce4df02cb0 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2223/60595/241841.py | 6076414ea5c1392865c888fc83043dbdc578bc50 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | num=input()
if(num=="1,2,4,7"):
print("[0, -4]")
elif(num=="1,2,2,4"):
print("[2, 3]")
else:
print("[2, 1]")
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
0fac751aa2481ed3243e8b4ecef04a4bc1c5f709 | 1d0c89ecaa7598e5cb6a26a20a1bdd5f51d60123 | /apps/venta/views.py | 4d70ba59937d3b4dd2807cc2520c3a70b605026b | [] | no_license | chrisstianandres/american_audio | a1fee70e798a151fcbfd492ed75878a8524c783b | ee31f01af4212cc2484188003900648064811fcb | refs/heads/master | 2023-02-01T02:36:59.824789 | 2020-12-09T23:17:45 | 2020-12-09T23:17:45 | 307,203,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,972 | py | import json
from datetime import datetime
from django.db import transaction
from django.db.models import Sum, Count
from django.db.models.functions import Coalesce
from django.http import JsonResponse, HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import *
from apps.Mixins import ValidatePermissionRequiredMixin
from apps.backEnd import nombre_empresa
from apps.cliente.forms import ClienteForm
from apps.compra.models import Compra
from apps.delvoluciones_venta.models import Devolucion
from apps.inventario.models import Inventario
from apps.servicio.models import Servicio
from apps.venta.forms import VentaForm, Detalle_VentaForm, Detalle_VentaForm_serv
from apps.venta.models import Venta, Detalle_venta, Detalle_venta_servicios
from apps.empresa.models import Empresa
from apps.producto.models import Producto
import os
from django.conf import settings
from django.template.loader import get_template
from xhtml2pdf import pisa
from django.contrib.staticfiles import finders
# Module-level UI constants shared by every view below.
opc_icono = 'fa fa-shopping-basket '  # FontAwesome icon class for the header
opc_entidad = 'Ventas'                # entity label shown in templates
crud = '/venta/crear'                 # create endpoint used by the front-end
empresa = nombre_empresa()            # company name, resolved once at import time
class lista(ValidatePermissionRequiredMixin, ListView):
    """Sales list screen. Rows are fetched asynchronously by `data()`,
    so the server-side queryset is deliberately empty."""
    model = Venta
    template_name = 'front-end/venta/venta_list.html'
    permission_required = 'view_venta'
    def get_queryset(self):
        # The datatable is populated client-side; render no rows here.
        return Venta.objects.none()
    def get_context_data(self, **kwargs):
        # Template chrome: icon, titles and the "new sale" button target.
        data = super().get_context_data(**kwargs)
        data['icono'] = opc_icono
        data['entidad'] = opc_entidad
        data['boton'] = 'Nueva Venta'
        data['titulo'] = 'Listado de Ventas'
        data['nuevo'] = '/venta/nuevo'
        data['empresa'] = empresa
        return data
@csrf_exempt
def data(request):
    """AJAX feed for the sales datatable.

    Reads optional POST params ``start_date``/``end_date``; when both are
    empty every sale is returned. Each row is
    [date, client, employee, total, id, estado label, id] as the front-end
    table expects. Always responds with a JSON list; on error it returns
    whatever rows were built before the failure (best-effort, as before).
    """
    data = []
    start_date = request.POST.get('start_date', '')
    end_date = request.POST.get('end_date', '')
    try:
        if start_date == '' and end_date == '':
            venta = Venta.objects.all()
        else:
            venta = Venta.objects.filter(fecha_venta__range=[start_date, end_date])
        for c in venta:
            data.append([
                c.fecha_venta.strftime('%d-%m-%Y'),
                c.cliente.nombres + " " + c.cliente.apellidos,
                c.empleado.get_full_name(),
                format(c.total, '.2f'),
                c.id,
                c.get_estado_display(),
                c.id
            ])
    except Exception:
        # FIX: was a bare ``except:`` which also swallowed SystemExit /
        # KeyboardInterrupt; the deliberate best-effort behavior is kept.
        pass
    return JsonResponse(data, safe=False)
def nuevo(request):
    """Render the 'new sale' form.

    Only GET is meaningful here; the actual save goes through `crear`.
    """
    data = {
        'icono': opc_icono, 'entidad': opc_entidad, 'crud': '../venta/get_producto',
        'crudserv': '../venta/get_servicio',
        'empresa': empresa,
        'boton': 'Guardar Venta', 'action': 'add', 'titulo': 'Nuevo Registro de una Venta',
        'key': ''
    }
    if request.method == 'GET':
        data['form'] = VentaForm()
        data['form2'] = Detalle_VentaForm()
        data['form3'] = Detalle_VentaForm_serv()
        data['formc'] = ClienteForm()
        data['detalle'] = []
        return render(request, 'front-end/venta/venta_form.html', data)
    # BUG FIX: the original fell through and returned None for non-GET
    # requests, which Django turns into a 500. Answer 405 instead.
    return HttpResponse(status=405)
def _guardar_detalle_producto(venta, item):
    """Persist one product line of `venta`: detail row, stock decrement and
    marking of the physical inventory units as sold."""
    dv = Detalle_venta()
    dv.venta_id = venta.id
    dv.producto_id = item['producto']['id']
    cantidad = int(item['cantidad'])
    dv.cantidadp = cantidad
    dv.subtotalp = float(item['subtotal'])
    producto = Producto.objects.get(pk=item['producto']['id'])
    # Snapshot the sale-time price so later price changes don't alter history.
    dv.pvp_actual = float(producto.pvp)
    producto.stock = producto.stock - cantidad
    producto.save()
    dv.save()
    # Flag the first `cantidad` available physical units as sold by this sale.
    for unidad in Inventario.objects.filter(producto_id=item['producto']['id'], estado=1)[:cantidad]:
        inv = Inventario.objects.get(pk=unidad.id)
        inv.estado = 0
        inv.venta_id = venta.id
        inv.save()


def _guardar_detalle_servicio(venta, item):
    """Persist one service line of `venta`."""
    dvs = Detalle_venta_servicios()
    dvs.venta_id = venta.id
    dvs.servicio_id = item['id']
    dvs.cantidads = int(item['cantidad'])
    dvs.subtotals = float(item['subtotal'])
    dvs.pvp_actual_s = float(item['pvp'])
    dvs.save()


@csrf_exempt
def crear(request):
    """Create a sale from the JSON payload in POST['ventas'], atomically.

    The payload carries header fields plus `productos` and `servicios`
    lists (either may be empty). Responds with {'resp': bool, 'id': ...}
    or {'resp': False, 'error': ...} when the payload is empty.

    BUG FIX: the original's services-only branch iterated
    ``for i in datos['servicios']`` but read the undefined name ``s``,
    raising NameError (and rolling back) for every services-only sale.
    The three near-duplicate branches now collapse into two loops that
    behave identically for the mixed and products-only cases.
    """
    data = {}
    if request.method == 'POST':
        datos = json.loads(request.POST['ventas'])
        if datos:
            with transaction.atomic():
                c = Venta()
                c.fecha_venta = datos['fecha_venta']
                c.cliente_id = datos['cliente']
                c.empleado_id = request.user.id
                c.subtotal = float(datos['subtotal'])
                c.iva = float(datos['iva'])
                c.total = float(datos['total'])
                c.save()
                for item in datos['productos']:
                    _guardar_detalle_producto(c, item)
                for item in datos['servicios']:
                    _guardar_detalle_servicio(c, item)
                data['id'] = c.id
                data['resp'] = True
        else:
            data['resp'] = False
            data['error'] = "Datos Incompletos"
    return HttpResponse(json.dumps(data), content_type="application/json")
def editar(request, id):
    """Render the edit form for sale `id`, pre-filled with its detail rows."""
    data = {
        'icono': opc_icono, 'entidad': opc_entidad, 'crud': '../../venta/get_producto', 'empresa': empresa,
        'boton': 'Editar Venta', 'action': 'edit', 'titulo': 'Editar Registro de una Venta',
        'key': id
    }
    venta = Venta.objects.get(id=id)
    if request.method == 'GET':
        data['form'] = VentaForm(instance=venta)
        data['form2'] = Detalle_VentaForm()
        # Detail rows are handed to the front-end as a JSON string.
        data['detalle'] = json.dumps(get_detalle_productos(id))
        return render(request, 'front-end/venta/venta_form.html', data)
    # BUG FIX: the original returned None for non-GET requests (500 in
    # Django); respond 405 instead, consistent with `nuevo`.
    return HttpResponse(status=405)
@csrf_exempt
def editar_save(request):
    """Persist an edited sale: rewrite header fields and replace all detail
    rows with the ones in the JSON payload POST['ventas'].

    NOTE(review): several fields here disagree with `crear` — this writes
    ``dv.cantidad`` (crear uses ``cantidadp``) and reads ``i['id']`` (crear
    reads ``i['producto']['id']``), and neither product stock nor the
    Inventario units are adjusted when rows are deleted/re-added. Confirm
    against the model definitions whether this edit path actually works.
    """
    data = {}
    datos = json.loads(request.POST['ventas'])
    if request.POST['action'] == 'edit':
        with transaction.atomic():
            # c = Compra.objects.get(pk=self.get_object().id)
            c = Venta.objects.get(pk=request.POST['key'])
            c.fecha_venta = datos['fecha_venta']
            c.cliente_id = datos['cliente']
            c.subtotal = float(datos['subtotal'])
            c.iva = float(datos['iva'])
            c.total = float(datos['total'])
            c.save()
            # Replace-all strategy: drop existing rows, recreate from payload.
            c.detalle_venta_set.all().delete()
            for i in datos['productos']:
                dv = Detalle_venta()
                dv.venta_id = c.id
                dv.producto_id = i['id']
                dv.cantidad = int(i['cantidad'])
                dv.save()
            data['resp'] = True
    else:
        data['resp'] = False
        data['error'] = "Datos Incompletos"
    return HttpResponse(json.dumps(data), content_type="application/json")
def get_detalle_productos(id):
    """Serialize the detail rows of sale `id` for the edit form.

    Returns a list of product dicts augmented with the quantity and the
    company VAT rate; an empty list if anything fails (best-effort, as
    before).

    NOTE(review): reads ``i.cantidad`` while `crear` writes ``cantidadp`` —
    confirm the Detalle_venta model exposes both, otherwise this always
    fails and silently returns [].
    """
    data = []
    try:
        for i in Detalle_venta.objects.filter(venta_id=id):
            iva_emp = Empresa.objects.get(pk=1)
            item = i.producto.toJSON()
            item['cantidad'] = i.cantidad
            item['iva_emp'] = format(iva_emp.iva, '.2f')
            data.append(item)
    except Exception:
        # FIX: was a bare ``except:``; keep the best-effort contract but
        # don't swallow SystemExit/KeyboardInterrupt.
        pass
    return data
@csrf_exempt
def get_producto(request):
    """Reserve and return the first free inventory unit for POST['id'].

    Side effect: the returned unit is flagged (``select = 1``) so it is not
    offered twice while the sale form is open.

    BUG FIX: the original initialized ``data`` as a dict, rebound it to a
    list on the happy path, and then did ``data['error'] = ...`` in the
    exception handler — a TypeError (hence a 500) for any failure occurring
    after the rebind. Error paths now return their own dict.
    """
    try:
        id = request.POST['id']
        if not id:
            return JsonResponse({'error': 'No ha selecionado ningun Producto'})
        query = Inventario.objects.filter(producto_id=id, estado=1, select=0)[0:1]
        iva_emp = Empresa.objects.get(pk=1)
        data = []
        for i in query:
            item = i.toJSON()
            item['producto'] = i.producto.toJSON()
            # pvp is stored VAT-inclusive; expose the net unit price.
            item['pvp'] = (i.producto.pvp * 100) / (iva_emp.iva + 100)
            item['cantidad'] = 1
            item['subtotal'] = 0.00
            item['iva_emp'] = iva_emp.iva / 100
            data.append(item)
            i.select = 1
            i.save()
        return JsonResponse(data, safe=False)
    except Exception:
        return JsonResponse({'error': 'Ha ocurrido un error'})
@csrf_exempt
def get_servicio(request):
    """Return the service matching POST['id'], shaped for the sale form.

    NOTE(review): ``item['pvp'] = 1.00`` hardcodes the unit price to 1 —
    presumably the real price is entered client-side, but confirm this is
    intended and not a placeholder. Also note that, as in `get_producto`,
    ``data`` starts as a dict and is rebound to a list, so the exception
    handler's ``data['error']`` assignment would raise TypeError if a
    failure occurred after the rebind.
    """
    data = {}
    try:
        id = request.POST['id']
        if id:
            servicio = Servicio.objects.filter(pk=id)
            iva_emp = Empresa.objects.get(pk=1)
            data = []
            for i in servicio:
                item = i.toJSON()
                item['pvp'] = 1.00
                item['cantidad'] = 1
                item['subtotal'] = 0.00
                item['iva_emp'] = iva_emp.iva / 100
                data.append(item)
        else:
            data['error'] = 'No ha selecionado ningun Servicio'
    except Exception as e:
        data['error'] = 'Ha ocurrido un error'
    return JsonResponse(data, safe=False)
@csrf_exempt
def get_detalle(request):
    """Return the product detail rows of sale POST['id'] for display.

    Prices are reported net of VAT (``pvp_actual`` is stored VAT-inclusive).

    BUG FIX: the original computed the subtotal as
    ``format(precio, '.2f') * cantidad`` — multiplying the *formatted
    string* by an int, which repeats the text instead of multiplying the
    number. The subtotal is now computed numerically before formatting.
    """
    data = {}
    try:
        id = request.POST['id']
        if id:
            data = []
            result = Detalle_venta.objects.filter(venta_id=id)
            empresa = Empresa.objects.get(pk=1)
            for p in result:
                pvp_sin_iva = (p.pvp_actual * 100) / (empresa.iva + 100)
                data.append({
                    'producto': p.producto.nombre,
                    'categoria': p.producto.categoria.nombre,
                    'presentacion': p.producto.presentacion.nombre,
                    'cantidad': p.cantidadp,
                    'pvp': format(pvp_sin_iva, '.2f'),
                    'subtotal': format(pvp_sin_iva * p.cantidadp, '.2f'),
                })
        else:
            data['error'] = 'Ha ocurrido un error'
    except Exception as e:
        data['error'] = str(e)
    return JsonResponse(data, safe=False)
@csrf_exempt
def get_detalle_serv(request):
    """Return the service detail rows of sale POST['id'] for display.

    BUG FIX: same defect as `get_detalle` — the subtotal multiplied a
    formatted *string* by the quantity, duplicating the text. It is now
    computed numerically and then formatted.
    """
    data = {}
    try:
        id = request.POST['id']
        if id:
            data = []
            empresa = Empresa.objects.get(pk=1)
            result = Detalle_venta_servicios.objects.filter(venta_id=id)
            for p in result:
                pvp_sin_iva = (p.pvp_actual_s * 100) / (empresa.iva + 100)
                data.append({
                    'servicio': p.servicio.nombre,
                    'cantidad': p.cantidads,
                    'pvp': format(pvp_sin_iva, '.2f'),
                    'subtotal': format(pvp_sin_iva * p.cantidads, '.2f')
                })
        else:
            data['error'] = 'Ha ocurrido un error'
    except Exception as e:
        data['error'] = str(e)
    return JsonResponse(data, safe=False)
@csrf_exempt
def estado(request):
    """Cancel sale POST['id']: mark it inactive, create a Devolucion record,
    restock the sold products and release their inventory units — all in
    one transaction.

    NOTE(review): the inner ``Inventario`` loop runs once per product row
    but always releases *every* unit of the sale; and ``es.save()`` inside
    the ``i.producto == None`` branch looks like a misplaced early save.
    Behavior preserved here — confirm intent before refactoring.
    """
    data = {}
    try:
        id = request.POST['id']
        if id:
            with transaction.atomic():
                es = Venta.objects.get(id=id)
                es.estado = 0
                # Record the return/cancellation.
                dev = Devolucion()
                dev.venta_id = id
                dev.fecha = datetime.now()
                dev.save()
                for i in Detalle_venta.objects.filter(venta_id=id):
                    if i.producto==None:
                        es.save()
                    else:
                        # Put the sold quantity back into stock.
                        ch = Producto.objects.get(id=i.producto.id)
                        ch.stock = int(ch.stock) + int(i.cantidadp)
                        ch.save()
                        # Release the physical units reserved by this sale.
                        for a in Inventario.objects.filter(venta_id=id):
                            a.estado = 1
                            a.select = 0
                            a.venta_id = None
                            a.save()
                es.save()
        else:
            data['error'] = 'Ha ocurrido un error'
    except Exception as e:
        data['error'] = str(e)
    return JsonResponse(data)
@csrf_exempt
def eliminar(request):
    """Delete the sale whose primary key arrives in POST['id'].

    Responds with an empty JSON object on success, or {'error': ...} when
    the id is missing or the delete fails.
    """
    respuesta = {}
    try:
        venta_id = request.POST['id']
        if not venta_id:
            respuesta['error'] = 'Ha ocurrido un error'
        else:
            Venta.objects.get(id=venta_id).delete()
    except Exception as e:
        respuesta['error'] = str(e)
    return JsonResponse(respuesta)
@csrf_exempt
def grap(request):
    """Assemble the dashboard payload (POST action 'chart'): monthly sales
    series, per-product pie data, the purchases-vs-sales comparison and the
    summary-card counters."""
    data = {}
    try:
        action = request.POST['action']
        if action == 'chart':
            data = {
                # Highcharts column series: one value per month.
                'dat': {
                    'name': 'Total de ventas',
                    'type': 'column',
                    'colorByPoint': True,
                    'showInLegend': True,
                    'data': grap_data(),
                },
                'year': datetime.now().year,
                # Per-product totals for the current month (pie chart).
                'chart2': {
                    'data': dataChart2(),
                },
                # Purchases vs sales, month by month.
                'chart3': {
                    'compras': datachartcontr(),
                    'ventas': grap_data()
                },
                # Counters for the summary cards.
                'tarjets': {
                    'data': data_tarjets()
                }
            }
        else:
            data['error'] = 'Ha ocurrido un error'
    except Exception as e:
        data['error'] = str(e)
    return JsonResponse(data, safe=False)
def grap_data():
    """Monthly totals of active sales for the current year (Jan..Dec)."""
    current_year = datetime.now().year
    return [
        float(
            Venta.objects.filter(
                fecha_venta__year=current_year,
                fecha_venta__month=month,
                estado=1,
            ).aggregate(r=Coalesce(Sum('total'), 0)).get('r')
        )
        for month in range(1, 13)
    ]
def data_tarjets():
    """Counters for the dashboard summary cards: this year's active sales,
    purchases and in-stock inventory units."""
    current_year = datetime.now().year
    num_ventas = Venta.objects.filter(
        fecha_venta__year=current_year, estado=1
    ).aggregate(r=Coalesce(Count('id'), 0)).get('r')
    num_compras = Compra.objects.filter(
        fecha_compra__year=current_year, estado=1
    ).aggregate(r=Coalesce(Count('id'), 0)).get('r')
    num_inventario = Inventario.objects.filter(
        compra__fecha_compra__year=current_year, estado=1
    ).aggregate(r=Coalesce(Count('id'), 0)).get('r')
    return {
        'ventas': int(num_ventas),
        'compras': int(num_compras),
        'inventario': int(num_inventario),
    }
def dataChart2():
    """Per-product sales totals for the current month, as Highcharts points
    ({'name': ..., 'y': ...}), one entry per product."""
    now = datetime.now()
    points = []
    for producto in Producto.objects.all():
        total = Detalle_venta.objects.filter(
            venta__fecha_venta__year=now.year,
            venta__fecha_venta__month=now.month,
            producto_id=producto,
        ).aggregate(r=Coalesce(Sum('venta__total'), 0)).get('r')
        points.append({'name': producto.nombre, 'y': float(total)})
    return points
def datachartcontr():
    """Monthly totals of active purchases for the current year (Jan..Dec)."""
    current_year = datetime.now().year
    return [
        float(
            Compra.objects.filter(
                fecha_compra__year=current_year,
                fecha_compra__month=month,
                estado=1,
            ).aggregate(r=Coalesce(Sum('total'), 0)).get('r')
        )
        for month in range(1, 13)
    ]
class printpdf(View):
    """Render sale ``pk`` as a PDF receipt with xhtml2pdf."""

    def link_callback(self, uri, rel):
        """Convert HTML URIs to absolute filesystem paths so xhtml2pdf can
        load static/media resources referenced by the template."""
        result = finders.find(uri)
        if result:
            if not isinstance(result, (list, tuple)):
                result = [result]
            result = list(os.path.realpath(path) for path in result)
            path = result[0]
        else:
            sUrl = settings.STATIC_URL    # Typically /static/
            sRoot = settings.STATIC_ROOT  # Typically /home/userX/project_static/
            mUrl = settings.MEDIA_URL     # Typically /media/
            mRoot = settings.MEDIA_ROOT   # Typically .../media/
            if uri.startswith(mUrl):
                path = os.path.join(mRoot, uri.replace(mUrl, ""))
            elif uri.startswith(sUrl):
                path = os.path.join(sRoot, uri.replace(sUrl, ""))
            else:
                return uri
        # NOTE(review): when `finders.find` succeeded, sUrl/mUrl are unbound
        # here, so a missing file in that branch raises NameError instead of
        # this message — kept as-is to preserve behavior.
        if not os.path.isfile(path):
            raise Exception(
                'media URI must start with %s or %s' % (sUrl, mUrl)
            )
        return path

    def pvp_cal(self, *args, **kwargs):
        """Product lines of the sale with VAT-exclusive unit prices.

        BUG FIX: the subtotal was ``format(precio, '.2f') * cantidad`` —
        multiplying the formatted *string* by an int, which repeats the
        text. It is now computed numerically before formatting.
        """
        data = []
        try:
            iva_emp = Empresa.objects.get(pk=1)
            for i in Detalle_venta.objects.filter(venta_id=self.kwargs['pk']):
                item = i.venta.toJSON()
                item['producto'] = i.producto.toJSON()
                pvp_sin_iva = (i.pvp_actual * 100) / (iva_emp.iva + 100)
                item['pvp'] = format(pvp_sin_iva, '.2f')
                item['cantidadp'] = i.cantidadp
                item['subtotalp'] = format(pvp_sin_iva * i.cantidadp, '.2f')
                data.append(item)
        except Exception:
            # Best-effort: an empty detail list just renders an empty table.
            pass
        return data

    def serv(self, *args, **kwargs):
        """Service lines of the sale, with the same string-multiplication
        subtotal bug fixed as in `pvp_cal`."""
        data = []
        try:
            iva_emp = Empresa.objects.get(pk=1)
            for i in Detalle_venta_servicios.objects.filter(venta_id=self.kwargs['pk']):
                item = i.venta.toJSON()
                item['servicio'] = i.servicio.toJSON()
                pvp_sin_iva = (i.pvp_actual_s * 100) / (iva_emp.iva + 100)
                item['pvp_s'] = format(pvp_sin_iva, '.2f')
                item['cantidads'] = i.cantidads
                item['subtotals'] = format(pvp_sin_iva * i.cantidads, '.2f')
                data.append(item)
        except Exception:
            pass
        return data

    def get(self, request, *args, **kwargs):
        """Build the PDF and return it as an attachment; on any failure,
        redirect back to the sales list."""
        try:
            template = get_template('front-end/report/pdf.html')
            context = {'title': 'Comprobante de Venta',
                       'sale': Venta.objects.get(pk=self.kwargs['pk']),
                       'det_sale': self.pvp_cal(),
                       'det_serv': self.serv(),
                       'empresa': Empresa.objects.get(id=1),
                       'icon': 'media/logo_don_chuta.png',
                       'inventario': Inventario.objects.filter(venta_id=self.kwargs['pk'])
                       }
            html = template.render(context)
            response = HttpResponse(content_type='application/pdf')
            response['Content-Disposition'] = 'attachment; filename="report.pdf"'
            pisa_status = pisa.CreatePDF(html, dest=response, link_callback=self.link_callback)
            return response
        except Exception:
            # FIX: was a bare ``except:``; fall through to the redirect.
            pass
        return HttpResponseRedirect(reverse_lazy('venta:lista'))
@csrf_exempt
def data_report(request):
    """Return the per-item sales report rows as JSON.

    POST parameters:
      start_date / end_date: optional 'YYYY-MM-DD' bounds; both empty means
        "all dates".
      tipo: 1 = products only, 2 = services only, anything else = both.

    Each row is [date, name, kind, quantity, unit price, subtotal (sin IVA),
    IVA amount, total], all money values formatted to two decimals.  Only
    completed sales (``venta__estado=1``) are counted.  Any exception leaves
    ``data`` partial/empty (silent best-effort).
    """
    data = []
    start_date = request.POST.get('start_date', '')
    end_date = request.POST.get('end_date', '')
    tipo = request.POST.get('tipo', '')
    empresa = Empresa.objects.get(pk=1)  # shadows the module-level `empresa` inside this view
    iva = float(empresa.iva / 100)  # IVA rate as a fraction, e.g. 12 -> 0.12
    try:
        if int(tipo) == 1:
            # Products: aggregate sold quantity per (date, product, unit price).
            if start_date == '' and end_date == '':
                query = Detalle_venta.objects.exclude(cantidadp=0).values('venta__fecha_venta', 'producto__nombre',
                                                                          'pvp_actual').order_by().annotate(
                    Sum('cantidadp')).filter(venta__estado=1)
            else:
                query = Detalle_venta.objects.exclude(cantidadp=0).values('venta__fecha_venta', 'producto__nombre',
                                                                          'pvp_actual') \
                    .filter(venta__fecha_venta__range=[start_date, end_date], venta__estado=1).order_by().annotate(
                    Sum('cantidadp'))
            for p in query:
                # pvp_actual is IVA-inclusive; strip the IVA for the subtotal column.
                total = p['pvp_actual'] * p['cantidadp__sum']
                total_sin_iva = float((total * 100) / (100 + empresa.iva))
                data.append([
                    p['venta__fecha_venta'].strftime("%d/%m/%Y"),
                    p['producto__nombre'],
                    'Producto',
                    int(p['cantidadp__sum']),
                    format(p['pvp_actual'], '.2f'),
                    format(total_sin_iva, '.2f'),
                    format(total_sin_iva * iva, '.2f'),
                    format(total, '.2f')
                ])
        elif int(tipo) == 2:
            # Services: NOTE(review) these rows come from Detalle_venta (not
            # Detalle_venta_servicios as in the PDF view) — confirm both
            # models hold service fields.  pvp_actual_s is treated as
            # IVA-exclusive here, unlike the product branch.
            if start_date == '' and end_date == '':
                query = Detalle_venta.objects.exclude(cantidads=0).values('venta__fecha_venta', 'servicio__nombre',
                                                                          'pvp_actual_s').annotate(
                    Sum('cantidads')).filter(venta__estado=1)
            else:
                query = Detalle_venta.objects.exclude(cantidads=0).values('venta__fecha_venta', 'servicio__nombre',
                                                                          'pvp_actual_s') \
                    .filter(venta__fecha_venta__range=[start_date, end_date], venta__estado=1).annotate(
                    Sum('cantidads'))
            for p in query:
                total = float(p['pvp_actual_s'] * p['cantidads__sum'])
                data.append([
                    p['venta__fecha_venta'].strftime("%d/%m/%Y"),
                    p['servicio__nombre'],
                    'Servicio',
                    int(p['cantidads__sum']),
                    format(p['pvp_actual_s'], '.2f'),
                    format(total, '.2f'),
                    format(total * iva, '.2f'),
                    format(total * (1 + iva), '.2f')
                ])
        else:
            # Both kinds: run the product query and the service query, then
            # append product rows followed by service rows.
            if start_date == '' and end_date == '':
                query = Detalle_venta.objects.exclude(cantidadp=0).values('venta__fecha_venta', 'producto__nombre',
                                                                          'pvp_actual').order_by().annotate(
                    Sum('cantidadp')).filter(venta__estado=1)
                query2 = Detalle_venta.objects.exclude(cantidads=0).values('venta__fecha_venta', 'servicio__nombre',
                                                                           'pvp_actual_s').annotate(
                    Sum('cantidads')).filter(venta__estado=1)
            else:
                query = Detalle_venta.objects.exclude(cantidadp=0).values('venta__fecha_venta', 'producto__nombre',
                                                                          'pvp_actual') \
                    .filter(venta__fecha_venta__range=[start_date, end_date], venta__estado=1).order_by().annotate(
                    Sum('cantidadp'))
                query2 = Detalle_venta.objects.exclude(cantidads=0).values('venta__fecha_venta', 'servicio__nombre',
                                                                           'pvp_actual_s') \
                    .filter(venta__fecha_venta__range=[start_date, end_date], venta__estado=1).annotate(
                    Sum('cantidads'))
            for p in query:
                totalp = p['pvp_actual'] * p['cantidadp__sum']
                total_sin_iva = float((totalp * 100) / (100 + empresa.iva))
                data.append([
                    p['venta__fecha_venta'].strftime("%d/%m/%Y"),
                    p['producto__nombre'],
                    'Producto',
                    int(p['cantidadp__sum']),
                    format(p['pvp_actual'], '.2f'),
                    format(total_sin_iva, '.2f'),
                    format(total_sin_iva * iva, '.2f'),
                    format(totalp, '.2f')
                ])
            for q in query2:
                totals = float(q['pvp_actual_s'] * q['cantidads__sum'])
                data.append([
                    q['venta__fecha_venta'].strftime("%d/%m/%Y"),
                    q['servicio__nombre'],
                    'Servicio',
                    int(q['cantidads__sum']),
                    format(q['pvp_actual_s'], '.2f'),
                    format(totals, '.2f'),
                    format(totals * iva, '.2f'),
                    format(totals * (1 + iva), '.2f')
                ])
    except:
        # Silent best-effort: bad `tipo`/dates simply produce fewer rows.
        pass
    return JsonResponse(data, safe=False)
class report(ValidatePermissionRequiredMixin, ListView):
    """Page hosting the per-product sales report (rows arrive via AJAX)."""
    model = Venta
    template_name = 'front-end/venta/venta_report_product.html'
    permission_required = 'view_venta'

    def get_queryset(self):
        # The datatable is populated asynchronously; render with no rows.
        return Venta.objects.none()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({
            'icono': opc_icono,
            'entidad': opc_entidad,
            'boton': 'Nueva Venta',
            'titulo': 'Listado de Ventas',
            'nuevo': '/venta/nuevo',
            'empresa': empresa,
            'filter_prod': '/venta/lista',
        })
        return context
@csrf_exempt
def data_report_total(request):
    """Return aggregated sales totals per (date, client, employee) as JSON.

    POST parameters start_date/end_date optionally bound the date range
    ('' on both means unbounded).  Only completed sales (``estado=1``) are
    included.  Each row is [date, client full name, employee full name,
    subtotal, IVA, total] with money values formatted to two decimals.
    """
    # BUG FIX: removed leftover debug code that fetched Venta id=35, forced
    # its `iva` to 1.80 and saved it — it silently rewrote database data on
    # every single request to this report endpoint.
    data = []
    start_date = request.POST.get('start_date', '')
    end_date = request.POST.get('end_date', '')
    try:
        if start_date == '' and end_date == '':
            query = Venta.objects.values('fecha_venta', 'cliente__nombres', 'cliente__apellidos', 'empleado__first_name'
                                         , 'empleado__last_name').annotate(Sum('subtotal')). \
                annotate(Sum('iva')).annotate(Sum('total')).filter(estado=1)
        else:
            query = Venta.objects.values('fecha_venta', 'cliente__nombres', 'cliente__apellidos',
                                         'empleado__first_name',
                                         'empleado__last_name').filter(
                fecha_venta__range=[start_date, end_date], estado=1).annotate(Sum('subtotal')). \
                annotate(Sum('iva')).annotate(Sum('total'))
        for p in query:
            data.append([
                p['fecha_venta'].strftime("%d/%m/%Y"),
                p['cliente__nombres'] + " " + p['cliente__apellidos'],
                p['empleado__first_name'] + " " + p['empleado__last_name'],
                format(p['subtotal__sum'], '.2f'),
                format((p['iva__sum']), '.2f'),
                format(p['total__sum'], '.2f')
            ])
    except Exception:
        # Deliberate best-effort: malformed dates yield an empty result set.
        pass
    return JsonResponse(data, safe=False)
class report_total(ValidatePermissionRequiredMixin, ListView):
    """Page hosting the aggregated sales-totals report (rows arrive via AJAX)."""
    model = Venta
    template_name = 'front-end/venta/venta_report_total.html'
    permission_required = 'view_venta'

    def get_queryset(self):
        # The datatable is populated asynchronously; render with no rows.
        return Venta.objects.none()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({
            'icono': opc_icono,
            'entidad': opc_entidad,
            'boton': 'Nueva Venta',
            'titulo': 'Listado de Ventas',
            'nuevo': '/venta/nuevo',
            'empresa': empresa,
            'filter_prod': '/venta/lista',
        })
        return context
| [
"Chrisstianandres@gmail.com"
] | Chrisstianandres@gmail.com |
def eh_primo(n):
    """Return True when ``n`` is a prime number, False otherwise.

    Fixes the original implementation, which returned True as soon as the
    first odd candidate failed to divide ``n`` (so 25 was reported prime)
    and fell off the loop returning None for n == 3.
    """
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    divisor = 3
    # Trial division by odd numbers up to sqrt(n).
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 2
    return True
def maior_primo_menor_que(n):
    """Return the largest prime less than or equal to ``n``.

    Returns -1 when no such prime exists (i.e. ``n < 2``).  The original
    body did not even parse (``elif:`` with no condition) and used
    ``p == n`` (a comparison) where an assignment was intended; it is
    rewritten self-contained so it does not depend on a broken sibling.
    """
    def _eh_primo(valor):
        # Trial division by 2 and the odd numbers up to sqrt(valor).
        if valor < 2:
            return False
        if valor == 2:
            return True
        if valor % 2 == 0:
            return False
        d = 3
        while d * d <= valor:
            if valor % d == 0:
                return False
            d += 2
        return True

    candidato = n
    while candidato >= 2:
        if _eh_primo(candidato):
            return candidato
        candidato -= 1
    return -1
| [
"you@example.com"
] | you@example.com |
45f836742cb5dd1b301e9cf7ce5f7cb6f97a3458 | 444f09701504a8c09127aafb7a5bcc71ec40aa38 | /zhihu_browser.py | 2f2d40d932849265b72458996d5e59a06b340543 | [] | no_license | NanrayJack/Python-Spider | 2ea478aa2e726eaa8f3ab725d656f2adf6b2e6f8 | cc0fdb517991f923ec900b64d45a1abcf1049446 | refs/heads/master | 2022-02-17T21:28:10.864080 | 2019-09-03T16:28:17 | 2019-09-03T16:28:17 | 205,997,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,299 | py | import time
from time import sleep
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.chrome.options import Options
import secret
import platform
import os
from utils import log
def add_chrome_webdriver():
    """Append the local ./library directory to PATH so Selenium can find chromedriver."""
    log(platform.system())
    driver_dir = os.path.join(os.getcwd(), 'library')
    os.environ['PATH'] += os.pathsep + driver_dir + os.pathsep
    log(os.environ['PATH'])
def reset_cookie(browser, domain):
    """Replace the browser's cookies with the ones stored in secret.cookie.

    The cookie string is the raw 'name=value; name=value; ...' header copied
    from a logged-in session; each pair is installed for ``domain``.
    """
    browser.delete_all_cookies()
    log('before', browser.get_cookies())
    for part in secret.cookie.split('; '):
        kv = part.split('=', 1)
        cookie = {
            'name': kv[0],
            'value': kv[1],
            'path': '/',
            'domain': domain,
            'secure': True,
        }
        log('cookie', cookie)
        browser.add_cookie(cookie)
    log('after', browser.get_cookies())
def scroll_to_end(browser):
    """Jump the viewport to the bottom of the page (triggers feed lazy-loading)."""
    script = 'window.scrollTo(0, document.body.scrollHeight);'
    browser.execute_script(script)
def start_crawler(browser):
    """Scrape Zhihu feed item titles using a cookie-forged login session.

    Loads the 404 page first (cookies can only be set for the current
    domain), installs the saved cookies, opens the home feed, scrolls to
    the bottom 7 times to lazy-load more cards, and logs the de-duplicated
    set of card titles.
    """
    url = "https://www.zhihu.com"
    # Visit a URL on the domain first; only then can cookies for this
    # domain be installed.
    browser.get('https://www.zhihu.com/404')
    reset_cookie(browser, '.zhihu.com')
    # With the cookies in place, loading the target URL looks logged-in.
    browser.get(url)
    # Scroll 7 times.
    count = 7
    res = set()
    while True:
        count -= 1
        if count < 0:
            break
        try:
            cards = browser.find_elements_by_css_selector('.Card.TopstoryItem')
            for card in cards:
                title = card.find_elements_by_css_selector('.ContentItem-title')
                # In practice there is only one title per card, but
                # find_elements always returns a list.
                for i in title:
                    res.add(i.text)
        except NoSuchElementException:
            pass
        scroll_to_end(browser)
    for text in res:
        log(text)
def main():
    """Entry point: set up chromedriver, run the crawl, always quit the browser."""
    add_chrome_webdriver()
    opts = Options()
    # opts.add_argument("--headless")  # enable to run without a visible window
    browser = webdriver.Chrome(chrome_options=opts)
    try:
        start_crawler(browser)
    finally:
        browser.quit()


if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
e71590e32c9544d472276f2fc59eddcc0e2ba366 | c06912c147874c0c0c79412cfa11ba42fb1bbf39 | /python_scripts/check_HIT_submissions.py | 56ab55e990f01fd1e9dbf261d015f3bdf5c4ded3 | [] | no_license | tongliuTL/active_learning_crowds | defa1ca490fcc8f714f67426459312d5233191c7 | 3dc9abda731be65fde7bae35724f09dacae3fba4 | refs/heads/master | 2021-10-12T04:21:10.007342 | 2019-02-01T20:13:19 | 2019-02-01T20:13:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,865 | py | import sys
from create_compensation_hit import get_client
from helper_functions import get_timestamp, get_log_directory
from pymongo import MongoClient
from collections import defaultdict, OrderedDict
# Number of workers each HIT is published for (compared against the
# assignments returned by the MTurk API).
MAX_ASSIGNMENTS = 5
# Number of label records a single worker is expected to save per HIT
# (compared against the MongoDB `label` collection counts).
SETS_OF_LABELS = 12
def read_HITs_log(file_name):
    """Extract HIT ids from a creation log file.

    The log alternates lines; every odd line shaped
    'Your HIT ID is:<id>' contributes one id to the returned list.
    """
    hit_ids = []
    log_path = get_log_directory('HITs') + "/" + file_name
    with open(log_path) as log_file:
        for line_number, line in enumerate(log_file, start=1):
            prefix = line.strip().split(":")[0]
            if line_number % 2 == 1 and prefix == "Your HIT ID is":
                hit_ids.append(line.strip().split(":")[1].strip())
    return hit_ids
def check_submissions_MTurk(client, hit_id):
    """Print an MTurk-side status report for one HIT and collect its workers.

    Python 2 script (mixes print statements with parenthesized prints that
    emit tuples).  Returns {WorkerId: AssignmentId} for every assignment
    the API reports, whether or not the HIT is fully submitted.
    """
    print('MTurk API report:')
    hit = client.get_hit(HITId=hit_id)
    # hit.keys() = [u'HIT', 'ResponseMetadata']
    # hit['HIT'].keys() = [u'HITGroupId', u'RequesterAnnotation', u'NumberOfAssignmentsCompleted', u'Description', u'MaxAssignments', u'Title', u'NumberOfAssignmentsAvailable', u'Question', u'CreationTime', u'AssignmentDurationInSeconds', u'HITTypeId', u'NumberOfAssignmentsPending', u'HITStatus', u'HITId', u'QualificationRequirements', u'Keywords', u'Expiration', u'Reward', u'HITReviewStatus', u'AutoApprovalDelayInSeconds']
    # hit['ResponseMetadata'].keys() = ['RetryAttempts', 'HTTPStatusCode', 'RequestId', 'HTTPHeaders']
    HITStatus = hit['HIT']['HITStatus']
    HITCreationTime = hit['HIT']['CreationTime'].strftime("%Y-%m-%d %H:%M:%S")
    HITExpiration = hit['HIT']['Expiration'].strftime("%Y-%m-%d %H:%M:%S")
    HITReviewStatus = hit['HIT']['HITReviewStatus']
    NumberOfAssignmentsPending = hit['HIT']['NumberOfAssignmentsPending']
    NumberOfAssignmentsAvailable = hit['HIT']['NumberOfAssignmentsAvailable']
    NumberOfAssignmentsCompleted = hit['HIT']['NumberOfAssignmentsCompleted']
    # https://boto3.readthedocs.io/en/latest/reference/services/mturk.html#MTurk.Client.list_assignments_for_hit
    # Retrieve the results for a HIT
    response = client.list_assignments_for_hit(
        HITId=hit_id,
    )
    assignments = response['Assignments']
    print(hit_id, HITStatus, len(assignments), HITCreationTime, HITExpiration, HITReviewStatus, NumberOfAssignmentsPending, NumberOfAssignmentsAvailable, NumberOfAssignmentsCompleted)
    MTurk_workers_assignments = {}
    # Assignments lost: fewer assignments returned than the HIT was
    # published with — print each one with its status for inspection.
    if len(assignments) != MAX_ASSIGNMENTS:
        for assignment in assignments:
            WorkerId = assignment['WorkerId']
            assignmentId = assignment['AssignmentId']
            assignmentStatus = assignment['AssignmentStatus']
            print(WorkerId, assignmentId, assignmentStatus)
            MTurk_workers_assignments[WorkerId] = assignmentId
    # Assignments complete: print per-worker accept/submit times and duration.
    else:
        print 'The assignments are fully Submitted: {}'.format(len(assignments))
        for assignment in assignments:
            WorkerId = assignment['WorkerId']
            assignmentId = assignment['AssignmentId']
            AcceptTime = assignment['AcceptTime']
            SubmitTime = assignment['SubmitTime']
            Duration = SubmitTime-AcceptTime
            print(WorkerId, AcceptTime.strftime("%Y-%m-%d %H:%M:%S"), SubmitTime.strftime("%Y-%m-%d %H:%M:%S"), str(Duration))
            MTurk_workers_assignments[WorkerId] = assignmentId
    return MTurk_workers_assignments
def check_submissions_MongoDB(hit_collection, label_collection, hit_id, MTurk_workers_assignments):
    """Cross-check MTurk assignments against MongoDB-saved hit/label records.

    For each worker returned by the MTurk API, verifies that exactly
    SETS_OF_LABELS label documents were saved and that their assignmentID
    matches the one MTurk reports.  Mismatched counts trigger a diagnostic
    dump of the stored records (sorted by timestamp).  Returns
    {hit_id: set(assignment_ids)} for fully-consistent assignments.
    Python 2 script: parenthesized prints emit tuples.
    """
    print('MongoDB report:')
    print('hit collection:')
    hits_saved = hit_collection.find({'hitID': hit_id}).count()
    print(hits_saved)
    for WorkerId in MTurk_workers_assignments.keys():
        worker_hits_saved = hit_collection.find({'hitID': hit_id, 'workerID': WorkerId}).count()
        print(WorkerId, worker_hits_saved)
    print('label collection:')
    hit_assignment_ids = defaultdict(set)
    for WorkerId, MTurk_assignmentId in MTurk_workers_assignments.items():
        labels_saved_per_worker = label_collection.find({'hitID': hit_id, 'workerID': WorkerId}).count()
        print(WorkerId, labels_saved_per_worker, SETS_OF_LABELS)
        if labels_saved_per_worker != SETS_OF_LABELS:
            # Wrong number of labels saved: dump the ids/timestamps of what
            # IS stored to diagnose duplicates or missing submissions.
            _ids = []
            assignmentIds = []
            id_s = []
            assignment_timestamp = {}
            for record in label_collection.find({'hitID': hit_id, 'workerID': WorkerId}):
                _id = record['_id']
                _ids.append(_id)
                assignmentId = record['assignmentID']
                assignmentIds.append(assignmentId)
                id_ = record['id']
                id_s.append(id_)
                timestamp = record['timestamp']
                assignment_timestamp[_id] = timestamp
            # len vs len(set) exposes duplicated ids at a glance.
            print('_id', len(_ids), len(set(_ids)))
            print('assignmentID', len(assignmentIds), len(set(assignmentIds)))
            print('id', len(id_s), len(set(id_s)))
            for k, v in OrderedDict(sorted(assignment_timestamp.items(), key=lambda p: p[1])).items():
                print(k, v.strftime("%Y-%m-%d %H:%M:%S"))
        else:
            # Expected count: verify every stored label carries the same
            # assignment id that MTurk reported for this worker.
            labels = label_collection.find({'hitID': hit_id, 'workerID': WorkerId})
            for label in labels:
                MongoDB_assignmentID = label['assignmentID']
                if MTurk_assignmentId != MongoDB_assignmentID:
                    print(hit_id, WorkerId, MTurk_assignmentId, MongoDB_assignmentID)
                else:
                    hit_assignment_ids[hit_id].add(MTurk_assignmentId)
    return hit_assignment_ids
if __name__ == '__main__':
    # Usage: python check_HIT_submissions.py <environment> <hit_id | hits_log.txt>
    # Checks one HIT (or every HIT listed in a log file) against both the
    # MTurk API and the local MongoDB.  Python 2 script (print statements).
    environment = sys.argv[1]
    MTurk_client = get_client(environment)
    print('Account balance: {}'.format(MTurk_client.get_account_balance()['AvailableBalance']))
    MongoDB_client = MongoClient('localhost', 8081)
    db = MongoDB_client.meteor
    hit_collection = db['hit']
    label_collection = db['label']
    user_input = sys.argv[2]
    # Get hit id(s) from log file (.txt)
    if user_input.endswith('.txt'):
        file_name = user_input
        hit_id_list = read_HITs_log(file_name)
        print 'Checking {} HITs......\n'.format(len(hit_id_list))
        for index, hit_id in enumerate(hit_id_list):
            print(index, hit_id)
            MTurk_workers_assignments = check_submissions_MTurk(MTurk_client, hit_id)
            print
            hit_assignment_ids = check_submissions_MongoDB(hit_collection, label_collection, hit_id, MTurk_workers_assignments)
            print
    # Get hit id from command line
    else:
        hit_id = user_input
        print 'Checking HIT {}...\n'.format(hit_id)
        MTurk_workers_assignments = check_submissions_MTurk(MTurk_client, hit_id)
        print
        hit_assignment_ids = check_submissions_MongoDB(hit_collection, label_collection, hit_id, MTurk_workers_assignments)
        print
"tongliu0314@gmail.com"
] | tongliu0314@gmail.com |
89105cb6a33047fafb65b3d7700b868a31019e2f | 782e1a5cf60fd39482d14fb1a3861ab9eeeb6ebf | /src/model/split.py | ffa577b6a3d721a93eb97efe4c0e49f398e8b6fc | [] | no_license | andfanilo/ieee-fraud-detection | f88976e9c0937ead230775c336cb437693272b31 | 672cc784d55c6649e464c7c114bb86431b43085a | refs/heads/master | 2020-06-20T15:32:38.315054 | 2019-09-29T08:28:02 | 2019-09-29T08:28:02 | 197,164,393 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,475 | py | import datetime
import logging
from typing import Optional
import numpy as np
import pandas as pd
from sklearn.model_selection import PredefinedSplit
from sklearn.model_selection._split import _BaseKFold
from sklearn.utils import indexable
from sklearn.utils.validation import _num_samples
LOGGER = logging.getLogger(__name__)
class CustomDateSplitter:
    """Cross-validation splitter driven by explicit calendar date ranges.

    ``datetime_array`` is a Series of transaction-time offsets in seconds
    from 2017-11-30 (the competition's reference instant), indexed by
    TransactionID.  Each entry of ``date_ranges`` is a pair
    ``[[train_start, train_end], [test_start, test_end]]`` of 'YYYY-MM-DD'
    strings; ``split`` yields the positional row ids falling inside each
    (inclusive) date window.

    Examples
    --------
    >>> date_ranges = [
    ...     [['2017-12-01', '2017-12-05'], ['2017-12-06', '2017-12-10']],
    ... ]
    >>> cv = CustomDateSplitter(ds.X_train['TransactionDT'], date_ranges)
    >>> for train_index, test_index in cv.split():
    ...     print('TRAIN:', train_index, 'TEST:', test_index)
    """

    def __init__(self, datetime_array, date_ranges):
        origin = datetime.datetime.strptime("2017-11-30", "%Y-%m-%d")
        frame = datetime_array.reset_index()
        # Turn the seconds offset into absolute timestamps.
        frame["TransactionDT"] = frame["TransactionDT"].map(
            lambda seconds: origin + datetime.timedelta(seconds=seconds)
        )
        # Positional row id, later returned by split().
        frame["id"] = np.arange(len(frame))
        self.datetime_array = frame.set_index("TransactionDT").drop(
            "TransactionID", axis=1
        )
        self.date_ranges = date_ranges

    def split(self, X=None, y=None, groups=None):
        for train_span, test_span in self.date_ranges:
            train_start, train_end = train_span
            test_start, test_end = test_span
            yield (
                self.datetime_array[train_start:train_end].values.ravel(),
                self.datetime_array[test_start:test_end].values.ravel(),
            )

    def get_n_splits(self, X=None, y=None, groups=None):
        return len(self.date_ranges)
class TimeSeriesSplit(_BaseKFold):  # pylint: disable=abstract-method
    """Time Series cross-validator

    Provides train/test indices to split time series data samples that are observed at fixed time intervals,
    in train/test sets. In each split, test indices must be higher than before, and thus shuffling in cross validator is
    inappropriate.

    This cross_validation object is a variation of :class:`TimeSeriesSplit` from the popular scikit-learn package.
    It extends its base functionality to allow for expanding windows, and rolling windows with configurable train and
    test sizes and delays between each. i.e. train on weeks 1-8, skip week 9, predict week 10-11.

    In this implementation we specifically force the test size to be equal across all splits.

    Expanding Window:

            Idx / Time  0..............................................n
            1           | train  | delay |  test  |                     |
            2           |       train    | delay  |  test  |            |
            ...         |                                               |
            last        |            train            | delay |  test  |

    Rolling Windows:
            Idx / Time  0..............................................n
            1           | train   | delay |  test  |                    |
            2           | step |  train  | delay |  test  |             |
            ...         |                                               |
            last        | step | ... | step |  train  | delay |  test  |

    Parameters:
        n_splits : int, default=5
            Number of splits. Must be at least 5.
        train_size : int, optional
            Size for a single training set.
        test_size : int, optional, must be positive
            Size of a single testing set
        delay : int, default=0, must be positive
            Number of index shifts to make between train and test sets
            e.g,
            delay=0
                TRAIN: [0 1 2 3] TEST: [4]
            delay=1
                TRAIN: [0 1 2 3] TEST: [5]
            delay=2
                TRAIN: [0 1 2 3] TEST: [6]
        force_step_size : int, optional
            Ignore split logic and force the training data to shift by the step size forward for n_splits
            e.g
            TRAIN: [ 0 1 2 3] TEST: [4]
            TRAIN: [ 0 1 2 3 4] TEST: [5]
            TRAIN: [ 0 1 2 3 4 5] TEST: [6]
            TRAIN: [ 0 1 2 3 4 5 6] TEST: [7]

    Examples
    --------
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([1, 2, 3, 4, 5, 6])
    >>> tscv = TimeSeriesSplit(n_splits=5)
    >>> print(tscv)  # doctest: +NORMALIZE_WHITESPACE
    TimeSeriesSplit(train_size=None, n_splits=5)
    >>> for train_index, test_index in tscv.split(X):
    ...     print('TRAIN:', train_index, 'TEST:', test_index)
    ...     X_train, X_test = X[train_index], X[test_index]
    ...     y_train, y_test = y[train_index], y[test_index]
    TRAIN: [0] TEST: [1]
    TRAIN: [0 1] TEST: [2]
    TRAIN: [0 1 2] TEST: [3]
    TRAIN: [0 1 2 3] TEST: [4]
    TRAIN: [0 1 2 3 4] TEST: [5]

    Source : https://www.kaggle.com/mpearmain/extended-timeseriessplitter
    """

    def __init__(
        self,
        n_splits: Optional[int] = 5,
        train_size: Optional[int] = None,
        test_size: Optional[int] = None,
        delay: int = 0,
        force_step_size: Optional[int] = None,
    ):
        # Validate every parameter eagerly so misconfiguration fails at
        # construction time, not in the middle of a CV run.
        if n_splits and n_splits < 5:
            raise ValueError(f"Cannot have n_splits less than 5 (n_splits={n_splits})")
        super().__init__(n_splits, shuffle=False, random_state=None)
        self.train_size = train_size
        if test_size and test_size < 0:
            raise ValueError(
                f"Cannot have negative values of test_size (test_size={test_size})"
            )
        self.test_size = test_size
        if delay < 0:
            raise ValueError(f"Cannot have negative values of delay (delay={delay})")
        self.delay = delay
        if force_step_size and force_step_size < 1:
            raise ValueError(
                f"Cannot have zero or negative values of force_step_size "
                f"(force_step_size={force_step_size})."
            )
        self.force_step_size = force_step_size

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters:
            X : array-like, shape (n_samples, n_features)
                Training data, where n_samples is the number of samples and n_features is the number of features.
            y : array-like, shape (n_samples,)
                Always ignored, exists for compatibility.
            groups : array-like, with shape (n_samples,), optional
                Always ignored, exists for compatibility.

        Yields:
            train : ndarray
                The training set indices for that split.
            test : ndarray
                The testing set indices for that split.
        """
        X, y, groups = indexable(
            X, y, groups
        )  # pylint: disable=unbalanced-tuple-unpacking
        n_samples = _num_samples(X)
        n_splits = self.n_splits
        n_folds = n_splits + 1
        delay = self.delay
        if n_folds > n_samples:
            raise ValueError(
                f"Cannot have number of folds={n_folds} greater than the number of samples: {n_samples}."
            )
        indices = np.arange(n_samples)
        split_size = n_samples // n_folds
        # Defaults: expanding window using all remaining data for training,
        # one fold's worth of samples for testing.
        train_size = self.train_size or split_size * self.n_splits
        test_size = self.test_size or n_samples // n_folds
        # full_test = test window plus the gap skipped between train and test.
        full_test = test_size + delay
        if full_test + n_splits > n_samples:
            raise ValueError(
                f"test_size\\({test_size}\\) + delay\\({delay}\\) = {test_size + delay} + "
                f"n_splits={n_splits} \n"
                f" greater than the number of samples: {n_samples}. Cannot create fold logic."
            )
        # Generate logic for splits.
        # Overwrite fold test_starts ranges if force_step_size is specified.
        if self.force_step_size:
            step_size = self.force_step_size
            final_fold_start = n_samples - (train_size + full_test)
            # Align the first fold so the LAST fold ends exactly at n_samples.
            range_start = (final_fold_start % step_size) + train_size
            test_starts = range(range_start, n_samples, step_size)
        else:
            if not self.train_size:
                # Expanding window: each fold advances by one fold-size.
                step_size = split_size
                range_start = (
                    (split_size - full_test) + split_size + (n_samples % n_folds)
                )
            else:
                # Rolling window: spread the leftover samples evenly across folds.
                step_size = (n_samples - (train_size + full_test)) // n_folds
                final_fold_start = n_samples - (train_size + full_test)
                range_start = (
                    final_fold_start - (step_size * (n_splits - 1))
                ) + train_size
            test_starts = range(range_start, n_samples, step_size)
        # Generate data splits.
        for test_start in test_starts:
            idx_start = test_start - train_size if self.train_size is not None else 0
            # Ensure we always return a test set of the same size
            if indices[test_start : test_start + full_test].size < full_test:
                continue
            yield (
                indices[idx_start:test_start],
                indices[test_start + delay : test_start + full_test],
            )
if __name__ == "__main__":
    # Smoke demo: print the folds produced by the default expanding window
    # and by a rolling window with train_size=10, test_size=5, delay=3.
    X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]])
    y = np.array([1, 2, 3, 4, 5, 6])
    tscv = TimeSeriesSplit(n_splits=5)
    print(tscv)  # doctest: +NORMALIZE_WHITESPACE
    for train_index, test_index in tscv.split(X):
        print("TRAIN:", train_index, "TEST:", test_index)
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
    print("---------------------------------------------")
    LARGE_IDX = np.arange(0, 30)
    rolling_window = TimeSeriesSplit(train_size=10, test_size=5, delay=3)
    print(rolling_window)
    for train_index, test_index in rolling_window.split(LARGE_IDX):
        print("TRAIN:", train_index, "TEST:", test_index)
        X_train, X_test = LARGE_IDX[train_index], LARGE_IDX[test_index]
| [
"fanilo.andrianasolo@worldline.com"
] | fanilo.andrianasolo@worldline.com |
1c5a217e9f5c3d0bfa5d81fb7f4e52d6dccae8dc | 746e0dc66893cd287a0a1f94fa7f5fad168d38f9 | /main/migrations/0030_auto_20141012_0029.py | a91a9d3b23b969865ecbf8e5a88cc017fa64e136 | [] | no_license | alpotapov/foodplan | eeeb04bf5a12fac1c67a31443ab34643b59a9ccd | 0dd26a2446da9d7129c259532dcd0f19b87038d6 | refs/heads/master | 2021-01-16T21:17:44.670107 | 2014-11-15T17:22:50 | 2014-11-15T17:22:50 | 26,687,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('main', '0029_auto_20141012_0024'),
]
operations = [
migrations.AlterField(
model_name='scheduledmeal',
name='date',
field=models.DateField(default=datetime.date.today),
),
]
| [
"al.potapov@hotmail.com"
] | al.potapov@hotmail.com |
9add89bb31c6cf98448a3a7a6624b32dfcda9b13 | 825616ad2c01a37c155579facb3ff2b9553cfb9f | /tests/test_ignore.py | 404a2ef2692d265babf632faeee6549b41f4a45e | [
"MIT"
] | permissive | paolodina/namedtupled | f909b0303387b0ef8c73fe31e4b227f586410254 | cca5396dd806dae2039a7099cce3bfc7e568fec2 | refs/heads/master | 2021-04-30T04:40:22.830873 | 2018-02-14T18:57:19 | 2018-02-14T18:57:19 | 121,540,307 | 0 | 0 | null | 2018-02-14T17:34:35 | 2018-02-14T17:34:34 | null | UTF-8 | Python | false | false | 632 | py | import namedtupled
import pytest
# Fixture: a nested structure exercising plain values, nested dicts,
# a list of strings and a list of dicts — shapes that namedtupled.ignore()
# must shield from conversion.
mapping = {
    'foo': 'bar',
    'baz': {'qux': 'quux'},
    'tito': {
        'tata': 'tutu',
        'totoro': 'tots',
        'frobnicator': ['this', 'is', 'not', 'a', 'mapping']},
    'alist': [{'one': '1', 'a': 'A'}, {'two': '2', 'b': 'B'}]
}
# Same fixture twice, to exercise a top-level array input.
mapping_array = [mapping, mapping]
def test_namedtupled_ignore_object(mapping=mapping):
    """A dict wrapped with ignore() must pass through map() unchanged."""
    ignored = namedtupled.ignore(mapping)
    result = namedtupled.map(ignored)
    assert result == ignored
def test_nametupled_ignore_array(mapping=mapping_array):
    """A list of dicts wrapped with ignore() must pass through map() unchanged."""
    ignored = namedtupled.ignore(mapping)
    result = namedtupled.map(ignored)
    assert result == ignored
| [
"brennv@users.noreply.github.com"
] | brennv@users.noreply.github.com |
0f638cb37c3cfe9526b6699975d791d2e7f0eaff | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3_neat/16_0_3_yesdongil_main_big.py | 207b07db4c346d26f784da03b2499dd769587875 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,846 | py | import os
import sys
import math
import itertools
def xrange(start, stop):
    """Generator equivalent of a half-open range [start, stop).

    Deliberately shadows the builtin: it only relies on ``<`` and ``+= 1``,
    so non-integer bounds are accepted as well.
    """
    current = start
    while current < stop:
        yield current
        current += 1
def is_prime(value) :
    """Misnomer: try to find a small nontrivial divisor of ``value``.

    Returns the first divisor found (2, 3, or a 6k±1 candidate), or 0 when
    none was found within the search bounds.  Callers treat 0 as "possibly
    prime" and REJECT the candidate, so the deliberately capped, incomplete
    trial division is acceptable for this Code Jam problem.
    NOTE(review): ``limit/6`` relies on Python 2 floor division — the rest
    of this file (raw_input, print statements) is Python 2 code.
    """
    ret = 0
    if (value % 2) == 0 :
        ret = 2
    elif (value % 3) == 0 :
        ret = 3
    else :
        limit = int(math.sqrt(value))
        index_limit = limit/6 + 1
        # All primes > 3 are of the form 6k-1 or 6k+1; test both per k.
        for i in xrange(1, index_limit) :
            prime_v = 6*i - 1
            if (value % prime_v) == 0 :
                ret = prime_v
                break
            prime_v = 6*i + 1
            if (value % prime_v) == 0 :
                ret = prime_v
                break
            # Cap the search: finding *some* divisor quickly is enough;
            # give up past ~10000 and report "no divisor found".
            if(prime_v > 10000) :
                break
    return ret
def make_value(N, middle, base):
    """Interpret the N-digit string 1<middle bits>1 in the given base.

    The candidate has N digits: a leading 1 (contributing base**(N-1)), a
    trailing 1 (contributing 1), and N-2 middle digits taken from the
    binary representation of ``middle`` — bit k contributes base**(k+1)
    when set.

    Uses floor division (``//=``) so the loop behaves identically on
    Python 2 and Python 3; the original ``middle /= 2`` produces a float
    under Python 3.
    """
    result = 1 + base ** (N - 1)
    place = base
    while middle > 0:
        if middle % 2 == 1:
            result += place
        place *= base
        middle //= 2
    return result
def get_result(N, J) :
    """Search for J coin candidates of length N (Code Jam 2016 "Coin Jam" shape).

    Each candidate is the digit string 1<middle bits>1 (see make_value).
    It is accepted only when, interpreted in EVERY base 2..10, is_prime()
    finds a nontrivial divisor — i.e. the value is provably composite in
    all bases.  Each returned row is
    [base-10 digit value, divisor_base2, ..., divisor_base10].
    """
    ret = []
    result = []
    limit = 2**(N-2)  # number of possible middle-bit patterns
    prime_ret = 0
    list_count = 0
    for i in range(0, limit) :
        divisor_list = []
        for base in range(2, 11) :
            test_v = make_value(N, i, base)
            prime_ret = is_prime(test_v)
            if(prime_ret == 0) :
                # No divisor found in this base: the value may be prime,
                # so this candidate cannot be used — try the next pattern.
                break
            else :
                divisor_list.append(prime_ret)
        if(prime_ret > 0) :
            # The inner loop never hit 0: a divisor exists for every base.
            result.append(make_value(N, i, 10))
            result.extend(divisor_list)
            ret.append(result)
            result = []
            list_count += 1
            if(list_count == J) :
                break
    return ret
def Main():
    """Read the problem input and print accepted coins with their divisors.

    Input (Code Jam format): first line = number of test cases (read but
    unused — a single case is assumed), second line = "N J".  Output:
    "Case #1:" followed by one line per coin (digit string then its nine
    divisors, space-separated).  Python 2 only (raw_input / print statement).
    """
    result_list = []
    arg = []
    CASE_N = int(raw_input())  # case count; unused, always treated as 1
    line = raw_input()
    arg = line.split()
    result_list = get_result(int(arg[0]), int(arg[1]))
    print 'Case #1:'
    for result in result_list :
        for result_one in result :
            sys.stdout.write(str(result_one) + ' ')
        sys.stdout.write('\n')


if __name__ == '__main__':
    sys.exit(Main())  # Main returns None, so the process exit status is 0
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.