blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
da441fe0d6c94bb2e2df9eb6f5089b6d7cb64770 | b65e09f870e3a6f99bd68f3fd7ea92523eff6b0f | /meituan/main.py | 5d3ccf6f5f410e2aededf1b36bc6de6898e3b23f | [] | no_license | f1025395916/untitled | bf079c810bce161693c127f81fbd8efe98508d1f | 8c1005b783a1029d80332fc4cf52074514ea7502 | refs/heads/master | 2020-05-03T02:36:00.963934 | 2019-08-28T08:43:16 | 2019-08-28T08:43:16 | 178,374,255 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,284 | py | import requests
import json
import jsonpath
import fontTools
headers = {
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',
'Accept': 'application/json',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'Referer': 'https://h5.waimai.meituan.com/waimai/mindex/home',
'Content-Type': 'application/x-www-form-urlencoded',
'Origin': 'https://h5.waimai.meituan.com',
'Connection': 'keep-alive',
}
cookies = {
'_lx_utm': 'utm_source%3DBaidu%26utm_medium%3Dorganic',
'_lxsdk_cuid': '16c2302ab6dc8-0a3d0f563d6c22-14367940-1aeaa0-16c2302ab6ec8',
'_lxsdk': '16c2302ab6dc8-0a3d0f563d6c22-14367940-1aeaa0-16c2302ab6ec8',
'wm_order_channel': 'default',
'utm_source': '',
'terminal': 'i',
'w_utmz': 'utm_campaign=(direct)&utm_source=5000&utm_medium=(none)&utm_content=(none)&utm_term=(none)',
'openh5_uuid': '16c2302ab6dc8-0a3d0f563d6c22-14367940-1aeaa0-16c2302ab6ec8',
'w_latlng': '36657209,117055413',
'w_actual_lat': '0',
'w_actual_lng': '0',
'cssVersion': '9968de10',
'au_trace_key_net': 'default',
'_lxsdk_s': '16c2c23ca54-8d5-31a-b59%7C%7C55',
'uuid': '16c2302ab6dc8-0a3d0f563d6c22-14367940-1aeaa0-16c2302ab6ec8',
'igateApp': 'custom',
'w_uuid': '16c2302ab6dc8-0a3d0f563d6c22-14367940-1aeaa0-16c2302ab6ec8',
'mtsi-real-ip': '60.208.74.98',
'mtsi-cur-time': '2019-07-26 11:02:55',
'w_visitid': '0a2aadc8-ed0a-419d-b086-8546a9a5e0c0',
}
params = (
('_', '1564110538926'),
('X-FOR-WITH', '4CH20e1RM5tFPC3ysgcEMB2eNihocq70OCgAZlOxM6ErimvsViyCQWHTX3s9O5sr5notM0IjX2yiTvX5yQOTNeumO+bn/AmDSF4ilrVVhVYYnuC+CUNIhWdAeJSq7u4Rz2GwjLsUrxG6B4u8Y0P7lw=='),
)
data = {
'startIndex': '4',
'sortId': '0',
'multiFilterIds': '',
'sliderSelectCode': '',
'sliderSelectMin': '',
'sliderSelectMax': '',
'geoType': '2',
'rankTraceId': 'E7BE070C8CAF616576DFC1828EAACE84',
'uuid': '16c2302ab6dc8-0a3d0f563d6c22-14367940-1aeaa0-16c2302ab6ec8',
'platform': '3',
'partner': '4',
'originUrl': 'https://h5.waimai.meituan.com/waimai/mindex/home',
'riskLevel': '71',
'optimusCode': '10',
'wm_latitude': '36657209',
'wm_longitude': '117055413',
'wm_actual_latitude': '0',
'wm_actual_longitude': '0',
'openh5_uuid': '16c2302ab6dc8-0a3d0f563d6c22-14367940-1aeaa0-16c2302ab6ec8',
'_token': 'eJxVj9uOokAQht+FW43dICdN5gJBAcEDIqhM5gKksRlsQGgRney7b092NptNKvmrvvorVfXFNXbKTXnIK5Afch1quCnHj+BI5oYcbVlHkkWeh9JYkBVpyJ3/Y+pEYCxpQoObvo9ZX5aVj2+wY/U/8M7LKhyyHfBjKIgsvj02s3CY0rqdAoCl0SPOSZyPCMrpPS5H54qAPwiQvExRD3BFEDuKY8Nkz4aZFj8a/yj9W6/YF8zb5peSZWj5vBYrenu8NG+XqVd9AH1/rNj7Yu7y4+ACHTlu2nVAWi9HGEcJdUAj1brvdtQvdfFzJU/y51YeI2TbeSqsr4uO2sWJksrvbVBhUoEXXy17EiI4e6F75K6LwsGyY+U4qMSj4jzF7UGu562pL3V06PG1UKV5glCtPqpFpXpWlu7v7u7S9wbeGHwhhE77ijUqR6FlmWnvoLqbVKv+VgPdN1IxN/3FHEMziAzPM17SKYg0TWk817bMZrdQwaTxyTHd8KYqiWG2KfdnFHebMqH3e1JcvEkZKb0mCpUZkVWIMYKxvgoVmklacM/EmaBnFw9CFOwNa5m4IIWlC0qE28lztj8r622X2vPn6Yi8wWCTAv/gHPMbWW/5RSo+P9cHeCvqg+uK2lHIE/h4e+N+/QYm5b9y'
}
response = requests.post('https://i.waimai.meituan.com/openh5/homepage/poilist', headers=headers, params=params, cookies=cookies, data=data)
js = json.loads(response.text)
print(js)
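# note: jsonpath and fontTools are imported above but never used here. A
# minimal sketch of what jsonpath could extract from the decoded response
# (the '$..poiName' key is an assumption, not the documented API schema):
# poi_names = jsonpath.jsonpath(js, '$..poiName')
# if poi_names:
#     print(poi_names)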
| [
"1025395916@qq.com"
] | 1025395916@qq.com |
bacf596e1202013a98cc40f7d2940d69b8a2e216 | afa0d5a97925273f7fb0befef697d36020df5787 | /packages/google-cloud-alloydb/samples/generated_samples/alloydb_v1_generated_alloy_db_admin_get_cluster_sync.py | eb296f332c1d78fa23a91559a1674b1100657a4a | [
"Apache-2.0"
] | permissive | scooter4j/google-cloud-python | dc7ae1ba6a33a62a40b617b806ec8ed723046b8b | 36b1cf08092d5c07c5971bb46edda7a9928166b1 | refs/heads/master | 2023-04-14T18:36:48.643436 | 2023-04-06T13:19:26 | 2023-04-06T13:19:26 | 188,338,673 | 0 | 0 | null | 2019-05-24T02:27:15 | 2019-05-24T02:27:14 | null | UTF-8 | Python | false | false | 1,805 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetCluster
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-alloydb
# [START alloydb_v1_generated_AlloyDBAdmin_GetCluster_sync]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import alloydb_v1
def sample_get_cluster():
# Create a client
client = alloydb_v1.AlloyDBAdminClient()
# Initialize request argument(s)
request = alloydb_v1.GetClusterRequest(
name="name_value",
)
# Make the request
response = client.get_cluster(request=request)
# Handle the response
print(response)
# [END alloydb_v1_generated_AlloyDBAdmin_GetCluster_sync]
| [
"noreply@github.com"
] | noreply@github.com |
ae3df4ef092d4514dccb7aab617de96786eb2497 | 8a640ca583db73c8af025c135d8f6c53afd2e9ba | /tr_sys/tr_ara_ncats/urls.py | 66bd73a7857e77ffbcd1da67a83cd0a2252a6feb | [
"MIT"
] | permissive | edeutsch/Relay | b6434096da3ab5b85d55eab1c14da1bf3bfe0a21 | fc4581e4a512ebd3a0459bbd06652eb4291133e9 | refs/heads/master | 2022-11-30T17:35:23.048297 | 2020-08-04T18:46:09 | 2020-08-04T18:46:09 | 282,288,237 | 0 | 0 | MIT | 2020-07-24T18:17:23 | 2020-07-24T18:17:23 | null | UTF-8 | Python | false | false | 246 | py | from django.urls import path, include
from . import api
apipatterns = [
path(r'', api.index, name='ara-ncats-api'),
path(r'runquery', api.runquery, name='ara-ncats-runquery')
]
urlpatterns = [
path(r'api/', include(apipatterns)),
]
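# with the include() above, this app resolves (relative to wherever the
# project-level urls.py mounts it, which is not shown in this file):
#   api/          -> api.index    (name='ara-ncats-api')
#   api/runquery  -> api.runquery (name='ara-ncats-runquery')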
| [
"markwilliams2755@gmail.com"
] | markwilliams2755@gmail.com |
2111e682f882f3aafc271380c846680da7829ba8 | b444692bd50243b75ca6a777dc696950df71e242 | /Anchors/RemoveAllAnchors.py | 521fce0536a33a8a5ac5dc2bf8977eb3bead550f | [
"MIT"
] | permissive | davelab6/robofont-scripts | 7a358a577b891b5e2685806f1eebfc1fb24c47d8 | 2a6d6b2d365d00cb1f2dc7b2afe50ff6a5f6047d | refs/heads/master | 2021-01-18T15:14:53.558768 | 2013-11-23T19:52:02 | 2013-11-23T19:52:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,774 | py | __copyright__ = __license__ = """
Copyright (c) 2013 Adobe Systems Incorporated. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
__doc__ = """
Remove All Anchors v1.0 - Feb 21 2013
Removes all the anchors in a font.
==================================================
Versions:
v1.0 - Feb 21 2013 - Initial release
"""
def run(font):
anchorFound = False
for glyph in font:
if len(glyph.anchors) > 0:
glyph.clearAnchors()
glyph.glyphChangedUpdate()
anchorFound = True
if anchorFound:
print 'All anchors were removed.'
else:
print 'The font had no anchors.'
if __name__ == "__main__":
font = CurrentFont()
if font == None:
print 'Open a font first.'
else:
if not len(font):
print "The font has no glyphs."
else:
run(font)
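# a minimal sketch for limiting the cleanup to the glyphs currently selected
# in the Font Overview instead of the whole font (assumes RoboFont's
# font.selection attribute, which returns the selected glyph names):
#
# for glyphName in font.selection:
#     glyph = font[glyphName]
#     if len(glyph.anchors) > 0:
#         glyph.clearAnchors()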
| [
"msousa@adobe.com"
] | msousa@adobe.com |
283cc666d0bb18ceb259e7ce977b964c5a16d1a3 | 544d0cef14d6e003b91763b0d351494fbe191f5c | /es_site_config.py.example | 9c1e8301af0d400ae91ddec3581855bd6cdbf821 | [] | no_license | TTimo/es_build | e22e0e1dd7743e22847d16360b41e992a8c1259b | b1e596bb36392a6524c63b1dbed0ce75f2f306a0 | refs/heads/master | 2016-09-06T15:24:24.722752 | 2013-10-10T12:44:07 | 2013-10-10T12:44:07 | 12,408,799 | 3 | 0 | null | 2013-10-05T19:54:27 | 2013-08-27T14:49:14 | Python | UTF-8 | Python | false | false | 116 | example | #!/usr/bin/env python
# override the default configuration settings with a site configuration
FLAVOR = "ubuntu_10"
| [
"ttimo@ttimo.net"
] | ttimo@ttimo.net |
48232ef8dae4207da9df14791e46fa8988f9cf3e | 9b74ef81e5c1c1dbecf847ada0e704c8e808db22 | /data/triviaqa/config.py | 16b933196b1639ff75fa8bd3e17f6de1e78d0420 | [
"MIT"
] | permissive | neufang/jack | 4464ddc1d7c97bd729e1f661734016bb0d0b1397 | 96a4e59be70ec7df382d26d5bf6d6eee2c94f5e7 | refs/heads/master | 2020-06-23T14:05:11.116345 | 2019-07-24T13:52:15 | 2019-07-24T13:52:15 | 198,644,166 | 0 | 0 | MIT | 2019-07-24T13:46:26 | 2019-07-24T13:46:25 | null | UTF-8 | Python | false | false | 275 | py | import os
from os.path import join
"""
Global config options
"""
TRIVIA_QA = os.environ.get('TRIVIAQA_HOME', None)
TRIVIA_QA_UNFILTERED = os.environ.get('TRIVIAQA_UNFILTERED_HOME', None)
CORPUS_DIR = join(os.environ.get('TRIVIAQA_HOME', ''), "preprocessed")
VEC_DIR = ''
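# the paths above are resolved purely from environment variables, so they
# must be set before this module is imported, e.g. (hypothetical paths):
#   export TRIVIAQA_HOME=/data/triviaqa
#   export TRIVIAQA_UNFILTERED_HOME=/data/triviaqa-unfiltered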
| [
"dirk.weissenborn@gmail.com"
] | dirk.weissenborn@gmail.com |
ba83ffc60ad253aed46ec0172ef01d949a01742e | 57ddab24ba7860f8878c689f9fa22b0779d60157 | /categorias/iniciante/uri1051.py | cb688fa9eb809d54eefeb058ecb54ada5c421f65 | [] | no_license | matheusfelipeog/uri-judge | ba1d32e50ad7239b331ad0e1181a1bffc6e61b41 | 0232be52da78fd67261c6d6a74eff3267d423afd | refs/heads/master | 2021-07-03T02:32:13.395829 | 2021-01-29T18:32:35 | 2021-01-29T18:32:35 | 215,845,427 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | # -*- coding: utf-8 -*-
salario = float(input())
if 0.0 <= salario <= 2000.00:
print('Isento')
elif 2000.01 <= salario <= 3000.00:
imposto = ((salario - 2000) * 0.08)
print('R$ {:.2f}'.format(imposto))
elif 3000.01 <= salario <= 4500.00:
imposto = (1000 * 0.08) + ((salario - 3000) * 0.18)
print('R$ {:.2f}'.format(imposto))
elif salario > 4500.00:
imposto = (1000 * 0.08) + (1500 * 0.18) + ((salario - 4500) * 0.28)
print('R$ {:.2f}'.format(imposto))
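# worked example: salario = 5000.00 falls in the last bracket, so the tax is
# (1000 * 0.08) + (1500 * 0.18) + ((5000 - 4500) * 0.28)
# = 80.00 + 270.00 + 140.00 = 490.00 -> prints "R$ 490.00"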
| [
"50463866+matheusfelipeog@users.noreply.github.com"
] | 50463866+matheusfelipeog@users.noreply.github.com |
9a2b83dbf7175667edfd1bdbdcc39fdc38de182f | 0dfcc5e04592062c0a3e80120d3bfc32dd166b76 | /gen_bac_tfrecord.py | 042c81f937144193708b15a9c1c9c96cd7661959 | [] | no_license | nightinwhite/attention_night | b12266a2da81033dbfddbf1328063e7fc9dadaa5 | 245237282e2859b03049ae649ca4cf9aebc426ce | refs/heads/master | 2021-01-19T13:39:12.854013 | 2017-08-23T02:16:20 | 2017-08-23T02:16:20 | 100,852,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,684 | py | #coding:utf-8
import os
import tensorflow as tf
from PIL import Image
import numpy as np
import cv2
import sys
reload(sys)
sys.setdefaultencoding('utf8')
def tst_img(img):
cv2.imshow("tst",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
char_to_label_bac = {
u'君':0,u'不':1,u'见':2,u'黄':3,u'河':4,u'之':5,u'水':6,u'天':7,u'上':8,
u'来':9,u'奔':10,u'流':11,u'到':12,u'海':13,u'复':14,u'回':15,u'烟':16,
u'锁':17,u'池':18,u'塘':19,u'柳':20,u'深':21,u'圳':22,u'铁':23,u'板':24,
u'烧':25,u'0':26,u'1':27,u'2':28,u'3':29,u'4':30,u'5':31,u'6':32,
u'7':33,u'8':34,u'9':35,u'+':36,u'-':37,u'*':38,u'/':39,
u'(':40,u')':41,u'#':42,
}
value_dict = {
u'君':0,u'不':1,u'见':2,u'黄':3,u'河':4,u'之':5,u'水':6,u'天':7,u'上':8,
u'来':9,u'奔':10,u'流':11,u'到':12,u'海':13,u'复':14,u'回':15,u'烟':16,
u'锁':17,u'池':18,u'塘':19,u'柳':20,u'深':21,u'圳':22,u'铁':23,u'板':24,
u'烧':25,u'0':26,u'1':27,u'2':28,u'3':29,u'4':30,u'5':31,u'6':32,
u'7':33,u'8':34,u'9':35,
}
mode = "test"
fill_label = 42
seq_length = 26
file_dir = "/home/night/data/train_split_bac/"
txt_dir = "baiducontest_real_labels.txt"
IMG_WIDTH = 400
IMG_HEIGHT = 64
tf_writer = tf.python_io.TFRecordWriter("tfrecords/{0}_bac_64.records".format(mode))
f = open(txt_dir,"r")
tmp_str = f.readline()
tmp_strs = []
i = 0
while tmp_str!="":
tmp_str = unicode(tmp_str)
# tmp_str = tmp_str[:-1]
# print tmp_str
tmp_strs.append([tmp_str,i])
i+=1
tmp_str = f.readline()
np.random.seed(1234)
np.random.shuffle(tmp_strs)
if mode == "test":
tmp_strs = tmp_strs[90000:]
i = 90000
else:
tmp_strs = tmp_strs[:90000]
i = 0
for tmp_line in tmp_strs:
print i
tmp_str = tmp_line[0].split(" ")[0]
tmp_chars_group = tmp_str.split(";")
tmp_chars = tmp_chars_group[-1]
fen_index = -1
fen_str = ""
l_s = ""
for m in range(len(tmp_chars)):
if tmp_chars[m] == "/":
fen_index = m
break
if fen_index != -1:
p = fen_index - 1
while p >= 0 and value_dict.has_key(tmp_chars[p]):
fen_str = tmp_chars[p] + fen_str
p -= 1
fen_str += "/"
q = fen_index + 1
while q < len(tmp_chars) and value_dict.has_key(tmp_chars[q]):
fen_str = fen_str + tmp_chars[q]
q += 1
for m in range(p + 1):
l_s += tmp_chars[m]
l_s += "/"
for m in range(q, len(tmp_chars)):
l_s += tmp_chars[m]
tmp_chars = l_s
# print tmp_chars
tmp_labels = []
for c in tmp_chars:
tmp_labels.append(char_to_label_bac[c])
while len(tmp_labels) < seq_length:
tmp_labels.append(fill_label)
try:
# print tmp_line[:-1]
# print len(tmp_chars_group)
# print "{0}{1}_{2}_{3}_.png".format(file_dir, i, g,len(tmp_chars_group))
tmp_img = Image.open("{0}{1}_{2}_{3}_.png".format(file_dir, tmp_line[1], -1, len(tmp_chars_group)))
except IOError :
print "!!!!!!!!!!!!!!!!!!!", i
f = open("{0}_tf_wrong.txt".format(mode), "a")
f.write("{0},".format(i))
f.close()
continue
tmp_img = tmp_img.resize((IMG_WIDTH,IMG_HEIGHT))
tmp_img = np.asarray(tmp_img,np.uint8)
# tst_img(tmp_img)
tmp_img_raw = tmp_img.tobytes()
tmp_example = tf.train.Example(features=tf.train.Features(feature={
'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[tmp_img_raw])),
'labels':tf.train.Feature(int64_list=tf.train.Int64List(value=tmp_labels)),
}))
tf_writer.write(tmp_example.SerializeToString())
i += 1
tf_writer.close()
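# a minimal sketch of how these records could be decoded again, using the
# same TF 1.x API as the writer above; the 3-channel reshape is an
# assumption (it matches RGB PNG input -- adjust if the images are grayscale):
def parse_bac_example(example_proto):
    features = {
        'img_raw': tf.FixedLenFeature([], tf.string),
        'labels': tf.FixedLenFeature([seq_length], tf.int64),
    }
    parsed = tf.parse_single_example(example_proto, features)
    img = tf.decode_raw(parsed['img_raw'], tf.uint8)
    img = tf.reshape(img, [IMG_HEIGHT, IMG_WIDTH, 3])
    return img, parsed['labels']
# usage sketch (not executed here):
# dataset = tf.data.TFRecordDataset('tfrecords/{0}_bac_64.records'.format(mode)).map(parse_bac_example)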
| [
"noreply@github.com"
] | noreply@github.com |
9586b10b08a6dad3fcc896702a405df1651e8e2b | 54a2dbd057fe471bae3763499eed9df66cd86af0 | /insight_testsuite/temp/src/graph.py | 77f060754c7b4743de49c02f108e454f41e25d37 | [] | no_license | ying4uang/digital-wallet | 42e03e0d71d1c06bccb2ce3fe75f287dff5dfb5e | 49c82d01df5987acb468f1e79c5ea4eb82a0b96e | refs/heads/master | 2020-12-24T11:17:28.899386 | 2016-11-11T05:06:57 | 2016-11-11T05:06:57 | 73,040,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,451 | py | #!/usr/bin/env python
from collections import deque
class Vertex:
"""
Stores vertices in a Graph. Vertex encapsulates first_connections and node_id.
"""
def __init__(self,node_id):
"""
Construct a new 'Vertex' object.
:param node_id: The id of the vertex, in our case userid.
:return: returns nothing
"""
self.id = node_id
self.first_connections = set()
def add_first_connections(self,node_id):
"""
Add node_id to the first degree connection of the current vertex.
:param node_id: current vertex's neighbor, in our case a user's first degree connection.
:return: returns nothing
"""
self.first_connections.add(node_id)
def __str__(self):
return str(self.id)
def get_first_connections(self):
"""
Retrieve all first degree connections of the current vertex.
:return: returns first degree connections, stored in a set.
"""
return self.first_connections
def get_id(self):
"""
Return node_id of the current vertex.
:return: returns nothing
"""
return self.id
class Graph:
"""
Graph structure, consisted of zero to many vertices.
"""
def __init__(self):
"""
Construct a new Graph object.
:return: returns nothing
"""
self.vert_list = {}
self.numVertices = 0
def add_vertex(self,node_id):
"""
Add a vertex to the graph
:param node_id: int, id of the vertex, userid.
:return: returns nothing
"""
self.numVertices = self.numVertices + 1
newVertex = Vertex(node_id)
self.vert_list[node_id] = newVertex
return newVertex
def get_vertex(self,node_id):
"""
Obtain a vertex object by its id
:param node_id: int, id of the vertex, userid.
:return: returns nothing
"""
if node_id in self.vert_list:
return self.vert_list[node_id]
else:
return None
def add_edge(self,source_id,target_id):
"""
Add an edge to the graph.
:param source_id: int, id of source node.
:param target_id: int, id of target node.
:return: returns degree between the two
"""
if source_id not in self.vert_list:
nv = self.add_vertex(source_id)
if target_id not in self.vert_list:
nv = self.add_vertex(target_id)
self.vert_list[source_id].add_first_connections(target_id)
self.vert_list[target_id].add_first_connections(source_id)
def bibfs_degree_between(self,source_id,target_id,level_limit):
"""
Bidirectional breadth first search on the graph to retrieve the degree between users. It goes through
neighbors of source users and see if it is in connections of target users as the first level. And then
goes through neighbors of target users to see if they contain source user. And then continue to the
second degree connections.
:param source_id: int, id of source node.
:param target_id: int, id of target node.
:param level_limit: int, the limit to the degree of connections we are searching
:return: int, returns degree between the two users.
"""
#stores the current level of target/source users, visited users will be removed from the queue
source_queue = deque()
source_queue.append(source_id)
target_queue = deque()
target_queue.append(target_id)
#whether we have visited the source or target node
source_visited = set()
source_visited.add(source_id)
target_visited = set()
target_visited.add(target_id)
#stores the connections of source/target users. As we goes thru each level, all the connections
#of source/target users will be added.
source_connections = set()
source_connections.add(source_id)
target_connections = set()
target_connections.add(target_id)
#level helps to limit how much further we look into the common connections between the source
#and target users. Since we are searching bidirectionally from both source and target. If we are
#looking for 4th degree connection we only need to go down 2 levels from each side
current_level = 1
#helps determines whether we finish the current degree of connection search for sourcce/target
dist_source = dist_target = 0
        while current_level <= level_limit/2:
            #expand exactly one full level of the source frontier. The frontier
            #size is snapshotted first: the previous version broke out after a
            #single dequeue, so deeper levels were only partially expanded and
            #the distance counters drifted
            for _ in range(len(source_queue)):
                source_vert = self.get_vertex(source_queue.popleft())
                if source_vert is None:
                    continue
                for source_node in source_vert.get_first_connections():
                    if source_node not in source_visited:
                        if source_node in target_connections:
                            return dist_source + dist_target + 1
                        source_queue.append(source_node)
                        source_visited.add(source_node)
                        source_connections.add(source_node)
            dist_source = dist_source + 1
            #switching to target loop: expand one full level from the target side
            for _ in range(len(target_queue)):
                target_vert = self.get_vertex(target_queue.popleft())
                if target_vert is None:
                    continue
                for target_node in target_vert.get_first_connections():
                    if target_node not in target_visited:
                        if target_node in source_connections:
                            return dist_source + dist_target + 1
                        target_queue.append(target_node)
                        target_visited.add(target_node)
                        target_connections.add(target_node)
            dist_target = dist_target + 1
            if not source_queue and not target_queue:
                #both frontiers exhausted: the users are not connected
                break
            current_level = current_level + 1
        return 0
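# --- minimal usage sketch with made-up user ids (not from the payment data) ---
if __name__ == '__main__':
    g = Graph()
    g.add_edge(1, 2)
    g.add_edge(2, 3)
    print(g.bibfs_degree_between(1, 2, 4))  # direct payment partners -> 1
    print(g.bibfs_degree_between(1, 3, 4))  # connected through user 2 -> 2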
| [
"ying@xandys-MacBook-Pro.local"
] | ying@xandys-MacBook-Pro.local |
f27c50dc81bfd9c7856aaa1f0f9af9ce8f819815 | 1596ebabdc32196d50e2cd9f776b0aaf460f28d5 | /oldScripts/20181002_bgSub_ClusterTrack_ImStack.py | 6dd96a66c179e24a1c6f1bf36102662ced5f48f5 | [
"MIT"
] | permissive | crackmech/flyclimb | 1394a81ef7fae98c13eaadc18a36543a378571c1 | 551621d1d2747d22b407a6b640d7ccaf680b53e5 | refs/heads/master | 2021-04-26T22:38:34.720421 | 2019-09-21T08:30:51 | 2019-09-21T08:30:51 | 124,124,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,438 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 14 18:46:57 2017
@author: aman
"""
import cv2
import os
import numpy as np
import re
from datetime import datetime
import Tkinter as tk
import tkFileDialog as tkd
import multiprocessing as mp
import time
import glob
#import trackpy as tp
import random
import csv
import itertools
from sklearn import cluster
flyParams = cv2.SimpleBlobDetector_Params()
flyParams.blobColor = 0
flyParams.minThreshold = 5
flyParams.maxThreshold = 240#120 120 for original image, 250 for bg subtracted images
flyParams.filterByArea = True
flyParams.filterByCircularity = True
flyParams.minCircularity = 0
flyParams.filterByConvexity = False
flyParams.filterByInertia = False
flyParams.minArea = 200# 200 for flyClimbing, 1000 for fly walking
flyParams.maxArea = 8000
# Create a detector with the parameters
ver = (cv2.__version__).split('.')
if int(ver[0]) < 3 :
detector = cv2.SimpleBlobDetector(flyParams)
else :
detector = cv2.SimpleBlobDetector_create(flyParams)
nImThresh = 100# if number of images in a folder is less than this, then the folder is not processed
imgDatafolder = 'imageData'
def present_time():
now = datetime.now()
return now.strftime('%Y%m%d_%H%M%S')
def natural_sort(l):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
def getFolder(initialDir):
'''
GUI funciton for browsing and selecting the folder
'''
root = tk.Tk()
initialDir = tkd.askdirectory(parent=root,
initialdir = initialDir, title='Please select a directory')
root.destroy()
return initialDir+'/'
def getDirList(folder):
return natural_sort([os.path.join(folder, name) for name in os.listdir(folder) if os.path.isdir(os.path.join(folder, name))])
def random_color():
levels = range(0,255,32)
return tuple(random.choice(levels) for _ in range(3))
#note: both palettes below are immediately overwritten by the fixed
#60-colour palette that follows (kept from the original experimentation)
colors = [(0,200,200),(200,0,200),(200,200,0),(150,0,0),(0,0,200),(200,200,255)]
colors = [random_color() for x in xrange(1000)]
colors = \
[(64, 96, 32), (96, 0, 160), (96, 128, 32), (128, 192, 224), (128, 32, 0), (0, 224, 64),\
(224, 96, 0), (160, 0, 64), (32, 32, 64), (160, 192, 224), (160, 64, 96), (160, 96, 64),
(224, 160, 224), (192, 96, 128), (128, 160, 64), (192, 32, 192), (160, 96, 32), (32, 96, 32),
(32, 128, 96), (224, 32, 96), (128, 0, 160), (64, 224, 32), (32, 64, 32), (192, 96, 224),
(0, 192, 0), (0, 32, 0), (128, 96, 224), (32, 224, 64), (64, 32, 64), (224, 128, 32),
(32, 192, 96), (128, 96, 128), (32, 64, 224), (160, 160, 64), (32, 32, 160), (128, 192, 128),
(128, 128, 96), (192, 0, 32), (64, 192, 224), (64, 32, 128), (96, 32, 160), (160, 160, 32),
(224, 224, 96), (224, 192, 224), (96, 0, 64), (224, 224, 128), (32, 224, 128), (64, 64, 128),
(64, 64, 192), (64, 64, 64), (64, 192, 224), (96, 128, 64), (192, 64, 160), (96, 64, 0),
(192, 32, 0), (192, 96, 96), (192, 224, 0), (192, 224, 128), (224, 64, 0), (0, 96, 192)]
#csvOutFile = '/media/aman/data/thesis/colorPalette_20181004.csv'
#with open(csvOutFile, "wb") as f:
# writer = csv.writer(f)
# writer.writerows(colors)
def createTrack(trackData, img):
'''
input:
create an image of shape 'imgShape' with the x,y coordiates of the track from the array 'trackData
returns:
an np.array with the cv2 image array, which can be saved or viewed independently of this function
'''
#img = np.ones((imgShape[0], imgShape[1], 3), dtype = 'uint8')
blue = np.hstack((np.linspace(0, 255, num = len(trackData)/2),np.linspace(255, 0, num = (len(trackData)/2)+1)))
green = np.linspace(255, 0, num = len(trackData))
red = np.linspace(0, 255, num = len(trackData))
cv2.putText(img,'Total frames: '+str(len(trackData)), (10,30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0,255,255))
for i in xrange(1,len(trackData)):
cv2.circle(img,(int(trackData[i,0]), int(trackData[i,1])), 2, (blue[i], green[i], red[i]), thickness=2)#draw a circle on the detected body blobs
for i in xrange(1,len(trackData)):
if i%100==0:
cv2.putText(img,'^'+str(i), (int(trackData[i,0]), int(trackData[i,1])), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0,255,255))
#cv2.imshow('track', img); cv2.waitKey(); cv2.destroyAllWindows()
return img
def getTrackData(imStack, Blobparams, blurParams):
'''
returns the numpy array of coordinates of the centroid of blob in the stack of images provided as input numpy array 'imStack'
'''
nFrames = imStack.shape[0]
ver = (cv2.__version__).split('.')
if int(ver[0]) < 3 :
detector = cv2.SimpleBlobDetector(Blobparams)
else :
detector = cv2.SimpleBlobDetector_create(Blobparams)
trackData = np.zeros((nFrames,2))
kernel, sigma = blurParams
for f in xrange(nFrames):
im = imStack[f]
keypoints = detector.detect(cv2.GaussianBlur(im, (kernel, kernel), sigma))
kp = None
try:
for kp in keypoints:
trackData[f] = (kp.pt[0],kp.pt[1])
except:
pass
return trackData
def getContours((idx, im, contourParams, blurParams)):
kernel, sigma = blurParams
#print idx, im.shape, contourParams, blurParams
ret, th = cv2.threshold(cv2.GaussianBlur(im, (kernel,kernel), sigma), contourParams['threshLow'], contourParams['threshHigh'],cv2.THRESH_BINARY)
th = cv2.bitwise_not(th)
ver = (cv2.__version__).split('.')
if int(ver[0]) < 3 :
contours, hierarchy = cv2.findContours(th, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
else :
im2, contours, hierarchy = cv2.findContours(th, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
try:
contours = max(contours, key = cv2.contourArea)
# if not contourParams['minCntArea']<=cv2.contourArea(x)<=contourParams['maxCntArea']:
# contours = []
except:
contours = []
return [idx, contours]
def getContourData(imStack, fList, contourParams, blurParams, pool):
'''
returns the ellipse fit data of the fly in the stack of images provided as input numpy array 'imStack'
'''
# imgStack = np.array(pool.map(imRead, flist), dtype=np.uint8)
# poolArgList = itertools.izip(flist, itertools.repeat(params), np.arange(len(flist)))
# imgWithCnt = pool.map(imReadNCnt, poolArgList)
poolArgList = itertools.izip(fList, imStack, itertools.repeat(contourParams), itertools.repeat(blurParams))
contours = pool.map(getContours, poolArgList)
trackData = []
for idx,cnt in enumerate(contours):
if len(cnt[1])>=5:
try:
trackData.append([cnt[0], cv2.fitEllipse(cnt[1])])
except:
#print ('no contour detected in frame# %s'%cnt[0])
trackData.append([cnt[0], 'noContourDetected'])
else:
#print ('no contour detected) in frame# %s'%cnt[0])
trackData.append([cnt[0], 'noContourDetected'])
return trackData
# trackData = []
# for idx, im in enumerate(imStack):
# frId = flist[idx]
# contours = getContours((frId, im, contourParams, blurParams))
# if len(contours[1])!=0:
# trackData.append([contours[0], cv2.fitEllipse(contours[1])])
# else:
# print ('no contour detected in frame# %d'%frId)
# trackData.append([contours[0], 'noContourDetected'])
# cv2.destroyAllWindows()
# return trackData
def cropImstack(imStack, trackData, heightCropbox, widthCropbox, blurParams, ratTailParams):
'''
returns a list of all images, cropped as per cropBox dimensions
'''
kernel, sigma = blurParams
thresh, nIterations, erodeKernel = ratTailParams
ims = []
for i in xrange(imStack.shape[0]):
im = imStack[i]
try:
x,y = trackData[i]
if (heightCropbox<=y<=imStack.shape[1]-heightCropbox and widthCropbox<=x<=imStack.shape[2]-widthCropbox):
pts = [int(y)-heightCropbox, int(y)+heightCropbox, int(x)-widthCropbox,int(x)+widthCropbox]
im_cropped = im[pts[0]:pts[1], pts[2]:pts[3]]
            _,th = cv2.threshold(cv2.GaussianBlur(im_cropped, (kernel,kernel), sigma), thresh, 255,cv2.THRESH_BINARY) #use the kernel size unpacked from blurParams, not the global
th = cv2.bitwise_not(th)
erosion = cv2.erode(th,erodeKernel,iterations = nIterations)
dilation = cv2.dilate(erosion, erodeKernel, iterations = nIterations)
ims.append([i,np.bitwise_xor(th, dilation)])
else:
ims.append([i, 'NoCroppedImage'])
        except:
            #append a placeholder on failure (e.g. 'noContourDetected' frames)
            #so indices stay aligned with the parallel lists downstream
            ims.append([i, 'NoCroppedImage'])
return ims
def cropImstackGray(imStack, trackData, heightCropbox, widthCropbox):
'''
returns a list of all images, cropped as per cropBox dimensions
'''
ims = []
for i in xrange(imStack.shape[0]):
im = imStack[i]
try:
x,y = trackData[i]
if (heightCropbox<=y<=imStack.shape[1]-heightCropbox and widthCropbox<=x<=imStack.shape[2]-widthCropbox):
pts = [int(y)-heightCropbox, int(y)+heightCropbox, int(x)-widthCropbox,int(x)+widthCropbox]
im_cropped = im[pts[0]:pts[1], pts[2]:pts[3]]
ims.append([i,im_cropped])
else:
ims.append([i, 'NoCroppedImage'])
        except:
            #append a placeholder on failure so indices stay aligned downstream
            ims.append([i, 'NoCroppedImage'])
return ims
def saveCroppedIms(croppedStack, ImStack, saveDir, extension, hCropbox):
'''
saves the output of the tracked flies in the given format (specifice by 'extension') in the given directory.
If a fly is not detected in a continous frame, new folder is created to save the next sequence
'''
ext = extension
outDir = saveDir
cropDir = outDir+'_cropped/'
imDir = outDir+'_original_subIms/'
os.mkdir(imDir)
os.mkdir(cropDir)
for i in xrange(len(croppedStack)):
if 'NoCroppedImage' not in croppedStack[i][1]:
cv2.imwrite(cropDir+str(i)+ext, croppedStack[i][1])
cv2.imwrite(imDir+str(i)+ext, ImStack[i])
else:
print i, croppedStack[i][1]
return cropDir, imDir
def getFiles(dirname, extList):
filesList = []
for ext in extList:
filesList.extend(glob.glob(os.path.join(dirname, ext)))
return natural_sort(filesList)
def displayImgs(imgs, fps):
f = 1000/fps
for i, img in enumerate(imgs):
cv2.imshow('123',img)
key = cv2.waitKey(f) & 0xFF
if key == ord("q"):
break
if key == ord("p"):
f = 1000/fps
cv2.waitKey(0)
if key == ord("n"):
cv2.imshow('123',imgs[i+1])
f=0
cv2.waitKey(f)
cv2.destroyAllWindows()
def imRead(x):
return cv2.imread(x, cv2.IMREAD_GRAYSCALE)
#return cv2.rotate(cv2.imread(x, cv2.IMREAD_GRAYSCALE), cv2.ROTATE_90_COUNTERCLOCKWISE)
def getBgIm(imgs):
'''
returns a background Image for subtraction from all the images using weighted average
'''
avg = np.array((np.median(imgs, axis=0)))
return cv2.convertScaleAbs(avg)
def getBgSubImStack((inImgstack, bgIm)):
'''
returns the stack of images after subtracting the background image from the input imagestack
'''
subIms = np.zeros(np.shape(inImgstack), dtype=np.uint8)
for f in range(0, len(inImgstack)):
subIms[f] = cv2.bitwise_not(cv2.absdiff(inImgstack[f], bgIm))
return subIms
def getBgSubIm((inImg, bgIm)):
'''
returns the stack of images after subtracting the background image from the input imagestack
'''
return cv2.bitwise_not(cv2.absdiff(inImg, bgIm))
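#a minimal synthetic check of the two helpers above, kept as comments so the
#batch pipeline below is unaffected; shapes and pixel values are made up:
#stack = np.full((5, 4, 4), 200, dtype=np.uint8) # static bright background
#stack[2, 1, 1] = 20 # one dark "fly" pixel in frame 2
#bg = getBgIm(stack) # per-pixel median -> 200 everywhere
#sub = getBgSubIm((stack[2], bg)) # fly pixel stays dark on a white frame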
def getSubIms(dirname, imExts, pool, workers):
'''
tracks the fly using cv2.SimpleBlobDetector method and saves the tracked flies in folders
'''
flist = getFiles(dirname, imExts)
#startTime = time.time()
imgStack = pool.map(imRead, flist)
ims = np.zeros((len(imgStack),imgStack[0].shape[0], imgStack[0].shape[1] ), dtype=np.uint8)
for i,x in enumerate(imgStack):
ims[i]=x
imgStack = ims.copy()
#t1 = time.time()-startTime
#print("imRead time for %d frames: %s Seconds at %f FPS"%(len(flist),t1 ,len(flist)/float(t1)))
#t1 = time.time()
imStackChunks = np.array_split(imgStack, 4*workers, axis=1)
imStackChunks = [x.copy() for x in imStackChunks if x.size > 0]
bgImChunks = pool.map(getBgIm, imStackChunks)
bgIm = np.array(np.vstack((bgImChunks)), dtype=np.uint8)
#t2 = time.time()-t1
#print("bg calculation time for %d frames: %s Seconds at %f FPS"%(len(flist),t2 ,len(flist)/float(t2)))
#t2 = time.time()
subIms = pool.map(getBgSubIm, itertools.izip(imgStack, itertools.repeat(bgIm)))
ims = np.zeros((len(subIms),subIms[0].shape[0], subIms[0].shape[1] ), dtype=np.uint8)
for i,x in enumerate(subIms):
ims[i]=x
subIms = ims.copy()
#t = time.time()-t2
#print("bg Subtraction time for %d frames: %s Seconds at %f FPS"%(len(flist),t ,len(flist)/float(t)))
return imgStack, subIms, flist
def getEuDisCenter((x1,y1)):
return np.sqrt(np.square(x1-heightCrop)+np.square(y1-widthCrop))
def getEuDisCorner((x1,y1)):
return np.sqrt(np.square(x1)+np.square(y1))
def getFarPoint(cnt):
'''
    returns the contour's four extreme points sorted by their distance from
    the crop centre (computed by getEuDisCenter); callers take the last
    element, i.e. the point farthest from the centre
'''
leftmost = tuple(cnt[cnt[:,:,0].argmin()][0])
rightmost = tuple(cnt[cnt[:,:,0].argmax()][0])
topmost = tuple(cnt[cnt[:,:,1].argmin()][0])
bottommost = tuple(cnt[cnt[:,:,1].argmax()][0])
disSorted = sorted([leftmost, rightmost, topmost, bottommost], key=getEuDisCenter)
return disSorted
def tracknCrop(dirname, imgExt, heightcrop, widthcrop, contourParams, outFname, \
params, nImThreshold, blurParams, ratTailParams, pool, workers):
'''
tracks the fly using cv2.SimpleBlobDetector method and saves the tracked flies in folders
'''
flist = natural_sort(os.listdir(dirname))
if len(flist)<=nImThreshold:
print('Less Images to process, not processing folder, nImages present: %i'%len(flist))
pass
else:
imgs, subImgs, flist = getSubIms(dirname, imgExt, pool, workers)
trackedData = getContourData(imStack = subImgs, fList = flist, contourParams= contourParams, blurParams=blurParams, pool=pool)
blobXYs = [x[1][0] for _,x in enumerate(trackedData)]
cropSubImStack = cropImstack(imStack = subImgs, trackData = blobXYs, heightCropbox = heightcrop, widthCropbox = widthcrop,\
blurParams=blurParams, ratTailParams=ratTailParams)
cropImStack = cropImstackGray(imStack = imgs, trackData = blobXYs, heightCropbox = heightcrop, widthCropbox = widthcrop)
moddedTrackedData = []
for _,data in enumerate(trackedData):
if data[1]!='noContourDetected':
moddedTrackedData.append([data[0], data[1][0][0], data[1][0][1],data[1][1][0], data[1][1][1], data[1][2]])
else:
moddedTrackedData.append([data[0], data[1]])
with open(outFname+"_centroids.csv", "wb") as f:
writer = csv.writer(f)
writer.writerow(['frame','X-Coord','Y-Coord','minorAxis',' majorAxis',' angle'])
writer.writerows(moddedTrackedData)
return cropImStack, cropSubImStack, flist
def getLegTipLocs(rawDir, trackParams, legContourThresh, outFname, pool):
imExts, height, width, cntparams, \
flyparams, nImThresh, blurParams, ratTailparams = trackParams
croppedImStack, croppedSubImStack, fList = tracknCrop(rawDir, imExts, height,\
width, cntparams, outFname, flyparams,\
nImThresh, blurParams, ratTailparams,\
pool)
croppedSubIms = []
croppedIms = []
for i in xrange(len(croppedSubImStack)):
if 'NoCroppedImage' not in croppedSubImStack[i][1]:
croppedSubIms.append(croppedSubImStack[i][1])
croppedIms.append(croppedImStack[i][1])
croppedIms = np.array(croppedIms, dtype=np.uint8)
croppedSubIms = np.array(croppedSubIms, dtype=np.uint8)
allLocs = []
for i, im in enumerate(croppedSubIms):
_, contours, hierarchy = cv2.findContours(im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = [x for x in sorted(contours, key = cv2.contourArea)[-6:] if cv2.contourArea(x)>=legContourThresh]
locs = []
for j,cnt in enumerate(contours):
locs.append(getFarPoint(cnt)[-1])
allLocs.append(np.array(sorted([x for x in locs], key=getEuDisCorner)))
return allLocs, croppedIms
def getAllLocs(rawDir, trackParams, legContourThresh, outFname, pool, workers):
imExts, height, width, cntparams, \
flyparams, nImThresh, blurParams, ratTailparams = trackParams
croppedImStack, croppedSubImStack, fList = tracknCrop(rawDir, imExts, height,\
width, cntparams, outFname, flyparams,\
nImThresh, blurParams, ratTailparams,\
pool, workers)
croppedSubIms = []
croppedIms = []
frNames = []
nCnts = 0
for i in xrange(len(croppedSubImStack)):
if 'NoCroppedImage' not in croppedSubImStack[i][1]:
croppedSubIms.append(croppedSubImStack[i][1])
croppedIms.append(croppedImStack[i][1])
frNames.append(fList[i])
nCnts+=1
croppedIms = np.array(croppedIms, dtype=np.uint8)
croppedSubIms = np.array(croppedSubIms, dtype=np.uint8)
#displayImgs(croppedIms, 100)
#displayImgs(croppedSubIms, 100)
allLocs = []
for i, im in enumerate(croppedSubIms):
ver = (cv2.__version__).split('.')
if int(ver[0]) < 3 :
contours, hierarchy = cv2.findContours(im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
else:
_, contours, hierarchy = cv2.findContours(im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = [x for x in sorted(contours, key = cv2.contourArea)[-6:] if cv2.contourArea(x)>=legContourThresh]
locs = []
for j,cnt in enumerate(contours):
locs.append(getFarPoint(cnt)[-1])
# allLocs.append(np.array(sorted([x for x in locs], key=getEuDisCorner)))
allLocs.append([frNames[i],locs])
print('Contours detected in %d of %d frames'%(nCnts, len(fList)))
return allLocs, croppedIms
def assignLegTips(tipLocs, pxMvmntThresh, frmSkipThresh, saveFileName, crpImStack):
t = tp.link_iter(tipLocs, search_range = pxMvmntThresh, memory=frmSkipThresh) #iterator of locations, distance moved between frames, memory of skipped frame
trackedIds = []
for idx,x in enumerate(t):
trackedIds.append(x[1])
legTips = [['frame#','x','y','trackId']]
for i,loc in enumerate(tipLocs):
for j,l in enumerate(loc):
legTips.append([i, l[0], l[1],trackedIds[i][j]])
csvOutFile = saveFileName+'.csv'
with open(csvOutFile, "wb") as f:
writer = csv.writer(f)
writer.writerows(legTips)
legTipsFr = [['frame#',\
'x','y','trackId',\
'x','y','trackId',\
'x','y','trackId',\
'x','y','trackId',\
'x','y','trackId',\
'x','y','trackId']]
for i,loc in enumerate(tipLocs):
frLocs = [i]
for j,l in enumerate(loc):
frLocs.extend((l[0], l[1],trackedIds[i][j]))
legTipsFr.append(frLocs)
csvOutFile = saveFileName+'_FramesTogether.csv'
with open(csvOutFile, "wb") as f:
writer = csv.writer(f)
writer.writerows(legTipsFr)
dispIms = []
for i, im in enumerate(crpImStack):
img = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
locs = tipLocs[i]
for j,loc in enumerate(locs):
cv2.circle(img, tuple(loc), 2, colors[trackedIds[i][j]], 2)
cv2.putText(img, str(trackedIds[i][j]), tuple(loc), cv2.FONT_HERSHEY_COMPLEX, 0.4, colors[trackedIds[i][j]])
dispIms.append(img)
return trackedIds, dispIms
def getClusters(nclusters, locsArr, fname, imShape, workers):
frLabels = [x[0] for x in locsArr]
locs = [x[-1] for x in locsArr]
allFrLabels = []
for i,x in enumerate(locs):
for j in xrange(len(x)):
allFrLabels.append(frLabels[i])
allVerts = np.vstack((locs))
spectral = cluster.SpectralClustering(
n_clusters=nclusters, eigen_solver='arpack',
affinity="nearest_neighbors", n_jobs=workers)
spectral.fit(allVerts)
y_pred = spectral.labels_.astype(np.int)
allLocs1 = [np.hstack((np.zeros((len(x),1))+i,np.arange(len(x),0, -1).reshape((len(x),1)), x)) for i,x in enumerate(locs)]
allLocs1 = np.vstack((allLocs1))
allLocs1 = np.hstack((allLocs1, np.reshape(y_pred, (len(y_pred),1))))
labArr = np.array((allFrLabels))
labArr = labArr.reshape((len(labArr),1))
outData = np.hstack((labArr, allLocs1))
csvOutFile = fname+'_legTipLocs.csv'
with open(csvOutFile, "wb") as f:
writer = csv.writer(f)
writer.writerow(['frame','frame#','cluster#','X-Coord',' Y-Coord',' clusterId'])
writer.writerows(outData)
blImBl = np.zeros((imShape[1], imShape[0],3), dtype=np.uint8)
for i,v in enumerate(allVerts):
blImBl[v[1],v[0]] = colors[y_pred[i]]
cv2.imwrite(fname+'_legTipLocs_black.png', blImBl)
return allLocs1, allFrLabels
initialDir = '/media/pointgrey/data/flywalk/legTracking/data/all/'
#initialDir = '/media/aman/data/flyWalk_data/climbingData/gait/data/tmp/pythonTmp/'
#initialDir = '/media/aman/data/flyWalk_data/climbingData/gait/data/copiedLegTrackingTrackData/'
baseDir = getFolder(initialDir)
outDir = '/media/aman/data/flyWalk_data/climbingData/gait/data/tmp/'
legTipclusters = 20
imExtensions = ['*.png', '*.jpeg']
heightCrop = 100
widthCrop = 100
legCntThresh = 2
nThreads = 7
kernelSize = 5
gauBlurParams = (kernelSize,1)
threshVal = 250
nIterations = 2
kernel = np.ones((kernelSize,kernelSize),np.uint8)
pxMvdByLegBwFrm = 50
legTipFrmSkipthresh = 40
rattailparams = (threshVal, nIterations, kernel)
#baseDir = initialDir
print baseDir
cntParams = {'maxCntArea' : 7000,\
'minCntArea' : 2000,\
'threshLow' : 210,\
'threshHigh' : 255}
trackparams = [imExtensions, heightCrop, widthCrop, cntParams, flyParams,\
nImThresh, gauBlurParams, rattailparams]
rawDirs = getDirList(baseDir)
pool = mp.Pool(processes=nThreads)
procStartTime = time.time()
totalNFrames = 0
print "Started processing directories at "+present_time()
for _,rawDir in enumerate(rawDirs):
d = os.path.join(rawDir, imgDatafolder)
print rawDir
imdirs = getDirList(d)
for imdir in imdirs:
startTime = time.time()
nFrames = len(getFiles(imdir, imExtensions))
if nFrames>nImThresh:
fname = imdir.rstrip(os.sep)+'_legTipsClus_n'+str(legTipclusters)+'-Climbing'
legTipLocs = getAllLocs(imdir, trackparams, legCntThresh, fname, pool, nThreads)
allLocs, croppedIms = legTipLocs
locs = [x[-1] for x in allLocs]
if len(allLocs)>25:
try:
np.vstack((locs))
lbldLocs, frLabelsAll = getClusters(nclusters = legTipclusters, locsArr = allLocs,\
fname = fname, imShape = (2*heightCrop, 2*widthCrop),\
workers = nThreads)
except:
print('legTips not tracked properly in %s'%imdir)
print('==> Processed %i frames in %0.3f seconds at: %05f FPS'\
%(nFrames, time.time()-startTime, (nFrames/(time.time()-startTime))))
totalNFrames +=nFrames
pool.close()
totSecs = time.time()-procStartTime
print('Finished processing %d frames at: %05s, in %sSeconds, total processing speed: %05f FPS\n'\
%(totalNFrames, present_time(),totSecs , totalNFrames/totSecs))
displayImgs(croppedIms,100)
#outData = []
#
#aa = lbldLocs.copy()
#aaa = np.array(aa, dtype=np.uint8)
#for i in xrange(len(aaa)):
# frData = [frLabelsAll[i]]
# for j,x in enumerate(aaa[i].astype(list)):
# frData.extend(list(aaa[i])[j])
# outData.append(frData)
#
#
#labArr = np.array((frLabelsAll)).reshape((len(frLabelsAll),1))
#
#out = np.hstack((labArr, lbldLocs))
#csvOutFile = fname+'_legTipLocs.csv'
#with open(csvOutFile, "wb") as f:
# writer = csv.writer(f)
# writer.writerows(out)
#
#allVerts = np.vstack((allLocs))
#X = allVerts.copy()
#
#spectral = cluster.SpectralClustering(
# n_clusters=params['n_clusters'], eigen_solver='arpack',
# affinity="nearest_neighbors")
#spectral.fit(X)
#y_pred = spectral.labels_.astype(np.int)
#
#labels = y_pred
#blIm = np.zeros((2*heightCrop, 2*widthCrop,3), dtype=np.uint8)
#for i,v in enumerate(allVerts):
# blIm[v[1],v[0]] = colors[labels[i]]
#cv2.imshow("Original", blIm)
#key = cv2.waitKey(0)
#cv2.destroyAllWindows()
#
#allLocs1 = [np.hstack((x, np.zeros((len(x),1))+i)) for i,x in enumerate(allLocs)]
#
#
#
#allVerts1 = np.vstack((allLocs1))
#X = allVerts1.copy()
#
#spectral = cluster.SpectralClustering(
# n_clusters=params['n_clusters'], eigen_solver='arpack',
# affinity="nearest_neighbors")
#spectral.fit(X)
#y_pred = spectral.labels_.astype(np.int)
#
#labels = y_pred
#blIm = np.zeros((2*heightCrop, 2*widthCrop,3), dtype=np.uint8)
#for i,v in enumerate(allVerts1):
# blIm[v[1],v[0]] = colors[labels[i]]
#cv2.imshow("Original", blIm)
#key = cv2.waitKey(0)
#cv2.destroyAllWindows()
#
#
#allVertsList = [list(x) for _,x in enumerate(allVerts)]
#frLegTipLabels = []
#for i, tips in enumerate(allLocs):
# ltlabels = []
# for j, tip in enumerate(tips):
# ltlabels.append(labels[allVertsList.index(list(tip))])
# frLegTipLabels.append(ltlabels)
#
#blIms = [cv2.cvtColor(x, cv2.COLOR_GRAY2BGR) for x in croppedIms.copy()]
#for idx,im in enumerate(blIms):
# for j, pt in enumerate(allLocs[idx]):
# cv2.circle(im, tuple(pt), 2, colors[frLegTipLabels[idx][j]+10], thickness=3)
#
#displayImgs(blIms,10)
| [
"crack.mech@gmail.com"
] | crack.mech@gmail.com |
14e34fc8a7237c6e0e7bbb1d39f8c59edbe12d95 | 1b89bab135d08da8a4f9458c3be7cb4443a24307 | /learn.py | a416bdf9d96efdc55e276e520acd339c4670a053 | [] | no_license | hbirler/datalovr | 2ddfa9356a4611c01377ea4d20fffe3de9ca694d | f530c92a1b453b7ba8ecb2421b0d53acf372c405 | refs/heads/master | 2020-09-16T21:08:19.121875 | 2016-11-13T11:18:54 | 2016-11-13T11:18:54 | 73,515,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,554 | py | import numpy, random
from numpy import array, random, exp, seterr, dot, copy, log, vectorize, tanh
from numpy.linalg import norm
from itertools import combinations,combinations_with_replacement, chain
from operator import mul
from sklearn.neural_network import MLPRegressor
from sklearn import svm
def log_reg(X, y):
y = y.reshape((-1, 1))
#clf = svm.SVC(probability = True)
clf = MLPRegressor(hidden_layer_sizes=(30,10), activation="tanh")
clf.fit(X, y)
return clf
def check_w(w,p):
return w.predict((p,))[0]
def sigmoid(s):
return 1 / (1+1/exp(s))
def sigmoid_v(s):
return 1 / (1+1/exp(s))
sigmoid_v = vectorize(sigmoid_v)
"""
def check_w(w,p):
return sigmoid(dot(w,p))
"""
def nonlinear_transform(v, deg=2):
n = len(v)
ss = ((i,) for i in xrange(n))
for i in range(2, deg+1):
nn = combinations_with_replacement(xrange(n), i)
ss = chain(ss, nn)
retv = chain((1,) ,(reduce(mul, (v[i] for i in tup)) for tup in ss))
return array(list(retv))
print nonlinear_transform([1,2])
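# expected output for deg=2: [1 1 2 1 2 4]
# i.e. bias, x1, x2, then the degree-2 terms x1*x1, x1*x2, x2*x2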
"""
def log_reg(X, y, eta = 0.1, eps = 0.01, lamb = 0.01):
N = len(X)
w = numpy.random.uniform(-1,1,len(X[0]))
#w = numpy.zeros(len(X[0]))
wp = w - array(numpy.ones(len(X[0])))
sigmoid_v(X)
eco = 0
while True:
Xi = random.permutation(len(X))
for i in Xi:
decay = 2 * eta * lamb / N
#print y[i]
#print w
#print i, y[i] * dot(w.T, X[i])
w = w*(1 - decay) + eta * (y[i] * X[i] * (1.0 / (1+exp(y[i] * dot(w.T, X[i])) ) ) )
eco += 1
#print eco
if norm(wp - w) < eps or eco > 100000:
break
wp = copy(w)
return w
""" | [
"Hasan Altan Birler"
] | Hasan Altan Birler |
9afdfa1a9b972bbdca3bfd354b2ca459f4a04cbf | 9b6bc3f768ec86caf2074141be90262c9662762a | /tests/catalyst/metrics/functional/test_cmc_metric.py | 9c63fa38b2ce05c4c58d594b1c1a73fc454b599b | [
"Apache-2.0"
] | permissive | Podidiving/catalyst | 298dca23e5cf51dda6bbc0a744874ae2c8787bc5 | ac8567dc389fb7a265e3104e8a743497aa903165 | refs/heads/master | 2021-12-03T21:48:48.232619 | 2021-11-03T05:29:19 | 2021-11-03T05:29:19 | 225,822,776 | 2 | 0 | Apache-2.0 | 2019-12-04T08:59:03 | 2019-12-04T08:59:03 | null | UTF-8 | Python | false | false | 8,443 | py | # flake8: noqa
from typing import List, Tuple
from itertools import chain
import numpy as np
import pytest
import torch
from catalyst.metrics.functional._cmc_score import cmc_score, cmc_score_count, masked_cmc_score
EPS = 1e-4
TEST_DATA_SIMPLE = (
# (distance_matrix, conformity_matrix, topk, expected_value)
(torch.tensor([[1, 2], [2, 1]]), torch.tensor([[0, 1], [1, 0]]), 1, 0.0),
(torch.tensor([[0, 0.5], [0.0, 0.5]]), torch.tensor([[0, 1], [1, 0]]), 1, 0.5),
(torch.tensor([[0, 0.5], [0.0, 0.5]]), torch.tensor([[0, 1], [1, 0]]), 2, 1),
(
torch.tensor([[1, 0.5, 0.2], [2, 3, 4], [0.4, 3, 4]]),
torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
2,
1 / 3,
),
(torch.randn((10, 10)), torch.ones((10, 10)), 1, 1),
)
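# reading the first simple case above: for each query the nearest gallery
# item (distance 1, on the diagonal) has conformity 0, so no query is
# matched at rank 1 and the expected top-1 CMC score is 0.0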
TEST_DATA_LESS_SMALL = (
(torch.rand((10, 10)) + torch.tril(torch.ones((10, 10))), torch.eye(10), i, i / 10)
for i in range(1, 10)
)
TEST_DATA_GREATER_SMALL = (
(
torch.rand((10, 10)) + torch.triu(torch.ones((10, 10)), diagonal=1),
torch.eye(10),
i,
i / 10,
)
for i in range(1, 10)
)
TEST_DATA_LESS_BIG = (
(torch.rand((100, 100)) + torch.tril(torch.ones((100, 100))), torch.eye(100), i, i / 100)
for i in range(1, 101, 10)
)
@pytest.mark.parametrize("distance_matrix,conformity_matrix,topk,expected", TEST_DATA_SIMPLE)
def test_metric_count(distance_matrix, conformity_matrix, topk, expected):
"""Simple test"""
out = cmc_score_count(
distances=distance_matrix, conformity_matrix=conformity_matrix, topk=topk
)
assert np.isclose(out, expected)
@pytest.mark.parametrize(
"distance_matrix,conformity_matrix,topk,expected",
chain(TEST_DATA_LESS_SMALL, TEST_DATA_LESS_BIG),
)
def test_metric_less(distance_matrix, conformity_matrix, topk, expected):
"""Simple test"""
out = cmc_score_count(
distances=distance_matrix, conformity_matrix=conformity_matrix, topk=topk
)
assert out - EPS <= expected
@pytest.mark.parametrize(
"distance_matrix,conformity_matrix,topk,expected", chain(TEST_DATA_GREATER_SMALL)
)
def test_metric_greater(distance_matrix, conformity_matrix, topk, expected):
"""Simple test"""
out = cmc_score_count(
distances=distance_matrix, conformity_matrix=conformity_matrix, topk=topk
)
assert out + EPS >= expected
@pytest.fixture
def generate_samples_for_cmc_score() -> List[
Tuple[float, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]
]:
"""
Generate list of query and gallery data for cmc score testing.
"""
data = []
for error_rate in [
0.05,
0.1,
0.15,
0.2,
0.25,
]:
# generate params of the datasets
class_number = np.random.randint(low=2, high=10)
kq = np.random.randint(low=1000, high=1500)
kg = np.random.randint(low=500, high=1000)
def generate_samples(n_labels, samples_per_label):
samples = []
labels = []
# for each label generate dots that will be close to each other and
# distanced from samples of other classes
for i in range(n_labels):
tmp_samples = np.random.uniform(
low=2 * i, high=2 * i + 0.2, size=(samples_per_label,)
)
samples = np.concatenate((samples, tmp_samples))
labels = np.concatenate((labels, [i] * samples_per_label))
return samples.reshape((-1, 1)), labels
query_embs, query_labels = generate_samples(n_labels=class_number, samples_per_label=kq)
gallery_embs, gallery_labels = generate_samples(
n_labels=class_number, samples_per_label=kg
)
# spoil generated gallery dataset: for each sample from data change
# label to any other one with probability error_rate
def confuse_labels(labels, error_rate):
unique_labels = set(labels)
size = len(labels)
for i in range(size):
if np.random.binomial(n=1, p=error_rate, size=1)[0]:
labels[i] = np.random.choice(list(unique_labels - {labels[i]}))
return labels
gallery_labels = confuse_labels(gallery_labels, error_rate=error_rate)
query_embs = torch.tensor(query_embs)
gallery_embs = torch.tensor(gallery_embs)
query_labels = torch.tensor(query_labels, dtype=torch.long)
gallery_labels = torch.tensor(gallery_labels, dtype=torch.long)
data.append((error_rate, query_embs, query_labels, gallery_embs, gallery_labels))
return data
def test_cmc_score_with_samples(generate_samples_for_cmc_score):
"""
Count cmc score callback for sets of well-separated data clusters labeled
with error_rate probability mistake.
"""
for (
error_rate,
query_embs,
query_labels,
gallery_embs,
gallery_labels,
) in generate_samples_for_cmc_score:
true_cmc_01 = 1 - error_rate
conformity_matrix = (query_labels.reshape((-1, 1)) == gallery_labels).to(torch.bool)
cmc = cmc_score(
query_embeddings=query_embs,
gallery_embeddings=gallery_embs,
conformity_matrix=conformity_matrix,
topk=1,
)
assert abs(cmc - true_cmc_01) <= 0.05
@pytest.mark.parametrize(
(
"query_embeddings",
"gallery_embeddings",
"conformity_matrix",
"available_samples",
"topk",
"expected",
),
(
(
torch.tensor([[1, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 1], [0, 0, 1, 1]]).float(),
torch.tensor([[1, 1, 1, 0], [1, 1, 1, 1], [0, 1, 1, 0]]).float(),
torch.tensor(
[
[True, False, False],
[True, False, False],
[False, True, True],
[False, True, True],
]
),
torch.tensor(
[[False, True, True], [True, True, True], [True, False, True], [True, True, True]]
),
1,
0.75,
),
(
torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 1]]).float(),
torch.tensor([[0, 1, 0], [0, 0, 1], [1, 0, 1]]).float(),
torch.tensor(
[
[False, False, True],
[True, False, False],
[False, True, False],
[False, False, True],
]
),
torch.tensor(
[[True, True, True], [False, True, True], [True, False, True], [True, True, False]]
),
1,
0.25,
),
),
)
def test_masked_cmc_score(
query_embeddings, gallery_embeddings, conformity_matrix, available_samples, topk, expected
):
score = masked_cmc_score(
query_embeddings=query_embeddings,
gallery_embeddings=gallery_embeddings,
conformity_matrix=conformity_matrix,
available_samples=available_samples,
topk=topk,
)
assert score == expected
@pytest.mark.parametrize(
("query_embeddings", "gallery_embeddings", "conformity_matrix", "available_samples", "topk"),
(
(
torch.rand(size=(query_size, 32)).float(),
torch.rand(size=(gallery_size, 32)).float(),
torch.randint(low=0, high=2, size=(query_size, gallery_size)).bool(),
torch.ones(size=(query_size, gallery_size)).bool(),
k,
)
for query_size, gallery_size, k in zip(
list(range(10, 20)), list(range(25, 35)), list(range(1, 11))
)
),
)
def test_no_mask_cmc_score(
query_embeddings, gallery_embeddings, conformity_matrix, available_samples, topk
) -> None:
"""
In this test we just check that masked_cmc_score is equal to cmc_score
when all the samples are available for for scoring.
"""
masked_score = masked_cmc_score(
query_embeddings=query_embeddings,
gallery_embeddings=gallery_embeddings,
conformity_matrix=conformity_matrix,
available_samples=available_samples,
topk=topk,
)
score = cmc_score(
query_embeddings=query_embeddings,
gallery_embeddings=gallery_embeddings,
conformity_matrix=conformity_matrix,
topk=topk,
)
assert masked_score == score
| [
"noreply@github.com"
] | noreply@github.com |
1a2b36b339ecff0883a453b19da7a5f284bfcb33 | 9de1565e41bc7f7b649d39c78730ac1fafe47738 | /bounder/region/metaheuristics/abstracts.py | 17abf17bbe0b90de5ae72bfd9902679576101d86 | [
"MIT"
] | permissive | terratenney/bounder | 3392a3ed93b67974084fbcfacf390285ccc19f92 | 0c3b69a71145ce3aff6ead0af4c02916972b0465 | refs/heads/master | 2020-06-04T16:11:15.650293 | 2017-07-26T18:21:37 | 2017-07-26T18:21:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | class MetaHeuristic(object):
def __init__(self):
pass | [
"levi.john.wolf@gmail.com"
] | levi.john.wolf@gmail.com |
0fee4123dd316b974c3fdd92e1ace45e6046c0e7 | 1f40a08ee85ef6f78384e6f6f53bcf3f86b8c44b | /shorten/app/views.py | fec1ecdf840fbfdd7d0588f916a668b2701fdb4d | [] | no_license | infsolution/EncurtUrl | bff4543fb17f3c2a6853c64abc24d307abcd04bf | 0f6d8aa23a2498a8bf5575797db9a5a8eb855403 | refs/heads/master | 2020-05-14T09:31:39.265337 | 2019-09-28T17:44:25 | 2019-09-28T17:44:25 | 181,741,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,523 | py | from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, InvalidPage
from django.shortcuts import render, redirect
from django.http import JsonResponse
from rest_framework import generics
from .models import *
from .forms import *
def index(request):
perfil_logado = get_perfil_logado(request)
return render(request,'app/index.html',{"title_page":"O melhor encurtador","perfil_logado":perfil_logado})
def get_perfil_logado(request):
try:
perfil = Perfil.objects.get(user=request.user)
except Exception as e:
return None
return perfil
def shorten(request):
if request.GET.get('url'):
short = Shortened(perfil=get_perfil_logado(request), url_user=request.GET.get('url'))
short.shorten()
if request.GET.getlist('private'):
short.get_private_code()
if request.GET.getlist('preview'):
short.preview=True
short.preview_message = request.GET.get('preview_msg')
short.save()
return render(request, 'app/showurl.html',{"url_short":short.url_shortened,"perfil_logado":get_perfil_logado(request),
"title_page":"TShort: Sua url encurtada"})
return render(request,'app/urlnotfound.html', {"value":"Nenhuma url foi informada",
"title_page":"Url Não encontrada","perfil_logado":get_perfil_logado(request)})
@login_required
def shotened_report(request):
ITEMS_PER_PAGE = 5
perfil_logado = get_perfil_logado(request)
shorteneds = Shortened.objects.filter(perfil=perfil_logado)
paginator = Paginator(shorteneds, ITEMS_PER_PAGE)
page = request.GET.get('page',1)
try:
short_page = paginator.get_page(page)
except InvalidPage:
short_page = paginator.get_page(1)
return render(request, 'app/report.html',{"shorteneds":short_page,"perfil_logado":perfil_logado})
@login_required
def detail(request, shortened_id):
shorten = Shortened.objects.get(id=shortened_id)
return render(request, 'app/report_detail.html', {'shorten':shorten, 'perfil_logado':get_perfil_logado(request)})
def go_to_url(request, shortened):
if request.method == 'GET':
try:
short = Shortened.objects.get(url_shortened=shortened)
get_click(request,short)
except Exception as e:
return render(request,'app/urlnotfound.html', {"value":shortened,"error":e, "title_page":"Url Não encontrada"})
if short.private_code != None:
return render(request, 'app/private_access.html',{"short":short})
if short.preview:
return render(request, 'app/preview.html',{'short':short, 'perfil_logado':get_perfil_logado(request)})
return redirect(short.url_user)
def create_user(request):
if request.method == 'POST':
form = UserModelForm(request.POST)
if form.is_valid():
if request.POST['last-password'] == request.POST['password']:
				user = User.objects.create_user(request.POST['username'], request.POST['email'], request.POST['last-password'])  # check that the passwords are equal
perfil = Perfil(name=user.username, user=user)
perfil.save()
return render(request, 'app/add.html', {'form':UserModelForm(), 'alert_type':'success', 'msg_confirm':'Parabéns seu cadastro foi realizado.'})
else:
return render(request, 'app/add.html', {'form':UserModelForm(),'alert_type':'danger' , 'msg_confirm':'As senhas não são iguais'})
return render(request, 'app/add.html',{'form':UserModelForm(request.POST), 'alert_type':'danger','msg_confirm':'Ocorreu um erro ao realizar o cadastro.'})
form = UserModelForm()
return render(request, 'app/add.html', {"form":form})
'''def do_login(request):
if request.method == 'POST':
user = authenticate(username = request.POST['username'], password = request.POST['password'])
if user is not None:
login(request,user)
#return redirect('/app/'+str(user.id), user)
return redirect('index')
return render(request,'app/login.html' ,{"error_msg":"Usuário ou senha Invalidos"})
return render(request, 'app/login.html')'''
def do_logout(request):
logout(request)
return redirect('/login/')
def access_private(request):
if request.method == 'POST':
short = Shortened.objects.get(url_shortened=request.POST['url_shortened'])
if request.POST.get('private_code') == short.private_code:
return redirect(short.url_user)
return render(request, 'app/private_access.html',{"short":short, "error_msg":"Código inválido"})
@login_required
def get_contatos(request):
return render(request, 'app/contatos.html', {"perfil_logado":get_perfil_logado(request)})
def request_access(request, codeurl):
if request.method == 'POST':
short = Shortened.objects.get(url_shortened=codeurl)
if send_message(short):
return render(request,'app/request_access.html',{"code":codeurl,"msg":"Sua solicitação foi enviada. Aquarde contato."})
return render(request,'app/request_access.html',{"code":codeurl})
def send_message(short):
return True
def get_click(request, shortened):
shor = Click(shortened=shortened)
print(shor.save())
def about(request):
context = {}
if get_perfil_logado(request):
context = {"perfil_logado":get_perfil_logado(request)}
return render(request, 'app/about.html',context)
def help(request):
context = {}
if get_perfil_logado(request):
context = {"perfil_logado":get_perfil_logado(request)}
return render(request, 'app/help.html',context)
def personalize(request, shortened_id):
pass
def valid(request, url):
	result = None
	try:
		url = Shortened.objects.get(url_shortened=url)
		result = True
	except Exception as e:
		result = False
	return JsonResponse({'result': result})
#API#
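# Hypothetical API sketch (editor's assumption -- the rest_framework import at
# the top is otherwise unused, so an endpoint along these lines was likely
# planned; the serializer below is invented for illustration):
from rest_framework import serializers

class ShortenedSerializer(serializers.ModelSerializer):
	class Meta:
		model = Shortened
		fields = ('id', 'url_user', 'url_shortened', 'preview')

class ShortenedList(generics.ListAPIView):
	queryset = Shortened.objects.all()
	serializer_class = ShortenedSerializer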
| [
"clsinfsolution@gmail.com"
] | clsinfsolution@gmail.com |
d58a8bc255946527fe87ccd38e24f8190773a730 | 86432b51b07eae85712668a8576d4b172220105a | /packages/chrome-ice/tmp/usr/lib/chrome-ice/pyproto/device_management_pb/device_management_backend_pb2.py | cac1ce374ca7567377e9044a072648ee2b4dbb32 | [] | no_license | hfuerst/Reelvdr-vdr2.2-Ubuntu16.04 | e6ddcd50c9f5cf8fe39e63406b08f1e3d6bdd181 | a87c2bf4f4f201d29d06202651a0d0b12f77ae26 | refs/heads/master | 2020-04-07T00:29:04.782748 | 2017-07-11T17:40:06 | 2017-07-11T17:40:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 29,095 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = descriptor.FileDescriptor(
name='device_management_backend.proto',
package='enterprise_management',
serialized_pb='\n\x1f\x64\x65vice_management_backend.proto\x12\x15\x65nterprise_management\"\xc1\x01\n\x15\x44\x65viceRegisterRequest\x12\x12\n\nreregister\x18\x01 \x01(\x08\x12\x43\n\x04type\x18\x02 \x01(\x0e\x32\x31.enterprise_management.DeviceRegisterRequest.Type:\x02TT\x12\x12\n\nmachine_id\x18\x03 \x01(\t\x12\x15\n\rmachine_model\x18\x04 \x01(\t\"$\n\x04Type\x12\x06\n\x02TT\x10\x00\x12\x08\n\x04USER\x10\x01\x12\n\n\x06\x44\x45VICE\x10\x02\"O\n\x16\x44\x65viceRegisterResponse\x12\x1f\n\x17\x64\x65vice_management_token\x18\x01 \x02(\t\x12\x14\n\x0cmachine_name\x18\x02 \x01(\t\"\x19\n\x17\x44\x65viceUnregisterRequest\"\x1a\n\x18\x44\x65viceUnregisterResponse\"<\n\x1a\x44\x65vicePolicySettingRequest\x12\x0b\n\x03key\x18\x01 \x02(\t\x12\x11\n\twatermark\x18\x02 \x01(\t\"\xd8\x01\n\x12PolicyFetchRequest\x12\x13\n\x0bpolicy_type\x18\x01 \x01(\t\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\x12U\n\x0esignature_type\x18\x03 \x01(\x0e\x32\x37.enterprise_management.PolicyFetchRequest.SignatureType:\x04NONE\x12\x1a\n\x12public_key_version\x18\x04 \x01(\x05\"\'\n\rSignatureType\x12\x08\n\x04NONE\x10\x00\x12\x0c\n\x08SHA1_RSA\x10\x01\"\xb2\x02\n\nPolicyData\x12\x13\n\x0bpolicy_type\x18\x01 \x01(\t\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\x12\x15\n\rrequest_token\x18\x03 \x01(\t\x12\x14\n\x0cpolicy_value\x18\x04 \x01(\x0c\x12\x14\n\x0cmachine_name\x18\x05 \x01(\t\x12\x1a\n\x12public_key_version\x18\x06 \x01(\x05\x12\x10\n\x08username\x18\x07 \x01(\t\x12\x11\n\tdevice_id\x18\x08 \x01(\t\x12I\n\x05state\x18\t \x01(\x0e\x32\x32.enterprise_management.PolicyData.AssociationState:\x06\x41\x43TIVE\"-\n\x10\x41ssociationState\x12\n\n\x06\x41\x43TIVE\x10\x00\x12\r\n\tUNMANAGED\x10\x01\"\xae\x01\n\x13PolicyFetchResponse\x12\x12\n\nerror_code\x18\x01 \x01(\x05\x12\x15\n\rerror_message\x18\x02 \x01(\t\x12\x13\n\x0bpolicy_data\x18\x03 \x01(\x0c\x12\x1d\n\x15policy_data_signature\x18\x04 \x01(\x0c\x12\x16\n\x0enew_public_key\x18\x05 \x01(\x0c\x12 \n\x18new_public_key_signature\x18\x06 \x01(\x0c\"\xb3\x01\n\x13\x44\x65vicePolicyRequest\x12\x14\n\x0cpolicy_scope\x18\x01 \x01(\t\x12J\n\x0fsetting_request\x18\x02 \x03(\x0b\x32\x31.enterprise_management.DevicePolicySettingRequest\x12:\n\x07request\x18\x03 \x03(\x0b\x32).enterprise_management.PolicyFetchRequest\"T\n\x14\x44\x65vicePolicyResponse\x12<\n\x08response\x18\x03 \x03(\x0b\x32*.enterprise_management.PolicyFetchResponse\"\xf1\x01\n\x17\x44\x65viceManagementRequest\x12\x46\n\x10register_request\x18\x01 \x01(\x0b\x32,.enterprise_management.DeviceRegisterRequest\x12J\n\x12unregister_request\x18\x02 \x01(\x0b\x32..enterprise_management.DeviceUnregisterRequest\x12\x42\n\x0epolicy_request\x18\x03 \x01(\x0b\x32*.enterprise_management.DevicePolicyRequest\"\x8f\x02\n\x18\x44\x65viceManagementResponse\x12\x15\n\rerror_message\x18\x02 \x01(\t\x12H\n\x11register_response\x18\x03 \x01(\x0b\x32-.enterprise_management.DeviceRegisterResponse\x12L\n\x13unregister_response\x18\x04 \x01(\x0b\x32/.enterprise_management.DeviceUnregisterResponse\x12\x44\n\x0fpolicy_response\x18\x05 \x01(\x0b\x32+.enterprise_management.DevicePolicyResponseB\x02H\x03')
_DEVICEREGISTERREQUEST_TYPE = descriptor.EnumDescriptor(
name='Type',
full_name='enterprise_management.DeviceRegisterRequest.Type',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='TT', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='USER', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DEVICE', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=216,
serialized_end=252,
)
_POLICYFETCHREQUEST_SIGNATURETYPE = descriptor.EnumDescriptor(
name='SignatureType',
full_name='enterprise_management.PolicyFetchRequest.SignatureType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='SHA1_RSA', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=630,
serialized_end=669,
)
_POLICYDATA_ASSOCIATIONSTATE = descriptor.EnumDescriptor(
name='AssociationState',
full_name='enterprise_management.PolicyData.AssociationState',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='ACTIVE', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='UNMANAGED', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=933,
serialized_end=978,
)
_DEVICEREGISTERREQUEST = descriptor.Descriptor(
name='DeviceRegisterRequest',
full_name='enterprise_management.DeviceRegisterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='reregister', full_name='enterprise_management.DeviceRegisterRequest.reregister', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='type', full_name='enterprise_management.DeviceRegisterRequest.type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='machine_id', full_name='enterprise_management.DeviceRegisterRequest.machine_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='machine_model', full_name='enterprise_management.DeviceRegisterRequest.machine_model', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_DEVICEREGISTERREQUEST_TYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=59,
serialized_end=252,
)
_DEVICEREGISTERRESPONSE = descriptor.Descriptor(
name='DeviceRegisterResponse',
full_name='enterprise_management.DeviceRegisterResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='device_management_token', full_name='enterprise_management.DeviceRegisterResponse.device_management_token', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='machine_name', full_name='enterprise_management.DeviceRegisterResponse.machine_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=254,
serialized_end=333,
)
_DEVICEUNREGISTERREQUEST = descriptor.Descriptor(
name='DeviceUnregisterRequest',
full_name='enterprise_management.DeviceUnregisterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=335,
serialized_end=360,
)
_DEVICEUNREGISTERRESPONSE = descriptor.Descriptor(
name='DeviceUnregisterResponse',
full_name='enterprise_management.DeviceUnregisterResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=362,
serialized_end=388,
)
_DEVICEPOLICYSETTINGREQUEST = descriptor.Descriptor(
name='DevicePolicySettingRequest',
full_name='enterprise_management.DevicePolicySettingRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='key', full_name='enterprise_management.DevicePolicySettingRequest.key', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='watermark', full_name='enterprise_management.DevicePolicySettingRequest.watermark', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=390,
serialized_end=450,
)
_POLICYFETCHREQUEST = descriptor.Descriptor(
name='PolicyFetchRequest',
full_name='enterprise_management.PolicyFetchRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='policy_type', full_name='enterprise_management.PolicyFetchRequest.policy_type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='timestamp', full_name='enterprise_management.PolicyFetchRequest.timestamp', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='signature_type', full_name='enterprise_management.PolicyFetchRequest.signature_type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='public_key_version', full_name='enterprise_management.PolicyFetchRequest.public_key_version', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_POLICYFETCHREQUEST_SIGNATURETYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=453,
serialized_end=669,
)
_POLICYDATA = descriptor.Descriptor(
name='PolicyData',
full_name='enterprise_management.PolicyData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='policy_type', full_name='enterprise_management.PolicyData.policy_type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='timestamp', full_name='enterprise_management.PolicyData.timestamp', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='request_token', full_name='enterprise_management.PolicyData.request_token', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='policy_value', full_name='enterprise_management.PolicyData.policy_value', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='machine_name', full_name='enterprise_management.PolicyData.machine_name', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='public_key_version', full_name='enterprise_management.PolicyData.public_key_version', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='username', full_name='enterprise_management.PolicyData.username', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='device_id', full_name='enterprise_management.PolicyData.device_id', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='state', full_name='enterprise_management.PolicyData.state', index=8,
number=9, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_POLICYDATA_ASSOCIATIONSTATE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=672,
serialized_end=978,
)
_POLICYFETCHRESPONSE = descriptor.Descriptor(
name='PolicyFetchResponse',
full_name='enterprise_management.PolicyFetchResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='error_code', full_name='enterprise_management.PolicyFetchResponse.error_code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='error_message', full_name='enterprise_management.PolicyFetchResponse.error_message', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='policy_data', full_name='enterprise_management.PolicyFetchResponse.policy_data', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='policy_data_signature', full_name='enterprise_management.PolicyFetchResponse.policy_data_signature', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='new_public_key', full_name='enterprise_management.PolicyFetchResponse.new_public_key', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='new_public_key_signature', full_name='enterprise_management.PolicyFetchResponse.new_public_key_signature', index=5,
number=6, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=981,
serialized_end=1155,
)
_DEVICEPOLICYREQUEST = descriptor.Descriptor(
name='DevicePolicyRequest',
full_name='enterprise_management.DevicePolicyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='policy_scope', full_name='enterprise_management.DevicePolicyRequest.policy_scope', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='setting_request', full_name='enterprise_management.DevicePolicyRequest.setting_request', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='request', full_name='enterprise_management.DevicePolicyRequest.request', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1158,
serialized_end=1337,
)
_DEVICEPOLICYRESPONSE = descriptor.Descriptor(
name='DevicePolicyResponse',
full_name='enterprise_management.DevicePolicyResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='response', full_name='enterprise_management.DevicePolicyResponse.response', index=0,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1339,
serialized_end=1423,
)
_DEVICEMANAGEMENTREQUEST = descriptor.Descriptor(
name='DeviceManagementRequest',
full_name='enterprise_management.DeviceManagementRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='register_request', full_name='enterprise_management.DeviceManagementRequest.register_request', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='unregister_request', full_name='enterprise_management.DeviceManagementRequest.unregister_request', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='policy_request', full_name='enterprise_management.DeviceManagementRequest.policy_request', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1426,
serialized_end=1667,
)
_DEVICEMANAGEMENTRESPONSE = descriptor.Descriptor(
name='DeviceManagementResponse',
full_name='enterprise_management.DeviceManagementResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='error_message', full_name='enterprise_management.DeviceManagementResponse.error_message', index=0,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='register_response', full_name='enterprise_management.DeviceManagementResponse.register_response', index=1,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='unregister_response', full_name='enterprise_management.DeviceManagementResponse.unregister_response', index=2,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='policy_response', full_name='enterprise_management.DeviceManagementResponse.policy_response', index=3,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1670,
serialized_end=1941,
)
_DEVICEREGISTERREQUEST.fields_by_name['type'].enum_type = _DEVICEREGISTERREQUEST_TYPE
_DEVICEREGISTERREQUEST_TYPE.containing_type = _DEVICEREGISTERREQUEST;
_POLICYFETCHREQUEST.fields_by_name['signature_type'].enum_type = _POLICYFETCHREQUEST_SIGNATURETYPE
_POLICYFETCHREQUEST_SIGNATURETYPE.containing_type = _POLICYFETCHREQUEST;
_POLICYDATA.fields_by_name['state'].enum_type = _POLICYDATA_ASSOCIATIONSTATE
_POLICYDATA_ASSOCIATIONSTATE.containing_type = _POLICYDATA;
_DEVICEPOLICYREQUEST.fields_by_name['setting_request'].message_type = _DEVICEPOLICYSETTINGREQUEST
_DEVICEPOLICYREQUEST.fields_by_name['request'].message_type = _POLICYFETCHREQUEST
_DEVICEPOLICYRESPONSE.fields_by_name['response'].message_type = _POLICYFETCHRESPONSE
_DEVICEMANAGEMENTREQUEST.fields_by_name['register_request'].message_type = _DEVICEREGISTERREQUEST
_DEVICEMANAGEMENTREQUEST.fields_by_name['unregister_request'].message_type = _DEVICEUNREGISTERREQUEST
_DEVICEMANAGEMENTREQUEST.fields_by_name['policy_request'].message_type = _DEVICEPOLICYREQUEST
_DEVICEMANAGEMENTRESPONSE.fields_by_name['register_response'].message_type = _DEVICEREGISTERRESPONSE
_DEVICEMANAGEMENTRESPONSE.fields_by_name['unregister_response'].message_type = _DEVICEUNREGISTERRESPONSE
_DEVICEMANAGEMENTRESPONSE.fields_by_name['policy_response'].message_type = _DEVICEPOLICYRESPONSE
DESCRIPTOR.message_types_by_name['DeviceRegisterRequest'] = _DEVICEREGISTERREQUEST
DESCRIPTOR.message_types_by_name['DeviceRegisterResponse'] = _DEVICEREGISTERRESPONSE
DESCRIPTOR.message_types_by_name['DeviceUnregisterRequest'] = _DEVICEUNREGISTERREQUEST
DESCRIPTOR.message_types_by_name['DeviceUnregisterResponse'] = _DEVICEUNREGISTERRESPONSE
DESCRIPTOR.message_types_by_name['DevicePolicySettingRequest'] = _DEVICEPOLICYSETTINGREQUEST
DESCRIPTOR.message_types_by_name['PolicyFetchRequest'] = _POLICYFETCHREQUEST
DESCRIPTOR.message_types_by_name['PolicyData'] = _POLICYDATA
DESCRIPTOR.message_types_by_name['PolicyFetchResponse'] = _POLICYFETCHRESPONSE
DESCRIPTOR.message_types_by_name['DevicePolicyRequest'] = _DEVICEPOLICYREQUEST
DESCRIPTOR.message_types_by_name['DevicePolicyResponse'] = _DEVICEPOLICYRESPONSE
DESCRIPTOR.message_types_by_name['DeviceManagementRequest'] = _DEVICEMANAGEMENTREQUEST
DESCRIPTOR.message_types_by_name['DeviceManagementResponse'] = _DEVICEMANAGEMENTRESPONSE
class DeviceRegisterRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DEVICEREGISTERREQUEST
# @@protoc_insertion_point(class_scope:enterprise_management.DeviceRegisterRequest)
class DeviceRegisterResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DEVICEREGISTERRESPONSE
# @@protoc_insertion_point(class_scope:enterprise_management.DeviceRegisterResponse)
class DeviceUnregisterRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DEVICEUNREGISTERREQUEST
# @@protoc_insertion_point(class_scope:enterprise_management.DeviceUnregisterRequest)
class DeviceUnregisterResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DEVICEUNREGISTERRESPONSE
# @@protoc_insertion_point(class_scope:enterprise_management.DeviceUnregisterResponse)
class DevicePolicySettingRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DEVICEPOLICYSETTINGREQUEST
# @@protoc_insertion_point(class_scope:enterprise_management.DevicePolicySettingRequest)
class PolicyFetchRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _POLICYFETCHREQUEST
# @@protoc_insertion_point(class_scope:enterprise_management.PolicyFetchRequest)
class PolicyData(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _POLICYDATA
# @@protoc_insertion_point(class_scope:enterprise_management.PolicyData)
class PolicyFetchResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _POLICYFETCHRESPONSE
# @@protoc_insertion_point(class_scope:enterprise_management.PolicyFetchResponse)
class DevicePolicyRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DEVICEPOLICYREQUEST
# @@protoc_insertion_point(class_scope:enterprise_management.DevicePolicyRequest)
class DevicePolicyResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DEVICEPOLICYRESPONSE
# @@protoc_insertion_point(class_scope:enterprise_management.DevicePolicyResponse)
class DeviceManagementRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DEVICEMANAGEMENTREQUEST
# @@protoc_insertion_point(class_scope:enterprise_management.DeviceManagementRequest)
class DeviceManagementResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DEVICEMANAGEMENTRESPONSE
# @@protoc_insertion_point(class_scope:enterprise_management.DeviceManagementResponse)
# @@protoc_insertion_point(module_scope)
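# Usage sketch (editor's assumption; this relies on the standard protobuf
# Python API rather than anything defined in this generated file -- the file
# itself says DO NOT EDIT). Round-trips a request message:
def _example_roundtrip():
  req = DeviceManagementRequest()
  req.register_request.type = DeviceRegisterRequest.USER
  data = req.SerializeToString()
  parsed = DeviceManagementRequest()
  parsed.ParseFromString(data)
  return parsed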
| [
"frank.musbach@web.de"
] | frank.musbach@web.de |
beeba182233a55afed473db2a9ac951a1fb5db8c | 94a8b97049df146e3777aa7244b7bf2037716e8c | /mysite/util_script_import_ibge_Ufs_municipios_populacao_projetada.py | 5f9ecf7c2c5934d8356b01efbba2538180ee7627 | [] | no_license | Mardik/covid19-devopspbs | 8b9987816e58993fd7c8eab4030ab88722020377 | 282ffa2efc5e674014d9062e14e8ed165236d5be | refs/heads/master | 2021-04-11T11:35:26.046693 | 2020-04-14T23:07:51 | 2020-04-14T23:07:51 | 249,016,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | import locale
import csv
from covid19.models import UF,Municipio
from datetime import datetime
from django.db.models import Q
#exec(open('util_script_import_ibge_Ufs_municipios_populacao_projetada.py').read())
BASE_PATH = 'dados-externos/Populacao_projetada_2019.csv'
def import_ibge_uf_municipios_dado_populacao_projetada(base_path):
print("Importando dados sobre população projetada dos Municipios para ano de 2019")
with open(base_path,'r') as f:
reader = csv.DictReader(f,delimiter=';')
for dict_r in reader:
municipio = Municipio.objects.get(id=dict_r['cod'])
municipio.populacao_projetada = dict_r['popula']
print(municipio.populacao_projetada)
municipio.save()
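# Expected CSV layout (editor's assumption, inferred from the 'cod' and
# 'popula' field names and the ';' delimiter), e.g.:
#   cod;popula
#   1100015;22945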
import_ibge_uf_municipios_dado_populacao_projetada(BASE_PATH) | [
"thiagolsa@gmail.com"
] | thiagolsa@gmail.com |
9aaa5c64aad7c4b8086e9c0f5c5b5cf18c161a9d | 06a7dc7cc93d019e4a9cbcf672b23a0bbacf8e8b | /2016_schizConnect/supervised_analysis/NMorphCH/VBM/30yo_scripts/03_svm_NMorphCH.py | ddf0b8d658716ee3e6a5800a6a9e9825811f7e0e | [] | no_license | neurospin/scripts | 6c06cd218a5f32de9c3c2b7d1d8bda3f3d107458 | f14a2c9cf2cd7f5fbea767b017c3faf36d170bdb | refs/heads/master | 2021-07-11T22:55:46.567791 | 2021-07-02T13:08:02 | 2021-07-02T13:08:02 | 10,549,286 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 10,690 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 22 09:44:24 2017
@author: ad247405
"""
import os
import json
import numpy as np
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import precision_recall_fscore_support
from scipy.stats import binom_test
from collections import OrderedDict
from sklearn import preprocessing
from sklearn.metrics import roc_auc_score
from sklearn import svm
import pandas as pd
import shutil
WD = '/neurospin/brainomics/2016_schizConnect/analysis/NMorphCH/VBM/results_30yo/svm/svm_NMorphCH_30yo'
def config_filename(): return os.path.join(WD,"config_dCV.json")
def results_filename(): return os.path.join(WD,"results_dCV.xlsx")
#############################################################################
def load_globals(config):
import mapreduce as GLOBAL # access to global variables
GLOBAL.DATA = GLOBAL.load_data(config["data"])
def resample(config, resample_nb):
import mapreduce as GLOBAL # access to global variables
GLOBAL.DATA = GLOBAL.load_data(config["data"])
resample = config["resample"][resample_nb]
GLOBAL.DATA_RESAMPLED = {k: [GLOBAL.DATA[k][idx, ...] for idx in resample]
for k in GLOBAL.DATA}
def mapper(key, output_collector):
import mapreduce as GLOBAL
Xtr = GLOBAL.DATA_RESAMPLED["X"][0]
Xte = GLOBAL.DATA_RESAMPLED["X"][1]
ytr = GLOBAL.DATA_RESAMPLED["y"][0]
yte = GLOBAL.DATA_RESAMPLED["y"][1]
c = float(key[0])
print("c:%f" % (c))
class_weight='balanced' # unbiased
mask = np.ones(Xtr.shape[0], dtype=bool)
scaler = preprocessing.StandardScaler().fit(Xtr)
Xtr = scaler.transform(Xtr)
Xte=scaler.transform(Xte)
mod = svm.LinearSVC(C=c,fit_intercept=False,class_weight= class_weight)
mod.fit(Xtr, ytr.ravel())
y_pred = mod.predict(Xte)
y_proba_pred = mod.decision_function(Xte)
ret = dict(y_pred=y_pred, y_true=yte,prob_pred = y_proba_pred, beta=mod.coef_, mask=mask)
if output_collector:
output_collector.collect(key, ret)
else:
return ret
def scores(key, paths, config):
import mapreduce
print (key)
values = [mapreduce.OutputCollector(p) for p in paths]
values = [item.load() for item in values]
y_true = [item["y_true"].ravel() for item in values]
y_pred = [item["y_pred"].ravel() for item in values]
y_true = np.concatenate(y_true)
y_pred = np.concatenate(y_pred)
prob_pred = [item["prob_pred"].ravel() for item in values]
prob_pred = np.concatenate(prob_pred)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
auc = roc_auc_score(y_true, prob_pred) #area under curve score.
#betas = np.hstack([item["beta"] for item in values]).T
# threshold betas to compute fleiss_kappa and DICE
#betas_t = np.vstack([array_utils.arr_threshold_from_norm2_ratio(betas[i, :], .99)[0] for i in range(betas.shape[0])])
#Compute pvalue
success = r * s
success = success.astype('int')
prob_class1 = np.count_nonzero(y_true) / float(len(y_true))
pvalue_recall0_true_prob = binom_test(success[0], s[0], 1 - prob_class1,alternative = 'greater')
pvalue_recall1_true_prob = binom_test(success[1], s[1], prob_class1,alternative = 'greater')
pvalue_recall0_unknwon_prob = binom_test(success[0], s[0], 0.5,alternative = 'greater')
pvalue_recall1_unknown_prob = binom_test(success[1], s[1], 0.5,alternative = 'greater')
pvalue_recall_mean = binom_test(success[0]+success[1], s[0] + s[1], p=0.5,alternative = 'greater')
scores = OrderedDict()
try:
a, l1, l2 , tv = [float(par) for par in key.split("_")]
scores['a'] = a
scores['l1'] = l1
scores['l2'] = l2
scores['tv'] = tv
left = float(1 - tv)
if left == 0: left = 1.
scores['l1_ratio'] = float(l1) / left
except:
pass
scores['recall_0'] = r[0]
scores['recall_1'] = r[1]
scores['recall_mean'] = r.mean()
scores["auc"] = auc
scores['pvalue_recall0_true_prob_one_sided'] = pvalue_recall0_true_prob
scores['pvalue_recall1_true_prob_one_sided'] = pvalue_recall1_true_prob
scores['pvalue_recall0_unknwon_prob_one_sided'] = pvalue_recall0_unknwon_prob
scores['pvalue_recall1_unknown_prob_one_sided'] = pvalue_recall1_unknown_prob
scores['pvalue_recall_mean'] = pvalue_recall_mean
#scores['prop_non_zeros_mean'] = float(np.count_nonzero(betas_t)) / \
# float(np.prod(betas.shape))
scores['param_key'] = key
return scores
def reducer(key, values):
import os, glob, pandas as pd
os.chdir(os.path.dirname(config_filename()))
config = json.load(open(config_filename()))
paths = glob.glob(os.path.join(config['map_output'], "*", "*", "*"))
#paths = [p for p in paths if not p.count("0.8_-1")]
def close(vec, val, tol=1e-4):
return np.abs(vec - val) < tol
def groupby_paths(paths, pos):
groups = {g:[] for g in set([p.split("/")[pos] for p in paths])}
for p in paths:
groups[p.split("/")[pos]].append(p)
return groups
def argmaxscore_bygroup(data, groupby='fold', param_key="param_key", score="recall_mean"):
arg_max_byfold = list()
for fold, data_fold in data.groupby(groupby):
assert len(data_fold) == len(set(data_fold[param_key])) # ensure all param are diff
arg_max_byfold.append([fold, data_fold.ix[data_fold[score].argmax()][param_key], data_fold[score].max()])
return pd.DataFrame(arg_max_byfold, columns=[groupby, param_key, score])
print('## Refit scores')
print('## ------------')
byparams = groupby_paths([p for p in paths if p.count("all") and not p.count("all/all")],3)
byparams_scores = {k:scores(k, v, config) for k, v in byparams.items()}
data = [list(byparams_scores[k].values()) for k in byparams_scores]
columns = list(byparams_scores[list(byparams_scores.keys())[0]].keys())
scores_refit = pd.DataFrame(data, columns=columns)
print('## doublecv scores by outer-cv and by params')
print('## -----------------------------------------')
data = list()
bycv = groupby_paths([p for p in paths if p.count("cvnested")],1)
for fold, paths_fold in bycv.items():
print(fold)
byparams = groupby_paths([p for p in paths_fold], 3)
byparams_scores = {k:scores(k, v, config) for k, v in byparams.items()}
data += [[fold] + list(byparams_scores[k].values()) for k in byparams_scores]
scores_dcv_byparams = pd.DataFrame(data, columns=["fold"] + columns)
print('## Model selection')
print('## ---------------')
svm = argmaxscore_bygroup(scores_dcv_byparams); svm["method"] = "svm"
scores_argmax_byfold = svm
print('## Apply best model on refited')
print('## ---------------------------')
scores_svm = scores("nestedcv", [os.path.join(config['map_output'], row["fold"], "all", row["param_key"]) for index, row in svm.iterrows()], config)
scores_cv = pd.DataFrame([["svm"] + list(scores_svm.values())], columns=["method"] + list(scores_svm.keys()))
with pd.ExcelWriter(results_filename()) as writer:
scores_refit.to_excel(writer, sheet_name='cv_by_param', index=False)
scores_dcv_byparams.to_excel(writer, sheet_name='cv_cv_byparam', index=False)
scores_argmax_byfold.to_excel(writer, sheet_name='cv_argmax', index=False)
scores_cv.to_excel(writer, sheet_name='dcv', index=False)
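# Note (editor's comment): the model_selectionCV tree uses keys of the form
# "all/all" for the fit on all data, "cvXX/all" for outer-fold refits and
# "cvXX/cvnestedYY" for inner folds; reducer() groups paths on these
# components to pick the best C per outer fold, then scores that choice on
# the held-out outer test sets.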
##############################################################################
if __name__ == "__main__":
WD = '/neurospin/brainomics/2016_schizConnect/analysis/NMorphCH/VBM/results_30yo/svm/svm_NMorphCH_30yo'
INPUT_DATA_X = '/neurospin/brainomics/2016_schizConnect/analysis/NMorphCH/VBM/data/data_30yo/X.npy'
INPUT_DATA_y = '/neurospin/brainomics/2016_schizConnect/analysis/NMorphCH/VBM/data/data_30yo/y.npy'
INPUT_MASK_PATH = '/neurospin/brainomics/2016_schizConnect/analysis/NMorphCH/VBM/data/data_30yo/mask.nii'
INPUT_CSV = '/neurospin/brainomics/2016_schizConnect/analysis/NMorphCH/VBM/population_30yo.csv'
pop = pd.read_csv(INPUT_CSV,delimiter=' ')
number_subjects = pop.shape[0]
NFOLDS_OUTER = 5
NFOLDS_INNER = 5
shutil.copy(INPUT_DATA_X, WD)
shutil.copy(INPUT_DATA_y, WD)
shutil.copy(INPUT_MASK_PATH, WD)
#############################################################################
## Create config file
y = np.load(INPUT_DATA_y)
cv_outer = [[tr, te] for tr,te in StratifiedKFold(y.ravel(), n_folds=NFOLDS_OUTER, random_state=42)]
if cv_outer[0] is not None: # Make sure first fold is None
cv_outer.insert(0, None)
null_resampling = list(); null_resampling.append(np.arange(0,len(y))),null_resampling.append(np.arange(0,len(y)))
cv_outer[0] = null_resampling
import collections
cv = collections.OrderedDict()
for cv_outer_i, (tr_val, te) in enumerate(cv_outer):
if cv_outer_i == 0:
cv["all/all"] = [tr_val, te]
else:
cv["cv%02d/all" % (cv_outer_i -1)] = [tr_val, te]
cv_inner = StratifiedKFold(y[tr_val].ravel(), n_folds=NFOLDS_INNER, random_state=42)
for cv_inner_i, (tr, val) in enumerate(cv_inner):
cv["cv%02d/cvnested%02d" % ((cv_outer_i-1), cv_inner_i)] = [tr_val[tr], tr_val[val]]
for k in cv:
cv[k] = [cv[k][0].tolist(), cv[k][1].tolist()]
C_range = [[100],[10],[1],[1e-1],[1e-2],[1e-3],[1e-4],[1e-5],[1e-6],[1e-7],[1e-8],[1e-9]]
user_func_filename = "/home/ad247405/git/scripts/2016_schizConnect/supervised_analysis/NMorphCH/VBM/30yo_scripts/03_svm_NMorphCH.py"
config = dict(data=dict(X="X.npy", y="y.npy"),
params=C_range, resample=cv,
structure="mask.nii",
map_output="model_selectionCV",
user_func=user_func_filename,
reduce_input="results/*/*",
reduce_group_by="params",
reduce_output="model_selectionCV.csv")
json.dump(config, open(os.path.join(WD, "config_dCV.json"), "w"))
# Build utils files: sync (push/pull) and PBS
import brainomics.cluster_gabriel as clust_utils
sync_push_filename, sync_pull_filename, WD_CLUSTER = \
clust_utils.gabriel_make_sync_data_files(WD)
cmd = "mapreduce.py --map %s/config_dCV.json" % WD_CLUSTER
clust_utils.gabriel_make_qsub_job_files(WD, cmd,walltime = "250:00:00") | [
"ad247405@is222241.intra.cea.fr"
] | ad247405@is222241.intra.cea.fr |
c01e2bd21370ace182254cb8629b9b197c1c00b2 | f1c8091b30ebbb49bca7f60fe1aa078613331a65 | /Set08/clustercheck.py | bfaa21dadb11ef9f7e1f8b113d20ff3369bee84b | [] | no_license | nikhil-kathuria/information_retrieval_cs6200 | 4c92d16e6ba963996d2e7948753cfe19851875b8 | 6e683675fe3974df1c1a04bf10eddc5ad2bff53b | refs/heads/master | 2021-06-12T08:19:25.013279 | 2016-12-27T06:52:11 | 2016-12-27T06:52:11 | 69,931,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | from collections import defaultdict
class ClusterCheck:
def __init__(self):
self._path ="../Set01/qrels.adhoc.51-100.AP89.txt"
self._dqmap = defaultdict(set)
self._pairlist = list()
self._reldoc = set()
self._qset = set()
self.querygen()
self.popdata()
self.genpairs()
def querygen(self):
fileloc = "../Set01/AP_DATA/query_desc.51-100.short.txt"
qfile = open(fileloc)
for line in qfile:
self._qset.add(line.split('.')[0])
qfile.close()
def popdata(self):
fobj = open(self._path, 'r')
for line in fobj:
arr = line.split()
if arr[0] in self._qset and arr[3] != "0":
self._dqmap[arr[2]].add(arr[0])
self._reldoc.add(arr[2])
fobj.close()
def genpairs(self):
docs = list(self._reldoc)
for slow in range(len(docs)):
for fast in range(slow + 1, len(docs)):
self._pairlist.append((docs[slow], docs[fast]))
self._qset = None
self._reldoc = None
# print(len(docs))
# print(len(pairlist))
# print(len(self._qmap))
for doc in self._dqmap:
if len(self._dqmap[doc]) > 1:
print(self._dqmap[doc])
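# Note (editor's comment): genpairs() materialises every unordered pair of
# relevant documents (C(n, 2) tuples), so memory grows quadratically with the
# relevant set; the trailing loop prints docs judged relevant for more than
# one query.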
if __name__ == '__main__':
ck = ClusterCheck()
| [
"nikhil.kathuria@gmail.com"
] | nikhil.kathuria@gmail.com |
d40223e572f7fdd907e876aca26d04b979294f0c | 1de21eb8a0f60c3b6c80ff5c38d635dfcc3659ea | /person.py | e7f3edb718ec05eed63af909efb48dacfba55e8d | [] | no_license | ravisankar712/SIR-Simulation | 406c51139b95794253de59a454ff383c66ba8948 | 18a2405b1e76cca38d52288d92526220c669efdb | refs/heads/master | 2022-07-25T07:12:31.195672 | 2020-05-22T09:13:52 | 2020-05-22T09:13:52 | 261,978,066 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,792 | py | import numpy as np
import pygame as pg
pg.init()
width = 600
height = 600
canvas = pg.display.set_mode((width, height))
class Person:
def __init__(self, x, y, compartments = 1):
self.pos = np.array([x, y])
self.vel = np.random.random(2) * 2 - 1
self.acc = np.zeros(2)
self.maxSpeed = 5.0
self.Speed_memory = self.maxSpeed
self.size = 4
self.perception = 50
self.condition = 'S'
self.prob_inf = 0.02 #prob of getting infected
self.clock = 0 #internal clock to keep track of recovery time
#compartmentalising the object based on its pos
self.res = int(np.sqrt(compartments))
grid_x = int(self.res * self.pos[0]/width)
grid_y = int(self.res * self.pos[1]/height)
self.locality = [grid_x, grid_y]
#intercompartment travel stuff
self.prob_interstate = 0.002 #prob of traveling to another compartment
self.flying = False #variable is true only when traveling to other compartments
self.next_locality = [] #the compartment to which the object is flying
self.next_pos = np.zeros(2) #the position to which object is flying
#recovery and other stuff.
self.recovery_time = 120
self.symptoms = True
self.under_quarantine = False
self.symptom_time = 50
self.quarantine_zone = True
#making some objects symptomless based on a given prob
def set_symptoms(self, prob):
if np.random.random() < prob:
pass
else:
self.symptoms = False
#making the quarantine zone facility on and off
def set_quarantinezone(self, value):
self.quarantine_zone = value
#drawing
def show(self):
if self.condition == 'S':
color = (0, 200, 255) #susceptible are blue
elif self.condition == 'I':
if self.symptoms:
color = (255, 100, 0) #infected with symptoms is red
else:
color = (255, 255, 0) #infected without symptoms is yellow
elif self.condition == 'R':
color = (255, 255, 255) #recovered is white
        # if the object is newly infected, its radius doubles for a short time; blink() keeps track of it
if self.blink():
r = self.size * 2
else:
r = self.size
pg.draw.circle(canvas, color, [int(self.pos[0]), int(self.pos[1])], int(r))
def blink(self):
blink = False
if 1 < self.clock < 10:
blink = True
return blink
def update(self):
self.edges()
if self.condition == 'I': #if infected, check for recovery. If not recovered, internal clock ticks
self.recovery()
self.clock += 1
self.vel += self.acc
#limiting the speed
if np.linalg.norm(self.vel) > self.maxSpeed:
self.vel = self.vel/np.linalg.norm(self.vel) * self.maxSpeed
self.pos += self.vel
self.acc *= 0.0
def edges(self):
#collides with edges only when not flying
if not self.flying:
x, y = self.pos
i, j = self.locality
#finding edges based on the locality.
lower_x = i * width / self.res
upper_x = (i + 1) * width / self.res
lower_y = j * height / self.res
upper_y = (j + 1) * height / self.res
if x < lower_x :
self.pos[0] = lower_x
self.acc[0] *= -1
self.vel[0] *= -1
if x > upper_x - self.size:
self.pos[0] = upper_x - self.size
self.acc[0] *= -1
self.vel[0] *= -1
if y < lower_y :
self.pos[1] = lower_y
self.acc[1] *= -1
self.vel[1] *= -1
if y > upper_y - self.size:
self.pos[1] = upper_y - self.size
self.acc[1] *= -1
self.vel[1] *= -1
else:
self.fly(self.next_pos[0], self.next_pos[1])
#the random walk
def walk(self):
if np.random.random() < 0.5:
self.acc += (np.random.random(2) - 0.5) * 2
# if np.linalg.norm(self.acc) > self.maxSpeed:
# self.acc = self.acc/np.linalg.norm(self.acc) * self.maxSpeed
def get_infection(self, other):
#if susceptible, and see another which is infected under the perception radius,
#while both self and other are not flying, then get infected with some probability
if self.condition == 'S':
for o in other:
if o.condition == 'I' and o.locality == self.locality and not o.flying and not self.flying:
d = np.linalg.norm(self.pos - o.pos)
if 0 < d < self.perception:
if np.random.random() < self.prob_inf:
self.condition = 'I'
#I becomes R after the recovery time.
def recovery(self):
if self.condition == 'I' and self.clock > self.recovery_time:
self.condition = 'R'
#social distancing is a repulsive force, towards someone in the same compartment, who is not flying.
#perception is twice as that of infection.
def social_distancing(self, other):
for o in other:
if o.locality == self.locality and not o.flying and not self.flying:
f = self.pos - o.pos
d = np.linalg.norm(f)
if 0 < d < self.perception * 2:
self.acc += f/d * self.maxSpeed
    # intercompartment traveling. Doesn't do so while flying or under quarantine.
def travel_interstate(self):
if np.random.random() < self.prob_interstate and not self.flying and not self.under_quarantine:
new_x = np.random.random() * width
new_y = np.random.random() * height
grid_x = int(self.res * new_x/width)
grid_y = int(self.res * new_y/height)
new_locality = [grid_x, grid_y]
if self.locality == new_locality :
pass
elif self.quarantine_zone and new_locality == [self.res - 1, self.res - 1]:
pass
else:
self.next_locality = [grid_x, grid_y]
self.next_pos = np.array([new_x, new_y])
self.flying = True
#flying to another pos
def fly(self, x, y):
new_pos = np.array([x, y])
f = self.pos - new_pos
d = np.linalg.norm(f)
if d > width/(self.res*2):
self.acc -= f
self.maxSpeed += 1
else:
self.pos = self.next_pos
self.locality = self.next_locality
self.maxSpeed = self.Speed_memory
self.flying = False
#quarantining, if there is facility and object shows symptoms (it takes a while to show the symptoms)
def get_quarantined(self):
if self.quarantine_zone and self.symptoms and self.condition == 'I' and not self.under_quarantine and self.clock > self.symptom_time:
#quarantine is implemented using the fly. An additional compartment at the end, of width 100
self.next_locality = [self.res, self.res]
self.next_pos = np.array([width + width/(self.res*2), height + height/(self.res*2)])
#if inside quarantine zone, stop doing inter compartment, dont percieve anyone. Else fly to the zone!
if np.linalg.norm(self.next_pos - self.pos) < width/(self.res*2):
self.under_quarantine = True
self.prob_interstate = -1
self.perception = 0
self.maxSpeed = 2.0 #under quarantine, max speed is reduced (just for aesthetic purposes!)
else:
                self.maxSpeed += 2  # when going to quarantine, speed increases (aesthetics!)
self.flying = True | [
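# Minimal driver sketch (editor's assumption -- the real game loop lives
# outside this module): advance a small population by one frame.
def _demo_frame(n=50):
    people = [Person(np.random.random() * width, np.random.random() * height, compartments=9)
              for _ in range(n)]
    people[0].condition = 'I'
    for p in people:
        p.walk()
        p.social_distancing(people)
        p.get_infection(people)
        p.get_quarantined()
        p.update()
        p.show()
    pg.display.update()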
"noreply@github.com"
] | noreply@github.com |
7cc48ee85f34495441b6766c72da1e16011dc1c8 | a30fd3fbb3cc20eec07cd4878214b29ebe89af33 | /Chapter7/task7-5.py | 1a8443e20745c4d682ad20eb67f2625ce675112b | [] | no_license | cmdtvt/R421-python-course | 310cbffe727ea39dc184b4e34d1c73c2a1fde01a | bdf23396bbce31ce97e6d1e74c4e2e48b1ba42b4 | refs/heads/master | 2023-08-16T13:22:02.170134 | 2021-10-10T09:21:06 | 2021-10-10T09:21:06 | 404,047,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,241 | py | '''
The last exercise in this chapter continues with the exercise from the last chapter, the calculator. In this exercise, expand the existing code by implementing the following new features: (A) Calculator does not automatically quit when the result is given, allowing user to do new calculations. The user has to select "6" in the menu to exit the program. (B) The calculator shows the selected numbers in the main menu by printing "Current numbers:" and the user-given input. By selecting "5" in the calculator menu, the user can change the given numbers. When implemented correctly, the program prints out following:
############################
Calculator
Give the first number: 100
Give the second number: 25
(1) +
(2) -
(3) *
(4) /
(5) Change numbers
(6) Quit
Current numbers: 100 25
Please select something (1-6): 5
Give the first number: 10
Give the second number: 30
(1) +
(2) -
(3) *
(4) /
(5) Change numbers
(6) Quit
Current numbers: 10 30
Please select something (1-6): 1
The result is: 40
(1) +
(2) -
(3) *
(4) /
(5) Change numbers
(6) Quit
Current numbers: 10 30
Please select something (1-6): 6
Thank you!
>>>
############################
Again, implement the program within one large while True-segment, which is terminated with break if the user selects the option "6".
############################
Example output:
Calculator
Give the first number: 50
Give the second number: 5
(1) +
(2) -
(3) *
(4) /
(5)Change numbers
(6)Quit
Current numbers: 50 5
Please select something (1-6): 1
The result is: 55
(1) +
(2) -
(3) *
(4) /
(5)Change numbers
(6)Quit
Current numbers: 50 5
Please select something (1-6): 2
The result is: 45
(1) +
(2) -
(3) *
(4) /
(5)Change numbers
(6)Quit
Current numbers: 50 5
Please select something (1-6): 4
The result is: 10.0
(1) +
(2) -
(3) *
(4) /
(5)Change numbers
(6)Quit
Current numbers: 50 5
Please select something (1-6): 6
Thank you!
############################
'''
import math
selectGood = True  # if the input is invalid, some of the output below is hidden
print("Calculator")
num1 = int(input("Give the first number: "))
num2 = int(input("Give the second number: "))
while True:
print("(1) +\n(2) -\n(3) *\n(4) /\n(5)sin(number1/number2)\n(6)cos(number1/number2)\n(7) Change numbers\n(8) Quit")
print("Current numbers: "+str(num1)+" "+str(num2))
select = int(input("Please select something (1-6): "))
result = 0
if select == 1:
result = num1 + num2
selectGood = True
elif select == 2:
result = num1 - num2
elif select == 3:
result = num1 * num2
elif select == 4:
result = num1 / num2
elif select == 5:
result = math.sin(num1/num2)
elif select == 6:
result = math.cos(num1/num2)
elif select == 7:
# print("Current numbers: "+str(num1)+" "+str(num2))
#print("Change numbers")
num1 = int(input("Give the first number: "))
num2 = int(input("Give the second number: "))
selectGood = False
elif select == 8:
print("Thank you!")
        break
else:
print("Selection was not correct.")
selectGood = False
if selectGood == True:
print("The result is: "+str(result))
| [
"tvtuusa@gmail.com"
] | tvtuusa@gmail.com |
dcd3654361b5e5399abbe0ea2e240e961a2e7f97 | 628c5ce89ae7d212cd118171cb6ee0787cb01857 | /color_maps.py | 54d2c1693614de9d6ed6033798d9f1aea8badcab | [] | no_license | sbs87/qpcr | c961881fd07c79fff441d2eecbabb67ecc79c9ce | 7c8da4fd5e54d012eddb5c18a043787058be4426 | refs/heads/master | 2016-09-07T18:38:23.147402 | 2013-06-25T20:58:20 | 2013-06-25T20:58:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,313 | py | """Python colormaps demo
includes:
examples for registering own color maps
utility for showing all or selected named colormaps including self-defined ones"""
import matplotlib
import matplotlib.colors as col
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
def register_own_cmaps():
"""define two example colormaps as segmented lists and register them"""
# a good guide for choosing colors is provided at
# http://geography.uoregon.edu/datagraphics/color_scales.htm
#
# example 1:
# create own colormap from purple, blue, green, orange to red
# cdict contains a tuple structure for 'red', 'green', and 'blue'.
# Each color has a list of (x,y0,y1) tuples, where
# x defines the "index" in the colormap (range 0..1), y0 is the
# color value (0..1) left of x, and y1 the color value right of x.
# The LinearSegmentedColormap method will linearly interpolate between
# (x[i],y1) and (x[i+1],y0)
# The gamma value denotes a "gamma curve" value which adjusts the brightness
# at the bottom and top of the colormap. According to matlab documentation
# this means:
# colormap values are modified as c^gamma, where gamma is (1-beta) for
# beta>0 and 1/(1+beta) for beta<=0
cdict = {'red': ((0.0, 0.0, 0.0),
(0.3, 0.5, 0.5),
(0.6, 0.7, 0.7),
(0.9, 0.8, 0.8),
(1.0, 0.8, 0.8)),
'green': ((0.0, 0.0, 0.0),
(0.3, 0.8, 0.8),
(0.6, 0.7, 0.7),
(0.9, 0.0, 0.0),
(1.0, 0.7, 0.7)),
'blue': ((0.0, 1.0, 1.0),
(0.3, 1.0, 1.0),
(0.6, 0.0, 0.0),
(0.9, 0.0, 0.0),
(1.0, 1.0, 1.0))}
cmap1 = col.LinearSegmentedColormap('my_colormap',cdict,N=256,gamma=0.75)
cm.register_cmap(name='own1', cmap=cmap1)
# example 2: use the "fromList() method
startcolor = '#586323' # a dark olive
midcolor = '#fcffc9' # a bright yellow
endcolor = '#bd2309' # medium dark red
cmap2 = col.LinearSegmentedColormap.from_list('own2',[startcolor,midcolor,endcolor])
# extra arguments are N=256, gamma=1.0
cm.register_cmap(cmap=cmap2)
# we can skip name here as it was already defined
return cmap2
def discrete_cmap(N=8):
"""create a colormap with N (N<15) discrete colors and register it"""
# define individual colors as hex values
cpool = [ '#FFFFFF', '#ADD8E6', '#FF0000', '#0000FF', '#000000',
'#faf214', '#2edfea', '#ea2ec4', '#ea2e40', '#cdcdcd',
'#577a4d', '#2e46c0', '#f59422', '#219774', '#8086d9' ]
cmap3 = col.ListedColormap(cpool[0:N], 'indexed')
cm.register_cmap(cmap=cmap3)
return cmap3
def show_cmaps(names=None):
"""display all colormaps included in the names list. If names is None, all
defined colormaps will be shown."""
# base code from http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps
matplotlib.rc('text', usetex=False)
a=np.outer(np.arange(0,1,0.01),np.ones(10)) # pseudo image data
f=plt.figure(figsize=(10,5))
f.subplots_adjust(top=0.8,bottom=0.05,left=0.01,right=0.99)
# get list of all colormap names
    # this would only obtain names of built-in colormaps:
    #   maps = [m for m in cm.datad if not m.endswith("_r")]
    # use the undocumented cmap_d dictionary instead, which also contains
    # self-registered maps
    maps = [m for m in cm.cmap_d if not m.endswith("_r")]
maps.sort()
# determine number of subplots to make
l=len(maps)+1
if names is not None: l=len(names) # assume all names are correct!
# loop over maps and plot the selected ones
i=0
for m in maps:
if names is None or m in names:
i+=1
ax = plt.subplot(1,l,i)
ax.axis("off")
plt.imshow(a,aspect='auto',cmap=cm.get_cmap(m),origin="lower")
plt.title(m,rotation=90,fontsize=10,verticalalignment='bottom')
plt.savefig("colormaps.png",dpi=100,facecolor='gray')
#if __name__ == "__main__":
# register_own_cmaps()
# discrete_cmap(8)
# show_cmaps(['indexed','Blues','OrRd','PiYG','PuOr',
# 'RdYlBu','RdYlGn','afmhot','binary','copper',
# 'gist_ncar','gist_rainbow','own1','own2']) | [
"steve@stevenbsmith.net"
] | steve@stevenbsmith.net |
1c5bdf7b6fb22522e79be46afc518f7472a7a7a6 | ffd0ff0492c4190097283dcfa1d51de70c84c1d1 | /Misc/python/test.py | 470db1309946306e8e6ea2490ca59ab1f2bd1a94 | [] | no_license | keithbrown/Miscellaneous | c8b6b6db97fb0b077e77d46017f75dfedd14514b | 6d2d44f67fe33fb24674cc8b016df0814cdda753 | refs/heads/master | 2021-01-11T01:30:31.928471 | 2014-06-05T15:37:40 | 2014-06-05T15:37:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | y = 'hello world'
print y
| [
"levi@roxsoftware.com"
] | levi@roxsoftware.com |
6372b8149189745fdc654366cf80c4ac81e6f43a | a59ba03b6194dceb81d7e4fa1290c003bc249142 | /20-SEPT-19/Semaphore.py | 6377a1eebb136bc461bf7274b94dc22206ef205f | [] | no_license | reachvedprakash/OS_LAB | 072845043677604d8a292695ff74a5c81845219e | 124bb74706c82a7db0329e7603edde8b5a645c38 | refs/heads/master | 2020-07-05T17:34:10.412069 | 2019-11-15T08:11:29 | 2019-11-15T08:11:29 | 202,714,660 | 0 | 0 | null | 2019-10-25T07:00:59 | 2019-08-16T11:18:43 | C++ | UTF-8 | Python | false | false | 809 | py | #!/usr/bin/env python
# coding: utf-8
# In[7]:
import threading
x = 0
# In[8]:
def increment():
global x
x += 5
# In[9]:
def thread_task(lock,sem):
for _ in range(100):
sem.acquire()
lock.acquire()
increment()
lock.release()
sem.release()
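# Note: here the Semaphore(1) and the Lock both serialize the same critical
# section, so one of them is redundant. A semaphore becomes interesting with
# a count > 1; hedged sketch: sem = threading.Semaphore(2) lets at most two
# threads pass sem.acquire() concurrently, while the lock still keeps
# increment() itself atomic.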
# In[10]:
def main_task():
global x
x = 0
lock = threading.Lock()
sem = threading.Semaphore()
t1 = threading.Thread(target=thread_task, args=(lock,sem))
t2 = threading.Thread(target=thread_task, args=(lock,sem))
t1.start()
t2.start()
t1.join()
t2.join()
# In[11]:
if __name__ == "__main__":
for i in range(10):
main_task()
print("Iteration {0}: x = {1}".format(i,x))
| [
"reachvedpraksh@gmail.com"
] | reachvedpraksh@gmail.com |
c6b2f7f233ae749bd30af6352d77ddab044b9439 | 70477889f721da253a4ab49a47d5a504458d5818 | /source/Lidar_curb_scan/Single_lidar_curb_scan/vscan_removal_tracking/utils/hot_key.py | 919c945aa30d830126d0eab8ef95bf7d0830cf38 | [] | no_license | iljoobaek/Lidar_curb_detection | af2d359627b2228e7678563a7d05137ef0c52a04 | a410b77eecadfc1fe7584de64c7b7a010999ed75 | refs/heads/master | 2022-08-06T01:34:19.848581 | 2020-05-26T16:29:21 | 2020-05-26T16:29:21 | 191,998,099 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | import cv2
import numpy as np
key_map = {
'interrupt_key': ord('i'),
'continue_key': ord('c'),
'next_step_key':ord('n'),
'save_key':ord('s')
}
class VidHotKey():
def __init__(self, saver = None, img = None):
self.state = 'idle'
self.saver = saver
self.img = img
def vid_hot_key(self,key):
""" hot key setting for opencv imshow videos """
key = key&0xff
# entering hot key mode
if key == key_map['interrupt_key']:
self.state = 'interrupt'
key = cv2.waitKey()&0xff
if self.state == 'interrupt':
            # cv2.waitKey returns -1 on timeout (-1 & 0xff == 255), so wait
            # here until a real key is pressed
if key == (-1&0xff):
key = cv2.waitKey()&0xff
# single step
if key == key_map['next_step_key']:
return 0
# leaving hot key mode
if key == key_map['continue_key']:
self.state = 'idle'
return 0
# return anything else
return key
return key
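# Hedged usage sketch (the frame source and window name are placeholders):
#   hk = VidHotKey()
#   while True:
#       cv2.imshow("video", frame)
#       key = hk.vid_hot_key(cv2.waitKey(30))
#       if key == ord('q'):
#           break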
| [
"manojbhat09@gmail.com"
] | manojbhat09@gmail.com |
89c15d22026b3b212352abb9c8bcef7689cf72ea | aa494e8bc6bddbe43ad8a04004b69f8f7cf2854c | /get_audio_index.py | e71fc3dd0fa8e6d31f8864aa928e529eaf9a43c9 | [] | no_license | junyamorita1030/speechtotextstream | 16a6d80089ef4e3cade4357967217fbf2567a9c6 | e43518cb84bc5142c34b28c149e72cc44b6c250c | refs/heads/master | 2020-03-31T04:46:02.344065 | 2018-10-07T09:45:49 | 2018-10-07T09:45:49 | 151,918,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | # -*- coding: utf-8 -*
import pyaudio
import wave
def main():
audio = pyaudio.PyAudio()
    # list the index number of every audio device
for x in range(0, audio.get_device_count()):
print(audio.get_device_info_by_index(x))
if __name__ == '__main__':
main()
| [
"junyam1030@gmail.com"
] | junyam1030@gmail.com |
1faf17cd1781a8b68701e4f734214ded75ebb7f0 | aef8d3773931ea9931f9db2cc8a24bb3fc05855f | /problem16.py | 820f592ac1a07cb58e561ebb5f8ce55a3b1908ed | [] | no_license | t3chboy/python_workshop | 77389d8eb2dc2ad687e405fdabd666e5bd40e38a | 75334bbb846de6f224d03aa447d7b9294f438dc7 | refs/heads/master | 2020-06-23T03:09:36.337736 | 2019-08-18T10:12:06 | 2019-08-18T10:12:06 | 198,489,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | import os
import time
import sys
import aiohttp
import asyncio
POP20_CC = ('CN IN US ID BR PK NG BD RU JP '
'MX PH VN ET EG DE IR TR CD FR').split()
BASE_URL = 'http://52.14.205.215/flags/'
DEST_DIR = 'downloads/'
def save_flag(img, filename):
path = os.path.join(DEST_DIR, filename)
with open(path, 'wb') as fp:
fp.write(img)
async def get_flag(cc_list):
async with aiohttp.ClientSession() as session:
for cc in cc_list:
url = '{}/{cc}/{cc}.gif'.format(BASE_URL, cc=cc.lower())
            async with session.get(url=url) as resp:
image = await resp.read()
show(cc)
save_flag(image, cc.lower() + '.gif')
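# Note: the loop above awaits each download before starting the next, so the
# requests still run one at a time. A concurrent variant (hedged sketch with
# a hypothetical per-country helper) could fan them out with asyncio.gather:
#   async def fetch_one(session, cc): ...
#   await asyncio.gather(*(fetch_one(session, cc) for cc in cc_list))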
def show(text):
print(text, end=' ')
sys.stdout.flush()
def main():
t0 = time.time()
loop = asyncio.get_event_loop()
sorted_flag_list = sorted(POP20_CC)
loop.run_until_complete(get_flag(sorted_flag_list))
count = len(POP20_CC)
elapsed = time.time() - t0
msg = '\n{} flags downloaded in {:.2f}s'
print(msg.format(count, elapsed))
if __name__ == '__main__':
main()
| [
"kaushilrambhia@gofynd.com"
] | kaushilrambhia@gofynd.com |
a96cbe274cfcf2f325cbf2fdaecbdbd0a2d78752 | 0dbb5d8cbba0e7b8876721b28f2ac9adbc2d4ae3 | /TechEdison/wsgi.py | 9e8cc6f988ef6c585d5db0a879571b33a243a58d | [] | no_license | vaibhavmathur91/TechEdison | 6c75d7a79911dcec2a546891c3f0e3ee58a15a02 | f1e415dd42c8ca864acba25720ccebd1e5d26a7f | refs/heads/master | 2021-06-12T11:44:50.843187 | 2017-03-06T23:40:43 | 2017-03-06T23:40:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | """
WSGI config for TechEdison project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "TechEdison.settings")
application = get_wsgi_application()
| [
"vaibhav01921991@gmail.com"
] | vaibhav01921991@gmail.com |
181e7b74badb94f84f21f9c5dc0567adfd905621 | 46bef14fee21048a621e55d5b407aeb67f29c2bf | /bot/plugins/location/constant.py | 92673eb6934d39e93d91b16ad7e301bb72cd6c4f | [] | no_license | TheKevJames/jarvis | 2e9357988cb2a2d4994a4b089ea21a07dbb0b2e1 | 145b0185a5b326ea4404fbde44d4b34c6d5604c2 | refs/heads/master | 2020-03-26T15:03:39.690128 | 2018-05-08T06:16:54 | 2018-05-08T06:16:54 | 43,014,879 | 7 | 8 | null | 2018-05-08T06:18:16 | 2015-09-23T17:02:53 | Python | UTF-8 | Python | false | false | 787 | py | import random
def ACQUIESE():
return random.choice(('Check.', 'Very good, sir.', 'Yes, sir.'))
def ERROR_NOT_ENABLED():
return random.choice((
'Sir, this instance is not weather-ready.',
'Sorry sir, this instance is not configured for weather.'))
ERROR_RETRIEVING_WEATHER = 'I was unable to retrieve the weather.'
PRINT_WEATHER = """
{}, sir. It's {}. The weather in {} is {} degrees Celsius and {}. Today's
sunrise {} at {} and sunset {} at {}.
""".replace('\n', ' ').format
UPDATED_LOCATION = lambda x: \
ACQUIESE() + " I've updated your location to {}.".format(x)
WEATHER_URL = ('http://api.worldweatheronline.com/free/v2/weather.ashx?q={}'
'&format=json&num_of_days=1&includelocation=yes'
'&showlocaltime=yes&key={}')
| [
"noreply@github.com"
] | noreply@github.com |
a5135677f76d08f555511a523ec4ca974fa4de18 | 6c75302f2ab1165fb1bd9707612b9d337512c969 | /Algoritma/wsgi.py | 17a961a11a4b3ab49577f9919a99e8782d476768 | [] | no_license | enghamzasalem/innosoft-django | 59d42d0a9a904d52e096cff59ddd5f17cc4a19b1 | 08b8e81023fcc7450e054807696ae2702aef3611 | refs/heads/master | 2022-09-17T06:39:23.108742 | 2019-08-19T21:22:25 | 2019-08-19T21:22:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for Algoritma project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Algoritma.settings')
application = get_wsgi_application()
| [
"softilnyr16@gmail.com"
] | softilnyr16@gmail.com |
0802931f1b6be864b751e6bba2dcc363d8933f5c | 3aefa0e4106d36a3dfc4a56e686c7a3acc1aabcd | /src/ui.py | d3db21c21b8558e73ae9e5b97c2d1e40b8feb86f | [] | no_license | miguelfAndrade/mancala_game_iart | be1f45acf74773a4819f9fb1f036d45e378966c5 | e203e0b0a1535af225e2ed631ddf2eea9cc73962 | refs/heads/master | 2020-05-22T15:18:34.674135 | 2020-01-22T22:36:40 | 2020-01-22T22:36:40 | 186,405,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | import sys
import time
import math
import tkinter
import mancala
import ai
def main_gui():
root = tkinter.Tk()
root.title("Mancala")
size = w_width, w_height = 800, 600
speed = [2, 2]
white = '#FFFFFF'
board = mancala.init_board(mancala.BOARD_SIZE)
canvas = tkinter.Canvas(root, height = w_height, width = w_width)
canvas.pack()
frame = tkinter.Frame(root, bg = white)
frame.place(relwidth = 1, relheight = 1)
# photo1 = tkinter.PhotoImage(file = "teste_foto.png")
# bkg_label = tkinter.Label(root, image = photo1)
# bkg_label.place(x = -500, y = -500, relwidth = 2, relheight = 2)
root.mainloop()
main_gui() | [
"miguelandrade.96.18@gmail.com"
] | miguelandrade.96.18@gmail.com |
b4f5ffe00fcc9421686197928264fbda0ae6cf14 | d64db60c2cffd9732d2f923d22cf16ee62fb33f7 | /Analetics_py_sql/lexiconMaker/nipostalcodemaker.py | f129184596d5cdd3081e792b0f712bcde3a9892f | [] | no_license | TristanHermant4pm/analyticsPY | 5f47146570e1ebc910fce4bddb5c6f2116a7b58a | 2cd67c2c8e55d870816c411af18508120982ea20 | refs/heads/master | 2020-03-26T05:24:25.762428 | 2018-08-13T09:07:32 | 2018-08-13T09:07:32 | 144,555,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | import os
import sys
import pyodbc
import string
import re
from collections import OrderedDict
ctListIn = []
ctListOut = []
'''
import codecs
types_of_encoding = ["utf8", "cp1252"]
for encoding_type in types_of_encoding:
with codecs.open(".\\lexicons\\censusTownList.txt", encoding = encoding_type, errors ='replace') as file_pointer:
for line in file_pointer.readlines():
ctList.append(line)
'''
file_pointer = open(".\\lexicons\\niPostalcodeRaw.txt", "r")
for line in file_pointer.read().split("\n"):
ctListIn.append(str(line))
file_pointer.close()
print(str(len(ctListIn)))
i = 0
currentTown = ""
for line in ctListIn:
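    # a line containing a lowercase letter is a town name; the following
    # all-uppercase/numeric lines are the postcodes belonging to that town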
g = re.search("[a-z]", line)
if g != None:
currentTown = line
else:
i += 1
if currentTown != "":
ctListOut.append((line, currentTown))
else:
print("debug 1")
print(str(len(ctListOut)))
'''
for key, value in ctListOut:
print(key + " : " + value)
'''
'''
ctListOut = sorted(ctListOut)
'''
with open(".\\lexicons\\niPostalcode.txt", "w+") as filepointer:
filepointer.write("Postal code;City Name\n")
for el in ctListOut:
filepointer.write(el[0] + ";" + el[1] + "\n")
| [
"tristan.hermant@u-psud.fr"
] | tristan.hermant@u-psud.fr |
a4687c23319ff1656225fc5c0d8aa5d029be9256 | f9ad08b66c0e87920b55f95eacd465c510272efe | /part_reid/lib/python_layer/reid_layer/data_layer_dfcd_neg20.py | 493bcfbf9f21778512425c968c0b38e8206fae2c | [] | no_license | xianghan228/Clothes_Retrieval | fa09435162eb855ca0fc0d3d74b52ee1dea445b5 | 7c24eed189917625e375f64d8a9bfacc22dd67ba | refs/heads/master | 2020-03-17T17:28:05.266931 | 2018-03-09T12:07:14 | 2018-03-09T12:07:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,346 | py | # imports
import sys
sys.path.insert(0,'/data1/qtang/samsung/part_reid/caffe/python')
import caffe
import numpy as np
import os
from PIL import Image
import random
import time
import pdb
import pickle
#pdb.set_trace()
class DataLayer(caffe.Layer):
"""g
This is a simple syncronous datalayer for training a Detection model on
PASCAL.
"""
def setup(self, bottom, top):
#print 'setup'
self.top_names = ['data', 'label']
# === Read input parameters ===
# params is a python dictionary with layer parameters.
params=self.check_params(self.param_str)
# store input as class variables
self.batch_size = params['batch_size']
self.input_shape = params['shape']
# === reshape tops ===
top[0].reshape(self.batch_size, 3, self.input_shape[0], self.input_shape[1])
top[1].reshape(self.batch_size)
# Create a batch loader to load the images.
self.dp=DataProvider(params) #prefech up to 8 batches, with 4 workers
#params_p=params.copy()
#params_p['root_folder']=params['root_folder_p']
#params_p['source']=params['source_p']
#self.batch_prefechers = [BatchLoader(self.queue, params if i>0 else params_p) for i in range(3)]
#for worker in self.batch_prefechers:
# worker.start()
# time.sleep(0.25)
def forward(self, bottom, top):
image,label=self.dp.get_batch_vec()
#print len(image)
top[0].data[...] = image
top[1].data[...] = label
def reshape(self, bottom, top):
"""
There is no need to reshape the data, since the input is of fixed size
(rows and columns)
"""
pass
def backward(self, top, propagate_down, bottom):
"""
These layers does not back propagate
"""
pass
def check_params(self, param_str):
params = eval(param_str)
if 'shape' not in params:
params['shape'] = (160, 80)
if 'mean' not in params:
params['mean'] = [104, 117, 123]
if 'mirror' not in params:
params['mirror'] = False
if 'trans' not in params:
params['trans'] = False
if 'pad' not in params:
params['pad'] = 0
if 'max_per_id' not in params:
params['max_per_id'] = 10 #max images per id within one batch
if 'root_folder_p' not in params:
params['root_folder_p'] = params['root_folder']
if 'source_p' not in params:
params['source_p'] = params['source']
return params
class DataProvider:
def __init__(self, params):
#print params
self.batch_size = params['batch_size']
self.id_dict,self.source_len = self.process_list(params['source'])
#print 'self id',self.id_dict
self.max_per_id = params['max_per_id']
self.all_id=self.id_dict.keys()
self.index=0
self.count=0
self.e_c=0
# this class does some simple data-manipulations
self.transformer = SimpleTransformer(params)
with open('/data1/qtang/samsung/part_reid/train/Samsung/alidfcd_ivabox_sambox_partnet_baseline/hard_neg_dfcd_top20.pkl','r') as f:
self.q_neg = pickle.load(f)
def process_list(self, filename):
list_file=open(filename)
try:
content = list_file.read( )
finally:
list_file.close( )
lines = content.split('\n')
source_len=len(lines)
all_id_dict={}
for line in lines:
if len(line.split())<2:
continue
file_name=line.split()[0]
label_id=int(line.split()[1])
if all_id_dict.has_key(label_id):
all_id_dict[label_id].append(file_name)
else:
all_id_dict[label_id]=[file_name]
#print 'len all id dict',len(all_id_dict)
return all_id_dict, source_len
def get_batch_list(self):
#self.count=0
batch_list=[]
cur_range=xrange(self.index,self.index+self.batch_size)
#print 'cur_range',cur_range
for idx in cur_range:
if idx>=len(self.all_id):
random.shuffle(self.all_id)
self.e_c+=1
id=self.all_id[idx%len(self.all_id)]
if id in self.q_neg:
for file_name in self.q_neg[id]:
if self.count < self.batch_size:
batch_list.append((file_name.split()[0],int(file_name.split()[1])))
self.count += 1
else:
self.index=idx%len(self.all_id)
self.count=0
return batch_list
random.shuffle(self.id_dict[id])
for file_name in self.id_dict[id][:self.max_per_id]:
if self.count < self.batch_size:
self.count += 1
batch_list.append((file_name,id))
else:
self.index=idx%len(self.all_id)
self.count=0
#print 'epoch',self.e_c
return batch_list
return batch_list
def get_batch_vec(self):
image_list=[]
label_list=[]
batch_list=self.get_batch_list()
#print 'batchlist len',len(batch_list)
for file_name,label in batch_list:
image=self.transformer.preprocess(file_name)
image_list.append(image)
label_list.append(label)
blobs=(np.array(image_list),np.array(label_list))
#print 'blobs len ',len(blobs)
return blobs
class SimpleTransformer:
"""
SimpleTransformer is a simple class for preprocessing and deprocessing
images for caffe.
"""
def __init__(self, params):
self.mean = params['mean']
self.pad = params['pad']
self.is_mirror = params['mirror']
self.do_trans = params['trans']
self.img_h, self.img_w = params['shape']
self.root_folder=params['root_folder']
def rand_transform(self, image):
M=self.img_h;N=self.img_w
pts1 = np.float32([[0,0,1],[N,0,1],[0,M,1]])
pts1=pts1.T
ratio = 0.02
ratio_s = 0.02
#points
dx = random.uniform(N*(-ratio), N*(ratio))
dy = random.uniform(M*(-ratio), M*(ratio))
ds = random.uniform(-ratio_s,ratio_s)
ds_x = (N-(1+ds)*N)/2
ds_y = (M-(1+ds)*M)/2
if random.uniform(0,1) > 0.3:
pts2 = np.float32([[dx+ds_x,dy+ds_y],[N+dx-ds_x,dy+ds_y],[dx+ds_x,M+dy-ds_y]])
else:
pts2 = np.float32([[N+dx-ds_x,dy+ds_y],[dx+ds_x,dy+ds_y],[N+dx-ds_x,M+dy-ds_y]])
pts2=pts2.T
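        # solve M @ pts1 = pts2 for the 2x3 affine matrix M = [[a,b,c],[d,e,f]]:
        # pts1 holds the source corners as homogeneous columns [x, y, 1]^T,
        # so M = pts2 @ inv(pts1)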
[[a,b,c],[d,e,f]]=np.dot(pts2,np.linalg.inv(pts1))
cols,rows= image.size
#matrix = cv2.getAffineTransform(pts1,pts2)
#dst_img = cv2.warpAffine(image,matrix,(cols,rows))
        dst_img=image.transform((cols,rows),Image.AFFINE,(a,b,c,d,e,f))
return dst_img
def rand_pad_crop(self, image):
padded=np.zeros((3,self.img_h+self.pad*2,self.img_w+self.pad*2))
padded[:,self.pad:self.pad+self.img_h,self.pad:self.pad+self.img_w]=image
left, top = np.random.randint(self.pad*2+1), np.random.randint(self.pad*2+1)
return padded[:, top:top+self.img_h, left:left+self.img_w]
def preprocess(self, file_name):
"""
preprocess() emulate the pre-processing occuring in the vgg16
"""
full_name=os.path.join(self.root_folder, file_name)
#pdb.set_trace()
if not os.path.isfile(full_name):
print "Image file %s not exist!"%full_name
return None
#image = cv2.imread(full_name, cv2.IMREAD_COLOR)
#image = cv2.resize(image,(self.img_w,self.img_h))
image=Image.open(full_name)
image=image.resize((self.img_w,self.img_h))
if self.do_trans:
image=self.rand_transform(image)
image = np.asarray(image, np.float32)
image -= self.mean
image = image.transpose((2, 0, 1))
if self.is_mirror and np.random.random() < 0.5:
image=image[:,:,::-1]
if self.pad>0:
image=self.rand_pad_crop(image)
return image
if __name__ == '__main__':
print 'Hello world!'
| [
"qtang@localhost.localdomain"
] | qtang@localhost.localdomain |
c9d19954f5d9de2c97e3ed2d8f73a4a166ae8792 | dd02c3961cd2fb801345c9f3bf212bc07beb2d81 | /tic_tac_toe.py | 0c58c5e80d544b23f10254fb6d20a3f270679408 | [] | no_license | benryan03/Tic-Tac-Toe | b6e7fb511a2ff2fe192118ad3818167f19564229 | f2cf783f3d1bb3f3dae9437f60530653b29725f9 | refs/heads/master | 2022-01-09T21:39:31.578527 | 2019-05-21T15:03:43 | 2019-05-21T15:03:43 | 187,274,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,245 | py | #Imports tkinter module (Used for drawing GUI).
import tkinter
#Sets Root variable (base window) to tkinter.Tk class.
root = tkinter.Tk()
#Sets title of window
root.title("Tic-Tac-Toe")
#Variables for image imports - must be inside Root.
x = tkinter.PhotoImage(file="x.png")
o = tkinter.PhotoImage(file="o.png")
blank = tkinter.PhotoImage(file="blank.png")
#Defining other variables used for game status.
button1_status = "blank"
button2_status = "blank"
button3_status = "blank"
button3_status = "blank"
button4_status = "blank"
button5_status = "blank"
button6_status = "blank"
button7_status = "blank"
button8_status = "blank"
button9_status = "blank"
turn = 0
gameover = False
def click(status, num): #This function is called when one of the buttons is clicked.
global button1_status
global button2_status
    global button3_status
global button4_status
global button5_status
global button6_status
global button7_status
global button8_status
global button9_status
global turn
global gameover
if gameover == False:
#This should probably be turned into a Loop at some point.
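        # Hedged sketch of that refactor (assumes the widgets and statuses
        # are kept in lists, e.g. buttons = [button1, ..., button9] and
        # statuses = [button1_status, ..., button9_status]):
        #   if statuses[num - 1] == "blank":
        #       mark, img = ("x", x) if turn % 2 == 0 else ("o", o)
        #       buttons[num - 1].config(image=img)
        #       statuses[num - 1] = mark
        #       turn = turn + 1
        #       label.config(text="O's turn." if mark == "x" else "X's turn.")
        #       win_check()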
if num == 1:
if button1_status == "blank" and turn % 2 == 0:
button1.config(image=x)
button1_status = "x"
turn = turn + 1
label.config(text="O's turn.")
win_check()
elif button1_status == "blank" and turn % 2 != 0:
button1.config(image=o)
button1_status = "o"
turn = turn + 1
label.config(text="X's turn.")
win_check()
elif num == 2:
if button2_status == "blank" and turn % 2 == 0:
button2.config(image=x)
button2_status = "x"
turn = turn + 1
label.config(text="O's turn.")
win_check()
elif button2_status == "blank" and turn % 2 != 0:
button2.config(image=o)
button2_status = "o"
turn = turn + 1
label.config(text="X's turn.")
win_check()
elif num == 3:
if button3_status == "blank" and turn % 2 == 0:
button3.config(image=x)
button3_status = "x"
turn = turn + 1
label.config(text="O's turn.")
win_check()
elif button3_status == "blank" and turn % 2 != 0:
button3.config(image=o)
button3_status = "o"
turn = turn + 1
label.config(text="X's turn.")
win_check()
elif num == 4:
if button4_status == "blank" and turn % 2 == 0:
button4.config(image=x)
button4_status = "x"
turn = turn + 1
label.config(text="O's turn.")
win_check()
elif button4_status == "blank" and turn % 2 != 0:
button4.config(image=o)
button4_status = "o"
turn = turn + 1
label.config(text="X's turn.")
win_check()
elif num == 5:
if button5_status == "blank" and turn % 2 == 0:
button5.config(image=x)
button5_status = "x"
turn = turn + 1
label.config(text="O's turn.")
win_check()
elif button5_status == "blank" and turn % 2 != 0:
button5.config(image=o)
button5_status = "o"
turn = turn + 1
label.config(text="X's turn.")
win_check()
elif num == 6:
if button6_status == "blank" and turn % 2 == 0:
button6.config(image=x)
button6_status = "x"
turn = turn + 1
label.config(text="O's turn.")
win_check()
elif button6_status == "blank" and turn % 2 != 0:
button6.config(image=o)
button6_status = "o"
turn = turn + 1
label.config(text="X's turn.")
win_check()
elif num == 7:
if button7_status == "blank" and turn % 2 == 0:
button7.config(image=x)
button7_status = "x"
turn = turn + 1
label.config(text="O's turn.")
win_check()
elif button7_status == "blank" and turn % 2 != 0:
button7.config(image=o)
button7_status = "o"
turn = turn + 1
label.config(text="X's turn.")
win_check()
elif num == 8:
if button8_status == "blank" and turn % 2 == 0:
button8.config(image=x)
button8_status = "x"
turn = turn + 1
label.config(text="O's turn.")
win_check()
elif button8_status == "blank" and turn % 2 != 0:
button8.config(image=o)
button8_status = "o"
turn = turn + 1
label.config(text="X's turn.")
win_check()
elif num == 9:
if button9_status == "blank" and turn % 2 == 0:
button9.config(image=x)
button9_status = "x"
turn = turn + 1
label.config(text="O's turn.")
win_check()
elif button9_status == "blank" and turn % 2 != 0:
button9.config(image=o)
button9_status = "o"
turn = turn + 1
label.config(text="X's turn.")
win_check()
def win_check(): #This function is called at the end of each button click.
global button1_status
global button2_status
    global button3_status
global button4_status
global button5_status
global button6_status
global button7_status
global button8_status
global button9_status
global gameover
#This should probably be turned into a Loop at some point.
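    # Hedged sketch of the loop version (assumes statuses is the list of the
    # nine button statuses in board order):
    #   wins = [(0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6)]
    #   for a, b, c in wins:
    #       for mark in ("x", "o"):
    #           if statuses[a] == statuses[b] == statuses[c] == mark:
    #               label.config(text=mark.upper() + " wins!")
    #               gameover = True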
if button1_status == "x" and button2_status == "x" and button3_status == "x":
label.config(text="X wins!")
gameover = True
elif button4_status == "x" and button5_status == "x" and button6_status == "x":
label.config(text="X wins!")
gameover = True
elif button7_status == "x" and button8_status == "x" and button9_status == "x":
label.config(text="X wins!")
gameover = True
elif button1_status == "x" and button4_status == "x" and button7_status == "x":
label.config(text="X wins!")
gameover = True
elif button2_status == "x" and button5_status == "x" and button8_status == "x":
label.config(text="X wins!")
gameover = True
elif button3_status == "x" and button6_status == "x" and button9_status == "x":
label.config(text="X wins!")
gameover = True
elif button1_status == "x" and button5_status == "x" and button9_status == "x":
label.config(text="X wins!")
gameover = True
elif button3_status == "x" and button5_status == "x" and button7_status == "x":
label.config(text="X wins!")
gameover = True
elif button1_status == "o" and button2_status == "o" and button3_status == "o":
label.config(text="O wins!")
gameover = True
elif button4_status == "o" and button5_status == "o" and button6_status == "o":
label.config(text="O wins!")
gameover = True
elif button7_status == "o" and button8_status == "o" and button9_status == "o":
label.config(text="O wins!")
gameover = True
elif button1_status == "o" and button4_status == "o" and button7_status == "o":
label.config(text="O wins!")
gameover = True
elif button2_status == "o" and button5_status == "o" and button8_status == "o":
label.config(text="O wins!")
gameover = True
elif button3_status == "o" and button6_status == "o" and button9_status == "o":
label.config(text="O wins!")
gameover = True
elif button1_status == "o" and button5_status == "o" and button9_status == "o":
label.config(text="O wins!")
gameover = True
elif button3_status == "o" and button5_status == "o" and button7_status == "o":
label.config(text="O wins!")
gameover = True
canvas = tkinter.Canvas(root, width=318, height=358) #Defines Canvas variable (area in window where GUI elements can be arranged), with location inside Root window
canvas.pack() #activates canvas variable using "pack" placement method
frame1 = tkinter.Frame(canvas) #frame1 is the space for the label that displays the game status.
frame1.place(width=318, height=40)
frame2 = tkinter.Frame(canvas) #frame2 is the space for the 9 buttons.
frame2.place(y=40, width=318, height=318)
label = tkinter.Label(frame1, text="X's turn.", bg="white", font=("", 16)) #label displays the game status.
label.place(rely=.1, width=318, height=30)
#Below, each variable for the 9 button is defined and then placed inside frame2.
button1 = tkinter.Button(frame2, image=blank, width=100, height=100, command=lambda: click(button1_status, 1))
button1.grid(row=0, column=0)
button2 = tkinter.Button(frame2, image=blank, width=100, height=100, command=lambda: click(button2_status, 2))
button2.grid(row=0, column=1)
button3 = tkinter.Button(frame2, image=blank, width=100, height=100, command=lambda: click(button3_status, 3))
button3.grid(row=0, column=2)
button4 = tkinter.Button(frame2, image=blank, width=100, height=100, command=lambda: click(button4_status, 4))
button4.grid(row=1, column=0)
button5 = tkinter.Button(frame2, image=blank, width=100, height=100, command=lambda: click(button5_status, 5))
button5.grid(row=1, column=1)
button6 = tkinter.Button(frame2, image=blank, width=100, height=100, command=lambda: click(button6_status, 6))
button6.grid(row=1, column=2)
button7 = tkinter.Button(frame2, image=blank, width=100, height=100, command=lambda: click(button7_status, 7))
button7.grid(row=2, column=0)
button8 = tkinter.Button(frame2, image=blank, width=100, height=100, command=lambda: click(button8_status, 8))
button8.grid(row=2, column=1)
button9 = tkinter.Button(frame2, image=blank, width=100, height=100, command=lambda: click(button9_status, 9))
button9.grid(row=2, column=2)
root.mainloop() #This line causes the Root window to remain on the screen. | [
"noreply@github.com"
] | noreply@github.com |
62e1dee7330ac50ccf9663dd18a2701306b66fa2 | 31b5bd2fc8306ff670a34ecebeb1689db13fa11f | /0x0F-python-object_relational_mapping/5-filter_cities.py | da9c1e56f747ad5e795b5dc6fac0ba11687c51c0 | [] | no_license | fortune-07/alx-higher_level_programming | 88bbf7d9948afd04f740292c44da16919954bd31 | 0be342723c6f702ea6cc4a7140091484f132b378 | refs/heads/main | 2023-08-31T12:35:44.289545 | 2021-10-20T09:28:15 | 2021-10-20T09:28:15 | 361,701,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | #!/usr/bin/python3
# Displays all cities of a given state from the
# cities table of the database hbtn_0e_4_usa.
# Safe from SQL injections.
"""# Usage: ./5-filter_cities.py <mysql username> \
# <mysql password> \
# <database name> \
# <state name searched>"""
import sys
import MySQLdb
if __name__ == "__main__":
db = MySQLdb.connect(user=sys.argv[1], passwd=sys.argv[2], db=sys.argv[3])
c = db.cursor()
c.execute("SELECT * FROM `cities` as `c` \
INNER JOIN `states` as `s` \
ON `c`.`state_id` = `s`.`id` \
ORDER BY `c`.`id`")
print(", ".join([ct[2] for ct in c.fetchall() if ct[4] == sys.argv[4]]))
| [
"fortuneniguel@gmail.com"
] | fortuneniguel@gmail.com |
568adf917a33a914cba15a49c8c76eec78d9e70c | 8fa8ded3772dd7a124c1bbb91fc109ed2b63574b | /mycelium/apps/data_import/ajax_backends.py | 1db6a810c321f46ba03880b7a3f42cb1ee69194c | [] | no_license | skoczen/mycelium | 3642b0f5e5ea03d609a3e499c7ad68092101dce0 | da0f169163f4dc93e2dc2b0d934abf4f18c18af0 | refs/heads/master | 2020-04-10T09:21:46.893254 | 2014-05-20T02:27:06 | 2014-05-20T02:27:06 | 2,114,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,301 | py | from ajaxuploader.backends.s3 import S3UploadBackend
from django.core.files.storage import default_storage
from spreadsheets.spreadsheet import SpreadsheetAbstraction
import time
class DataImportUploadBackend(S3UploadBackend):
def update_filename(self, request, filename):
return "import/%s/%s.%s" % (request.account.pk, int(time.time()), filename, )
def upload_complete(self, request, filename, **kwargs):
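        # _pool and _mp are presumably set up by the parent S3UploadBackend:
        # shut down the chunk-upload worker pool, then finalize the S3
        # multipart upload before reading the file back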
self._pool.close()
self._pool.join()
self._mp.complete_upload()
# filename is a file at s3. Get it.
f = default_storage.open(filename, 'r')
# parse the file.
s = SpreadsheetAbstraction(request.account, f, request.import_type, filename=filename)
f.close()
# get the number of rows
num_rows = s.num_rows
# see if it has a header
header_row = []
has_header = s.has_header
if s.has_header:
header_row = s.header_row
# get the first five columns
first_rows = s.get_rows(0,8)
return_dict = {
'num_rows': num_rows,
'first_rows': first_rows,
'header_row': header_row,
'has_header': has_header,
'filename':filename,
}
return return_dict | [
"steven@quantumimagery.com"
] | steven@quantumimagery.com |
0e8add5503c0f19cc83c852cd3de5c7470b2e2ec | 3bfee37d0780ab3663e1424c6fb9833d25901ca1 | /django/week3/portfolio/resume/migrations/0005_auto_20180210_0226.py | 4f285127b7b12d1a36a58e57471c4db2f739bbf9 | [] | no_license | ArjunPadaliya/COMP-805 | 39a304d08c75f8c5746e7ad0b950192d8df29778 | af66da9ce5b3df549d11c6ec5268bb9fc347cbb0 | refs/heads/master | 2021-05-08T22:58:42.507726 | 2018-03-20T14:41:01 | 2018-03-20T14:41:01 | 119,552,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | # Generated by Django 2.0.1 on 2018-02-10 02:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resume', '0004_auto_20180210_0202'),
]
operations = [
migrations.CreateModel(
name='Education',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('institution_name', models.CharField(max_length=256, null=True)),
('location', models.CharField(max_length=64, null=True)),
('degree', models.CharField(max_length=64, null=True)),
('major', models.CharField(max_length=64, null=True)),
('gpa', models.FloatField(blank=True, null=True)),
],
),
migrations.AlterField(
model_name='experience',
name='location',
field=models.CharField(max_length=64, null=True),
),
]
| [
"ap1170@wildcats.unh.edu"
] | ap1170@wildcats.unh.edu |
aa8b7d91aabaaf25a60c166a664d74ba941f4436 | 24a3a8c46b7c78d6c57aec7badf9b394e3727112 | /guiEinheitAnlegen.py | d57d0e5474bc6ebf7b9988c728f188d0c8624ad2 | [] | no_license | marcelfeige/TrainingPython | 0020f2c106fed01fb77537d3e0c2dd903315d1b5 | d8c1f74dea610776ba765bb9379e2dc48ead2fc8 | refs/heads/master | 2023-06-07T09:16:34.906765 | 2021-07-06T12:10:53 | 2021-07-06T12:10:53 | 383,455,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,278 | py | from tkinter import *
from tkinter import ttk
from datenbankTrainingseinheiten import *
from datenbankUebung import *
from tkcalendar import *
class guiEinheitAnlegen:
def __init__(self, master, tNameDatenbank=None):
self.master = master
self.master.wm_title("Trainingseinheit anlegen")
tTitel = "Neue Trainingseinheit anlegen"
tBtnOK = "Hinzufügen"
tBtnEnd = "Ende"
        # define font family and sizes
appFontStyle = "Calibri"
appFontSizeSmall = 12
appFontSizeMedium = 16
appFontSizeBig = 14
appFontSmall = appFontStyle + ", " + str(appFontSizeSmall)
appFontMedium = appFontStyle + ", " + str(appFontSizeMedium)
appFontBig = appFontStyle + ", " + str(appFontSizeBig)
# Label
self.labTitel = Label(master, text =tTitel, font = appFontBig)
self.labTitel.grid(row=0, columnspan=2)
        # create the caption labels
self.labKategorie = Label(self.master, text="Kategorie", font=appFontSmall) \
.grid(row=1, column=0)
self.labBezeichnung = Label(self.master, text="Bezeichnung", font=appFontSmall) \
.grid(row=2, column=0)
self.labGruppe = Label(self.master, text="Gruppe", font=appFontSmall) \
.grid(row=3, column= 0)
dbUebungen = datenbankUebung()
        # OptionMenu
        # mapping from categories to exercise names
self.bezeichnungenBeine = dbUebungen.getBezeichnung("Beine")
self.bezeichnungenOberkoerper = dbUebungen.getBezeichnung("Oberkörper")
self.bezeichnungenRuecken = dbUebungen.getBezeichnung("Rücken")
self.bezeichnungenArme = dbUebungen.getBezeichnung("Arme")
self.dict = {"Beine": self.bezeichnungenBeine,
"Oberkörper": self.bezeichnungenOberkoerper,
"Rücken": self.bezeichnungenRuecken,
"Arme" : self.bezeichnungenArme}
self.varKategorie = StringVar()
self.varBezeichnung = StringVar()
self.varKategorie.trace("w", self.updateKategorie)
self.omKategorie = OptionMenu(self.master, self.varKategorie, *self.dict.keys())
self.omBezeichnung = OptionMenu(self.master, self.varBezeichnung, "")
self.varKategorie.set("Beine")
self.omKategorie.grid(row = 1, column = 1)
self.omBezeichnung.grid(row = 2, column = 1)
# Label
self.labAnzahlSaetze = Label(self.master, text = "Anzahl Sätze", font = appFontSmall)
self.labAnzahlSaetze.grid(row = 3, column = 0)
self.labAnzahlWiederrholungen = Label(self.master, text="Anzahl Wiederholungen", font = appFontSmall)
self.labAnzahlWiederrholungen.grid(row = 4, column = 0)
self.labGewicht = Label(self.master, text = "Gewicht (in kg)", font = appFontSmall)
self.labGewicht.grid(row = 5, column = 0)
self.labKalender = Label(self.master, text = "Datum", font = appFontSmall)
self.labKalender.grid(row = 6, column = 0)
        # entry widgets
self.entAnzahlSaetze = Entry(self.master)
self.entAnzahlSaetze.grid(row = 3, column = 1)
self.entAnzahlWiederholungen = Entry(self.master)
self.entAnzahlWiederholungen.grid(row=4, column=1)
self.entGewicht = Entry(self.master)
self.entGewicht.grid(row = 5, column = 1)
        # calendar date picker
self.entDatum = DateEntry(self.master, width = 12, background="grey",
foreground="white", borderwidth=2, locale="de_DE", date_pattern="dd.mm.y")
self.entDatum.grid(row=6, column=1)
self.tvAusgabe = ttk.Treeview(self.master)
self.tvAusgabe["columns"] = ("Id", "Kategorie", "Bezeichnung", "Sätze", "Wiederholungen", "Gewicht", "Datum")
self.tvAusgabe.column("#0", width = 0, stretch=NO)
self.tvAusgabe.column("Id", anchor = CENTER, width=120)
self.tvAusgabe.column("Kategorie", anchor = CENTER, width=120)
self.tvAusgabe.column("Bezeichnung", anchor = CENTER, width=120)
self.tvAusgabe.column("Sätze", anchor = CENTER, width=120)
self.tvAusgabe.column("Wiederholungen", anchor = CENTER, width=120)
self.tvAusgabe.column("Gewicht", anchor = CENTER, width=120)
self.tvAusgabe.column("Gewicht", anchor=CENTER, width=120)
self.tvAusgabe.heading("Datum", text="Gewicht", anchor=CENTER)
self.tvAusgabe.heading("#0", text = "", anchor = CENTER)
self.tvAusgabe.heading("Id", text = "Id", anchor = CENTER)
self.tvAusgabe.heading("Kategorie", text = "Kategorie", anchor = CENTER)
self.tvAusgabe.heading("Bezeichnung", text="Bezeichnung", anchor = CENTER)
self.tvAusgabe.heading("Sätze", text = "Sätze", anchor = CENTER)
self.tvAusgabe.heading("Wiederholungen", text="Wiederholungen", anchor = CENTER)
self.tvAusgabe.heading("Gewicht", text = "Gewicht", anchor = CENTER)
self.tvAusgabe.heading("Datum", text="Gewicht", anchor=CENTER)
self.tvAusgabe.grid(row = 7, columnspan = 2, padx=10)
# Scrollbar
self.scrollbar = Scrollbar(self.master)
self.scrollbar.grid(row=7, column=3, sticky="nsew", padx=10)
self.tvAusgabe.config(yscrollcommand=self.scrollbar.set)
self.scrollbar.config(command=self.tvAusgabe.yview)
        # label for status information
self.labStatus = Label(self.master, text="Keine Statusmeldungen vorhanden", font = appFontSmall)
self.labStatus.grid(row = 8, columnspan = 2)
        # label for the number of records
self.labDatensaetze = Label(self.master, text="", font = appFontSmall)
self.labDatensaetze.grid(row = 9, columnspan = 2, padx = 30, pady = 20)
# Button
self.btnOK = Button(self.master, text=tBtnOK, font=appFontSmall, command = self.einheitSpeichern, width=20)
self.btnEnd = Button(self.master, text=tBtnEnd, font=appFontSmall, command = self.master.destroy, width=20)
self.btnOK.grid(row = 10, column = 0, padx = 20, pady = 0)
self.btnEnd.grid(row = 10, column = 1, padx = 20, pady = 20)
        # let rows and columns grow with the window size (weight = 1)
self.master.columnconfigure(0, weight = 1)
self.master.columnconfigure(1, weight = 1)
for row in range(10):
self.master.rowconfigure(row, weight = 1)
        # new database object
dbTrainingseinheiten = datenbankTrainingseinheiten()
        # initialize the database and show the return value in the status line
self.labStatus["text"] = dbTrainingseinheiten.initDB()
        # store the result of the SQL statement in the variable result
result = dbTrainingseinheiten.leseDB()
        # fill the treeview
count = 0
for row in result:
self.tvAusgabe.insert(parent="", index=count, iid=count, text="", values=result[count])
count = count + 1
self.labDatensaetze["text"] = "Anzahl Datensätze: " + str(count)
def einheitSpeichern(self):
dbTrainingseinheiten = datenbankTrainingseinheiten()
self.labStatus["text"] = ""
        # read the individual input fields and store the data
dbTrainingseinheiten.schreibDB(self.varKategorie.get(), self.varBezeichnung.get(), self.entAnzahlSaetze.get(), self.entAnzahlWiederholungen.get(),
self.entGewicht.get(), self.entDatum.get())
        # read the database
result = dbTrainingseinheiten.leseDB()
count = 0
for i in self.tvAusgabe.get_children():
self.tvAusgabe.delete(i)
for row in result:
self.tvAusgabe.insert(parent = "", index = count, iid = count, text = "", values = result[count])
count = count + 1
self.labDatensaetze["text"] = "Anzahl Datensätze: " + str(count)
def updateKategorie(self, *args):
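        # called via varKategorie.trace("w", ...): rebuild the exercise
        # OptionMenu so it only offers exercises of the selected category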
kategorien = self.dict[self.varKategorie.get()]
self.varBezeichnung.set(kategorien[0])
menu = self.omBezeichnung["menu"]
menu.delete(0, "end")
for kategorie in kategorien:
menu.add_command(label=kategorie,
command=lambda bezeichnung=kategorie: self.varBezeichnung.set(bezeichnung))
| [
"marcel_feige@hotmail.de"
] | marcel_feige@hotmail.de |
53d7a1c756ba1e532f3b3fc6092768370b3a8b40 | 8eac548c15cdabeb662c9af2ca67994f92c255ee | /词性标注&词性提取/Word_Marking_test.py | 75c73dfd2590d58fbea3ac14a141dd71b9fe05c0 | [] | no_license | yaolinxia/Chinese-word-segmentation | f7de7317509dc7ed53bb40e5a1367206bd36abc1 | 42d619ec838fe2f8c98822b15c69c640972b984e | refs/heads/master | 2021-07-06T19:52:58.916128 | 2019-04-15T14:08:54 | 2019-04-15T14:08:54 | 117,522,537 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,607 | py | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
#1. segment the text first and store the tokens in an array
#2. iterate over the array and tag each token's part of speech
import sys
import glob
import os
import xml.dom.minidom
import jieba
import jieba.posseg as pseg
# iterate over all xml files under a folder; path is the folder holding the xml
# part-of-speech tagging
def WorkMark(path):
    #textCut=jieba.cut(text,cut_all=False)
    # part-of-speech tagging
    with open(path, encoding="utf-8") as file_object:
        contents = file_object.read()
    # pseg.cut returns a one-shot generator of (word, flag) pairs, so
    # collect the output in a single pass
    result = ''
    for word, flag in pseg.cut(contents):
        print(word, flag)
        result += '%s/%s ' % (word, flag)
    print('success WorkMark')
return result
#路径path下的内容写入进text中
def write_WorkMark(path,text):
f=open(path,'w',encoding='utf-8')
f.write(text)
f.close()
print('success write_WorkMark')
if __name__=='__main__':
#path1 = r'G:\研究生\法律文书\民事一审测试集\民事一审测试集'
    #output path for the results
path2 = r'H:\python-workspace\test-path\test_QW_1-29.txt'
#path3 = r'H:\python-workspace\\1-5-testWenShu\\stopword.dic'
    #path4: path of the extracted fields
path4 = r'H:\python-workspace\1-12-testWenShu\test_QW_addDic.txt'
#path4=r'C:\Users\LFK\Desktop\1.txt'
#text = read_XMLFile(path1)
#write_segmentFile(path4, text)
# text=read_txt(path4)
result = WorkMark(path4)
write_WorkMark(path2,result)
"""
import jieba.posseg as pseg
words = pseg.cut("我爱北京天安门")
for word,flag in words:
print('%s %s' % (word, flag))
""" | [
"18860976931@163.com"
] | 18860976931@163.com |
ae2a4491e45e20f804e4e6339f271af09b072786 | 931a3304ea280d0a160acb87e770d353368d7d7d | /vendor/swagger_client/models/get_characters_character_id_attributes_ok.py | b7705fa3e23be56e5043bffcb69cf65b385f96b8 | [] | no_license | LukeS5310/Broadsword | c44786054e1911a96b02bf46fe4bdd0f5ad02f19 | 3ba53d446b382c79253dd3f92c397cca17623155 | refs/heads/master | 2021-09-08T00:05:26.296092 | 2017-10-24T07:01:48 | 2017-10-24T07:01:48 | 105,143,152 | 0 | 1 | null | 2017-11-03T14:29:38 | 2017-09-28T12:03:19 | Python | UTF-8 | Python | false | false | 9,633 | py | # coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.6.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class GetCharactersCharacterIdAttributesOk(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, accrued_remap_cooldown_date=None, bonus_remaps=None, charisma=None, intelligence=None, last_remap_date=None, memory=None, perception=None, willpower=None):
"""
GetCharactersCharacterIdAttributesOk - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'accrued_remap_cooldown_date': 'datetime',
'bonus_remaps': 'int',
'charisma': 'int',
'intelligence': 'int',
'last_remap_date': 'datetime',
'memory': 'int',
'perception': 'int',
'willpower': 'int'
}
self.attribute_map = {
'accrued_remap_cooldown_date': 'accrued_remap_cooldown_date',
'bonus_remaps': 'bonus_remaps',
'charisma': 'charisma',
'intelligence': 'intelligence',
'last_remap_date': 'last_remap_date',
'memory': 'memory',
'perception': 'perception',
'willpower': 'willpower'
}
self._accrued_remap_cooldown_date = accrued_remap_cooldown_date
self._bonus_remaps = bonus_remaps
self._charisma = charisma
self._intelligence = intelligence
self._last_remap_date = last_remap_date
self._memory = memory
self._perception = perception
self._willpower = willpower
@property
def accrued_remap_cooldown_date(self):
"""
Gets the accrued_remap_cooldown_date of this GetCharactersCharacterIdAttributesOk.
Neural remapping cooldown after a character uses remap accrued over time
:return: The accrued_remap_cooldown_date of this GetCharactersCharacterIdAttributesOk.
:rtype: datetime
"""
return self._accrued_remap_cooldown_date
@accrued_remap_cooldown_date.setter
def accrued_remap_cooldown_date(self, accrued_remap_cooldown_date):
"""
Sets the accrued_remap_cooldown_date of this GetCharactersCharacterIdAttributesOk.
Neural remapping cooldown after a character uses remap accrued over time
:param accrued_remap_cooldown_date: The accrued_remap_cooldown_date of this GetCharactersCharacterIdAttributesOk.
:type: datetime
"""
self._accrued_remap_cooldown_date = accrued_remap_cooldown_date
@property
def bonus_remaps(self):
"""
Gets the bonus_remaps of this GetCharactersCharacterIdAttributesOk.
Number of available bonus character neural remaps
:return: The bonus_remaps of this GetCharactersCharacterIdAttributesOk.
:rtype: int
"""
return self._bonus_remaps
@bonus_remaps.setter
def bonus_remaps(self, bonus_remaps):
"""
Sets the bonus_remaps of this GetCharactersCharacterIdAttributesOk.
Number of available bonus character neural remaps
:param bonus_remaps: The bonus_remaps of this GetCharactersCharacterIdAttributesOk.
:type: int
"""
self._bonus_remaps = bonus_remaps
@property
def charisma(self):
"""
Gets the charisma of this GetCharactersCharacterIdAttributesOk.
charisma integer
:return: The charisma of this GetCharactersCharacterIdAttributesOk.
:rtype: int
"""
return self._charisma
@charisma.setter
def charisma(self, charisma):
"""
Sets the charisma of this GetCharactersCharacterIdAttributesOk.
charisma integer
:param charisma: The charisma of this GetCharactersCharacterIdAttributesOk.
:type: int
"""
if charisma is None:
raise ValueError("Invalid value for `charisma`, must not be `None`")
self._charisma = charisma
@property
def intelligence(self):
"""
Gets the intelligence of this GetCharactersCharacterIdAttributesOk.
intelligence integer
:return: The intelligence of this GetCharactersCharacterIdAttributesOk.
:rtype: int
"""
return self._intelligence
@intelligence.setter
def intelligence(self, intelligence):
"""
Sets the intelligence of this GetCharactersCharacterIdAttributesOk.
intelligence integer
:param intelligence: The intelligence of this GetCharactersCharacterIdAttributesOk.
:type: int
"""
if intelligence is None:
raise ValueError("Invalid value for `intelligence`, must not be `None`")
self._intelligence = intelligence
@property
def last_remap_date(self):
"""
Gets the last_remap_date of this GetCharactersCharacterIdAttributesOk.
Datetime of last neural remap, including usage of bonus remaps
:return: The last_remap_date of this GetCharactersCharacterIdAttributesOk.
:rtype: datetime
"""
return self._last_remap_date
@last_remap_date.setter
def last_remap_date(self, last_remap_date):
"""
Sets the last_remap_date of this GetCharactersCharacterIdAttributesOk.
Datetime of last neural remap, including usage of bonus remaps
:param last_remap_date: The last_remap_date of this GetCharactersCharacterIdAttributesOk.
:type: datetime
"""
self._last_remap_date = last_remap_date
@property
def memory(self):
"""
Gets the memory of this GetCharactersCharacterIdAttributesOk.
memory integer
:return: The memory of this GetCharactersCharacterIdAttributesOk.
:rtype: int
"""
return self._memory
@memory.setter
def memory(self, memory):
"""
Sets the memory of this GetCharactersCharacterIdAttributesOk.
memory integer
:param memory: The memory of this GetCharactersCharacterIdAttributesOk.
:type: int
"""
if memory is None:
raise ValueError("Invalid value for `memory`, must not be `None`")
self._memory = memory
@property
def perception(self):
"""
Gets the perception of this GetCharactersCharacterIdAttributesOk.
perception integer
:return: The perception of this GetCharactersCharacterIdAttributesOk.
:rtype: int
"""
return self._perception
@perception.setter
def perception(self, perception):
"""
Sets the perception of this GetCharactersCharacterIdAttributesOk.
perception integer
:param perception: The perception of this GetCharactersCharacterIdAttributesOk.
:type: int
"""
if perception is None:
raise ValueError("Invalid value for `perception`, must not be `None`")
self._perception = perception
@property
def willpower(self):
"""
Gets the willpower of this GetCharactersCharacterIdAttributesOk.
willpower integer
:return: The willpower of this GetCharactersCharacterIdAttributesOk.
:rtype: int
"""
return self._willpower
@willpower.setter
def willpower(self, willpower):
"""
Sets the willpower of this GetCharactersCharacterIdAttributesOk.
willpower integer
:param willpower: The willpower of this GetCharactersCharacterIdAttributesOk.
:type: int
"""
if willpower is None:
raise ValueError("Invalid value for `willpower`, must not be `None`")
self._willpower = willpower
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, GetCharactersCharacterIdAttributesOk):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"cyberlibertyx@gmail.com"
] | cyberlibertyx@gmail.com |
9d22a0a64c1e33a0c6e89db8524cf7e1ad8c864c | afe00cfd4f01be872b8fecbecc74b59edbd95bd5 | /craigslist/__init__.py | 4da61ec92f3e2777962006a3960690de11a9fdf6 | [] | no_license | SamVarney/craigslist-scraper | c6e760f9267c498059aff40fef32677fa66c09cd | a721639d4db6ed46bda905ed1d0f2b3c4a586a15 | refs/heads/master | 2021-07-08T00:43:58.714903 | 2017-10-05T20:14:54 | 2017-10-05T20:14:54 | 103,089,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,480 | py | import logging
try:
from Queue import Queue # PY2
except ImportError:
from queue import Queue # PY3
from threading import Thread
try:
from urlparse import urljoin # PY2
except ImportError:
from urllib.parse import urljoin # PY3
from bs4 import BeautifulSoup
import requests
from requests.exceptions import RequestException
from six import iteritems
from six.moves import range
from .sites import get_all_sites
ALL_SITES = get_all_sites() # All the Craiglist sites
RESULTS_PER_REQUEST = 100 # Craigslist returns 100 results per request
def requests_get(*args, **kwargs):
"""
Retries if a RequestException is raised (could be a connection error or
a timeout).
"""
logger = kwargs.pop('logger', None)
try:
return requests.get(*args, **kwargs)
except RequestException as exc:
if logger:
logger.warning('Request failed (%s). Retrying ...', exc)
return requests.get(*args, **kwargs)
def get_list_filters(url):
list_filters = {}
response = requests_get(url)
soup = BeautifulSoup(response.content, 'html.parser')
for list_filter in soup.find_all('div', class_='search-attribute'):
filter_key = list_filter.attrs['data-attr']
filter_labels = list_filter.find_all('label')
options = [opt.text.strip() for opt in filter_labels]
list_filters[filter_key] = {'url_key': filter_key, 'value': options}
return list_filters
class CraigslistBase(object):
""" Base class for all Craiglist wrappers. """
url_templates = {
'base': 'http://%(site)s.craigslist.org',
'no_area': 'http://%(site)s.craigslist.org/search/%(category)s',
'area': 'http://%(site)s.craigslist.org/search/%(area)s/%(category)s'
}
default_site = 'sfbay'
default_category = None
base_filters = {
'query': {'url_key': 'query', 'value': None},
'search_titles': {'url_key': 'srchType', 'value': 'T'},
'has_image': {'url_key': 'hasPic', 'value': 1},
'posted_today': {'url_key': 'postedToday', 'value': 1},
'search_distance': {'url_key': 'search_distance', 'value': None},
'zip_code': {'url_key': 'postal', 'value': None},
}
extra_filters = {}
# Set to True to subclass defines the customize_results() method
custom_result_fields = False
sort_by_options = {
'newest': 'date',
'price_asc': 'priceasc',
'price_desc': 'pricedsc',
}
def __init__(self, site=None, area=None, category=None, filters=None,
log_level=logging.WARNING):
# Logging
self.set_logger(log_level, init=True)
self.site = site or self.default_site
if self.site not in ALL_SITES:
msg = "'%s' is not a valid site" % self.site
self.logger.error(msg)
raise ValueError(msg)
if area:
if not self.is_valid_area(area):
msg = "'%s' is not a valid area for site '%s'" % (area, site)
self.logger.error(msg)
raise ValueError(msg)
self.area = area
self.category = category or self.default_category
url_template = self.url_templates['area' if area else 'no_area']
self.url = url_template % {'site': self.site, 'area': self.area,
'category': self.category}
list_filters = get_list_filters(self.url)
self.filters = {}
for key, value in iteritems((filters or {})):
try:
filter = (self.base_filters.get(key) or
self.extra_filters.get(key) or
list_filters[key])
if filter['value'] is None:
self.filters[filter['url_key']] = value
elif isinstance(filter['value'], list):
valid_options = filter['value']
if not hasattr(value, '__iter__'):
value = [value] # Force to list
options = []
for opt in value:
try:
options.append(valid_options.index(opt) + 1)
except ValueError:
self.logger.warning(
"'%s' is not a valid option for %s"
% (opt, key)
)
self.filters[filter['url_key']] = options
elif value: # Don't add filter if ...=False
self.filters[filter['url_key']] = filter['value']
except KeyError:
self.logger.warning("'%s' is not a valid filter", key)
def set_logger(self, log_level, init=False):
if init:
self.logger = logging.getLogger('python-craiglist')
self.handler = logging.StreamHandler()
self.logger.addHandler(self.handler)
self.logger.setLevel(log_level)
self.handler.setLevel(log_level)
def is_valid_area(self, area):
base_url = self.url_templates['base']
response = requests_get(base_url % {'site': self.site},
logger=self.logger)
soup = BeautifulSoup(response.content, 'html.parser')
sublinks = soup.find('ul', {'class': 'sublinks'})
return sublinks and sublinks.find('a', text=area) is not None
def get_results(self, limit=None, start=0, sort_by=None, geotagged=False):
"""
Get results from Craigslist based on the specified filters.
If geotagged=True, the results will include the (lat, lng) in the
'geotag' attrib (this will make the process a little bit longer).
"""
if sort_by:
try:
self.filters['sort'] = self.sort_by_options[sort_by]
except KeyError:
msg = ("'%s' is not a valid sort_by option, "
"use: 'newest', 'price_asc' or 'price_desc'" % sort_by)
self.logger.error(msg)
raise ValueError(msg)
total_so_far = start
results_yielded = 0
total = 0
while True:
self.filters['s'] = start
response = requests_get(self.url, params=self.filters,
logger=self.logger)
self.logger.info('GET %s', response.url)
self.logger.info('Response code: %s', response.status_code)
response.raise_for_status() # Something failed?
soup = BeautifulSoup(response.content, 'html.parser')
if not total:
totalcount = soup.find('span', {'class': 'totalcount'})
total = int(totalcount.text) if totalcount else 0
for row in soup.find_all('p', {'class': 'result-info'}):
if limit is not None and results_yielded >= limit:
break
self.logger.debug('Processing %s of %s results ...',
total_so_far + 1, total)
link = row.find('a', {'class': 'hdrlnk'})
id = link.attrs['data-id']
name = link.text
url = urljoin(self.url, link.attrs['href'])
time = row.find('time')
if time:
datetime = time.attrs['datetime']
else:
pl = row.find('span', {'class': 'pl'})
datetime = pl.text.split(':')[0].strip() if pl else None
price = row.find('span', {'class': 'result-price'})
where = row.find('span', {'class': 'result-hood'})
if where:
where = where.text.strip()[1:-1] # remove ()
tags_span = row.find('span', {'class': 'result-tags'})
tags = tags_span.text if tags_span else ''
result = {'id': id,
'name': name,
'url': url,
'datetime': datetime,
'price': price.text if price else None,
'where': where,
'has_image': 'pic' in tags,
# TODO: Look into this, looks like all show map now
'has_map': 'map' in tags,
'geotag': None}
if self.custom_result_fields:
self.customize_result(result, row)
if geotagged and result['has_map']:
self.geotag_result(result)
yield result
results_yielded += 1
total_so_far += 1
if results_yielded == limit:
break
if (total_so_far - start) < RESULTS_PER_REQUEST:
break
start = total_so_far
def customize_result(self, result, html_row):
""" Add custom/delete/alter fields to result. """
pass # Override in subclass to add category-specific fields.
def geotag_result(self, result):
""" Adds (lat, lng) to result. """
self.logger.debug('Geotagging result ...')
if result['has_map']:
response = requests_get(result['url'], logger=self.logger)
self.logger.info('GET %s', response.url)
self.logger.info('Response code: %s', response.status_code)
if response.ok:
soup = BeautifulSoup(response.content, 'html.parser')
map = soup.find('div', {'id': 'map'})
if map:
result['geotag'] = (float(map.attrs['data-latitude']),
float(map.attrs['data-longitude']))
return result
def geotag_results(self, results, workers=8):
"""
Add (lat, lng) to each result. This process is done using N threads,
where N is the amount of workers defined (default: 8).
"""
results = list(results)
queue = Queue()
for result in results:
queue.put(result)
def geotagger():
while not queue.empty():
self.logger.debug('%s results left to geotag ...',
queue.qsize())
self.geotag_result(queue.get())
queue.task_done()
threads = []
for _ in range(workers):
thread = Thread(target=geotagger)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
return results
@classmethod
def show_filters(cls, category=None):
print('Base filters:')
for key, options in iteritems(cls.base_filters):
value_as_str = '...' if options['value'] is None else 'True/False'
print('* %s = %s' % (key, value_as_str))
print('Section specific filters:')
for key, options in iteritems(cls.extra_filters):
value_as_str = '...' if options['value'] is None else 'True/False'
print('* %s = %s' % (key, value_as_str))
url = cls.url_templates['no_area'] % {
'site': cls.default_site,
'category': category or cls.default_category,
}
list_filters = get_list_filters(url)
for key, options in iteritems(list_filters):
value_as_str = ', '.join([repr(opt) for opt in options['value']])
print('* %s = %s' % (key, value_as_str))
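# A minimal sketch of inspecting the available filters (the category code
# 'cta' is an illustrative example; the list-based filters are scraped from
# the live Craigslist page, so the output varies):
#
#     CraigslistForSale.show_filters(category='cta')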
class CraigslistCommunity(CraigslistBase):
""" Craigslist community wrapper. """
default_category = 'ccc'
class CraigslistEvents(CraigslistBase):
""" Craigslist events wrapper. """
default_category = 'eee'
extra_filters = {
'art': {'url_key': 'event_art', 'value': 1},
'athletics': {'url_key': 'event_athletics', 'value': 1},
'career': {'url_key': 'event_career', 'value': 1},
'dance': {'url_key': 'event_dance', 'value': 1},
'festival': {'url_key': 'event_festical', 'value': 1},
'fitness': {'url_key': 'event_fitness_wellness', 'value': 1},
'health': {'url_key': 'event_fitness_wellness', 'value': 1},
'food': {'url_key': 'event_food', 'value': 1},
'drink': {'url_key': 'event_food', 'value': 1},
'free': {'url_key': 'event_free', 'value': 1},
'fundraiser': {'url_key': 'event_fundraiser_vol', 'value': 1},
'tech': {'url_key': 'event_geek', 'value': 1},
'kid_friendly': {'url_key': 'event_kidfriendly', 'value': 1},
'literacy': {'url_key': 'event_literacy', 'value': 1},
'music': {'url_key': 'event_music', 'value': 1},
'outdoor': {'url_key': 'event_outdoor', 'value': 1},
'sale': {'url_key': 'event_sale', 'value': 1},
'singles': {'url_key': 'event_singles', 'value': 1},
}
class CraigslistForSale(CraigslistBase):
""" Craigslist for sale wrapper. """
default_category = 'sss'
extra_filters = {
'min_price': {'url_key': 'min_price', 'value': None},
'max_price': {'url_key': 'max_price', 'value': None},
'make': {'url_key': 'auto_make_model', 'value': None},
'model': {'url_key': 'auto_make_model', 'value': None},
'min_year': {'url_key': 'min_auto_year', 'value': None},
'max_year': {'url_key': 'max_auto_year', 'value': None},
'min_miles': {'url_key': 'min_auto_miles', 'value': None},
'max_miles': {'url_key': 'max_auto_miles', 'value': None},
}
class CraigslistGigs(CraigslistBase):
""" Craigslist gigs wrapper. """
default_category = 'ggg'
extra_filters = {
'is_paid': {'url_key': 'is_paid', 'value': None},
}
def __init__(self, *args, **kwargs):
try:
is_paid = kwargs['filters']['is_paid']
kwargs['filters']['is_paid'] = 'yes' if is_paid else 'no'
except KeyError:
pass
super(CraigslistGigs, self).__init__(*args, **kwargs)
class CraigslistHousing(CraigslistBase):
""" Craigslist housing wrapper. """
default_category = 'hhh'
custom_result_fields = True
extra_filters = {
'private_room': {'url_key': 'private_room', 'value': 1},
'private_bath': {'url_key': 'private_bath', 'value': 1},
'cats_ok': {'url_key': 'pets_cat', 'value': 1},
'dogs_ok': {'url_key': 'pets_dog', 'value': 1},
'min_price': {'url_key': 'min_price', 'value': None},
'max_price': {'url_key': 'max_price', 'value': None},
'min_ft2': {'url_key': 'minSqft', 'value': None},
'max_ft2': {'url_key': 'maxSqft', 'value': None},
'min_bedrooms': {'url_key': 'min_bedrooms', 'value': None},
'max_bedrooms': {'url_key': 'max_bedrooms', 'value': None},
'min_bathrooms': {'url_key': 'min_bathrooms', 'value': None},
'max_bathrooms': {'url_key': 'max_bathrooms', 'value': None},
'no_smoking': {'url_key': 'no_smoking', 'value': 1},
'is_furnished': {'url_key': 'is_furnished', 'value': 1},
'wheelchair_acccess': {'url_key': 'wheelchaccess', 'value': 1},
}
def customize_result(self, result, html_row):
housing_info = html_row.find('span', {'class': 'housing'})
# Default values
result.update({'bedrooms': None, 'area': None})
if housing_info:
for elem in housing_info.text.split('-'):
elem = elem.strip()
if elem.endswith('br'):
# Don't convert to int, too risky
result['bedrooms'] = elem[:-2]
                if elem.endswith('2'):
                    # Area strings look like '1000ft2'
                    result['area'] = elem
class CraigslistJobs(CraigslistBase):
""" Craigslist jobs wrapper. """
default_category = 'jjj'
extra_filters = {
'is_internship': {'url_key': 'is_internship', 'value': 1},
'is_nonprofit': {'url_key': 'is_nonprofit', 'value': 1},
'is_telecommuting': {'url_key': 'is_telecommuting', 'value': 1},
}
class CraigslistPersonals(CraigslistBase):
""" Craigslist personals wrapper. """
default_category = 'ppp'
extra_filters = {
'min_age': {'url_key': 'min_pers_age', 'value': None},
'max_age': {'url_key': 'max_pers_age', 'value': None},
}
class CraigslistResumes(CraigslistBase):
""" Craigslist resumes wrapper. """
default_category = 'rrr'
class CraigslistServices(CraigslistBase):
""" Craigslist services wrapper. """
default_category = 'bbb'
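# A minimal end-to-end usage sketch, assuming network access and a valid
# site/category; the filter names correspond to the extra_filters dicts above:
#
#     cl = CraigslistForSale(site='sfbay', category='cta',
#                            filters={'min_price': 2000, 'max_price': 10000})
#     for result in cl.get_results(sort_by='newest', limit=5):
#         print(result['name'], result['price'])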
| ["31087664+SamVarney@users.noreply.github.com"] | 31087664+SamVarney@users.noreply.github.com |
dba98931ab1055fbc8aa7f09f7f007a014124723 | 687928e5bc8d5cf68d543005bb24c862460edcfc | /nssrc/com/citrix/netscaler/nitro/resource/config/lb/lbvserver_dospolicy_binding.py | 465c32d9a481652819921910b414eaf9319e4bd3 | ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "Python-2.0"] | permissive | mbs91/nitro | c6c81665d6abd04de8b9f09554e5e8e541f4a2b8 | be74e1e177f5c205c16126bc9b023f2348788409 | refs/heads/master | 2021-05-29T19:24:04.520762 | 2015-06-26T02:03:09 | 2015-06-26T02:03:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,123 | py | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lbvserver_dospolicy_binding(base_resource) :
""" Binding class showing the dospolicy that can be bound to lbvserver.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._name = ""
self.___count = 0
@property
def priority(self) :
"""Priority.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""Priority.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def policyname(self) :
"""Name of the policy bound to the LB vserver.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
"""Name of the policy bound to the LB vserver.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def name(self) :
"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(lbvserver_dospolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbvserver_dospolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch lbvserver_dospolicy_binding resources.
"""
try :
obj = lbvserver_dospolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of lbvserver_dospolicy_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = lbvserver_dospolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count lbvserver_dospolicy_binding resources configued on NetScaler.
"""
try :
obj = lbvserver_dospolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of lbvserver_dospolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = lbvserver_dospolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class lbvserver_dospolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.lbvserver_dospolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbvserver_dospolicy_binding = [lbvserver_dospolicy_binding() for _ in range(length)]
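# A minimal usage sketch, assuming an authenticated nitro_service instance
# named `client` (created elsewhere) and a load-balancing vserver that exists:
#
#     bindings = lbvserver_dospolicy_binding.get(client, "my_lb_vserver")
#     for binding in bindings:
#         print(binding.policyname, binding.priority)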
| ["bensassimaha@gmail.com"] | bensassimaha@gmail.com |
deed215af87156414cd9715b12228487362c1a23 | 43d2a73d979ca74bc1dcbcc8b38a86d5e4ebff66 | /static_word_vec.py | 80cb99d0d90ceb9e7b8fb86a2eeea172e24b1e25 | [] | no_license | 786440445/text_match | b0f1db5468f7068605e9ad109e090576b2d5baad | f3408d84b0e3c20e19ed7cf0944ff4ccb1d3a794 | refs/heads/master | 2023-03-27T08:56:16.501874 | 2021-03-25T08:28:28 | 2021-03-25T08:28:28 | 344,845,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,396 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''=================================================
@Project -> File :nlp_tools -> static_word_vec
@IDE :PyCharm
@Author :chengli
@Date :2020/11/18 8:36 PM
@Desc :
=================================================='''
import os, sys
import numpy as np
from tqdm import tqdm
home_dir = os.getcwd()
sys.path.append(home_dir)
from utils.load_data import load_char_vocab
from gensim.models import Word2Vec, word2vec, KeyedVectors
import pandas as pd
import jieba
#
def loadEmbedding(embeddingFile, word2id, embeddingSize=200):
""" Initialize embeddings with pre-trained word2vec vectors
Will modify the embedding weights of the current loaded model
sess:会话
embeddingFile:Tencent_AILab_ChineseEmbedding.txt的路径
word2id:自己数据集中的word2id
embeddingSize: 词向量的维度,我这里直接设置的200,和原始一样,低于200的采用我屏蔽掉的代码应该可以,我还没测
"""
print("Loading pre-trained word embeddings from %s " % embeddingFile)
with open(embeddingFile, "r", encoding='ISO-8859-1') as f:
header = f.readline()
vocab_size, vector_size = map(int, header.split())
initW = np.random.uniform(-0.25, 0.25, (len(word2id), vector_size))
for i in tqdm(range(vocab_size)):
line = f.readline()
lists = line.split(' ')
word = lists[0]
if word in word2id:
number = map(float, lists[1:])
number = list(number)
vector = np.array(number)
initW[word2id[word]] = vector
return initW
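# A minimal usage sketch for loadEmbedding (the embedding file path is a
# placeholder; word2idx comes from load_char_vocab, as in the commented-out
# code further below):
#
#     word2idx, idx2word = load_char_vocab()
#     init_w = loadEmbedding('Tencent_AILab_ChineseEmbedding.txt', word2idx)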
# Build a word-segmented corpus from the train/test/dev splits of the LCQMC data.
common_texts = []
for split in ('train.txt', 'test.txt', 'dev.txt'):
    df = pd.read_csv(os.path.join(home_dir, 'data/clean_lcqmc', split), header=None, sep='\t')
    p = df.iloc[:, 0].values
    h = df.iloc[:, 1].values
    p_seg = list(map(lambda x: list(jieba.cut(x)), p))
    h_seg = list(map(lambda x: list(jieba.cut(x)), h))
    common_texts.extend(p_seg)
    common_texts.extend(h_seg)
# embeding_path = os.path.join(home_dir, "text_match/tx_embedding/500000-small.txt")
# word2idx, idx2word = load_char_vocab()
# embedding_table = loadEmbedding(embeding_path, word2id=word2idx)
# wv_from_text = KeyedVectors.load_word2vec_format(embeding_path, limit=4000000, binary=False)
# # Using init_sims saves memory
# wv_from_text.init_sims(replace=True)
# # Re-save the loaded vectors in binary form
# bin_path = os.path.join(home_dir, "tx_embedding/embedding.bin")
# print(bin_path)
# wv_from_text.save(bin_path)
# model = Word2Vec.load(embedding_table)
print('success')
# model.init_sims(replace=True)
model = Word2Vec(common_texts, size=200, window=5, min_count=5, workers=12)
print('corpus length', len(common_texts))
model.save(os.path.join(home_dir, "output/word2vec/word2vec.model"))
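# A minimal sketch of reloading and querying the saved model (gensim < 4.0
# API, matching the Word2Vec(size=...) call above; the query word is a
# placeholder):
#
#     model = Word2Vec.load(os.path.join(home_dir, "output/word2vec/word2vec.model"))
#     print(model.wv.most_similar("example_word", topn=5))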
| ["matrix@ubuntu.com"] | matrix@ubuntu.com |
d96f1eff4adca51a25a2bd3e0372c97298959132 | 3cd6c578fcc0444646aa0f6589c82c27afc3955c | /gamifi/migrations/0003_auto_20210404_2230.py | 3be10b5c3353dddf864dc07800cb2e9b2cd38847 | [] | no_license | ArashMAzizi/FitnessFriendsApp | 0715fe41993ee6053b22395f4b1683088b114104 | e6cd51cc407c667830fc7cc40414c36d6118ca6d | refs/heads/main | 2023-05-01T21:04:07.292670 | 2021-05-06T18:44:26 | 2021-05-06T18:44:26 | 368,673,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | # Generated by Django 3.1.7 on 2021-04-05 02:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('gamifi', '0002_goal'),
]
operations = [
migrations.RemoveField(
model_name='goal',
name='profile',
),
migrations.AddField(
model_name='goal',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
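# This auto-generated migration is applied like any other (the app label comes
# from the dependency tuple above):
#
#     python manage.py migrate gamifi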
| ["bvndinh@gmail.com"] | bvndinh@gmail.com |
5cbb8adb279ab49cd622f32461c26fcb84275639 | 6d9cfcce19d17da118bc2a882f64b75373a1021f | /projekat/sacuvati/predmet.py | 133f0af909a265a52bd0a437457da5b76def93bc | [] | no_license | ctecdev/OISiURS-2014 | 3e1fc4528b63cd08345d99ad2af6cddc7ae8ef88 | 04c1603d287e216c74cbff60580d5fc2e8b3dd20 | refs/heads/master | 2021-01-17T14:33:08.462342 | 2017-12-28T18:54:47 | 2017-12-28T18:54:47 | 84,091,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | '''
Created on 26.06.2014.
@author: Srky
'''
def sacuvatiPredmet(kompanije):
file=open("predmeti.txt", "w")
for kompanija in kompanije:
for objekat in kompanija.children:
for prostorija in objekat.children:
for predmet in prostorija.children:
file.write(predmet.idOznakaPred)
file.write(predmet.sirina)
file.write(predmet.opis)
file.write(str(predmet.sirina))
file.write(str(predmet.duzina))
file.write(predmet.idKodPotrazitelja)
file.write(str(predmet.datumPostPredmeta))
file.write(prostorija.idOznaka)
file.write("\n") | [
"ctecdev@gmail.com"
] | ctecdev@gmail.com |
7a4edb37df14a576b2126dca28052dce00cc7b7d | b26c1999e8642d710439ba0ce503ed94ed228ac8 | /Python/slicer/release/midasdata.py | f459b7fe8efc6a769fd95538a9737cd52179061f | [] | no_license | JD-TamayoQuintero/3D-Dental | c31070f4897cee93999e0f162bd8884326223a8e | 9e766d6fd7ae5e9b553adbe7c3c1d50d743d25ef | refs/heads/master | 2022-03-15T21:31:14.138745 | 2019-12-04T10:36:01 | 2019-12-04T10:36:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,435 | py | #!/usr/bin/env python
""" Midas NA-MIC data tree release versioning script.
This script can be used to duplicate all the Nightly data on Midas to a new folder named by the release version.
The script does not accept positional arguments; all arguments are provided via option flags. For a list of the option flags, run
python midasdata.py --help"""
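# A minimal dry-run invocation sketch (the URL, email and key below are
# placeholders, not real credentials):
#
#     python midasdata.py -u http://example.com/midas -e user@example.com \
#         -k APIKEY -s Nightly -d 4.4 --dry-run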
from __future__ import print_function
from optparse import OptionParser
import re, sys
try:
import pydas
except ImportError as e:
print(e, "\nInstall pydas or update PYTHONPATH")
sys.exit(1)
def _error(message):
""" Print an error message and exit the program """
sys.stderr.write("error: %s\n" % message)
sys.exit(1)
def _getFolderIndex(folderChildren, name):
"""Get the index of a subfolder based on its name.
folderChildren -- Dictionary specifying children of the folder.
name -- Name of the folder to look for.
Returns the index of the folder in the dictionary."""
folders = folderChildren["folders"]
index = -1
for folder in folders:
index = index + 1
if folder["name"] == name:
return index
return -1
def _getIDfromIndex(childrenFolders, entityType, index):
""" Get the folder_id for a subfolder based on its index in the folder.
childrenFolders -- Dictionary specifying children of the folder.
entityType -- folder or item.
index -- Index of the item in the folder to find the id.
Returns the integer ID."""
entity_key = entityType + "s"
entity = childrenFolders[entity_key]
id_key = entityType + "_id"
ID = entity[index][id_key]
return ID
def itemExists(folderID, itemName, token, communicator):
"""Check if an item exists in a folder based on its name
folderID -- ID of the folder.
itemName -- Name of the item to check for.
token -- Authentication token.
communicator -- Midas session communicator.
Returns a boolean indicating if the item exists or not."""
folderChildren = communicator.folder_children(token, folderID)
folder_children_items = folderChildren["items"]
for item in folder_children_items:
if item["name"] == itemName:
return True
return False
def deleteItemByName(folderID, itemName, token, communicator):
"""Delete an item from a folder based on its name
folderID -- ID of the folder.
itemName -- Name of the item to delete.
token -- Authentication token.
communicator -- Midas session communicator.
Returns a boolean indicating success or failure."""
folderChildren = communicator.folder_children(token, folderID)
folder_children_items = folderChildren["items"]
for item in folder_children_items:
if item["name"] == itemName:
communicator.delete_item(token, item["item_id"])
return True
return False
def duplicateItem(itemID, destID, token, communicator):
"""Duplicate an item to a destination folder
Uses the request web-api as pydas does not expose all methods yet.
itemID -- ID of item to duplicate.
destID -- ID of folder to duplicate item in.
token -- Authentication token.
communicator -- Midas session communicator."""
duplicate_params = {}
duplicate_params["token"] = token
duplicate_params["id"] = itemID
duplicate_params["dstfolderid"] = destID
communicator.request("midas.item.duplicate", duplicate_params)
def duplicateFolderItems(sourceID, destID, token, communicator, overwrite):
"""Duplicate all the child items from a source to a destination
sourceID -- ID of source folder.
destID -- ID of destination folder.
token -- Authentication token.
communicator -- Midas session communicator.
overwrite -- Boolean indicating whether to overwrite existing items."""
folderChildren = communicator.folder_children(token, sourceID)
folder_children_items = folderChildren["items"]
if len(folder_children_items) > 0:
for folder_item in folder_children_items:
# If item exists and overwrite is True, delete item and duplicate
folder_item_name = folder_item["name"]
folder_item_id = folder_item["item_id"]
item_exists = itemExists(destID, folder_item_name, token, communicator)
if item_exists:
if overwrite:
deleted = deleteItemByName(destID, folder_item_name, token, communicator)
if not deleted:
_error("Could not delete existing item: " + folder_item_name + " in dest. folder with ID: " + destID)
duplicateItem(folder_item_id, destID, token, communicator)
else:
duplicateItem(folder_item_id, destID, token, communicator)
def duplicateFolderfolders(sourceID, destID, token, communicator, overwrite):
"""Duplicate all the sub-folders from source to destination
sourceID -- ID of source folder.
destID -- ID of destination folder.
token -- Authentication token.
communicator -- Midas session communicator.
overwrite -- Boolean indicating whether to overwrite existing items."""
folderChildren = communicator.folder_children(token, sourceID)
folder_subfolders = folderChildren["folders"]
destFolderChildren = communicator.folder_children(token, destID)
if len(folder_subfolders) > 0:
for subfolder in folder_subfolders:
# If needed, create a corresponding subfolder at the destination
dst_index = _getFolderIndex(destFolderChildren, subfolder["name"])
if dst_index == -1:
dst_folder = communicator.create_folder(token, subfolder["name"], destID)
dst_folderID = dst_folder["folder_id"]
else:
dst_folderID = _getIDfromIndex(destFolderChildren, "folder", dst_index)
# Duplicate recursively
duplicateFolderfolders(subfolder["folder_id"], dst_folderID, token, communicator, overwrite)
# Duplicate all the items from the source subfolder to new dest subfolder
duplicateFolderItems(subfolder["folder_id"], dst_folderID, token, communicator, overwrite)
def versionDataApplicationDirectory(sourceVersion, destVersion, token, communicator, applicationID, overwrite):
"""Version the Data/Application directory
sourceVersion -- String indicating source version.
destVersion -- String indicating destination version.
token -- Authentication token.
communicator -- Midas session communicator.
applicationID -- ID of Application folder.
overwrite -- Boolean indicating whether to overwrite existing items."""
availableVersions = communicator.folder_children(token, applicationID)
sourceIndex = _getFolderIndex(availableVersions, sourceVersion)
if sourceIndex == -1:
_error("No folder named " + sourceVersion + " in Application folder")
sourceID = _getIDfromIndex(availableVersions, "folder", sourceIndex)
# Create a new folder for destination under Application folder
print("Creating folder %s under Application directory" % destVersion)
dest_folder = communicator.create_folder(token, destVersion, applicationID)
destID = dest_folder["folder_id"]
# Duplicate the child items from source to destination
duplicateFolderItems(sourceID, destID, token, communicator, overwrite)
message = "Duplicating subfolders from %s to %s..." %(sourceVersion, destVersion)
print(message)
# Duplicate all the sub-folders from source to destination
duplicateFolderfolders(sourceID, destID, token, communicator, overwrite)
print(message + "[DONE]")
def versionDataModulesDirectory(sourceVersion, destVersion, token, communicator, modulesID, ignoreModules, overwrite):
"""Version the Data/Modules directory
sourceVersion -- String indicating source version.
destVersion -- String indicating destination version.
token -- Authentication token.
communicator -- Midas session communicator.
modulesID -- ID of Modules folder.
ignoreModules -- List of modules to ignore while versioning.
overwrite -- Boolean indicating whether to overwrite existing items."""
availableModules = communicator.folder_children(token, modulesID)
availableModulesFolders = availableModules["folders"]
ignore_indices = []
# Take modules to be ignored into account
if len(ignoreModules) > 0:
for ignore_module in ignoreModules:
ignore_module_ind = _getFolderIndex(availableModules, ignore_module)
if ignore_module_ind != -1:
ignore_indices.append(ignore_module_ind)
for num_module in range(len(availableModulesFolders)):
# Do not version if module is to be ignored
if num_module in ignore_indices:
continue
moduleFolderID = _getIDfromIndex(availableModules, "folder", num_module)
moduleName = availableModulesFolders[num_module]["name"]
availableVersions = communicator.folder_children(token, moduleFolderID)
sourceIndex = _getFolderIndex(availableVersions, sourceVersion)
if sourceIndex == -1:
_error("No folder named " + sourceVersion + " in module: " + moduleName)
sourceID = _getIDfromIndex(availableVersions, "folder", sourceIndex)
# If needed, create a new folder for destination under the module folder
destIndex = _getFolderIndex(availableVersions, destVersion)
if destIndex == -1:
print("Creating folder %s under %s module directory" % (destVersion, moduleName))
dest_folder = communicator.create_folder(token, destVersion, moduleFolderID)
destID = dest_folder["folder_id"]
else:
print("Re-using existing folder %s under %s module directory" % (destVersion, moduleName))
destID = _getIDfromIndex(availableVersions, "folder", destIndex)
# Duplicate the child items from source to destination
duplicateFolderItems(sourceID, destID, token, communicator, overwrite)
message = "Duplicating subfolders from %s to %s for %s module..." % (sourceVersion, destVersion, moduleName)
print(message)
# Duplicate all the sub-folders from source to destination
duplicateFolderfolders(sourceID, destID, token, communicator, overwrite)
print(message + "[DONE]")
def printSourceStructure(modulesID, applicationID, sourceVersion, token, communicator):
"""Print the directory structure of source version in Application and Modules under the data tree
modulesID -- ID of Modules folder.
applicationID -- ID of the Application folder
sourceVersion -- Source version folder name
token -- Authentication token.
communicator -- Midas session communicator."""
# Print Application source version directory structure
applicationChildren = communicator.folder_children(token, applicationID)
sourceVersionApplicationIndex = _getFolderIndex(applicationChildren, sourceVersion)
if sourceVersionApplicationIndex == -1:
msg = "No folder named " + sourceVersion + " in Application folder."
_error(msg)
sourceApplicationID = _getIDfromIndex(applicationChildren, "folder", sourceVersionApplicationIndex)
print("Application ( folder_id:%s )" % applicationID)
printFolderStructure(sourceApplicationID, token, communicator, 1)
print("\n")
# Print Modules and their directory structure for the source version
availableModules = communicator.folder_children(token, modulesID)
availableModulesFolders = availableModules["folders"]
for module in availableModulesFolders:
moduleChildren = communicator.folder_children(token, module["folder_id"])
sourceVersionModuleIndex = _getFolderIndex(moduleChildren, sourceVersion)
if sourceVersionModuleIndex == -1:
msg = "No folder named " + sourceVersion + " in module ", module["name"], "."
print("Warning:", msg)
continue
sourceModuleID = _getIDfromIndex(moduleChildren, "folder", sourceVersionModuleIndex)
print("Module:%s( folder_id:%s )" % (module["name"], sourceModuleID))
printFolderStructure(sourceModuleID, token, communicator, 1)
print("\n")
def printFolderStructure(folderID, token, communicator, depth = 0):
"""Print the folder structure of the sourceVersion under the Application folder
folderID -- ID of the Application folder.
token -- Authentication token.
communicator -- Midas session communicator."""
appFolder = communicator.folder_get(token, folderID)
for i in range(depth):
sys.stdout.write("'-")
sys.stdout.write(appFolder["name"])
sys.stdout.write(" ( folder_id: ")
sys.stdout.write(folderID)
sys.stdout.write(" )")
childrenFolder = communicator.folder_children(token, folderID)
if len(childrenFolder["folders"]) > 0:
for subfolder in childrenFolder["folders"]:
sys.stdout.write("\n")
cdepth = depth + 1
printFolderStructure( subfolder["folder_id"], token, communicator, cdepth)
if len(childrenFolder["items"]) > 0:
for item in childrenFolder["items"]:
sys.stdout.write("\n")
for i in range(depth+1):
sys.stdout.write("'-")
sys.stdout.write(item["name"])
sys.stdout.write(" ( item_id: ")
sys.stdout.write(item["item_id"])
sys.stdout.write(" )")
def versionData(midas_url, email, apikey, sourceVersion, destVersion, data_id, ignore_modules = [], overwrite = False, dry_run = False):
""" Version Data folder under Midas
midas_url -- Midas URL.
email -- Authentication email for user on Midas server.
apikey -- A valid api-key assigned to the user.
sourceVersion -- The source version with a valid directory name.
destVersion -- The destination version.
data_id -- A valid id for the Data folder (NA-MIC/Public/Slicer/Data).
ignore_modules -- Ignore a module while versioning. To ignore multiple modules, use this option multiple times (e.g.: -g A -g B).
overwrite -- Overwrite items if existing. If this flag is provided, duplicates by overwriting existing items. If this flag is not provided, does not duplicate existing items.
dry_run -- List modules and exit. If this flag is provided, a list of modules will be printed and nothing else will be done."""
# Instantiate a communicator and login to get an authentication token
communicator = pydas.core.Communicator(midas_url)
token = communicator.login_with_api_key(email, apikey)
# Get the sub-folders for the Data folder
# Currently only versions the Application and Modules folders
data_folders = communicator.folder_children(token, data_id)
ModulesIndex = _getFolderIndex(data_folders, "Modules")
if ModulesIndex == -1:
_error("No folder named Modules in Data folder")
ModulesID = _getIDfromIndex(data_folders, "folder", ModulesIndex)
ApplicationIndex = _getFolderIndex(data_folders, "Application")
if ApplicationIndex == -1:
_error("No folder named Application in Data folder")
ApplicationID = _getIDfromIndex(data_folders, "folder", ApplicationIndex)
# If -l or --dry_run provided, just print the structure and exit
if dry_run:
printSourceStructure(ModulesID, ApplicationID, sourceVersion, token, communicator)
sys.exit(0)
msgData = "Versioning of the NA-MIC Data tree for release %s..." % (destVersion)
print(msgData)
msgModules = "Versioning Modules..."
print(msgModules)
versionDataModulesDirectory(sourceVersion, destVersion, token, communicator, ModulesID, ignore_modules, overwrite)
print(msgModules + "[DONE]")
msgApplication = "Versioning Application..."
print(msgApplication)
versionDataApplicationDirectory(sourceVersion, destVersion, token, communicator, ApplicationID, overwrite)
print(msgApplication + "[DONE]")
print(msgData + "[DONE]")
def _checkRequiredArguments(options, parser):
"""Check the input arguments to see if all REQUIRED arguments are provided by user
options -- Dictionary of options supplied by the user.
parser -- OptionParser."""
missing_options = []
for option in parser.option_list:
        if re.match(r'^\[REQUIRED\]', option.help) and getattr(options, option.dest) is None:
missing_options.extend(option._long_opts)
if len(missing_options) > 0:
_error('Missing REQUIRED parameters: ' + str(missing_options))
def _main():
"""Main function for command-line interface.
Defines usage options
All options with [REQUIRED] in the help string do not have default values
and the user is "required" to provide them"""
usage = "Usage: %prog [options]"
parser = OptionParser(usage=usage)
parser.add_option("-u", "--url", dest="midas_url", metavar="url", help="Midas URL", default="http://localhost/midas")
parser.add_option("-e", "--email", dest="email", metavar="email", help="[REQUIRED] Authentication email for user on Midas server")
parser.add_option("-k", "--apikey", dest="apikey", metavar="apikey", help="[REQUIRED] A valid api-key assigned to the user")
parser.add_option("-s", "--source_version", dest="sourceVersion", metavar="source_version", help="[REQUIRED] The source version with a valid directory name")
parser.add_option("-d", "--dest_version", dest="destVersion", metavar="dest_version", help="[REQUIRED] The destination version. This script creates a directory")
parser.add_option("-i", "--data_id", dest="data_id", metavar="id", help="A valid id for the Data folder (NA-MIC/Public/Slicer/Data)", type=int, default=9)
parser.add_option("-g", "--ignore-module", dest="ignore_modules", metavar="module", action="append", help="Ignore a module while versioning. To ignore multiple modules, use this option multiple times (e.g.: -g A -g B)", default=[])
parser.add_option("-o", "--overwrite", dest="overwrite", action="store_true", help="Overwrite items if existing. If this flag is provided, duplicates by overwriting existing items. If this flag is not provided, does not duplicate existing items.", default=False)
parser.add_option("-l", "--dry-run", dest="dry_run", action="store_true", help="Print structure of source version directory and exit. If this flag is provided, a list of folders/items that will be copied by the script will be printed and nothing else will be done.", default=False)
# Parse input arguments
(options, args) = parser.parse_args()
_checkRequiredArguments(options,parser)
versionData(options.midas_url, options.email, options.apikey, options.sourceVersion, options.destVersion, options.data_id, options.ignore_modules, options.overwrite, options.dry_run)
if __name__=="__main__":
_main()
| ["noreply@github.com"] | noreply@github.com |
5f31052d82e192faebf57ef775fa436e2b703bba | 43d10f09e999bbcb065d0328a5dd15964f534bb1 | /setup.py | 105d620b86a3f3416c34878da71277947cc191cc | [] | no_license | SERVIR/aqx-india | 6148a43c63e3d92d2c9d14286baa8abfd93e6b0d | 6347e5c426d2a618a0522cf8dee8565be22f494d | refs/heads/master | 2023-01-07T18:50:41.142404 | 2020-11-03T20:32:11 | 2020-11-03T20:32:11 | 296,125,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | import os
import sys
from setuptools import setup, find_namespace_packages
from tethys_apps.app_installation import find_resource_files
# -- Apps Definition -- #
app_package = 'aqx_india'
release_package = 'tethysapp-' + app_package
# -- Get Resource File -- #
resource_files = find_resource_files('tethysapp/' + app_package + '/templates', 'tethysapp/' + app_package)
resource_files += find_resource_files('tethysapp/' + app_package + '/public', 'tethysapp/' + app_package)
# -- Python Dependencies -- #
dependencies = ['xmltodict']
setup(
name=release_package,
version='0.0.1',
tags='"VIC","DSSAT"',
description='Integration of VIC and DSSAT in to on Viewer',
long_description='',
keywords='',
author='Sarva Pulla, Githika Tondapu',
author_email='',
url='',
license='',
packages=find_namespace_packages(),
include_package_data=True,
package_data={'': resource_files},
zip_safe=False,
install_requires=dependencies,
)
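# Example installation of the app package (an assumption; Tethys apps are
# typically installed into an activated Tethys environment):
#
#     python setup.py develop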
| ["38189387+jsrikishen@users.noreply.github.com"] | 38189387+jsrikishen@users.noreply.github.com |
f6ed94fa98589044a6343a7ce1fd473f8b4814d2 | 8786cfb45c92aaa7af27b7140797d790e1b1f8ec | /dymos/transcriptions/runge_kutta/components/test/test_rk_continuity_comp.py | 949ed6f041a4fb7e3b898aac1d590ed3ff055e4a | ["Apache-2.0"] | permissive | daoos/dymos | 32881f8ff6ec9407c87d2f1e3cc6e169a79bcf9f | 584cc6afe77585b73e48c23754099b603e13ff0f | refs/heads/master | 2023-02-25T20:38:35.684174 | 2021-02-05T21:06:18 | 2021-02-05T21:06:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,217 | py | import unittest
import numpy as np
import openmdao.api as om
from openmdao.utils.assert_utils import assert_near_equal
from openmdao.utils.testing_utils import use_tempdirs
import dymos as dm
from dymos.transcriptions.runge_kutta.components.runge_kutta_state_continuity_comp import \
RungeKuttaStateContinuityComp
from dymos.utils.testing_utils import assert_check_partials
# Modify class so we can run it standalone.
from dymos.utils.misc import CompWrapperConfig
RungeKuttaStateContinuityComp = CompWrapperConfig(RungeKuttaStateContinuityComp)
dm.options['include_check_partials'] = True
@use_tempdirs
class TestRungeKuttaContinuityComp(unittest.TestCase):
def test_continuity_comp_scalar_no_iteration_fwd(self):
num_seg = 4
state_options = {'y': {'shape': (1,), 'units': 'm', 'targets': ['y'],
'defect_scaler': None, 'defect_ref': None,
'lower': None, 'upper': None, 'connected_initial': False}}
p = om.Problem(model=om.Group())
p.model.add_subsystem('continuity_comp',
RungeKuttaStateContinuityComp(num_segments=num_seg,
state_options=state_options),
promotes_inputs=['*'],
promotes_outputs=['*'])
p.model.nonlinear_solver = om.NonlinearRunOnce()
p.model.linear_solver = om.DirectSolver()
p.setup(check=True, force_alloc_complex=True)
p['states:y'] = np.array([[0.50000000],
[1.425130208333333],
[2.639602661132812],
[4.006818970044454],
[5.301605229265987]])
p['state_integrals:y'] = np.array([[1.0],
[1.0],
[1.0],
[1.0]])
p.run_model()
p.model.run_apply_nonlinear()
# Test that the residuals of the states are the expected values
outputs = p.model.list_outputs(print_arrays=True, residuals=True, out_stream=None)
y_f = p['states:y'][1:, ...]
y_i = p['states:y'][:-1, ...]
dy_given = y_f - y_i
dy_computed = p['state_integrals:y']
expected_resids = np.zeros((num_seg + 1, 1))
expected_resids[1:, ...] = dy_given - dy_computed
op_dict = dict([op for op in outputs])
assert_near_equal(op_dict['continuity_comp.states:y']['resids'], expected_resids)
# Test the partials
cpd = p.check_partials(method='cs', out_stream=None)
J_fwd = cpd['continuity_comp']['states:y', 'state_integrals:y']['J_fwd']
J_fd = cpd['continuity_comp']['states:y', 'state_integrals:y']['J_fd']
assert_near_equal(J_fwd, J_fd)
J_fwd = cpd['continuity_comp']['states:y', 'states:y']['J_fwd']
J_fd = cpd['continuity_comp']['states:y', 'states:y']['J_fd']
J_fd[0, 0] = -1.0
assert_near_equal(J_fwd, J_fd)
def test_continuity_comp_connected_scalar_no_iteration_fwd(self):
num_seg = 4
state_options = {'y': {'shape': (1,), 'units': 'm', 'targets': ['y'],
'defect_scaler': None, 'defect_ref': None,
'lower': None, 'upper': None, 'connected_initial': True}}
p = om.Problem(model=om.Group())
ivc = p.model.add_subsystem('ivc', om.IndepVarComp(), promotes_outputs=['*'])
ivc.add_output('initial_states:y', units='m', shape=(1, 1))
p.model.add_subsystem('continuity_comp',
RungeKuttaStateContinuityComp(num_segments=num_seg,
state_options=state_options),
promotes_inputs=['*'],
promotes_outputs=['*'])
p.model.nonlinear_solver = om.NonlinearRunOnce()
p.model.linear_solver = om.DirectSolver()
p.setup(check=True, force_alloc_complex=True)
p['initial_states:y'] = 0.5
p['states:y'] = np.array([[0.50000000],
[1.425130208333333],
[2.639602661132812],
[4.006818970044454],
[5.301605229265987]])
p['state_integrals:y'] = np.array([[1.0],
[1.0],
[1.0],
[1.0]])
p.run_model()
p.model.run_apply_nonlinear()
# Test that the residuals of the states are the expected values
outputs = p.model.list_outputs(print_arrays=True, residuals=True, out_stream=None)
y_f = p['states:y'][1:, ...]
y_i = p['states:y'][:-1, ...]
dy_given = y_f - y_i
dy_computed = p['state_integrals:y']
expected_resids = np.zeros((num_seg + 1, 1))
expected_resids[1:, ...] = dy_given - dy_computed
op_dict = dict([op for op in outputs])
assert_near_equal(op_dict['continuity_comp.states:y']['resids'], expected_resids)
# Test the partials
cpd = p.check_partials(method='cs')
assert_check_partials(cpd)
def test_continuity_comp_scalar_nonlinearblockgs_fwd(self):
num_seg = 4
state_options = {'y': {'shape': (1,), 'units': 'm', 'targets': ['y'], 'fix_initial': True,
'fix_final': False, 'defect_scaler': None, 'defect_ref': None,
'lower': None, 'upper': None, 'connected_initial': False}}
p = om.Problem(model=om.Group())
p.model.add_subsystem('continuity_comp',
RungeKuttaStateContinuityComp(num_segments=num_seg,
state_options=state_options),
promotes_inputs=['*'],
promotes_outputs=['*'])
p.model.nonlinear_solver = om.NonlinearBlockGS(iprint=2)
p.model.linear_solver = om.DirectSolver()
p.setup(check=True, force_alloc_complex=True)
p['states:y'] = np.array([[0.50000000],
[1.425130208333333],
[2.639602661132812],
[4.006818970044454],
[5.301605229265987]])
p['state_integrals:y'] = np.array([[1.0],
[1.0],
[1.0],
[1.0]])
p.setup(check=True, force_alloc_complex=True)
p['states:y'] = np.array([[0.50000000],
[1.425130208333333],
[2.639602661132812],
[4.006818970044454],
[5.301605229265987]])
p.run_model()
# Test that the residuals of the states are the expected values
outputs = p.model.list_outputs(print_arrays=True, residuals=True, out_stream=None)
expected_resids = np.zeros((num_seg + 1, 1))
op_dict = dict([op for op in outputs])
assert_near_equal(op_dict['continuity_comp.states:y']['resids'], expected_resids)
# Test the partials
cpd = p.check_partials(method='cs', out_stream=None)
J_fwd = cpd['continuity_comp']['states:y', 'state_integrals:y']['J_fwd']
J_fd = cpd['continuity_comp']['states:y', 'state_integrals:y']['J_fd']
assert_near_equal(J_fwd, J_fd)
J_fwd = cpd['continuity_comp']['states:y', 'states:y']['J_fwd']
J_fd = cpd['continuity_comp']['states:y', 'states:y']['J_fd']
J_fd[0, 0] = -1.0
assert_near_equal(J_fwd, J_fd)
def test_continuity_comp_connected_scalar_nonlinearblockgs_fwd(self):
num_seg = 4
state_options = {'y': {'shape': (1,), 'units': 'm', 'targets': ['y'], 'fix_initial': True,
'fix_final': False, 'defect_scaler': None, 'defect_ref': None,
'lower': None, 'upper': None, 'connected_initial': True}}
p = om.Problem(model=om.Group())
ivc = p.model.add_subsystem('ivc', om.IndepVarComp(), promotes_outputs=['*'])
ivc.add_output('initial_states:y', units='m', shape=(1, 1))
p.model.add_subsystem('continuity_comp',
RungeKuttaStateContinuityComp(num_segments=num_seg,
state_options=state_options),
promotes_inputs=['*'],
promotes_outputs=['*'])
p.model.nonlinear_solver = om.NonlinearBlockGS(iprint=2)
p.model.linear_solver = om.DirectSolver()
p.setup(check=True, force_alloc_complex=True)
p['initial_states:y'] = 0.5
p['states:y'] = np.array([[0.50000000],
[1.425130208333333],
[2.639602661132812],
[4.006818970044454],
[5.301605229265987]])
p['state_integrals:y'] = np.array([[1.0],
[1.0],
[1.0],
[1.0]])
p.setup(check=True, force_alloc_complex=True)
p['states:y'] = np.array([[0.50000000],
[1.425130208333333],
[2.639602661132812],
[4.006818970044454],
[5.301605229265987]])
p.run_model()
# Test that the residuals of the states are the expected values
outputs = p.model.list_outputs(print_arrays=True, residuals=True, out_stream=None)
expected_resids = np.zeros((num_seg + 1, 1))
op_dict = dict([op for op in outputs])
assert_near_equal(op_dict['continuity_comp.states:y']['resids'], expected_resids)
# Test the partials
cpd = p.check_partials(method='cs', out_stream=None)
J_fwd = cpd['continuity_comp']['states:y', 'state_integrals:y']['J_fwd']
J_fd = cpd['continuity_comp']['states:y', 'state_integrals:y']['J_fd']
assert_near_equal(J_fwd, J_fd)
J_fwd = cpd['continuity_comp']['states:y', 'states:y']['J_fwd']
J_fd = cpd['continuity_comp']['states:y', 'states:y']['J_fd']
J_fd[0, 0] = -1.0
assert_near_equal(J_fwd, J_fd)
def test_continuity_comp_scalar_newton_fwd(self):
num_seg = 4
state_options = {'y': {'shape': (1,), 'units': 'm', 'targets': ['y'], 'fix_initial': True,
'fix_final': False, 'defect_scaler': None, 'defect_ref': None,
'lower': None, 'upper': None, 'connected_initial': False}}
p = om.Problem(model=om.Group())
p.model.add_subsystem('continuity_comp',
RungeKuttaStateContinuityComp(num_segments=num_seg,
state_options=state_options),
promotes_inputs=['*'],
promotes_outputs=['*'])
p.model.nonlinear_solver = om.NewtonSolver(iprint=2, solve_subsystems=True)
p.model.linear_solver = om.DirectSolver()
p.setup(check=True, force_alloc_complex=True)
p['states:y'] = np.array([[0.50000000],
[1.425130208333333],
[2.639602661132812],
[4.006818970044454],
[5.301605229265987]])
p['state_integrals:y'] = np.array([[1.0],
[1.0],
[1.0],
[1.0]])
p.run_model()
# Test that the residuals of the states are the expected values
outputs = p.model.list_outputs(print_arrays=True, residuals=True, out_stream=None)
expected_resids = np.zeros((num_seg + 1, 1))
op_dict = dict([op for op in outputs])
assert_near_equal(op_dict['continuity_comp.states:y']['resids'], expected_resids)
# Test the partials
cpd = p.check_partials(method='cs', out_stream=None)
J_fwd = cpd['continuity_comp']['states:y', 'state_integrals:y']['J_fwd']
J_fd = cpd['continuity_comp']['states:y', 'state_integrals:y']['J_fd']
assert_near_equal(J_fwd, J_fd)
J_fwd = cpd['continuity_comp']['states:y', 'states:y']['J_fwd']
J_fd = cpd['continuity_comp']['states:y', 'states:y']['J_fd']
J_fd[0, 0] = -1.0
assert_near_equal(J_fwd, J_fd)
def test_continuity_comp_vector_no_iteration_fwd(self):
num_seg = 2
state_options = {'y': {'shape': (2,), 'units': 'm', 'targets': ['y'],
'defect_ref': None, 'defect_scaler': None,
                               'lower': None, 'upper': None,
'connected_initial': False}}
p = om.Problem(model=om.Group())
p.model.add_subsystem('continuity_comp',
RungeKuttaStateContinuityComp(num_segments=num_seg,
state_options=state_options),
promotes_inputs=['*'],
promotes_outputs=['*'])
p.model.nonlinear_solver = om.NonlinearRunOnce()
p.model.linear_solver = om.DirectSolver()
p.setup(check=True, force_alloc_complex=True)
p['states:y'] = np.array([[0.50000000, 2.639602661132812],
[1.425130208333333, 4.006818970044454],
[2.639602661132812, 5.301605229265987]])
p['state_integrals:y'] = np.array([[1.0, 1.0],
[1.0, 1.0]])
p.run_model()
p.model.run_apply_nonlinear()
# Test that the residuals of the states are the expected values
outputs = p.model.list_outputs(print_arrays=True, residuals=True, out_stream=None)
y_f = p['states:y'][1:, ...]
y_i = p['states:y'][:-1, ...]
dy_given = y_f - y_i
dy_computed = p['state_integrals:y']
expected_resids = np.zeros((num_seg + 1,) + state_options['y']['shape'])
expected_resids[1:, ...] = dy_given - dy_computed
op_dict = dict([op for op in outputs])
assert_near_equal(op_dict['continuity_comp.states:y']['resids'], expected_resids)
# Test the partials
cpd = p.check_partials(method='cs')
J_fwd = cpd['continuity_comp']['states:y', 'state_integrals:y']['J_fwd']
J_fd = cpd['continuity_comp']['states:y', 'state_integrals:y']['J_fd']
assert_near_equal(J_fwd, J_fd)
J_fwd = cpd['continuity_comp']['states:y', 'states:y']['J_fwd']
J_fd = cpd['continuity_comp']['states:y', 'states:y']['J_fd']
size = np.prod(state_options['y']['shape'])
J_fd[:size, :size] = -np.eye(size)
assert_near_equal(J_fwd, J_fd)
def test_continuity_comp_vector_nonlinearblockgs_fwd(self):
num_seg = 2
state_options = {'y': {'shape': (2,), 'units': 'm', 'targets': ['y'], 'fix_initial': True,
'fix_final': False, 'defect_ref': None, 'lower': None, 'upper': None,
'connected_initial': False}}
p = om.Problem(model=om.Group())
p.model.add_subsystem('continuity_comp',
RungeKuttaStateContinuityComp(num_segments=num_seg,
state_options=state_options),
promotes_inputs=['*'],
promotes_outputs=['*'])
p.model.nonlinear_solver = om.NonlinearBlockGS(iprint=2)
p.model.linear_solver = om.DirectSolver()
p.setup(check=True, force_alloc_complex=True)
p['states:y'] = np.array([[0.50000000, 2.639602661132812],
[1.425130208333333, 4.006818970044454],
[2.639602661132812, 5.301605229265987]])
p['state_integrals:y'] = np.array([[1.0, 1.0],
[1.0, 1.0]])
p.run_model()
# Test that the residuals of the states are the expected values
outputs = p.model.list_outputs(print_arrays=True, residuals=True, out_stream=None)
expected_resids = np.zeros((num_seg + 1, 2))
op_dict = dict([op for op in outputs])
assert_near_equal(op_dict['continuity_comp.states:y']['resids'], expected_resids)
# Test the partials
cpd = p.check_partials(method='cs', out_stream=None)
J_fwd = cpd['continuity_comp']['states:y', 'state_integrals:y']['J_fwd']
J_fd = cpd['continuity_comp']['states:y', 'state_integrals:y']['J_fd']
assert_near_equal(J_fwd, J_fd)
J_fwd = cpd['continuity_comp']['states:y', 'states:y']['J_fwd']
J_fd = cpd['continuity_comp']['states:y', 'states:y']['J_fd']
size = np.prod(state_options['y']['shape'])
J_fd[:size, :size] = -np.eye(size)
assert_near_equal(J_fwd, J_fd)
def test_continuity_comp_connected_vector_nonlinearblockgs_fwd(self):
num_seg = 2
state_options = {'y': {'shape': (2,), 'units': 'm', 'targets': ['y'], 'fix_initial': False,
'fix_final': False, 'defect_ref': None, 'lower': None, 'upper': None,
'connected_initial': True}}
p = om.Problem(model=om.Group())
ivc = p.model.add_subsystem('ivc', om.IndepVarComp(), promotes_outputs=['*'])
ivc.add_output('initial_states:y', units='m', shape=(1, 2))
p.model.add_subsystem('continuity_comp',
RungeKuttaStateContinuityComp(num_segments=num_seg,
state_options=state_options),
promotes_inputs=['*'],
promotes_outputs=['*'])
p.model.nonlinear_solver = om.NonlinearBlockGS(iprint=2)
p.model.linear_solver = om.DirectSolver()
p.setup(check=True, force_alloc_complex=True)
p['initial_states:y'] = np.array([[0.50000000, 2.639602661132812]])
p['states:y'] = np.array([[0.50000000, 2.639602661132812],
[1.425130208333333, 4.006818970044454],
[2.639602661132812, 5.301605229265987]])
p['state_integrals:y'] = np.array([[1.0, 1.0],
[1.0, 1.0]])
p.run_model()
# Test that the residuals of the states are the expected values
outputs = p.model.list_outputs(print_arrays=True, residuals=True, out_stream=None)
expected_resids = np.zeros((num_seg + 1, 2))
op_dict = dict([op for op in outputs])
assert_near_equal(op_dict['continuity_comp.states:y']['resids'], expected_resids)
# Test the partials
cpd = p.check_partials(method='cs', out_stream=None)
J_fwd = cpd['continuity_comp']['states:y', 'state_integrals:y']['J_fwd']
J_fd = cpd['continuity_comp']['states:y', 'state_integrals:y']['J_fd']
assert_near_equal(J_fwd, J_fd)
J_fwd = cpd['continuity_comp']['states:y', 'states:y']['J_fwd']
J_fd = cpd['continuity_comp']['states:y', 'states:y']['J_fd']
size = np.prod(state_options['y']['shape'])
J_fd[:size, :size] = -np.eye(size)
assert_near_equal(J_fwd, J_fd)
def test_continuity_comp_vector_newton_fwd(self):
num_seg = 2
state_options = {'y': {'shape': (2,), 'units': 'm', 'targets': ['y'], 'fix_initial': True,
'fix_final': False, 'defect_ref': 1, 'lower': None, 'upper': None,
'connected_initial': False}}
p = om.Problem(model=om.Group())
p.model.add_subsystem('continuity_comp',
RungeKuttaStateContinuityComp(num_segments=num_seg,
state_options=state_options),
promotes_inputs=['*'],
promotes_outputs=['*'])
p.model.nonlinear_solver = om.NewtonSolver(iprint=2, solve_subsystems=True)
p.model.linear_solver = om.DirectSolver()
p.setup(check=True, force_alloc_complex=True)
p['states:y'] = np.array([[0.50000000, 2.639602661132812],
[1.425130208333333, 4.006818970044454],
[2.639602661132812, 5.301605229265987]])
p['state_integrals:y'] = np.array([[1.0, 1.0],
[1.0, 1.0]])
p.run_model()
# Test that the residuals of the states are the expected values
outputs = p.model.list_outputs(print_arrays=True, residuals=True, out_stream=None)
expected_resids = np.zeros((num_seg + 1, 2))
op_dict = dict([op for op in outputs])
assert_near_equal(op_dict['continuity_comp.states:y']['resids'], expected_resids)
# Test the partials
cpd = p.check_partials(method='cs', out_stream=None)
J_fwd = cpd['continuity_comp']['states:y', 'state_integrals:y']['J_fwd']
J_fd = cpd['continuity_comp']['states:y', 'state_integrals:y']['J_fd']
assert_near_equal(J_fwd, J_fd)
J_fwd = cpd['continuity_comp']['states:y', 'states:y']['J_fwd']
J_fd = cpd['continuity_comp']['states:y', 'states:y']['J_fd']
size = np.prod(state_options['y']['shape'])
J_fd[:size, :size] = -np.eye(size)
assert_near_equal(J_fwd, J_fd)
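# Standard entry point so this file can be run directly; an assumption, since
# these tests are normally collected by a test runner such as pytest.
if __name__ == '__main__':
    unittest.main()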
| ["noreply@github.com"] | noreply@github.com |
08495aac6e8e2c709665b0603e0eb999adca7450 | fabe9948b3821e1edf378e167c13c6a6fc351180 | /reminderbot.py | e5cc8adafb5424e24f426c94ae85dd3b3135b534 | [] | no_license | pixelistik/twitter-reminderbot | 90bdac4570c54fcc6564a434d499740999aa55ac | b5bbc79c86005d3d5811c9ce7ab064645fbaace0 | refs/heads/master | 2016-09-06T06:44:39.912399 | 2015-09-20T21:25:23 | 2015-09-20T21:25:23 | 42,602,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,766 | py | import random
import tweepy
import click
# The original module references _quotes without defining it; this placeholder
# list makes the script runnable (replace with real reminder texts).
_quotes = [
    "Reminder: take a short break.",
    "Reminder: drink some water.",
]
def _get_random_quote():
    return random.choice(_quotes)
@click.command()
@click.argument("consumer_key")
@click.argument("consumer_secret")
def authenticate(consumer_key, consumer_secret):
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
try:
url = auth.get_authorization_url()
except tweepy.TweepError:
click.echo("Failed to get request token.")
click.echo("Setting up authentication. Please visit this URL:")
click.echo(url)
verifier = click.prompt("Enter the authorisation PIN from Twitter")
try:
auth.get_access_token(verifier)
except tweepy.TweepError:
click.echo("Error! Failed to get access token.")
with open("OAUTH_CONSUMER", "w") as f:
click.echo(consumer_key, file=f)
click.echo(consumer_secret, file=f)
with open("OAUTH_TOKEN", "w") as f:
click.echo(auth.access_token, file=f)
click.echo(auth.access_token_secret, file=f)
api = tweepy.API(auth)
public_tweets = api.home_timeline()
for tweet in public_tweets:
click.echo(tweet.text)
@click.command()
def tweet():
with open("OAUTH_CONSUMER", "r") as f:
lines = f.readlines()
consumer_key = lines[0]
consumer_secret = lines[1]
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
with open("OAUTH_TOKEN", "r") as f:
lines = f.readlines()
    access_token = lines[0].strip()
    access_token_secret = lines[1].strip()
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
api.update_status(_get_random_quote())
@click.group()
def cli():
pass
cli.add_command(authenticate)
cli.add_command(tweet)
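# Example CLI usage (the consumer key/secret are placeholders supplied by
# Twitter when registering the app):
#
#     python reminderbot.py authenticate CONSUMER_KEY CONSUMER_SECRET
#     python reminderbot.py tweet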
if __name__ == '__main__':
    cli() | ["code@pixelistik.de"] | code@pixelistik.de |
044c874a51c20972e607717d9b6c841e7fcf315f | 8ffabc789e8083c3d2a793378b03e9473a12d0ab | /lotogame.py | 2289ecd87def6ecb16a08b2438bdfb54ea9f79b3 | ["MIT"] | permissive | vv31415926/lesson_11_Lite | 8c2065712bf00a9ce91c153e52b92a61b4bf8fa5 | d58a0d6325dec3fdcc7052bc407f8a6743a7a3ec | refs/heads/main | 2023-08-15T16:01:55.283401 | 2021-10-08T16:41:36 | 2021-10-08T16:41:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | '''
0. In the "Loto Game" project, switch to a new branch to add the new functionality;
1. Write tests for the project using pytest or unittest
2. Measure the test coverage percentage with pytest-cov:
A usage example can be found here (Coverage.py: measuring how much of the code is tested): https://sohabr.net/habr/post/448798/?version=337006
or in the official documentation: https://pytest-cov.readthedocs.io/en/latest/
The higher the coverage percentage the better; ideally 100%
4. Create a pull request merging master with the new test branch, and submit the pull request link as the homework solution
'''
from go_game import Go_game
game = Go_game()
game.start_game() | [
"valery31415926@mail.ru"
] | valery31415926@mail.ru |
15f6bc9256fda61d77493197a7a52077e0e5d151 | 778f2369c6c2fe2093994e0eedbf77df9a2b20d1 | /test/categoryFeature.py | 3261714123ecb019601ff4ee53aac71d281ae387 | [] | no_license | madhumitha27/ResidentMaintenanceTracker | d65e4f3a67764b9f42a6e04021e64c52344e029e | 5bf43819ec60479426dbccdfb3c4d771d832017b | refs/heads/master | 2021-07-12T19:26:31.080227 | 2020-11-16T21:54:33 | 2020-11-16T21:54:33 | 248,668,371 | 0 | 0 | null | 2021-06-10T22:40:25 | 2020-03-20T04:32:24 | JavaScript | UTF-8 | Python | false | false | 1,610 | py | import unittest
import time
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
class categoryFeature(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome()
def test_blog(self):
user = "instructor"
pwd = "maverick1a"
driver = self.driver
driver.maximize_window()
driver.get("http://madaad.pythonanywhere.com/admin/")
time.sleep(1)
loginEle = driver.find_element_by_id("id_username")
loginEle.send_keys(user)
loginEle = driver.find_element_by_id("id_password")
loginEle.send_keys(pwd)
loginEle.send_keys(Keys.RETURN)
assert "Logged In"
time.sleep(1)
categoryLink = driver.find_element_by_xpath ( "/html/body/div/div[2]/div[1]/div[2]/table/tbody/tr[1]/th/a" ).click ( )
time.sleep(1)
addCategory = driver.find_element_by_xpath ("/html/body/div/div[3]/div/ul/li/a" ).click ( )
time.sleep ( 1)
unitID = driver.find_element_by_id ( 'id_catID' )
unitID.send_keys ( "777" )
time.sleep ( 1 )
aptNo = driver.find_element_by_id ( 'id_type' )
aptNo.send_keys ( "Building Interior" )
time.sleep ( 1 )
saveButton = driver.find_element_by_xpath ( " /html/body/div[1]/div[3]/div/form/div/div/input[1]" ).click ( )
time.sleep ( 1 )
logout = driver.find_element_by_xpath (
"/html/body/div[1]/div[1]/div[2]/a[3]" ).click ( )
def tearDown(self):
self.driver.close()
if __name__ == "__main__":
unittest.main()
| [
"aparabm@gmail.com"
] | aparabm@gmail.com |
5163fdf45578423513a81f0f7f1f50a3f0e5c675 | dfac1f9def9030c3a87ea800688f52b0e73a7778 | /lab4/bot/tg-bot-main.py | e419be7a93185ddfc06c54e1eb28d13dff498b4c | [] | no_license | 4kix/ml-labs-2nd-term | 8729b3c71afc5bbcb63ec7a06c5c017db1e1a4f4 | f0e71242a1f75934997a0ff071bebfb108eb3184 | refs/heads/master | 2022-04-28T18:00:45.191714 | 2020-04-23T08:50:53 | 2020-04-23T08:50:53 | 253,876,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | import telebot
import constant
import number_recognizer
import keras.backend.tensorflow_backend as tb
tb._SYMBOLIC_SCOPE.value = True
bot = telebot.TeleBot(constant.token)
def process_photo_message(message):
print('message.photo =', message.photo)
fileID = message.photo[-1].file_id
print('fileID =', fileID)
file = bot.get_file(fileID)
print('file.file_path =', file.file_path)
downloaded_file = bot.download_file(file.file_path)
with open("image.jpg", 'wb') as new_file:
new_file.write(downloaded_file)
number_length, number = number_recognizer.recognize(downloaded_file)
return 'My prediction is -\nNumber length: {}\nNumber: {}'.format(number_length, number)
@bot.message_handler(content_types=['photo'])
def photo(message):
recognized_num = process_photo_message(message)
bot.send_message(message.chat.id, recognized_num)
bot.polling()
| [
"falko.ilya@gmail.com"
] | falko.ilya@gmail.com |
9c12bac03eea6ed28261ea89f8c3810743a52f26 | 2ca88d41f1bb5042338faec50b2af11931db0bdd | /src/gluonts/nursery/tsbench/src/cli/analysis/__init__.py | b939423a365224aa385b570ac9ecec6deacdf291 | [
"Apache-2.0"
] | permissive | canerturkmen/gluon-ts | 2f2d46f9b01f5ee07a51a11e822b1c72c2475caa | 57ae07f571ff123eac04af077870c1f216f99d5c | refs/heads/master | 2022-09-10T23:30:26.162245 | 2022-04-20T12:44:01 | 2022-04-20T12:44:01 | 192,873,578 | 1 | 2 | Apache-2.0 | 2020-08-04T16:58:48 | 2019-06-20T07:43:07 | Python | UTF-8 | Python | false | false | 848 | py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from ._main import analysis
from .ensemble import ensemble # type: ignore
from .ensemble_recommender import ensemble_recommender # type: ignore
from .recommender import recommender # type: ignore
from .surrogate import surrogate # type: ignore
__all__ = ["analysis"]
| [
"noreply@github.com"
] | noreply@github.com |
f569953a3484171990dbe8280d7ddac1f7740dff | 8ca5d6daf0df67ec038be55680d93fc3ba03d5e0 | /Functions/squaring.py | 09f9661afcfa2eb1d279f69c75121c71752dd6b3 | [
"MIT"
] | permissive | Parikshit-njit/Calculator-IS601 | 83071ca63d4f9e0c81c6d6f8e6e5603f4655c839 | 929317871afec437fa9a903d9fe46dcb66da5d60 | refs/heads/main | 2023-06-16T23:16:01.495099 | 2021-07-12T12:53:06 | 2021-07-12T12:53:06 | 383,937,499 | 1 | 0 | MIT | 2021-07-10T18:40:23 | 2021-07-07T22:21:26 | Python | UTF-8 | Python | false | false | 89 | py | class Squaring:
@staticmethod
def squaring(a):
c = a * a
return c | [
"pn24@njit.edu"
] | pn24@njit.edu |
aaf1fe559120d1df56eeea0850d7137de429aede | 130243453c861ff506fdcb5db9067c5a8e170107 | /products/migrations/0011_productmodel_quantity.py | 730a9c9e44b15dcb86d7e803d32c8ac4180ed43b | [] | no_license | DaclynsDesign/Ecommerce_finale | 1c024394f79c2a9f2262e2e2010b61ff05570053 | 6b4707bacc109d62a75f82fd73f0a0ac52b95f91 | refs/heads/main | 2023-01-30T08:00:44.985710 | 2020-12-16T18:11:03 | 2020-12-16T18:11:03 | 319,884,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-11-25 14:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0010_auto_20201026_1731'),
]
operations = [
migrations.AddField(
model_name='productmodel',
name='quantity',
field=models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4)], default=2),
),
]
| [
"daclynsdesigns@gmail.com"
] | daclynsdesigns@gmail.com |
c6382167adf6ceb481f50967853db6be6262c66f | 444dc060ba378de939d10e6deacd1eb1a85c57f8 | /004conditionals and booleans.py | 853cfd6c4c83edc8064a242c2d1b12226eb4a954 | [] | no_license | Jaspreetkumar1999/python-series | e238f0679e809548c5c289896ac20f6d28c6346b | fab75c224826aeb25a5001709cb9d3bd3803f82f | refs/heads/master | 2021-05-21T08:28:36.874605 | 2020-04-09T16:49:52 | 2020-04-09T16:49:52 | 252,619,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,722 | py | # language ='java'
# # language='python'
# if language == 'python':
# print('language is python')
# elif language=='java':
# print('language is java')
# elif language == 'javascript':
# print('language is javascript')
# else :
# print('not match')
# python does not have switch/case statements; a chain of elif conditions handles those cases well
# -------------------------------------------------boolean operations we have----------------------------------------------
# user ='admin'
# legged_in ="false"
# if user=='admin' and legged_in: AND operation if both the conditions are true
# print('you are allowed')
# if user=='admin' and legged_in:
# print('you are most welcome')
# else :
# print('sorry')
# user ='admin1'
# legged_in =False
# if user=='admin' or legged_in: OR operation
# print('welcome')
# else:
# print('sorry')
# if not legged_in =='true': It will simply perform NOT operation convert true to false and false into true
# print("please logged in")
# else:
# print("welcome")
a =[1,2,3]
# b =[1,2,3]
# print(a==b) it will return true bcz there value is same
# print(a is b) it will return false bcz these are two objects in memory
# print(id(a))
# print(id(b))
# b = a
# print(a is b) it will true bcz now these two are same objects
# condition = None it will also consider as false
# condition = 0 it will also consider as false
# condition = -2 ,2 if assign condition as non zero value it will consider as true
# condition = [] if there is any empty sequence(list,set,tuple, etc) it will consider it as false
condition = []  # pick one of the example values above so this check actually runs
if condition:
print('evaluated to true')
else:
print('evaluated to false')
| [
"noreply@github.com"
] | noreply@github.com |
ec4b8692af164801341e212f0d592fea4649da3d | ecc3806d0a417bfcc6d37f9bb5107bf9fa72275f | /osmanager/bin/pyweb/luoyuninfo.py | 8adf30c3e275d224fdaf0f08a10bfa6ad0335ba6 | [] | no_license | luoyun/LuoYunCloud | ef6fa76e8b3a0880392b71d7569b304009041bf1 | ee8fedd988cc39375dda69588c2f7bba1223fbe1 | refs/heads/master | 2021-12-03T05:26:47.656130 | 2021-11-30T13:16:34 | 2021-11-30T13:16:34 | 3,886,020 | 6 | 10 | null | 2013-06-25T14:04:23 | 2012-03-31T14:33:55 | JavaScript | UTF-8 | Python | false | false | 1,407 | py | import sys
import os
import datetime
def get_sysinfo(hostname = None):
"""Get system info, including
$hostname $application $version1 $version2 $curtime"""
if hostname == None or hostname == "":
f = os.popen("/sbin/ifconfig eth0")
s = f.read()
if f.close() == None:
for l in s.split('\n'):
l = l.strip()
inet = l.split(':')[0]
if inet == 'inet addr':
l = l.split(':')[1]
hostname = l.split(' ')[0]
break
version1 = "unknown"
application = "unknown"
version2 = "unknown"
try:
with open("/LuoYun/build/VERSION") as f:
s = f.readlines()
version1 = s[0].split()[1]
application = s[1].split()[0]
application = application.strip(':')
version2 = s[1].split()[1]
except (IOError, OSError, IndexError):
# open() raises IOError, not os.error, and a malformed file raises IndexError
print "Warning: reading /LuoYun/build/VERSION error\n"
curtime = datetime.datetime.now().timetuple()
curtime = "%02d:%02d" % (curtime[3], curtime[4])
return dict(hostname=hostname.encode('utf-8'),
version1=version1.encode('utf-8'),
application=application.encode('utf-8'),
version2=version2.encode('utf-8'),
curtime=curtime.encode('utf-8'))
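# Example return value (hypothetical host):
# {'hostname': '10.0.0.5', 'version1': '0.5', 'application': 'osmanager',
#  'version2': '0.5', 'curtime': '11:02'}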
if __name__ == '__main__':
print "retrieve sys info"
sys.exit(0)
| [
"zengdongwu@hotmail.com"
] | zengdongwu@hotmail.com |
a45fb008d1fd51283765ef6b0e553d81306c6097 | 790edb2db1bc2fdf676a6de080684c2c4f18f013 | /myapp/views/views_spazio_aziende.py | af1e42ff5af1e839445f92a6772707167340169e | [] | no_license | TajinderSingh-1/richiestaTesi | 99c3aa25bfdb7ead59013bdd280dfbcc9c15b9f9 | d58071037edd4b9defedaa3e4a57b93a34b9e338 | refs/heads/master | 2021-08-08T05:06:57.127591 | 2017-11-09T15:43:31 | 2017-11-09T15:43:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,544 | py | from django.shortcuts import redirect, render
from myapp.forms import OffertaForm
from myapp.models import *
def spazioAziende(request):
offerte = Offerta.objects.none()  # empty queryset default instead of the bare model class
if request.method == 'POST':
form = OffertaForm(request.POST)
if form.is_valid():
offerta = Offerta()
offerta.descrizione = form['descrizione'].value()
offerta.titolo = form['titolo'].value()
docenteapp = form['docente'].value()
# If the company did not specify a supervising professor in the form, the admin will have to decide whether to approve the submitted offer
if docenteapp != '':
docente = Docente.objects.get(id=form['docente'].value())
offerta.docente = docente
corso = Corso.objects.get(id=form['corso'].value())
offerta.corso = corso
user = request.user
azienda = Azienda.objects.get(user=user)
offerta.azienda = azienda
offerta.durata = form['durata'].value()
offerta.save()
return redirect('myapp:spazioAziende')
else:
form = OffertaForm()
user = request.user
if request.user.is_authenticated():
if user.groups.filter(name='Aziende').exists():
# list of all the offers submitted by a given company
offerte = Offerta.objects.filter(azienda=Azienda.objects.get(user=user))
return render(request, 'myapp/spazioAziende.html', {'offerte': offerte,
'form': form})
| [
"tajisingh07@gmail.com"
] | tajisingh07@gmail.com |
f8927cbdf465ddbc6764f1d6a2eaf85c885e1f4a | fc9f1521b412e8347674a680ef2fcae22faa8c7c | /tests/supply_E3631A.py | 444322f10b91e018e4a69d114b6a451fdf842aa0 | [
"MIT"
] | permissive | BlancaCC/granaSat_batteries | 625a0b9987c994996c46a338ce45e4d2c1352e48 | 9302feddddc62c1d26ca4faa7684edd0eeb95aa1 | refs/heads/master | 2020-03-22T07:12:41.826162 | 2018-07-24T08:43:26 | 2018-07-24T08:43:26 | 139,685,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,427 | py | class SupplyE3631A:
def __init__(self, instr):
if 'HEWLETT-PACKARD,E3631A' not in instr.query("*IDN?"):
raise NameError('Device is not HEWLETT-PACKARD E3631A')
else:
self.instr = instr
def output_on(self):
self.instr.write('OUTP ON')
def output_off(self):
self.instr.write('OUTP OFF')
def select_output(self, gpib):
if gpib == '+6V':
self.instr.write('INST P6V')
elif gpib == '+25V':
self.instr.write('INST P25V')
elif gpib == '-25V':
self.instr.write('INST N25V')
else:
raise NameError('Not an argument')
def limit_current(self, curr):
self.instr.write(f'CURR {curr}')
def set_voltage(self, volt):
self.instr.write(f'VOLT {volt}')
def current(self) -> 'Amperes':
return float(self.instr.query('MEAS:CURR?'))
def voltage(self) -> 'Volts':
return float(self.instr.query('MEAS:VOLT?'))
def write_screen(self, txt):
self.instr.write(f'DISP:TEXT "{txt}"')
if __name__ == '__main__':
import visa
rm = visa.ResourceManager()
instr = rm.open_resource('GPIB0::3::INSTR')
sup = SupplyE3631A(instr)
sup.output_off()
sup.limit_current(2.2)
sup.set_voltage(5.4)
sup.output_on()
while True:
print(f'Voltage: {sup.voltage()}, Current: {sup.current()}')
| [
"javier.luzonh@gmail.com"
] | javier.luzonh@gmail.com |
e306fd798e8a21cad730210f203b60e62fb2c784 | 37e1bedac2fdcc9e6f84d0ed459b7a4154fd385b | /Python 3.8+/Problem 357/main.py | 149457735e7b40a1a47542141a12826b03283dc2 | [] | no_license | RBaner/Project_Euler | e266239371aa0ff7c46c4be8cba2a145cfb87598 | fb2fe0fec3cfd324f9867abf3f1d72fbff4de011 | refs/heads/master | 2022-09-03T17:20:29.535940 | 2022-09-03T00:34:28 | 2022-09-03T00:34:28 | 180,717,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,517 | py | #This code is written by Rohit Banerjee (2020)
#In reference to Project Euler problem 357
import sympy
import time
def pgen(n,show=False):
divisor_list = sympy.divisors(n)
for i in divisor_list[:int(len(divisor_list)/2)+1]:
if show:
print("%d+%d=%d"%(i,int(n/i),i+int(n/i)))
if not sympy.isprime(i+int(n/i)):
return(False)
return(True)
def main(n):
"""Cases below 10 are added manually, after that we first make sure the number
isn't =4(mod10)(implies a 2^2 in prime factorization)
or =6(mod10)(implies there exists a d+(n/d) that is divisible
by 5). Then we screen for numbers for which the mobius function
returns 1 or -1 so we don't check numbers with non-distinct
prime factors. Any numbers left are manually checked by pgen(i-1)"""
output = 1 + 2 + 6
for i in sympy.primerange(8,n):
if (i-1)%10 in [2,8,0]:
if abs(sympy.mobius(i-1)):
if pgen(i-1):
output += (i-1)
return(output)
def brute_force(n):
"""This function was used for testing purposes and is
the most naive but surefire way to get the solution sum"""
output = 0
for i in range(1,n+1):
if pgen(i):
output+=i
return(output)
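# Hypothetical sanity check: for small limits the fast and naive versions
# should agree, e.g. main(1000) == brute_force(996), since every qualifying
# n has n + 1 prime and is therefore visited by the prime iteration.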
def problem():
"""Run this to get the solution sum as well as computation time
in seconds"""
start = time.time()
print(main(100000000))
end = time.time()
print(end-start)
| [
"rohitbanerje@gmail.com"
] | rohitbanerje@gmail.com |
42bdc6a491fc5b9e2281a2f167e8e94f08cee782 | 3da168ce3709b2b9c003c1070f2182b922d63d92 | /sqlitePython.py | 8c3063da34cf8fe5618961059a044d9b3be1c88d | [] | no_license | amjulius1008/general_code | b0af22bdeaf4230eeb4a22a0a48250a6807781e6 | 149dd99928ea0e4f1ac4d6b648153e167a6ba763 | refs/heads/master | 2020-06-23T16:17:47.154708 | 2019-09-20T19:21:44 | 2019-09-20T19:21:44 | 198,676,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 31 10:41:17 2019
@author: amjuli
"""
def df2sqlite(dataframe, db_name = "import.sqlite", tbl_name = "import"):
import sqlite3
conn=sqlite3.connect(db_name)
cur = conn.cursor()
wildcards = ','.join(['?'] * len(dataframe.columns))
data = [tuple(x) for x in dataframe.values]
cur.execute("drop table if exists %s" % tbl_name)
col_str = '"' + '","'.join(dataframe.columns) + '"'
cur.execute("create table %s (%s)" % (tbl_name, col_str))
cur.executemany("insert into %s values(%s)" % (tbl_name, wildcards), data)
conn.commit()
conn.close()
def listTables(db_name):
import sqlite3
con = sqlite3.connect(db_name)
mycur = con.cursor()
mycur.execute("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;")
available_table=(mycur.fetchall())
return available_table
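# Example usage (hypothetical file and table names; dbConnect is defined below):
# import pandas as pd
# df2sqlite(pd.DataFrame({"a": [1, 2]}), "demo.sqlite", "t")
# print(listTables("demo.sqlite"))
# print(dbConnect("demo.sqlite", "select * from t"))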
def dbConnect(filePath,query):
import sqlite3
import pandas as pd
con = sqlite3.connect(filePath)
data = pd.read_sql_query(query, con)
con.close()
return data | [
"noreply@github.com"
] | noreply@github.com |
697aa1456f23bec2ddb47539e96eb929f3db9d54 | bbf475f0648bd8e49c56e8efc9b2f44e1d5a436a | /models/hyperseg_v1_0_unify.py | 6cf82a5ba7d34c94b738f05f081310a23728ef6e | [
"CC0-1.0"
] | permissive | leo-hao/hyperseg | cf212ef7b38723264d92edadfab14f0a7cf636ad | ad2c0582363347c56726a751d2a467e2d895fc7d | refs/heads/main | 2023-08-11T19:46:09.344232 | 2021-09-29T01:53:57 | 2021-09-29T01:53:57 | 410,199,646 | 0 | 0 | CC0-1.0 | 2021-09-25T06:53:31 | 2021-09-25T06:53:30 | null | UTF-8 | Python | false | false | 31,622 | py | import numbers
import numpy as np
from itertools import groupby
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from hyperseg.models.layers.meta_conv import MetaConv2d
from hyperseg.models.layers.meta_sequential import MetaSequential
class HyperGen(nn.Module):
""" Hypernetwork generator comprised of a backbone network, weight mapper, and a decoder.
Args:
backbone (nn.Module factory): Backbone network
weight_mapper (nn.Module factory): Weight mapper network.
in_nc (int): input number of channels.
num_classes (int): output number of classes.
kernel_sizes (int): the kernel size of the decoder layers.
level_layers (int): number of layers in each level of the decoder.
level_channels (list of int, optional): If specified, sets the output channels of each level in the decoder.
expand_ratio (int): inverted residual block's expansion ratio in the decoder.
groups (int, optional): Number of blocked connections from input channels to output channels.
weight_groups (int, optional): per level signal to weights groups in the decoder.
inference_hflip (bool): If true, enables horizontal flip of input tensor.
inference_gather (str): Inference gather type: ``mean'' or ``max''.
with_out_fc (bool): If True, add a final fully connected layer to the decoder.
decoder_groups (int, optional): per level groups in the decoder.
decoder_dropout (float): If specified, enables dropout with the given probability.
coords_res (list of tuple of int, optional): list of inference resolutions for caching positional embedding.
unify_level (int, optional): the starting level to unify the signal to weights operation from.
"""
def __init__(self, backbone, weight_mapper, in_nc=3, num_classes=3, kernel_sizes=3, level_layers=1,
level_channels=None, expand_ratio=1, groups=1, weight_groups=1, inference_hflip=False,
inference_gather='mean', with_out_fc=False, decoder_groups=1, decoder_dropout=None, coords_res=None,
unify_level=None):
super(HyperGen, self).__init__()
self.inference_hflip = inference_hflip
self.inference_gather = inference_gather
self.backbone = backbone()
feat_channels = [in_nc] + self.backbone.feat_channels[:-1]
self.decoder = MultiScaleDecoder(feat_channels, self.backbone.feat_channels[-1], num_classes, kernel_sizes,
level_layers, level_channels, with_out_fc=with_out_fc, out_kernel_size=1,
expand_ratio=expand_ratio, groups=decoder_groups, weight_groups=weight_groups,
dropout=decoder_dropout, coords_res=coords_res, unify_level=unify_level)
self.weight_mapper = weight_mapper(self.backbone.feat_channels[-1], self.decoder.param_groups)
@property
def hyper_params(self):
return self.decoder.hyper_params
def process_single_tensor(self, x, hflip=False):
x = torch.flip(x, [-1]) if hflip else x
features = self.backbone(x)
weights = self.weight_mapper(features[-1])
x = [x] + features[:-1]
x = self.decoder(x, weights)
x = torch.flip(x, [-1]) if hflip else x
return x
def gather_results(self, x, y=None):
assert x is not None
if y is None:
return x
if self.inference_gather == 'mean':
return (x + y) * 0.5
else:
return torch.max(x, y)
def forward(self, x):
assert isinstance(x, (list, tuple, torch.Tensor)), f'x must be of type list, tuple, or tensor'
if isinstance(x, torch.Tensor):
return self.process_single_tensor(x)
# Note: the first pyramid will determine the output resolution
out_res = x[0].shape[2:]
out = None
for p in x:
if self.inference_hflip:
p = torch.max(self.process_single_tensor(p), self.process_single_tensor(p, hflip=True))
else:
p = self.process_single_tensor(p)
# Resize current image to output resolution if necessary
if p.shape[2:] != out_res:
p = F.interpolate(p, out_res, mode='bilinear', align_corners=False)
out = self.gather_results(p, out)
return out
class MultiScaleDecoder(nn.Module):
""" Dynamic multi-scale decoder.
Args:
feat_channels (list of int): per level input feature channels.
signal_channels (list of int): per level input signal channels.
num_classes (int): output number of classes.
kernel_sizes (int): the kernel size of the layers.
level_layers (int): number of layers in each level.
level_channels (list of int, optional): If specified, sets the output channels of each level.
norm_layer (nn.Module): Type of feature normalization layer
act_layer (nn.Module): Type of activation layer
out_kernel_size (int): kernel size of the final output layer.
expand_ratio (int): inverted residual block's expansion ratio.
groups (int, optional): number of blocked connections from input channels to output channels.
weight_groups (int, optional): per level signal-to-weights groups.
with_out_fc (bool): If True, add a final fully connected layer.
dropout (float): If specified, enables dropout with the given probability.
coords_res (list of tuple of int, optional): list of inference resolutions for caching positional embedding.
unify_level (int, optional): the starting level to unify the signal to weights operation from.
"""
def __init__(self, feat_channels, signal_channels, num_classes=3, kernel_sizes=3, level_layers=1,
level_channels=None, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU6(inplace=True), out_kernel_size=1,
expand_ratio=1, groups=1, weight_groups=1, with_out_fc=False, dropout=None,
coords_res=None, unify_level=None): # must be a list of tuples
super(MultiScaleDecoder, self).__init__()
if isinstance(kernel_sizes, numbers.Number):
kernel_sizes = (kernel_sizes,) * len(level_channels)
if isinstance(level_layers, numbers.Number):
level_layers = (level_layers,) * len(level_channels)
if isinstance(expand_ratio, numbers.Number):
expand_ratio = (expand_ratio,) * len(level_channels)
assert len(kernel_sizes) == len(level_channels), \
f'kernel_sizes ({len(kernel_sizes)}) must be of size {len(level_channels)}'
assert len(level_layers) == len(level_channels), \
f'level_layers ({len(level_layers)}) must be of size {len(level_channels)}'
assert len(expand_ratio) == len(level_channels), \
f'expand_ratio ({len(expand_ratio)}) must be of size {len(level_channels)}'
if isinstance(groups, (list, tuple)):
assert len(groups) == len(level_channels), f'groups ({len(groups)}) must be of size {len(level_channels)}'
self.level_layers = level_layers
self.levels = len(level_channels)
self.unify_level = unify_level
self.layer_params = []
feat_channels = feat_channels[::-1] # Reverse the order of the feature channels
self.coords_cache = {}
self.weight_groups = weight_groups
self.level_blocks = nn.ModuleList()
self.weight_blocks = nn.ModuleList()
self._ranges = [0]
# For each level
prev_channels = 0
for level in range(self.levels):
curr_ngf = feat_channels[level]
curr_out_ngf = curr_ngf if level_channels is None else level_channels[level]
prev_channels += curr_ngf # Accommodate the previous number of channels
curr_layers = []
kernel_size = kernel_sizes[level]
# For each layer in the current level
for layer in range(self.level_layers[level]):
if (not with_out_fc) and (level == (self.levels - 1) and (layer == (self.level_layers[level] - 1))):
curr_out_ngf = num_classes
if kernel_size > 1:
curr_layers.append(HyperPatchInvertedResidual(
prev_channels + 2, curr_out_ngf, kernel_size, expand_ratio=expand_ratio[level],
norm_layer=norm_layer, act_layer=act_layer))
else:
group = groups[level] if isinstance(groups, (list, tuple)) else groups
curr_layers.append(make_hyper_patch_conv2d_block(prev_channels + 2, curr_out_ngf,
kernel_size, groups=group))
prev_channels = curr_out_ngf
# Add level layers to module
self.level_blocks.append(MetaSequential(*curr_layers))
if level < (unify_level - 1):
self.weight_blocks.append(WeightLayer(self.level_blocks[-1].hyper_params))
else:
self._ranges.append(self._ranges[-1] + self.level_blocks[-1].hyper_params)
if level == (self.levels - 1):
hyper_params = sum([b.hyper_params for b in self.level_blocks[unify_level - 1:]])
self.weight_blocks.append(WeightLayer(hyper_params))
# Add the last layer
if with_out_fc:
out_fc_layers = [nn.Dropout2d(dropout, True)] if dropout is not None else []
out_fc_layers.append(
HyperPatchConv2d(prev_channels, num_classes, out_kernel_size, padding=out_kernel_size // 2))
self.out_fc = MetaSequential(*out_fc_layers)
else:
self.out_fc = None
# Cache image coordinates
if coords_res is not None:
for res in coords_res:
res_pyd = [(res[0] // 2 ** i, res[1] // 2 ** i) for i in range(self.levels)]
for level_res in res_pyd:
self.register_buffer(f'coord{level_res[0]}_{level_res[1]}',
self.cache_image_coordinates(*level_res))
# Initialize signal to weights
self.param_groups = get_hyper_params(self)
min_unit = max(weight_groups)
signal_features = divide_feature(signal_channels, self.param_groups, min_unit=min_unit)
init_signal2weights(self, list(signal_features), weight_groups=weight_groups)
self.hyper_params = sum(self.param_groups)
def cache_image_coordinates(self, h, w):
x = torch.linspace(-1, 1, steps=w)
y = torch.linspace(-1, 1, steps=h)
grid = torch.stack(torch.meshgrid(y, x)[::-1], dim=0).unsqueeze(0)
return grid
def get_image_coordinates(self, b, h, w, device):
cache = f'coord{h}_{w}'
if hasattr(self, cache):
return getattr(self, cache).expand(b, -1, -1, -1)
x = torch.linspace(-1, 1, steps=w, device=device)
y = torch.linspace(-1, 1, steps=h, device=device)
grid = torch.stack(torch.meshgrid(y, x)[::-1], dim=0).unsqueeze(0)
return grid.expand(b, -1, -1, -1)
def forward(self, x, s):
# For each level
p = None
for level in range(self.levels):
level_block = self.level_blocks[level]
weight_block = self.weight_blocks[min(level, self.unify_level - 1)]
# Initial layer input
if p is None:
p = x[-level - 1]
else:
# p = F.interpolate(p, scale_factor=2, mode='bilinear', align_corners=False) # Upsample x2
if p.shape[2:] != x[-level - 1].shape[2:]:
p = F.interpolate(p, x[-level - 1].shape[2:], mode='bilinear', align_corners=False) # Upsample
p = torch.cat((x[-level - 1], p), dim=1)
# Add image coordinates
p = torch.cat([self.get_image_coordinates(p.shape[0], *p.shape[-2:], p.device), p], dim=1)
# Compute the output for the current level
if level < (self.unify_level - 1):
w = weight_block(s)
p = level_block(p, w)
else:
if level == (self.unify_level - 1):
w = weight_block(s)
i = level - self.unify_level + 1
p = level_block(p, w[:, self._ranges[i]:self._ranges[i + 1]])
# Last layer
if self.out_fc is not None:
p = self.out_fc(p, s)
# Upscale the prediction the finest feature map resolution
if p.shape[2:] != x[0].shape[2:]:
p = F.interpolate(p, x[0].shape[2:], mode='bilinear', align_corners=False) # Upsample
return p
def get_hyper_params(model):
hyper_params = []
# For each child module
for name, m in model.named_children():
if isinstance(m, (WeightLayer,)):
hyper_params.append(m.target_params)
else:
hyper_params += get_hyper_params(m)
return hyper_params
def init_signal2weights(model, signal_features, signal_index=0, weight_groups=1):
# For each child module
for name, m in model.named_children():
if isinstance(m, (WeightLayer,)):
curr_feature_nc = signal_features.pop(0)
curr_weight_group = weight_groups.pop(0) if isinstance(weight_groups, list) else weight_groups
m.init_signal2weights(curr_feature_nc, signal_index, curr_weight_group)
signal_index += curr_feature_nc
else:
init_signal2weights(m, signal_features, signal_index, weight_groups)
class WeightLayer(nn.Module):
def __init__(self, target_params):
super(WeightLayer, self).__init__()
self.target_params = target_params
self.signal_channels = None
self.signal_index = None
self.signal2weights = None
def init_signal2weights(self, signal_channels, signal_index=0, groups=1):
self.signal_channels = signal_channels
self.signal_index = signal_index
weight_channels = next_multiply(self.target_params, groups)
self.signal2weights = nn.Conv2d(signal_channels, weight_channels, 1, bias=False, groups=groups)
def apply_signal2weights(self, s):
if self.signal2weights is None:
return s
w = self.signal2weights(s[:, self.signal_index:self.signal_index + self.signal_channels])[:, :self.target_params]
return w
def forward(self, s):
return self.apply_signal2weights(s)
class HyperPatchInvertedResidual(nn.Module):
def __init__(self, in_nc, out_nc, kernel_size=3, stride=1, expand_ratio=1, norm_layer=nn.BatchNorm2d,
act_layer=nn.ReLU6(inplace=True), padding_mode='reflect'):
super(HyperPatchInvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
self.padding_mode = padding_mode
self.padding = (1, 1)
self._padding_repeated_twice = self.padding + self.padding
self.in_nc = in_nc
self.out_nc = out_nc
self.kernel_size = _pair(kernel_size)
self.hidden_dim = int(round(in_nc * expand_ratio))
self.use_res_connect = self.stride == 1 and in_nc == out_nc
self.act_layer = act_layer
self.bn1 = norm_layer(self.hidden_dim)
self.bn2 = norm_layer(self.hidden_dim)
self.bn3 = norm_layer(self.out_nc)
# Calculate hyper params and weight ranges
self.hyper_params = 0
self._ranges = [0]
self.hyper_params += in_nc * self.hidden_dim
self._ranges.append(self.hyper_params)
self.hyper_params += np.prod((self.hidden_dim,) + self.kernel_size)
self._ranges.append(self.hyper_params)
self.hyper_params += self.hidden_dim * out_nc
self._ranges.append(self.hyper_params)
def conv(self, x, weight):
b, c, h, w = x.shape
# assert b == 1
fh, fw = weight.shape[-2:]
ph, pw = x.shape[-2] // fh, x.shape[-1] // fw
kh, kw = ph + self.padding[0] * 2, pw + self.padding[1] * 2
if self.padding_mode != 'zeros' and np.any(self._padding_repeated_twice):
x = F.pad(x, self._padding_repeated_twice, mode=self.padding_mode)
padding = _pair(0)
else:
padding = self.padding
x = x.permute(0, 2, 3, 1).unfold(1, kh, ph).unfold(2, kw, pw).reshape(1, -1, kh, kw)
if b == 1:
weight = weight.permute(0, 2, 3, 1).view(-1, weight.shape[1])
else:
weight = weight.permute(0, 2, 3, 1).reshape(-1, weight.shape[1])
# Conv1
weight1 = weight[:, self._ranges[0]:self._ranges[1]].reshape(b * fh * fw * self.hidden_dim, self.in_nc, 1, 1)
x = F.conv2d(x, weight1, bias=None, groups=b * fh * fw)
x = self.bn1(x.view(b * fh * fw, -1, kh, kw)).view(1, -1, kh, kw)
x = self.act_layer(x)
# x = self.act_layer(self.bn1(F.conv2d(x, weight1, bias=None, groups=b * fh * fw)))
# Conv2
weight2 = weight[:, self._ranges[1]:self._ranges[2]].reshape(b * fh * fw * self.hidden_dim, 1,
*self.kernel_size)
x = F.conv2d(x, weight2, bias=None, stride=self.stride, groups=b * fh * fw * self.hidden_dim)
x = self.bn2(x.view(b * fh * fw, -1, ph, pw)).view(1, -1, ph, pw)
x = self.act_layer(x)
# Conv3
weight3 = weight[:, self._ranges[2]:self._ranges[3]].reshape(b * fh * fw * self.out_nc, self.hidden_dim, 1, 1)
x = F.conv2d(x, weight3, bias=None, groups=b * fh * fw)
x = self.bn3(x.view(b * fh * fw, -1, ph, pw))
x = x.view(b, fh, fw, -1, ph, pw).permute(0, 3, 1, 4, 2, 5).reshape(b, -1, h, w)
return x
def forward(self, x, s):
if self.use_res_connect:
return x + self.conv(x, s)
else:
return self.conv(x, s)
class WeightMapper(nn.Module):
""" Weight mapper module (called context head in the paper).
Args:
in_channels (int): input number of channels.
out_channels (int): output number of channels.
levels (int): number of levels operating on different strides.
bias (bool): if True, enables bias in all convolution operations.
min_unit (int): legacy parameter, no longer used.
weight_groups (int): legacy parameter, no longer used.
"""
def __init__(self, in_channels, out_channels, levels=3, bias=False, min_unit=4, weight_groups=1):
super(WeightMapper, self).__init__()
assert levels > 0, 'levels must be greater than zero'
assert in_channels % 2 == 0, 'in_channels must be divisible by 2'
if isinstance(weight_groups, (list, tuple)):
assert len(weight_groups) == len(out_channels), \
f'groups ({len(weight_groups)}) must be of size {len(out_channels)}'
self.in_channels = in_channels
self.out_channels = out_channels
self.levels = levels
self.bias = bias
self.weight_groups = weight_groups
# Add blocks
self.down_blocks = nn.ModuleList()
self.up_blocks = nn.ModuleList()
self.in_conv = nn.Sequential(
nn.Conv2d(in_channels, in_channels // 2, kernel_size=1, stride=1, bias=bias),
nn.BatchNorm2d(in_channels // 2),
nn.ReLU(inplace=True))
for level in range(self.levels - 1):
self.down_blocks.append(nn.Sequential(
nn.Conv2d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2, bias=bias),
nn.BatchNorm2d(in_channels // 2),
nn.ReLU(inplace=True)))
self.up_blocks.append(nn.Sequential(
nn.Conv2d(in_channels, in_channels // 2, 1, bias=bias),
nn.BatchNorm2d(in_channels // 2),
nn.ReLU(inplace=True)))
self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
def forward(self, x):
x = self.in_conv(x)
# Down stream
feat = [x]
for level in range(self.levels - 1):
feat.append(self.down_blocks[level](feat[-1]))
# Average the last feature map
orig_shape = feat[-1].shape
if orig_shape[-2:] != (1, 1):
x = F.adaptive_avg_pool2d(feat[-1], 1)
x = F.interpolate(x, orig_shape[-2:], mode='nearest')
# Up stream
for level in range(self.levels - 2, -1, -1):
x = torch.cat((feat.pop(-1), x), dim=1)
x = self.up_blocks[level](x)
x = self.upsample(x)
# Output head
x = torch.cat((feat.pop(-1), x), dim=1)
return x
def next_multiply(x, base):
return type(x)(np.ceil(x / base) * base)
class HyperPatchNoPadding(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1):
super(HyperPatchNoPadding, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.dilation = _pair(dilation)
self.groups = groups
self.hyper_params = np.prod((out_channels, in_channels // groups) + self.kernel_size)
def forward(self, x, weight):
b, c, h, w = x.shape
fh, fw = weight.shape[-2:]
ph, pw = x.shape[-2] // fh, x.shape[-1] // fw
weight = weight.permute(0, 2, 3, 1).reshape(
b * fh * fw * self.out_channels, self.in_channels // self.groups, *self.kernel_size)
x = x.view(b, c, fh, ph, fw, pw).permute(0, 2, 4, 1, 3, 5).reshape(1, -1, ph, pw)
x = F.conv2d(x, weight, bias=None, stride=self.stride, dilation=self.dilation, groups=b * fh * fw * self.groups)
x = x.view(b, fh, fw, -1, ph, pw).permute(0, 3, 1, 4, 2, 5).reshape(b, -1, h, w)
return x
class HyperPatch(nn.Module):
def __init__(self, module: nn.Module, padding=0, padding_mode='reflect'):
super(HyperPatch, self).__init__()
valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
if padding_mode not in valid_padding_modes:
raise ValueError(
f"padding_mode must be one of {valid_padding_modes}, but got padding_mode='{padding_mode}'")
self.hyper_module = module
self.padding = _pair(padding)
self.padding_mode = padding_mode
self._padding_repeated_twice = self.padding + self.padding
@property
def hyper_params(self):
return self.hyper_module.hyper_params
def forward(self, x, weight):
b, c, h, w = x.shape
fh, fw = weight.shape[-2:]
ph, pw = x.shape[-2] // fh, x.shape[-1] // fw
kh, kw = ph + self.padding[0] * 2, pw + self.padding[1] * 2
weight = weight.permute(0, 2, 3, 1).reshape(-1, weight.shape[1]).contiguous()
x = F.pad(x, self._padding_repeated_twice, mode=self.padding_mode)
x = torch.nn.functional.unfold(x, (kh, kw), stride=(ph, pw)) # B x (C x (ph x pw)) x (fh * fw)
x = x.transpose(1, 2).reshape(-1, c, kh, kw).contiguous()
x = self.hyper_module(x, weight)
x = x.view(b, fh * fw, -1, ph * pw).permute(0, 2, 3, 1).reshape(b, -1, fh * fw)
x = F.fold(x, (h, w), kernel_size=(ph, pw), stride=(ph, pw))
return x
class HyperPatchConv2d(HyperPatch):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1,
padding_mode='reflect'):
conv = MetaConv2d(in_channels, out_channels, kernel_size, stride, 0, dilation, groups)
super(HyperPatchConv2d, self).__init__(conv, padding, padding_mode)
@property
def in_channels(self):
return self.hyper_module.in_channels
@property
def out_channels(self):
return self.hyper_module.out_channels
@property
def kernel_size(self):
return self.hyper_module.kernel_size
@property
def groups(self):
return self.hyper_module.groups
def __repr__(self):
s = self.__class__.__name__ + '({in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}'
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.hyper_module.dilation != (1,) * len(self.hyper_module.dilation):
s += ', dilation={dilation}'
if self.hyper_module.groups != 1:
s += ', groups={groups}'
if self.padding_mode != 'zeros':
s += ', padding_mode={padding_mode}'
s += ')'
d = {**self.hyper_module.__dict__}
d['padding'] = self.padding
d['padding_mode'] = self.padding_mode
return s.format(**d)
def make_hyper_patch_conv2d_block(in_nc, out_nc, kernel_size=3, stride=1, padding=None, dilation=1, groups=1,
padding_mode='reflect', norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU(True),
dropout=None):
""" Defines a Hyper patch-wise convolution block with a normalization layer, an activation layer, and an optional
dropout layer.
Args:
in_nc (int): Input number of channels
out_nc (int): Output number of channels
kernel_size (int): Convolution kernel size
stride (int): Convolution stride
padding (int, optional): The amount of padding for the height and width dimensions
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
padding_mode (str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
norm_layer (nn.Module): Type of feature normalization layer
act_layer (nn.Module): Type of activation layer
dropout (float): If specified, enables dropout with the given probability
"""
assert dropout is None or isinstance(dropout, float)
padding = kernel_size // 2 if padding is None else padding
if padding == 0:
layers = [HyperPatchNoPadding(in_nc, out_nc, kernel_size, stride, dilation, groups)]
else:
layers = [HyperPatchConv2d(in_nc, out_nc, kernel_size, stride, padding, dilation, groups, padding_mode)]
if norm_layer is not None:
layers.append(norm_layer(out_nc))
if act_layer is not None:
layers.append(act_layer)
if dropout is not None:
layers.append(nn.Dropout(dropout))
return MetaSequential(*layers)
def divide_feature(in_feature, out_features, min_unit=8):
""" Divides in_feature relative to each of the provided out_features.
The division of the input feature will be in multiplies of "min_unit".
The algorithm makes sure that equal output features will get the same portion of the input feature.
The smallest out feature will receive all the round down overflow (usually the final fc)
Args:
in_feature: the input feature to divide
out_features: the relative sizes of the output features
min_unit: each division of the input feature will be divisible by this number.
in_feature must be divisible by this number as well
Returns:
np.array: array of integers of the divided input feature in the size of out_features.
"""
assert in_feature % min_unit == 0, f'in_feature ({in_feature}) must be divisible by min_unit ({min_unit})'
units = in_feature // min_unit
indices = np.argsort(out_features)
out_features_sorted = np.array(out_features)[indices]
out_feat_groups = [(k, indices[list(g)]) for k, g in groupby(range(len(indices)), lambda i: out_features_sorted[i])]
out_feat_groups.sort(key=lambda x: x[0] * len(x[1]), reverse=True)
units_feat_ratio = float(units) / sum(out_features)
# For each feature group
out_group_units = [len(out_feat_group[1]) for out_feat_group in out_feat_groups]
remaining_units = units - sum(out_group_units)
for i, out_feat_group in enumerate(out_feat_groups): # out_feat_group: (out_feature, indices array)
if i < (len(out_feat_groups) - 1):
n = len(out_feat_group[1]) # group size
curr_out_feat_size = out_feat_group[0] * n
curr_units = max(curr_out_feat_size * units_feat_ratio, n)
curr_units = curr_units // n * n - n # Make divisible by num elements
curr_units = min(curr_units, remaining_units)
out_group_units[i] += curr_units
remaining_units -= curr_units
if remaining_units == 0:
break
else:
out_group_units[-1] += remaining_units
# Final feature division
divided_in_features = np.zeros(len(out_features), dtype=int)
for i, out_feat_group in enumerate(out_feat_groups):
for j in range(len(out_feat_group[1])):
divided_in_features[out_feat_group[1][j]] = out_group_units[i] // len(out_feat_group[1]) * min_unit
return divided_in_features
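# Worked example: divide_feature(64, [4, 4, 2], min_unit=8) returns
# array([24, 24, 16]) -- the two equal features each get 24 channels and the
# smallest feature absorbs the remaining 16, all multiples of min_unit.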
def hyperseg_efficientnet(model_name, pretrained=False, out_feat_scale=0.25, levels=3, weights_path=None, **kwargs):
from hyperseg.models.backbones.efficientnet import efficientnet
from functools import partial
weight_mapper = partial(WeightMapper, levels=levels)
backbone = partial(efficientnet, model_name, pretrained=pretrained, out_feat_scale=out_feat_scale, head=None,
return_features=True)
model = HyperGen(backbone, weight_mapper, **kwargs)
if weights_path is not None:
checkpoint = torch.load(weights_path)
state_dict = checkpoint['state_dict']
model.load_state_dict(state_dict, strict=True)
return model
def main(model='hyperseg.models.hyperseg_v1_0_unify.hyperseg_efficientnet', res=(512,),
pyramids=None,
train=False):
from hyperseg.utils.obj_factory import obj_factory
from hyperseg.utils.utils import set_device
from hyperseg.utils.img_utils import create_pyramid
from tqdm import tqdm
assert len(res) <= 2, f'res must be either a single number or a pair of numbers: "{res}"'
res = res * 2 if len(res) == 1 else res
torch.set_grad_enabled(False)
torch.backends.cudnn.benchmark = True
device, gpus = set_device()
model = obj_factory(model).to(device).train(train)
x = torch.rand(1, 3, *res).to(device)
x = create_pyramid(x, pyramids) if pyramids is not None else x
pred = model(x)
print(pred.shape)
if __name__ == "__main__":
# Parse program arguments
import argparse
parser = argparse.ArgumentParser('hyperseg test')
parser.add_argument('-m', '--model',
default='hyperseg.models.hyperseg_v1_0_unify.hyperseg_efficientnet',
help='model object')
parser.add_argument('-r', '--res', default=(512,), type=int, nargs='+',
metavar='N', help='image resolution')
parser.add_argument('-p', '--pyramids', type=int, metavar='N',
help='number of image pyramids')
parser.add_argument('-t', '--train', action='store_true',
help='If True, sets the model to training mode')
main(**vars(parser.parse_args()))
| [
"ynirkin@gmail.com"
] | ynirkin@gmail.com |
29608a73b01cd1662418e4d8be0d8b19444f9bf8 | 45cc3045763d94a2760f3eea713d1760b6c47976 | /models/mlp_classification_layer.py | 159746fd18094fefa60e6cead623980c2be2a492 | [
"MIT"
] | permissive | kiminh/prerequisite-prediction | 464f2d419df71a7b477103cd5e2a535157b30e87 | d45c2f13d5fec62e9177e63296da55aa70b1eb54 | refs/heads/master | 2022-10-02T22:05:56.089267 | 2020-06-04T07:13:05 | 2020-06-04T07:13:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class MLPClassification(nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.in_features = in_features
mid_features = in_features // 2
self.out_features = out_features
self.fc1 = nn.Linear(in_features, mid_features)
self.fc2 = nn.Linear(mid_features, out_features)
self.dropout = nn.Dropout(0.2)
def forward(self, input):
input = self.dropout(input)
x = F.relu(self.fc1(input))
x = self.dropout(x)
x = self.fc2(x)
return x
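# Example (hypothetical sizes): MLPClassification(128, 10) maps a (batch, 128)
# tensor to (batch, 10) logits through a 64-unit hidden layer.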
def __repr__(self):
return '{} ({} -> {})'.format(self.__class__.__name__, self.in_features, self.out_features) | [
"luogan_62625686@126.com"
] | luogan_62625686@126.com |
f919c9e9ce31b4f2ba4ee925104693ef602a991d | f2b860cd107681925cf58c004001c71a8ec5b2bd | /antspynet/architectures/create_convolutional_autoencoder_model.py | 2bd714ea3d3389ae2bd5ba767fca6522f6261b7c | [] | no_license | zwmJohn/ANTsPyNet | dfb7f5a841bb8506d6c5f809af38937bbc2725f5 | 3bd658a8d8fc6467612a3419f38dfc65895fc679 | refs/heads/master | 2022-04-28T04:37:24.995937 | 2020-05-01T00:27:41 | 2020-05-01T00:27:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,274 | py |
from keras.models import Model
# Conv3D/Conv3DTranspose are required by the 3-D variant below (previously a NameError)
from keras.layers import Input, Conv2D, Conv2DTranspose, Conv3D, Conv3DTranspose, Dense, Flatten, Reshape
import numpy as np
import math
def create_convolutional_autoencoder_model_2d(input_image_size,
number_of_filters_per_layer=(32, 64, 128, 10),
convolution_kernel_size=(5, 5),
deconvolution_kernel_size=(5, 5)
):
"""
Function for creating a 2-D symmetric convolutional autoencoder model.
Builds an autoencoder based on the specified array definining the
number of units in the encoding branch. Ported from the Keras python
implementation here:
https://github.com/XifengGuo/DEC-keras
Arguments
---------
input_image_size : tuple
A tuple defining the shape of the 2-D input image
number_of_filters_per_layer : tuple
A tuple defining the number of filters in the encoding branch.
convolution_kernel_size : tuple or scalar
Kernel size for convolution
deconvolution_kernel_size : tuple or scalar
Kernel size for deconvolution
Returns
-------
Keras models
A convolutional encoder and autoencoder Keras model.
Example
-------
>>> autoencoder, encoder = create_convolutional_autoencoder_model_2d((128, 128, 3))
>>> autoencoder.summary()
>>> encoder.summary()
"""
activation = 'relu'
strides = (2, 2)
number_of_encoding_layers = len(number_of_filters_per_layer) - 1
factor = 2 ** number_of_encoding_layers
padding = 'valid'
if input_image_size[0] % factor == 0:
padding = 'same'
inputs = Input(shape = input_image_size)
encoder = inputs
for i in range(number_of_encoding_layers):
local_padding = 'same'
kernel_size = convolution_kernel_size
if i == (number_of_encoding_layers - 1):
local_padding = padding
kernel_size = tuple(np.array(convolution_kernel_size) - 2)
encoder = Conv2D(filters=number_of_filters_per_layer[i],
kernel_size=kernel_size,
strides=strides,
activation=activation,
padding=local_padding)(encoder)
encoder = Flatten()(encoder)
encoder = Dense(units=number_of_filters_per_layer[-1])(encoder)
autoencoder = encoder
penultimate_number_of_filters = \
number_of_filters_per_layer[number_of_encoding_layers-1]
input_image_size_factored = ((math.floor(input_image_size[0] / factor)),
(math.floor(input_image_size[1] / factor)))
number_of_units_for_encoder_output = (penultimate_number_of_filters *
input_image_size_factored[0] * input_image_size_factored[1])
autoencoder = Dense(units=number_of_units_for_encoder_output,
activation=activation)(autoencoder)
autoencoder = Reshape(target_shape=(*input_image_size_factored, penultimate_number_of_filters))(autoencoder)
for i in range(number_of_encoding_layers, 1, -1):
local_padding = 'same'
kernel_size = convolution_kernel_size
if i == number_of_encoding_layers:
local_padding = padding
kernel_size = tuple(np.array(deconvolution_kernel_size) - 2)
autoencoder = Conv2DTranspose(filters=number_of_filters_per_layer[i-2],
kernel_size=kernel_size,
strides=strides,
activation=activation,
padding=local_padding)(autoencoder)
autoencoder = Conv2DTranspose(input_image_size[-1],
kernel_size=deconvolution_kernel_size,
strides=strides,
padding='same')(autoencoder)
autoencoder_model = Model(inputs=inputs, outputs=autoencoder)
encoder_model = Model(inputs=inputs, outputs=encoder)
return([autoencoder_model, encoder_model])
def create_convolutional_autoencoder_model_3d(input_image_size,
number_of_filters_per_layer=(32, 64, 128, 10),
convolution_kernel_size=(5, 5, 5),
deconvolution_kernel_size=(5, 5, 5)
):
"""
Function for creating a 3-D symmetric convolutional autoencoder model.
Builds an autoencoder based on the specified array definining the
number of units in the encoding branch. Ported from the Keras python
implementation here:
https://github.com/XifengGuo/DEC-keras
Arguments
---------
input_image_size : tuple
A tuple defining the shape of the 3-D input image
number_of_filters_per_layer : tuple
A tuple defining the number of filters in the encoding branch.
convolution_kernel_size : tuple or scalar
Kernel size for convolution
deconvolution_kernel_size : tuple or scalar
Kernel size for deconvolution
Returns
-------
Keras models
A convolutional encoder and autoencoder Keras model.
Example
-------
>>> autoencoder, encoder = create_convolutional_autoencoder_model_3d((128, 128, 128, 3))
>>> autoencoder.summary()
>>> encoder.summary()
"""
activation = 'relu'
strides = (2, 2, 2)
number_of_encoding_layers = len(number_of_filters_per_layer) - 1
factor = 2 ** number_of_encoding_layers
padding = 'valid'
if input_image_size[0] % factor == 0:
padding = 'same'
inputs = Input(shape = input_image_size)
encoder = inputs
for i in range(number_of_encoding_layers):
local_padding = 'same'
kernel_size = convolution_kernel_size
if i == (number_of_encoding_layers - 1):
local_padding = padding
kernel_size = tuple(np.array(convolution_kernel_size) - 2)
encoder = Conv3D(filters=number_of_filters_per_layer[i],
kernel_size=kernel_size,
strides=strides,
activation=activation,
padding=local_padding)(encoder)
encoder = Flatten()(encoder)
encoder = Dense(units=number_of_filters_per_layer[-1])(encoder)
autoencoder = encoder
penultimate_number_of_filters = \
number_of_filters_per_layer[number_of_encoding_layers-1]
input_image_size_factored = ((math.floor(input_image_size[0] / factor)),
(math.floor(input_image_size[1] / factor)),
(math.floor(input_image_size[2] / factor)))
number_of_units_for_encoder_output = (penultimate_number_of_filters *
input_image_size_factored[0] * input_image_size_factored[1] *
input_image_size_factored[2])
autoencoder = Dense(units=number_of_units_for_encoder_output,
activation=activation)(autoencoder)
autoencoder = Reshape(target_shape=(*input_image_size_factored, penultimate_number_of_filters))(autoencoder)
for i in range(number_of_encoding_layers, 1, -1):
local_padding = 'same'
kernel_size = convolution_kernel_size
if i == number_of_encoding_layers:
local_padding = padding
kernel_size = tuple(np.array(deconvolution_kernel_size) - 2)
autoencoder = Conv3DTranspose(filters=number_of_filters_per_layer[i-2],
kernel_size=kernel_size,
strides=strides,
activation=activation,
padding=local_padding)(autoencoder)
autoencoder = Conv3DTranspose(input_image_size[-1],
kernel_size=deconvolution_kernel_size,
strides=strides,
padding='same')(autoencoder)
autoencoder_model = Model(inputs=inputs, outputs=autoencoder)
encoder_model = Model(inputs=inputs, outputs=encoder)
return([autoencoder_model, encoder_model])
| [
"ntustison@gmail.com"
] | ntustison@gmail.com |
7e0f6034cc8f7b8e07658fe390c52352d05802f8 | a8c7653633d9792def25c79e5f76ee411dbe5df4 | /codedigger-env/bin/pyrsa-keygen | fa4cb57f0a50e10213958f5036ee45fa0ad2c49c | [] | no_license | aaradhyaberi07/codedigger | 396a98a9602ee2107e61caf95d114ce6f2449600 | 7637e469e91f4d214cb11959b70000c116e80e26 | refs/heads/master | 2023-02-01T23:03:23.846400 | 2020-12-18T09:48:55 | 2020-12-18T09:48:55 | 322,553,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | #!/home/aaradhya/Desktop/codedigger/Auth/codedigger-env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import keygen
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(keygen())
| [
"aaradhyaberi@gmail.com"
] | aaradhyaberi@gmail.com | |
8574fec6568f400b3cb635f5d8caf629219c87ca | 11c211dc48b6b33c2ad2cf0e6e7116ace526594b | /Applications/Voice Controlled Car/import_test.py | 845f3d7de7640abaa763bbc4b5002d4665ff42b4 | [] | no_license | thecloudist/awareness | 1f481187a1e563069338aa68631f3829b0e4ce8f | 5d55adb5921b72a54c30ca8fc235d6f6c09156a7 | refs/heads/master | 2020-09-27T03:03:16.651252 | 2016-10-18T04:38:00 | 2016-10-18T04:38:00 | 67,518,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | from gopigo import *
from GpgMovement import *
import sys
from subprocess import call
from time import sleep
# from tts import *
from positioning import Dist_Enc_Tics as d2tics
from transcribe_streaming_thread import *
| [
"salvideoguy@gmail.com"
] | salvideoguy@gmail.com |
3df51616641206faf5ab0d3a8dbc5faafddb1c1b | 2a6fde51d8459ea3b82c6ddcd199b94ee586d416 | /src/writer.py | 3a7be97d1315779446de69a36d4b43d08eed8959 | [
"MIT"
] | permissive | AdamZink/excitation-source-subtraction | c9e91b16f9d2e27bf1dd668ab9ef044abca79730 | efe60f4e479b074732835d6777a257184b8ec5e1 | refs/heads/master | 2021-09-15T00:37:44.973209 | 2018-05-05T15:21:42 | 2018-05-05T15:21:42 | 112,039,299 | 1 | 0 | MIT | 2018-04-10T02:48:16 | 2017-11-25T22:40:23 | Python | UTF-8 | Python | false | false | 1,385 | py | import pandas as pd
import os
import io
import csv
class RowWriter:
def __init__(self):
# self.csv_data = io.StringIO()
# self.csv_writer = csv.writer(self.csv_data, delimiter=',')
self.data_string = ''
self.excel_out_path = None
self.sheet_name = None
self.dataframe = None
def save_row(self, row_string):
self.data_string += row_string + '\n'
# print(row_string)
# self.csv_writer.writerow([row_string])
# print(self.csv_data.getvalue())
def export_to_excel(self, filename): #, sheet_name
# self.csv_data.seek(0)
# print(self.csv_data)
# self.dataframe = pd.read_csv(self.csv_data, delimiter=',', quoting=1)
# print(self.dataframe)
self.excel_out_path = os.path.join('..', 'output', filename)
with open(self.excel_out_path, 'w') as f:
f.write(self.data_string)
# self.sheet_name = sheet_name
# pdWriter = pd.ExcelWriter(self.excel_out_path, engine='xlsxwriter')
# self.dataframe.to_excel(pdWriter, sheet_name=self.sheet_name)
# pdWriter.save()
class ColumnWriter:
def __init__(self):
self.excel_out_path = None
self.dataframe = pd.DataFrame()
def save_column(self, column_name, data):
self.dataframe[column_name] = data
def export_to_excel(self, filename, sheet_name):
excel_writer = pd.ExcelWriter(os.path.join('..', 'output', filename))
self.dataframe.to_excel(excel_writer, sheet_name)
excel_writer.save()
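# Example usage (hypothetical file name; the ../output directory must exist):
# cw = ColumnWriter()
# cw.save_column('values', [1, 2, 3])
# cw.export_to_excel('demo.xlsx', 'Sheet1')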
| [
"adamzink55@gmail.com"
] | adamzink55@gmail.com |
f8e68d69e6ac998232cd090e49d23e78371a7605 | 591609ce57682f83dce867686068bc3cd0f7ed88 | /OJ_Python/OJ_Python_2.7/password_strength.py | 4342bc59fee036c1074b26c91eed9fa2ca973fd6 | [] | no_license | MozhiJiawei/Huawei_OJ | cc5c0f0dc6572762834db9101aec0e4c4c27e9cb | b7c4557778b8b4e804e797d4e6891e0bb4c85e8a | refs/heads/master | 2021-01-12T03:22:03.369992 | 2017-03-17T09:01:13 | 2017-03-17T09:01:13 | 78,201,831 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,873 | py | def get_password_strength(password):
element = {'len': len(password), 'letter_lower': 0, 'letter_upper': 0, 'num': 0, 'symbol': 0}
for c in password:
if str.isdigit(c):
element['num'] += 1
elif str.islower(c):
element['letter_lower'] += 1
elif str.isupper(c):
element['letter_upper'] += 1
else:
element['symbol'] += 1
grade = 0
if element['len'] <= 4:
grade += 5
elif element['len'] <= 7:
grade += 10
else:
grade += 25
if element['letter_lower'] == 0 and element['letter_upper'] == 0:
grade += 0
elif element['letter_lower'] == 0 or element['letter_upper'] == 0:
grade += 10
else:
grade += 20
if element['num'] == 0:
grade += 0
elif element['num'] == 1:
grade += 10
else:
grade += 20
if element['symbol'] == 0:
grade += 0
elif element['symbol'] == 1:
grade += 10
else:
grade += 25
if element['letter_lower'] != 0 and element['letter_upper'] != 0 and \
element['num'] != 0 and element['symbol'] != 0:
grade += 5
elif element['letter_upper'] != 0 and element['num'] != 0 and element['symbol'] != 0:
grade += 3
elif element['letter_lower'] != 0 and element['num'] != 0 and element['symbol'] != 0:
grade += 3
elif element['num'] != 0 and element['symbol'] != 0:
grade += 2
if grade >= 90:
return "VERY_SECURE"
elif grade >= 80:
return "SECURE"
elif grade >= 70:
return "VERY_STRONG"
elif grade >= 60:
return "STRONG"
elif grade >= 50:
return "AVERAGE"
elif grade >= 25:
return "WEAK"
else:
return "VERY_WEAK"
if __name__ == "__main__":
print get_password_strength(raw_input())
| [
"372745456@qq.com"
] | 372745456@qq.com |
62afc4696e1e1a0e69a3757d17af7e7131d8fcfb | 4cf97704bf1a3365039e1d87812527eb8080f3b1 | /util.py | de83454a602734bc6b6e3b9271ad1a32590ff017 | [
"MIT"
] | permissive | arun-94/PITM | d92bf4834570fcd089139215433fc07e7ec44500 | 47ac9fcfab89e888a5d021880b5ea81b59ed1d24 | refs/heads/master | 2022-05-14T21:43:31.006798 | 2018-11-13T05:26:07 | 2018-11-13T05:26:07 | 156,999,984 | 1 | 0 | MIT | 2022-03-22T20:44:34 | 2018-11-10T16:35:54 | JavaScript | UTF-8 | Python | false | false | 1,993 | py | from urllib.parse import urlparse
from forms import WordListForm
import re
url_regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
def process_text(text, url_check=False):
    # build a list (not a one-shot generator) so callers can reuse the result
    words = [w.strip()
             for word in text.split('\n')
             for w in word.split(' ')
             if w not in ('', '\r', '\n')]
if url_check:
words = [url
for url in words
if url_regex.match(url)]
return words
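
# Illustrative sketch (not part of the original module): shows how process_text
# tokenizes raw text and how url_check filters the tokens against url_regex.
# The sample strings below are made up for the demo.
def _demo_process_text():
    text = "hello  world\nhttp://example.com not-a-url"
    print(process_text(text))                   # ['hello', 'world', 'http://example.com', 'not-a-url']
    print(process_text(text, url_check=True))   # ['http://example.com']
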
def add_text_to_file(text, file_, **kwargs):
words = process_text(text, **kwargs)
    # 'w' already truncates the file, so a single open replaces the old
    # truncate-then-append sequence
    with open(file_, 'w') as f:
        for word in words:
            f.write("%s\n" % word)
return words
def get_text_from_file(file_):
with open(file_) as f:
return [line.strip() for line in f.readlines()]
def _construct_form(banned_words, white_list, blackList):
form = WordListForm()
longest_banned_word = max([len(word) for word in banned_words]) + 1
    longest_white_url = max([len(word) for word in white_list]) + 1
    longest_black_url = max([len(word) for word in blackList]) + 1
columns = max(longest_banned_word, longest_white_url, longest_black_url)
form.bannedWords.render_kw = {'rows': len(banned_words) + 1, 'cols': columns + 1}
form.bannedWords.data = '\n'.join(banned_words)
form.whiteList.render_kw = {'rows': len(white_list) + 1, 'cols': columns + 1}
form.whiteList.data = '\n'.join([urlparse(url).geturl() for url in white_list])
form.blackList.render_kw = {'rows': len(blackList) + 1, 'cols': columns + 1}
form.blackList.data = '\n'.join([urlparse(url).geturl() for url in blackList])
return form
| [
"arunswaminathan94@gmail.com"
] | arunswaminathan94@gmail.com |
201a03340d5c71b5268d436ea84dedac5c148f9c | 3d8318d79f1efcfe75fd0e91be45f00f0380163a | /Packs/TruSTAR/Integrations/TruSTAR_V2/TruSTAR_V2_test.py | 8ef5830123f3f71781f81528fca26b84303d61c3 | [
"MIT"
] | permissive | nicoloereni/content | 1b75efb50531c3aeb2319b4fe2235ca6d23e8580 | ddb88044c5b39a17894dd13e7ae260d9854afc30 | refs/heads/master | 2023-08-15T11:05:03.321368 | 2020-07-07T11:06:40 | 2020-07-07T11:06:40 | 277,815,390 | 1 | 0 | MIT | 2020-07-07T12:51:13 | 2020-07-07T12:51:12 | null | UTF-8 | Python | false | false | 16,003 | py | import pytest
from TruSTAR_V2 import TrustarClient, Utils
import trustar
from trustar.models.indicator import Indicator
from trustar.models.enclave import EnclavePermissions
from trustar.models.report import Report
from trustar.models.intelligence_source import IntelligenceSource
from trustar.models.phishing_submission import PhishingSubmission, PhishingIndicator
from trustar.models.indicator_summary import IndicatorSummary, IndicatorAttribute, IndicatorScore
@pytest.fixture
def client():
    client = TrustarClient(config={
'user_api_key': "test_api_key",
'user_api_secret': "test_api_secret",
'api_endpoint': "test_api_endpoint",
'client_type': "Python_SDK",
'client_metatag': "demisto-xsoar"
})
return client
@pytest.fixture
def enclaves():
return [
EnclavePermissions(
id="931a7386-ed4f-4acd-bda0-b13b2b6b823f71",
name="TestEnclave",
type="CLOSED",
read=True,
create=False,
update=True
)
]
@pytest.fixture
def related_indicators(mocker):
return mocker.Mock(
items=[
Indicator(
type="SHA256",
value="a127d88fb73f8f1a3671557f3084d02d981396d5f5218163ef26d61314ced3c1"
),
Indicator(
type="URL",
value="www.testUrl.com"
)
]
)
@pytest.fixture
def trending_indicators():
return [
Indicator(
correlation_count=724,
type="URL",
value="badware.info"
),
Indicator(
correlation_count=694,
type="URL",
value="botvrij.eu"
)
]
@pytest.fixture
def indicators_metadata():
return [
Indicator(
value="185.220.101.141",
first_seen=1588884576620,
last_seen=1588923302059,
correlation_count=0,
type="IP",
enclave_ids=[
'011ad71b-fd7d-44c2-834a-0d751299fb1f',
'71f337a0-9696-4331-988a-5679271656a0',
'd915e45a-d0c8-4a75-987a-775649020c96'
]
)
]
@pytest.fixture
def indicator_summaries(mocker):
return mocker.Mock(
items=[
IndicatorSummary(
value="185.220.101.141",
indicator_type="IP",
source=IntelligenceSource(key="virustotal", name="VirusTotal"),
severity_level=3,
updated=1589782796000,
enclave_id='011ad71b-fd7d-44c2-834a-0d751299fb1f',
report_id='67c60023-83ea-4376-960e-2ff8fc9fbd33',
attributes=[
IndicatorAttribute(
description='Number of associated URLs detected as bad',
name='Detected URLs',
value=1,
),
IndicatorAttribute(
description='Number of hostnames this IP resolved to',
name='Hostname Resolutions',
value=2,
),
IndicatorAttribute(
name='ASN',
value='200052',
),
],
score=IndicatorScore(name="Positives/Total Scans", value="64/75")
),
IndicatorSummary(
value="185.220.100.141",
indicator_type="IP",
source=IntelligenceSource(key="OTRO", name="VirusTotal"),
severity_level=3,
updated=1589782796000,
enclave_id='011ad71b-fd7d-44c2-834a-0d751299fb1f',
report_id='67c60023-83ea-4376-960e-2ff8fc9fbd33',
attributes=[
IndicatorAttribute(
description='Number of associated URLs detected as bad',
name='Detected URLs',
value=1,
),
IndicatorAttribute(
description='Number of hostnames this IP resolved to',
name='Hostname Resolutions',
value=2,
),
IndicatorAttribute(
name='ASN',
value='200052',
),
],
score=IndicatorScore(name="Positives/Total Scans", value="64/75")
)
]
)
@pytest.fixture
def reports(mocker):
return mocker.MagicMock(
items=[
Report(
id="1",
title="Test Report",
body="Test Body",
),
Report(
id="2",
title="Test Report2",
body="{'testField': 'test'}",
),
]
)
@pytest.fixture
def correlated_reports(mocker):
return [
Report(
id="1",
title="Test Report",
body="Test Body",
),
Report(
id="2",
title="Test Report2",
body="{'testField': 'test'}",
),
]
@pytest.fixture
def whitelisted_indicators(mocker):
return mocker.Mock(
items=[
Indicator(
type="MD5",
value="1e82dd741e908d02e4eff82461f1297e"
),
Indicator(
type="EMAIL_ADDRESS",
value="truphish1337@gmail.com"
)
]
)
@pytest.fixture
def phishing_submissions(mocker):
return mocker.Mock(
items=[
PhishingSubmission(
submission_id="TEST-SUBMISSION-ID",
title="TEST PHISHING SUBMISSION",
priority_event_score=3,
status="UNRESOLVED"
)
]
)
@pytest.fixture
def phishing_indicators(mocker):
return mocker.Mock(
items=[
PhishingIndicator(
indicator_type="URL",
value="www.test.com",
source_key="test_source",
normalized_indicator_score=3,
original_indicator_score=3
)
]
)
def test_get_enclaves(client, enclaves, monkeypatch):
def mock_get_enclaves(*args, **kwargs):
return enclaves
monkeypatch.setattr(trustar.TruStar, "get_user_enclaves", mock_get_enclaves)
response = client.get_enclaves()
expected = enclaves[0].to_dict(remove_nones=True)
assert response.get('Contents')[0] == expected
def test_related_indicators(client, related_indicators, monkeypatch):
def mock_get_related_indicators(*args, **kwargs):
return related_indicators
monkeypatch.setattr(trustar.TruStar, "get_related_indicators_page", mock_get_related_indicators)
indicators = ["a127d88fb73f8f1a3671557f3084d02d981396d5f5218163ef26d61314ced3c1", "www.testUrl.com"]
response = client.get_related_indicators(indicators)
expected = [i.to_dict(remove_nones=True) for i in related_indicators.items]
assert response.get('Contents') == expected
def test_trending_indicators(client, trending_indicators, monkeypatch):
def mock_get_trending_indicators(*args, **kwargs):
return trending_indicators
monkeypatch.setattr(trustar.TruStar, "get_community_trends", mock_get_trending_indicators)
response = client.get_trending_indicators()
expected = [i.to_dict(remove_nones=True) for i in trending_indicators]
assert response.get('Contents') == expected
def test_get_indicators_metadata(client, indicators_metadata, monkeypatch):
def mock_get_metadata(*args, **kwargs):
return indicators_metadata
monkeypatch.setattr(trustar.TruStar, "get_indicators_metadata", mock_get_metadata)
response = client.get_indicators_metadata(indicators=['185.220.101.141'])
expected = indicators_metadata[0].to_dict(remove_nones=True)
expected["firstSeen"] = Utils.normalize_time(expected.get('firstSeen'))
expected["lastSeen"] = Utils.normalize_time(expected.get('lastSeen'))
assert response.get('Contents')[0] == expected
def test_get_indicator_summaries(client, indicator_summaries, monkeypatch):
def mock_get_summaries(*args, **kwargs):
return indicator_summaries
monkeypatch.setattr(trustar.TruStar, "get_indicator_summaries_page", mock_get_summaries)
response = client.get_indicator_summaries(values=['185.220.101.141'])
expected = indicator_summaries.items[0].to_dict(remove_nones=True)
expected['indicatorType'] = expected.pop('type')
assert response.get('Contents')[0] == expected
def test_get_whitelisted_indicators(client, whitelisted_indicators, monkeypatch):
def mock_get_whitelist(*args, **kwargs):
return whitelisted_indicators
monkeypatch.setattr(trustar.TruStar, "get_whitelist_page", mock_get_whitelist)
response = client.get_whitelist()
expected = [i.to_dict(remove_nones=True) for i in whitelisted_indicators.items]
assert response.get('Contents') == expected
def test_get_indicators_for_report(client, whitelisted_indicators, monkeypatch):
def mock_get_indicators_for_report(*args, **kwargs):
return whitelisted_indicators
monkeypatch.setattr(trustar.TruStar, "get_indicators_for_report_page", mock_get_indicators_for_report)
response = client.get_indicators_for_report("76cc1321-f630-test-b82b-eb00a9022445")
expected = [i.to_dict(remove_nones=True) for i in whitelisted_indicators.items]
assert response.get('Contents') == expected
def test_move_report(client, monkeypatch):
def mock_move_report(*args, **kwargs):
return kwargs["report_id"]
report_id = "94a476d8-17e3-490a-9020-f6971b692daf"
enclave_id = "6ef1078c-a74a-4b42-9344-56c6adea0bda"
monkeypatch.setattr(trustar.TruStar, "move_report", mock_move_report)
response = client.move_report(report_id, enclave_id)
assert response == f"{report_id} has been moved to enclave id: {enclave_id}"
def test_copy_report(client, monkeypatch):
def mock_copy_report(*args, **kwargs):
return "NEW-Test-ID"
report_id = "94a476d8-17e3-490a-9020-f6971b692daf"
dest_enclave_id = "6ef1078c-a74a-4b42-9344-56c6adea0bda"
monkeypatch.setattr(trustar.TruStar, "copy_report", mock_copy_report)
response = client.copy_report(report_id, dest_enclave_id)
assert response == f"{report_id} has been copied to enclave id: {dest_enclave_id} with id: NEW-Test-ID"
def test_get_reports(client, reports, monkeypatch):
def mock_get_reports(*args, **kwargs):
return reports
monkeypatch.setattr(trustar.TruStar, "get_reports_page", mock_get_reports)
response = client.get_reports()
expected = [report.to_dict(remove_nones=True) for report in reports.items]
for e in expected:
e["reportDeepLink"] = client.get_report_deep_link(e.get("id"))
assert response.get('Contents') == expected
def test_get_report_details(client, reports, monkeypatch):
def mock_get_report_details(*args, **kwargs):
return reports.items[0]
monkeypatch.setattr(trustar.TruStar, "get_report_details", mock_get_report_details)
response = client.get_report_details(report_id="1")
expected = reports.items[0].to_dict(remove_nones=True)
expected['reportDeepLink'] = client.get_report_deep_link("1")
assert response.get('Contents') == expected
def test_update_report(client, reports, monkeypatch):
def mock_update_report(*args, **kwargs):
return reports.items[0]
monkeypatch.setattr(trustar.TruStar, "get_report_details", mock_update_report)
monkeypatch.setattr(trustar.TruStar, "update_report", lambda x, y: None)
response = client.update_report(report_id="1", title="NEW TEST TITLE")
expected = reports.items[0].to_dict()
expected['reportDeepLink'] = client.get_report_deep_link("1")
expected['title'] = "NEW TEST TITLE"
assert response.get('Contents') == expected
def test_search_reports(client, reports, monkeypatch):
def mock_search_reports(*args, **kwargs):
return reports.items
monkeypatch.setattr(trustar.TruStar, "search_reports_page", mock_search_reports)
response = client.search_reports()
expected = [r.to_dict(remove_nones=True) for r in reports.items]
assert response.get('Contents') == expected
def test_delete_report(client, monkeypatch):
report_id = "94a476d8-17e3-490a-9020-f6971b692daf"
monkeypatch.setattr(trustar.TruStar, "delete_report", lambda x, y, z: None)
response = client.delete_report(report_id)
assert response == f"Report {report_id} was successfully deleted"
def test_submit_report(client, monkeypatch, mocker):
m = mocker.Mock(id=1)
monkeypatch.setattr(trustar.TruStar, "submit_report", lambda x, y: m)
response = client.submit_report(
title="Test enclave",
report_body="TEST BODY",
enclave_ids=["testEnclaveId"]
)
assert response.get('Contents').get('id') == 1
assert response.get('Contents').get('title') == "Test enclave"
assert response.get('Contents').get('reportBody') == "TEST BODY"
def test_add_to_whitelist(client, monkeypatch):
monkeypatch.setattr(trustar.TruStar, "add_terms_to_whitelist", lambda x, y: y)
indicators = ["test@trustar.co", "www.testUrl.com"]
response = client.add_to_whitelist(indicators)
assert response == f"{indicators} added to the whitelist successfully"
def test_remove_from_whitelist(client, monkeypatch):
monkeypatch.setattr(trustar.TruStar, "delete_indicator_from_whitelist", lambda x, y: None)
indicator = "htain@trustar.co"
response = client.remove_from_whitelist(indicator)
assert response == f'{indicator} removed from the whitelist successfully'
def test_correlated_reports(client, correlated_reports, monkeypatch):
def mock_get_correlated_reports(*args, **kwargs):
return correlated_reports
monkeypatch.setattr(trustar.TruStar, "get_correlated_reports_page", mock_get_correlated_reports)
response = client.get_correlated_reports(indicators="5f67fc0a85ef8f1b6c17ee54acb55140")
expected = [r.to_dict(remove_nones=True) for r in correlated_reports]
assert response.get('Contents') == expected
def test_get_all_phishing_indicators(client, phishing_indicators, monkeypatch):
def mock_get_phishing_indicators(*args, **kwargs):
return phishing_indicators
monkeypatch.setattr(trustar.TruStar, "get_phishing_indicators_page", mock_get_phishing_indicators)
response = client.get_all_phishing_indicators()
expected = phishing_indicators.items[0].to_dict(remove_nones=True)
assert response.get('Contents')[0] == expected
def test_get_phishing_submissions(client, phishing_submissions, monkeypatch):
def mock_get_phishing_submissions(*args, **kwargs):
return phishing_submissions
monkeypatch.setattr(trustar.TruStar, "get_phishing_submissions_page", mock_get_phishing_submissions)
response = client.get_phishing_submissions()
expected = phishing_submissions.items[0].to_dict(remove_nones=True)
assert response.get('Contents')[0] == expected
def test_set_triage_status(client, monkeypatch, mocker):
m = mocker.Mock()
m.raise_for_status = lambda: None
monkeypatch.setattr(trustar.TruStar, "mark_triage_status", lambda x, y, z: m)
response = client.set_triage_status("TEST-ID", "RESOLVED")
assert response == "Submission ID TEST-ID is RESOLVED"
def test_search_indicators(client, whitelisted_indicators, monkeypatch):
def mock_search_indicators(*args, **kwargs):
return whitelisted_indicators.items
monkeypatch.setattr(trustar.TruStar, "search_indicators_page", mock_search_indicators)
response = client.search_indicators()
expected = [i.to_dict(remove_nones=True) for i in whitelisted_indicators.items]
assert response.get('Contents') == expected
| [
"noreply@github.com"
] | noreply@github.com |
6a6d2a6c735e1f1055037d69168ec88e8636d9e5 | c077f907cf703209bc3437f62ac7a64179e98863 | /bxfel/model/prior.py | eeae3b1b2367c74b9322062ef7aed5ae9b789382 | [
"MIT"
] | permissive | mmechelke/bayesian_xfel | 02d370f6a4a706491fa0edd6d594e68c4e74f766 | 9726b08494ef04daa52d95a246cac7e879f26f49 | refs/heads/master | 2021-01-11T16:30:29.923620 | 2017-04-25T09:31:32 | 2017-04-25T09:31:32 | 80,094,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,151 | py | import numpy as np
from abc import ABCMeta, abstractmethod
class Prior(object):
__metaclass__ = ABCMeta
@abstractmethod
def energy(self, x):
raise NotImplementedError("Not implemented in Base class")
@abstractmethod
def gradient(self, x):
raise NotImplementedError("Not implemented in Base class")
class LaplacePrior(Prior):
def __init__(self, k=1.):
self._k = k
def energy(self, x):
return self._k * np.sum(np.abs(x))
def gradient(self, x):
        return self._k * np.sign(x)
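
# Illustrative sketch (not in the original module): a finite-difference check
# that LaplacePrior.gradient matches the derivative of LaplacePrior.energy.
# The test point and tolerance are arbitrary demo choices (x must avoid 0,
# where |x| is not differentiable).
def _demo_laplace_prior():
    prior = LaplacePrior(k=2.)
    x = np.array([0.5, -1.0, 3.0])
    eps = 1e-6
    for i in range(len(x)):
        x_hi, x_lo = x.copy(), x.copy()
        x_hi[i] += eps
        x_lo[i] -= eps
        num_grad = (prior.energy(x_hi) - prior.energy(x_lo)) / (2 * eps)
        assert abs(num_grad - prior.gradient(x)[i]) < 1e-4
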
class DoubleExpPrior(Prior):
"""
Double exponential prior with two parameters
controlling the rate for positive and negative
values independently
"""
def __init__(self, lambda_neg, lambda_pos):
self._lambda_neg = lambda_neg
self._lambda_pos = lambda_pos
def energy(self, x):
ln = self._lambda_neg
lp = self._lambda_pos
x_pos = x[x>0]
x_neg = x[x<0]
u_pos = -len(x_pos) * np.log(lp) + lp * np.sum(x_pos)
u_neg = -len(x_neg) * np.log(ln) + ln * np.sum(-x_neg)
return u_pos + u_neg
def gradient(self, x):
grad = np.zeros_like(x)
grad[x>0] = self._lambda_pos
grad[x<0] = -self._lambda_neg
return grad
class LocalMeanPrior(Prior):
def __init__(self, k, N):
"""
assumes that x is in fact
a N x N x N Tensor
"""
self._k = k
self._N = int(N)
def energy(self, x):
tmp = x.reshape((self._N, self._N, self._N))
u = 0.0
for i in range(self._N):
for j in range(self._N):
for k in range(self._N):
for l in [-1, 0, 1]:
for m in [-1, 0, 1]:
for n in [-1, 0, 1]:
if l==0 and m==0 and n==0:
continue
if (i+l >= 0 and i+l< self._N
and j+m >= 0 and j+m < self._N
and k+n >= 0 and k+n < self._N):
u += 0.5 * self._k * (tmp[i,j,k] - tmp[i+l,j+m,k+n])**2
return u
def gradient(self, x):
tmp = x.reshape((self._N, self._N, self._N))
grad = np.zeros_like(tmp)
u = 0.0
for i in range(self._N):
for j in range(self._N):
for k in range(self._N):
for l in [-1, 0, 1]:
for m in [-1, 0, 1]:
for n in [-1, 0, 1]:
if l==0 and m==0 and n==0:
continue
if (i+l >= 0 and i+l< self._N
and j+m >= 0 and j+m < self._N
and k+n >= 0 and k+n < self._N):
grad[i,j,k] += self._k * (tmp[i,j,k] - tmp[i+l,j+m,k+n])
grad[i+l,j+m,k+n] -= self._k * (tmp[i,j,k] - tmp[i+l,j+m,k+n])
return grad.ravel()
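
# Illustrative sketch (not in the original module): LocalMeanPrior operates on
# a flattened N x N x N grid; a constant grid has zero energy and zero gradient
# because every voxel already equals all of its neighbours.
def _demo_local_mean_prior():
    N = 3
    prior = LocalMeanPrior(k=1.0, N=N)
    x = np.ones(N * N * N)                 # perfectly smooth field
    assert prior.energy(x) == 0.0
    assert np.allclose(prior.gradient(x), 0.0)
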
| [
"martin.mechelke@gmail.com"
] | martin.mechelke@gmail.com |
177888ef3e7a8b71ade2f3f2e6cb44e9e5367037 | 6977cb5feedcdd818852d5fd4c674dc822420230 | /fxapom/pages/fxa_test_user.py | 447c5858f2dff2814e1d011cf0eb3ea7927d1fdf | [] | no_license | AndreiH/fxapom | b5e2594ab86a0837922dca3ce1888f9e1162db43 | 60ff612d5ed9cfd6104a9101b6e9f46aa79456b7 | refs/heads/master | 2016-09-05T17:48:39.423399 | 2014-11-20T17:21:44 | 2014-11-20T17:21:44 | 26,481,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,930 | py | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import subprocess
from user import MockUser
class FxaTestUser:
"""A base test class that can be extended by other tests to include utility methods."""
email = 'webqa-%s@restmail.net' % \
os.urandom(6).encode('hex')
password = os.urandom(4).encode('hex')
def generate_new_user(self):
        return MockUser(email=self.email, password=self.password,
                        name=self.email.split('@')[0])
def create_user(self, mozwebqa):
if '-dev.allizom' in mozwebqa.base_url:
os.environ['PUBLIC_URL'] = 'https://stable.dev.lcip.org/auth/'
else:
os.environ['PUBLIC_URL'] = 'https://api.accounts.firefox.com/'
self.email = 'webqa-%s@restmail.net' % \
os.urandom(6).encode('hex')
self.password = os.urandom(4).encode('hex')
# Create and verify the Firefox account
subprocess.check_call(['fxa-client', '-e', self.email,
'-p', self.password, 'create'])
subprocess.check_call(['fxa-client', '-e', self.email,
'-p', self.password, 'verify'])
return MockUser(email=self.email, password=self.password, name=self.email.split('@')[0])
def verify_new_user(self, mozwebqa):
if '-dev.allizom' in mozwebqa.base_url:
os.environ['PUBLIC_URL'] = 'https://stable.dev.lcip.org/auth/'
else:
os.environ['PUBLIC_URL'] = 'https://api.accounts.firefox.com/'
subprocess.check_call(['fxa-client', '-e', self.email,
'-p', self.password, 'verify'])
| [
"andrei.hutusoru@softvision.ro"
] | andrei.hutusoru@softvision.ro |
d254869df57ee22c857fd1b15c9a63b281581ddb | 5d1121478254bbc6d6778c07281af7920d00c6f8 | /microdrop_utility/dict_as_attr_proxy.py | 7cafc839448e8908a4205977429c39a53f7e6563 | [] | no_license | wheeler-microfluidics/microdrop_utility | cc48bf1e659c52565a09be9891d1ce6b9523c69b | fdc7777404964d660a659dd8819df2480d42a9aa | refs/heads/master | 2021-05-16T02:41:59.863105 | 2017-07-20T14:33:21 | 2017-07-20T14:33:21 | 16,616,067 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | class DictAsAttrProxy(object):
'''
>>> d = dict(A=1, B=2)
>>> dp = DictAsAttrProxy(d)
>>> dp.A
1
>>> dp.B
2
>>> dp.C
Traceback (most recent call last):
...
KeyError: 'C'
>>> dp.A = 10
>>> dp.B = 20
>>> dp.C = 100
>>> d
{'A': 10, 'C': 100, 'B': 20}
'''
def __init__(self, dict_, none_on_not_found=False):
object.__setattr__(self, '_dict', dict_)
object.__setattr__(self, '_none_on_not_found', none_on_not_found)
def __setattr__(self, name, value):
dict_ = object.__getattribute__(self, '_dict')
dict_[name] = value
def __getattr__(self, name):
dict_ = object.__getattribute__(self, '_dict')
none_on_not_found = object.__getattribute__(self, '_none_on_not_found')
if none_on_not_found:
return dict_.get(name)
else:
return dict_[name]
@property
def as_dict(self):
dict_ = object.__getattribute__(self, '_dict')
return dict_
| [
"ryan@fobel.net"
] | ryan@fobel.net |
1bb73f9bd1418f1ac34f1152812d381812cc1c38 | 8db20e784e8a35cf24eec1a615627b693ed4ae21 | /DZ_WebBooking/DZ_WebBooking/settings.py | 1c8ee02039aa376bdf5eb6f46d92db5e43694b6f | [] | no_license | alvexs/iu5-web-5sem | 01cbb9d8f0807f6e0aadf17da2cf7097f1f143e3 | 44e9dac05e6391d3c3e1e6de15fb8caac6da029c | refs/heads/master | 2020-09-20T02:10:05.529046 | 2016-12-26T16:58:35 | 2016-12-26T16:58:40 | 67,216,723 | 0 | 0 | null | 2016-09-02T11:31:11 | 2016-09-02T11:21:22 | null | UTF-8 | Python | false | false | 3,317 | py | """
Django settings for DZ_WebBooking project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=iy8!#&b01utzr--0&9jk-i_h66t5dralz$30)8r^ii$17m%#r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'my_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DZ_WebBooking.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = "DZ_WebBooking.wsgi.application"
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'WebBooking_db',
'USER': 'alex',
'PASSWORD': '123',
'HOST': 'localhost',
'PORT': 3306,
'OPTIONS': {'charset': 'utf8'},
'TEST_CHARSET': 'utf8',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"a.v.ivannikov@ya.ru"
] | a.v.ivannikov@ya.ru |
4ec91989eed11e9fe7f3579a66130f297be08e11 | 2d4ca6dc3a87453bc8a5dadb45402c34ae02b441 | /pylights/device/switch.py | 26ae4a504e96536895d79be2e6ec1673ad739e6e | [] | no_license | sthomen/pylights | 85c86d5681f55e0a285cb0de56194c53997fc6c3 | ea60f061e03724b6440e7c2e6de8aacce434db6d | refs/heads/master | 2021-01-12T16:28:15.407420 | 2020-11-12T15:05:30 | 2020-11-12T15:05:30 | 69,154,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | # vim:ts=4:sw=4:
from tkinter import *
from .device import Device
class Switch(Device):
def __init__(self, parent, title="Switch"):
super().__init__(parent)
self.label=Label(self, text=title)
self.label.pack()
        # store the buttons under distinct names so they don't shadow the on()/off() handler methods
        self.on_button = Button(self, text="On", command=self.on)
        self.on_button.pack(side=LEFT)
        self.off_button = Button(self, text="Off", command=self.off)
        self.off_button.pack(side=RIGHT)
def on(self):
if self.setcallback:
self.setcallback(1, *self.setparams)
def off(self):
if self.setcallback:
self.setcallback(0, *self.setparams)
| [
"duck@shangtai.net"
] | duck@shangtai.net |
2c54bc1449c5a2fe6700cc5e6ddd823548ad38f2 | 9d1fbc09b99c979deb9177997e60db7c4ff62c14 | /tools.py | 87759961dfbcf4919793a01ae7521defd18321b1 | [] | no_license | V-Sirivatanapa/Sequence-Classification-with-Conv1D | 2ae4ab8c5d13b1ddf8909de1284156421ef0bd35 | b42c7df03baf87a554900ceb046b6f6356f79396 | refs/heads/master | 2022-11-20T19:05:07.056213 | 2020-07-26T09:28:34 | 2020-07-26T09:28:34 | 282,617,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,371 | py | import numpy as np
import pandas as pd
import copy
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.utils import class_weight
from sklearn.metrics import accuracy_score, confusion_matrix
from scipy.interpolate import CubicSpline
import tensorflow
from tensorflow.python.keras.layers import Conv1D, BatchNormalization, MaxPool1D, GlobalAveragePooling1D, Dropout, Dense
## LOAD DATA ###################################################################
""" receives 1 argument:
path of the folder containing the file """
def LOAD_FROM_DRIVE(folder_dir, file_dir):
data_dir = str(folder_dir) + '/' + str(file_dir)
data = np.load(data_dir, allow_pickle=True)
return data
## PREPROCESS ##################################################################
""" receives 1 argument, unequal array
returns padded array """
def pad_sequence(x):
arr_like = np.zeros((99, 13))
arr_like[:x.shape[0], :x.shape[1]] = x
return np.asarray(arr_like)
""" receives 2 arguments:
1. reference
2. lower limit for length
returns a mask (list) """
def remove_too_short(x, shorter_than=75):
return list(pd.Series(x).apply(lambda x: len(x)) >= shorter_than)
""" receives 3 arguments
1. 1d array (numeric)
2. class dictionary for all classes
3. ont_hot (bool) if True return one-hot encoded arrays """
def label_vectorize(y, class_dict, one_hot=False):
result = []
if one_hot:
for i in y:
arr_init = np.zeros(len(class_dict))
arr_init[class_dict[i]-1] = 1
result.append(arr_init)
return np.asarray(result).astype('float64')
else:
for i in y:
result.append(class_dict[i])
return np.asarray(result).astype('int64')
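
# Illustrative sketch (not in the original module): label_vectorize paired with
# get_class_dict (defined below). Assumes the class dictionary is 0-indexed,
# which is exactly what get_class_dict produces.
def _demo_label_vectorize():
    y = np.array(['yes', 'no', 'yes'])
    class_dict = get_class_dict(y)                        # {'no': 0, 'yes': 1}
    print(label_vectorize(y, class_dict))                 # [1 0 1]
    print(label_vectorize(y, class_dict, one_hot=True))   # [[0. 1.] [1. 0.] [0. 1.]]
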
## SUPPORT #####################################################################
""" receives labels and returns a dictionary
containing each unique label and its corresponding unique value """
def get_class_dict(y):
return {name:enu for enu, name in enumerate(np.unique(y))}
""" receives 2 arguments
1. 1d array for classes
2. type of input (str)
returns a dictionary of class weights
(assigning lowest to higest weights according to
the proportion of each class in the array)
"""
def get_class_weights(y, input_type='categorical'):
if input_type=='categorical':
labels = np.unique(y)
cw = class_weight.compute_class_weight('balanced', labels ,y)
return {l:round(c,3) for l, c in zip(labels, cw)}
# elif input_type=='one_hot':
# labels, counts = np.unique(np.argmax(y, axis=1)+1, return_counts=True)
# sort_index = sorted(range(len(counts)), key=lambda x: counts[x])
# labels = labels[sort_index]
# counts = counts[sort_index][::-1]/sum(counts)
# return {l:round(c,3) for l, c in zip(labels, counts)}
else:
print('input_type: not correct')
pass
""" receives array of any dimension
returns shuffle index with length of the input array
"""
def shuffle_index(x):
p = np.random.permutation(len(x))
return p
def To_word(y_pred, class_dict):
reverse_class_dict = {v:k for k, v in class_dict.items()}
pred_words = []
for i in y_pred:
pred_words.append(reverse_class_dict[i])
return np.asarray(pred_words)
## DATA-AUGMENTATION FUNCT #####################################################
""" receives 2 arguments
1. data to be augmented (datapoint-wise)
2. augmentation method
returns augmented data """
def Augment(X, method):
X_len = len(X)
result = []
if method == 'mag_warp':
for e, i in enumerate(X):
result.append(DA_MagWarp(i))
# print('\rProcess: Magnitude Warping [', e+1, '/', X_len,']', end='')
elif method == 'slice':
for e, i in enumerate(X):
result.append(pad_sequence(Slicing(i)))
# print('\rProcess: Window Slicing [', e+1, '/', X_len,']', end='')
elif method == 'time_mask':
for e, i in enumerate(X):
result.append(pad_sequence(Time_masker(i)))
# print('\rProcess: Window Slicing [', e+1, '/', X_len,']', end='')
elif method == 'channel_mask':
for e, i in enumerate(X):
result.append(pad_sequence(Channel_masker(i)))
# print('\rProcess: Window Slicing [', e+1, '/', X_len,']', end='')
else:
print('specify a method')
pass
return np.asarray(result)
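
# Illustrative sketch (not in the original module): applying Augment to a dummy
# batch. Shapes follow the module's (instances, time steps, coefficients)
# convention of 99-sample, 13-coefficient clips; the random data stands in for
# real features.
def _demo_augment():
    X = np.random.randn(4, 99, 13)
    print(Augment(X, 'mag_warp').shape)   # (4, 99, 13) - smoothly rescaled
    print(Augment(X, 'slice').shape)      # (4, 99, 13) - cropped then re-padded
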
## FOR DATA-AUGMENTATION ######################################################
""" receives 2d array
returns sliced (cropped) array
"""
def Slicing(Xi):
window_size = 90
start_index = np.random.randint(0, 10)
return Xi[0+start_index: window_size + start_index, :]
""" receives 2d array
this function randomly drops rows in the input array
"""
def Time_masker(Xi):
X = copy.deepcopy(Xi)
full_len = X.shape[0]
all_index = np.arange(full_len)
percentage_del = (np.random.randint(5, 11, 1)/100)[0]
del_proportion = int(round(full_len*percentage_del))
start_index = np.random.randint(0, full_len-(del_proportion-1))
delete_index = all_index[start_index:start_index+del_proportion]
new_index = np.delete(all_index, delete_index, None)
return X[new_index]
def Channel_masker(Xi):
X = copy.deepcopy(Xi)
randint = np.random.randint(0, 13)
    X[:, randint] = np.zeros(X.shape[0])  # zero out one randomly chosen channel
return X
# adopted and modified code from https://github.com/terryum
def GenerateRandomCurves(Xi, sigma=0.2, knot=4, random_param=True):
if random_param:
sigma = (np.random.randint(10, 31, 1)/100)[0] #range[0.1,0.3]
knot = (np.random.randint(4, 9, 1))[0] #range[4,8]
xx = (np.ones((Xi.shape[1],1))*(np.arange(0, Xi.shape[0], (Xi.shape[0]-1)/(knot+1)))).transpose()
yy = np.random.normal(loc=1.0, scale=sigma, size=(knot+2, Xi.shape[1]))
x_range = np.arange(Xi.shape[0])
y_range = np.arange(Xi.shape[1])
result = []
for i in y_range:
result.append(CubicSpline(xx[:,i], yy[:,i])(x_range))
return np.asarray(result).transpose()
def DA_MagWarp(Xi):
return Xi * GenerateRandomCurves(Xi)
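
# Illustrative sketch (not in the original module): magnitude warping multiplies
# each column by a smooth random cubic-spline curve, so the output keeps the
# input's shape while its per-column scale drifts over time.
def _demo_mag_warp():
    Xi = np.ones((99, 13))
    warped = DA_MagWarp(Xi)
    print(warped.shape)     # (99, 13)
    print(warped[:5, 0])    # smoothly varying values near 1.0
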
## BUILD MODEL & TRAINING #####################################################
""" Conv1d neural nerwork that receives
1. input (shape=99, 13)
2. label (int) ## use with loss='sparse_categorical_crossentropy'
Note: already (slightly) fine-tuned!
"""
def define_model():
input_shape = (99,13)
output_shape = 35
activation = 'relu'
input_layer = keras.Input(shape=input_shape)
h = Conv1D(256, 5, activation=activation, padding='same')(input_layer)
h = BatchNormalization()(h)
h = Conv1D(256, 5, activation=activation, padding='same')(h)
#h = BatchNormalization()(h)
h = MaxPool1D(3)(h)
h = Conv1D(512, 5, activation=activation, padding='same')(h)
#h = BatchNormalization()(h)
h = Dropout(0.35)(h)
h = Conv1D(512, 5, activation=activation, padding='same')(h)
h = GlobalAveragePooling1D()(h)
h = Dropout(0.5)(h)
    output_layer = Dense(output_shape, activation='softmax')(h)
model = keras.Model(inputs=input_layer, outputs=output_layer)
return model
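
# Illustrative sketch (not in the original module): building and compiling the
# network exactly as train_on_synthetic_data does below, then printing a
# summary of the resulting (99, 13) -> 35-way softmax architecture.
def _demo_define_model():
    model = define_model()
    model.compile(optimizer=tensorflow.keras.optimizers.Adam(lr=0.001),
                  loss='sparse_categorical_crossentropy',
                  metrics=['acc'])
    model.summary()
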
def train_on_synthetic_data(methods):
# Original data + Magnitude warping (worked well with VGG)
def generate_synthetic_data(gen_method):
X = np.concatenate([X_train_, Augment(X_train_, gen_method)])
y = np.concatenate([y_train_, y_train_])
return X, y
models, scores, Y_PRED = [], [], []
for i in methods:
print('> Generating data: ' + i)
X, y = generate_synthetic_data(i)
# random permutation
p = shuffle_index(X)
X, y = X[p], y[p]
# define models
model = define_model()
model.compile(optimizer = tensorflow.keras.optimizers.Adam(lr=0.001),
loss = 'sparse_categorical_crossentropy',
metrics = ['acc']
)
# fit models
print('> Fitting model')
model_trained = model.fit(X, y,
batch_size = 128,
epochs = 10,
verbose = 0,
class_weight = my_class_weights,
validation_data = (X_val_, y_val_))
models.append(model_trained)
# predict
print('> Predicting')
y_pred = model.predict(X_test_)
y_pred = np.argmax(y_pred, axis=1)
# for confusion matrices
Y_PRED.append(y_pred)
# calculate score
scores.append(round(accuracy_score(y_test_, y_pred), 5))
print()
return models, scores, Y_PRED
## VISUALIZATION ##############################################################
""" receives 2 argument:
1. label (numeric, str, bool)
2. figsize (tuple) """
def plot_class_distribution(classes, figsize=(24, 8)):
x, y = np.unique(classes, return_counts=True)
x_tick = np.arange(0, len(x), 1)
# plot settings
plt.figure(figsize=figsize)
plt.bar(x_tick, y, align='center', alpha=0.6)
plt.xticks(x_tick, x, rotation=45)
plt.xlabel('Classes: ' + str(len(x)), fontsize=16)
plt.ylabel('Counts', fontsize=16)
plt.title('Distribution of Classes', fontsize=22)
plt.show()
def plot_training_history(model, title=''):
fig, axs = plt.subplots(1, 2, figsize=(18, 5))
axs[0].plot(model.history['loss'])
axs[0].plot(model.history['val_loss'])
axs[0].set_title('model loss')
axs[0].set_ylabel('loss')
axs[0].set_xlabel('epoch')
axs[0].legend(['train', 'val'], loc='upper right')
axs[1].plot(model.history['acc'])
axs[1].plot(model.history['val_acc'])
axs[1].set_title('model acc')
axs[1].set_ylabel('accuracy')
axs[1].set_xlabel('epoch')
axs[1].legend(['train', 'val'], loc='lower right')
fig.suptitle(title, fontsize=15)
plt.show()
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
#print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
#plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label', size=20)
plt.xlabel('Predicted label', size=20)
plt.tight_layout() | [
"noreply@github.com"
] | noreply@github.com |
da172ccf28fb4b4755f2503a7d12b244ec2007b7 | cbc0ac1c8035a5061cadf5ca68220f53d49f7d8b | /BirdSongToolbox/chunk_analysis_tools.py | baa28b553afc8f0f5781d42520f1487f1e37c6b2 | [
"Apache-2.0"
] | permissive | Darilbii/BirdSongToolbox | b5a9efeaf845e6558a6cc8ea8c517a11f9daac2c | f4853a7f6cb5c4ef0f57e9f346be08f6e153ca65 | refs/heads/master | 2023-06-29T13:57:47.621072 | 2021-08-02T22:15:31 | 2021-08-02T22:15:31 | 80,884,333 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,336 | py | """ Functions for Running Classification and Prediction analysis labeled Chunks of Free Behavior"""
import numpy as np
import scipy
import random
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import StratifiedShuffleSplit
def make_templates(event_data):
"""
Parameters
----------
event_data : ndarray | (classes, instances, frequencies, channels, samples)
Neural Data that has been clipped around events of interest
Returns
-------
templates : ndarray | (classes, frequencies, channels, samples)
Mean of all instances for each label's Channel/Frequency pair
"""
templates = []
for data in event_data:
label_template = np.mean(data, axis=0) # label_data: (instances, frequencies, channels, samples)
templates.append(label_template)
return np.array(templates)
def ml_selector(event_data, identity_index, label_index, sel_instances):
""" Collects Instances of Interest from the event_data
Parameters
----------
event_data : ndarray | (classes, instances, frequencies, channels, samples)
Randomly Rebalanced Neural Data (output of balance_classes)
identity_index : ndarray | (num_instances_total,)
array of indexes that represent the individual index of a class labels
label_index : ndarray | (num_instances_total,)
array of labels that indicates the class that instance is an example of
sel_instances : ndarray | (number_instances_total,)
array of indexes that represent the individual indexes of all instances across class labels
Returns
-------
sel_data : ndarray | (classes, instances, frequencies, channels, samples)
ndarray containing the Segments (aka clippings) designated by the sel_index parameter.
Note: the number of instances aren't necessarily equal even if they are balanced prior to running this function
"""
sel_id_index = identity_index[sel_instances]
sel_label_index = label_index[sel_instances]
sel_data = []
for index, data in enumerate(event_data):
label_instances = [x for x, y in zip(sel_id_index, sel_label_index) if y == index] # Sel Instances for Label
label_data = data[np.array(label_instances)] # Array Index using the Selected Instances
# if make_template:
# label_data = np.mean(label_data, axis=0) # label_data: (instances, frequencies, channels, samples)
sel_data.append(label_data)
return np.array(sel_data)
def create_discrete_index(event_data):
"""
Parameters
----------
event_data : ndarray | (classes, instances, frequencies, channels, samples)
Randomly Rebalanced Neural Data (output of balance_classes)
Returns
-------
identity_index : array | (num_instances_total,)
array of indexes that represent the individual index of a class labels
labels_index : array | (num_instances_total,)
array of labels that indicates the class that instance is an example of
"""
identity_index = []
labels_index = []
for index, sel_class in enumerate(event_data):
label_dummy = np.zeros((sel_class.shape[0], 1))
label_dummy[:] = index
instance_dummy = np.arange(sel_class.shape[0])
identity_index.extend(instance_dummy)
labels_index.extend(label_dummy)
identity_index = np.asarray(identity_index) # Convert to ndarray
labels_index = np.asarray(labels_index)[:, 0] # Convert to a ndarray
return identity_index, labels_index
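
# Illustrative sketch (not in the original module): create_discrete_index on a
# toy, unbalanced event_data with two classes of 3 and 2 instances. The array
# shapes here are arbitrary stand-ins for (instances, freqs, chans, samples).
def _demo_create_discrete_index():
    toy = [np.random.randn(3, 1, 1, 4), np.random.randn(2, 1, 1, 4)]
    identities, labels = create_discrete_index(event_data=toy)
    print(identities)   # [0 1 2 0 1]      - per-class instance index
    print(labels)       # [0. 0. 0. 1. 1.] - class of each instance
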
def efficient_pearson_1d_v_2d(one_dim, two_dim):
"""Finds the Pearson correlation of all rows of the two dimensional array with the one dimensional array
Source:
-------
https://www.quora.com/How-do-I-calculate-the-correlation-of-every-row-in-a-2D-array-to-a-1D-array-of-the-same-length
Info:
-----
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed. Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear
relationship. Positive correlations imply that as x increases, so does
y. Negative correlations imply that as x increases, y decreases.
Parameters
----------
one_dim = ndarray | (samples,)
1-Dimensional Array
two_dim= ndarray | (instances, samples)
2-Dimensional array it's row length must be equal to the length of one_dim
Returns
-------
pearson_values : ndarray | (samples,)
Pearson Correlation Values for each instance
Example
-------
x = np.random.randn(10)
y = np.random.randn(100, 10)
The numerators is shape (100,) and denominators is shape (100,)
Pearson = efficient_pearson_1d_v_2d(one_dim = x, two_dim = y)
"""
x_bar = np.mean(one_dim)
x_intermediate = one_dim - x_bar
y_bar = np.mean(two_dim, axis=1) # this flattens y to be (100,) which is a 1D array.
# The problem is that y is 100, so numpy's broadcasting doesn't know which axis to choose to broadcast over.
y_bar = y_bar[:, np.newaxis]
# By adding this extra dimension, we're forcing numpy to treat the 0th axis as the one to broadcast over
# which makes the next step possible. y_bar is now 100, 1
y_intermediate = two_dim - y_bar
numerators = y_intermediate.dot(x_intermediate) # or x_intermediate.dot(y_intermediate.T)
x_sq = np.sum(np.square(x_intermediate))
y_sqs = np.sum(np.square(y_intermediate), axis=1)
denominators = np.sqrt(x_sq * y_sqs) # scalar times vector
pearson_values = (numerators / denominators) # numerators is shape (100,) and denominators is shape (100,)
return pearson_values
def find_pearson_coeff(cl_data, templates, slow=False):
""" Iterates over each Template and finds the Pearson Coefficient for 1 template at a time
Note: This Function Mirrors find_power() only for finding Pearson Correlation Coefficient
Information
-----------
Note : The Number of Examples of Label does not always equal the total number of examples total as some push past
the time frame of the Epoch and are excluded
Parameters
----------
cl_data : ndarray | (instances, frequencies, channels, samples)
Array containing all the neural segments for one labels.
(As defined by Label_Instructions in label_extract_pipeline)
templates : ndarray | (labels, frequencies, channels, samples)
Array of Template Neural Data that corresponds to the Label designated (Templates are the mean of trials)
slow : bool, optional
if True the code will use the native scipy.stats.pearsonr() function which is slow (defaults to False)
Returns
-------
corr_trials : ndarray | (instances, frequencies, channels, labels/templates)
Array of Pearson Correlation Values between each instance and the LFP Template of each Label
"""
num_instances, num_frequencies, num_channels, trial_length = np.shape(cl_data)
num_temps = len(templates)
# Create Lists
corr_trials = []
if slow:
for frequency in range(num_frequencies): # Over all Frequency Bands
channel_trials = []
for channel in range(num_channels): # For each Channel
corr_holder = np.zeros([num_instances, num_temps])
for instance in range(num_instances):
for temp in range(num_temps):
corr_holder[instance, temp], _ = scipy.stats.pearsonr(cl_data[instance, frequency, channel, :],
templates[temp, frequency, channel, :])
channel_trials.append(corr_holder) # Save all of the Trials for that Frequency on that Channel
corr_trials.append(channel_trials) # Save all of the Trials for all Frequencies on each Channel
else:
for frequency in range(num_frequencies): # Over all Frequency Bands
channel_trials = []
for channel in range(num_channels): # For each Channel
corr_holder = np.zeros([num_instances, num_temps])
for temp in range(num_temps):
corr_holder[:, temp] = efficient_pearson_1d_v_2d(templates[temp, frequency, channel, :],
cl_data[:, frequency, channel, :])
channel_trials.append(corr_holder) # Save all of the Trials for that Frequency on that Channel
corr_trials.append(channel_trials) # Save all of the Trials for all Frequencies on each Channel
corr_trials = np.array(corr_trials)
corr_trials = np.transpose(corr_trials, [2, 0, 1, 3])
return corr_trials
def pearson_extraction(event_data, templates):
""" Pearson Correlation Coefficients for all Labels
Parameters
----------
event_data : ndarray | (labels, instances, frequencies, channels, samples)
Array containing all the neural segments for all labels.
(As defined by Label_Instructions in label_extract_pipeline)
templates : ndarray | (labels, frequencies, channels, samples)
Array of Template Neural Data that corresponds to the Label designated (Templates are the mean of trials)
Returns
-------
extracted_pearson : ndarray | (labels, instances, frequencies, channels, templates)
Array of Pearson Correlation Values between each instance and the LFP Template of each Label
"""
extracted_pearson = []
for label in event_data:
extracted_pearson.append(find_pearson_coeff(label, templates=templates))
return np.asarray(extracted_pearson)
def make_feature_id_ledger(num_freqs, num_chans, num_temps=None):
""" Make a Feature Identity Ledger for One type of Feature rows=[Freqs, Channels] | (2, Num_Features)
Parameters
----------
num_freqs : int
the number of frequency bands
num_chans : int
the number of recording channels
num_temps : int, optional
the number of pearson templates, only include if the feature type is pearson
Returns
-------
entries_ledger : ndarray | (num_total_features, [frequencies, channels, templates])
ledger of the feature identity for the scikit-learn data structure
"""
if num_temps:
entries_ledger = np.zeros((3, num_freqs, num_chans, num_temps))
# Make Ledger for Frequency
entries_ledger[0] = entries_ledger[0] + np.arange(num_freqs)[:, None, None] # Index of Freq
# Make Ledger for Channel
entries_ledger[1] = entries_ledger[1] + np.arange(num_chans)[None, :, None] # Index of Channel
# Make Ledger for templates
entries_ledger[2] = entries_ledger[2] + np.arange(num_temps)[None, None, :] # Index of Templates
entries_ledger = np.asarray(entries_ledger) # Convert to ndarray
entries_ledger = entries_ledger.reshape((3, -1)) # Convert (3, Num_Features)
else:
entries_ledger = np.zeros((2, num_freqs, num_chans))
# Make Ledger for Frequency
entries_ledger[0, :, :] = entries_ledger[0, :, :] + np.arange(num_freqs)[:, None] # Index of Freq
# Make Ledger for Channel
entries_ledger[1, :, :] = entries_ledger[1, :, :] + np.arange(num_chans)[None, :] # Index of Channel
entries_ledger = np.asarray(entries_ledger) # Convert to ndarray
entries_ledger = entries_ledger.reshape((2, -1)) # Convert (2, Num_Features)
return np.transpose(entries_ledger)
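
# Illustrative sketch (not in the original module): the ledger enumerates every
# (frequency, channel, template) triple in the same row-major order that
# ml_order (below) flattens the feature axes, so row i of the ledger describes
# column i of the scikit-learn feature matrix.
def _demo_feature_ledger():
    ledger = make_feature_id_ledger(num_freqs=2, num_chans=3, num_temps=2)
    print(ledger.shape)   # (12, 3) -> 2 * 3 * 2 features, 3 identity columns
    print(ledger[0])      # [0. 0. 0.] -> frequency 0, channel 0, template 0
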
def ml_order(extracted_features_array):
"""
Parameters
----------
extracted_features_array : ndarray | (labels, instances, frequencies, channels, templates)
Array of Pearson Correlation Values between each instance and the LFP Template of each Label
Returns
-------
ordered_trials : ndarray | (n_samples, n_features)
Data array that is structured to work with the SciKit-learn Package
n_samples = Num of Instances Total
        n_features = Num_Ch * Num_Freq
    ml_labels : ndarray | (n_samples,)
        1-d array of labels for the ordered_trials instances
"""
ml_labels = []
ordered_trials = []
for index, label in enumerate(extracted_features_array):
# Machine Learning Data
num_instances = len(label)
reshaped = np.reshape(label, (num_instances, -1))
ordered_trials.extend(reshaped)
# Machine Learning Labels
label_dummy = np.zeros((num_instances, 1))
label_dummy[:] = index
ml_labels.extend(label_dummy)
ordered_trials = np.array(ordered_trials)
ml_labels = np.array(ml_labels)[:, 0]
return ordered_trials, ml_labels
def make_feature_dict(ordered_index, drop_type: str):
"""Creates a Dictionary of the the indexes for each Channel's features in the ordered_index
Parameters
----------
ordered_index : ndarray | (num_total_features, [frequencies, channels, templates])
ledger of the feature identity for the scikit-learn data structure
drop_type : str
Controls whether the dictionary indexes the channel number of the frequency band
Returns
-------
feature_dict : dict | {feature: [list of Indexes]}
dictionary to be used to remove all features for either a single channel or frequency band
"""
options = ['channel', 'frequency']
assert drop_type in options
if drop_type == 'frequency':
sel = 0
elif drop_type == 'channel':
sel = 1
ordered_index_shape = np.max(ordered_index, axis=0) + 1
sel_len = int(ordered_index_shape[sel])
feature_dict = {}
for i in range(sel_len):
feature_dict[i] = [index for index, description in enumerate(ordered_index) if description[sel] == i]
return feature_dict
def drop_features(features, keys, desig_drop_list):
"""Function for Selectively Removing Columns for Feature Dropping
Parameters
----------
features : ndarray | (n_samples, n_features)
Data array that is structured to work with the SciKit-learn Package
n_samples = Num of Instances Total
        n_features = Num_Ch * Num_Freq
keys : dict | {feature: [list of Indexes]}
dictionary to be used to remove all features for either a single channel or frequency band
desig_drop_list : list
list of features to be dropped
Returns
-------
remaining_features : ndarray | (n_samples, n_features_remaining)
Data array that is structured to work with the SciKit-learn Package
full_drop : list
list of list of all Features (columns) to be dropped
"""
# flatten_matrix = [val
# for sublist in matrix
# for val in sublist]
full_drop = [val for i in desig_drop_list for val in keys[i]] # Store the Index of Features to be dropped
remaining_features = np.delete(features, full_drop, axis=1)
return remaining_features, full_drop
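
# Illustrative sketch (not in the original module): pairing make_feature_dict
# with drop_features to remove every column that belongs to one channel. The
# feature matrix is random stand-in data with one column per ledger row.
def _demo_drop_channel():
    ledger = make_feature_id_ledger(num_freqs=2, num_chans=3, num_temps=2)
    features = np.random.randn(10, ledger.shape[0])
    keys = make_feature_dict(ledger, drop_type='channel')
    remaining, dropped = drop_features(features, keys, desig_drop_list=[1])
    print(remaining.shape)   # (10, 8) - channel 1's four columns removed
    print(dropped)           # column indexes that were dropped
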
def clip_classification(ClassObj, train_set, train_labels, test_set, test_labels):
""" This Function is a Flexible Machine Learning Function that Trains One Classifier and determines metrics for it
The metrics it determines are:
[1] Accuracy
[2] Confusion Matrix
Parameters
----------
ClassObj : class
classifier object from the scikit-learn package
train_set : ndarray | (n_samples, n_features)
Training data array that is structured to work with the SciKit-learn Package
n_samples = Num of Instances Total
        n_features = Num_Ch * Num_Freq
train_labels : ndarray | (n_training_samples, 1)
1-d array of Labels of the Corresponding n_training_samples
test_set : ndarray | (n_samples, n_features)
Testing data Array that is structured to work with the SciKit-learn Package
n_samples = Num of Instances Total
        n_features = Num_Ch * Num_Freq
test_labels : ndarray | (n_test_samples, 1)
1-d array of Labels of the Corresponding n_test_samples
Returns
-------
acc : int
the accuracy of the trained classifier
classifier : class
        a trained classifier dictated by the ClassObj Parameter from scikit-learn
confusion : array
Confusion matrix, shape = [n_classes, n_classes]
"""
classifier = ClassObj
classifier.fit(train_set, train_labels) # Train the Classifier
test_pred = classifier.predict(test_set) # Test the Classifier
    confusion = confusion_matrix(test_labels, test_pred).astype(float)  # Determine the confusion matrix
    num_test_trials = len(test_labels)  # Get the number of trials
    acc = sum(np.diag(confusion)) / num_test_trials  # accuracy = number_right / total_number
return acc, classifier, confusion
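
# Illustrative sketch (not in the original module): clip_classification with a
# scikit-learn LinearSVC on random two-class data. The local import keeps the
# demo self-contained; any scikit-learn classifier object works as ClassObj.
def _demo_clip_classification():
    from sklearn.svm import LinearSVC
    rng = np.random.RandomState(0)
    train_set, test_set = rng.randn(40, 6), rng.randn(20, 6)
    train_labels, test_labels = rng.randint(0, 2, 40), rng.randint(0, 2, 20)
    acc, clf, confusion = clip_classification(LinearSVC(), train_set,
                                              train_labels, test_set, test_labels)
    print(acc)          # chance-level accuracy on random data
    print(confusion)    # 2 x 2 confusion matrix
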
def random_feature_dropping(train_set: np.ndarray, train_labels: np.ndarray, test_set: np.ndarray,
test_labels: np.ndarray, ordered_index, drop_type, Class_Obj, verbose=False):
""" Repeatedly trains/test models to create a feature dropping curve (Originally for Pearson Correlation)
Parameters
----------
train_set : ndarray | (n_samples, n_features)
Training data array that is structured to work with the SciKit-learn Package
train_labels : ndarray | (n_training_samples, )
1-d array of Labels of the Corresponding n_training_samples
test_set : ndarray | (n_samples, n_features)
Testing data Array that is structured to work with the SciKit-learn Package
    test_labels : ndarray | (n_test_samples,)
1-d array of Labels of the Corresponding n_test_samples
ordered_index : ndarray | (num_total_features, [frequencies, channels, templates])
ledger of the feature identity for the scikit-learn data structure
Power: (Num of Features, [frequencies, channels])
Pearson: (Num of Features, [frequencies, channels, templates])
drop_type : str
Controls whether the dictionary indexes the channel number of the frequency band
Class_Obj : class
classifier object from the scikit-learn package
verbose : bool
        If True the function will print out useful information for the user as it runs, defaults to False.
Returns
-------
dropping_curve : ndarray
ndarray of accuracy values from the feature dropping code (values are floats)
(Number of Features (Decreasing), Number of Nested Folds)
"""
# 1.) Initiate Lists for Curve Components
feat_ids = make_feature_dict(ordered_index=ordered_index, drop_type=drop_type) # Convert ordered_index to a dict
num_channels = len(feat_ids.keys()) # Determine the Number of Dropping indexes
dropping_curve = np.zeros([num_channels + 1, 1]) # Create Empty array for Dropping Curves
drop_list = []
# 2.) Print Information about the Feature Set to be Dropped
if verbose:
print("Number of columns dropped per cycle", len(feat_ids[0])) # Print number of columns per dropped feature
print("Number of Channels total:", len(feat_ids)) # Print number of Features
temp = feat_ids.copy() # Create a temporary internal *shallow? copy of the index dictionary
# 3.) Begin Feature Dropping steps
# Find the first Accuracy
first_acc, _, _ = clip_classification(ClassObj=Class_Obj, train_set=train_set, train_labels=train_labels,
test_set=test_set, test_labels=test_labels)
if verbose:
print("First acc: %s..." % first_acc)
# print("First Standard Error is: %s" % first_err_bars) ###### I added this for the error bars
    dropping_curve[0, :] = first_acc  # Store the accuracy with every feature intact as the curve's starting point
index = 1
while num_channels > 2: # Decrease once done with development
ids_remaining = list(temp.keys()) # Make List of the Keys(Features) from those that remain
num_channels = len(ids_remaining) # keep track of the number of Features
# Select the index for Feature to be Dropped from list of keys those remaining (using random.choice())
drop_feat_ids = random.choice(ids_remaining)
if verbose:
print("List of Channels Left: ", ids_remaining)
print("Number of Channels Left:", num_channels)
print("Channel to be Dropped:", drop_feat_ids)
# Remove Key and Index for Designated Feature
del temp[drop_feat_ids] # Delete key for Feature Designated to be Dropped from overall list
drop_list.append(drop_feat_ids) # Add Designated Drop Feature to Drop list
# Remove sel feature from train feature array
train_remaining_features, _ = drop_features(features=train_set, keys=feat_ids, desig_drop_list=drop_list)
# Remove sel feature from test feature array
test_remaining_features, _ = drop_features(features=test_set, keys=feat_ids, desig_drop_list=drop_list)
acc, _, _ = clip_classification(ClassObj=Class_Obj, train_set=train_remaining_features,
train_labels=train_labels, test_set=test_remaining_features,
test_labels=test_labels)
dropping_curve[index, :] = acc # Append Resulting Accuracy to Curve List
if verbose:
print("Drop accuracies: ", acc)
print("Dropping Feature was %s..." % drop_feat_ids)
index += 1
return dropping_curve
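# Usage sketch for random_feature_dropping above (hedged: the array names and the
# classifier choice are illustrative assumptions, not values from this module):
#   from sklearn.svm import SVC
#   curve = random_feature_dropping(train_set=X_train, train_labels=y_train,
#                                   test_set=X_test, test_labels=y_test,
#                                   ordered_index=ledger, drop_type='channel',
#                                   Class_Obj=SVC(kernel='linear'), verbose=True)
#   # curve[0] is the accuracy with all features; each later row is the accuracy
#   # after one more randomly chosen channel's columns have been dropped.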
def random_feature_drop_multi_narrow_chunk(event_data, ClassObj, drop_temps, k_folds=5, seed=None, verbose=False):
""" Runs the Random Channel Feature Dropping algorithm on a set of pre-processed data (defaults to 5K repeats)
Parameters
----------
event_data : ndarray | (classes, instances, frequencies, channels, samples)
Randomly Rebalanced Neural Data (output of balance_classes)
ClassObj : class
classifier object from the scikit-learn package
drop_temps : list
list of the indexes of templates to not use as features
k_folds : int
Number of Folds to Split between Template | Train/Test sets, defaults to 5,
seed : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is
the random number generator; If None, the random number generator is the RandomState instance used by np.random.
verbose : bool
        If True the function will print out useful information for the user as it runs, defaults to False.
Returns
    -------
    mean_curve : ndarray
        mean dropping curve, averaged over the k folds and the 5000 random repeats
    std_curve : ndarray
        standard error across the repeats of the fold-averaged dropping curves
    """
# 1.) Make Array for Holding all of the feature dropping curves
nested_dropping_curves = [] # np.zeros([])
# 2.) Create INDEX of all instances of interests : create_discrete_index()
label_identities, label_index = create_discrete_index(event_data=event_data)
identity_index = np.arange(len(label_index))
sss = StratifiedShuffleSplit(n_splits=k_folds, random_state=seed)
sss.get_n_splits(identity_index, label_index)
if verbose:
print(sss)
fold_number = 0
# --------- For Loop over possible Training Sets---------
for train_index, test_index in sss.split(identity_index, label_index):
if verbose:
print("TRAIN:", train_index, "TEST:", test_index)
fold_number += 1
print("On Fold #" + str(fold_number) + ' of ' + str(k_folds))
X_train, X_test = identity_index[train_index], identity_index[test_index]
y_train, y_test = label_index[train_index], label_index[test_index]
# 4.) Use INDEX to Break into corresponding [template/training set| test set] : ml_selector()
# 4.1) Get template set/training : ml_selector(event_data, identity_index, label_index, sel_instances)
sel_train = ml_selector(event_data=event_data, identity_index=label_identities, label_index=label_index,
sel_instances=X_train, )
# 4.1) Get test set : ml_selector()
sel_test = ml_selector(event_data=event_data, identity_index=label_identities, label_index=label_index,
sel_instances=X_test)
# 5.) Use template/training set to make template : make_templates(event_data)
templates = make_templates(event_data=sel_train)
# 5.2) Remove Template that aren't needed from train
templates = np.delete(templates, drop_temps, axis=0)
# 6.1) Use template/training INDEX and template to create Training Pearson Features : pearson_extraction()
train_pearson_features = pearson_extraction(event_data=sel_train, templates=templates)
# 6.2) Use test INDEX and template to create Test Pearson Features : pearson_extraction()
test_pearson_features = pearson_extraction(event_data=sel_test, templates=templates)
# 7.1) Reorganize Test Set into Machine Learning Format : ml_order_pearson()
ml_trials_train, ml_labels_train = ml_order(extracted_features_array=train_pearson_features)
# 7.2) Get Ledger of the Features
num_freqs, num_chans, num_temps = np.shape(train_pearson_features[0][0]) # Get the shape of the Feature data
ordered_index = make_feature_id_ledger(num_freqs=num_freqs, num_chans=num_chans, num_temps=num_temps)
# 7.3) Reorganize Training Set into Machine Learning Format : ml_order_pearson()
ml_trials_test, ml_labels_test = ml_order(extracted_features_array=test_pearson_features)
repeated_freq_curves = []
test_list = list(np.arange(num_chans))
random.seed(0)
for index in range(5000):
drop_order = random.sample(test_list, k=len(test_list))
fold_frequency_curves = []
for freq in range(num_freqs):
# if verbose:
# print("On Frequency Band:", freq, " of:", num_freqs)
ml_trials_train_cp = ml_trials_train.copy() # make a copy of the feature extracted Train data
ml_trials_test_cp = ml_trials_test.copy() # make a copy of the feature extracted Test data
ordered_index_cp = ordered_index.copy() # make a copy of the ordered_index
all_other_freqs = list(np.delete(np.arange(num_freqs), [freq])) # Make a index of the other frequencies
temp_feature_dict = make_feature_dict(ordered_index=ordered_index_cp,
drop_type='frequency') # Feature Dict
# reduce to selected frequency from the COPY of the training data
ml_trials_train_freq, full_drop = drop_features(features=ml_trials_train_cp, keys=temp_feature_dict,
desig_drop_list=all_other_freqs)
# reduce to but the selected frequency from the COPY of test data
ml_trials_test_freq, _ = drop_features(features=ml_trials_test_cp, keys=temp_feature_dict,
desig_drop_list=all_other_freqs)
ordered_index_cp = np.delete(ordered_index_cp, full_drop,
axis=0) # Remove features from other frequencies
# 8.) Perform Nested Feature Dropping with K-Fold Cross Validation
nested_drop_curve = ordered_feature_dropping(train_set=ml_trials_train_freq,
train_labels=ml_labels_train,
test_set=ml_trials_test_freq, test_labels=ml_labels_test,
ordered_index=ordered_index_cp, drop_type='channel',
Class_Obj=ClassObj, order=drop_order, verbose=False)
fold_frequency_curves.append(nested_drop_curve) # For each Individual Frequency Band
if verbose:
if index % 100 == 0:
print('on loop' + str(index))
repeated_freq_curves.append(fold_frequency_curves) # Exhaustive Feature Dropping
nested_dropping_curves.append(repeated_freq_curves) # All of the Curves
# 9.) Combine all curve arrays to one array
all_drop_curves = np.array(nested_dropping_curves) # (folds, 5K Repeats, frequencies, num_dropped, 1)
# 10.) Calculate curve metrics
fold_mean_curve = np.mean(all_drop_curves, axis=0)
mean_curve = np.mean(fold_mean_curve, axis=0)
# std_curve = np.std(all_drop_curves, axis=0, ddof=1) # ddof parameter is set to 1 to return the sample std
std_curve = scipy.stats.sem(fold_mean_curve, axis=0)
return mean_curve, std_curve
def ordered_feature_dropping(train_set: np.ndarray, train_labels: np.ndarray, test_set: np.ndarray,
test_labels: np.ndarray, ordered_index, drop_type, Class_Obj, order, verbose=False):
""" Repeatedly trains/test models to create a feature dropping curve (Originally for Pearson Correlation)
Parameters
----------
train_set : ndarray | (n_samples, n_features)
Training data array that is structured to work with the SciKit-learn Package
train_labels : ndarray | (n_training_samples, )
1-d array of Labels of the Corresponding n_training_samples
test_set : ndarray | (n_samples, n_features)
Testing data Array that is structured to work with the SciKit-learn Package
    test_labels : ndarray | (n_test_samples, )
1-d array of Labels of the Corresponding n_test_samples
ordered_index : ndarray | (num_total_features, [frequencies, channels, templates])
ledger of the feature identity for the scikit-learn data structure
Power: (Num of Features, [frequencies, channels])
Pearson: (Num of Features, [frequencies, channels, templates])
drop_type : str
        Controls whether the dictionary indexes the channel number or the frequency band
Class_Obj : class
classifier object from the scikit-learn package
    order : list
        ordered list of the feature-dictionary keys (e.g. channel indices) giving the order in which features are dropped
    verbose : bool
        If True the function will print out useful information for the user as it runs, defaults to False.
Returns
-------
dropping_curve : ndarray
ndarray of accuracy values from the feature dropping code (values are floats)
(Number of Features (Decreasing), Number of Nested Folds)
"""
# 1.) Initiate Lists for Curve Components
feat_ids = make_feature_dict(ordered_index=ordered_index, drop_type=drop_type) # Convert ordered_index to a dict
num_channels = len(feat_ids.keys()) # Determine the Number of Dropping indexes
dropping_curve = np.zeros([num_channels + 1, 1]) # Create Empty array for Dropping Curves
drop_list = []
# 2.) Print Information about the Feature Set to be Dropped
if verbose:
print("Number of columns dropped per cycle", len(feat_ids[0])) # Print number of columns per dropped feature
print("Number of Channels total:", len(feat_ids)) # Print number of Features
    temp = feat_ids.copy()  # Create a temporary internal shallow copy of the index dictionary
# 3.) Begin Feature Dropping steps
# Find the first Accuracy
first_acc, _, _ = clip_classification(ClassObj=Class_Obj, train_set=train_set, train_labels=train_labels,
test_set=test_set, test_labels=test_labels)
if verbose:
print("First acc: %s..." % first_acc)
# print("First Standard Error is: %s" % first_err_bars) ###### I added this for the error bars
dropping_curve[0, :] = first_acc # Append BDF's Accuracy to Curve List
# index = 1
# while num_channels > 2: # Decrease once done with development
for index, channel in enumerate(order[:-1]):
ids_remaining = list(temp.keys()) # Make List of the Keys(Features) from those that remain
num_channels = len(ids_remaining) # keep track of the number of Features
# Select the index for Feature to be Dropped from list of keys those remaining (using random.choice())
drop_feat_ids = channel
if verbose:
print("List of Channels Left: ", ids_remaining)
print("Number of Channels Left:", num_channels)
print("Channel to be Dropped:", drop_feat_ids)
# Remove Key and Index for Designated Feature
del temp[drop_feat_ids] # Delete key for Feature Designated to be Dropped from overall list
drop_list.append(drop_feat_ids) # Add Designated Drop Feature to Drop list
# Remove sel feature from train feature array
train_remaining_features, _ = drop_features(features=train_set, keys=feat_ids, desig_drop_list=drop_list)
# Remove sel feature from test feature array
test_remaining_features, _ = drop_features(features=test_set, keys=feat_ids, desig_drop_list=drop_list)
acc, _, _ = clip_classification(ClassObj=Class_Obj, train_set=train_remaining_features,
train_labels=train_labels, test_set=test_remaining_features,
test_labels=test_labels)
dropping_curve[index + 1, :] = acc # Append Resulting Accuracy to Curve List
if verbose:
print("Drop accuracies: ", acc)
print("Dropping Feature was %s..." % drop_feat_ids)
return dropping_curve
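# Usage sketch for ordered_feature_dropping (hedged: names are illustrative; the
# only module-specific fact used is that `order` is a shuffled list of channel
# keys, as built with random.sample() in the caller above):
#   drop_order = random.sample(list(range(num_chans)), k=num_chans)
#   curve = ordered_feature_dropping(train_set=X_train, train_labels=y_train,
#                                    test_set=X_test, test_labels=y_test,
#                                    ordered_index=ledger, drop_type='channel',
#                                    Class_Obj=clf, order=drop_order, verbose=False)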
def random_feature_drop_chunk(event_data, ClassObj, k_folds=5, seed=None, verbose=False):
""" Runs the Random Channel Feature Dropping algorithm on a set of pre-processed data (All Features Together)
Parameters
----------
event_data : ndarray | (classes, instances, frequencies, channels, samples)
Randomly Rebalanced Neural Data (output of balance_classes)
ClassObj : class
classifier object from the scikit-learn package
k_folds : int
Number of Folds to Split between Template | Train/Test sets, defaults to 5,
seed : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is
the random number generator; If None, the random number generator is the RandomState instance used by np.random.
verbose : bool
        If True the function will print out useful information for the user as it runs, defaults to False.
Returns
    -------
    mean_curve : ndarray
        mean dropping curve, averaged over the k folds
    std_curve : ndarray
        standard error of the mean across folds
    """
# 1.) Make Array for Holding all of the feature dropping curves
nested_dropping_curves = [] # np.zeros([])
# 2.) Create INDEX of all instances of interests : create_discrete_index()
label_identities, label_index = create_discrete_index(event_data=event_data)
identity_index = np.arange(len(label_index))
sss = StratifiedShuffleSplit(n_splits=k_folds, random_state=seed)
sss.get_n_splits(identity_index, label_index)
if verbose:
print(sss)
# --------- For Loop over possible Training Sets---------
for train_index, test_index in sss.split(identity_index, label_index):
if verbose:
print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = identity_index[train_index], identity_index[test_index]
y_train, y_test = label_index[train_index], label_index[test_index]
# 4.) Use INDEX to Break into corresponding [template/training set| test set] : ml_selector()
# 4.1) Get template set/training : ml_selector(event_data, identity_index, label_index, sel_instances)
sel_train = ml_selector(event_data=event_data, identity_index=label_identities, label_index=label_index,
sel_instances=X_train, )
# 4.1) Get test set : ml_selector()
sel_test = ml_selector(event_data=event_data, identity_index=label_identities, label_index=label_index,
sel_instances=X_test)
# 5.) Use template/training set to make template : make_templates(event_data)
templates = make_templates(event_data=sel_train)
# 6.1) Use template/training INDEX and template to create Training Pearson Features : pearson_extraction()
train_pearson_features = pearson_extraction(event_data=sel_train, templates=templates)
# 6.2) Use test INDEX and template to create Test Pearson Features : pearson_extraction()
test_pearson_features = pearson_extraction(event_data=sel_test, templates=templates)
# 7.1) Reorganize Test Set into Machine Learning Format : ml_order_pearson()
ml_trials_train, ml_labels_train = ml_order(extracted_features_array=train_pearson_features)
# 7.2) Get Ledger of the Features
num_freqs, num_chans, num_temps = np.shape(train_pearson_features[0][0]) # Get the shape of the Feature data
ordered_index = make_feature_id_ledger(num_freqs=num_freqs, num_chans=num_chans, num_temps=num_temps)
# 7.3) Reorganize Training Set into Machine Learning Format : ml_order_pearson()
ml_trials_test, ml_labels_test = ml_order(extracted_features_array=test_pearson_features)
# 8.) Perform Nested Feature Dropping with K-Fold Cross Validation
nested_drop_curve = random_feature_dropping(train_set=ml_trials_train, train_labels=ml_labels_train,
test_set=ml_trials_test, test_labels=ml_labels_test,
ordered_index=ordered_index, drop_type='channel',
Class_Obj=ClassObj, verbose=False)
nested_dropping_curves.append(nested_drop_curve)
# 9.) Combine all curve arrays to one array
all_drop_curves = np.array(nested_dropping_curves) # (folds, frequencies, num_dropped, 1)
# 10.) Calculate curve metrics
mean_curve = np.mean(all_drop_curves, axis=0)
# std_curve = np.std(all_drop_curves, axis=0, ddof=1) # ddof parameter is set to 1 to return the sample std
std_curve = scipy.stats.sem(all_drop_curves, axis=0)
return mean_curve, std_curve
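# Interpretation sketch (assumes a binary, balanced problem so chance level is
# 0.5; the plotting calls are standard matplotlib, not part of this module):
#   import matplotlib.pyplot as plt
#   m, s = mean_curve.squeeze(), std_curve.squeeze()
#   plt.errorbar(range(len(m)), m, yerr=s)
#   plt.axhline(0.5, linestyle='--')  # chance level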
| [
"darilbii@gmail.com"
] | darilbii@gmail.com |
41c74a36d37594b12e4e223ddad17e093507dc9b | 45eb71388fd28ea37fdaab7b29043e81464c222a | /python/hamming/hamming.py | 1f48021f2fa7fbec2879d1a06ae47928b0e9c39d | [] | no_license | jpleyvat/excercism | b08460134975e40e60bd16de66c25c54d4e0d542 | d3dc93013007af68a096792686b538c1962641c1 | refs/heads/main | 2023-05-04T22:15:06.796636 | 2021-05-25T16:37:54 | 2021-05-25T16:37:54 | 370,416,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | def distance(strand_a, strand_b):
if len(strand_a) != len(strand_b):
raise ValueError("Strangs must have the same length")
diffs = [strand_a[i] == strand_b[i] for i in range(len(strand_a))]
return diffs.count(False)
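# Worked example (classic exercism test case, not data from this repo):
#   distance("GAGCCTACTAACGGGAT", "CATCGTAATGACGGCCT")  # -> 7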
| [
"jpleyvat@gmail.com"
] | jpleyvat@gmail.com |
92d349d1183ad8d98d35f92dfe3999a7489d12fb | 8b1c25efcf88db14f8d545b770e9f745932fef03 | /2D_heat_transfer_FDM/python_validation/src/fdm_heat_validation_utility/__init__.py | 74fdfeab26154f799982e6f2567254726081dccb | [] | no_license | class4kayaker/GPGPU_self_study | d8b11695d89169c11ef8f2c03e4ad9b870a12bb4 | 23b6db522a4f98e6f01a172bb2f8709d6dadc573 | refs/heads/master | 2023-03-20T20:53:54.406684 | 2021-03-23T03:24:38 | 2021-03-23T03:24:38 | 251,442,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | """Utilities to validate a FDM implementation based on providing
and checking the error using HDF5 files generated"""
__version__ = "0.0.1"
from .initial_conditions import init_by_name
from .model_utils import FDM_State
from .calculate_diff import FDM_Diff
__all__ = ["init_by_name", "FDM_State", "FDM_Diff"]
| [
"class4kayaker@gmail.com"
] | class4kayaker@gmail.com |
3d3e8fcf087d8de0e65fbdcaf9fe9d927cb76013 | 177de5863feb6d48d8cc686fe788f5f4c803d9dc | /Functions.py | 3fedef9bfd85a045a3142a454a27ff0bc0100462 | [] | no_license | elena23sarov/I_want_to_be_an_Artezio_employee | 1798750388386f2ace40a01f2809d40c5b2bc540 | c4d3c1915cbba2d24ba06bef2876761f876e0c86 | refs/heads/master | 2020-06-15T18:54:23.163814 | 2016-12-26T08:14:01 | 2016-12-26T08:14:01 | 75,269,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,934 | py | """Functions - zip(), list comprehensions."""
def zip_list(*args):
"""Do the same as function zip() does."""
zipped_lists = []
min_len = min(len(arg) for arg in args)
for j in range(min_len):
elem = []
for arg in args:
elem.append(arg[j])
zipped_lists.append(tuple(elem))
return zipped_lists
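# Quick check for zip_list (illustrative values; in Python 2 zip() also returns
# a list, so the two agree):
#   zip_list([1, 2, 3], ['a', 'b'])  # -> [(1, 'a'), (2, 'b')]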
def squares(my_list):
"""Return squares."""
return [a**2 for a in my_list]
def seconds(my_list):
"""Return each second element."""
return [a for a in my_list[1::2]]
def super_squares(my_list):
"""Return squares of even elements on odd positions."""
return [a**2 for a in my_list[1::2] if a % 2 == 0]
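# Expected outputs for the three helpers (illustrative input):
#   squares([1, 2, 3, 4])        # -> [1, 4, 9, 16]
#   seconds([1, 2, 3, 4])        # -> [2, 4]
#   super_squares([1, 2, 3, 4])  # -> [4, 16]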
VALID_INPUT = False
print "Firstly, look on these list comprehensions. Input a list:"
while not VALID_INPUT:
try:
X = map(int, raw_input().split())
VALID_INPUT = True
except ValueError as err:
print 'Please insert only numbers. ({})'.format(err)
print "Squares: \t", squares(X)
print "Each second elem: \t", seconds(X)
print "Squares of even elem in odd positions: \t", super_squares(X)
print "-"*20
print "Try zip() function. How many lists do you want to zip? (min = 1)"
LISTS_TO_ZIP = []
VALID_INPUT = False
while not VALID_INPUT:
try:
N = int(raw_input())
if N > 0:
VALID_INPUT = True
else:
print "Zip function works for 1 or more lists. Try again"
except ValueError as err:
print 'Please insert only numbers. ({})'.format(err)
for i in range(N):
print "Ok, insert list #{}:".format(i)
VALID_INPUT = False
while not VALID_INPUT:
try:
Y = map(int, raw_input().split())
VALID_INPUT = True
except ValueError as err:
print 'Please insert only numbers. ({})'.format(err)
LISTS_TO_ZIP.append(Y)
print zip_list(*LISTS_TO_ZIP)
| [
"noreply@github.com"
] | noreply@github.com |
e69bdbc314fdffa129309e8dfa440b4d6dfdaef4 | 7942f22ff5b5b3ff1882492cc4d558f0d3640c5d | /main.py | 08f064901d7ac92ae2840c0e78c337179c95703f | [] | no_license | naka345/car_number | f886691155c17440051c2200003232accdd8c940 | c82af3fa878558e2420904a20d057778836b586f | refs/heads/master | 2022-10-26T14:45:58.590219 | 2019-09-23T13:11:03 | 2019-09-23T13:11:03 | 271,181,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,048 | py | import numpy as np
import pandas as pd
import re
import glob
import os
import sys
path = os.path.join(os.path.dirname(os.path.abspath("__file__")), 'keras_yolo3/')
sys.path.append(path)
from PIL import Image
from my_yolo import MyYOLO
from utils import PlateUtils
myYolo = MyYOLO()
pu = PlateUtils()
columns = ["lu_x","lu_y","ru_x","ru_y","ld_x","ld_y","rd_x","rd_y"]
df_concat = pd.Series([0,0,0,0,0,0,0,0],index = columns)
date = 20190125
excel_df = pd.read_excel(f'./{date}/{date}list.xlsx', names=columns, index_col=0)
df_na = (excel_df.iloc[1:,:]).dropna()
df = df_na[1:]
img_path = f"/Users/naka345/Desktop/deeplearning/number_plate/{date}/{date}img"
output_path = "/Users/naka345/Desktop/deeplearning/number_plate/output/car/"
output_csv_path = "/Users/naka345/Desktop/deeplearning/number_plate/output/csv/"
ls = glob.glob(img_path + "/*.JPG")
c=0
for path in ls:
file_name = path.split('/')[-1]
file_num = re.sub(r'\D', '', file_name)
vertex = df.loc[int(file_num)]
print(file_name)
image = Image.open(path)
image = image.rotate(270, expand=True)
image_size = image.size
org_image = image.copy()
image, out_boxes, out_scores, out_classes = myYolo.detect_image(image)
image.save(output_path + '../' + file_name)
predict_pos = pu.choice_box(vertex, out_boxes, out_scores, out_classes, image_size)
if predict_pos is None:
del image,org_image,vertex
continue
plate_npx=np.array([vertex["lu_x"],vertex["ld_x"],vertex["ru_x"],vertex["rd_x"]])
plate_npy=np.array([vertex["lu_y"],vertex["ld_y"],vertex["ru_y"],vertex["rd_y"]])
# one car
one_car_img = org_image.crop((predict_pos['left'], predict_pos['top'], predict_pos['right'], predict_pos['bottom']))
one_car_img.save(output_path + file_name)
moved_vertex = pu.number_plate_crop(one_car_img, vertex, predict_pos, file_name)
df_concat = pd.concat([df_concat, moved_vertex],axis=1)
# detect_char_on_plate()
del image,org_image,vertex
df_T=df_concat.T
df_T[1:].to_csv(f'{output_csv_path}{date}.csv')
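# Output sketch (hedged: the exact contents of `moved_vertex` come from
# PlateUtils.number_plate_crop, which is not shown here -- presumably the plate's
# four corner points re-expressed inside the cropped car image). The CSV written
# above then has roughly this header, one row per processed photo:
#   lu_x,lu_y,ru_x,ru_y,ld_x,ld_y,rd_x,rd_y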
| [
"naka345@naka345noMacBook-Air.local"
] | naka345@naka345noMacBook-Air.local |
afa31021cdfa59468182052566b74556a996eae1 | fa0ad49b2f2b1dbd61b056f1d48e5feab0117889 | /accounts/migrations/0029_remove_asset_name.py | 312d03bc5c2a57e9999a50e37f32bf1d2e9b8567 | [] | no_license | jimmyzhoujcc/salmon | dd88ab19af3282aa228fd9293f7d9c251f5ae591 | 4201d20342a29dc056593f04459f349f85db3d16 | refs/heads/master | 2022-12-16T10:02:02.068677 | 2018-04-21T07:00:23 | 2018-04-21T07:00:23 | 130,444,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-03-05 04:18
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0028_auto_20180305_1215'),
]
operations = [
migrations.RemoveField(
model_name='asset',
name='name',
),
]
| [
"jimmyzhoujcc@gmail.com"
] | jimmyzhoujcc@gmail.com |
eeb6eb58ee42c5bc5f72743af750f3d566f3361e | aaa204ad7f134b526593c785eaa739bff9fc4d2a | /tests/providers/amazon/aws/hooks/test_glacier.py | 4ed3f6aaa2e24f18b4e5a28d34007275140c31de | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | cfei18/incubator-airflow | 913b40efa3d9f1fdfc5e299ce2693492c9a92dd4 | ffb2078eb5546420864229cdc6ee361f89cab7bd | refs/heads/master | 2022-09-28T14:44:04.250367 | 2022-09-19T16:50:23 | 2022-09-19T16:50:23 | 88,665,367 | 0 | 1 | Apache-2.0 | 2021-02-05T16:29:42 | 2017-04-18T20:00:03 | Python | UTF-8 | Python | false | false | 5,075 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import unittest
from unittest import mock
from airflow.providers.amazon.aws.hooks.glacier import GlacierHook
CREDENTIALS = "aws_conn"
VAULT_NAME = "airflow"
JOB_ID = "1234abcd"
REQUEST_RESULT = {"jobId": "1234abcd"}
RESPONSE_BODY = {"body": "data"}
JOB_STATUS = {"Action": "", "StatusCode": "Succeeded"}
class TestAmazonGlacierHook(unittest.TestCase):
def setUp(self):
with mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.__init__", return_value=None):
self.hook = GlacierHook(aws_conn_id="aws_default")
@mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.get_conn")
def test_retrieve_inventory_should_return_job_id(self, mock_conn):
# Given
job_id = {"jobId": "1234abcd"}
# when
mock_conn.return_value.initiate_job.return_value = job_id
result = self.hook.retrieve_inventory(VAULT_NAME)
# then
mock_conn.assert_called_once_with()
assert job_id == result
@mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.get_conn")
def test_retrieve_inventory_should_log_mgs(self, mock_conn):
# given
job_id = {"jobId": "1234abcd"}
# when
with self.assertLogs() as log:
mock_conn.return_value.initiate_job.return_value = job_id
self.hook.retrieve_inventory(VAULT_NAME)
# then
self.assertEqual(
log.output,
[
'INFO:airflow.providers.amazon.aws.hooks.glacier.GlacierHook:'
f"Retrieving inventory for vault: {VAULT_NAME}",
'INFO:airflow.providers.amazon.aws.hooks.glacier.GlacierHook:'
f"Initiated inventory-retrieval job for: {VAULT_NAME}",
'INFO:airflow.providers.amazon.aws.hooks.glacier.GlacierHook:'
f"Retrieval Job ID: {job_id.get('jobId')}",
],
)
@mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.get_conn")
def test_retrieve_inventory_results_should_return_response(self, mock_conn):
# when
mock_conn.return_value.get_job_output.return_value = RESPONSE_BODY
response = self.hook.retrieve_inventory_results(VAULT_NAME, JOB_ID)
# then
mock_conn.assert_called_once_with()
assert response == RESPONSE_BODY
@mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.get_conn")
def test_retrieve_inventory_results_should_log_mgs(self, mock_conn):
# when
with self.assertLogs() as log:
mock_conn.return_value.get_job_output.return_value = REQUEST_RESULT
self.hook.retrieve_inventory_results(VAULT_NAME, JOB_ID)
# then
self.assertEqual(
log.output,
[
'INFO:airflow.providers.amazon.aws.hooks.glacier.GlacierHook:'
f"Retrieving the job results for vault: {VAULT_NAME}...",
],
)
@mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.get_conn")
def test_describe_job_should_return_status_succeeded(self, mock_conn):
# when
mock_conn.return_value.describe_job.return_value = JOB_STATUS
response = self.hook.describe_job(VAULT_NAME, JOB_ID)
# then
mock_conn.assert_called_once_with()
assert response == JOB_STATUS
@mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.get_conn")
def test_describe_job_should_log_mgs(self, mock_conn):
# when
with self.assertLogs() as log:
mock_conn.return_value.describe_job.return_value = JOB_STATUS
self.hook.describe_job(VAULT_NAME, JOB_ID)
# then
self.assertEqual(
log.output,
[
'INFO:airflow.providers.amazon.aws.hooks.glacier.GlacierHook:'
f"Retrieving status for vault: {VAULT_NAME} and job {JOB_ID}",
'INFO:airflow.providers.amazon.aws.hooks.glacier.GlacierHook:'
f"Job status: {JOB_STATUS.get('Action')}, code status: {JOB_STATUS.get('StatusCode')}",
],
)
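# Run sketch (standard unittest/pytest invocation; the path is this file's
# location in the airflow tree):
#   python -m pytest tests/providers/amazon/aws/hooks/test_glacier.py -q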
| [
"noreply@github.com"
] | noreply@github.com |
54e4fa82e7cacc8e802c9e7e3a89c7c75f98bce2 | 8fe831010ca1e463d33c53a57e985cec67db2e7a | /config/settings/local.py | b2ec1d341b1a8de3779a3e53fa778d874d859c9c | [
"MIT"
] | permissive | andresbv0620/carros | 7df0f53ed194fc32adadb9586fb618f5d228bb5d | 206ba1812fb2c98c1e652276e39cdce2b82860a6 | refs/heads/master | 2020-04-15T01:33:52.501887 | 2016-09-06T04:29:57 | 2016-09-06T04:29:57 | 61,994,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,344 | py | # -*- coding: utf-8 -*-
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='&0f&)acsyfqt@r_a_&_+g-^=2%+*+zl)x@wlbg@tlz7wwjoy7(')
# Mail settings
# ------------------------------------------------------------------------------
# EMAIL_PORT = 1025
# EMAIL_HOST = 'localhost'
# EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
# default='django.core.mail.backends.console.EmailBackend')
EMAIL_PORT = 25
EMAIL_HOST = 'mx.adiktivo.com'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST_USER = 'andres@adiktivo.com'
EMAIL_HOST_PASSWORD = 'patacore'
EMAIL_USE_TLS = True
EMAIL_USE_SSL = False
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
########## CELERY
# In development, all tasks will be executed locally by blocking until the task returns
CELERY_ALWAYS_EAGER = True
########## END CELERY
# Your local stuff: Below this line define 3rd party library settings
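# Usage sketch (assumption: the `env` lookups above come from a django-environ
# style object imported via .common, so settings can be overridden per-shell):
#   DJANGO_DEBUG=False DJANGO_SECRET_KEY='dev-key' python manage.py runserver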
| [
"andresbv0620@hotmail.com"
] | andresbv0620@hotmail.com |
0a6bf67bbb3f6363667bff9f7ddbf631bd5bb554 | 40d9992e4f516ecd9914a3b3f998b0563f4fcc53 | /ansys/mapdl/core/mapdl_console.py | eeafff6a9b7223afa8e795cf6d7fa660f5c39c66 | [
"MIT"
] | permissive | AdrlVMA/pymapdl | 4f8d38626e49ef2f8d091901e916ed6c98eee503 | 43325cfcdfb8f7fa3e0fd59bf8a02afed72e6988 | refs/heads/master | 2023-03-31T15:57:25.477257 | 2021-03-27T20:12:16 | 2021-03-27T20:12:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,026 | py | """Module to control interaction with an ANSYS shell instance.
Used when launching Mapdl via pexpect on Linux when <= 17.0
"""
import os
import time
import re
# from ansys.mapdl.core.misc import kill_process
from ansys.mapdl.core.mapdl import _MapdlCore
from ansys.mapdl.core.errors import MapdlExitedError
ready_items = [
rb'BEGIN:',
rb'PREP7:',
rb'SOLU_LS[0-9]+:',
rb'POST1:',
rb'POST26:',
rb'RUNSTAT:',
rb'AUX2:',
rb'AUX3:',
rb'AUX12:',
rb'AUX15:',
# continue
rb'YES,NO OR CONTINUOUS\)\=',
rb'executed\?',
# errors
rb'SHOULD INPUT PROCESSING BE SUSPENDED\?',
# prompts
rb'ENTER FORMAT for',
]
CONTINUE_IDX = ready_items.index(rb'YES,NO OR CONTINUOUS\)\=')
WARNING_IDX = ready_items.index(rb'executed\?')
ERROR_IDX = ready_items.index(rb'SHOULD INPUT PROCESSING BE SUSPENDED\?')
PROMPT_IDX = ready_items.index(rb'ENTER FORMAT for')
nitems = len(ready_items)
expect_list = []
for item in ready_items:
expect_list.append(re.compile(item))
ignored = re.compile(r'[\s\S]+'.join(['WARNING', 'command', 'ignored']))
def launch_pexpect(exec_file=None, run_location=None, jobname=None, nproc=None,
additional_switches='', start_timeout=60):
"""Launch MAPDL as a pexpect process.
Limited to only a linux instance
"""
import pexpect
command = '%s -j %s -np %d %s' % (exec_file, jobname, nproc,
additional_switches)
process = pexpect.spawn(command, cwd=run_location)
process.delaybeforesend = None
try:
index = process.expect(['BEGIN:', 'CONTINUE'],
timeout=start_timeout)
except: # capture failure
raise RuntimeError(process.before.decode('utf-8'))
if index: # received ... press enter to continue
process.sendline('')
process.expect('BEGIN:', timeout=start_timeout)
return process
class MapdlConsole(_MapdlCore):
"""Control interaction with an ANSYS shell instance.
Only works on Linux.
"""
def __init__(self, loglevel='INFO', log_apdl='w', use_vtk=True,
**start_parm):
"""Opens an ANSYS process using pexpect"""
self._auto_continue = True
self._continue_on_error = False
self._process = None
self._launch(start_parm)
super().__init__(loglevel=loglevel, use_vtk=use_vtk, log_apdl=log_apdl,
**start_parm)
def _launch(self, start_parm):
"""Connect to MAPDL process using pexpect"""
self._process = launch_pexpect(**start_parm)
def _run(self, command):
"""Sends command and returns ANSYS's response"""
self._reset_cache()
if not self._process.isalive():
raise MapdlExitedError('ANSYS exited')
command = command.strip()
if not command:
raise ValueError('Cannot run empty command')
if command[:4].lower() == '/out':
items = command.split(',')
if len(items) > 1:
self._output = '.'.join(items[1:])
else:
self._output = ''
# send the command
self._log.debug('Sending command %s', command)
self._process.sendline(command)
# do not expect
if '/MENU' in command:
self._log.info('Enabling GUI')
self._process.sendline(command)
return
full_response = ''
while True:
i = self._process.expect_list(expect_list, timeout=None)
response = self._process.before.decode('utf-8')
full_response += response
if i >= CONTINUE_IDX and i < WARNING_IDX: # continue
self._log.debug('Continue: Response index %i. Matched %s',
i, ready_items[i].decode('utf-8'))
self._log.info(response + ready_items[i].decode('utf-8'))
if self._auto_continue:
user_input = 'y'
else:
user_input = input('Response: ')
self._process.sendline(user_input)
elif i >= WARNING_IDX and i < ERROR_IDX: # warning
self._log.debug('Prompt: Response index %i. Matched %s',
i, ready_items[i].decode('utf-8'))
self._log.warning(response + ready_items[i].decode('utf-8'))
if self._auto_continue:
user_input = 'y'
else:
user_input = input('Response: ')
self._process.sendline(user_input)
elif i >= ERROR_IDX and i < PROMPT_IDX: # error
self._log.debug('Error index %i. Matched %s',
i, ready_items[i].decode('utf-8'))
self._log.error(response)
response += ready_items[i].decode('utf-8')
raise Exception(response)
elif i >= PROMPT_IDX: # prompt
self._log.debug('Prompt index %i. Matched %s',
i, ready_items[i].decode('utf-8'))
self._log.info(response + ready_items[i].decode('utf-8'))
raise RuntimeError('User input expected. '
'Try using ``with mapdl.non_interactive``')
else: # continue item
self._log.debug('continue index %i. Matched %s',
i, ready_items[i].decode('utf-8'))
break
# return last response and all preceding responses
return full_response
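    # Usage sketch (hedged: the executable path, jobname and process count below
    # are placeholders, not values from this module; console mode needs a local
    # Linux MAPDL install):
    #   mapdl = MapdlConsole(exec_file='/path/to/ansys170', run_location='/tmp/wdir',
    #                        jobname='file', nproc=2, additional_switches='')
    #   response = mapdl._run('/PREP7')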
def exit(self, close_log=True, timeout=3):
"""Exit MAPDL process.
Parameters
----------
timeout : float
Maximum time to wait for MAPDL to exit. Set to 0 or
``None`` to not wait until MAPDL stops.
"""
self._log.debug('Exiting ANSYS')
if self._process is not None:
try:
self._process.sendline('FINISH')
self._process.sendline('EXIT')
except:
pass
if close_log:
self._close_apdl_log()
self._exited = True
# edge case: need to wait until process dies, otherwise future
# commands might talk to a dead process...
if timeout:
tstart = time.time()
while self._process.isalive():
time.sleep(0.05)
                telap = time.time() - tstart
if telap > timeout:
return 1
return 0
def kill(self):
""" Forces ANSYS process to end and removes lock file """
if self._process is not None:
try:
self.exit()
except:
try:
os.kill(self._process.pid, 9)
except:
self._log.warning('Unable to kill process %d', self._process.pid)
self._log.debug('Killed process %d', self._process.pid)
| [
"noreply@github.com"
] | noreply@github.com |
d0dcdb18c7dac7250976b62a5d3e226b82d457e3 | 973a5e6995ef4cc6b5457b3aac97756ed3314925 | /single_file_projects/8_PresentParticipleForm.py | cd89775844165abb41b43e9534d45f366d0b80aa | [] | no_license | paulinaJaworska/mini-python-projects | 5d006839ef509b1d5106b118be12cf131810322c | 01745a56e4f6f0e4d4eea4f72a97169d379836f1 | refs/heads/master | 2020-09-07T15:16:44.063093 | 2019-11-10T20:29:43 | 2019-11-10T20:29:43 | 220,824,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,545 | py | import sys
def present_participle():
# creates present participle form from the verb
if len(sys.argv) > 1:
for item in sys.argv[1:]:
try:
word = str(item)
consonants = ['B', 'b', 'C', 'c', 'D', 'd', 'F', 'f', 'G', 'g', 'H', 'h', 'J', "j", 'K', 'k', 'L', 'l', 'M', 'm', 'N', 'n', 'P', 'p', 'Q', 'q', 'R', 'r', "S", 's', 'T', 't', 'V', 'v', 'X', 'x', 'Z', 'z']
vowels = ['A', 'a', 'E', 'e', 'I', 'i', 'O', 'o', 'U', 'u']
if word[-2:] == 'ie':
                # converts words ending with ie (e.g. lie -> lying)
                word = word[:-2]
print(str(word) + "ying")
elif word[-1] == 'e' and word[-2] != 'e':
# converts words ending with e and exclude exceptions of words ending with double ee
word = word[:-1]
print(str(word) + "ing")
elif len(word) == 3 and word[0] in consonants and word[1] in vowels and word[2] in consonants:
# converts words like consonant - vowel - consonant
word = word + word[-1]
print(str(word) + "ing")
else:
                # adds ing at the end of the argument for the rest of the words without converting them
print(str(word) + "ing")
except ValueError:
print("Please, enter words only!")
else:
print("Please, specify aruments.")
present_participle()
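# Example run (hypothetical command line; the program prints one result per line):
#   python 8_PresentParticipleForm.py run make lie sit
#   -> running, making, lying, sitting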
| [
"paulinajaworska9@gmail.com"
] | paulinajaworska9@gmail.com |
ebbfa4ce3921743a2cac5d388e06a808086b00de | 71cc62fe3fec8441794a725b7ce3037dc2723107 | /ifreewallpapers/apps/profile/views/profileviews.py | 71d4bc4644c89e4257d2ab7d6120ca761ceb5375 | [] | no_license | tooxie/django-ifreewallpapers | bda676dc5a6c45329ad6763862fe696b3e0c354b | 75d8f41a4c6aec5c1091203823c824c4223674a6 | refs/heads/master | 2020-05-21T12:50:36.907948 | 2011-01-19T04:28:33 | 2011-01-19T04:28:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | # coding=UTF-8
from profile.models import Profile
# from profile import settings as _settings
from utils.decorators import render_response
to_response = render_response('profile/')
# from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
# from django.core.urlresolvers import reverse
# from django.http import HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404
"""
@to_response
def overview(request, ):
return 'profile.html'
"""
@to_response
def public(request, slug):
    profile = get_object_or_404(Profile, slug=slug)
return 'public.html', {'profile': profile}
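# URLconf sketch (hypothetical pattern; only the view signature
# public(request, slug) is taken from this file):
#   url(r'^(?P<slug>[\w-]+)/$', 'profile.views.profileviews.public'),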
| [
"alvaro@mourino.net"
] | alvaro@mourino.net |
8e021335ee2eff39517bd3888f2a9effb212d25e | cc95732b845a31ea51dc58fb00f0a7c8d7c083dc | /Trabalho 5 - Leitura de arquivo PLY/Main.py | 0ee57835dc48e4c3a86207281de11d5b269296b2 | [] | no_license | BernardoPLPSO/Computacao-Grafica | 2cffc291e2c265651279b1ed5385b5c039e2148a | e69f6ac3bcc09e9f705a68e9d41cc360fcd8f2c4 | refs/heads/master | 2020-05-02T10:37:37.481995 | 2019-07-09T02:00:43 | 2019-07-09T02:00:43 | 177,902,226 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,285 | py | from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GL import *
from PLYFileLoader import *
def display():
global obj
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
glRotatef(2,1,3,0)
glCallList(obj.gl_list)
glutSwapBuffers()
def timer(i):
glutPostRedisplay()
glutTimerFunc(50,timer,1)
def reshape(w,h):
glViewport(0,0,w,h)
glMatrixMode(GL_PROJECTION)
gluPerspective(45,float(w)/float(h),0.1,50.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(0,0,0.5,0,0,0,0,1,0)
def init():
global obj
glLightfv(GL_LIGHT0, GL_POSITION, (5, 5, 5, 1.0))
glLightfv(GL_LIGHT0, GL_AMBIENT, (0.4, 0.4, 0.4, 1.0))
glLightfv(GL_LIGHT0, GL_DIFFUSE, (0.6, 0.6, 0.6, 1.0))
glEnable(GL_LIGHT0)
glEnable(GL_LIGHTING)
glEnable(GL_COLOR_MATERIAL)
glClearColor(0.0,0.0,0.0,0.0)
glShadeModel(GL_SMOOTH)
glEnable(GL_DEPTH_TEST)
glEnable(GL_MULTISAMPLE)
obj = PLY("bun_zipper.ply")
def main():
glutInit(sys.argv)
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH | GLUT_MULTISAMPLE)
glutInitWindowSize(800,600)
glutCreateWindow("Obj")
glutReshapeFunc(reshape)
glutDisplayFunc(display)
glutTimerFunc(50,timer,1)
init()
glutMainLoop()
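# Pipeline sketch: init() loads "bun_zipper.ply" (presumably the Stanford bunny
# scan -- an assumption, the file is only named here) into a display list,
# reshape() sets the perspective camera, display() redraws that list with a small
# rotation each frame, and timer() schedules a redisplay every 50 ms.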
main() | [
"36808813+BernardoPLPSO@users.noreply.github.com"
] | 36808813+BernardoPLPSO@users.noreply.github.com |
7b1bae4103e32687a123aeebfa23414fa32490d3 | 749de43379dced8ad1c542fecf95bb09ae37da5a | /item/migrations/0002_auto_20160214_1428.py | 095be8e0aedc37eedf9a85a63e7c13cea8c57d4a | [] | no_license | GregoryBuligin/example_django_rest | c03ac3ac8b254f3f2357cf523eba6c9a84d000fa | 1de26df733531bd61c84bffed2abf1f3a7eb2935 | refs/heads/master | 2021-01-10T17:41:33.542394 | 2016-03-06T16:00:24 | 2016-03-06T16:00:24 | 53,249,271 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-14 14:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('item', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='item',
name='item_category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='category', to='category.Category'),
),
]
| [
"gri9996@yandex.ru"
] | gri9996@yandex.ru |
f560c25459fe8731edd4ca170e93db28fb5e8adf | 096c38cc967a164a31d5f18a2c12b391b382754e | /Analysis/Analysis_Act_vs_t0.py | 5b1a83b46d36b3a4b24546a269f9b4b55d9ef8b6 | [] | no_license | SmoothCB/pyUrns | ff0b3398101ea60e9303e3ed21d9aeeab9bab7d7 | 171ffaa76b0b9f2ab81456a601921691cc97182e | refs/heads/master | 2023-03-18T14:26:48.683658 | 2019-11-02T10:11:02 | 2019-11-02T10:11:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,125 | py | import os, sys,gzip
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
IDir = sys.argv[1]
ODir = sys.argv[2]
if True: #gzipped files...
Apri = gzip.open
else:
Apri = open
Files = sorted(os.listdir(IDir))
nFiles = len(Files)
Dat = {}
eve_time = np.zeros(nFiles)
NEve = 0
for Find, fn in enumerate(Files):
with Apri(os.path.join(IDir, fn), 'rb') as f:
for l in f:
v = l.strip().split()
clr = v[0]
cld = v[1]
Dat.setdefault(clr, {"a": .0, "t0m": (Find+1.)-.5,\
"t0e": NEve})
Dat[clr]["a"] += 1.
Dat.setdefault(cld, {"a": .0, "t0m": (Find+1.)-.5,\
"t0e": NEve})
NEve += 1
eve_time[Find] = NEve
sys.stdout.write("File %s - %03d of %03d done...\r"\
%(fn, Find+1, len(Files)))
sys.stdout.flush()
print ""
print "Done!"
print ""
Acts = [.0]*len(Dat)
T0s_mo = [.0]*len(Dat)
T0s_ev = [.0]*len(Dat)
for ID, Vals in enumerate(Dat.values()):
Acts[ID] = Vals["a"]
T0s_mo[ID] = Vals["t0m"]
T0s_ev[ID] = Vals["t0e"]
Acts = np.array(Acts, dtype=np.double)
T0s_mo = np.array(T0s_mo, dtype=np.double)
T0s_ev = np.array(T0s_ev, dtype=np.double)
if not os.path.exists(ODir):
os.mkdir(ODir)
ODir = os.path.join(ODir, "00")
if not os.path.exists(ODir):
os.mkdir(ODir)
ODir = os.path.join(ODir, "rhos")
if not os.path.exists(ODir):
os.mkdir(ODir)
with open(os.path.join(ODir, "zzz_Act_vs_t0.dat"), "wb") as of:
for A, tm, te in zip(Acts, T0s_mo, T0s_ev):
of.write("%d\t%.01f\t%.03e\n" % (A, tm, te))
plt.hexbin(T0s_mo, Acts, bins='log', cmap=plt.cm.YlOrRd_r)
plt.axis([T0s_mo.min(), T0s_mo.max(), Acts.min(), Acts.max()])
plt.xlabel(r"$t_0$[months]")
plt.ylabel(r"activity")
plt.savefig(os.path.join(ODir, "zzz_Act_vs_t0_file.pdf"))
plt.close()
plt.hexbin(T0s_ev, Acts, bins='log', cmap=plt.cm.YlOrRd_r)
plt.axis([T0s_ev.min(), T0s_ev.max(), Acts.min(), Acts.max()])
plt.xlabel(r"$t_0$[events]")
plt.ylabel(r"activity")
plt.savefig(os.path.join(ODir, "zzz_Act_vs_t0_events.pdf"))
| [
"enricoubaldi@gmail.com"
] | enricoubaldi@gmail.com |
74839d9264baf7b7925f1bee6d71589799100930 | a05d2cff839815635b474220d066f6881094492c | /the_last_figure.py | 6b36edb98e7cb905fdb6fd462ded5ced5c308b6d | [] | no_license | Assessor/Phytnon-Coursera | d629387f475e3c0d24247c5a10a29f6c4fb31b17 | 68223cc5892e5050a2023414d5df0db071913714 | refs/heads/master | 2021-01-25T01:21:31.788820 | 2018-03-28T18:48:14 | 2018-03-28T18:48:14 | 123,308,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | '''
Given a natural number, print its last digit.
'''
num = int(input())
print(num % 10)
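# Worked example: for input 179, 179 % 10 == 9, so the program prints 9.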
| [
"ivan.maltsev78@gmail.com"
] | ivan.maltsev78@gmail.com |
e28c324b92c57e76fe118f4191f692caec54dda4 | 11fcc56eaefbc994a585bcbdac3ab11b5158f3d8 | /Programming a Robotic Car/hw3-6.py | d4a3b92218cc958129fd9472003d6906457a8b99 | [] | no_license | mstamboroski/Udacity_Projects | 1c6a81e84cc1fbf1a4fbc3e4e46f6b84b9d38e5c | 9c9340ad1938188e901ff17680b2c97e95c839c6 | refs/heads/master | 2021-01-19T05:59:33.059047 | 2016-08-03T00:09:21 | 2016-08-03T00:09:21 | 64,800,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,903 | py | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Maycon
#
# Created: 11/03/2012
# Copyright: (c) Maycon 2012
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
def main():
pass
if __name__ == '__main__':
main()
# --------------
# USER INSTRUCTIONS
#
# Now you will put everything together.
#
# First make sure that your sense and move functions
# work as expected for the test cases provided at the
# bottom of the previous two programming assignments.
# Once you are satisfied, copy your sense and move
# definitions into the robot class on this page, BUT
# now include noise.
#
# A good way to include noise in the sense step is to
# add Gaussian noise, centered at zero with variance
# of self.bearing_noise to each bearing. You can do this
# with the command random.gauss(0, self.bearing_noise)
#
# In the move step, you should make sure that your
# actual steering angle is chosen from a Gaussian
# distribution of steering angles. This distribution
# should be centered at the intended steering angle
# with variance of self.steering_noise.
#
# Feel free to use the included set_noise function.
#
# Please do not modify anything except where indicated
# below.
from math import *
import random
# --------
#
# some top level parameters
#
max_steering_angle = pi / 4.0 # You do not need to use this value, but keep in mind the limitations of a real car.
bearing_noise = 0.1 # Noise parameter: should be included in sense function.
steering_noise = 0.1 # Noise parameter: should be included in move function.
distance_noise = 5.0 # Noise parameter: should be included in move function.
tolerance_xy = 15.0 # Tolerance for localization in the x and y directions.
tolerance_orientation = 0.25 # Tolerance for orientation.
# --------
#
# the "world" has 4 landmarks.
# the robot's initial coordinates are somewhere in the square
# represented by the landmarks.
#
# NOTE: Landmark coordinates are given in (y, x) form and NOT
# in the traditional (x, y) format!
landmarks = [[0.0, 100.0], [0.0, 0.0], [100.0, 0.0], [100.0, 100.0]] # position of 4 landmarks in (y, x) format.
world_size = 100.0 # world is NOT cyclic. Robot is allowed to travel "out of bounds"
# ------------------------------------------------
#
# this is the robot class
#
class robot:
# --------
# init:
# creates robot and initializes location/orientation
#
def __init__(self, length = 20.0):
self.x = random.random() * world_size # initial x position
self.y = random.random() * world_size # initial y position
self.orientation = random.random() * 2.0 * pi # initial orientation
self.length = length # length of robot
self.bearing_noise = 0.0 # initialize bearing noise to zero
self.steering_noise = 0.0 # initialize steering noise to zero
self.distance_noise = 0.0 # initialize distance noise to zero
# --------
# set:
# sets a robot coordinate
#
def set(self, new_x, new_y, new_orientation):
if new_orientation < 0 or new_orientation >= 2 * pi:
raise ValueError, 'Orientation must be in [0..2pi]'
self.x = float(new_x)
self.y = float(new_y)
self.orientation = float(new_orientation)
# --------
# set_noise:
# sets the noise parameters
#
def set_noise(self, new_b_noise, new_s_noise, new_d_noise):
# makes it possible to change the noise parameters
# this is often useful in particle filters
self.bearing_noise = float(new_b_noise)
self.steering_noise = float(new_s_noise)
self.distance_noise = float(new_d_noise)
# --------
# measurement_prob
# computes the probability of a measurement
#
def measurement_prob(self, measurements):
# calculate the correct measurement
predicted_measurements = self.sense(0) # Our sense function took 0 as an argument to switch off noise.
# compute errors
error = 1.0
for i in range(len(measurements)):
error_bearing = abs(measurements[i] - predicted_measurements[i])
error_bearing = (error_bearing + pi) % (2.0 * pi) - pi # truncate
# update Gaussian
error *= (exp(- (error_bearing ** 2) / (self.bearing_noise ** 2) / 2.0) /
sqrt(2.0 * pi * (self.bearing_noise ** 2)))
return error
def __repr__(self): #allows us to print robot attributes.
return '[x=%.6s y=%.6s orient=%.6s]' % (str(self.x), str(self.y),
str(self.orientation))
############# ONLY ADD/MODIFY CODE BELOW HERE ###################
# --------
# move:
def move(self, motion): # Do not change the name of this function
x=self.x
y=self.y
teta=self.orientation
L=self.length
d=motion[1]
alfa=motion[0]
alfa += random.gauss(0, self.steering_noise)
d += random.gauss(0,self.distance_noise)
beta=(d*tan(alfa))/L
if abs(beta)<0.001:
newx= x + d*cos(teta)
newy= y + d*sin(teta)
newteta=(teta+beta)%(2*pi)
else:
R=L/tan(alfa)
Cx= x - R*sin(teta)
Cy= y + R*cos(teta)
newx= Cx + R*sin(beta+teta)
newy= Cy - R*cos(beta+teta)
newteta=(teta+beta)%(2*pi)
result = robot()
result.set(newx,newy,newteta)
return result # make sure your move function returns an instance
# of the robot class with the correct coordinates.
# copy your code from the previous exercise
# and modify it so that it simulates motion noise
# according to the noise parameters
# self.steering_noise
# self.distance_noise
# --------
# sense:
#
def sense(self,on): #do not change the name of this function
Z = []
for i in range(len(landmarks)):
deltax=(landmarks[i][1]-self.x)
deltay=(landmarks[i][0]-self.y)
dist=(atan2(deltay,deltax)% (2*pi))-self.orientation
if on !=0:
dist += random.gauss(0, self.bearing_noise)
Z.append(dist)
return Z #Leave this line here. Return vector Z of 4 bearings.
# copy your code from the previous exercise
# and modify it so that it simulates bearing noise
# according to
# self.bearing_noise
############## ONLY ADD/MODIFY CODE ABOVE HERE ####################
# --------
#
# extract position from a particle set
#
def get_position(p):
x = 0.0
y = 0.0
orientation = 0.0
for i in range(len(p)):
x += p[i].x
y += p[i].y
# orientation is tricky because it is cyclic. By normalizing
# around the first particle we are somewhat more robust to
# the 0=2pi problem
orientation += (((p[i].orientation - p[0].orientation + pi) % (2.0 * pi))
+ p[0].orientation - pi)
return [x / len(p), y / len(p), orientation / len(p)]
# --------
#
# The following code generates the measurements vector
# You can use it to develop your solution.
#
def generate_ground_truth(motions):
myrobot = robot()
myrobot.set_noise(bearing_noise, steering_noise, distance_noise)
Z = []
T = len(motions)
for t in range(T):
myrobot = myrobot.move(motions[t])
Z.append(myrobot.sense(0))
#print 'Robot: ', myrobot
return [myrobot, Z]
# --------
#
# The following code prints the measurements associated
# with generate_ground_truth
#
def print_measurements(Z):
T = len(Z)
print 'measurements = [[%.8s, %.8s, %.8s, %.8s],' % \
(str(Z[0][0]), str(Z[0][1]), str(Z[0][2]), str(Z[0][3]))
for t in range(1,T-1):
print ' [%.8s, %.8s, %.8s, %.8s],' % \
(str(Z[t][0]), str(Z[t][1]), str(Z[t][2]), str(Z[t][3]))
print ' [%.8s, %.8s, %.8s, %.8s]]' % \
(str(Z[T-1][0]), str(Z[T-1][1]), str(Z[T-1][2]), str(Z[T-1][3]))
# --------
#
# The following code checks to see if your particle filter
# localizes the robot to within the desired tolerances
# of the true position. The tolerances are defined at the top.
#
def check_output(final_robot, estimated_position):
error_x = abs(final_robot.x - estimated_position[0])
error_y = abs(final_robot.y - estimated_position[1])
error_orientation = abs(final_robot.orientation - estimated_position[2])
error_orientation = (error_orientation + pi) % (2.0 * pi) - pi
correct = error_x < tolerance_xy and error_y < tolerance_xy \
and error_orientation < tolerance_orientation
return correct
def particle_filter(motions, measurements, N=500): # I know it's tempting, but don't change N!
# --------
#
# Make particles
#
N=1000
p = []
for i in range(N):
r = robot()
r.set_noise(bearing_noise, steering_noise, distance_noise)
p.append(r)
# --------
#
# Update particles
#
for t in range(len(motions)):
# motion update (prediction)
p2 = []
for i in range(N):
p2.append(p[i].move(motions[t]))
p = p2
# measurement update
w = []
for i in range(N):
w.append(p[i].measurement_prob(measurements[t]))
# resampling
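        # (resampling wheel: index starts at a random particle and each of the N
        # spins advances beta by up to twice the max weight, so particle i is
        # kept with probability proportional to its weight w[i])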
p3 = []
index = int(random.random() * N)
beta = 0.0
mw = max(w)
for i in range(N):
beta += random.random() * 2.0 * mw
while beta > w[index]:
beta -= w[index]
index = (index + 1) % N
p3.append(p[index])
p = p3
return get_position(p)
## IMPORTANT: You may uncomment the test cases below to test your code.
## But when you submit this code, your test cases MUST be commented
## out.
##
## You can test whether your particle filter works using the
## function check_output (see test case 2). We will be using a similar
## function. Note: Even for a well-implemented particle filter this
## function occasionally returns False. This is because a particle
## filter is a randomized algorithm. We will be testing your code
## multiple times. Make sure check_output returns True at least 80%
## of the time.
## --------
## TEST CASES:
##
##1) Calling the particle_filter function with the following
## motions and measurements should return a [x,y,orientation]
## vector near [x=93.476 y=75.186 orient=5.2664], that is, the
## robot's true location.
##
motions = [[2. * pi / 10, 20.] for row in range(8)]
measurements = [[4.746936, 3.859782, 3.045217, 2.045506],
[3.510067, 2.916300, 2.146394, 1.598332],
[2.972469, 2.407489, 1.588474, 1.611094],
[1.906178, 1.193329, 0.619356, 0.807930],
[1.352825, 0.662233, 0.144927, 0.799090],
[0.856150, 0.214590, 5.651497, 1.062401],
[0.194460, 5.660382, 4.761072, 2.471682],
[5.717342, 4.736780, 3.909599, 2.342536]]
print particle_filter(motions, measurements)
## 2) You can generate your own test cases by generating
## measurements using the generate_ground_truth function.
## It will print the robot's last location when calling it.
##
##
number_of_iterations = 6
motions = [[2. * pi / 20, 12.] for row in range(number_of_iterations)]
x = generate_ground_truth(motions)
final_robot = x[0]
measurements = x[1]
estimated_position = particle_filter(motions, measurements)
print_measurements(measurements)
print 'Ground truth: ', final_robot
print 'Particle filter: ', estimated_position
print 'Code check: ', check_output(final_robot, estimated_position)
| [
"mstamboroski@gmail.com"
] | mstamboroski@gmail.com |
742c4ddf5eaa9d24d8ab85cf042455635e024227 | ff692d927c95f7337339599d523f986f720449f5 | /plugins/init.py | 692fec3901386220bb48bf4cea4ae5a20c1c2897 | [] | no_license | mwesterhof/pyjeeves | de567966636954aed7d88a5d51e74df85feeaba3 | 46b35f56056603330f7636a745e13fa045c884f1 | refs/heads/master | 2022-12-16T11:11:06.276555 | 2019-08-09T09:10:21 | 2019-08-09T09:10:21 | 296,265,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | import os
from plugin import BasePlugin
class Plugin(BasePlugin):
'''
Initialize an HQ in the current directory
'''
def run_command(self, args):
print('creating jeeves headquarters in {0}'.format(os.getcwd()))
os.makedirs('.jeeves')
| [
"m.westerhof@lukkien.com"
] | m.westerhof@lukkien.com |
491429f9b6b4c991742a818ea2264a44a4ce7e04 | 5506eeee48ed37c23693621c34815f7c1116760d | /node_modules/mongodb/node_modules/mongodb-core/node_modules/kerberos/build/config.gypi | 10f73ae526e8063fee0ca98b38f72410f444e94e | [
"Apache-2.0"
] | permissive | BereniceFang/thrush | 24f74b115c9be66ac4eab5054bf604d67fc0b4df | 26ec38a113fb6907f19f9aa737aefa2816a10efc | refs/heads/master | 2021-01-21T01:02:40.854111 | 2015-06-06T06:27:34 | 2015-06-06T06:27:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,779 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"icu_data_file": "icudt54l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt54l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "54",
"node_install_npm": "true",
"node_prefix": "/",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"uv_library": "static_library",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/Users/mcdong/.node-gyp/0.12.2",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/Users/mcdong/.nvm/versions/node/v0.12.2/etc/npmignore",
"init_author_url": "",
"shell": "/bin/zsh",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"registry": "http://registry.npm.taobao.org/",
"fetch_retries": "2",
"npat": "",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/Users/mcdong/.nvm/versions/node/v0.12.2/etc/npmrc",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"cafile": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"access": "",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/mcdong/.npm-init.js",
"userconfig": "/Users/mcdong/.npmrc",
"node_version": "0.12.2",
"user": "501",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"optional": "true",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/mcdong/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/2.7.4 node/v0.12.2 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "1.0.0",
"umask": "0022",
"git": "git",
"init_author_name": "",
"scope": "",
"onload_script": "",
"tmp": "/var/folders/qh/by4gjn2x0_lg9p_90v__p0sm0000gn/T",
"unsafe_perm": "true",
"link": "",
"prefix": "/Users/mcdong/.nvm/versions/node/v0.12.2"
}
}
| [
"wangdong5@xiaomi.com"
] | wangdong5@xiaomi.com |
73873c98659273917bae3e7d6262e347825dd1ea | 05ed5efbed8e2f68053e1f2bfb934145ff9c7a65 | /speech_recognition.py | 3a19ea930dd924f05419782ba75efecbad79c36f | [] | no_license | AljonViray/RobotDog | 20a7b508ccf1eb148b1dc4432a810a9bedae09e6 | cfcc2b19cddc0c944f7e5b071b4e2d5e3327fc25 | refs/heads/main | 2023-04-21T03:30:23.840899 | 2021-05-12T21:51:10 | 2021-05-12T21:51:10 | 329,702,901 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,192 | py | # Written by: Alexandra Zhang Jiang
# Guide: pypi.org/project/SpeechRecognition/
# Source Code: Anthony Zhang (Uberu) in github
import speech_recognition as sr
from led_lights import *
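# led_lights provides the turn_on_*/turn_off_* helper functions used below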
# obtain audio from the microphone
r = sr.Recognizer()
while True:
with sr.Microphone() as source:
print("Say something!")
audio = r.listen(source)
# recognize speech using Google Speech Recognition
try: # using default API key
        uni_text = r.recognize_google(audio)  # recognized text (unicode)
        str_text = uni_text.encode('ascii', 'ignore').decode('ascii')  # strip non-ASCII characters
print("Google Speech Recognition thinks you said: " + str_text)
#turn on/off led voice commands
if str_text.lower() == "turn on red led":
turn_on_red_led()
elif str_text.lower() == "turn off red led":
turn_off_red_led()
elif str_text.lower() == "turn on green led":
turn_on_green_led()
elif str_text.lower() == "turn off green led":
turn_off_green_led()
elif str_text.lower() == "turn on yellow led":
turn_on_yellow_led()
elif str_text.lower() == "turn off yellow led":
turn_off_yellow_led()
elif str_text.lower() == "turn on all leds":
turn_on_red_led()
turn_on_green_led()
turn_on_yellow_led()
elif str_text.lower() == "turn off all leds":
turn_off_red_led()
turn_off_green_led()
turn_off_yellow_led()
# exit command
elif str_text.lower() == "exit":
            break
except sr.UnknownValueError:
print("Google Speech Recognition could not recognize audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e)) | [
"noreply@github.com"
] | noreply@github.com |
d1778b0fca2c668342e5496525f847e39c73da8a | 65b25c8b9613893721735b392f566c02c6b4a526 | /old docs/oehfu.py | fdcf37e5650133a3182e88fe13d4a90bb1aecc0f | [] | no_license | micahsmith1/ETL-Project | a712f2478dead07e3b8f3654044cd7ede3101c9c | 94b1e10fd1cc36ec2d9ad2e071bfcedb7fcd3fb9 | refs/heads/main | 2023-02-04T19:28:18.521358 | 2020-12-10T03:01:55 | 2020-12-10T03:01:55 | 317,714,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | from flask import Flask, render_template, redirect, jsonify
from flask_pymongo import PyMongo
import scrape
from sqlalchemy import create_engine
import sqlalchemy
| [
"miyshmiysh@Miyshaels-MBP.attlocal.net"
] | miyshmiysh@Miyshaels-MBP.attlocal.net |
94ef7ad99668a3d0c890a8be2fd256bf28ab9194 | 1b8530ef1c108e098edfa3755e96824b31d4a2ad | /scripts/fixup_recommender_v1beta1_keywords.py | c0fe44525fc4175047ea2372ca698f42c4445c7e | [
"Apache-2.0"
] | permissive | renovate-bot/python-recommender | 4b3d0b9e0332eab0f71bd044a6832b67fe6827fa | d0ff05f566d2a7bfe6c9f403252a833fe4bb776b | refs/heads/master | 2023-06-08T00:27:33.316110 | 2021-08-18T13:40:32 | 2021-08-18T13:40:32 | 239,139,952 | 0 | 0 | Apache-2.0 | 2020-02-08T13:52:09 | 2020-02-08T13:52:08 | null | UTF-8 | Python | false | false | 6,468 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
class recommenderCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
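    # maps each generated client method to the order of its flattened parameters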
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'get_insight': ('name', ),
'get_recommendation': ('name', ),
'list_insights': ('parent', 'page_size', 'page_token', 'filter', ),
'list_recommendations': ('parent', 'page_size', 'page_token', 'filter', ),
'mark_insight_accepted': ('name', 'etag', 'state_metadata', ),
'mark_recommendation_claimed': ('name', 'etag', 'state_metadata', ),
'mark_recommendation_failed': ('name', 'etag', 'state_metadata', ),
'mark_recommendation_succeeded': ('name', 'etag', 'state_metadata', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
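        # Rewrite a recognized client call so its flattened arguments become a
        # single keyword 'request' dict, keeping retry/timeout/metadata as-is.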
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
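        # any positional args beyond the known keyword params must be control args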
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=recommenderCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the recommender client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
| [
"noreply@github.com"
] | noreply@github.com |
8992bd4abb48df9497bbf3ae5f4afe99f571a053 | e4545ace83d22cb9e49a5325a7847eaff94bb801 | /0409/0409/jimmy/apps.py | 25e2352046b08e0949fb5d09537330cad6ed6eb7 | [] | no_license | MingHuangZheng/JerryHW | 60f566323b8dd2a94b7b7069b1d8d6b43ef13b82 | 2c2f838bcb03a5147e75aa6cb6fae99d48acdc67 | refs/heads/master | 2022-11-23T20:20:54.116273 | 2018-05-25T14:29:49 | 2018-05-25T14:29:49 | 124,842,079 | 0 | 1 | null | 2022-11-03T19:51:43 | 2018-03-12T06:14:06 | Python | UTF-8 | Python | false | false | 90 | py | from django.apps import AppConfig
class JimmyConfig(AppConfig):
name = 'jimmy'
| [
"noreply@github.com"
] | noreply@github.com |
59655e6a2e6c1bf1df975866337b053b89e1ae57 | 111866dd2150170e90e3717df008aa703d7ef30c | /filemanager/domain/__init__.py | 7f428c6590b0064d5266fb7018c0cdcc07f789b4 | [] | no_license | arXiv/arxiv-filemanager | 106c572a6551445a2109c279ce086b7c96a0bcd5 | dfb71a40125324b1c1f4eb865c84cd9d2e512e6c | refs/heads/develop | 2023-04-18T09:45:35.338067 | 2020-03-09T14:59:19 | 2020-03-09T14:59:19 | 113,456,994 | 5 | 6 | null | 2022-12-08T05:50:07 | 2017-12-07T13:55:34 | PostScript | UTF-8 | Python | false | false | 388 | py | """Core concepts and constraints of the file manager service."""
from .uploads import UserFile, Workspace, IChecker, SourceLog, SourceType, \
IStorageAdapter, SourcePackage, ICheckableWorkspace, Readiness, \
Status, LockState
from .file_type import FileType
from .uploads import ICheckingStrategy
from .error import Error, Severity, Code
from .index import NoSuchFile, FileIndex
| [
"brp53@cornell.edu"
] | brp53@cornell.edu |
7adeb154143a4cfd6b5b6ee2b93edaf9c86afaa2 | b526aecc3aeb35c0931339ede80397f8f1561fbc | /src/dascasi/__init__.py | b262c04aa7f3b2df6609d02475bf132c1456c87d | [
"Apache-2.0"
] | permissive | space-physics/dascasi | 30e021976529dfc4072ea96181db8d9d1921a07c | 4d72aa91e471a495566044c3fc387344dd12461f | refs/heads/main | 2023-04-17T09:24:22.325605 | 2023-03-21T02:18:44 | 2023-03-21T02:30:15 | 51,016,067 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | # Copyright 2023 SciVision, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.0.0"
from .web import download
from .io import load, loadcal
from .hdf5 import save_hdf5
__all__ = ["download", "load", "loadcal", "save_hdf5"]
| [
"scivision@users.noreply.github.com"
] | scivision@users.noreply.github.com |
a08a315424fe3ec0a52037d139af8281fff9e7c5 | aa6d030ca75983b097f9eac3acc98023f040dc1b | /ex2_1.py | cc72045b45194a756c618c0cb94ec35294200e90 | [] | no_license | twr14152/Bottle_stuff | 868771754b445cd7256572133f17041d0d6fc806 | 8a594dda6b0f92b9aed74201da525d0bce30dc47 | refs/heads/master | 2022-09-22T12:33:09.220737 | 2020-06-05T01:15:13 | 2020-06-05T01:15:13 | 269,496,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | from bottle import redirect, run, route
@route('/')
def index():
return 'Please login'
@route('/restricted')
def restricted():
    # authentication check would go here
    # if it fails, redirect back to the login page
redirect('/')
run(host='0.0.0.0', port=8080, debug=True)
| [
"twr14152@gmail.com"
] | twr14152@gmail.com |