blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d867b62b99227ba02054237442fe629f1f9f3a34 | bf8d984a6bd87dc047d4d0566f505fefd5eb93d5 | /acedview/wsgi.py | 53f71dc2ac560702ea9e25c54ec33309109d5c30 | [] | no_license | abhilasha1996/MyApp | 93386f3d8e2e45be25fbfabf1ec7c267569c2a61 | 7a01c262a7f43a41415c882aca10fd621c93aed4 | refs/heads/master | 2021-01-19T14:12:58.175049 | 2017-08-20T20:12:28 | 2017-08-20T20:12:28 | 100,885,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | """
WSGI config for acedview project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "acedview.settings")
application = get_wsgi_application()
| [
"abhilashasethi1996@gmail.com"
] | abhilashasethi1996@gmail.com |
327bf3ff951ee285a77e0a2dfa30a0a852ac1426 | cceb97ce3d74ac17090786bc65f7ed30e37ad929 | /server/newfirst/migrations/0005_auto_20201024_0316.py | baaa7f017786874e8c0a9b6e7a9c50db448d3ef2 | [] | no_license | Catxiaobai/project | b47310efe498421cde794e289b4e753d843c8e40 | 76e346f69261433ccd146a3cbfa92b4e3864d916 | refs/heads/master | 2023-01-08T04:37:59.232492 | 2020-11-10T12:00:34 | 2020-11-10T12:00:34 | 291,014,545 | 1 | 4 | null | 2020-11-09T01:22:11 | 2020-08-28T10:08:16 | Python | UTF-8 | Python | false | false | 424 | py | # Generated by Django 3.1.1 on 2020-10-23 19:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('newfirst', '0004_scenes'),
]
operations = [
migrations.RemoveField(
model_name='item',
name='item_date',
),
migrations.RemoveField(
model_name='item',
name='item_leader',
),
]
| [
"2378960008@qq.com"
] | 2378960008@qq.com |
beebb21a85bdef1dce90ba8d97d52f96682ff140 | 4979596576baa5b9306664d418f4a20b486d9fc9 | /src/movies/migrations/0012_movie_number_of_views.py | 13f1482e00980d0b684869bdce1103ff27ba8a13 | [] | no_license | AleksandarFa/pocket-imdb-backend | 23b6fad944831d8d70f82bff3c0030c1ae64fd92 | 6315dfddcf896fcf8348e456b27b9a2e71540f86 | refs/heads/master | 2023-05-12T12:20:03.967050 | 2021-06-04T09:15:47 | 2021-06-04T09:15:47 | 368,444,715 | 0 | 0 | null | 2021-06-04T09:15:48 | 2021-05-18T07:48:19 | Python | UTF-8 | Python | false | false | 390 | py | # Generated by Django 3.1.7 on 2021-05-26 08:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('movies', '0011_auto_20210525_0954'),
]
operations = [
migrations.AddField(
model_name='movie',
name='number_of_views',
field=models.IntegerField(default=0),
),
]
| [
"aleksandar.fa@vivifyideas.com"
] | aleksandar.fa@vivifyideas.com |
b203bcc7a5daf7db43e5600f125c31d1404bb997 | 0a1db233b58fd4c12325447ea5783130a4760124 | /src/lightSource.py | 2f6a7e55af5bb209c659236f5391fc886e91eb48 | [] | no_license | anthonykawa/Intro-Python-II | 9bf936c47448dd1cc859f1d7b11f55bda5db4275 | 8e2bd7e4b10c2a5bd42fd34f74cb61567a5a6168 | refs/heads/master | 2022-11-17T08:39:43.721551 | 2020-07-15T16:55:29 | 2020-07-15T16:55:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | from item import Item
class LightSource(Item):
def __init__(self, name, description):
super().__init__(name, description) | [
"anthonyk2020@gmail.com"
] | anthonyk2020@gmail.com |
dd481a8700e475bd2c82b82241d3ad689f39f95f | 56b60cb4e3dfa065839ce0dce5a50e163a4f9f3a | /api_part/__init__.py | fe26d15ead612aa115e93e6bb3c25e8b71983fcf | [] | no_license | Humbertzhang/DocTrans | 8acdd6634361130cb4f0d960baabd2a28de07332 | 242c0efbdbb660325df0de33910449566148bdb5 | refs/heads/master | 2021-01-20T05:13:58.521265 | 2017-08-31T08:07:11 | 2017-08-31T08:07:11 | 101,422,930 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | from ._init_ import __init__content
from .static import static_content | [
"504490160@qq.com"
] | 504490160@qq.com |
91db8116494945ac4447f2c14fec8b83a4d5f470 | 66d184a2b36ab1db564305ea36be891aaf0e236b | /py/Python_Crash_Course/project2/two_d8.py | 52743e7fbb2329663e1615be5f979d1fb0082ff0 | [] | no_license | joyDDT/python_code | bef57936a1167fa65e28b6c52ab7857b34dc74a8 | 3aae56c51660579a4eaaa087ac2459c9bf2f2e23 | refs/heads/master | 2021-10-30T10:22:21.328633 | 2019-04-26T04:45:01 | 2019-04-26T04:45:01 | 112,004,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | import pygal
from die import Die
#创建两个个D8
die_1 = Die(8)
die_2 = Die(8)
#投掷色子多次,并将结果储存在一个列表中
results = [ ]
for roll_num in range(1000):
result = die_1.roll( ) + die_2.roll( )
results.append(result)
#结果分析
frequencies = [ ]
max_num = die_1.num_sides + die_2.num_sides
for value in range(2, max_num+1):
frequency = results.count(value)
frequencies.append(frequency)
#结果可视化
hist = pygal.Bar( )
hist.title = 'Results of rolling two D8 1000 times.'
hist.x_labels = [x for x in range(2, max_num+1)]
hist.x_title = 'Result'
hist.y_title = 'Frequency of Result'
hist.add('D8+D8', frequencies)
hist.render_to_file('two_d8.svg')
| [
"15894500833@163.com"
] | 15894500833@163.com |
737ec987dfe8f44ec60ce95839fb21130c803793 | 2a1a175efc9c482db0e6d96569f92b9583990acc | /eventex/subscriptions/tests/test_view_new.py | 351daeb6ab3b8abda88f2861141510e7c1378d8c | [] | no_license | mazulo/wttd_eventex | 2e97e3724f2b8396b8cc73175d15defd09b4a86b | 691008562d2143cc57c8b4bb5042aa2c1fdc6602 | refs/heads/master | 2021-01-10T07:29:20.343157 | 2016-03-16T18:21:10 | 2016-03-16T18:21:10 | 48,304,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,017 | py | from django.core import mail
from django.test import TestCase
from django.shortcuts import resolve_url as r
from eventex.subscriptions.forms import SubscriptionForm
from eventex.subscriptions.models import Subscription
class SubscriptionsNewGet(TestCase):
def setUp(self):
self.resp = self.client.get(r('subscriptions:new'))
def test_get(self):
"""GET /inscricao/ must return status code 200"""
self.assertEqual(200, self.resp.status_code)
def test_template(self):
"""Must use subscriptions/subscription_form.html"""
self.assertTemplateUsed(
self.resp,
'subscriptions/subscription_form.html'
)
def test_html(self):
"""Html must contain input tags"""
tags = (
('<form', 1),
('<input', 6),
('type="text"', 3),
('type="email"', 1),
('type="submit"', 1),
)
for text, count in tags:
with self.subTest():
self.assertContains(self.resp, text, count)
def test_csrf(self):
"""Html must contain csrf"""
self.assertContains(self.resp, 'csrfmiddlewaretoken')
def test_has_form(self):
"""Context must have subscription form"""
form = self.resp.context['form']
self.assertIsInstance(form, SubscriptionForm)
class SubscriptionsNewPost(TestCase):
def setUp(self):
data = dict(name='Patrick Mazulo', cpf='03286218383',
email='pmazulo@gmail.com', phone='86-99988-7848')
self.resp = self.client.post(r('subscriptions:new'), data)
def test_post(self):
"""Valid POST should redirect to /inscricao/1/"""
self.assertRedirects(self.resp, r('subscriptions:detail', 1))
def test_send_subscribe(self):
self.assertEqual(1, len(mail.outbox))
def test_save_subscription(self):
self.assertTrue(Subscription.objects.exists())
class SubscriptionsNewPostInvalid(TestCase):
def setUp(self):
self.resp = self.client.post(r('subscriptions:new'), {})
def test_post(self):
"""Invalid POST should not redirect"""
self.assertEqual(200, self.resp.status_code)
def test_template(self):
self.assertTemplateUsed(self.resp,
'subscriptions/subscription_form.html')
def test_has_form(self):
form = self.resp.context['form']
self.assertIsInstance(form, SubscriptionForm)
def test_form_has_errors(self):
form = self.resp.context['form']
self.assertTrue(form.errors)
def test_dont_save_subscription(self):
self.assertFalse(Subscription.objects.exists())
class TestTemplateRegressionTest(TestCase):
def test_template_has_non_field_errors(self):
invalid_data = dict(name='Patrick Mazulo', cpf='03286218383')
response = self.client.post(r('subscriptions:new'), invalid_data)
self.assertContains(response, '<ul class="errorlist nonfield">')
| [
"pmazulo@gmail.com"
] | pmazulo@gmail.com |
b113e7e6e71c42480977c18e82a7bf4d3ecbfc8a | 2e10314f0a6a32cbfdce6b80c7767b84de421741 | /精品真题/精品-one.py | e2135999ef9f92009ca10a79d4df38384cd13fdb | [] | no_license | tang1323/Ing_Interview | 06a9cb19c932b2852dd55655b0d46b814ffa9095 | a1068d3739d2088a2edcf8314e18659e0e9003f8 | refs/heads/master | 2023-04-06T14:17:37.757618 | 2021-04-14T14:14:01 | 2021-04-14T14:14:01 | 357,929,558 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py |
# def add_Run(L=None):
# if L is None:
# L = []
# L.append('Run')
# return L
# add_Run()
# add_Run()
# print(add_Run(['Lying']))
# ds = {'av':2, 'vr':4, 'ls':9, 'path':6}
# print(ds.popitem(), len(ds))
# with open('D:/Py-Project/Ing_Interview/精品真题/txt/a', 'r') as f:
# print(f.read().split(','))
# aaa = [8, 5, 2, 2]
# with open('D:/Py-Project/Ing_Interview/精品真题/txt/output', 'w') as f:
# for aa in aaa:
# f.write(';'.join.str(aa))
# x, y = 1, 2
# while x < 20:
# x, y = y, x + y
# print(x)
# ls = [2, 0, 6]
# x = 100
# try:
# for i in ls:
# y = 100 // i
# print(y)
# except:
# print('error')
# import random as r
# zmb = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz'
# r.seed(1)
# code = ''
# for i in range(4):
# code += r.choice(zmb)
# print(code)
# import turtle as t
#
# color = ['red','pink','green']
# ra = [20, 50, 100]
# for i in range(3):
# t.pu()
# t.goto(0, -ra[i])
# t.pd()
# t.pencolor(color[i])
# t.circle(ra[i])
# t.done()
| [
"1171242903@qq.com"
] | 1171242903@qq.com |
d458b27375d8eadb606441a04abcd9b20947e15f | a832ad52a2ee31006d615ad44d038557a3c13ebf | /Controllers/MenuController.py | 125956f1a638e9a63e7ce98de01e3d78aa28fa2a | [] | no_license | GRTerpstra/Embedded-Systems | 549769e940adca17831c0ce0b7933f247b053fa5 | adfccbbe9578f9d4bb4dec75a25328eaed42f85e | refs/heads/master | 2020-08-13T12:12:29.555010 | 2019-11-18T13:38:54 | 2019-11-18T13:38:54 | 214,966,421 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | class MenuController:
def __init__(self, mainModel, menuModel):
self.mainModel = mainModel
self.menuModel = menuModel
return | [
"a.j.witwerts@st.hanze.nl"
] | a.j.witwerts@st.hanze.nl |
030eb6da27cae4ea65d35f762fb79921dd2c1fb7 | a3fcbcb1360669df5c2fe5d5286950296bafb04b | /ecomapp/migrations/0008_broadcast_email.py | addc6b39e1f037e77db966e940ddd3123110e613 | [] | no_license | Emadfaried-div/new-ecommerce | f5a4165d4a8d7f7215735e86e9617012aa3f04fb | 48842649c82291e2fb7c584b71294202d263962d | refs/heads/master | 2023-05-15T08:50:30.184259 | 2021-06-11T15:49:59 | 2021-06-11T15:49:59 | 355,204,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | # Generated by Django 3.2 on 2021-05-17 21:34
import ckeditor.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ecomapp', '0007_faq'),
]
operations = [
migrations.CreateModel(
name='BroadCast_Email',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=200)),
('created', models.DateTimeField(auto_now_add=True)),
('message', ckeditor.fields.RichTextField()),
],
options={
'verbose_name': 'BroadCast Email to all Member',
'verbose_name_plural': 'BroadCast Email',
},
),
]
| [
"78530477+Emadfaried-div@users.noreply.github.com"
] | 78530477+Emadfaried-div@users.noreply.github.com |
44e9934e9e2aafd04f6991d39415b44cee2581f8 | d6a1c73104b9f1e3c829d18812e9bf2dd6d535a1 | /main/serializers.py | 11275c8b893ca85e2aeac26ca856e9989492d972 | [] | no_license | koreicnurs/blog_drf | 2623eec6c4d41f564bd2754359823b4d56d4221f | 8c6660fc92387ef9349753aea61e0e500736802a | refs/heads/master | 2022-12-04T13:53:14.184924 | 2020-08-13T14:35:21 | 2020-08-13T14:35:21 | 286,912,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | from rest_framework import serializers
from main.models import Post, Category
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = '__all__'
class PostSerializer(serializers.ModelSerializer):
category = CategorySerializer()
author = serializers.EmailField(source='author.email')
class Meta:
model = Post
fields = ('id', 'text', 'category', 'author', 'create_at', 'image')
def __get_image_url(self, obj):
request = self.context.get('request')
if obj.image:
url = obj.image.url
if request is not None:
url = request.build_absolute_uri(url)
else:
url = ''
return url
def to_representation(self, instance):
representation = super(PostSerializer, self).to_representation(instance)
representation['image'] = self.__get_image_url(instance)
return representation
| [
"koreicnurs@gmail.com"
] | koreicnurs@gmail.com |
65be7aa9587f2f337ee74a04b8fb020b199fa90b | ded2e06b4cd01bbdb1db1fe553c1f62e0a70376a | /py_action_pkg/py_action_pkg/maze_action_client.py | 36e29d8286114201167c42ed44f67d6ffe869edc | [] | no_license | maxpark/gcamp_ros2_basic | b716a457f8ae8da90ee51196306877bb2e26362e | 67fd8d2ba83cc683402bb374c0a11e4c2cd7fa76 | refs/heads/main | 2023-07-18T03:52:45.620235 | 2021-09-11T07:49:08 | 2021-09-11T07:49:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,033 | py | #!/usr/bin/env/ python3
#
# Copyright 2021 Seoul Business Agency Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from custom_interfaces.action import Maze
import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
"""
Maze.action structure
int32[] turning_sequence
---
bool success
---
string feedback_msg
"""
class MazeActionClient(Node):
def __init__(self):
super().__init__('maze_action_client')
self.action_client = ActionClient(self, Maze, 'diffbot/maze_action')
self.get_logger().info('=== Maze Action Client Started ====')
def send_goal(self, turning_list):
goal_msg = Maze.Goal()
goal_msg.turning_sequence = turning_list
if self.action_client.wait_for_server(10) is False:
self.get_logger().error('Server Not exists')
self._send_goal_future = self.action_client.send_goal_async(
goal_msg, feedback_callback=self.feedback_callback
)
self._send_goal_future.add_done_callback(self.goal_response_callback)
def feedback_callback(self, feedback_message):
feedback = feedback_message.feedback
self.get_logger().info(f'Received feedback: {feedback.feedback_msg}')
def goal_response_callback(self, future):
goal_handle = future.result()
if not goal_handle.accepted:
self.get_logger().info('Goal rejected')
return
self.get_logger().info('Goal accepted')
self._get_result_future = goal_handle.get_result_async()
self._get_result_future.add_done_callback(self.get_result_callback)
def get_result_callback(self, future):
result = future.result().result
self.get_logger().warn(f'Action Done !! Result: {result.success}')
rclpy.shutdown()
def main(args=None):
rclpy.init(args=args)
maze_action_client = MazeActionClient()
user_inputs = []
# Input Logic
try:
maze_action_client.get_logger().info('Enter numbers [or stop] : ')
while True:
user_inputs.append(int(input()))
# if the input is not-integer, just print the list
except Exception:
maze_action_client.get_logger().info(f'Your sequence list : {user_inputs}')
maze_action_client.get_logger().info('==== Sending Goal ====')
maze_action_client.send_goal(user_inputs)
# You can get Future for additional functoins
# future = maze_action_client.send_goal(user_inputs)
rclpy.spin(maze_action_client)
if __name__ == '__main__':
main()
| [
"tge1375@naver.com"
] | tge1375@naver.com |
cea465796b8fce53c1a8fcb1e94b6c554c15aa25 | 84c7873c296c4c06038eaf0ee842987014d791d8 | /alumnos/57089-agustin-aguero/clases/clase1/ejercitacion1.py | 41270055a67c902edb17b1e12db68d385a211c8e | [] | no_license | agustinaguero97/lab | 0c2338456ef367c3a0d066c61f89dfb3b944e271 | 436e4303ddcd3cba433ac08f14b37a72ec0a7fad | refs/heads/main | 2021-11-27T20:20:00.380362 | 2021-08-19T03:01:31 | 2021-08-19T03:01:31 | 348,434,234 | 0 | 0 | null | 2021-03-16T17:24:55 | 2021-03-16T17:24:54 | null | UTF-8 | Python | false | false | 609 | py |
"""
1 - realize un programa que lea todos los datos ingresados desde stdin,
e invierta el orden de las letras en cada palabra, enviandolo a stdout.
Ejemplo de funcionamiento
# echo -e "hola mundo \n nos vemos" | ./invierte.py
aloh odnum
son somev
"""
#!/usr/bin/python3
import sys
while True:
stdin_fileno = sys.stdin.readline()
entrada = str(stdin_fileno)
lista = list(entrada.split(" "))
lista_b = []
for x in lista:
lista_b.append(x[::-1])
linea = (' '.join(lista_b)).strip('')
sys.stdout.write(linea )
#el programa termina con: ctrl + z
| [
"agustin1997aguero@gmail.com"
] | agustin1997aguero@gmail.com |
5a2d36688d95ca553c9c799a2e0ad167f48d382e | 1a626613c7d3be8cf4db765d4c9f1c5de138a9ab | /execicio3.py | 78077643e177e49500d158e1dca95b7aae61024b | [] | no_license | leonardo111003/Infosatc-lp-avaliativo-02 | c629c5621e436b008bb2f552907ddc280c833b17 | 28a2e1fdffefd0a4e0560c41149bb44dcafe3d38 | refs/heads/master | 2022-12-22T19:33:47.703641 | 2020-09-30T20:05:04 | 2020-09-30T20:05:04 | 293,796,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | lista = [ 5 , 1 , 4 , 3 ]
print ( sum ( lista )) | [
"leocatra123@hotmail.com"
] | leocatra123@hotmail.com |
ae8370a890bcf5fce614f8df6b15ff1cb153f540 | e48a5534277cabce9ab6c02c2247a7af7ae1a298 | /apprest/tests/views/test_users.py | c40855638809b0d25a81fdfcbf8e51235f09f375 | [
"MIT"
] | permissive | dsanchez-cells/calipsoplus-backend | 671a1dbe27a0919c9478be3bd819320a23781f31 | a199bc5af4e636fe95e82444b4471519820409ae | refs/heads/master | 2020-04-17T22:54:16.057899 | 2019-01-22T14:46:54 | 2019-01-22T14:46:54 | 166,428,164 | 0 | 0 | MIT | 2019-01-18T15:42:13 | 2019-01-18T15:42:13 | null | UTF-8 | Python | false | false | 2,573 | py | from django.contrib.auth.models import User
import logging
from rest_framework import status
from rest_framework.utils import json
from apprest.tests.utils import CalipsoTestCase
logger = logging.getLogger(__name__)
class UserViewsTestCase(CalipsoTestCase):
logger = logging.getLogger(__name__)
def setUp(self):
self.credentials = {
'username': 'testuser',
'password': 'secret'}
self.test_user = User.objects.create_user(**self.credentials)
def test_login_user_200(self):
self.logger.debug('#### test_login_user_200')
url = '/login/'
data_str = json.dumps(self.credentials)
response = self.client.post(url, format='json', content_type='application/json', data=data_str)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_login_user_401(self):
self.logger.debug('#### test_login_user_401')
self.credentials = {
'username': 'testuser',
'password': 'surprise'}
url = '/login/'
data_str = json.dumps(self.credentials)
response = self.client.post(url, format='json', content_type='application/json', data=data_str)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_login_invalid_credentials_no_pass_400(self):
self.logger.debug('#### test_login_invalid_credentials_no_pass_400')
self.credentials = {'username': 'testuser'}
url = '/login/'
data_str = json.dumps(self.credentials)
response = self.client.post(url, format='json', content_type='application/json', data=data_str)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_login_invalid_credentials_no_username_400(self):
self.logger.debug('#### test_login_invalid_credentials_no_username_400')
self.credentials = {'password': 'secret'}
url = '/login/'
data_str = json.dumps(self.credentials)
response = self.client.post(url, format='json', content_type='application/json', data=data_str)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_login_invalid_credentials_400(self):
self.logger.debug('#### test_login_invalid_credentials_400')
self.credentials = ''
url = '/login/'
data_str = json.dumps(self.credentials)
response = self.client.post(url, format='json', content_type='application/json', data=data_str)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
| [
"acampsm@cells.es"
] | acampsm@cells.es |
73a0f05630281ebbc6d8ccf164f1b66426de58d4 | 4fe3fae28227272ddbe18009f8a0b08436bc8308 | /ProblemSolving/Staircase/Solution.py | 1581513ae436ef159be7619a8165d825abd89cf9 | [] | no_license | alexhong2020/HackerRank | 4ec4ce82b6e72efce2fb206cb14f27b712773946 | e900f204703388e87f0816cb1dfb88dc4bdc69a4 | refs/heads/main | 2023-05-12T21:34:37.209476 | 2021-05-28T20:59:01 | 2021-05-28T20:59:01 | 364,686,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | #!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'staircase' function below.
#
# The function accepts INTEGER n as parameter.
#
def staircase(n):
# Write your code here
for i in range(1, n+1):
for j in range(0, n - i):
print(" ", end="")
for k in range(0, i):
print("#", end="")
print()
if __name__ == '__main__':
n = int(input().strip())
staircase(n)
| [
"alexhong2020@gmail.com"
] | alexhong2020@gmail.com |
f4b489d712dd7cfe2283e05c6c133a882fdcbb32 | 8be9ae66465536f343ab4f86a5c90b27de8f5fc7 | /until/readConfig.py | e665622847576ce1f4dfea813f15670de90a94e2 | [] | no_license | obj1/autotest | 3db6d8dbcca0ae8979557f654fbc18bd9f911498 | 4a962cf88b877fd633a07103fa17cef16758a664 | refs/heads/master | 2023-03-12T21:58:26.157960 | 2021-02-24T07:40:17 | 2021-02-24T07:40:17 | 339,034,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,135 | py | '''1.读取配置文件
2.项目下所有文件的相对路径
'''
import yaml,os
# 1.读取配置文件
class YamlRead:
def __init__(self,yamlPath):
'''如果是第一次调用,读取yaml文件,否则直接返回之前保存的数据'''
if os.path.exists(yamlPath):
self.yamlPath=yamlPath
else:
raise FileNotFoundError('yaml文件不存在')
self._data=None #保存yaml的数据
@property #把一个方法变成属性来调用,
def getData(self):
if not self._data:
with open(self.yamlPath,mode='rb') as f:
self._data=yaml.safe_load(f)
return self._data
def write(self,data):
'''写入yaml,存放提取的数据'''
with open(self.yamlPath,mode='a',encoding='utf-8') as file:
yaml.dump(data,file,allow_unicode=True)
# 2.项目下所有文件的相对路径
class Config:
# 项目下所有文件的相对路径
Base_Path=os.path.abspath(__file__+'\..'+'\..')
Base_Data=Base_Path+'\config\data.yaml'
Base_LOG= Base_Path+'\log'
ChromeDriver_Path=Base_Path+'\lib\chromedriver.exe'
FirefoxDriver_Path=Base_Path+'\lib\geckodriver.exe'
Picture_Path = Base_Path + '\picture'
Api_CaseInfo_Path_Yaml = Base_Path + '\config\\apitestcases.yaml'
Api_CaseInfo_Path_excel = Base_Path + r'\config\apitestcase.xlsx'
Tiqu_Path = Base_Path + r'\config\tiqu.yaml'
# 获取基础数据daya.yaml的数据
def __init__(self):
'''获取daya.yaml所有的数据'''
self.config=YamlRead(Config.Base_Data).getData
@property
def webUrl(self):
return self.config['webUrl']
@property
def browser(self):
return self.config['Browser']
@property
def api(self):
return self.config['Api']
@property
def database(self):
return self.config['database']
@property
def runApi(self):
return self.config['RunApi']
@property
def runApis(self):
return self.config['RunApis']
readConfig=Config()
# print(readConfig.config)
| [
"1364283713@qq.com"
] | 1364283713@qq.com |
d5ee690578c3bfc570f357bece4bc75e99aa569d | 45860b4c7a289f053d22a7638608703634cb66b9 | /main.py | 8cb045cec686070f1feaf1ad86e30a81643da157 | [] | no_license | ksesalebangim/camera | 407a62ed04723e8386bc60006fed90764d913d9b | b99acd0969179df1aa89da4c21e02bf49c4b175c | refs/heads/master | 2022-01-06T17:52:16.643711 | 2019-06-03T13:10:31 | 2019-06-03T13:10:31 | 114,571,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,667 | py | from flask import Flask
import io
import base64
from PIL import Image
import time
import glob
import subprocess
dropboxInLocation = ""
dropboxOutLocation = ""
printerMac = ""
app = Flask(__name__,static_folder='public', static_url_path='/public')
def getCamImage():
return Image.open("/home/ben/1.jpg")
#TODO:add page reload every 1 sec
@app.route('/')
@app.route('/index')
def getImg():
image = getCamImage()
in_mem_file = io.BytesIO()
image.save(in_mem_file, format="PNG")
# reset file pointer to start
in_mem_file.seek(0)
img_bytes = in_mem_file.read()
base64_encoded_result_bytes = base64.b64encode(img_bytes)
base64_encoded_result_str = base64_encoded_result_bytes.decode('ascii')
return '<img id="content_img" src="data:image/png;base64,'+base64_encoded_result_str+'" />'
@app.route('/processed')
def processed():
images = glob.glob(dropboxOutLocation + '*.jpg')
ret = []
for x in images:
ret.append(x.split("/")[-1])
return str(ret)
@app.route('/print/<filename>')
def printFile(filename):
subprocess.Popen("obexftp --nopath --noconn --uuid none --bluetooth 70:2C:1F:2B:7D:85 --channel 4 -p "+dropboxOutLocation+filename+" "+filename, stdout=subprocess.PIPE, shell=True).stdout.read()
return "move back to start of loop"
@app.route('/process/<fileData>')
def processImage(fileData):
if str(fileData).startswith("data:image/png;base64,"):
fileData = str(fileData).split("data:image/png;base64,",1)[1]
mtime = int(time.time())
pfile = open(dropboxInLocation+mtime+".jpg","w")
pfile.write(fileData)
pfile.close()
app.run(host='0.0.0.0') | [
"ben.feher@cyiot.net"
] | ben.feher@cyiot.net |
7ada0a9c9a71e609284946d53a7496cb678e7804 | f839e5533e23380df02778378dafe0df674a60c9 | /sphinx_rosmsgs/__init__.py | d5dd32c573f72aa1fae119c7b2b17a07553bd39c | [] | no_license | MatteoRagni/sphinx_rosmsgs | cd332cafa2815b5eb6490b8527f2cc060ed20488 | 8b30cfe779ece3c97e67dfdbd9f0fd4b61d40f66 | refs/heads/master | 2022-04-28T16:44:50.918822 | 2020-04-28T12:25:05 | 2020-04-28T12:25:05 | 259,610,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | from sphinx_rosmsgs.__version__ import __version__
from sphinx_rosmsgs.message_directive import MessageDirective
from sphinx_rosmsgs.message_indexer import MessageIndexer
def on_config_inited(app, *args):
r"""
The event is used to collect the user configuration and register
a global message indexer accordingly to user configuration.
The global indexer will be used inside the directive to parse the
actual files.
:param app: sphinx app, for configuration
:param args: unused arguments
"""
paths = app.config["rosmsg_path_root"]
if isinstance(paths, str):
paths = [paths]
MessageIndexer.register_global(paths)
def setup(app):
r"""
Entry point for the extension
:param app: sphinx application
:return: disctionary with extension's information
:rtype: dict
"""
app.add_config_value('rosmsg_path_root', [], 'env')
app.add_directive("ros_message", MessageDirective)
app.connect('config-inited', on_config_inited)
return {
'version': __version__,
}
| [
"matteo.ragni.it@gmail.com"
] | matteo.ragni.it@gmail.com |
46183352278dccebb8b04ef8a8ad9433ab6dc02c | 0be31b914365fd06d201f3d2a3f10863805678e9 | /MachineLearnn/venv/LogisticRegression.py | 54a7fecc1b9e7e64df6a8cb2cff3136af1da2a66 | [] | no_license | huanchilin/MachineLearning | aa682e47a6803ad3cf5dd772972a5ba62c841718 | 48c091f3dbb2cfaaea26d78eeaf3288396d9be53 | refs/heads/main | 2023-03-29T11:57:49.377194 | 2021-04-11T09:21:56 | 2021-04-11T09:21:56 | 356,818,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,522 | py | import numpy as np
from sklearn.linear_model import LogisticRegression
from exam import hours_studied_scaled, passed_exam, exam_features_scaled_train,\
exam_features_scaled_test, passed_exam_2_train, passed_exam_2_test, guessed_hours_scaled
# Create and fit logistic regression model here
model = LogisticRegression()
model.fit(hours_studied_scaled, passed_exam)
# Save the model coefficients and intercept here
calculated_coefficients = model.coef_
intercept = model.intercept_
print(calculated_coefficients)
print(intercept)
# Predict the probabilities of passing for next semester's students here
passed_predictions = model.predict_proba(guessed_hours_scaled)
# Create a new model on the training data with two features here
model_2 = LogisticRegression()
model_2.fit(exam_features_scaled_train, passed_exam_2_train)
# Predict whether the students will pass here
passed_predictions_2 = model_2.predict(exam_features_scaled_test)
print(passed_predictions_2)
print(passed_exam_2_test)
# Assign and update coefficients
coefficients = model_2.coef_
coefficients = coefficients.tolist()[0]
# Plot bar graph
plt.bar([1, 2], coefficients)
plt.xticks([1, 2], ['hours studied', 'math courses taken'])
plt.xlabel('feature')
plt.ylabel('coefficient')
plt.show()
################# project: Titantic survive
import codecademylib3_seaborn
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Load the passenger data
passengers = pd.read_csv('passengers.csv')
# look at data frame names
print(passengers.columns)
# Update sex column to numerical
passengers['Sex'] = passengers['Sex'].map({'female': 1, 'male': 0})
# Fill the nan values in the age column
passengers['Age'].fillna(value = round(passengers['Age'].mean()), inplace = True)
# Create a first class column
passengers['FirstClass'] = passengers['Pclass'].apply(lambda p: 1 if p == 1 else 0)
# Create a second class column
passengers['SecondClass'] = passengers['Pclass'].apply(lambda p: 1 if p == 2 else 0)
# Select the desired features
features = passengers[['Sex', 'Age', 'FirstClass', 'SecondClass']]
survival = passengers['Survived']
# Perform train, test, split
train_features, valid_features, train_labels, valid_labels = train_test_split(features, survival, test_size = 0.8)
# Scale the feature data so it has mean = 0 and standard deviation = 1
scalar = StandardScaler()
train_features = scalar.fit_transform(train_features)
valid_features = scalar.transform(valid_features)
# Create and train the model
classifier = LogisticRegression()
classifier.fit(train_features, train_labels)
# Score the model on the train data
score = classifier.score(train_features, train_labels)
print(score)
# Score the model on the test data
score = classifier.score(valid_features, valid_labels)
print(score)
# Analyze the coefficients
coeff = classifier.coef_
print(coeff)
# Sample passenger features
Jack = np.array([0.0,20.0,0.0,0.0])
Rose = np.array([1.0,17.0,1.0,0.0])
You = np.array([0.0,25,1.0,0.0])
# Combine passenger arrays
sample_passengers = np.array([Jack, Rose, You])
# Scale the sample passenger features
sample_passengers = scalar.transform(sample_passengers)
print(sample_passengers)
# Make survival predictions!
survive_ans = classifier.predict(sample_passengers)
print(survive_ans)
survive_prob = classifier.predict_proba(sample_passengers)
print(survive_prob) | [
"noreply@github.com"
] | noreply@github.com |
1ac116f975e16120cbbd8caa43dc5181d2366329 | 2185217abc9d39919d4e7efd796f0dfb4dc70303 | /advent_of_code_2019/day_02.py | 8f1385437d7f8b866d2131fd27dae5590d1c30ce | [] | no_license | HappyTreeBeard/Advent_of_Code_2019 | 78b6061da74bb427e1b2b70c17eb6e630a0618e4 | 7d6cb8c04c6d509095b8c61bcd5b1a93f19a68b4 | refs/heads/master | 2020-11-24T17:42:01.344355 | 2020-01-08T02:34:03 | 2020-01-08T02:34:03 | 228,277,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,688 | py | import copy
import unittest
from enum import IntEnum
from pathlib import Path
from typing import List
class OpCode(IntEnum):
    """ Opcodes (like 1, 2, or 99) mark the beginning of an instruction. The values used immediately after an opcode,
    if any, are called the instruction's parameters."""
    ADD = 1        # add params 1 and 2, store at the address given by param 3
    MULTIPLY = 2   # multiply params 1 and 2, store at the address given by param 3
    FINISHED = 99  # halt the program immediately
def run_intcode_program(intcode: List[int]):
    """Execute an Intcode program in place and return its final memory.

    ``intcode`` is mutated: every ADD/MULTIPLY writes its result back into
    the list.  Raises ValueError for any opcode other than 1, 2 or 99.
    """
    pc = 0  # program counter: index of the current instruction's opcode
    while True:
        raw = intcode[pc]
        try:
            code = OpCode(raw)
        except ValueError:
            # Encountering an unknown opcode means something went wrong.
            raise ValueError(f'Unexpected OpCode: {raw}')
        if code == OpCode.FINISHED:
            # 99 halts the program immediately.
            pc += 1
            break
        # Parameters are addresses: fetch operands indirectly, and the
        # destination address directly.
        lhs = intcode[intcode[pc + 1]]
        rhs = intcode[intcode[pc + 2]]
        dest = intcode[pc + 3]
        if code == OpCode.ADD:
            intcode[dest] = lhs + rhs
        elif code == OpCode.MULTIPLY:
            intcode[dest] = lhs * rhs
        else:
            raise ValueError(f'Unhandled OpCode: {code}')
        # Step over the opcode and its three parameters.
        pc += 4
    return intcode
class Day2Tests(unittest.TestCase):
    """Unit tests for run_intcode_program (AoC 2019 day 2 examples)."""

    def test_int_code_program_0(self):
        """Full worked example from the puzzle description."""
        values = [1, 9, 10, 3, 2, 3, 11, 0, 99, 30, 40, 50]
        expected = [3500, 9, 10, 70, 2, 3, 11, 0, 99, 30, 40, 50]
        self.assertEqual(run_intcode_program(values), expected)

    def test_int_code_program_1(self):
        """Small example programs.

        The original checked [1, 0, 0, 0, 99] three separate times; the
        duplicates are collapsed into a single assertion here.
        """
        self.assertEqual(run_intcode_program([1, 0, 0, 0, 99]), [2, 0, 0, 0, 99])
        self.assertEqual(run_intcode_program([2, 3, 0, 3, 99]), [2, 3, 0, 6, 99])
        self.assertEqual(run_intcode_program([2, 4, 4, 5, 99, 0]), [2, 4, 4, 5, 99, 9801])
        self.assertEqual(run_intcode_program([1, 1, 1, 4, 99, 5, 6, 0, 99]), [30, 1, 1, 4, 2, 5, 6, 0, 99])
def day_2(txt_path: Path) -> List[int]:
    """Solve both parts of AoC 2019 day 2 for the given input file.

    Returns [part_1_answer, part_2_answer].
    """
    # Load puzzle input as List[int]
    with open(str(txt_path), mode='r', newline='') as f:
        base_intcode = [int(x) for x in f.readline().split(',')]
    # Part 1: restore the "1202 program alarm" state (noun=12, verb=2),
    # then run the program and read address 0.
    intcode = copy.copy(base_intcode)
    intcode[1] = 12  # Noun
    intcode[2] = 2  # Verb
    part_1_answer = run_intcode_program(intcode=intcode)[0]
    # Part 2: find the noun/verb pair producing the target output.
    # The inputs replace addresses 1 and 2 as before, and each value is
    # between 0 and 99 INCLUSIVE.
    # Bug fix: range(99) only covered 0..98, silently skipping any
    # solution that uses the value 99; range(100) covers 0..99.
    expected_output = 19690720
    part_2_answer = None
    for noun in range(100):
        for verb in range(100):
            intcode = copy.copy(base_intcode)
            intcode[1] = noun
            intcode[2] = verb
            if run_intcode_program(intcode=intcode)[0] == expected_output:
                part_2_answer = 100 * noun + verb
                break
        if part_2_answer is not None:
            break
    return [part_1_answer, part_2_answer]
def main():
    """Run the day-2 solver on the checked-in puzzle input and print the answers."""
    txt_path = Path(Path(__file__).parent, 'input_data', 'day_2_input.txt')
    answer = day_2(txt_path=txt_path)
    # Bug fix: this module solves day 2, but the message said "Day 1".
    print(f'Day 2 Answers: {repr(answer)}')
if __name__ == '__main__':
    # Run the unit tests first, then solve the real puzzle input.
    SUITE = unittest.TestLoader().loadTestsFromTestCase(Day2Tests)
    unittest.TextTestRunner(verbosity=2).run(SUITE)
    main()
| [
"34220817+HappyTreeBeard@users.noreply.github.com"
] | 34220817+HappyTreeBeard@users.noreply.github.com |
02533cd4d4d9f6d1ef621cfbd2a2b01cbc88a02b | 42d88f2cef9f919328ca7576a4b87d4a6b31c56b | /hw7_release/util.py | d9ab309351418ab8684d1db8c24a8d2d4629aa19 | [] | no_license | zx563147474/standford-CS131 | a0a041b97b21f15c37c9f456ac1e6db89a7e9445 | eb3c436253749201d07511e725deda7c63a15649 | refs/heads/master | 2023-01-29T05:31:40.296839 | 2020-12-03T05:30:58 | 2020-12-03T05:30:58 | 311,215,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,767 | py | import numpy as np
from detection import *
from skimage.transform import rescale, resize, downscale_local_mean
from skimage.filters import gaussian
# def read_face_labels(image_paths):
# label_path = "list_bbox_celeba.txt"
# n_images = len(image_paths)
# f = open(label_path, "r")
# f.readline()
# f.readline()
# faces = np.array([],dtype=np.int).reshape(0,4)
# for line in f:
# if faces.shape[0]>40:
# break
# parts = line.strip().split(' ')
# parts = list(filter(None, parts))
# #print(line,parts)
# image_file = parts[0]
# if image_file in image_paths:
# x_1 = int(parts[1])
# y_1 = int(parts[2])
# width = int(parts[3])
# height = int(parts[4])
# faces = np.vstack((faces, np.asarray([y_1, x_1, height, width])))
# return faces
def read_facial_labels(image_paths):
    """Read CelebA landmark rows for the requested images.

    Returns a tuple (lefteyes, righteyes, noses, mouths) of (N, 2) int
    arrays in (row, col) order; the mouth is the midpoint of the left and
    right mouth-corner landmarks.
    NOTE(review): the file handle is never closed, reading stops after 41
    matches, and np.int is deprecated in modern NumPy -- confirm before reuse.
    """
    label_path = "list_landmarks_align_celeba.txt"
    n_images = len(image_paths)  # NOTE(review): computed but unused
    f = open(label_path, "r")
    # Skip the two header lines (row count and column names).
    f.readline()
    f.readline()
    lefteyes = np.array([],dtype=np.int).reshape(0,2)
    righteyes = np.array([],dtype=np.int).reshape(0,2)
    noses = np.array([],dtype=np.int).reshape(0,2)
    mouths = np.array([],dtype=np.int).reshape(0,2)
    for line in f:
        if lefteyes.shape[0]>40:
            break
        # Columns are space-padded; drop empty fields after the split.
        parts = line.strip().split(' ')
        parts = list(filter(None, parts))
        #print(line,parts)
        image_file = parts[0]
        if image_file in image_paths:
            # Landmark columns come in (x, y) = (col, row) pairs.
            lefteye_c = int(parts[1])
            lefteye_r = int(parts[2])
            righteye_c = int(parts[3])
            righteye_r = int(parts[4])
            nose_c = int(parts[5])
            nose_r = int(parts[6])
            leftmouth_c = int(parts[7])
            leftmouth_r = int(parts[8])
            rightmouth_c = int(parts[9])
            rightmouth_r = int(parts[10])
            # Use the midpoint of the two mouth corners as "the mouth".
            mouth_c = int((leftmouth_c+rightmouth_c)/2)
            mouth_r = int((leftmouth_r+rightmouth_r)/2)
            lefteyes = np.vstack((lefteyes, np.asarray([lefteye_r, lefteye_c])))
            righteyes = np.vstack((righteyes, np.asarray([righteye_r, righteye_c])))
            noses = np.vstack((noses, np.asarray([nose_r, nose_c])))
            mouths = np.vstack((mouths, np.asarray([mouth_r, mouth_c])))
    parts = (lefteyes, righteyes, noses, mouths)
    return parts
def get_detector(part_h, part_w, parts, image_paths):
    """Average part_h x part_w grayscale patches centered on each landmark.

    parts[i] is the (row, col) landmark for image_paths[i]; the mean patch
    acts as a simple template detector for that facial part.
    NOTE(review): patches that straddle an image border come back smaller
    than (part_h, part_w) and would break the accumulation -- confirm the
    landmarks always leave enough margin.
    """
    n = len(image_paths)
    part_shape = (part_h,part_w)
    avg_part = np.zeros((part_shape))
    for i,image_path in enumerate(image_paths):
        image = io.imread('./face/'+image_path, as_gray=True)
        part_r = parts[i][0]
        part_c = parts[i][1]
        #print(image_path, part_r, part_w, part_r-part_h/2, part_r+part_h/2)
        # Crop a patch centered on the landmark.
        part_image = image[int(part_r-part_h/2):int(part_r+part_h/2), \
                           int(part_c-part_w/2):int(part_c+part_w/2)]
        avg_part = np.asarray(part_image)+np.asarray(avg_part)
    avg_part = avg_part/n
    return avg_part
def get_heatmap(image, face_feature, face_shape, detectors_list, parts):
    """Combine face and part response maps into one shifted heatmap.

    Scores the image against the face template, then adds one
    displacement-shifted, Gaussian-smoothed response map per part.
    NOTE(review): the loop ignores `detector` and re-scores with
    face_feature each iteration -- likely intended to use the part
    detector instead; confirm against the assignment spec.
    """
    _, _, _, _, face_response_map = pyramid_score \
        (image, face_feature, face_shape, stepSize = 30, scale = 0.8)
    face_response_map=resize(face_response_map,image.shape)
    face_heatmap_shifted = shift_heatmap(face_response_map, [0,0])
    for i,detector in enumerate(detectors_list):
        part = parts[i]
        max_score, r, c, scale,response_map = pyramid_score\
            (image, face_feature, face_shape,stepSize = 30, scale=0.8)
        # Shift each part's response by its mean displacement from the
        # face center, then blur by the displacement spread.
        mu, std = compute_displacement(part, face_shape)
        response_map = resize(response_map, face_response_map.shape)
        response_map_shifted = shift_heatmap(response_map, mu)
        heatmap = gaussian(response_map_shifted, std)
        face_heatmap_shifted+= heatmap
    return face_heatmap_shifted
def intersection_over_union(boxA, boxB):
    """Compute the intersection-over-union (IoU) of two axis-aligned boxes.

    Boxes are [x1, y1, x2, y2] with inclusive pixel coordinates (hence the
    "+ 1" in every extent).  Returns a float in [0, 1]; 0.0 for disjoint boxes.
    """
    # determine the (x, y)-coordinates of the intersection rectangle
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])
    # compute the area of intersection rectangle
    # Bug fix: clamp each extent at zero.  Without the clamp, two negative
    # extents multiply into a spurious POSITIVE area for disjoint boxes.
    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
    # compute the area of both the prediction and ground-truth
    # rectangles
    boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    # IoU = intersection / union, where union = areaA + areaB - intersection
    iou = interArea / float(boxAArea + boxBArea - interArea)
    # return the intersection over union value
    return iou
| [
"zx563147474@gmail.com"
] | zx563147474@gmail.com |
3768f45c0c9890b403aa36c2f3d6a85d1d0ae4fb | 02c7f9eb8cb9cd58aa9f37221d0de56c8fe91a90 | /src/ques_gen_model/reinforce_evaluate.py | c9d94d1a20b780e1ab52878f8c695f062a60b3f0 | [] | no_license | pajenterprise/style_clarification_question_generation | 71c89eacae1e3b65f0f2414999a44ebd4049992c | e0ef9e015e77d497d93f657147473c83b17eded1 | refs/heads/master | 2020-12-11T16:28:51.044964 | 2019-05-02T21:46:55 | 2019-05-02T21:46:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,330 | py | from .constants import *
from .helper import *
from ques_gen_model.helper import *
import numpy as np
import torch
from torch.autograd import Variable
def evaluate_relevance(context_model, question_model, relevance_model, c, cl, q, ql, args):
    """Score (context, question) pairs with the trained relevance models.

    c/q are padded token-id batches and cl/ql their true lengths; masks
    built from the lengths zero out padded positions before the encoder
    outputs are sum-pooled over time.  Returns a 1-D tensor of sigmoid
    probabilities, one per batch element.
    """
    with torch.no_grad():
        context_model.eval()
        question_model.eval()
        relevance_model.eval()
        cm = get_masks(cl, args.max_post_len)
        qm = get_masks(ql, args.max_ques_len)
        c = torch.LongTensor(c)
        cm = torch.FloatTensor(cm)
        q = torch.LongTensor(q)
        qm = torch.FloatTensor(qm)
        if USE_CUDA:
            c = c.cuda()
            cm = cm.cuda()
            q = q.cuda()
            qm = qm.cuda()
        # Encode contexts (time-major), mask padding, sum over time steps.
        c_hid, c_out = context_model(torch.transpose(c, 0, 1))
        cm = torch.transpose(cm, 0, 1).unsqueeze(2)
        cm = cm.expand(cm.shape[0], cm.shape[1], 2*HIDDEN_SIZE)
        c_out = torch.sum(c_out * cm, dim=0)
        # Same masked sum-pooling for the questions.
        q_hid, q_out = question_model(torch.transpose(q, 0, 1))
        qm = torch.transpose(qm, 0, 1).unsqueeze(2)
        qm = qm.expand(qm.shape[0], qm.shape[1], 2*HIDDEN_SIZE)
        q_out = torch.sum(q_out * qm, dim=0)
        predictions = relevance_model(torch.cat((c_out, q_out), 1)).squeeze(1)
        # Fix: torch.nn.functional.sigmoid is deprecated; torch.sigmoid is
        # the supported, numerically identical replacement.
        predictions = torch.sigmoid(predictions)
        return predictions
| [
"sudhra@microsoft.com"
] | sudhra@microsoft.com |
9ca0718f800bb4b6ae81f84aeac29ecbf4a10ee1 | 2a73bb7d1ff60a778fea56918af93932a1406c2a | /tests/data_loader/tests_data_filter.py | baddd7e518b64025f54b8d00118de8b4b41fbfb5 | [
"MIT"
] | permissive | usert5432/slice_lid | 0b7acc6ced17bb13092ea74551a26658cbce23f0 | 731eda41364aa0b4f292fe60ee6577b95b9c6752 | refs/heads/master | 2023-01-13T03:05:11.843510 | 2020-11-20T20:32:12 | 2020-11-20T20:35:25 | 272,578,796 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,480 | py | """Test `IDataLoader` data filtering by a `DataFilter` decorator"""
import unittest
from lstm_ee.data.data_loader.dict_loader import DictLoader
from slice_lid.data.data_loader.data_filter import DataFilter
from .tests_data_loader_base import FuncsDataLoaderBase
class TestsDataFilter(unittest.TestCase, FuncsDataLoaderBase):
    """Test `DataFilter` decorator.

    Every case filters the same 5-sample dataset; the original repeated the
    fixture and filtering boilerplate verbatim in all six tests, so it is
    factored into _make_data/_check_filter helpers here.
    """

    @staticmethod
    def _make_data():
        """Fresh copy of the shared 5-sample dataset (pdg, iscc, idx)."""
        return {
            'pdg'  : [ 1, 2, 0, 1, 2 ],
            'iscc' : [ 0, 1, 0, 1, 0 ],
            'idx'  : [ 0, 1, 2, 3, 4 ],
        }

    def _check_filter(self, keep_pdg_iscc_list, expected_idx):
        """Filter the shared dataset and verify which 'idx' values survive."""
        data_loader = DataFilter(
            DictLoader(self._make_data()), 'pdg', 'iscc', keep_pdg_iscc_list
        )
        self._compare_scalar_vars({ 'idx' : expected_idx }, data_loader, 'idx')

    def test_filter_simple(self):
        """Simple filtering tests"""
        self._check_filter([ (0, 0), (1, 0) ], [ 0, 2 ])

    def test_filter_missing_value(self):
        """Test filtering with filter that does not match anything"""
        self._check_filter([ (0, 0), (-1, 0) ], [ 2 ])

    def test_filter_pass_all(self):
        """Test filtering that should not filter anything"""
        self._check_filter(
            [ (0, 0), (1, 0), (1, 1), (2, 0), (2, 1) ], [ 0, 1, 2, 3, 4 ]
        )

    def test_filter_pass_none(self):
        """Test filtering that should reject all samples"""
        self._check_filter([ (-1, -1) ], [ ])

    def test_filter_wildcard_pdg(self):
        """Test filtering with wildcard PDG pattern"""
        self._check_filter([ (None, 1) ], [ 1, 3 ])

    def test_filter_wildcard_iscc(self):
        """Test filtering with wildcard ISCC pattern"""
        self._check_filter([ (1, None) ], [ 0, 3 ])
if __name__ == '__main__':
    # Allow running this test module directly with `python`.
    unittest.main()
| [
"torbu001@umn.edu"
] | torbu001@umn.edu |
af77dd42ce1cca29d442ec4fcf465c879a990a29 | 3bb5f04a697143f0b64b36c5928ba34a0ee83597 | /searchalgorithms/binarysearch.py | f83b198a5f2b2c269e7fd0b5c312219f6f714ae6 | [] | no_license | abhisheksaxena1998/DataStructures-Algorithms-Python | 0ed5f74a8e6ff4f864c5d6cbe68fb5d76cfb44f6 | 318b042b5ca14dd0b251d762c61765f06cede89e | refs/heads/master | 2022-12-19T19:19:00.725383 | 2020-10-01T05:32:09 | 2020-10-01T05:32:09 | 300,156,985 | 2 | 0 | null | 2020-10-01T05:31:16 | 2020-10-01T05:31:15 | null | UTF-8 | Python | false | false | 523 | py | ''' Key Points
Binary Search is only applied on sorted array
Time Complexity O(logn)
T(n) = T(n/2) + 1
'''
def binSearch(arr, key, left, right):
    """Binary search for `key` in the sorted slice arr[left..right].

    Returns the index of `key`, or -1 when it is absent.  Iterative form;
    visits exactly the same midpoints as the recursive version.
    """
    while left <= right:
        mid = (left + right) // 2
        if arr[mid] == key:
            return mid
        if arr[mid] < key:
            left = mid + 1   # key lies in the right half
        else:
            right = mid - 1  # key lies in the left half
    return -1
t=int(input())  # number of queries to answer
while t:
    arr = list(map(int,input().split()))  # sorted array, space-separated
    key = int(input())  # value to look up
    print(binSearch(arr,key,0,len(arr)-1))  # index of key, or -1
    t=t-1
| [
"princegaur1998@gmail.com"
] | princegaur1998@gmail.com |
2527f247fec61f2141bb48302517b78430fe6068 | 4e85fc9a611cab613a1a5fa19ceb3d0cca8a49ad | /citcall/settings.py | 77d081b90d6680066fa90ed12ed049e15c81a5f5 | [] | no_license | novanBule/citcall-django | dc08063428e6c24b0f9d450a859caee142f3081d | be04cc79d0816b1d1009d0ff89dd37708d4a0603 | refs/heads/master | 2021-01-02T15:24:17.516279 | 2020-02-21T11:16:38 | 2020-02-21T11:16:38 | 239,680,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,244 | py | """
Django settings for citcall project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed in source; move it to an environment
# variable before any production deployment.
SECRET_KEY = 'u#@j8ng$ub#z$-88+u647l3%zab6r9mm@x$ya6##ub781w0si+'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list is fine while DEBUG=True; must list real hosts in production.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'miscall', # register miscall app
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'citcall.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'], # template folder
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'citcall.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Development default: file-based SQLite next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'

# static files directory
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
] | [
"maaf.emailgualupa@gmail.com"
] | maaf.emailgualupa@gmail.com |
b7044b5e11a30bb0ed1d82430f08f2377f6f5145 | 9bb93695591f5792357f8740202dca3bfc331b5f | /latex/code/rejectionsample.py | 3df3227cf0689b6fc5b42206f4db5de80abfd389 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | ciphergoth/sansreplace | 7e2c153b5c567624417c99f36a87f15ed63ecb93 | ae01be85ea5d164d6616209c2bc6efe97cacd606 | refs/heads/master | 2023-01-10T18:20:38.893389 | 2023-01-09T00:06:16 | 2023-01-09T00:23:20 | 153,832,473 | 13 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | def random_choose(n, k):
    d = []    # accepted samples, in the order they were first drawn
    r = set() # mirror of d as a set, for O(1) duplicate checks
    while len(d) < k:
        # Rejection sampling: draw uniformly from range(n), discard repeats.
        x = random.randrange(n)
        if x not in r:
            d.append(x)
            r.add(x)
    return d
| [
"paulcrowley@google.com"
] | paulcrowley@google.com |
e330b2c4ec0d709e389aa70c7b230a248e40cdff | 500b03fa6cb776c1d51db4a3a3aa252ddf5a50e6 | /book_exercise/py_intro/basics/Chapter 3: Numbers/name_random.py | f71fc4d460bda5325b87858fc9109a256951f46c | [] | no_license | carloslvm/learning-python | b3796a0a5b751baae8c551a9f6fe262f98980691 | 07f885454cf21b7d215a58da7fcb907715e546bd | refs/heads/master | 2022-07-27T21:39:11.937801 | 2022-07-09T17:47:56 | 2022-07-09T17:47:56 | 163,447,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | #!/usr/bin/python3
# Printing a name randomly between 1 and 10.
from random import randint
# NOTE(review): range(randint(1, 10) + 1) iterates 2..11 times, and the
# loop variable is immediately overwritten -- so 'David' prints a random
# 2-11 times, not 1-10.  Confirm the intended behavior.
for name in range(randint(1, 10)+ 1):
    name = 'David'
    print(name)
| [
"cvaldez553@gmail.com"
] | cvaldez553@gmail.com |
819c11fb2ff6e9bbda0cb03380c26525458095b7 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=2.5_rd=0.8_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=35/sched.py | a3b742d5b6b87242902b200cb99b1c50add5a6e7 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | -X FMLP -Q 0 -L 3 120 400
-X FMLP -Q 0 -L 3 93 400
-X FMLP -Q 0 -L 3 80 400
-X FMLP -Q 1 -L 2 73 400
-X FMLP -Q 1 -L 2 63 250
-X FMLP -Q 2 -L 1 55 200
-X FMLP -Q 2 -L 1 45 400
-X FMLP -Q 3 -L 1 35 125
-X FMLP -Q 3 -L 1 35 150
22 100
21 100
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
697dbd3b72ffe02a00fcfbaa0642539839c37fc2 | c74b417bc9c9e036045051139a798841b94831b9 | /tests/features/terrain/html/offsite_page.py | aeca5690595f8e7d07eb2395b80387e6c61b7af3 | [] | no_license | pculture/unisubs-testing | ea2406389b1a91da1615de02093442288e6b7a19 | 0fc5a398d9c9448d5a9d6dd1a315740b341ed3bf | refs/heads/master | 2020-05-17T15:07:10.269386 | 2012-07-20T04:58:34 | 2012-07-20T04:58:34 | 729,217 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,640 | py | #!/usr/bin/env python
from unisubs_page import UnisubsPage
class OffsitePage(UnisubsPage):
    """Main page for all offsite testing to drive playback and menus.
    """
    # CSS selectors for the caption overlay span and the widget menu tab.
    _CAPTIONS = "span.unisubs-captionSpan"
    _WIDGET_MENU = "span.unisubs-tabTextchoose"
    def start_playback(self, video_position):
        # Start the n-th embedded widget via the unisubs JS API.
        self.browser.execute_script("unisubs.widget.Widget.getAllWidgets()[%s].play()" % video_position)
    def pause_playback(self, video_position):
        # Pause the n-th embedded widget.
        self.browser.execute_script("unisubs.widget.Widget.getAllWidgets()[%s].pause()" % video_position)
    def open_subs_menu(self, video_position):
        # Open the subtitle menu of the n-th embedded widget.
        self.browser.execute_script("unisubs.widget.Widget.getAllWidgets()[%s].openMenu()" % video_position)
    def displays_subs_in_correct_position(self):
        """Return true if subs are found in correct position on video.

        NOTE(review): on failure this falls through to record_error() and
        implicitly returns None (falsy) rather than False -- confirm
        callers only rely on truthiness.
        """
        size = self.get_size_by_css(self._CAPTIONS)
        height = size["height"]
        # Caption block should sit within a plausible rendered-height band.
        if 10 < height < 80:
            return True
        else:
            self.record_error()
    def pause_playback_when_subs_appear(self, video_position):
        # Wait until the first caption renders, then pause playback.
        self.scroll_to_video(video_position)
        self.wait_for_element_visible(self._CAPTIONS)
        self.pause_playback(video_position)
    def scroll_to_video(self, video_position):
        # Bring the n-th widget into view by paging the menu element down.
        self.wait_for_element_present(self._WIDGET_MENU)
        elements_found = self.browser.find_elements_by_css_selector(self._WIDGET_MENU)
        elem = elements_found[video_position]
        # NOTE(review): this sends the literal text "PAGE_DOWN"; selenium's
        # Keys.PAGE_DOWN constant was probably intended -- confirm.
        elem.send_keys("PAGE_DOWN")
| [
"jed@pculture"
] | jed@pculture |
acf787541822adce48bbe0cfee1ec25c650fcc59 | 1ed96881640ecc5a7fbeda14611512ae890d7f6d | /team12.py | e5fed2c18c0715bb93f978719970839cba2b0cdc | [] | no_license | CSP-WHSPeriod2/iterative-prisoners-dilemma | 387eece5101c7a9932db1715c2618d131d2c091e | 646f4c463b8af0d3b905bc198266fa94b14d92d5 | refs/heads/master | 2020-03-31T22:33:03.398285 | 2018-10-18T19:44:32 | 2018-10-18T19:44:32 | 152,622,380 | 0 | 30 | null | 2018-10-18T17:00:49 | 2018-10-11T16:26:53 | Python | UTF-8 | Python | false | false | 1,944 | py | ####
# Each team's file must define four tokens:
# team_name: a string
# strategy_name: a string
# strategy_description: a string
# move: A function that returns 'c' or 'b'
####
team_name = 'team_IslandRoyale' # Only 10 chars displayed.
strategy_name = 'Check for majority'
strategy_description = 'Returns the most often occuring result of the last 3'
def checker(their_history, my_history, my_score):
    """Pick the next move from the opponent's last three moves.

    Plays the majority move ('c' vs 'b') of the opponent's last three
    rounds, except that once our score has dropped to -1000 or below we
    always betray.

    Bug fix: the original enumerated only the six MIXED last-three
    patterns, so uniform histories ('ccc'/'bbb') matched no branch and
    the function returned None -- an illegal move.  Counting the 'c's
    covers every pattern and agrees with the original on all mixed ones.
    """
    if my_score <= -1000:
        return 'b'  # deep in the hole: always betray
    last_three = their_history[-3:]
    # Majority vote: cooperate iff they cooperated at least twice.
    if last_three.count('c') >= 2:
        return 'c'
    return 'b'
def move(my_history, their_history, my_score, their_score):
    '''Return this strategy's next move: 'c' (cooperate) or 'b' (betray).

    my_history and their_history are strings of past moves; my_score and
    their_score are ints.  Betrays while fewer than three rounds have
    been played, then delegates to checker(), which votes on the
    opponent's last three moves.
    '''
    not_enough_history = len(their_history) <= 2
    if not_enough_history:
        return 'b'
    return checker(their_history, my_history, my_score)
# my_history: a string with one letter (c or b) per round that has been played with this opponent.
# their_history: a string of the same length as history, possibly empty.
# The first round between these two players is my_history[0] and their_history[0].
# The most recent round is my_history[-1] and their_history[-1].
# Analyze my_history and their_history and/or my_score and their_score.
# Decide whether to return 'c' or 'b'. | [
"noreply@github.com"
] | noreply@github.com |
33ec3d2938d023c2ff70848a180de6d531405866 | 34253092df18677cd616bb984c96b412781c3e93 | /solve.py | d5fdf6d7f416e173c531ea6b62ce10d6672c3e3c | [
"BSD-2-Clause"
] | permissive | lacop/BubbleSolver | 003b20957f376f9537448e1165b0b07e398e9493 | 2e31a83b87a537af4fc5ddc3610100d63043d8ec | refs/heads/master | 2020-06-06T11:15:01.693088 | 2014-08-06T18:38:18 | 2014-08-06T18:38:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,577 | py | import memo
# 1: blue, 2: yellow, 3: green, 4: red
next = {'1': '2', '2': '3', '3': '4', '4': '.'}
def solve(level, width, moves):
    """Depth-first search for a sequence of `moves` pops that clears the board.

    `level` is a flat list of cells ('.' empty, '1'..'4' bubble stages) on a
    grid `width` cells wide.  Returns a list of (index, board_after_pop)
    steps on success, or None when the board cannot be cleared.
    NOTE(review): the module-level `next` dict shadows the builtin next().
    """
    #print('solve', level, moves)
    if moves == 0:
        # Test for empty board
        for c in level:
            if c != '.':
                return None
        return []
    # NOTE(review): popdir appears unused; pop() below does all the work.
    def popdir(inrange, coord):
        i = 1
        while inrange(coord(i)):
            if poplevel[coord(i)] != '.':
                pop(coord(i))
                return
            i += 1
    # Advance the bubble at index i.  A bubble that bursts launches four
    # "balls" that travel left/right (same row only) and up/down until
    # each hits another bubble, advancing it in turn (chain reaction).
    def pop(i):
        # Each ball is (step count, bounds predicate, position function).
        balls = [(None, lambda _: True, lambda _: i)]
        while len(balls) > 0:
            newballs = []
            for j, inrange, pos in balls:
                p = pos(j)  # cell this ball currently occupies
                if inrange(p) and p >= 0 and p < len(poplevel):
                    if poplevel[p] == '.':
                        # Empty cell: keep travelling one step further.
                        newballs.append((j+1, inrange, pos))
                    else:
                        poplevel[p] = next[poplevel[p]]
                        # If popped (max level), pop neighbours
                        if poplevel[p] == '.':
                            # Capture value of p in lambda
                            newballs.append((1, lambda x,p=p : x//width == p//width, lambda k,p=p: p-k)) # Left
                            newballs.append((1, lambda x,p=p : x//width == p//width, lambda k,p=p: p+k)) # Right
                            newballs.append((1, lambda _: True, lambda k,p=p: p-k*width)) # Up
                            newballs.append((1, lambda _: True, lambda k,p=p: p+k*width)) # Down
            balls = newballs
    # Try popping every occupied cell and recurse on the resulting board.
    empty = True
    for i in range(len(level)):
        # Skip empty
        if level[i] == '.':
            continue
        empty = False
        poplevel = level[:]  # pop() mutates this copy via closure
        pop(i)
        #print('poplev', poplevel)
        res = solve(poplevel[:], width, moves-1)
        #print('res', res)
        if res is not None:
            return [(i, poplevel)] + res
    if empty:
        return []
    return None
def solveboard(board, width, moves):
    """Solve `board` (a flat string) and pretty-print each step, or a failure note."""
    sol = solve(list(board), width, moves)
    if sol is None:
        print('No solution found!')
        return
    # Render a flat board as width-sized rows joined by '|'.
    def formboard(b):
        return '|'.join([b[start:start+width] for start in range(0, len(b), width)])
    print('ROW\tCOL\tBOARD')
    print(' \t \t|{}|'.format(formboard(board)))
    for step,board in sol:
        # Convert the flat cell index of each step to 1-based row/column.
        row = step // width + 1
        col = step % width + 1
        print('{}\t{}\t|{}|\t::{}'.format(row, col, formboard(''.join(board)), ''.join(board)))
| [
"lacop@lacop.net"
] | lacop@lacop.net |
8799587af23d45f613e72c763c6650d93bba8f46 | ecee6e84ba18100b621c7e06f493ae48e44a34fe | /devel/lib/python2.7/dist-packages/costmap_2d/cfg/VoxelPluginConfig.py | ca382f3ac25f62dda8cec625bd10f2ab8217430e | [] | no_license | theleastinterestingcoder/Thesis | 6d59e06b16cbe1588a6454689248c88867de2094 | 3f6945f03a58f0eff105fe879401a7f1df6f0166 | refs/heads/master | 2016-09-05T15:30:26.501946 | 2015-05-11T14:34:15 | 2015-05-11T14:34:15 | 31,631,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,597 | py | ## *********************************************************
##
## File autogenerated for the costmap_2d package
## by the dynamic_reconfigure package.
## Please do not edit.
##
## ********************************************************/
from dynamic_reconfigure.encoding import extract_params
inf = float('inf')
config_description = {'upper': 'DEFAULT', 'lower': 'groups', 'srcline': 233, 'name': 'Default', 'parent': 0, 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT', 'field': 'default', 'state': True, 'parentclass': '', 'groups': [], 'parameters': [{'srcline': 262, 'description': 'Whether to use this plugin or not', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'enabled', 'edit_method': '', 'default': True, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 262, 'description': 'Max Obstacle Height', 'max': 50.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'max_obstacle_height', 'edit_method': '', 'default': 2.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 262, 'description': 'The z origin of the map in meters.', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'origin_z', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 262, 'description': 'The z resolution of the map in meters/cell.', 'max': 50.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'z_resolution', 'edit_method': '', 'default': 0.2, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 262, 'description': 'The number of voxels to in each vertical column.', 'max': 16, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': 
'/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'z_voxels', 'edit_method': '', 'default': 10, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 262, 'description': 'The number of unknown cells allowed in a column considered to be known', 'max': 16, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'unknown_threshold', 'edit_method': '', 'default': 15, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 262, 'description': 'The maximum number of marked cells allowed in a column considered to be free', 'max': 16, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'mark_threshold', 'edit_method': '', 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 262, 'description': 'Method for combining two layers', 'max': 2, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'combination_method', 'edit_method': "{'enum_description': 'Method for combining layers enum', 'enum': [{'srcline': 15, 'description': 'b', 'srcfile': '/home/alfred/quan_ws/src/navigation/costmap_2d/cfg/VoxelPlugin.cfg', 'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Overwrite'}, {'srcline': 16, 'description': 'a', 'srcfile': '/home/alfred/quan_ws/src/navigation/costmap_2d/cfg/VoxelPlugin.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Maximum'}]}", 'default': 1, 'level': 0, 'min': 0, 'type': 'int'}], 'type': '', 'id': 0}
min = {}
max = {}
defaults = {}
level = {}
type = {}
all_level = 0
#def extract_params(config):
# params = []
# params.extend(config['parameters'])
# for group in config['groups']:
# params.extend(extract_params(group))
# return params
for param in extract_params(config_description):
min[param['name']] = param['min']
max[param['name']] = param['max']
defaults[param['name']] = param['default']
level[param['name']] = param['level']
type[param['name']] = param['type']
all_level = all_level | param['level']
VoxelPlugin_Overwrite = 0
VoxelPlugin_Maximum = 1
| [
"quanzhou64@gmail.com"
] | quanzhou64@gmail.com |
2c3b054d93f45003c87f98cb0129da2c90a07b02 | 5551361c02ee4a78036e2452fea615fc912f406b | /tut4.py | 465358ee61083a2301be6d3e8df1a5bc8be26084 | [
"MIT"
] | permissive | Demfier/cs116-tut-solns | 3d93752e4ca2d3debbb36c901a13e7201e5bf0fe | 8277dae848ebf66522245fe15492ab91e48dac93 | refs/heads/master | 2020-12-13T13:32:52.551815 | 2020-02-14T05:25:45 | 2020-02-14T05:25:45 | 234,433,306 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,503 | py | # This file contains solutions to CS116, Tutorial 4
import math
import check
# CQ1: E)
def create_cards(values, suits):
    """Pair each value with its suit.

    Returns a list of two-element [value, suit] cards; pairing stops at the
    shorter of the two inputs, matching the multi-iterable map() behavior.
    """
    return [[value, suit] for value, suit in zip(values, suits)]
def create_cards_alt(values, suits):
    """Pair each value with its suit by index.

    Returns a list of two-element [value, suit] cards.
    Fixed: the lambda referenced undefined names ``x``/``y`` instead of the
    ``values``/``suits`` parameters, so any non-empty call raised NameError.
    """
    return list(map(lambda i: [values[i], suits[i]], range(len(values))))
# Tests for create_cards go here
def choose_by_color(loC, color):
    """Return the values of the cards in loC whose suit matches color.

    color == 'red' selects diamonds/hearts; any other color value selects
    the black suits (spades/clubs), mirroring the original contract.
    """
    if color == 'red':
        wanted = ('diamonds', 'hearts')
    else:
        wanted = ('spades', 'clubs')
    return [card[0] for card in loC if card[1] in wanted]
def filter_and_convert(loC, lookup_list, val_list):
    """Append to val_list the value of every card in loC whose suit is in
    lookup_list, preserving order, and return val_list."""
    for card in loC:
        if card[1] in lookup_list:
            val_list.append(card[0])
    return val_list
def choose_by_color(loC, color): # recursive impl.
    """Return the values of the cards in loC whose suit matches color.

    color must be 'red' (diamonds/hearts) or 'black' (spades/clubs).
    Fixed: any other color left lookup_list unbound and crashed with
    UnboundLocalError; we now raise a clear ValueError instead.
    """
    if color == 'red':
        lookup_list = ['diamonds', 'hearts']
    elif color == 'black':
        lookup_list = ['spades', 'clubs']
    else:
        raise ValueError("color must be 'red' or 'black'")
    return filter_and_convert(loC, lookup_list, [])
# Tests for choose_by_color go here
def flip_color(c):
    """Swap card c's suit with its opposite-colour partner, in place.

    hearts <-> spades and diamonds <-> clubs.  A suit outside those four
    raises ValueError (from the list .index() lookup), as before.
    """
    pair_one = ['hearts', 'spades']
    pair_two = ['diamonds', 'clubs']
    if c[1] in pair_one:
        c[1] = pair_one[1 - pair_one.index(c[1])]
    else:
        c[1] = pair_two[1 - pair_two.index(c[1])]
def flip_color(c):
    """Swap card c's suit for its opposite-colour partner, in place.

    spades <-> hearts and diamonds -> clubs; every other suit (including
    clubs) becomes diamonds, exactly as the original if/elif chain did.
    """
    swap = {'spades': 'hearts', 'hearts': 'spades', 'diamonds': 'clubs'}
    c[1] = swap.get(c[1], 'diamonds')
# Tests for flip_color go here
def flip_hand_helper(loC, pos):
    """Flip the colour of every card from index pos to the end, in place.

    Returns loC.  An empty hand is returned untouched regardless of pos.
    """
    if loC == []:
        return loC
    while pos != len(loC):
        flip_color(loC[pos])
        pos += 1
    return loC
def flip_hand(loC):
    """Flip the colour of every card in loC, in place, and return it."""
    return flip_hand_helper(loC, 0)
# Tests for flip_hand go here
def last_occ_index(list_of_vals, val, pos):
    """Return the largest index <= pos at which val occurs, or -1."""
    while pos >= 0:
        if list_of_vals[pos] == val:
            return pos
        pos -= 1
    return -1
def modify_list(nums, n):
    """Mutate nums in place depending on how often n occurs.

    Absent -> append n; exactly one occurrence -> delete it; two or more
    occurrences -> delete the first and the last occurrence.
    """
    occurrences = nums.count(n)
    if occurrences == 0:
        nums.append(n)
    elif occurrences == 1:
        nums.remove(n)
    else:
        nums.remove(n)
        nums.pop(last_occ_index(nums, n, len(nums) - 1))
# Tests for modify_list go here
def sanitize(s):
    """Return s with every non-alphanumeric character removed."""
    return ''.join(ch for ch in s if ch.isalnum())
# Tests for sanitize go here
def reversed_list(L):
    """Return a new list containing L's elements in reverse order.

    L itself is left unmodified.
    """
    return list(reversed(L))
def reversed_list_alt(L):
    """Return a new list containing L's elements in reverse order.

    Fixed: the old implementation popped directly from L, so it emptied
    the caller's list as a side effect; we now pop from a private copy,
    which makes it behave like reversed_list while keeping the map/pop
    trick.
    """
    scratch = L[:]
    return list(map(scratch.pop, [-1] * len(scratch)))
# Tests for reversed_list go here
| [
"sahu.gaurav719@gmail.com"
] | sahu.gaurav719@gmail.com |
491c9aea69c437d575dd966a6e72e1d9b9ab14f3 | 1436f58d5673dba004371fceb5e90af53e64d26e | /errors_life_long.py | e35d1af4c66699e192c1ee65077180a6bc6f8b28 | [
"MIT"
] | permissive | mfkiwl/uav-autonomous-landing | 8bf9d45dcacbb0ae1cb070319cb596f77f047167 | 555aae11046b85b1cae20a91c49580771470f5e1 | refs/heads/master | 2022-04-05T16:58:09.555100 | 2020-03-02T11:22:13 | 2020-03-02T11:22:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,442 | py | import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import os
import sys
import itertools
import statistics as st
# Output resolution for the saved PDF figures.
DPI = 5000
###########################################################################################
# NOTE(review): machine-specific absolute path to the flight-log directory.
input_dir = "/home/pablo/ws/log/errors"
print("Reading from", input_dir)
###########################################################################################
## linear speed
# The single CLI argument selects which test run (by linear speed) to analyse.
if len(sys.argv) == 2:
    linear_speed = sys.argv[1]
else:
    print("Exiting...")
    exit()
## read files
errors_pred = pd.read_csv(input_dir + "/errors_pred_lifelong_{}.csv".format(linear_speed))
## save dir
save_dir = "/home/pablo/ws/log/errors/lifelong_{}".format(linear_speed)
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
# get only some lines from the data
# Truncate to the first 200 s of the run; assumes the log lasts longer than
# 200 s -- TODO confirm, otherwise .index[0] raises IndexError.
zoom_in = True
if zoom_in:
    row_200_sec = errors_pred[errors_pred['t'].gt(200)].index[0]
    errors_pred = errors_pred.iloc[0:row_200_sec]
# extract data from DataFrame
t_pred = errors_pred['t'].tolist()
ex_pred_real = errors_pred['ex_real'].tolist()
ey_pred_real = errors_pred['ey_real'].tolist()
ez_pred_real = errors_pred['ez_real'].tolist()
# NOTE(review): the *_target series below are extracted but never used.
ex_pred_target = errors_pred['ex_target'].tolist()
ey_pred_target = errors_pred['ey_target'].tolist()
ez_pred_target = errors_pred['ez_target'].tolist()
## error x
fig, ax = plt.subplots()
ax.plot(t_pred, ex_pred_real, 'g')
ax.set(xlabel='time (s)', ylabel='error in x (m)')
# NOTE(review): bottom/top are fetched but unused (the ylim line is commented out).
bottom, top = plt.ylim() # return the current ylim
#plt.ylim((-1, 1)) # set the ylim to bottom, top
ax.grid()
fig.savefig(os.path.join(save_dir, "ex.pdf"), format='pdf', dpi=DPI)
plt.close()
## error y
fig, ax = plt.subplots()
ax.plot(t_pred, ey_pred_real, 'g')
ax.set(xlabel='time (s)', ylabel='error in y (m)')
#plt.ylim((-0.5, 0.5)) # set the ylim to bottom, top
ax.grid()
fig.savefig(os.path.join(save_dir, "ey.pdf"), format='pdf', dpi=DPI)
plt.close()
# error z
fig, ax = plt.subplots()
ax.plot(t_pred, ez_pred_real, 'g')
ax.set(xlabel='time (s)', ylabel='error in z (m)')
ax.grid()
fig.savefig(os.path.join(save_dir, "ez.pdf"), format='pdf', dpi=DPI)
plt.close()
# Absolute-error summary statistics for x and y (z is only plotted above).
abs_ex = [abs(ex) for ex in ex_pred_real]
abs_ey = [abs(ey) for ey in ey_pred_real]
print("Total test time", t_pred[-1])
print()
print("Min error x", min(abs_ex))
print("Max error x", max(abs_ex))
print("Mean error x", st.mean(abs_ex))
print()
print("Min error y", min(abs_ey))
print("Max error y", max(abs_ey))
print("Mean error y", st.mean(abs_ey))
| [
"pablorpalafox@gmail.com"
] | pablorpalafox@gmail.com |
cdbdb1570651243d1a2166dee11b37fe1b42826e | 4f0437097dc18ea19c1b9fbd2671f6296158b429 | /etl.py | 10832390832cca9829f3286431f65630e7fb26ae | [
"MIT"
] | permissive | keyanyang/udacity-data-engineering-capstone | e915da5205a6697ef07fd392743b1595589655c3 | 457ead73c99220002f1555b4d3916df9f8f89c3c | refs/heads/main | 2023-02-15T09:10:26.558118 | 2021-01-15T17:42:27 | 2021-01-15T17:42:27 | 328,462,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,323 | py | import pandas as pd
import os
from dotenv import load_dotenv, find_dotenv
from src.utility import *
from src.data_model import *
# load environment variables
load_dotenv(find_dotenv())
DATABASE_URL = os.getenv("DB_URL")
def main():
    """Run the ETL pipeline.

    Loads the raw immigration, airport, demographics and temperature
    datasets, cleans each one, builds the fact/dimension tables and
    persists every table to the configured database.
    """
    spark = create_spark_session()

    # Load the raw datasets.
    i94 = spark.read.parquet("./data/raw/sas_data")
    airports = spark.read.csv("./data/raw/airport-codes_csv.csv", header=True, inferSchema=True)
    demographics = spark.read.csv("./data/raw/us-cities-demographics.csv", header=True, inferSchema=True, sep=';')
    temperatures = spark.read.csv("./data/raw/GlobalLandTemperaturesByCity.csv", header=True, inferSchema=True)

    # De-duplicate each dataset on its natural key.
    i94 = i94.drop_duplicates(['cicid'])
    airports = airports.drop_duplicates(['ident'])
    demographics = demographics.drop_duplicates(['City', 'State', 'Race'])
    temperatures = temperatures.drop_duplicates(['dt', 'City', 'Country'])

    # Discard rows that are entirely empty.
    i94 = i94.dropna(how='all')
    airports = airports.dropna(how='all')
    demographics = demographics.dropna(how='all')
    temperatures = temperatures.dropna(how='all')

    # Dataset-specific cleanup.
    i94 = i94.drop('occup', 'entdepu', 'insnum')
    temperatures = temperatures.dropna(subset=['AverageTemperature'])

    # Keep only immigration records arriving at a known i94 port.
    port_dict = build_i94_port_dict('./data/raw/i94port.txt')
    valid_ports = list(port_dict.values())
    i94 = i94.filter(i94.i94port.isin(valid_ports))

    # Build the star-schema tables.
    output_tables = {
        "i94_fact": create_i94_fact(i94),
        "visa_dim": create_visa_dim(i94),
        "temperature_dim": create_temperature_dim(temperatures, port_dict),
        "airport_dim": create_airport_dim(airports, port_dict),
        "demo_dim": create_demographics_dim(demographics, port_dict),
    }

    # Persist every table to the database.
    for name, table in output_tables.items():
        save_table_to_database(table, name, DATABASE_URL)
    print("ETL is completed.")


if __name__ == "__main__":
    main()
| [
"kxy156@case.edu"
] | kxy156@case.edu |
0fbd59a2bd71646db731adef431c5bf3708e5a7e | 612f8c06db1660fb364adcd0c666308400230620 | /Lecture4-Python/add2numbers.py | 8410f87b26a12758073e558d056e848c273b83fa | [] | no_license | zackychan97/Stanford-Code-in-Place | b349e167f10ec45efe9315936448c5e7821ce976 | 5e694afb835a8ee195bf39653782a18efacdffbf | refs/heads/master | 2022-09-01T02:29:00.824593 | 2020-05-28T00:26:34 | 2020-05-28T00:26:34 | 255,403,203 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | """
File: add2numbers.py
--------------------
Another python program to get some practice with variables.
This program asks the user for two integers and prints their sum.
"""
def main():
    """Prompt the user for two integers and print their sum."""
    print("This program adds two numbers.")
    num1 = int(input("Enter first number: "))
    num2 = int(input("Enter second number: "))
    total = num1 + num2
    print("The total is " + str(total) + ".")


# Standard entry-point guard: call main() when run as a script.
if __name__ == '__main__':
    main()
| [
"upwork.ztb@gmail.com"
] | upwork.ztb@gmail.com |
c616722734885ced6763e3ca047faf01fab36c81 | 89fac230fd073774fe3236e49c7fb0e471413e4b | /Customer Churn prediction/logistic_regression.py | c56e28081a514c992ae5d7378cb85a957bd63688 | [] | no_license | Venkat-Rajgopal/Kaggle-projects | 31bb6afc54f5c5e382696defab75f90513698d86 | a93daf2cb84348a4d8a19d6985307ba3955b9ebe | refs/heads/master | 2021-11-24T07:44:50.409523 | 2021-11-08T20:49:38 | 2021-11-08T20:49:38 | 148,202,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,502 | py | import os
from os import chdir, getcwd
# NOTE(review): chdir(getcwd()) is a no-op -- the process is already in its
# own current working directory; kept as-is for compatibility.
wd=getcwd()
chdir(wd)
import numpy as np
import pandas as pd
# Modelling imports from Sklearn
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, roc_auc_score, roc_curve
from cm_plot import plot_cm
from data_prep import prepare_data
# Visualization
import matplotlib.style as style
import matplotlib.pyplot as plt
style.use('seaborn')
# ------------------------------------------------------------------------
# Load the prepared churn dataset (feature engineering lives in data_prep).
df_final = prepare_data()
# Train Test Split and set targets
train, test = train_test_split(df_final, test_size = .2, random_state = 10)
feats = [c for c in train.columns if c not in ['Churn']]
target = ['Churn']
train_x = train[feats]
train_y = np.ravel(train[target])
test_x = test[feats]
test_y = np.ravel(test[target])
# ------------------------------------------------------------------------
# Train model and evaluate
# Grid-search the inverse regularisation strength C over 100 log-spaced
# values, 5-fold CV, refitting on the best roc_auc.
clf = LogisticRegression(solver = 'liblinear')
param_grid = {'C': np.logspace(-4, 4, 100, base=10) }
metrics = ['roc_auc', 'accuracy']
gs = GridSearchCV(clf, param_grid = param_grid, cv = 5, scoring = metrics ,verbose=1, refit = 'roc_auc')
gs.fit(train_x, train_y)
# NOTE(review): this list of (metric, best CV score) pairs is built and then
# immediately discarded -- it was probably meant to be printed or stored.
[(m, gs.cv_results_['mean_test_{}'.format(m)][gs.best_index_]) for m in metrics]
preds = gs.predict(test_x)
probs = gs.predict_proba(test_x)
print ("Accuracy : ", accuracy_score(test_y, preds))
print("Classification report : \n", classification_report(test_y, preds))
# confusion matrix
cm = confusion_matrix(test_y, preds)
# roc_auc_score
# NOTE(review): roc_auc_score is fed the hard class predictions here;
# feeding probs[:, 1] (as the ROC curve below does) is the usual choice --
# confirm intent.
model_roc_auc = roc_auc_score(test_y, preds)
print('ROC_AUC score: ' ,model_roc_auc)
fpr,tpr,thresholds = roc_curve(test_y, probs[:,1])
# ------------------------------------------------------------------------
# Plot confusion matrix and ROC curve side by side and save to disk.
out_path = os.path.abspath('plots')
fig = plt.figure(figsize=(10, 5))
plt.subplot(1,2,1)
# Fixed: `df` was never defined in this script (the prepared frame is
# `df_final`), so this line previously raised NameError at runtime.
plot_cm(cm, classes=np.unique(df_final.Churn), mtd = 'Logistic')
plt.subplot(1,2,2)
plt.plot(fpr, tpr, color='royalblue', label='{} {}'.format('Logistic_regression AUC:',np.round(model_roc_auc,3)))
# Diagonal chance line for reference.
plt.plot([0, 1], [0, 1], linestyle='--', color='darkorange')
plt.legend(loc="lower right")
fig.savefig(os.path.join(out_path, 'log_reg_cm_roc.png'), bbox_inches='tight', dpi=100)
plt.show()
| [
"venkatramani.r@gmail.com"
] | venkatramani.r@gmail.com |
fc80dafb1d1d6e219b60357d8bd2a5f407c26ca4 | 228ebc9fb20f25dd3ed2a6959aac41fd31314e64 | /google/cloud/aiplatform_v1beta1/types/index.py | 289ef763b8977f8503af013acbc9cfaa2abd7f63 | [
"Apache-2.0"
] | permissive | orionnye/python-aiplatform | 746e3df0c75025582af38223829faeb2656dc653 | e3ea683bf754832340853a15bdb0a0662500a70f | refs/heads/main | 2023-08-03T06:14:50.689185 | 2021-09-24T03:24:14 | 2021-09-24T03:24:14 | 410,091,957 | 1 | 0 | Apache-2.0 | 2021-09-24T20:21:01 | 2021-09-24T20:21:00 | null | UTF-8 | Python | false | false | 5,038 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1beta1.types import deployed_index_ref
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
# NOTE(review): generated proto-plus wrapper; the field numbers below mirror
# the .proto tag numbers and must not be changed by hand.
__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1beta1", manifest={"Index",},
)
class Index(proto.Message):
    r"""A representation of a collection of database items organized
    in a way that allows for approximate nearest neighbor (a.k.a
    ANN) algorithms search.
    Attributes:
        name (str):
            Output only. The resource name of the Index.
        display_name (str):
            Required. The display name of the Index.
            The name can be up to 128 characters long and
            can be consist of any UTF-8 characters.
        description (str):
            The description of the Index.
        metadata_schema_uri (str):
            Immutable. Points to a YAML file stored on Google Cloud
            Storage describing additional information about the Index,
            that is specific to it. Unset if the Index does not have any
            additional information. The schema is defined as an OpenAPI
            3.0.2 `Schema
            Object <https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject>`__.
            Note: The URI given on output will be immutable and probably
            different, including the URI scheme, than the one given on
            input. The output URI will point to a location where the
            user only has a read access.
        metadata (google.protobuf.struct_pb2.Value):
            An additional information about the Index; the schema of the
            metadata can be found in
            [metadata_schema][google.cloud.aiplatform.v1beta1.Index.metadata_schema_uri].
        deployed_indexes (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedIndexRef]):
            Output only. The pointers to DeployedIndexes
            created from this Index. An Index can be only
            deleted if all its DeployedIndexes had been
            undeployed first.
        etag (str):
            Used to perform consistent read-modify-write
            updates. If not set, a blind "overwrite" update
            happens.
        labels (Sequence[google.cloud.aiplatform_v1beta1.types.Index.LabelsEntry]):
            The labels with user-defined metadata to
            organize your Indexes.
            Label keys and values can be no longer than 64
            characters (Unicode codepoints), can only
            contain lowercase letters, numeric characters,
            underscores and dashes. International characters
            are allowed.
            See https://goo.gl/xmQnxf for more information
            and examples of labels.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Timestamp when this Index was
            created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Timestamp when this Index was most recently
            updated. This also includes any update to the contents of
            the Index. Note that Operations working on this Index may
            have their
            [Operations.metadata.generic_metadata.update_time]
            [google.cloud.aiplatform.v1beta1.GenericOperationMetadata.update_time]
            a little after the value of this timestamp, yet that does
            not mean their results are not already reflected in the
            Index. Result of any successfully completed Operation on the
            Index is reflected in it.
    """
    name = proto.Field(proto.STRING, number=1,)
    display_name = proto.Field(proto.STRING, number=2,)
    description = proto.Field(proto.STRING, number=3,)
    metadata_schema_uri = proto.Field(proto.STRING, number=4,)
    # Tag number 5 is skipped here -- presumably reserved in the upstream
    # .proto definition; TODO confirm.
    metadata = proto.Field(proto.MESSAGE, number=6, message=struct_pb2.Value,)
    deployed_indexes = proto.RepeatedField(
        proto.MESSAGE, number=7, message=deployed_index_ref.DeployedIndexRef,
    )
    etag = proto.Field(proto.STRING, number=8,)
    labels = proto.MapField(proto.STRING, proto.STRING, number=9,)
    create_time = proto.Field(
        proto.MESSAGE, number=10, message=timestamp_pb2.Timestamp,
    )
    update_time = proto.Field(
        proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp,
    )
# Re-export exactly the names declared in the proto module manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | noreply@github.com |
1ba39a261ac7344665d4fd8f17200b7e46f003d4 | 67b10cc09fa31d82c3c1961366ae56875449185b | /Jerry/Ling-4375473-hw6/exer2.py | e6cb8acb19ebffacd46ec7623e85c0bbebe07d38 | [
"WTFPL"
] | permissive | Moelf/S19-129L | b698b65ab138d2ecacf6bb773afc0f8c620557c7 | 6085ad5b1e09aedc81319090f1187c0d9bee423e | refs/heads/master | 2020-04-18T08:48:33.925210 | 2019-03-23T19:42:55 | 2019-03-23T19:42:55 | 167,409,038 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,352 | py | import math
import numpy as np
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# constants given in the problem (read interactively from stdin)
N = int(input("N = "))
mu = int(input("mu = "))
sigma = float(input("sigma = "))
# pick 2000 evenly spaced fixed x's, S>0
xs = np.linspace(0, 20, 2000)
# normal(3,0.5) virtually don't have anything smaller than 0,
# it's 6 sigmas away
# the y's will be drawn from this distribution by G() below
def G(n):
    """Draw n samples from the Normal(mu, sigma) distribution for y."""
    return np.random.normal(loc=mu, scale=sigma, size=n)
# Monte-Carlo estimate of the inner integral at a fixed x, using `sample`
# draws of y (1500 by default).
def integrand(x, sample=1500):
    """Estimate the mean of exp(-x-y) * (x+y)**N over y ~ Normal(mu, sigma)."""
    draws = G(sample)
    return np.sum(np.exp(-x - draws) * (x + draws) ** N) / sample
# evaluate the MC integrand once at each of the 2000 fixed x's
ys = [integrand(x) for x in xs]
# normalization
integral = np.trapz(ys, xs)
# ubound stays 0 (i.e. the full range is shaded) if the 5% threshold is
# never crossed in the loop below.
ubound = 0
for i in range(1, len(ys)):
    # walk in from the tail until the tail integral exceeds 5% of the total
    if np.trapz(ys[-i:], xs[-i:]) > 0.05*integral:
        ubound = -i
        print("We are excluding S > {:.2f}, at 95% CL".format(xs[-i]))
        break
# plot all y values overlapping
plt.scatter(xs, ys, marker='.', label="pdf")
plt.title("MC integral bands")
plt.xlabel("x")
plt.ylim(bottom=0)
plt.ylabel("$f(x) = \int_0^{\infty} exp(-x-y)\cdot (x+y)^N$")
plt.fill_between(xs[ubound:], 0, ys[ubound:],
                 label="5 % integral", color="green")
plt.legend()
plt.show()
| [
"jerryling315@gmail.com"
] | jerryling315@gmail.com |
917db0f72decd79edcafec2875bee0865c643e64 | 26d6c34df00a229dc85ad7326de6cb5672be7acc | /msgraph-cli-extensions/v1_0/identitydirmgt_v1_0/azext_identitydirmgt_v1_0/vendored_sdks/identitydirmgt/aio/_identity_directory_management.py | 8f6baa89e013ef5e2e83f0e742393afc730fc7cf | [
"MIT"
] | permissive | BrianTJackett/msgraph-cli | 87f92471f68f85e44872939d876b9ff5f0ae6b2c | 78a4b1c73a23b85c070fed2fbca93758733f620e | refs/heads/main | 2023-06-23T21:31:53.306655 | 2021-07-09T07:58:56 | 2021-07-09T07:58:56 | 386,993,555 | 0 | 0 | NOASSERTION | 2021-07-17T16:56:05 | 2021-07-17T16:56:05 | null | UTF-8 | Python | false | false | 9,603 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import IdentityDirectoryManagementConfiguration
from .operations import ContactsOrgContactOperations
from .operations import ContactsOperations
from .operations import ContractsContractOperations
from .operations import ContractsOperations
from .operations import DevicesDeviceOperations
from .operations import DevicesOperations
from .operations import DirectoryDirectoryOperations
from .operations import DirectoryOperations
from .operations import DirectoryAdministrativeUnitsOperations
from .operations import DirectoryRolesDirectoryRoleOperations
from .operations import DirectoryRolesOperations
from .operations import DirectoryRoleTemplatesDirectoryRoleTemplateOperations
from .operations import DirectoryRoleTemplatesOperations
from .operations import DomainsDomainOperations
from .operations import DomainsOperations
from .operations import OrganizationOrganizationOperations
from .operations import OrganizationOperations
from .operations import SubscribedSkusSubscribedSkuOperations
from .operations import UsersOperations
from .. import models
class IdentityDirectoryManagement(object):
    """IdentityDirectoryManagement.
    :ivar contacts_org_contact: ContactsOrgContactOperations operations
    :vartype contacts_org_contact: identity_directory_management.aio.operations.ContactsOrgContactOperations
    :ivar contacts: ContactsOperations operations
    :vartype contacts: identity_directory_management.aio.operations.ContactsOperations
    :ivar contracts_contract: ContractsContractOperations operations
    :vartype contracts_contract: identity_directory_management.aio.operations.ContractsContractOperations
    :ivar contracts: ContractsOperations operations
    :vartype contracts: identity_directory_management.aio.operations.ContractsOperations
    :ivar devices_device: DevicesDeviceOperations operations
    :vartype devices_device: identity_directory_management.aio.operations.DevicesDeviceOperations
    :ivar devices: DevicesOperations operations
    :vartype devices: identity_directory_management.aio.operations.DevicesOperations
    :ivar directory_directory: DirectoryDirectoryOperations operations
    :vartype directory_directory: identity_directory_management.aio.operations.DirectoryDirectoryOperations
    :ivar directory: DirectoryOperations operations
    :vartype directory: identity_directory_management.aio.operations.DirectoryOperations
    :ivar directory_administrative_units: DirectoryAdministrativeUnitsOperations operations
    :vartype directory_administrative_units: identity_directory_management.aio.operations.DirectoryAdministrativeUnitsOperations
    :ivar directory_roles_directory_role: DirectoryRolesDirectoryRoleOperations operations
    :vartype directory_roles_directory_role: identity_directory_management.aio.operations.DirectoryRolesDirectoryRoleOperations
    :ivar directory_roles: DirectoryRolesOperations operations
    :vartype directory_roles: identity_directory_management.aio.operations.DirectoryRolesOperations
    :ivar directory_role_templates_directory_role_template: DirectoryRoleTemplatesDirectoryRoleTemplateOperations operations
    :vartype directory_role_templates_directory_role_template: identity_directory_management.aio.operations.DirectoryRoleTemplatesDirectoryRoleTemplateOperations
    :ivar directory_role_templates: DirectoryRoleTemplatesOperations operations
    :vartype directory_role_templates: identity_directory_management.aio.operations.DirectoryRoleTemplatesOperations
    :ivar domains_domain: DomainsDomainOperations operations
    :vartype domains_domain: identity_directory_management.aio.operations.DomainsDomainOperations
    :ivar domains: DomainsOperations operations
    :vartype domains: identity_directory_management.aio.operations.DomainsOperations
    :ivar organization_organization: OrganizationOrganizationOperations operations
    :vartype organization_organization: identity_directory_management.aio.operations.OrganizationOrganizationOperations
    :ivar organization: OrganizationOperations operations
    :vartype organization: identity_directory_management.aio.operations.OrganizationOperations
    :ivar subscribed_skus_subscribed_sku: SubscribedSkusSubscribedSkuOperations operations
    :vartype subscribed_skus_subscribed_sku: identity_directory_management.aio.operations.SubscribedSkusSubscribedSkuOperations
    :ivar users: UsersOperations operations
    :vartype users: identity_directory_management.aio.operations.UsersOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param top: Show only the first n items.
    :type top: int
    :param skip: Skip the first n items.
    :type skip: int
    :param search: Search items by search phrases.
    :type search: str
    :param filter: Filter items by property values.
    :type filter: str
    :param count: Include count of items.
    :type count: bool
    :param str base_url: Service URL
    """
    def __init__(
        self,
        credential: "AsyncTokenCredential",
        top: Optional[int] = None,
        skip: Optional[int] = None,
        search: Optional[str] = None,
        filter: Optional[str] = None,
        count: Optional[bool] = None,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        # Default to the Microsoft Graph v1.0 endpoint when none is supplied.
        if not base_url:
            base_url = 'https://graph.microsoft.com/v1.0'
        self._config = IdentityDirectoryManagementConfiguration(credential, top, skip, search, filter, count, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
        # Collect every model class from the generated models module so the
        # (de)serializers can resolve type names.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)
        # One attribute per operation group; each shares the same pipeline
        # client, configuration and (de)serializers.
        self.contacts_org_contact = ContactsOrgContactOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.contacts = ContactsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.contracts_contract = ContractsContractOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.contracts = ContractsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.devices_device = DevicesDeviceOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.devices = DevicesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.directory_directory = DirectoryDirectoryOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.directory = DirectoryOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.directory_administrative_units = DirectoryAdministrativeUnitsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.directory_roles_directory_role = DirectoryRolesDirectoryRoleOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.directory_roles = DirectoryRolesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.directory_role_templates_directory_role_template = DirectoryRoleTemplatesDirectoryRoleTemplateOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.directory_role_templates = DirectoryRoleTemplatesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.domains_domain = DomainsDomainOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.domains = DomainsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.organization_organization = OrganizationOrganizationOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.organization = OrganizationOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.subscribed_skus_subscribed_sku = SubscribedSkusSubscribedSkuOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.users = UsersOperations(
            self._client, self._config, self._serialize, self._deserialize)
    async def close(self) -> None:
        """Close the underlying async pipeline client."""
        await self._client.close()
    async def __aenter__(self) -> "IdentityDirectoryManagement":
        """Enter the async context by delegating to the pipeline client."""
        await self._client.__aenter__()
        return self
    async def __aexit__(self, *exc_details) -> None:
        """Exit the async context by delegating to the pipeline client."""
        await self._client.__aexit__(*exc_details)
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
c0d9310f0cd5790e4e0888b620c63cf325dc4d58 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-aom/huaweicloudsdkaom/v2/model/list_log_items_response.py | 4e3d6377a22a133cbef0e053c955f2b4d0817543 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 5,115 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListLogItemsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'error_code': 'str',
'error_message': 'str',
'result': 'str'
}
attribute_map = {
'error_code': 'errorCode',
'error_message': 'errorMessage',
'result': 'result'
}
def __init__(self, error_code=None, error_message=None, result=None):
"""ListLogItemsResponse
The model defined in huaweicloud sdk
:param error_code: 响应码,SVCSTG_AMS_2000000代表正常返回。
:type error_code: str
:param error_message: 响应信息描述。
:type error_message: str
:param result: 查询结果元数据信息,包括返回总数及结果。
:type result: str
"""
super(ListLogItemsResponse, self).__init__()
self._error_code = None
self._error_message = None
self._result = None
self.discriminator = None
if error_code is not None:
self.error_code = error_code
if error_message is not None:
self.error_message = error_message
if result is not None:
self.result = result
@property
def error_code(self):
"""Gets the error_code of this ListLogItemsResponse.
响应码,SVCSTG_AMS_2000000代表正常返回。
:return: The error_code of this ListLogItemsResponse.
:rtype: str
"""
return self._error_code
@error_code.setter
def error_code(self, error_code):
"""Sets the error_code of this ListLogItemsResponse.
响应码,SVCSTG_AMS_2000000代表正常返回。
:param error_code: The error_code of this ListLogItemsResponse.
:type error_code: str
"""
self._error_code = error_code
@property
def error_message(self):
"""Gets the error_message of this ListLogItemsResponse.
响应信息描述。
:return: The error_message of this ListLogItemsResponse.
:rtype: str
"""
return self._error_message
@error_message.setter
def error_message(self, error_message):
"""Sets the error_message of this ListLogItemsResponse.
响应信息描述。
:param error_message: The error_message of this ListLogItemsResponse.
:type error_message: str
"""
self._error_message = error_message
@property
def result(self):
"""Gets the result of this ListLogItemsResponse.
查询结果元数据信息,包括返回总数及结果。
:return: The result of this ListLogItemsResponse.
:rtype: str
"""
return self._result
@result.setter
def result(self, result):
"""Sets the result of this ListLogItemsResponse.
查询结果元数据信息,包括返回总数及结果。
:param result: The result of this ListLogItemsResponse.
:type result: str
"""
self._result = result
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Legacy Python 2 workaround so non-ASCII payloads serialize cleanly.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        # sanitize_for_serialization is provided by the SDK core package.
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        # Delegate to the JSON representation built by to_str().
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListLogItemsResponse):
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        # Python 2 does not derive __ne__ from __eq__, so it is defined explicitly.
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
25ab3da1de0c4e34d1d09e361769c36803cd8317 | e37a1088cd272a88765c4c232f4432bc43b8b558 | /Cyber_News/game_pages/migrations/0002_auto_20191207_0122.py | 8e2faed42baefe1ef7c535b599ee2f42f5545b11 | [] | no_license | Farad2020/Cyber_News | 79931c613a7e766b0bcf92b94fc9e077c1113db4 | 46b6001504d7e85b014306c2bca9eee58c43611f | refs/heads/master | 2020-08-29T01:18:51.879649 | 2019-12-10T13:24:45 | 2019-12-10T13:24:45 | 217,876,405 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | # Generated by Django 2.2.6 on 2019-12-06 19:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the rater FK to RatingSystem and a followers M2M to Game."""
    # NOTE(review): `initial = True` on a migration that depends on
    # '0001_initial' is unusual for generated code - confirm before relying on it.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('game_pages', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='ratingsystem',
            name='rater_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='game',
            name='followers',
            # NOTE(review): null=True has no effect on ManyToManyField
            # (Django warns with fields.W340); harmless but redundant.
            field=models.ManyToManyField(blank=True, default=None, null=True, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"fnirc@mail.ru"
] | fnirc@mail.ru |
3879b85ac5c2d9040cb6c9f5d03562f767ce762e | 4d2979e9b1804a1b8f37353cc9aae70e989390db | /ngrams.py | 5281a412c203f5a95ef17656e37d49523b888497 | [] | no_license | chjunginger/charts2 | a4fed8ef18aed7a4eae9f497831e4e5f3cfa4692 | eaed6de715bc1cc814f66f2d49b72e03e10766a5 | refs/heads/master | 2016-09-05T19:37:09.649356 | 2015-06-25T09:02:39 | 2015-06-25T09:02:39 | 31,538,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | __author__ = 'christianjunginger'
#gets all ngrams and counts how many distinct songs use it (per year)
import re
import json
n=3 #length of ngram
def PreprocessLyric(lyric):
    """Lowercase a lyric, strip punctuation (apostrophes kept), and tokenize."""
    cleaned = re.sub(r"[^\w\s']", ' ', lyric)
    return cleaned.lower().split()
def GetNgramSongCountsForYear(AllLyricsOfYear, n=3):
    """Count, for each n-gram, how many distinct songs of the year contain it.

    Fixes: no longer mutates the module-global ``wordcount`` (callers already
    use the return value), and the n-gram join is built from ``n`` instead of
    a hard-coded 3-slot format string.

    :param AllLyricsOfYear: mapping of chart position -> raw lyric text
    :param n: n-gram length; defaults to 3, matching the module-level setting
    :return: dict mapping space-joined n-gram -> number of songs containing it
    """
    counts = {}
    for position in AllLyricsOfYear:
        tokens = PreprocessLyric(AllLyricsOfYear[position])
        # A set guarantees each n-gram is counted at most once per song.
        seen = set()
        for i in range(len(tokens) - (n - 1)):
            ngram = ' '.join(tokens[i:i + n])
            if ngram not in seen:
                seen.add(ngram)
                counts[ngram] = counts.get(ngram, 0) + 1
    return counts
# For each chart year, count per-ngram song frequencies and dump them sorted.
for Year in range(1956,1957):
    print(Year)
    # Fresh global counter per year; GetNgramSongCountsForYear fills it in.
    wordcount={}
    AllLyricsOfYear=json.loads(open('prep_lyrics_%s.txt'%Year).read())
    # NOTE(review): opened in binary mode but str records are written -
    # Python 2 era code; would raise TypeError on Python 3.
    fo=open("%i_grms_%s.txt"%(n,Year),"wb")
    NgramCountList=GetNgramSongCountsForYear(AllLyricsOfYear)
    # Write "rank,ngram,song-count" lines ordered by descending count.
    for (i,w) in enumerate(sorted(NgramCountList,key=NgramCountList.get,reverse=True)):
        fo.write ("%i,%s,%s\n"%(i+1,w,NgramCountList[w]))
    fo.close()
| [
"chjunginger@users.noreply.github.com"
] | chjunginger@users.noreply.github.com |
8a832fba39554e5d8e91909eebf256b825a84b89 | 53e8762caede13acfdc2071a2b8def57128dd3e4 | /Arshia_phase_2/hyperstar_for_glove_w2v/identity3.py | 1b153e432a90b6ccbdd0720bd66358b1db24de53 | [] | no_license | manikyaswathi/SemEval2018HypernymDiscovery | 8c17cf4d16fa48b2719381752b18386acde6c4ee | 03ee054bf0266fed5337b2a8bba14e8d7fec31aa | refs/heads/master | 2020-03-10T14:52:28.582128 | 2017-12-15T20:40:32 | 2017-12-15T20:40:32 | 129,437,112 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,389 | py | #!/usr/bin/env python
import argparse
import csv
import sys
import gensim
import numpy as np
import os
parser = argparse.ArgumentParser(description='Identity Evaluation.')
parser.add_argument('--w2v', default='all.norm-sz100-w10-cb0-it1-min100.w2v', nargs='?', help='Path to the word2vec model.')
parser.add_argument('--subsumptions', default='subsumptions-test.txt', nargs='?', help='Path to the test subsumptions.')
args = vars(parser.parse_args())
# Load the pre-trained embeddings; init_sims(replace=True) L2-normalizes in place.
w2v = gensim.models.KeyedVectors.load_word2vec_format(args['w2v'], binary=True, unicode_errors='ignore')
w2v.init_sims(replace=True)
# Each row of the subsumptions file is a tab-separated (hyponym, hypernym) pair.
subsumptions_test = []
with open(args['subsumptions']) as f:
    reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
    for row in reader:
        subsumptions_test.append((row[0], row[1]))
def compute_ats(measures):
    """Average hit rate at each cutoff (A@k) over all test pairs."""
    denom = len(subsumptions_test)
    return [sum(hits.values()) / denom for hits in measures]
def compute_auc(ats):
    """Trapezoidal approximation of the area under the A@k curve (k = 1..10)."""
    return sum(a + b for a, b in zip(ats, ats[1:])) / 2 / 10
def sort_list(hypernym_dict):
    """Return the candidate words ordered by descending count."""
    return sorted(hypernym_dict, key=hypernym_dict.get, reverse=True)
# measures[j] maps (hyponym, hypernym) -> 1./0. hit flag for cutoff k = j+1.
measures = [{} for _ in range(0, 10)]
file_ptr_ms = open("i_test_candidates3",'w')
file_ptr_hypo = open("i_test_hypo3",'w')
file_ptr_gold = open("i_test_gold3",'w')
prev_hypo = ''
gold_list = ''
out_ms = ''
count = 0
temp_hyper_list = {}
for i, (hyponym, hypernym) in enumerate(subsumptions_test):
    # The hyponym's 10 nearest embedding neighbours act as hypernym candidates.
    actual = [w for w, _ in w2v.most_similar(positive=[w2v[hyponym]], topn=10)]
    if count==0 or prev_hypo == hyponym :
        # Same hyponym as before: keep accumulating gold answers and candidates.
        gold_list = gold_list + hypernym + '\t'
        for word in actual:
            if word not in temp_hyper_list.keys() :
                temp_hyper_list[word]=1
            else:
                temp_hyper_list[word]+=1
        prev_hypo = hyponym
        count = 1
    elif prev_hypo != hyponym :
        # New hyponym: flush the previous hyponym's rows to the three files.
        gold_list = gold_list + '\n'
        # NOTE(review): sorted_hyper_list is computed but the loop below
        # iterates the unsorted dict - the sorted order was presumably intended.
        sorted_hyper_list = sort_list(temp_hyper_list)
        for word in temp_hyper_list :
            # NOTE(review): str(word.encode("utf8")) yields "b'...'" on
            # Python 3; this script appears to target Python 2.
            out_ms = out_ms + str(word.encode("utf8")) + "\t"
        out_ms = out_ms + '\n'
        file_ptr_ms.write(out_ms)
        file_ptr_hypo.write(prev_hypo + '\n')
        file_ptr_gold.write(gold_list)
        gold_list = ''
        out_ms = ''
        temp_hyper_list = {}
        prev_hypo = hyponym
        gold_list = gold_list + hypernym + '\t'
        for word in actual:
            if word not in temp_hyper_list.keys() :
                temp_hyper_list[word]=1
            else:
                temp_hyper_list[word]+=1
    # Record a hit for every cutoff k where the gold hypernym is in the top k.
    for j in range(0, len(measures)):
        measures[j][(hyponym, hypernym)] = 1. if hypernym in actual[:j + 1] else 0.
    if (i + 1) % 100 == 0:
        ats = compute_ats(measures)
        auc = compute_auc(ats)
        ats_string = ', '.join(['A@%d=%.6f' % (j + 1, ats[j]) for j in range(len(ats))])
        print('%d examples out of %d done for identity: %s. AUC=%.6f.' % (
            i + 1,
            len(subsumptions_test),
            ats_string,
            auc))
# NOTE(review): the final hyponym's accumulated candidates are never flushed
# to the output files before closing - likely an oversight.
file_ptr_ms.close()
file_ptr_hypo.close()
file_ptr_gold.close()
ats = [sum(measures[j].values()) / len(subsumptions_test) for j in range(len(measures))]
auc = sum([ats[j] + ats[j + 1] for j in range(0, len(ats) - 1)]) / 2 / 10
ats_string = ', '.join(['A@%d=%.4f' % (j + 1, ats[j]) for j in range(len(ats))])
print('For identity: overall %s. AUC=%.6f.' % (ats_string, auc))
| [
"noreply-github@umn.edu"
] | noreply-github@umn.edu |
02afa1f3ac1912f2e42968b1a9f8c171135d839e | fa795af74cda4d92604fa3332179ba939460a9b5 | /JUBioactivities/QSARDB/Papa_Property_pkNO3_Degradation_by_NO3_radicals_as_logkNO3/__init__.py | ed8bd3df2d2321a9d042e6cc65b02e98c183d8a1 | [] | no_license | JenniferHemmerich/JUBioactivities | 7329a89db0e2790aff9bcfe153ab4dcd2c19a489 | 87054ac135d91e034dcfb6028562b4a7930a3433 | refs/heads/master | 2020-04-26T03:56:36.177955 | 2019-03-07T13:08:08 | 2019-03-07T13:08:08 | 173,284,341 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | import os.path
import glob
import pandas as pd
import numpy as np
from ... import utils
# Sorted .mol compound files, followed by the property table as the last element.
__data_src__ = list(sorted(glob.glob(os.path.join(__path__[0], "compounds/0*.mol"))))
__data_src__ += [os.path.join(__path__[0], "properties/pkNO3.txt")]
def read_data(raw=False):
    """Load the pkNO3 degradation index as a DataFrame keyed by InChI.

    :param raw: when True, skip row filtering and duplicate handling.
    """
    values = np.loadtxt(__data_src__[-1], usecols=1, skiprows=1, delimiter='\t')
    df = pd.DataFrame({'pkNO3_Index_Papa': values}, index=__data_src__[:-1])
    df.index = utils.convert_index(df.index, filenames=True)
    if raw:
        return df
    df = utils.drop_rows(df)
    return utils.handle_duplicates(df, type='cont')
def read_structures(raw=False):
    """Load SMILES structures for the compound files, indexed by InChI.

    :param raw: when True, skip row filtering and duplicate handling.
    """
    df = pd.DataFrame(index=__data_src__[:-1])
    df = utils.get_smiles_from_index(df, filenames=True)
    df.index = utils.convert_index(df.index, filenames=True)
    if raw:
        return df
    df = utils.drop_rows(df)
    return utils.handle_duplicates(df, type='str')
"jennifer.hemmerich@univie.ac.at"
] | jennifer.hemmerich@univie.ac.at |
7f2fb82245f1aad011c002e5e5e19e127851738f | 0561de867928b827115a855ed3a9943657e7ad71 | /src/api/config/config.py | 91fa24bf24e3b429a99776e3fc210e19d49cd0da | [] | no_license | homemix/flask_api_sample | c3c57742e2dc07f5a0451a5b6508551716e262ae | 0e23a61cdfb26fb8ae32a004d4811adebce3e7ef | refs/heads/master | 2023-06-15T16:09:11.353193 | 2021-07-01T10:02:35 | 2021-07-01T10:02:35 | 381,985,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | import os
class Config(object):
    """Base configuration shared by all environments."""
    # NOTE(review): DEBUG defaults to True for every subclass that does not
    # override it (including ProductionConfig) - confirm this is intended.
    DEBUG = True
    TESTING = False
    SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProductionConfig(Config):
    """Production environment settings."""
    SQLALCHEMY_DATABASE_URI = "mysql+pymysql://<db_url>:<port>/<db_name>"
    SQLALCHEMY_ECHO = False
    # NOTE(review): secrets are hard-coded placeholders; load real values
    # from the environment or a secrets store before deploying.
    JWT_SECRET_KEY = 'JWT-SECRET'
    SECRET_KEY = 'SECRET-KEY'
    SECURITY_PASSWORD_SALT = 'SECRET-KEY-PASSWORD'
class DevelopmentConfig(Config):
    """Development environment settings."""
    DEBUG = True
    # Database URI is read from the environment at import time; None if unset.
    SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI')
    SQLALCHEMY_ECHO = False
    JWT_SECRET_KEY = 'JWT-SECRET'
    SECRET_KEY = 'SECRET-KEY'
    SECURITY_PASSWORD_SALT = 'SECRET-KEY-PASSWORD'
class TestingConfig(Config):
    """Test environment settings."""
    TESTING = True
    SQLALCHEMY_DATABASE_URI = "mysql+pymysql://<db_url>:<port>/<db_name>"
    SQLALCHEMY_ECHO = False  # fix: this flag was assigned twice with the same value
    JWT_SECRET_KEY = 'JWT-SECRET'
    SECRET_KEY = 'SECRET-KEY'
    SECURITY_PASSWORD_SALT = 'SECRET-KEY-PASSWORD'
| [
"kenmutati@gmail.com"
] | kenmutati@gmail.com |
b36a400f9ec8c7f93a466522a7d0624e68a7e56a | 16e6e7171f0611c8f17a355c48bec3b003f65b06 | /10PRINT.py | d3b1e2a87c9917f7768b77137d35dbfb53ed33c9 | [] | no_license | SamR5/Turtle-drawings | ab92e9c27c744384b7e9884e2945f2f8505d1436 | 259bb8b84ec510b4d94f8b36d742d77e76c1c271 | refs/heads/master | 2020-08-08T05:07:17.651595 | 2019-10-08T19:16:53 | 2019-10-08T19:16:53 | 213,721,801 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,767 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import random as r
import turtle
import math as m
def random01grid(W, H):
    """Return an H-row by W-column grid of independently random 0/1 cells."""
    return [[r.choice((0, 1)) for _ in range(W)] for _ in range(H)]
def filter_squares(sequences):
    """Break up "closed square" patterns so the rendered maze stays open.

    Whenever the 2x2 neighbourhood matches the closed configuration
    (0,1 / 1,0), one of its two cells is flipped at random, in place.
    """
    height = len(sequences)
    width = len(sequences[0])
    for i in range(1, height - 1):
        for j in range(1, width):
            closed = (sequences[i-1][j-1] == 0
                      and sequences[i-1][j] == 1
                      and sequences[i][j-1] == 1
                      and sequences[i][j] == 0)
            if closed:
                # Flip one of the two cells, picked with equal probability.
                if r.random() > 0.5:
                    sequences[i][j] = 1
                else:
                    sequences[i-1][j] = 0
    return sequences
def rand_bars(seqs, angle, size):
    """Render the 0/1 grid as 10PRINT-style diagonal strokes with turtle.

    :param seqs: grid of 0/1 cells, one row per screen row
    :param angle: stroke angle in degrees from horizontal
    :param size: cell size in pixels
    """
    T = turtle.Turtle(visible=False)
    T.pensize(2)
    # Batch drawing updates so rendering is fast.
    turtle.tracer(50, 200)
    turtle.bgcolor('black')
    # Start at the top-left corner of a 1920x1080 canvas.
    T.up(); T.goto(-1920//2, 1080//2); T.down()
    #T.up(); T.goto(-700, 500); T.down()
    s2 = size/2
    # Stroke length so its horizontal span equals one cell at the given angle.
    size2 = float(size)/(m.sin(m.radians(angle)))
    T.pencolor('orange')
    for seq in seqs:
        xline, yline = T.position()
        for i in seq:
            x, y = T.position()
            if i:
                # 26.56 5**0.5
                # 1 -> downward diagonal starting half a cell above the row line.
                T.up(); T.goto(x, yline+s2); T.down()
                T.right(angle); T.forward(size2); T.left(angle)
            else:
                # 0 -> upward diagonal starting half a cell below the row line.
                T.up(); T.goto(x, yline-s2); T.down()
                T.left(angle); T.forward(size2); T.right(angle)
        # Carriage return: back to the left edge, one row down.
        T.up(); T.goto(xline, yline-size); T.down()
    turtle.update()
if __name__ == "__main__":
    # 128 x 72 cells at size 15 fills a 1920x1080 canvas.
    rand_bars(filter_squares(random01grid(128, 72)), angle=45, size=15)
    #rand_bars(random01grid(128, 72), angle=45, size=15)
    pass
| [
"rami.samuel@gmx.fr"
] | rami.samuel@gmx.fr |
2cc437f24c473125f7825b073b35dbc910657b40 | 963cac9e78c4b742f7e7800200de8d1582799955 | /lib/veetou/pzh/pzhmodel_.py | fe393d78cc1da6d7aec46d2741a126f14b156e44 | [] | no_license | ptomulik/veetou | c79ceb3ca3d7ef7b261b2219489b6f0a7a83e1fa | b30be2a604f4426f832ec9805547ecd6cc9083fe | refs/heads/master | 2021-01-22T17:28:57.271251 | 2019-01-05T01:46:43 | 2020-05-04T16:23:44 | 85,016,513 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,677 | py | # -*- coding: utf8 -*-
"""`veetou.pzh.pzmodel_`
Defines data model for pzh (Protokół Zaliczeń - HTML)
"""
from veetou.model import *
__all__ = ( 'PzHReport',
            'PzHPreamble',
            'PzHTr',
            'PzHSummary',
            'PzHDataModel' )
##def strn(s=None):
##    if s is None:
##        return None
##    else:
##        return str(s)
# Record types for the pzh sheets, created via the model's declare() factory:
# each call names the type, lists its field names, and sets the plural
# (table) name used by the data model below.
PzHReport = declare( DataType, 'PzhReport',
    ('source', 'datetime'),
#    5 * (strn,),
    plural = 'PzhReports'
)
PzHPreamble = declare( DataType, 'PzhPreamble',
    ( 'title', 'sheet_id', 'semester_code', 'sheet_serie',
      'sheet_number', 'sheet_type', 'sheet_state', 'subj_name',
      'subj_department', 'subj_code', 'subj_grade_type', 'subj_tutor',
      'return_date', 'approved_by', 'modified_datetime', 'modified_date',
      'modified_time', 'return_deadline'),
##    17 * (strn,),
    plural = 'PzhPreambles'
)
PzHTr = declare( DataType, 'PzhTr',
    ( 'tr_ord_no', 'student_name', 'student_index', 'subj_grade',
      'subj_grade_final', 'subj_grade_project', 'subj_grade_lecture',
      'subj_grade_class', 'subj_grade_lab', 'subj_grade_seminar',
      'subj_grade_p', 'subj_grade_n', 'edited_by', 'edited_datetime',
      'edited_date', 'edited_time' ),
##    16 * (strn,),
    plural = 'PzhTrs'
)
PzHSummary = declare( DataType, 'PzhSummary',
    ( 'caption', 'th', 'content' ),
##    3 * (strn,),
    plural = 'PzhSummaries'
)
class PzHDataModel(DataModel):
    """Relational model wiring the pzh record types into tables and junctions."""
    _datatypes = ( PzHReport,
                   PzHPreamble,
                   PzHTr,
                   PzHSummary )
    def _mk_initial_tables(self):
        # One table instance per datatype, keyed by its table name.
        tables = map( lambda t: (tablename(t), t), map(lambda dt : tableclass(dt)(), self._datatypes))
        self.tables.update(tables)
    def _mk_initial_relations(self):
        # Triples of (relation name, (left table, right table), attribute names)
        # consumed by Junction to link the tables.
        strings = ( ( 'pzh_report_preamble', ('pzh_reports', 'pzh_preambles'), ('pzh_preamble', 'pzh_report') ),
                    ( 'pzh_report_trs', ('pzh_reports', 'pzh_trs'), ('pzh_trs', 'pzh_report') ) )#,
                    #( 'report_summary', ('reports', 'summaries'), ('summary', 'report') ) )
        relations = map( lambda x : (x[0],Junction(map(self.tables.__getitem__,x[1]),x[2])), strings )
        self.relations.update(relations)
    def __init__(self):
        super().__init__()
        self._mk_initial_tables()
        self._mk_initial_relations()
    @property
    def prefix(self):
        # Namespace prefix shared by all pzh tables and relations.
        return 'pzh_'
# Local Variables:
# # tab-width:4
# # indent-tabs-mode:nil
# # End:
# vim: set syntax=python expandtab tabstop=4 shiftwidth=4:
| [
"ptomulik@meil.pw.edu.pl"
] | ptomulik@meil.pw.edu.pl |
86aa8e4a31017d6d63b19ac4cd3b040d922f3902 | 353def93fa77384ee3a5e3de98cfed318c480634 | /.history/week01/homework02/maoyanspiders/maoyanspiders/spiders/movies_20200628181659.py | ef1424ea237252dfb40fa01bde4bf24ab2c06ba7 | [] | no_license | ydbB/Python001-class01 | d680abc3ea1ccaeb610751e3488421417d381156 | ad80037ccfc68d39125fa94d2747ab7394ac1be8 | refs/heads/master | 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null | UTF-8 | Python | false | false | 2,603 | py | # -*- coding: utf-8 -*-
import scrapy
from maoyanspiders.items import MaoyanspidersItem
import lxml.etree
from bs4 import BeautifulSoup as bs
class MoviesSpider(scrapy.Spider):
    """Scrapes the Maoyan Top-100 board: film name and release time from the
    listing page, then the film's genre from its detail page."""
    name = 'movies'
    allowed_domains = ['maoyan.com']
    start_urls = ['http://maoyan.com/board/4']
    # Request headers (including a captured session cookie) needed to avoid
    # Maoyan's anti-scraping page.
    header = {
        'Content-Type': 'text/plain; charset=UTF-8',
        'Cookie' : '__mta=251934006.1593072991075.1593305918113.1593310282256.42; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593140975947.1593145813576.21; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593310282; _lxsdk_s=172f8db8281-bbf-e4f-981%7C%7C1',
        # 'Host' : 'http://www.baidu.com',
        'Origin': 'https://maoyan.com',
        'Referer': 'https://maoyan.com/board/4',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
    }
    def start_requests(self):
        url = f'https://maoyan.com/board/4'
        yield scrapy.Request(url=url, headers=self.header, callback=self.parse)
    def parse(self, response):
        # Bug fix: removed a stray `selec` token that raised NameError at runtime.
        soup = bs(response.text, 'html.parser')
        for i in soup.find_all('div', attrs={'class': 'movie-item-info'}):
            item = MaoyanspidersItem()
            title = i.find('p', attrs={'class': 'name'}).find('a')
            name = title.get('title')
            link = 'https://maoyan.com' + title.get('href')
            time = i.find('p', attrs={'class': 'releasetime'}).text
            item['films_name'] = name
            item['release_time'] = time
            print(link)
            # Follow the detail page, carrying the partial item along.
            yield scrapy.Request(url=link, headers=self.header, meta={'item': item}, callback=self.parse1)
    def parse1(self, response):
        item = response.meta['item']
        # Bug fix: parse the fetched detail page instead of a leftover local
        # debug file path (bs() was being handed a path string as markup).
        soup = bs(response.text, 'html.parser')
        # First <li> in the banner block carries the genre string.
        film_type = soup.find('div', attrs={'class': 'banner'}).find_all('li')[0].text.replace('\n', ' ')
        item['films_type'] = film_type
        print(item)
        yield item
| [
"31039587+ydbB@users.noreply.github.com"
] | 31039587+ydbB@users.noreply.github.com |
134a7fdb80d75f63678b62ea1e3d5cf549604860 | 838c166ed4b416d25bcd0089ec58c2306fe67c4c | /hsbro_port/run_all_case.py | 522714bca57908ec22b53ef371725702b10b0ec5 | [] | no_license | wuyuchao1992/hsbTest | df9f039f3ba35f2437c64383ea40bee722d7e640 | 9dd41f28778cfa984aca924d924b03abfadc2737 | refs/heads/master | 2020-04-08T21:41:49.701211 | 2019-08-15T03:08:44 | 2019-08-15T03:08:44 | 159,754,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | #coding:utf-8
import unittest
import HTMLTestRunner
import os
def all_case():
    """Discover every test*.py under the case directory and bundle them into one suite."""
    case_dir = "C:\\Python35\\hsbro_prot\\case"
    suite = unittest.TestSuite()
    discovered = unittest.defaultTestLoader.discover(case_dir, pattern="test*.py", top_level_dir=None)
    for sub_suite in discovered:
        for case in sub_suite:
            suite.addTests(case)
    print(suite)
    return suite
if __name__ == "__main__":
    # Bug fix: the report file must be a path inside the working directory -
    # the original passed the directory itself to open(), which fails.
    report_path = os.path.join(os.getcwd(), "report.html")
    # (Removed a dead `unittest.TextTestRunner()` that was immediately overwritten.)
    with open(report_path, "wb") as fp:
        runner = HTMLTestRunner.HTMLTestRunner(stream=fp,
                                               title=u'自动化测试报告',
                                               description=u'用例执行情况:' )
        runner.run(all_case())
| [
"550535582@qq.com"
] | 550535582@qq.com |
422ed439d6cd3fa32bc7b7b30dcfab4aeb976286 | 07926ae91fe78d850b8db916163934d1d6333371 | /contest/w29/day-of-the-programmer.py | 7ffcda39dea19bc40d4e7c2530b171b4c75289d0 | [] | no_license | wllmnc/hackerRank | a3ec8e7866474788944c765eb61cf18c714fa57b | 410bc5a2fd0139052e8180abf897ba89ecec09fa | refs/heads/master | 2021-10-22T14:04:52.181345 | 2021-10-11T05:46:58 | 2021-10-11T05:46:58 | 57,273,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | #https://www.hackerrank.com/contests/w29/challenges/day-of-the-programmer
#!/bin/python
import sys

y = int(raw_input().strip())

# Day of the Programmer is the 256th day of the year; it always falls in September.
month = '09'
if y < 1918:
    # Julian calendar: every 4th year is a leap year.
    day = '12' if y % 4 == 0 else '13'
elif y == 1918:
    # Bug fix: in 1918 Russia skipped Feb 1-13 switching to the Gregorian
    # calendar, so the 256th day landed on 26 September (the old code printed
    # an invalid date and had a `mont` typo that never took effect).
    day = '26'
else:
    # Gregorian leap-year rule.
    day = '12' if ((y % 4 == 0 and y % 100 != 0) or y % 400 == 0) else '13'
print(day + '.' + month + '.' + str(y))
| [
"noreply@github.com"
] | noreply@github.com |
558316d2d02f00c53809d2a6dc8daad74994b50e | f0b2de121b17169b3decaadc39d87de85b6b7fdb | /yahoo_options_data.py | 917423fb80897dde66956640a8432def6efe05a0 | [] | no_license | lchen0113/APT_lab1 | 531b5c6885ebee46c723f37aa2082ca3dfd2aaaa | 1c4ff3922190497b392ffdea813f92f9f9cec23e | refs/heads/master | 2016-09-06T10:23:19.837301 | 2015-09-12T04:41:22 | 2015-09-12T04:41:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | import re
import urllib
import locale
from bs4 import BeautifulSoup
def contractAsJson(filename):
    """Parse a saved Yahoo Finance options page into a quote dict.

    Returns {"currPrice": float, "dateUrls": [str], "optionQuotes": []}.
    Python 2 code (uses a print statement).
    """
    file=open(filename,'r')
    # NOTE(review): attrs here is a *set*, not a dict - presumably
    # {"class": "time_rtq_ticker"} was intended; confirm against bs4 behavior.
    soup=BeautifulSoup(file,'html.parser')
    jsonQuoteData = {"currPrice":0.0,"dateUrls":[],"optionQuotes":[]}
    # find the currPrice
    currPrice=soup.find( attrs={"class","time_rtq_ticker"})
    jsonQuoteData["currPrice"]=float(currPrice.contents[0].contents[0])
    #find date Urls
    for item in soup.find_all( href=re.compile("(\/q\/[a-z]+\?s=)[a-zA-Z_0-9]*&m=[0-9-]+" ) ):
        web_prefix="http://finance.yahoo.com"
        # NOTE(review): .replace('&','&') is a no-op; '&amp;' -> '&' was
        # presumably intended (the source may have been HTML-unescaped). Confirm.
        jsonQuoteData["dateUrls"].append( str(web_prefix + item['href'].replace('&','&')))
    #find the individual contacts
    #my code could not pass this step!!!
    print jsonQuoteData
    return jsonQuoteData
| [
"lchen0113@gmail.com"
] | lchen0113@gmail.com |
bb575f540480070a9ac4bbc049ef56876951d516 | 8d1d6da96122bb8cf9d9d5b9a04a86d005e60221 | /slicing.py | 0d59073d8189be98695b82a5ea9d74edf60fc4ef | [] | no_license | feverrro/Python | 85dd6c6bf7097b434e7509e86d2ada1b2ab09fde | a8ac41a7c3e449144b84c364e751f344bee1da2e | refs/heads/master | 2020-05-23T09:42:27.585597 | 2019-06-02T23:40:15 | 2019-06-02T23:40:15 | 186,711,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | my_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# Slicing refresher: seq[start:stop:step]; negative indices count from the end.
# (my_list re-declared here: positions 0..9 map to negative indices -10..-1.)
my_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

sample_url = 'http://questionmark.com'

print(sample_url)
print(sample_url[::-1])   # the url reversed
print(sample_url[-4:])    # top-level domain ('.com')
print(sample_url[7:])     # without the 'http://' scheme
print(sample_url[7:-4])   # host name only
"50637065+feverrro@users.noreply.github.com"
] | 50637065+feverrro@users.noreply.github.com |
6f18042887b709dbed8c5c7127dfd8b31351d80f | bafa89d999fd2a06063d7c0768670525d3b9b60e | /example/web-demo/session.py | 5aa1a3fab0336539d8305e3bffde9880af4a3c17 | [] | no_license | yifangyun/fangcloud-python-sdk | 0a54bc5054579a28d72d7aa7ac35d12f1c2ee1b4 | ffefd6d2d625841643160c3c9d5a4b55190bc49f | refs/heads/master | 2021-01-19T21:00:40.555935 | 2018-10-10T13:16:04 | 2018-10-10T13:16:04 | 88,589,490 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | import hashlib
import os
import time
def md5():
    """Return a 32-character hex token for use as a session id.

    Security fix: the digest now mixes 16 bytes of OS entropy with the
    current time - a time-only hash makes session ids guessable.
    """
    m = hashlib.md5()
    m.update(bytes(str(time.time()), encoding='utf-8'))
    m.update(os.urandom(16))
    return m.hexdigest()
class MemorySession:
    """Dict-like, per-user session backed by a process-wide in-memory store."""
    container = {}

    def __init__(self, handler):
        cookie_id = handler.get_cookie('yfy_session_id')
        if cookie_id and cookie_id in MemorySession.container:
            # Known session: reuse it.
            self.r_str = cookie_id
        elif cookie_id:
            # Cookie present but the session is gone from the store: start fresh.
            # NOTE(review): the cookie is not re-sent in this branch, so the
            # client keeps the stale id - quirk preserved as-is.
            fresh_id = md5()
            MemorySession.container[fresh_id] = {}
            self.r_str = fresh_id
        else:
            # No cookie at all: create a session and hand the id to the client.
            fresh_id = md5()
            MemorySession.container[fresh_id] = {}
            self.r_str = fresh_id
            handler.set_cookie('yfy_session_id', fresh_id, expires=time.time() + 200)

    def __setitem__(self, key, value):
        MemorySession.container[self.r_str][key] = value

    def __getitem__(self, item):
        return MemorySession.container[self.r_str].get(item, None)

    def __delitem__(self, key):
        del MemorySession.container[self.r_str][key]
] | linrenjun@egeio.com |
ef55375899974c8431de8eade1ae04cf626550e9 | 3330807a7ece9ad99048a0917f969e433fe2b512 | /IPython/external/temboo/Library/eBay/Trading/FetchToken.py | 91f7d41df326a13a8b7eab9765b0d9038024a6f9 | [
"BSD-3-Clause"
] | permissive | montyz/ipython | b8f9387a20b32e8d54453dbabc5e8cef4f2be2c7 | e9c74a856ea3841515db7b4e49de4fc9a0cfdb9d | refs/heads/master | 2021-01-15T20:08:29.242060 | 2015-02-04T17:24:51 | 2015-02-04T17:24:51 | 30,228,181 | 0 | 0 | null | 2015-02-03T06:12:11 | 2015-02-03T06:12:10 | null | UTF-8 | Python | false | false | 5,427 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# FetchToken
# Completes the authentication process by retrieving an eBay user token after they have visited the authorization URL returned by the GetSessionID Choreo and clicked "I agree".
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class FetchToken(Choreography):
    """Choreo binding that exchanges an authorized eBay session for a user token."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the FetchToken Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(FetchToken, self).__init__(temboo_session, '/Library/eBay/Trading/FetchToken')
    def new_input_set(self):
        # Factory for this choreo's input container.
        return FetchTokenInputSet()
    def _make_result_set(self, result, path):
        return FetchTokenResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        return FetchTokenChoreographyExecution(session, exec_id, path)
class FetchTokenInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the FetchToken
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Auto-generated binding: one setter per Choreo input; values are
    # forwarded verbatim to the Temboo execution engine.
    def set_AppID(self, value):
        """
        Set the value of the AppID input for this Choreo. ((required, string) The unique identifier for the application.)
        """
        super(FetchTokenInputSet, self)._set_input('AppID', value)
    def set_CertID(self, value):
        """
        Set the value of the CertID input for this Choreo. ((required, string) The certificate that authenticates the application when making API calls.)
        """
        super(FetchTokenInputSet, self)._set_input('CertID', value)
    def set_DevID(self, value):
        """
        Set the value of the DevID input for this Choreo. ((required, string) The unique identifier for the developer's account.)
        """
        super(FetchTokenInputSet, self)._set_input('DevID', value)
    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
        """
        super(FetchTokenInputSet, self)._set_input('ResponseFormat', value)
    def set_SandboxMode(self, value):
        """
        Set the value of the SandboxMode input for this Choreo. ((optional, boolean) Indicates that the request should be made to the sandbox endpoint instead of the production endpoint. Set to 1 to enable sandbox mode.)
        """
        super(FetchTokenInputSet, self)._set_input('SandboxMode', value)
    def set_SessionID(self, value):
        """
        Set the value of the SessionID input for this Choreo. ((required, string) The SessionID returned from PayPal. This gets passed to the FetchToken Choreo after the user authorizes the request.)
        """
        super(FetchTokenInputSet, self)._set_input('SessionID', value)
    def set_SiteID(self, value):
        """
        Set the value of the SiteID input for this Choreo. ((optional, string) The eBay site ID that you want to access. Defaults to 0 indicating the US site.)
        """
        super(FetchTokenInputSet, self)._set_input('SiteID', value)
    def set_Timeout(self, value):
        """
        Set the value of the Timeout input for this Choreo. ((optional, integer) The amount of time (in seconds) to poll eBay to see if your app's user has allowed or denied the request for access. Defaults to 20. Max is 60.)
        """
        super(FetchTokenInputSet, self)._set_input('Timeout', value)
class FetchTokenResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the FetchToken Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE: the parameter name shadows the built-in `str`; kept as-is
        # for compatibility with the generated Temboo SDK API.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from eBay.)
        """
        return self._output.get('Response', None)
    def get_UserToken(self):
        """
        Retrieve the value for the "UserToken" output from this Choreo execution. ((string) An eBay Auth Token which can be used to make requests the user's behalf.)
        """
        return self._output.get('UserToken', None)
class FetchTokenChoreographyExecution(ChoreographyExecution):
    # Binds an in-flight execution to the FetchToken result-set type.
    def _make_result_set(self, response, path):
        return FetchTokenResultSet(response, path)
| [
"monty@Montys-Mac-Mini.local"
] | monty@Montys-Mac-Mini.local |
e48b011a4281f1c3089723f1597597fce601faaa | 3791dbf95468f63e8b99ebb5b87609ad86dba124 | /Python/Fundamentals/type.py | 268555e012d39cd52ecff5ad24a0a03c7be6e99a | [] | no_license | Mbank8/DojoAssignments | 81c91d2dcc71664d26fa9b7b9d3d88aa928fe41e | e71a077abb9da3e55482d0396c8284e4d8aad8cf | refs/heads/master | 2020-12-02T16:45:46.285331 | 2017-09-04T19:06:39 | 2017-09-04T19:06:39 | 96,580,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py |
# Sample objects used to exercise the type checks below.
sI = 45
mI = 100
bI = 455
eI = 0
spI = -23
sS = "Rubber baby buggy bumpers"
mS = "Experience is simply the name we give our mistakes"
bS = "Tell me and I forget. Teach me and I remember. Involve me and I learn."
eS = ""
aL = [1,7,4,21]
mL = [3,5,7,34,3,2,113,65,8,89]
lL = [4,34,22,68,9,13,3,5,7,9,2,12,45,923]
eL = []
spL = ['name','address','phone number','social security number']
#these are the test objects
tester = mI
the_type = type(tester)
if the_type is int:
    # Bug fix: compare the tested *value*, not the type object itself.
    if tester >= 100:
        print("Thats a big number!")
    else:
        print("Thats a small number")
elif the_type is str:
    # Bug fix: measure the tested string, not the type object.
    if len(tester) >= 50:
        print("Long Sentence")
    else:
        print("Short sentence")
elif isinstance(tester, list):
    if len(tester) >= 10:
        print("Big List!")
    else:
        print("Short List.")
| [
"mattbank8@gmail.com"
] | mattbank8@gmail.com |
a7a89e0b98c823da3182800cda0c3e9b0acfaecc | 09a1d8a920ddb9193dfcc9b05ddd842b83b18e0d | /aerosandbox_legacy_v0/examples_legacy_v0/vlm4_conventional.py | 6d4ece711f7bb8d718aaa5d3f6e9995720f1a915 | [
"MIT"
] | permissive | aqreed/AeroSandbox | 8564b6adb1f297e94aec96872b55f59171ae8ac1 | a0c5f3b2760fcddee28cff2715eeddcb8bcbe655 | refs/heads/master | 2021-03-24T21:02:14.881986 | 2020-03-15T22:43:55 | 2020-03-15T22:43:55 | 247,564,677 | 1 | 0 | MIT | 2020-03-15T22:46:25 | 2020-03-15T22:46:24 | null | UTF-8 | Python | false | false | 3,281 | py | from aerosandbox import *
# Conventional tail-aft glider geometry: main wing plus horizontal and
# vertical stabilizers, each defined by chordwise cross-sections (XSecs).
glider = Airplane(
    name="Conventional",
    xyz_ref=[0, 0, 0], # CG location
    wings=[
        Wing(
            name="Main Wing",
            xyz_le=[0, 0, 0], # Coordinates of the wing's leading edge
            symmetric=True,
            xsecs=[ # The wing's cross ("X") sections
                WingXSec( # Root
                    xyz_le=[0, 0, 0], # Coordinates of the XSec's leading edge, relative to the wing's leading edge.
                    chord=0.18,
                    twist=2, # degrees
                    airfoil=Airfoil(name="naca4412"),
                    control_surface_type='symmetric', # Flap # Control surfaces are applied between a given XSec and the next one.
                    control_surface_deflection=0, # degrees
                    control_surface_hinge_point=0.75 # as chord fraction
                ),
                WingXSec( # Mid
                    xyz_le=[0.01, 0.5, 0],
                    chord=0.16,
                    twist=0,
                    airfoil=Airfoil(name="naca4412"),
                    control_surface_type='asymmetric', # Aileron
                    control_surface_deflection=30,
                    control_surface_hinge_point=0.75
                ),
                WingXSec( # Tip
                    xyz_le=[0.08, 1, 0.1],
                    chord=0.08,
                    twist=-2,
                    airfoil=Airfoil(name="naca4412"),
                )
            ]
        ),
        Wing(
            name="Horizontal Stabilizer",
            xyz_le=[0.6, 0, 0.1],
            symmetric=True,
            xsecs=[
                WingXSec( # root
                    xyz_le=[0, 0, 0],
                    chord=0.1,
                    twist=-10,
                    airfoil=Airfoil(name="naca0012"),
                    control_surface_type='symmetric', # Elevator
                    control_surface_deflection=0,
                    control_surface_hinge_point=0.75
                ),
                WingXSec( # tip
                    xyz_le=[0.02, 0.17, 0],
                    chord=0.08,
                    twist=-10,
                    airfoil=Airfoil(name="naca0012")
                )
            ]
        ),
        Wing(
            name="Vertical Stabilizer",
            xyz_le=[0.6, 0, 0.15],
            symmetric=False,
            xsecs=[
                WingXSec(
                    xyz_le=[0, 0, 0],
                    chord=0.1,
                    twist=0,
                    airfoil=Airfoil(name="naca0012"),
                    control_surface_type='symmetric', # Rudder
                    control_surface_deflection=0,
                    control_surface_hinge_point=0.75
                ),
                WingXSec(
                    xyz_le=[0.04, 0, 0.15],
                    chord=0.06,
                    twist=0,
                    airfoil=Airfoil(name="naca0012")
                )
            ]
        )
    ]
)
# glider.set_paneling_everywhere(20, 20)
# Build and solve the vortex-lattice analysis at the given operating point.
ap = vlm4(
    airplane=glider,
    op_point=OperatingPoint(
        velocity=10,
        alpha=5,  # angle of attack, degrees
        beta=0,   # sideslip, degrees
        p=0,
        q=0,
        r=0,
    ),
)
ap.run()
ap.draw()
# Answer you should get: (XFLR5)
# CL = 0.797
# CDi = 0.017
# CL/CDi = 47.211
| [
"peterdsharpe@gmail.com"
] | peterdsharpe@gmail.com |
4469104b4cf522163c52e095d0631a2f498a2975 | bea0fcd95b6dcfaf6e79ad476593985bdbca23be | /src/venvdir/venvs.py | 0d993b0c813808d3cd914ad87ab7ca22bbb2f5de | [] | no_license | antazoey/venvdir | 6b7ff6bedb892943e50c440ac2480c80d7705c3a | 28220fb18747045ea1747486bce73bca681f524b | refs/heads/main | 2023-05-04T22:51:44.395747 | 2020-10-21T02:14:30 | 2020-10-21T02:14:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,242 | py | import os
from venv import create as create_venv
from os.path import exists as does_path_exist
from venvdir.error import VenvDirBaseError
from venvdir._configparser import config_parser
from venvdir.util import get_default_venvs_path
from venvdir.util import remove_directory
class ManagedVirtualEnvironment:
    """Read-only, dict-like view over one named virtual-environment entry."""

    def __init__(self, name, entry):
        # `entry` is the dict-like config section backing this venv.
        self._name = name
        self._entry = entry

    def __getitem__(self, item):
        return self._entry[item]

    @property
    def name(self):
        """The venv's registered name."""
        return self._name

    @property
    def path(self):
        """Base directory recorded for this venv in the config entry."""
        return self._entry["path"]

    def get(self, item):
        """Dict-style lookup; the pseudo-key 'name' (any case) maps to the name."""
        return self._name if item.lower() == "name" else self._entry.get(item)

    def items(self):
        """(key, value) pairs of the entry, plus ('name', ...) when non-empty."""
        pairs = list(self._entry.items())
        if pairs:
            pairs.append(("name", self._name))
        return pairs

    def keys(self):
        """Entry keys, plus 'name' when the entry is non-empty."""
        key_names = list(self._entry.keys())
        if key_names:
            key_names.append("name")
        return key_names

    def _describe(self):
        return "Virtual Env: (name={}, path={})".format(self._name, self.path)

    def __repr__(self):
        return self._describe()

    def __str__(self):
        return self._describe()
def get_entries():
    """Return a ManagedVirtualEnvironment wrapper for every registered venv."""
    return [get_entry(entry_name) for entry_name in config_parser.entries]
def create_entry(name, path=None):
    """Create a new virtual environment named `name` under `path` and register it.

    When `path` is falsy, the configured default venvs directory is used.
    Raises VenvDirBaseError if an explicit base path does not exist, or the
    target venv directory already exists.
    """
    if not path:
        path = get_default_venvs_path()
    elif not does_path_exist(path):
        raise VenvDirBaseError("Base path '{}' does not exist.".format(path))
    env_path = os.path.join(path, name)
    if does_path_exist(env_path):
        raise VenvDirBaseError(
            "Virtual environment '{}' already exists.".format(env_path)
        )
    # Build the venv on disk first, then record it in the config file.
    create_venv(env_path, with_pip=True)
    config_parser.create_entry(name, path)
def add_entry(name, path):
    """Register an already-existing virtual environment without creating one.

    Raises VenvDirBaseError when `path` does not exist on disk.
    """
    if not does_path_exist(path):
        raise VenvDirBaseError("Venv path '{}' does not exist.".format(path))
    config_parser.create_entry(name, path)
def get_entry(name):
    """Fetch the config entry for `name`, wrapped as a ManagedVirtualEnvironment."""
    return ManagedVirtualEnvironment(name, config_parser.get_entry(name))
def remove_entry(name):
    """Delete the venv directory for `name` and drop it from the config."""
    entry = get_entry(name)
    # Remove the files on disk before forgetting the registration.
    remove_directory(entry.path)
    config_parser.remove_entry(name)
| [
"yingthi@live.com"
] | yingthi@live.com |
a0b4d2b6558019c0e406f1ef097a97fcefb6b50f | e5f49057eac43349a7fa999d90cb951e49617440 | /filter/docclass.py | e6a113f9f13d3049b9b891fe7adaa77184535832 | [] | no_license | Hsingmin/CI_py2.7 | 2ae9464c687a1ecfadc7928c6e4915d828ffc10e | ef2906755d498a054beec20a99c4784351816cce | refs/heads/master | 2021-08-30T06:23:09.630058 | 2017-12-16T13:01:19 | 2017-12-16T13:01:19 | 110,184,772 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,701 | py |
# docclass.py
import re
import math
def sampletrain(c1):
    """Feed a tiny fixed good/bad sample corpus into classifier `c1`."""
    samples = [
        ('Nobody owns the water.', 'good'),
        ('the quick rabbit jumps fences', 'good'),
        ('buy pharmaceuticals now', 'bad'),
        ('make quick money at the online casino', 'bad'),
        ('the quick brown fox jumps', 'good'),
    ]
    for text, label in samples:
        c1.train(text, label)
def getwords(doc):
    """Split `doc` into lowercase word features of length 3-19.

    Returns a dict mapping each unique word to 1 (presence features).
    """
    # Bug fix: the original pattern '\W*' also matches the empty string;
    # on Python >= 3.7 re.split() then cuts between every character and no
    # token survives the length filter.  '\W+' (one or more non-word
    # characters) is the intended separator.
    splitter = re.compile(r'\W+')
    words = [s.lower() for s in splitter.split(doc) if 2 < len(s) < 20]
    return dict.fromkeys(words, 1)
class classifier:
    """Base trainable classifier tracking feature and category counts."""

    def __init__(self, getfeatures, filename = None):
        # fc[feature][category] -> how often `feature` appeared in `category`
        self.fc = {}
        # cc[category] -> number of documents filed under `category`
        self.cc = {}
        # callable turning a document into a {feature: 1} dict
        self.getfeatures = getfeatures
        # per-category classification thresholds (see getthresholds)
        self.thresholds = {}

    def incf(self, f, cat):
        """Bump the count of feature `f` within category `cat`."""
        per_cat = self.fc.setdefault(f, {})
        per_cat[cat] = per_cat.get(cat, 0) + 1

    def incc(self, cat):
        """Bump the document count of category `cat`."""
        self.cc[cat] = self.cc.get(cat, 0) + 1

    def fcount(self, f, cat):
        """Times feature `f` was seen in category `cat` (0.0 when never)."""
        try:
            return float(self.fc[f][cat])
        except KeyError:
            return 0.0

    def catcount(self, cat):
        """Number of documents trained for `cat` (0 when unknown)."""
        if cat not in self.cc:
            return 0
        return float(self.cc[cat])

    def totalcount(self):
        """Total number of training documents across all categories."""
        return sum(self.cc.values())

    def categories(self):
        """All category labels seen so far."""
        return self.cc.keys()

    def train(self, item, cat):
        """Extract features from `item` and count them towards `cat`."""
        for feature in self.getfeatures(item):
            self.incf(feature, cat)
        self.incc(cat)

    def fprob(self, f, cat):
        """P(feature | category): fraction of `cat` documents containing `f`."""
        total = self.catcount(cat)
        return self.fcount(f, cat) / total if total else 0

    def weightedprob(self, f, cat, prf, weight=1.0, ap=0.5):
        """Probability from `prf`, smoothed towards the assumed prior `ap`.

        The assumed probability carries `weight` virtual observations, so
        rarely-seen features are pulled towards `ap`.
        """
        basicprob = prf(f, cat)
        totals = sum(self.fcount(f, c) for c in self.categories())
        return ((weight * ap) + (totals * basicprob)) / (weight + totals)

    def setthresholds(self, cat, t):
        """Require `cat` to beat each rival by factor `t` during classify."""
        self.thresholds[cat] = t

    def getthresholds(self, cat):
        """Threshold for `cat`, defaulting to 1.0 when none was set."""
        return self.thresholds.get(cat, 1.0)
class naivebayes(classifier):
    """Naive Bayes document classifier built on the base `classifier` counts."""

    def docprob(self, item, cat):
        """P(document | category): product of weighted per-feature probabilities."""
        features = self.getfeatures(item)
        p = 1
        for f in features:
            p *= self.weightedprob(f, cat, self.fprob)
        return p

    def prob(self, item, cat):
        """Unnormalised P(category | document) = P(cat) * P(doc | cat)."""
        catprob = self.catcount(cat) / self.totalcount()
        docprob = self.docprob(item, cat)
        return catprob * docprob

    def classify(self, item, default=None):
        """Return the winning category, or `default` when no clear winner.

        Bug fix: the original left `best` unassigned when every category
        scored 0.0 (or no category exists), raising UnboundLocalError on
        the comparison loop; we now return `default` in that case.
        """
        probs = {}
        best = None
        best_prob = 0.0
        for cat in self.categories():
            probs[cat] = self.prob(item, cat)
            if probs[cat] > best_prob:
                best_prob = probs[cat]
                best = cat
        if best is None:
            return default
        # Threshold test: the winner must beat each rival by its factor.
        for cat in probs:
            if cat == best:
                continue
            if probs[cat] * self.getthresholds(best) > probs[best]:
                return default
        return best
class fisherclassifier(classifier):
    # Fisher-method classifier: combines per-feature category probabilities
    # via an inverse chi-squared test (Robinson's spam-filtering approach).
    def __init__(self, getfeatures):
        classifier.__init__(self, getfeatures)
        # Per-category minimum Fisher score required before classify()
        # will pick that category.
        self.minimums = {}
    def setminimum(self, cat, min):
        # Set the lower bound that `cat`'s Fisher score must exceed.
        self.minimums[cat] = min
    def getminimum(self, cat):
        # Lower bound for `cat`; defaults to 0 (no restriction).
        if cat not in self.minimums:
            return 0
        return self.minimums[cat]
    def classify(self, item, default = None):
        # Pick the category with the highest Fisher score above its minimum;
        # fall back to `default` when no category qualifies.
        best = default
        max = 0.0
        for c in self.categories():
            p = self.fisherprob(item, c)
            if p > self.getminimum(c) and p > max:
                best = c
                max = p
        return best
    def cprob(self, f, cat):
        # P(category | feature) normalised across all categories
        # (implicitly assumes equal category priors).
        clf = self.fprob(f, cat)
        if clf == 0:
            return 0
        freqsum = sum([self.fprob(f, c) for c in self.categories()])
        p = clf/freqsum
        return p
    def fisherprob(self, item, cat):
        # Fisher's method: under independence, -2 * sum(log p_i) follows a
        # chi-squared distribution with 2 * n degrees of freedom.
        p = 1
        features = self.getfeatures(item)
        for f in features:
            p *= (self.weightedprob(f, cat, self.cprob))
        fscores = -2*math.log(p)
        return self.invchi2(fscores, len(features)*2)
    def invchi2(self, chi, df):
        # Upper-tail chi-squared probability for even `df` via the
        # closed-form series expansion; clamped to 1.0 for safety.
        m = chi / 2.0
        sum = term = math.exp(-m)
        for i in range(1, df//2):
            term *= m/i
            sum += term
        return min(sum, 1.0)
| [
"alfred_bit@sina.cn"
] | alfred_bit@sina.cn |
f05d7438fcbd0cc6346fdb97f5e45c2d6a27018d | 0c575380b862ef3b7e06b57bd923cae589c042a6 | /python/problem63.py | 9a55875cdf9b9e896682850faa6f2232465ed31d | [] | no_license | mawaldne/projecteuler | 9e6bbef2b49fd7a37b1f33c6e4a9868f37606fe5 | b9f158abd9dec4d54e6c0f17ca5e554a0a31c6c4 | refs/heads/master | 2020-08-05T00:58:27.950931 | 2015-05-23T21:03:26 | 2015-05-23T21:03:26 | 1,083,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | c = 0
for i in range(1,100):
for j in range(1,100):
k = i**j
if (len(str(k)) == j):
print(i,j,k)
c+=1
print("count=" + str(c))
| [
"mike.waldner@gmail.com"
] | mike.waldner@gmail.com |
914b43d53ab351be628c97bce1fc06c882bacfd6 | d1cdf08eb18f9abf7a53228e2aadb62b85c06c3f | /Q5.py | 1a99fdfaed66b09bbe695480d1512f0036393d36 | [
"MIT"
] | permissive | ashutoshvrm8/Python-Programs | cdeab603cdbde51d5a0c8ee834ccb6296fc0fd79 | 190ac85dbaa4870c0f2fb22a91e8c6d8a8788f44 | refs/heads/master | 2020-03-26T05:19:33.959320 | 2018-08-13T17:45:12 | 2018-08-13T17:45:12 | 144,550,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 13 14:09:19 2018
@author: Ashutosh Verma
"""
'''
Define a class which has at least two methods:
getString: to get a string from console input
printString: to print the string in upper case.
Also please include simple test function to test the class methods.
'''
class InputOutString(object):
    """Holds a line of text read from the console and prints it upper-cased."""

    def __init__(self):
        # Start with an empty string until getString() is called.
        self.s = ""

    def getString(self):
        """Read one line from standard input into self.s."""
        self.s = input()

    def printString(self):
        """Print the stored string converted to upper case."""
        text = self.s
        print(text.upper())
# Demo: read one line from the console, then echo it in upper case below.
strObj = InputOutString()
strObj.getString()
strObj.printString() | [
"noreply@github.com"
] | noreply@github.com |
63ccdfe5804f1f8a3ed9608807bce43b88ac5db3 | 1f731283124223185a7b59a1b44891c56e965d4f | /plotter_raster.py | 74153a9fa8957da6b8bb758a08aec6f391a1f45a | [] | no_license | KalelR/Chialvo | ad2a5ecdd6a9eafd34fa7b8d2f7f0d8bb2fe3a10 | 6f3b7226c11268ed9f435ea642a93bf99cacc3d9 | refs/heads/master | 2022-09-29T18:58:12.848856 | 2020-06-06T17:41:13 | 2020-06-06T17:41:13 | 267,693,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,278 | py | # -*- coding: UTF-8 -*-
#--------------------------------- RUN WITH PYTHON2
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import os.path
from os import path
# Raster-plot script: for each coupling strength eps, read the per-neuron
# spike-time file and save a neuron-index vs. spike-time scatter plot.
alpha = '1.8'                      # power-law exponent tag used in file names
v_eps = ["0.021"]                  # coupling strengths to plot (as strings)
N = 525                            # number of neurons (raster columns)
seed = '1002'
k_mean = '0.03'; sigma = '0.0035'
DIR = "/home/kalel/hoggar/chialvo/"
num_perm = "524"
# File-name prefix encoding the network/shuffling parameters.
prefix = "shuffled_uniform_min_" + (k_mean) + "_sigma_" + sigma
for i in range(0, len(v_eps)):
    eps = v_eps[i]
    print(eps)
    fileIn = "results/" + prefix + "_chialvo_spikeTimes_powerlaw_alpha_" + alpha + "_N_" + str(N) + "_seed_" + seed + "_eps_" + eps + ".dat"
    if(path.exists(fileIn)):
        # One line per neuron: whitespace-separated spike times.
        # NOTE(review): np.array(map(...)) is Python 2 style (see the
        # "RUN WITH PYTHON2" banner above); under Python 3 this would wrap
        # a map object instead of building a float array.
        mTempos = [np.array(map(float, line.split())) for line in open(fileIn)]
    else:
        print('erro, nao achou o arquivo' + fileIn)
        break;
    for j in range(N):
        # x-coordinates: neuron index repeated once per spike time.
        v_idxNeuron = np.linspace(j, j, len(mTempos[j][:]), dtype=int)
        plt.plot(v_idxNeuron, mTempos[j][:], 'k.', markersize=1)
    plt.ylabel('t', fontsize=8)
    plt.xlim(0, N)
    # plt.ylim(100000, 100300)
    plt.xlabel('Neuron #', fontsize=8)
    eps_2 = ('%.4f' % float(eps))   # fixed-width eps for the output name
    plt.savefig('results/' + prefix + '_chialvo_powerlaw_RP_alpha_' + alpha + '_N_' + str(N) + '_seed_' + str(seed) + '_eps_' + eps_2 + '.png')
    plt.clf()
| [
"kalelluizrossi@gmail.com"
] | kalelluizrossi@gmail.com |
f64456cd8aa6edb9dff8e61db95c050245376561 | 1f9042242fdd6f0bb67674c84192cef7554f9274 | /DjangoWebProject2/DjangoWebProject2/settings.py | 82cd9a9bcdddeef3d2ab503ea369d9d4ec7410f9 | [] | no_license | zhouwengqing/python | e8e86eb4d371ef20333f65292f728cb5f15a4aae | 8558582c9f73ce1301a8a1eab4e75c621f522fed | refs/heads/master | 2020-08-31T15:40:36.055254 | 2019-10-31T09:04:31 | 2019-10-31T09:04:31 | 218,724,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,328 | py | """
Django settings for DjangoWebProject2 project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import posixpath
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ea1d444f-1b02-45a4-836a-15fe58c93e4f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'app',
# Add your apps here to enable them
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DjangoWebProject2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'DjangoWebProject2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = posixpath.join(*(BASE_DIR.split(os.path.sep) + ['static']))
| [
"597031327@qq.com"
] | 597031327@qq.com |
e4e0fd9b1a065f32e5e1b0c354d3c525a313a27e | a8cf9fd981ab062dc72fd11404373fee332b923d | /models/asr_config.py | 23a63ac8a5929ee42e24b04953820f62375d7ca5 | [] | no_license | liyunbin/asr | bdd8df76b7be692fb480448792551a215398ffde | 196f94e740965a53f9b4dc92ab105b5a074596ad | refs/heads/master | 2020-04-07T16:53:44.068500 | 2018-11-26T11:19:37 | 2018-11-26T11:19:37 | 158,547,639 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,893 | py | import data_utils
import numpy as np
class Struct:
    """Expose a dict of configuration values as object attributes."""

    def __init__(self, **entries):
        # Each keyword argument becomes an instance attribute.
        for key, value in entries.items():
            setattr(self, key, value)
# 模型参数这设置
baidu_deep_speech_2 = {
'window_size':.025, # 帧长短 单位:秒
'window_stride': .01, # 帧窗口 单位:秒
'window':np.hamming,
'feature_normalize':True,
'feature_type':'mfcc',
'feature_num':13,
'sample_rate':44100,
'max_time_steps':2000,
'keep_prob':[.5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5],
'num_hidden':200,
'num_hidden_fc':11, # 及就是多少个中文字符,简体字加上罗马字母表加上空格
'max_char_len':4,
'rnn_type':'GRU',
'rnn_stack_num':3,
'nb_epoch':1000,
'batch_size':100,
'check_point':'./checkout'
}
classify_dict = {
'window_size':.025, # 帧长短 单位:秒
'window_stride': .01, # 帧窗口 单位:秒
'window':np.hamming,
'feature_normalize':True,
'feature_type':'mfcc',
'feature_num':13,
'sample_rate':44100,
'max_time_steps':2000,
'keep_prob':[.5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5],
'num_hidden':200,
'num_hidden_fc':10, # 及就是多少个中文字符,简体字加上罗马字母表加上空格
'max_char_len':4,
'rnn_type':'GRU',
'rnn_stack_num':3,
'nb_epoch':1000,
'batch_size':40,
'check_point':'./classify_checkpoints'
}
classify_single_dict = {
'window_size':.025, # 帧长短 单位:秒
'window_stride': .01, # 帧窗口 单位:秒
'window':np.hamming,
'feature_normalize':True,
'feature_type':'mfcc',
'feature_num':13,
'sample_rate':44100,
'max_time_steps':100,
'keep_prob':[.5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5],
'num_hidden':200,
'num_hidden_fc':10, # 及就是多少个中文字符,简体字加上罗马字母表加上空格
'max_char_len':1,
'rnn_type':'GRU',
'rnn_stack_num':3,
'nb_epoch':1000,
'batch_size':40,
'check_point':'./classify_single_checkpoints'
}
tencent_speech_dict = {
'window_size':.02, # 帧长短 单位:秒
'window_stride': .01, # 帧窗口 单位:秒
'window':'hamming',
'feature_normalize':True,
'feature_type':'spect',
'feature_num':161,
'sample_rate':16000,
'max_time_steps':2000,
'keep_prob':[.5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5],
'num_hidden':800,
'num_hidden_fc':11, # 及就是多少个中文字符,简体字加上罗马字母表加上空格
'max_char_len':100,
'nb_epoch':1000,
'batch_size':5,
'check_point':'./checkout'
}
deep_speech_2 = Struct(**baidu_deep_speech_2)
tencent_speech = Struct(**tencent_speech_dict)
classify_config = Struct(**classify_dict)
classify_single_config = Struct(**classify_single_dict) | [
"LYb1987920"
] | LYb1987920 |
42c55265daabb2470cae40ea23ca66ff4211931f | 94567834d7ca69fba178a9d2d0ae89a73f813741 | /analyses/monthly_time_series/China_prmsl/plot_ts.py | 630af662e02fee6a55708d2f481b49b4f71496c9 | [] | no_license | philip-brohan/Yangtze_floods | 41b6d655fd4f06f8129c4e5c10d51d5e74d6cec4 | 8ad376328f5b7866d82dd3613e6157cfa31abea1 | refs/heads/master | 2021-09-07T23:56:51.250070 | 2021-08-23T16:43:58 | 2021-08-23T16:43:58 | 243,798,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,964 | py | #!/usr/bin/env python
# 20CRv3 time-series: Monthly average, regional average.
# Each ensemble member as a seperate line.
# Uses pre-calculated time-series.
import os
import iris
import numpy
import datetime
import pickle
import matplotlib
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.patches import Rectangle
from matplotlib.lines import Line2D
start=datetime.datetime(1926,1,1,0,0)
end=datetime.datetime(1935,12,31,23,59)
ylim = (-300,250)
dts=[]
ndata=None
for year in range(start.year,end.year+1,1):
sfile="%s/20CR/version_3/analyses/Yangtze_ts/PRMSL_v3/%04d.pkl" % \
(os.getenv('SCRATCH'),year)
with open(sfile, "rb") as f:
(ndyr,dtyr) = pickle.load(f)
dts.extend([dtyr[0:11]])
if ndata is None:
ndata = ndyr[0:11,:]
else:
ndata = numpy.ma.concatenate((ndata,ndyr[0:11,:]))
# Plot the resulting array as a set of line graphs
fig=Figure(figsize=(19.2,6), # Width, Height (inches)
dpi=300,
facecolor=(0.5,0.5,0.5,1),
edgecolor=None,
linewidth=0.0,
frameon=False,
subplotpars=None,
tight_layout=None)
canvas=FigureCanvas(fig)
font = {'family' : 'sans-serif',
'sans-serif' : 'Arial',
'weight' : 'normal',
'size' : 16}
matplotlib.rc('font', **font)
# Plot the lines
ax = fig.add_axes([0.05,0.05,0.93,0.93],
xlim=((start-datetime.timedelta(days=1)),
(end+datetime.timedelta(days=1))),
ylim=ylim)
ax.set_ylabel('PRMSL anomaly')
for m in range(80):
ax.add_line(Line2D(dts,
ndata[:,m],
linewidth=0.5,
color=(0,0,1,1),
alpha=0.1,
zorder=200))
fig.savefig('PRMSL_ts.png')
| [
"philip@brohan.org"
] | philip@brohan.org |
eb377e37d8705e70dde3ea336062f491f5577c50 | 7c2be4611efc0a672627d19b6decb7a9eead93d9 | /module/settings.py | f192310a959dbc734753d00895588a643dd47b81 | [] | no_license | wyattsam/jam | c9372e00a2a0deb34f0eca2fd9a11707609ea6e5 | 2851a848908100a4e308383e4621dddcf8290a66 | refs/heads/master | 2020-05-29T11:05:19.285347 | 2015-06-12T17:52:17 | 2015-06-12T17:52:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | import yaml
import os
# Which config file to load; defaults to the development environment.
environment = os.getenv('ENV','devel')
# Absolute directory containing this module, for repo-relative paths.
_here = os.path.dirname(os.path.abspath(__file__))
try:
    path = 'config/{0}.yml'
    pathEnv = os.path.join(_here, '..', path.format(environment))
    _config_file = open(pathEnv, 'r')
    # NOTE(review): yaml.load without an explicit Loader= is unsafe on
    # untrusted input; config files here are presumably trusted — confirm.
    config = yaml.load(_config_file)
except IOError as e:
    # Missing config is fatal: explain where it was expected, then exit.
    errstr = (
        "Unable to find a valid config file at: config/{0}.yml. "
        "Please check README.md for more info.\n"
        "https://github.com/10gen/corp#setting-up-your-development-environment"
    )
    print(errstr.format(environment))
    raise SystemExit
# Optional private overrides; silently skipped when the file is absent.
try:
    _private_config_file = open(os.path.join(_here, '..', 'config/private.yml'), 'r')
    _private_config = yaml.load(_private_config_file)
    config.update(_private_config)
except IOError as e:
    pass
print("Using {0} environment configuration".format(environment))
# Convenience accessors for the JIRA section of the config.
jira_conf = config.get('jira', {})
jira_url = jira_conf['url']
| [
"lizhifan@usc.edu"
] | lizhifan@usc.edu |
f063aa1d6f700e179430b1f88ff384085054a662 | 68359f6f4eaf33e5632e8f5dfff11120786a55f1 | /coodepool/coodepool/wsgi.py | 96847f88b70c9011c9b37bf933ffb8e3dae267be | [] | no_license | bogiSrbac/vendingMashine | d84e2d244bdff57078ab6837ca1451142ab10b74 | acd8e3136dcf0706f2850175cfd4c8e65f99e30c | refs/heads/main | 2023-07-17T06:16:40.396834 | 2021-08-23T23:51:31 | 2021-08-23T23:51:31 | 399,273,194 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for coodepool project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'coodepool.settings')
application = get_wsgi_application()
| [
"momir.bogosavac@yahoo.com"
] | momir.bogosavac@yahoo.com |
76b015554549951c9624095bf2d5901ec911f980 | 73f06b4bc66bc5ee5cbb6021f555cf5a132adac3 | /api/urls.py | e8976d703cbccd6f9bb638b0f64f8dac2e435513 | [] | no_license | monicasegu/fulproject | addd201baa47cc8cc4ca33d4583bb91b0991dfa8 | 00b9b914a86162585b55c8930a710389dac4edb3 | refs/heads/master | 2021-01-14T17:31:02.575940 | 2020-02-24T09:37:58 | 2020-02-24T09:37:58 | 242,697,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py |
from django.urls import path, re_path
# URL routes for the API app.
# NOTE(review): GramsView is referenced but never imported in this file,
# so importing this module raises NameError — a `from .views import
# GramsView` (or similar) appears to be missing; confirm upstream.
urlpatterns = [
    path("grams/",GramsView.as_view())
]
| [
"segumonica@gmail.com"
] | segumonica@gmail.com |
920bde8494004fccb4a049249d10f17b7726fe68 | f0181afd2eea9b086ce9487fb8d7fd949282140a | /bin/countgenbank.py | 173a4ff2ea62bc564b9bd89f321a8135b513e0b3 | [
"MIT"
] | permissive | linsalrob/EdwardsLab | 4a571676859c8b7238e733a0d3ad98ceb2e83c63 | 3c466acc07f1a56b575860ad26c92f900b272a53 | refs/heads/master | 2023-08-20T17:13:35.466103 | 2023-08-17T09:17:36 | 2023-08-17T09:17:36 | 25,702,093 | 36 | 25 | MIT | 2020-09-23T12:44:44 | 2014-10-24T18:27:16 | Python | UTF-8 | Python | false | false | 1,679 | py | """
Count features in a genbank file or directory of files
"""
import os
import sys
import argparse
from roblib import message, genbank_seqio
__author__ = 'Rob Edwards'
__copyright__ = 'Copyright 2020, Rob Edwards'
__credits__ = ['Rob Edwards']
__license__ = 'MIT'
__maintainer__ = 'Rob Edwards'
__email__ = 'raedwards@gmail.com'
def count_feats(gbkf, verbose=False):
    """Tally the feature types across every record of a GenBank file.

    Returns a dict mapping feature type -> number of occurrences.
    """
    if verbose:
        message(f"Reading {gbkf}", "BLUE")
    tally = {}
    for record in genbank_seqio(gbkf):
        for feature in record.features:
            tally[feature.type] = tally.get(feature.type, 0) + 1
    return tally
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=" ")
    parser.add_argument('-f', help='genbank file')
    parser.add_argument('-d', help='directory of genbank files')
    parser.add_argument('-t', help='feature type(s) (at least one must be provided)', nargs="+")
    parser.add_argument('-v', help='verbose output', action='store_true')
    args = parser.parse_args()

    # Collect the input files from -f and/or every entry under -d.
    files = []
    if args.f:
        files.append(args.f)
    if args.d:
        for f in os.listdir(args.d):
            files.append(os.path.join(args.d, f))

    # Bug fixes vs. the original:
    #  - the "Fatal" messages did not stop execution; exit explicitly;
    #  - args.t is None (not []) when -t is omitted, so len(args.t) crashed;
    #  - a zero-count cell used print("\t0") with its default newline,
    #    splitting the TSV row in the middle.
    if len(files) == 0:
        message("Fatal. Either -d or -f is required", "RED")
        sys.exit(1)
    if not args.t:
        message("Fatal. Please provide at least one feature type to count", "RED")
        sys.exit(1)

    # Header row: "File" followed by one column per requested feature type.
    print("File", end="")
    for t in args.t:
        print(f"\t{t}", end="")
    print()

    # One row per file with the count of each requested feature type.
    for f in files:
        c = count_feats(f, args.v)
        print(f, end="")
        for t in args.t:
            print(f"\t{c.get(t, 0)}", end="")
        print()
| [
"raedwards@gmail.com"
] | raedwards@gmail.com |
248be39ec105936bcc88d6125a53761ab7d02f6d | d757122d418770f8184daa8d471b7013cd4c2943 | /cobropago/transactions/tests/factories.py | 61207a60ca41d8e6396736c8ca8ae11303e6eae0 | [] | no_license | luisfernandobarrera/cobropago | 623211338310fa3d20dc7bcd45629d5c4667d2ba | 89ba7e026a00a78516bbef809f99c1c538ee581c | refs/heads/master | 2021-09-05T16:00:36.032311 | 2018-01-29T14:12:44 | 2018-01-29T14:12:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,354 | py | import uuid
import factory
import random
import datetime
class AccountFactory(factory.django.DjangoModelFactory):
class Meta:
model = 'transactions.Account'
django_get_or_create = ('name', 'ledger', 'user')
id = factory.Sequence(lambda n: uuid.uuid4())
name = factory.Faker('name')
class PayeeFactory(factory.django.DjangoModelFactory):
class Meta:
model = 'transactions.Payee'
django_get_or_create = ('name', 'ledger', 'user')
id = factory.Sequence(lambda n: uuid.uuid4())
name = factory.Faker('company')
class LedgerFactory(factory.django.DjangoModelFactory):
class Meta:
model = 'transactions.Ledger'
django_get_or_create = ('name',)
id = factory.Sequence(lambda n: uuid.uuid4())
name = factory.Faker('name')
class TransactionFactory(factory.django.DjangoModelFactory):
class Meta:
model = 'transactions.Transaction'
django_get_or_create = ('name', 'ledger', 'account', 'payee')
id = factory.Sequence(lambda n: uuid.uuid4())
date = factory.Faker('date')
check = factory.Sequence(lambda n: "CHK" + str(n * 100 + 1))
amount = factory.Sequence(lambda n: random.randint(-1000000, 1000000) / 100)
memo = factory.Faker('text')
payee = factory.SubFactory(PayeeFactory)
account = factory.SubFactory(AccountFactory)
| [
"luisfernando@informind.com"
] | luisfernando@informind.com |
138079087bc6109c11aa7274f355d5231054b646 | 0a6a70540c96cc259374375b037d39eab1139b95 | /letters.py | cc8dca9126511cec927fbc4a21aaa30a5029b657 | [] | no_license | bl4ck-op4l/umbraria | ef2c9208607253c32c9f58eef7a15a0c37785f5d | 346f739c89fb104e4269c12fb4d7d3da6c838a97 | refs/heads/master | 2023-05-29T06:04:17.579776 | 2021-06-14T20:01:14 | 2021-06-14T20:01:14 | 376,925,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | from cv2 import cv2
import json
import os.path as path
_alphabet = None
def get_alphabet():
    """Return the shared Alphabet instance, constructing it on first use."""
    global _alphabet
    if _alphabet is None:
        _alphabet = Alphabet()
    return _alphabet
class Alphabet:
    # Loads alphabet/letters.json (mapping char -> template image file) and
    # exposes the glyphs as an iterable of Letter objects.
    def __init__(self):
        with open('alphabet/letters.json', 'r') as f:
            self.data = json.load(f)
        # One Letter per (char, image-file) entry of the JSON mapping.
        self._alph = [Letter(item) for item in self.data.items()]
    def __iter__(self):
        # Iterating the alphabet yields its Letter objects.
        return iter(self._alph)
class Letter:
    # One template glyph: its character, grayscale template image, a plain
    # ASCII stand-in for output, and a per-glyph match threshold.
    def __init__(self, item):
        # `item` is a (char, relative_image_path) pair from letters.json.
        self.char = item[0]
        self.image_path = path.join('alphabet', item[1])
        self.image = cv2.imread(path.join('alphabet', item[1]), cv2.IMREAD_GRAYSCALE)
        if self.image is None:
            raise Exception('Image not found!')
        # Plain-text representation; the block glyph has no ASCII form.
        self.plain = self.char
        if self.plain == '█':
            self.plain = '?'
        # Template-match score cut-off, tuned per glyph below
        # (presumably for cv2.matchTemplate scores — confirm at call site).
        self.threshold = .855
        if self.char == ' ':
            self.threshold = .7
        elif self.char == 'd':
            self.threshold = .875
        elif self.char == ',':
            self.threshold = .86
| [
"dark.hole1@yandex.ru"
] | dark.hole1@yandex.ru |
3922cf0e75dcaeacc14c827a6d0028e6d446c8b1 | 254cb0c780d34e6e9907e1f2fa2c2bf91ccf38aa | /games/exceptions.py | b0914ff30ee4b666d8ace6c1ba208499eef35a84 | [] | no_license | JamesDevJim/game-zulu | f9d6d2db32996c014a6b31103c4ce7d737a177ff | bb9024db3b11960dd7df077c1efbf44db5ecdc3a | refs/heads/master | 2020-12-23T09:24:16.174149 | 2020-02-27T03:57:43 | 2020-02-27T03:57:43 | 237,109,565 | 0 | 1 | null | 2020-02-27T03:57:44 | 2020-01-30T00:24:08 | Python | UTF-8 | Python | false | false | 254 | py | class QuitGame(Exception):
"""Custom exception class to signal that the game must be quit"""
pass
class ChangeGame(Exception):
    """Signals that play should switch to a different game.

    The replacement game is carried on the ``new_game`` attribute
    (keyword-only); the optional ``message`` becomes the normal
    Exception message.
    """

    def __init__(self, message="", *, new_game):
        super().__init__(message)
        self.new_game = new_game
| [
"janis@lesinskis.com"
] | janis@lesinskis.com |
4079d5185261835ffa9df17e29142566cf46c3bd | dece3eb22be792aeac65ea12a1f183dd73498add | /coding/Mysql/1.py | 10119b94c419e57e3114923e1eb5292e80410ffd | [] | no_license | santanu5670/Python | 352515ad82f94157e7f74467c5e7dedd6c9069c8 | 48c2779ccf934588f2bfce7cd322088acec04080 | refs/heads/master | 2023-06-24T09:22:49.925654 | 2021-07-22T15:17:34 | 2021-07-22T15:17:34 | 387,683,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | import mysql.connector
# Connect to the local MySQL server using hard-coded demo credentials.
# NOTE(review): mysql.connector.connect() raises on failure instead of
# returning a falsy object, so when this line succeeds the truthiness
# check below is effectively always True.
mydb=mysql.connector.connect(host='localhost',user='nsec',password='nsec',database='mysql')
print(mydb)
if(mydb):
    print('Connection successful')
else:
print('Connection Unsuccessful') | [
"santanu2539@gmail.com"
] | santanu2539@gmail.com |
192348d3292cb7bb944ce9f7b56ba8fd915b8483 | 092beb9039bbd50c9ab00023dc22e5e337794bef | /iqwig_load.py | 91ede2bd44fd99b986ead3f1d48bd152a25a06d2 | [] | no_license | edwelker/pmh_img | 1321b84b9041e0ba4d98243f074d0bf5684fd868 | 4f16e9c416a2a200c1776dce3e242d26e679618c | refs/heads/master | 2016-09-05T19:33:56.967095 | 2013-08-05T21:36:07 | 2013-08-05T21:36:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | import os
import sys
sys.path.insert(0, '/home/welkere/python')
sys.path.insert(0, '/home/welkere/python/pmh_img')
os.environ['DJANGO_SETTINGS_MODULE'] = 'pmh_img.settings'
from images.models import Image
from lxml import etree
parser = etree.XMLParser()
xml = etree.parse('import/iqwig.xml')
for el in xml.getroot():
image = name = pmhid = pmh_figure_source = None
alt_text = caption = ''
if el.tag == 'image':
image = 'originals/iqwig/' + el.get('name')
name = el.get('name')
for child in el:
if child.tag == 'pmhid':
pmhid = child.text.encode('utf8')
elif child.tag == 'caption':
if child.text:
caption = child.text.encode('utf8')
elif child.tag == 'alt-text':
if child.text:
alt_text = child.text.encode('utf8')
#print "%s - %s - %s - %s\n" % (name, pmhid, alt_text, caption)
try:
img = Image.objects.get(image=image)
img.pmhid += ', ' + pmhid
img.caption += ', ' + caption
img.alt_text += ', ' + alt_text
img.save()
except Image.DoesNotExist:
image_model = Image.objects.create(image=image, caption=caption, alt_text=alt_text,
name = name, pmhid=pmhid, name_of_source='IQWiG',
pmh_figure_source='Institute for Quality and Efficiency in Health Care')
| [
"eddie.welker@gmail.com"
] | eddie.welker@gmail.com |
71b8a3e5f5dd1c40eff94c6cfb5c2f4d514efc70 | 21c253a03971bf79513f03d4f1f99d3a67f97d49 | /08/08.py | 753cca7c4fe147fbed0f07c90bc33ee893a14b8c | [] | no_license | hinzed1127/aoc2019 | 9d2d07cc03af4fc0e10da1bccc8721dd6ab9dd2f | 98b6d2049e2331c2583d47d05061690b87ea0f2b | refs/heads/master | 2020-09-29T06:32:11.089890 | 2020-01-03T23:49:05 | 2020-01-03T23:49:05 | 226,976,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,469 | py | pixels = [int(x) for x in open('input.txt').read().strip()]
width = 25    # image width in pixels (puzzle constant)
height = 6    # image height in pixels

# part 1
# Slice the flat digit stream into layers of height x width pixels.
layers = []
while len(pixels) > 0:
    layer = []
    for rows in range(height):
        row = []
        for digits in range(width):
            row.append(pixels.pop(0))
        layer.append(row)
    layers.append(layer)

fewest_zeros = 99999
layer_count = {
    'zeros': fewest_zeros,
    'ones': 0,
    'twos': 0
}
# Find the layer with the fewest 0 digits and record its 1/2 tallies.
for layer in layers:
    zeros = 0
    ones = 0
    twos = 0
    for row in layer:
        for element in row:
            if element == 0:
                zeros += 1
            if element == 1:
                ones += 1
            if element == 2:
                twos += 1
    if zeros < fewest_zeros:
        fewest_zeros = zeros
        layer_count = {
            'zeros': fewest_zeros,
            'ones': ones,
            'twos': twos
        }
print(layer_count)
# Puzzle answer: ones * twos on the chosen layer.
print(layer_count['ones'] * layer_count['twos'])

# part 2
# initialize an "image"
# 2 = transparent; the first non-transparent pixel per position wins.
image = [[2] * width for _ in range(height)]
for layer in layers:
    for i, row in enumerate(layer):
        for j, element in enumerate(row):
            if image[i][j] == 2:
                image[i][j] = layer[i][j]
            # pixels[i,j] = layer[i][j]
            # print(i,j)
# raw 0s and 1s
for row in image:
    print(''.join(str(x) for x in row))
# "colorized"/decoded image
for row in image:
    print(''.join(str(x).replace('0', ' ').replace('1', '***') for x in row))
| [
"dan.hinze@adhocteam.us"
] | dan.hinze@adhocteam.us |
7e3284f637fab8035de3d8707ac87fc2b0b02936 | dc090f77a992b1e2a8f101a40285bad294318fbd | /CNN_WGAN_GP.py | 5877d944c91904dc9fdb8cf21b2c1f2355bb5127 | [
"MIT"
] | permissive | tkddnr7671/SinusoidalGAN | 2cf023ec0760364934d075df026e4196aea5ac2a | 72e1464e5c5c5a02effb951578a8ab74a49a6160 | refs/heads/master | 2020-04-15T03:20:19.133339 | 2019-12-18T19:39:27 | 2019-12-18T19:39:27 | 164,345,066 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,431 | py | # import packages
import argparse
from ops import *
from utilities import *
if __name__ == '__main__':
parser = argparse.ArgumentParser('')
parser.add_argument('--phones', type=str, default='mono')
parser.add_argument('--freq', type=str, default='1.0kHz')
parser.add_argument('--snr', type=str, default='99dB')
parser.add_argument('--batch_size', type=int, default=32) # ICML ~ 16
parser.add_argument('--max_epoch', type=int, default=100000)
args = parser.parse_args()
# real data loading
phones = args.phones
freq = args.freq
snr = args.snr
wavDir = "./database/{0:s}/{1:s}/{2:s}".format(phones, snr, freq)
WavData, nData, nLength = WaveRead(wavDir)
WavData = WaveNormalization(WavData)
# audio parameters
FS = 16000
FrmLeng = 512
FrmOver = int(FrmLeng * 3 / 4)
total_epochs = args.max_epoch
maxValue = 32767 # max value of short integer(2 byte)
maxValue = maxValue / 2 # heuristically modification
# transform from wave to spectrogram
SpecData, nFre, nFrm = wav2spec(WavData, FS, FrmLeng, FrmOver)
# training parameters
batch_size = args.batch_size
learning_rate = 0.000001
lamda = 10
eps = 1.0e-4
# generating parameters
random_dim = 100
# module 1: Generator
def generator(z):
with tf.variable_scope(name_or_scope="G") as scope:
# define weights for generator
weights = {
'gw1': tf.get_variable(name='gw1', shape=[random_dim, FrmLeng], dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01)),
'gw2': tf.get_variable(name='gw2', shape=[FrmLeng, FrmLeng], dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01)),
'gw3': tf.get_variable(name='gw3', shape=[FrmLeng, int(FrmLeng)], dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01)),
'gw4': tf.get_variable(name='gw4', shape=[int(FrmLeng/2), nLength], dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01))
}
bias = {
'gb1': tf.get_variable(name='gb1', shape=[FrmLeng], dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01)),
'gb2': tf.get_variable(name='gb2', shape=[FrmLeng], dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01)),
'gb3': tf.get_variable(name='gb3', shape=[FrmLeng], dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01))
}
fc = tf.nn.relu(tf.layers.batch_normalization(tf.add(tf.matmul(z, weights['gw1']), bias['gb1'])))
fc = tf.nn.relu(tf.layers.batch_normalization(tf.add(tf.matmul(fc, weights['gw2']), bias['gb2'])))
fc = tf.cos(tf.layers.batch_normalization(tf.add(tf.matmul(fc, weights['gw3']), bias['gb3'])))
fc1 = tf.slice(input_=fc, begin=[0, 0], size=[batch_size, int(FrmLeng/2)])
fc2 = tf.slice(input_=fc, begin=[0, int(FrmLeng/2)], size=[batch_size, int(FrmLeng/2)])
fc = tf.add(tf.matmul(fc1, weights['gw4']), tf.matmul(fc2, weights['gw4']))
return tf.nn.tanh(fc)
# module 2: Discriminator
def discriminator(x, reuse=False):
if reuse == False:
with tf.variable_scope(name_or_scope="D") as scope:
weights = {
'dw1': tf.get_variable(name='dw1', shape=[17 * 4 * 16, 1], dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01))
}
bias = {
'db1': tf.get_variable(name='db1', shape=[1], dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01))
}
hconv = tf.nn.relu(tf.layers.batch_normalization(conv2d(x, 2, [3, 3], [1, 1])))
hconv = maxpool2d(hconv, [2, 2], [2, 2])
hconv = tf.nn.relu(tf.layers.batch_normalization(conv2d(hconv, 4, [3, 3], [1, 1])))
hconv = maxpool2d(hconv, [2, 2], [2, 2])
hconv = tf.nn.relu(tf.layers.batch_normalization(conv2d(hconv, 8, [3, 3], [1, 1])))
hconv = maxpool2d(hconv, [2, 2], [2, 2])
hconv = tf.nn.relu(tf.layers.batch_normalization(conv2d(hconv, 16, [3, 3], [1, 1])))
hconv = maxpool2d(hconv, [2, 2], [2, 2])
else:
with tf.variable_scope(name_or_scope="D", reuse=True) as scope:
weights = {
'dw1': tf.get_variable(name='dw1', shape=[17 * 4 * 16, 1], dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01))
}
bias = {
'db1': tf.get_variable(name='db1', shape=[1], dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01))
}
hconv = tf.nn.relu(tf.layers.batch_normalization(conv2d(x, 2, [3, 3], [1, 1])))
hconv = maxpool2d(hconv, [2, 2], [2, 2])
hconv = tf.nn.relu(tf.layers.batch_normalization(conv2d(hconv, 4, [3, 3], [1, 1])))
hconv = maxpool2d(hconv, [2, 2], [2, 2])
hconv = tf.nn.relu(tf.layers.batch_normalization(conv2d(hconv, 8, [3, 3], [1, 1])))
hconv = maxpool2d(hconv, [2, 2], [2, 2])
hconv = tf.nn.relu(tf.layers.batch_normalization(conv2d(hconv, 16, [3, 3], [1, 1])))
hconv = maxpool2d(hconv, [2, 2], [2, 2])
hconv = tf.reshape(hconv, shape=[-1, 17 * 4 * 16])
output = tf.nn.sigmoid(tf.add(tf.matmul(hconv, weights['dw1']), bias['db1']))
return output
# module 3: Random noise as an input
def random_noise(batch_size):
return np.random.normal(size=[batch_size, random_dim]), np.zeros(shape=[batch_size, 1])
# Make a graph
g = tf.Graph()
with g.as_default():
# input node
X = tf.placeholder(tf.float32, [batch_size, nFre, nFrm, 1]) # for real data
Z = tf.placeholder(tf.float32, [batch_size, random_dim]) # for generated samples
# Results in each module; G and D
fake_x = generator(Z)
fake_spec = tensor_stft(fake_x, FrmLeng=FrmLeng, FrmOver=FrmOver)
# Probability in discriminator
result_of_fake = discriminator(fake_spec)
result_of_real = discriminator(X, True)
# for WGAN: Loss function in each module: G and D => it must be maximize
g_loss = tf.reduce_mean(result_of_fake)
d_loss = tf.reduce_mean(result_of_real) - tf.reduce_mean(result_of_fake)
# WGAN_GP training
alpha = tf.random_uniform(shape=[batch_size, 1], minval=eps, maxval=1.0 - eps)
diff = fake_spec - X
interpolation = []
for iter in range(batch_size):
temp = X[iter] + tf.scalar_mul(scalar=alpha[iter, 0], x=diff[iter])
interpolation.append(temp)
interpolation = tf.convert_to_tensor(interpolation, dtype=tf.float32)
result_of_interpolation = discriminator(interpolation, True)
gradients = tf.gradients(result_of_interpolation, [interpolation])[0]
gradients = tf.reduce_sum(tf.square(gradients), reduction_indices=[1, 2])
gradient_penalty = tf.reduce_mean((gradients - 1.) ** 2)
# Optimization procedure
t_vars = tf.trainable_variables()
gr_vars = [var for var in t_vars if "gw4" in var.name]
g_vars = [var for var in t_vars if "G" in var.name]
d_vars = [var for var in t_vars if "D" in var.name]
w_vars = [var for var in t_vars if ("D" or "G") in var.name]
# Regularization for weights
gr_loss = tf.contrib.layers.apply_regularization(regularizer=tf.contrib.layers.l1_regularizer(1.0e-6),
weights_list=gr_vars)
g_loss_reg = g_loss - gr_loss
d_loss_reg = d_loss - lamda * gradient_penalty
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
g_train = optimizer.minimize(-g_loss_reg, var_list=g_vars)
gw_train = optimizer.minimize(-g_loss_reg, var_list=gr_vars)
d_train = optimizer.minimize(-d_loss_reg, var_list=d_vars)
# Training graph g
saver = tf.train.Saver(var_list=w_vars)
with tf.Session(graph=g) as sess:
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state('./model/WGAN_GP')
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
saver.restore(sess, os.path.join('./model/WGAN_GP', ckpt_name))
counter = int(next(re.finditer("(\d+)(?!.*\d)", ckpt_name)).group(0))
print(" [*] Success to read {}".format(ckpt_name))
else:
counter = 0
total_batchs = int(WavData.shape[0] / batch_size)
logPath = "./result/GAN_result.log"
log_fp = open(logPath, 'w')
log = "Class: %s, nData: %d, max_epoch: %d, batch_size: %d, random_dim: %d" \
% (phones, nData, total_epochs, batch_size, random_dim)
print(log)
log_fp.write(log + "\n")
for epoch in range(counter, total_epochs):
avg_G_loss = 0
avg_D_loss = 0
data_indices = np.arange(nData)
np.random.shuffle(data_indices)
SpecData = SpecData[data_indices]
for batch in range(total_batchs):
batch_x = SpecData[batch*batch_size:(batch+1)*batch_size]
noise, nlabel = random_noise(batch_size)
sess.run(d_train, feed_dict={X: batch_x, Z: noise})
sess.run(g_train, feed_dict={Z: noise})
sess.run(gw_train, feed_dict={Z: noise})
gl, dl = sess.run([g_loss_reg, d_loss_reg], feed_dict={X: batch_x, Z: noise})
avg_G_loss += gl
avg_D_loss += dl
avg_G_loss /= total_batchs
avg_D_loss /= total_batchs
if (epoch + 1) % 1000 == 0 or epoch == 0:
log = "=========Epoch : %d ======================================" % (epoch + 1)
print(log)
log_fp.write(log + "\n")
log = "G_loss : %.15f" % avg_G_loss
print(log)
log_fp.write(log + "\n")
log = "D_loss : %.15f" % avg_D_loss
print(log)
log_fp.write(log + "\n")
# Generating wave
sample_input, _ = random_noise(batch_size)
generated = sess.run(fake_x, feed_dict={Z: sample_input})
# Writing the generated wave
savePath = './wave_log/{}.wav'.format(str(epoch + 1).zfill(3))
WriteWave(savePath, 1, 2, FS, generated[5], maxValue)
log = "Writing generated audio to %s" % savePath
print(log)
if (epoch + 1) % 5000 == 0 or epoch == 0:
# save model
modelPath = "./model/WGAN_GP/{0:s}_{1:s}_{2:s}".format(phones, freq, snr)
saver.save(sess=sess, save_path=modelPath, global_step=(epoch + 1))
# Generating wave
sample_noise, _ = random_noise(batch_size)
generated = sess.run(fake_x, feed_dict={Z: sample_noise})
# Writing the generated wave
for i in range(batch_size):
savePath = './wave/WGAN_GP/{}.wav'.format(str(i).zfill(3))
WriteWave(savePath, 1, 2, FS, generated[i], maxValue)
print("Writing generated audio to " + savePath)
log = "Complete Audio GAN"
print(log)
log_fp.write(log + "\n")
log_fp.close()
| [
"noreply@github.com"
] | noreply@github.com |
32cb4744b0a27ce5fff742501ffb32eb53ceb386 | 37236e21173c7a3f9287be616905405708bb9b5c | /linqsh/main.py | 28564680e333a5200008f07e5a034140a408e556 | [] | no_license | foriequal0/linqsh | 7c0c2fd0f392fe8ce4ce0c6a17875854824e7442 | 969a99555cf4f25d922cf978f4fcd1ce5cf73930 | refs/heads/master | 2020-02-26T15:20:35.162408 | 2015-08-30T14:36:33 | 2015-08-30T14:36:33 | 41,629,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,418 | py | import argparse
import sys
import os.path
import linqsh.env as env
import linqsh.cmds
import importlib
import linqsh.utils.split
from asq.initiators import query
from asq.selectors import identity
def add_common_arguments(parser):
    """Attach arguments shared by every linqsh subcommand (currently none)."""
    pass
def main():
    """Entry point: dispatch to a linqsh subcommand.

    When invoked through a symlink named after a command (busybox style),
    run that command directly; when invoked as ``linqsh``/``linqsh.py``,
    expose every command in ``linqsh.cmds`` as an argparse subcommand.
    """
    invoked_as = os.path.basename(sys.argv[0])
    symbolic = invoked_as not in ['linqsh', 'linqsh.py']

    env.import_env_var()
    parser = argparse.ArgumentParser()
    if symbolic:
        # Single-command mode: the symlink name selects the command module.
        module = importlib.import_module('linqsh.cmds.' + invoked_as)
        env.add_arguments(parser)
        add_common_arguments(parser)
        module.add_arguments(parser)
        args = parser.parse_args()
        env.override_from_args(args)
        module.cmd_main(args)
    else:
        # Subcommand mode: build {command name -> command module}.
        module_dict = query(linqsh.cmds.__all__).to_dictionary(
            identity,
            lambda name: importlib.import_module('linqsh.cmds.' + name))
        subparsers = parser.add_subparsers(dest='cmd')
        for cmd in module_dict:
            subparser = subparsers.add_parser(cmd)
            add_common_arguments(subparser)
            env.add_arguments(subparser)
            module_dict[cmd].add_arguments(subparser)
        args = parser.parse_args()
        env.override_from_args(args)
        module_dict[args.cmd].cmd_main(args)
# Allow running the module directly as well as via console entry points.
if __name__ == "__main__":
    main()
| [
"foriequal@gmail.com"
] | foriequal@gmail.com |
9a8d835fc0417f099a283d9cda4fda5b43d186f5 | ea395e2c1c6ec29ded901aac071c49067cb52e1f | /apps/goods/views.py | 7fd8d383b65ec268101ee56471419007763f3847 | [] | no_license | QiuPeng92/dailyfresh | f2defda84f0357ba23cc98cc64f9473347c913bc | dce616a06633dd705f5c06546dbc28609e2d50b5 | refs/heads/master | 2022-12-27T17:03:18.302790 | 2020-10-11T11:01:17 | 2020-10-11T11:01:17 | 297,071,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,363 | py | from django.core.cache import cache
from django.shortcuts import render, redirect, reverse
from django.views.generic import View
from django_redis import get_redis_connection
from goods.models import GoodsType, GoodsSKU, IndexGoodsBanner, IndexPromotionBanner, IndexTypeGoodsBanner
from order.models import OrderGoods
from django.core.paginator import Paginator
# Create your views here.
# class Test(object):
# def __init__(self):
# self.name = 'abc'
#
# t = Test()
# t.age = 10
# print(t.age)
# http://127.0.0.1:8000
class IndexView(View):
    """Front page: category list, carousels, promotions and cart badge."""
    def get(self, request):
        """Render the front page, caching everything but the cart count."""
        # The expensive catalogue queries are cached; only the per-user
        # cart count is recomputed on every request.
        context = cache.get('index_page_data')
        if context is None:
            print('设置缓存')  # debug print: "setting cache"
            # Product categories.
            types = GoodsType.objects.all()
            # Front-page carousel entries.
            goods_banners = IndexGoodsBanner.objects.all().order_by('index')
            # Front-page promotion entries.
            promotion_banners = IndexPromotionBanner.objects.all().order_by('index')
            for category in types:
                # Attach this category's image (display_type=1) and text
                # (display_type=0) showcase entries directly onto the model
                # instance for the template to read.
                category.image_banners = IndexTypeGoodsBanner.objects.filter(
                    type=category, display_type=1).order_by('index')
                category.title_banners = IndexTypeGoodsBanner.objects.filter(
                    type=category, display_type=0).order_by('index')
            context = {
                'types': types,
                'goods_banners': goods_banners,
                'promotion_banners': promotion_banners,
            }
            # Cache the assembled context for an hour.
            cache.set('index_page_data', context, 3600)
        cart_count = 0
        user = request.user
        if user.is_authenticated():
            # Logged in: cart entries live in a redis hash keyed per user.
            conn = get_redis_connection('default')
            cart_count = conn.hlen('cart_%d' % user.id)
        context.update(cart_count=cart_count)
        return render(request, 'index.html', context)
# URL: /goods/<sku id>
class DetailView(View):
    """Product detail page."""
    def get(self, request, goods_id):
        """Render the detail page for the SKU identified by *goods_id*."""
        try:
            sku = GoodsSKU.objects.get(id=goods_id)
        except GoodsSKU.DoesNotExist:
            # Unknown product: fall back to the front page.
            return redirect(reverse('goods:index'))
        # All product categories (sidebar).
        types = GoodsType.objects.all()
        # Reviews: order rows for this SKU carrying a non-empty comment.
        sku_orders = OrderGoods.objects.filter(sku=sku).exclude(comment='')
        # Two newest products of the same category.
        new_skus = GoodsSKU.objects.filter(type=sku.type).order_by('-create_time')[:2]
        # Sibling SKUs of the same SPU (other variants of this product).
        same_spu_skus = GoodsSKU.objects.filter(goods=sku.goods).exclude(id=goods_id)
        cart_count = 0
        user = request.user
        if user.is_authenticated():
            # Cart badge: number of distinct items in the user's redis cart.
            conn = get_redis_connection('default')
            cart_count = conn.hlen('cart_%d' % user.id)
            # Record the visit: move this SKU to the front of the browse
            # history list and keep only the five most recent entries.
            conn = get_redis_connection('default')
            history_key = 'history_%d' % user.id
            conn.lrem(history_key, 0, goods_id)
            conn.lpush(history_key, goods_id)
            conn.ltrim(history_key, 0, 4)
        context = {
            'sku': sku,
            'types': types,
            'sku_orders': sku_orders,
            'new_skus': new_skus,
            'cart_count': cart_count,
            'same_spu_skus': same_spu_skus,
        }
        return render(request, 'detail.html', context)
# Parameters: category id, page number, sort order.
# URL: /list/<category id>/<page number>?sort=<sort order>
class ListView(View):
    """Paginated product list for one category, with optional sorting."""
    def get(self, request, type_id, page):
        """Render page *page* of the category identified by *type_id*."""
        try:
            category = GoodsType.objects.get(id=type_id)
        except GoodsType.DoesNotExist:
            # Unknown category: fall back to the front page.
            return redirect(reverse('goods:index'))
        types = GoodsType.objects.all()
        # sort=price  -> ascending price
        # sort=hot    -> best selling first
        # anything else (normalized to "default") -> newest first, by id
        sort = request.GET.get('sort')
        if sort == 'price':
            skus = GoodsSKU.objects.filter(type=category).order_by('price')
        elif sort == 'hot':
            skus = GoodsSKU.objects.filter(type=category).order_by('-sales')
        else:
            sort = 'default'
            skus = GoodsSKU.objects.filter(type=category).order_by('-id')
        # NOTE(review): one item per page looks like a debugging leftover —
        # confirm the intended page size.
        paginator = Paginator(skus, 1)
        try:
            page = int(page)
        except Exception:
            page = 1
        if page > paginator.num_pages:
            page = 1
        skus_page = paginator.page(page)
        # Page-link window: show every page when there are fewer than 5;
        # otherwise a 5-wide window clamped to the start/end of the range.
        num_pages = paginator.num_pages
        if num_pages < 5:
            pages = range(1, num_pages + 1)
        elif page <= 3:
            pages = range(1, 6)
        elif num_pages - page <= 2:
            pages = range(num_pages - 4, num_pages + 1)
        else:
            pages = range(page - 2, page + 3)
        # Two newest products of this category.
        new_skus = GoodsSKU.objects.filter(type=category).order_by('-create_time')[:2]
        cart_count = 0
        user = request.user
        if user.is_authenticated():
            # Cart badge: number of distinct items in the user's redis cart.
            conn = get_redis_connection('default')
            cart_count = conn.hlen('cart_%d' % user.id)
        context = {
            'type': category,
            'types': types,
            'skus_page': skus_page,
            'new_skus': new_skus,
            'cart_count': cart_count,
            'pages': pages,
        }
        return render(request, 'list.html', context)
| [
"976384846@qq.com"
] | 976384846@qq.com |
387f7ab70a3051285c0fb672dfc1abd0bcc59647 | 9085df69c15b6359978b597baceed0d78f10dd41 | /datasets/coco_custom_builtin_meta.py | eb783916f943f9c9d19fabacd6f4c6419fc0d8be | [] | no_license | CharelBIT/OSDetector.detectron2 | e92bf0f3f0e3401944ee4949318034db64f79dad | 07a9b7b3fc50ec0ae6f84650df4bdfce1709ad55 | refs/heads/master | 2023-02-21T19:02:42.006051 | 2021-01-31T06:58:41 | 2021-01-31T06:58:41 | 334,592,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py |
COCO_STEEL_CATEGORIES = [
{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "1"},
{"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "2"},
{"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "3"},
{"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "4"},]
def _get_coco_steel_instances_meta():
thing_ids = [k["id"] for k in COCO_STEEL_CATEGORIES if k["isthing"] == 1]
thing_colors = [k["color"] for k in COCO_STEEL_CATEGORIES if k["isthing"] == 1]
assert len(thing_ids) == 4, len(thing_ids)
# Mapping from the incontiguous COCO category id to an id in [0, 79]
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
thing_classes = [k["name"] for k in COCO_STEEL_CATEGORIES if k["isthing"] == 1]
ret = {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
"thing_colors": thing_colors,
}
return ret | [
"31466099+CharelBIT@users.noreply.github.com"
] | 31466099+CharelBIT@users.noreply.github.com |
a7677befea7a6b552e60676b28cd22a65f524086 | 91e1854f8d7150a9af2f006279d5999775c8cac2 | /gitchat/t1.py | ee5c70d83eb722485e7069678d758d32be6e50fd | [] | no_license | cmic1980/gitchat | d8048a4936f690f41f07ae38d1cbf97623a37b0c | dde1d1587f68ccfd58ab8ceaf3fa2f44c3e70dd0 | refs/heads/master | 2022-12-04T12:35:45.379061 | 2020-08-24T10:01:32 | 2020-08-24T10:01:32 | 289,866,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | import pdfkit
import gitchat.settings as settings
# Point pdfkit at the local wkhtmltopdf binary and export one GitChat
# book page to ./data/1.pdf, forwarding the session cookies configured
# in settings.COOKIE.
config = pdfkit.configuration(wkhtmltopdf='D:/dev/wkhtmltox/bin/wkhtmltopdf.exe')
url = 'https://gitbook.cn/books/5f3a7fefd8cfc5171638e2f4/index.html'  # page to export
options = {'cookie': list(settings.COOKIE.items())}
print(options)
pdfkit.from_url(url, './data/1.pdf', configuration=config, options=options)
| [
"miao.yu@ctx.sg"
] | miao.yu@ctx.sg |
8fe1adc8a2f7cc5727b0cd5a4a916d96de3c62be | 287230b6695941701830dd513273d516c7235ba9 | /prebuilts/gcc/linux-x86/arm/gcc-linaro-6.3.1-2017.05-x86_64_arm-linux-gnueabihf/share/gdb/python/gdb/command/explore.py | 3ae3ddf4d4507770ac8b76efca9071c9d41cd773 | [] | no_license | haohlliang/rv1126 | 8279c08ada9e29d8973c4c5532ca4515bd021454 | d8455921b05c19b47a2d7c8b682cd03e61789ee9 | refs/heads/master | 2023-08-10T05:56:01.779701 | 2021-06-27T14:30:42 | 2021-06-27T14:30:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:41b594eab49a76825efa70b96626c28561c2c913f05efdf6b4dfa05d7838aed5
size 26824
| [
"geierconstantinabc@gmail.com"
] | geierconstantinabc@gmail.com |
0ae5aa472863f78daed685a05bb3eafc6c6f559c | fb6e7922df3da2e9cdc37a00150d6d7663e907ff | /environment/rtfm/dynamics/item/weapon/tridents.py | 7f513dd0c963025c039f762d29b6e88477da154d | [
"Apache-2.0"
] | permissive | Spiph/GTG | c54a587002c42a032c89e8eceb5ec638f6c8c05f | 4a45032290d0c1364e4398684582c51094b245f5 | refs/heads/main | 2023-09-02T14:44:14.946624 | 2021-10-27T12:29:05 | 2021-10-27T12:29:05 | 393,086,007 | 0 | 0 | Apache-2.0 | 2021-08-05T15:09:07 | 2021-08-05T15:09:07 | null | UTF-8 | Python | false | false | 465 | py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .base_weapon import Weapon
from ... import dice as D, material as M
class BaseTrident(Weapon):
    """Marker base class for all trident-type weapons."""
    pass
class Trident(BaseTrident):
    def __init__(self):
        # Iron trident: weight 25, damage 3d4, no to-hit bonus.
        super().__init__('trident', weight=25, damage=D.Dice.from_str('3d4'), material=M.Iron, hit=0)
| [
"jzyjiangzhengyao@gmail.com"
] | jzyjiangzhengyao@gmail.com |
1f9f4decc5db879cfc598fe5c9b819fbed4f43a3 | b79bce0cf363d2b6dd11371d378d78d48e973270 | /kashgari/tasks/classification/base_model.py | 7000ff01695ba716d958546221c048d0d0394381 | [
"Apache-2.0"
] | permissive | CharlotteSean/Kashgari | 2d9338761b16d9804fb81ff92ce2ab1d256c80a7 | ab9970ecf6c0164416bfbbec1378c690b0f00d76 | refs/heads/master | 2022-01-22T03:52:12.284458 | 2019-07-17T03:48:04 | 2019-07-17T03:48:04 | 197,900,673 | 2 | 0 | Apache-2.0 | 2019-07-20T08:15:03 | 2019-07-20T08:15:03 | null | UTF-8 | Python | false | false | 8,165 | py | # encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_classification_model.py
# time: 2019-05-22 11:23
import random
import logging
import kashgari
from typing import Dict, Any, Tuple, Optional, List
from kashgari.tasks.base_model import BaseModel, BareEmbedding
from kashgari.embeddings.base_embedding import Embedding
from sklearn import metrics
class BaseClassificationModel(BaseModel):
    """Base class for text classification models.

    Subclasses provide the network architecture via ``build_model_arc()``
    and defaults via ``get_default_hyper_parameters()``.
    """

    __task__ = 'classification'

    def __init__(self,
                 embedding: Optional[Embedding] = None,
                 hyper_parameters: Optional[Dict[str, Dict[str, Any]]] = None):
        super(BaseClassificationModel, self).__init__(embedding, hyper_parameters)
        if hyper_parameters is None and \
                self.embedding.processor.__getattribute__('multi_label') is True:
            # Multi-label tasks need independent per-class probabilities,
            # so the final activation is switched to sigmoid.
            last_layer_name = list(self.hyper_parameters.keys())[-1]
            self.hyper_parameters[last_layer_name]['activation'] = 'sigmoid'
            logging.warning("Activation Layer's activate function changed to sigmoid for"
                            " multi-label classification question")

    @classmethod
    def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
        raise NotImplementedError

    def build_model_arc(self):
        raise NotImplementedError

    def compile_model(self, **kwargs):
        # Multi-label -> independent sigmoid outputs -> binary cross-entropy.
        if kwargs.get('loss') is None and self.embedding.processor.multi_label:
            kwargs['loss'] = 'binary_crossentropy'
        super(BaseClassificationModel, self).compile_model(**kwargs)

    def predict(self,
                x_data,
                batch_size=32,
                multi_label_threshold: float = 0.5,
                debug_info=False,
                predict_kwargs: Dict = None):
        """
        Generates output predictions for the input samples.

        Computation is done in batches.

        Args:
            x_data: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
            batch_size: Integer. If unspecified, it will default to 32.
            multi_label_threshold: minimum probability for a class to be
                reported in multi-label mode.
            debug_info: Bool, Should print out the logging info.
            predict_kwargs: arguments passed to ``predict()`` function of ``tf.keras.Model``

        Returns:
            array(s) of predictions.
        """
        # BUG FIX: predict_kwargs was accepted but never forwarded to
        # tf.keras.Model.predict (predict_top_k_class already did so).
        if predict_kwargs is None:
            predict_kwargs = {}
        with kashgari.utils.custom_object_scope():
            tensor = self.embedding.process_x_dataset(x_data)
            pred = self.tf_model.predict(tensor, batch_size=batch_size, **predict_kwargs)
            if self.embedding.processor.multi_label:
                if debug_info:
                    logging.info('raw output: {}'.format(pred))
                # Threshold each class probability independently.
                pred[pred >= multi_label_threshold] = 1
                pred[pred < multi_label_threshold] = 0
            else:
                pred = pred.argmax(-1)
            res = self.embedding.reverse_numerize_label_sequences(pred)
            if debug_info:
                logging.info('input: {}'.format(tensor))
                logging.info('output: {}'.format(pred))
                logging.info('output argmax: {}'.format(pred.argmax(-1)))
            return res

    def predict_top_k_class(self,
                            x_data,
                            top_k=5,
                            batch_size=32,
                            debug_info=False,
                            predict_kwargs: Dict = None) -> List[Dict]:
        """
        Generates output predictions with confidence for the input samples.

        Computation is done in batches.

        Args:
            x_data: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
            top_k: int
            batch_size: Integer. If unspecified, it will default to 32.
            debug_info: Bool, Should print out the logging info.
            predict_kwargs: arguments passed to ``predict()`` function of ``tf.keras.Model``

        Returns:
            array(s) of predictions.

            single-label classification:
              [
                {
                  "label": "chat",
                  "confidence": 0.5801531,
                  "candidates": [
                      { "label": "cookbook", "confidence": 0.1886314 },
                      { "label": "video", "confidence": 0.13805099 },
                      { "label": "health", "confidence": 0.013852648 },
                      { "label": "translation", "confidence": 0.012913573 }
                  ]
                }
              ]

            multi-label classification:
              [
                {
                  "candidates": [
                      { "confidence": 0.9959336, "label": "toxic" },
                      { "confidence": 0.9358089, "label": "obscene" },
                      { "confidence": 0.6882098, "label": "insult" },
                      { "confidence": 0.13540423, "label": "severe_toxic" },
                      { "confidence": 0.017219543, "label": "identity_hate" }
                  ]
                }
              ]
        """
        if predict_kwargs is None:
            predict_kwargs = {}
        with kashgari.utils.custom_object_scope():
            tensor = self.embedding.process_x_dataset(x_data)
            pred = self.tf_model.predict(tensor, batch_size=batch_size, **predict_kwargs)
            new_results = []

            for sample_prob in pred:
                # Rank this sample's classes by descending probability.
                sample_res = zip(self.label2idx.keys(), sample_prob)
                sample_res = sorted(sample_res, key=lambda k: k[1], reverse=True)
                data = {}
                for label, confidence in sample_res[:top_k]:
                    if 'candidates' not in data:
                        if self.embedding.processor.multi_label:
                            data['candidates'] = []
                        else:
                            # Single-label: the best class is promoted to the
                            # top level and excluded from the candidates.
                            data['label'] = label
                            data['confidence'] = confidence
                            data['candidates'] = []
                            continue
                    data['candidates'].append({
                        'label': label,
                        'confidence': confidence
                    })

                new_results.append(data)

            if debug_info:
                logging.info('input: {}'.format(tensor))
                logging.info('output: {}'.format(pred))
                logging.info('output argmax: {}'.format(pred.argmax(-1)))
        return new_results

    def evaluate(self,
                 x_data,
                 y_data,
                 batch_size=None,
                 digits=4,
                 output_dict=False,
                 debug_info=False) -> Optional[Tuple[float, float, Dict]]:
        """Predict on (x_data, y_data) and report precision/recall/F1.

        Prints the report and returns None unless *output_dict* is True,
        in which case the report dict is returned.
        """
        y_pred = self.predict(x_data, batch_size=batch_size)

        if debug_info:
            for index in random.sample(list(range(len(x_data))), 5):
                logging.debug('------ sample {} ------'.format(index))
                logging.debug('x : {}'.format(x_data[index]))
                logging.debug('y : {}'.format(y_data[index]))
                logging.debug('y_pred : {}'.format(y_pred[index]))

        if self.pre_processor.multi_label:
            # NOTE(review): the binarizer is re-fitted on predictions and
            # truths separately; verify its class order stays stable.
            y_pred_b = self.pre_processor.multi_label_binarizer.fit_transform(y_pred)
            y_true_b = self.pre_processor.multi_label_binarizer.fit_transform(y_data)
            # BUG FIX: sklearn's classification_report expects
            # (y_true, y_pred); the arguments were swapped here, silently
            # exchanging precision and recall in the report.
            report = metrics.classification_report(y_true_b,
                                                   y_pred_b,
                                                   target_names=self.pre_processor.multi_label_binarizer.classes_,
                                                   output_dict=output_dict,
                                                   digits=digits)
        else:
            report = metrics.classification_report(y_data,
                                                   y_pred,
                                                   output_dict=output_dict,
                                                   digits=digits)
        if not output_dict:
            print(report)
        else:
            return report
if __name__ == "__main__":
    # Smoke-test hook; this module is normally only imported.
    print("Hello world")
| [
"eliyar917@gmail.com"
] | eliyar917@gmail.com |
259e794cad0040bcd4708de22d5d229d14681030 | c085b06c9eb220eb40b5ada840886c09a152f053 | /Libros de Python/web/web/ejer/tema-01/compose1.py.txt | d8040cca2291387aa8843870ff3af3f23cb0674a | [] | no_license | thanggc/libros | 7d3bf564c5a227f08390fbcc6721a0aed160e3e0 | 430c03fe97544d263b5c3a665327b527d9c223a8 | refs/heads/master | 2020-12-25T13:23:38.527089 | 2013-04-29T23:14:08 | 2013-04-29T23:14:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | txt |
def compose(*funcs):
    """Compose *funcs* right to left: compose(f, g)(x) == f(g(x)).

    With no arguments the identity function is returned.
    """
    def composed(value):
        for func in funcs[::-1]:
            value = func(value)
        return value
    return composed
| [
"mini.guero@hotmail.com"
] | mini.guero@hotmail.com |
5a39eb4515a0c355d70391a8ba46300d1c5c0dda | 21021d152b1ad38d16aec37de02b286726481d59 | /submitter.py | d88edcb1f5ab845cae22b6ba95225cacc1bc80f4 | [] | no_license | drewtu2/neu_timesheet_bot | 53f0a7bb9fc020091942c94ac65bd3b4e2191170 | 575f7092fc0c5b565883422a8e2572cabc25dd4c | refs/heads/master | 2021-09-14T23:08:20.488549 | 2018-05-21T21:21:07 | 2018-05-21T21:21:07 | 115,580,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,287 | py | #! python3
import pyautogui
import logging
import mylogger
import datetime
import csv
import cv2
import os
import sys
from math import pow
from time import sleep
DAYS_OF_WEEK = ["Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday"]
def generateEntries(config_file):
    """Parse a space-delimited shift config into start/end entry dicts.

    Each non-comment line of ``configs/<config_file>`` has the form::

        Monday 9:00AM Monday 9:45AM

    and becomes::

        {"Start": {"Day": "Monday",  "Time": "9:00AM"},
         "End":   {"Day": "Monday",  "Time": "9:45AM"}}

    Lines starting with ``#`` are skipped. When *config_file* is None,
    ``myconfig.csv`` is used. Returns the list of entry dicts; exits the
    program if the file is missing.
    """
    if config_file is None:
        config_file = "myconfig.csv"
    logging.info("Generating entries from %s", config_file)

    directory = "configs/"
    field_names = ["DayStart", "TimeStart", "DayEnd", "TimeEnd"]
    entries = []
    try:
        if not os.path.exists(directory):
            os.makedirs(directory)
        with open(directory + config_file, "r") as fp:
            # Drop comment lines before handing the stream to csv.
            rows = csv.DictReader(
                (line for line in fp if not line.startswith('#')),
                fieldnames=field_names, delimiter=" ")
            for row in rows:
                record = dict(row)
                entries.append({
                    "Start": {"Day": record["DayStart"], "Time": record["TimeStart"]},
                    "End": {"Day": record["DayEnd"], "Time": record["TimeEnd"]},
                })
    except FileNotFoundError as e:
        logging.error(e)
        quit()

    logging.debug("Entries generated")
    return entries
def generateTimeList():
    """Build ``([start times], [end times])`` at 15-minute resolution.

    Start times run 12:00AM through 11:45PM. End times run 12:15AM of the
    same day through 5:00AM of the following day (wrapped to plain
    ``datetime.time`` values, so early-morning times appear twice).
    """
    step = datetime.timedelta(minutes=15)

    def quarter_hours(first, last):
        # Inclusive range of quarter-hour datetimes, reduced to times.
        count = int((last - first) / step) + 1
        return [(first + i * step).time() for i in range(count)]

    day = datetime.datetime(2000, 1, 1)
    start_times = quarter_hours(day, day + datetime.timedelta(hours=23, minutes=45))
    end_times = quarter_hours(day + step, day + datetime.timedelta(days=1, hours=5))
    return (start_times, end_times)
# List of times from [12:00AM - 11:45PM] at 15 minute intervals
TIMES = generateTimeList()
class neu_job_bot():
    """Drives the NEU student-employment timesheet web form via pyautogui.

    Each form control is located on screen from a reference screenshot in
    ``form_items/``, then clicked and keyed through. All screen lookups go
    through click_image(), which raises ValueError when the reference image
    cannot be found; retry() re-attempts failed actions with a growing
    delay and exits the program after num_retries failures.
    """

    # Reference screenshots used to locate the form controls on screen.
    ADD_ENTRY = "form_items/AddEntry.png"
    START_TIME = "form_items/StartTime.png"
    END_TIME = "form_items/EndTime.png"
    DAY = "form_items/Day.png"
    ADD = "form_items/Add.png"
    SMALL_SCREEN = "form_items/SmallScreen.png"
    FULL_SCREEN = "form_items/FullScreen.png"

    LOAD_DELAY = 1  # seconds to let the page settle after each action
    IS_START = 0    # index into TIMES for the start-time dropdown
    IS_END = 1      # index into TIMES for the end-time dropdown

    def __init__(self):
        # Maximum number of re-attempts for a failed UI action.
        self.num_retries = 5

    def run(self, configFile=None):
        """Read shift entries from *configFile* and submit each one."""
        entries = generateEntries(configFile)

        # Give the user a chance to kill the script.
        print('>>> 5 SECOND PAUSE TO LET USER PRESS CTRL-C <<<')
        sleep(5)

        for entry in entries:
            sleep(self.LOAD_DELAY)
            self.add_entry()
            self.set_start(entry["Start"])
            self.set_end(entry)
            self.set_submit()

    def add_entry(self, retried=False):
        """Click the "Add Entry" button, retrying on failure."""
        try:
            logging.debug("Trying \"Add Entry\"")
            self.click_image(self.ADD_ENTRY)
            logging.info("Clicked \"Add Entry\"")
            sleep(self.LOAD_DELAY)
        except ValueError as e:
            logging.error(e)
            self.retry(self.add_entry, None, retried)

    def set_day(self, day, retried=False):
        """Open the day dropdown and select *day* (e.g. "Monday")."""
        index = DAYS_OF_WEEK.index(day)
        try:
            self.click_image(self.DAY)
            logging.info("Clicked \"Day\"")
            # The dropdown opens on Sunday; arrow down to the target day.
            pyautogui.typewrite(["down" for i in range(index)])
            pyautogui.typewrite(["enter"])
            logging.info("Entered Day")
            sleep(self.LOAD_DELAY)
        except ValueError as e:
            logging.error(e)
            self.retry(self.set_day, day, retried)

    def set_start(self, start, retried=False):
        """Select the start day and start time of one entry."""
        try:
            self.set_day(start["Day"])
            logging.debug("Trying \"set_start\"")
            self.click_image(self.START_TIME)
            logging.info("Clicked \"set_start\"")
            self.set_time(start["Time"], self.IS_START)
            logging.info("Set Start Time")
            sleep(self.LOAD_DELAY)
        except ValueError as e:
            logging.error(e)
            self.retry(self.set_start, start, retried)

    def set_end(self, entry, retried=False):
        """Select the end time of *entry* (a {"Start":..., "End":...} dict)."""
        # Differing start/end days mean the shift wrapped past midnight, so
        # the end time must be picked from the next-day part of the list.
        new_day = entry["Start"]["Day"] != entry["End"]["Day"]
        end_time = entry["End"]["Time"]
        try:
            logging.debug("Trying \"set_end\"")
            self.click_image(self.END_TIME)
            logging.info("Clicked \"set_end\"")
            self.set_time(end_time, self.IS_END, new_day)
            logging.info("Set End Time")
            sleep(self.LOAD_DELAY)
        except ValueError as e:
            logging.error(e)
            # BUG FIX: this handler referenced undefined names
            # (start_time, retried) and crashed with NameError.
            self.retry(self.set_end, entry, retried)

    def set_submit(self, retried=False):
        """Click the "Add" button to submit the current entry."""
        try:
            self.click_image(self.ADD)
            sleep(self.LOAD_DELAY)
        except ValueError as e:
            logging.error(e)
            # BUG FIX: 'retried' was undefined here; it is now a parameter.
            self.retry(self.set_submit, None, retried)

    def set_time(self, time, is_end, new_day=False):
        """Key through an open time dropdown to *time* and press Enter.

        The dropdown opens positioned at 8:00AM; we compute how many
        up/down arrow presses move from there to the target slot.
        """
        dtfmt = '%I:%M%p'
        anchor = datetime.datetime.strptime("8:00AM", dtfmt).time()
        target = datetime.datetime.strptime(time, dtfmt).time()

        # NOTE(review): "+ is_end" assumes the end dropdown's anchor sits
        # one slot later than the start dropdown's -- confirm in the UI.
        index_current = TIMES[is_end].index(anchor) + is_end
        if new_day:
            # Early-morning times appear twice in the end-time list; a
            # past-midnight shift needs the last (next-day) occurrence.
            occurrences = [i for i, val in enumerate(TIMES[is_end]) if val == target]
            index_goal = occurrences[-1]
        else:
            index_goal = TIMES[is_end].index(target)

        index_delta = index_goal - index_current
        if index_delta > 0:
            pyautogui.typewrite(["down"] * index_delta)
        elif index_delta < 0:
            # BUG FIX: range(index_delta) over a negative delta is empty,
            # so no "up" key was ever pressed; press it abs(delta) times.
            pyautogui.typewrite(["up"] * -index_delta)
        pyautogui.typewrite(["enter"])

    def click_image(self, png_name):
        """Click the center of *png_name* found on screen; return the match.

        Raises:
            ValueError: when the reference image is not found on screen.
        """
        button = pyautogui.locateCenterOnScreen(png_name, confidence=.8)
        if button is None:
            raise ValueError("Couldn't find the button")
        # NOTE(review): halving assumes a 2x (HiDPI/retina) display scale.
        x = button[0] / 2
        y = button[1] / 2
        pyautogui.moveTo(x, y)
        pyautogui.click()
        logging.debug("Location returned: %s", button)
        return button

    def retry(self, fx, arg, retried):
        """Re-invoke *fx* (with *arg* when given) after an increasing delay.

        *retried* counts prior attempts; after num_retries failures the
        program exits.
        """
        if retried < self.num_retries:
            retried += 1
            delay = retried
            logging.info("Retrying %s attempt %d after %d second delay...",
                         fx.__name__,
                         retried,
                         delay)
            sleep(delay)
            # BUG FIX: 'if arg:' dropped falsy-but-valid arguments; test
            # explicitly against None instead.
            if arg is not None:
                fx(arg, retried=retried)
            else:
                fx(retried=retried)
        else:
            logging.info("Retry failed....")
            quit()
if __name__ == "__main__":
    # Configure log handlers before the bot starts emitting messages.
    mylogger.config_logs()
    #generateEntries()
    bot = neu_job_bot()
    # A single command-line argument, when present, is forwarded to run().
    extra = sys.argv[1] if len(sys.argv) == 2 else None
    if extra is None:
        bot.run()
    else:
        bot.run(extra)
| [
"drewtu2@yahoo.com"
] | drewtu2@yahoo.com |
badf45471c3b327d2c70128f37e7ce72f490e432 | 4681fa51f8d7389229dafa6124b948a08ee49baf | /mypetshop/wsgi.py | c0748ffeec8857066295faffd2e59f9057d6e4d8 | [] | no_license | kattyxDD/PetShop_kt | 40d2e8329e98777b10603d345f158caf0f617a30 | 92a4ee3eb0be70d0dbcb7400d16f6fea18b3d20f | refs/heads/master | 2020-08-27T13:39:12.609981 | 2019-11-20T21:56:41 | 2019-11-20T21:56:41 | 217,391,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for mypetshop project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mypetshop.settings')
# Module-level WSGI callable that application servers import and serve.
application = get_wsgi_application()
| [
"kat.vargasm@alumnos.duoc.cl"
] | kat.vargasm@alumnos.duoc.cl |
f189e83fa455180aaf2bfc05218e55cf1702ea2d | 1d190884f14859b5859257bba8f6c32093c8670a | /plugin/Generator.py | c7a19cafbd52cbf06efcb15b2916a9ad5fac7d6a | [] | no_license | umlfri-old/addon_code_generation | ed8aeb1fa1560ab0a1f12fee58adf50782729c1a | f04d85b68930ef4a49b66a01a3caf6d7799a0c3d | refs/heads/master | 2021-01-20T16:25:02.586449 | 2015-02-11T06:01:34 | 2015-02-11T06:01:34 | 90,839,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | import os
import os.path
#from Documentation import CDocumentation
class CGenerator:
    """Drives code generation for a template type into an output path.

    `type` is expected to expose GetElement(name) and GetElements();
    `path` is the destination directory for generated files.
    """
    def __init__(self, type, path = None):
        self.type = type
        self.path = path
    def GetType(self):
        """Return the template type object."""
        return self.type
    def SetType(self, type):
        """Set the template type object.

        Fixed: this was originally (mis)named SetPath, which mutated
        self.type and was silently shadowed by the real SetPath defined
        below it, making the method unreachable.
        """
        self.type = type
    def GetPath(self):
        """Return the output path."""
        return self.path
    def SetPath(self, path):
        """Set the output path."""
        self.path = path
    def GenerateElement(self, elementObj):
        """Generate output for one element using the template matching its type name."""
        template = self.type.GetElement(elementObj.type.name)
        if (template is not None):
            template.Generate(self.type.GetElements(), elementObj, self.path)
#def GenerateDocumentation(self, name, project, rootNode = None):
# if rootNode is None:
# rootNode = project.GetRoot()
# element = CDocumentation(name, rootNode)
# template = self.type.GetElement("documentation")
# template.Generate(template, element, self.path)
# del element | [
"bax007@gmail.com"
] | bax007@gmail.com |
aaf8ee7c32b2e9d10a2f3f84ac1cab0e7b8e8bf5 | b37a6f215e52ef3884c39cf337a9f2ebdbc142aa | /test/test_raysync_z_download.py | 0bc3e760a8e4737f7a08eb8d349def3d9d1be8fb | [] | no_license | wangxinlei995/test | 9fe73c04a095f4c876a2c0c442eebebda94f3461 | 891fd9c3514f692a4f22bcce9f415080400849be | refs/heads/master | 2020-04-08T19:07:46.499539 | 2018-11-29T09:38:27 | 2018-11-29T09:38:27 | 159,641,187 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,423 | py | import ctypes
import unittest
from public.config import Config,SDK_PATH,DOWNLOAD_PATH
import os
from public.log import logger
import time
from public.transfer_public import upload_task,statechanged_func
class TestRaysyncDownload(unittest.TestCase):
    """Basic download tests for the Raysync transfer SDK.

    Each test logs in to the server configured in config.yml, starts a
    download through the native SDK (via ctypes) and polls the shared
    upload_task.task_state until the transfer reaches a final state
    (>= 9); state 10 means the transfer completed successfully.
    """
    URL = Config().get('URL')
    port = Config().get('PORT')
    username = Config().get('USERNAME')
    password = Config().get('PASSWORD')
    # The native SDK library is loaded once for the whole test class.
    lib = ctypes.CDLL(SDK_PATH)
    def setUp(self):
        """Create an SDK instance, connect, log in and clear the transfer list."""
        self.instance = self.lib.Raysync_CreateRaysyncInterface()
        # Confirm the DLL handle is usable.
        # Fixed: these handlers called the undefined name `logging`; the file
        # only imports `logger` (from public.log), so a triggered handler
        # would itself have raised NameError.
        try:
            self.lib
        except:
            logger.info("dll文件不存在")
        # Connect to the loaded library.
        try:
            self.lib.Raysync_Connect(self.instance, 500)
        except:
            logger.info("Raysync_Connect 失败")
        # Log in to the client; server address, port, user name and password
        # can be changed in config.yml.
        try:
            self.lib.Raysync_Login(self.instance, bytes(self.URL, encoding='gbk'), self.port, bytes(self.username, encoding='gbk'),
                          bytes(self.password, encoding='gbk'))
        except:
            logger.info('登录失败,请检查服务器地址/端口/用户名/密码是否正确')
        # Reset the shared transfer state before each test.
        upload_task.task_state = 0
        # List the remote root directory.
        self.lib.Raysync_List(self.instance, "/")
        time.sleep(2)
        # Empty the transfer list so previous tasks do not interfere.
        self.lib.Raysync_DeleteAllTask(self.instance)
        time.sleep(1)
    def test_download_1(self):
        '''Download a single file (original: 正常下载单个文件).'''
        # Register the task-state callback.
        self.lib.Raysync_SetTaskStateChangedCallback(self.instance, statechanged_func)
        # Build a C string array for the file names: c_char_p * (count + 1).
        files = (ctypes.c_char_p * 2)()
        files[0] = ctypes.c_char_p(b'burpsuite_community_windows-x64_v1_7_36.exe')
        self.lib.Raysync_Download(self.instance, bytes(DOWNLOAD_PATH, encoding='utf8'), '/', files, None, 'download_task_1')
        time.sleep(2)
        # Poll until the task reaches a final state (10 == success).
        while True:
            if upload_task.task_state >= 9:
                break
            else:
                time.sleep(1)
        self.assertTrue(upload_task.task_state == 10)
        self.assertTrue(os.path.exists(DOWNLOAD_PATH + '\\burpsuite_community_windows-x64_v1_7_36.exe'))
    def test_download_2(self):
        '''Download a single folder (original: 正常下载单个文件夹).'''
        self.lib.Raysync_SetTaskStateChangedCallback(self.instance, statechanged_func)
        # Build a C string array for the folder name: c_char_p * (count + 1).
        files = (ctypes.c_char_p * 2)()
        files[0] = ctypes.c_char_p(b'upload_task')
        # Download the remote 'upload_task' directory.
        self.lib.Raysync_Download(self.instance,bytes(DOWNLOAD_PATH, encoding='utf8') ,'/',files,None,'download_task_2')
        while True:
            if upload_task.task_state >= 9:
                break
            else:
                time.sleep(1)
        self.assertTrue(upload_task.task_state == 10)
        self.assertTrue(os.path.exists(DOWNLOAD_PATH + '\\upload_task'))
    def test_download_3(self):
        '''Download several files at once (original: 正常下载多个文件).'''
        self.lib.Raysync_SetTaskStateChangedCallback(self.instance, statechanged_func)
        upload_file = ['167_MPG.mpg', '英文max-webm.webm', '中文maya_mp4格式.mp4', '中文maya—WNV.wmv']
        # Build a C string array for the file names: c_char_p * (count + 1).
        files = (ctypes.c_char_p * (len(upload_file) + 1))()
        for index, name in enumerate(upload_file):
            files[index] = ctypes.c_char_p(bytes(name, encoding='utf8'))
        self.lib.Raysync_Download(self.instance,bytes(DOWNLOAD_PATH, encoding='utf8') , '/' , files , None , 'download_task_3')
        while True:
            if upload_task.task_state >= 9:
                break
            else:
                time.sleep(1)
        self.assertTrue(upload_task.task_state == 10)
    def test_download_4(self):
        '''Download a single file under the local name test.mov (original: 下载单个文件至本地,指定名称为test.mov).'''
        try:
            # Fixed: os.remove() takes a single path; the original passed the
            # directory and file name as two positional arguments (TypeError,
            # silently swallowed by the bare except below).
            os.remove(os.path.join(DOWNLOAD_PATH, '167-mov.mov'))
        except:
            logger.info('无需删除')
        # Register the task-state callback.
        self.lib.Raysync_SetTaskStateChangedCallback(self.instance, statechanged_func)
        files = (ctypes.c_char_p * 2)()
        files[0] = ctypes.c_char_p(b'burpsuite_community_windows-x64_v1_7_36.exe')
        # Second array supplies the local (renamed) target file name.
        files_download = (ctypes.c_char_p * 2)()
        files_download[0] = ctypes.c_char_p(b'test.mov')
        self.lib.Raysync_Download(self.instance, bytes(DOWNLOAD_PATH, encoding='utf8'), '/', files, files_download, 'download_task_4')
        while True:
            if upload_task.task_state >= 9:
                break
            else:
                time.sleep(1)
        self.assertTrue(upload_task.task_state == 10)
        self.assertTrue(os.path.exists(DOWNLOAD_PATH + '\\test.mov'))
    def tearDown(self):
        """Destroy the SDK instance after every test case."""
        self.lib.Raysync_DestroyRaysyncInterface(self.instance)
#每个用例测试结束时,销毁实例 | [
"772725218@qq.com"
] | 772725218@qq.com |
57f17c6d24bea0ddbab1fe2adb4696e3bc7d2812 | b10c95923eadb9abb6aff4a7a6dd8aa199a0c54b | /app_ex/Mercury/views.py | d2076f749901cbd82b477511f031e06f93a4e664 | [
"Apache-2.0"
] | permissive | huisezhiyin/Solar | 4a720d4ea1c034f0b2ff674411ab792d695d62cd | e1c9e548c2e922ef63764ac90b9ee237d005b8fc | refs/heads/master | 2020-05-16T01:42:41.197115 | 2019-04-22T06:17:25 | 2019-04-22T06:17:25 | 182,609,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,877 | py | from rest_framework.viewsets import GenericViewSet
from rest_framework.decorators import action
from rest_framework.response import Response
from app_ex.Mercury.models import User
from django.contrib.auth import login, logout, authenticate
from django.http.response import HttpResponseRedirect
from rest_framework.permissions import IsAuthenticated
from app_ex.Mercury.serializers import UserInfoSerializer
# Create your views here.
class UserViewSet(GenericViewSet):
    """Unauthenticated endpoints for account creation and login."""

    @action(methods=["POST"], detail=False)
    def user_register(self, request, *args, **kwargs):
        """Create an account from the POSTed credentials, log it in and redirect."""
        new_user = User.objects.create_user(
            nickname=request.data.get("nickname"),
            username=request.data.get("username"),
            password=request.data.get("password"),
        )
        login(request, new_user)
        return HttpResponseRedirect(redirect_to="/logged_user/user_info/")

    @action(methods=["POST"], detail=False)
    def user_login(self, request, *args, **kwargs):
        """Authenticate the POSTed credentials; redirect home on success, 403 otherwise."""
        authenticated_user = authenticate(
            username=request.data.get("username"),
            password=request.data.get("password"),
        )
        if authenticated_user is None:
            return Response(status=403, data={"message": "帐号或密码错误"})
        login(request, authenticated_user)
        return HttpResponseRedirect(redirect_to="/home/")
class LoggedUserViewSet(GenericViewSet):
    """Endpoints that require an authenticated session."""

    permission_classes = (IsAuthenticated,)

    @action(methods=["POST"], detail=False)
    def user_logout(self, request, *args, **kwargs):
        """End the session and send the user back to the home page."""
        logout(request)
        return HttpResponseRedirect(redirect_to="/home/")

    @action(methods=["GET"], detail=False)
    def user_info(self, request, *args, **kwargs):
        """Serialize and return the current user's profile data."""
        return Response(data=UserInfoSerializer(request.user).data)
| [
"837364695@qq.com"
] | 837364695@qq.com |
c31f8fda7fccbe3ee02c224b83ad81987d17fbf7 | c579e5c86dd506f25a6566e2a43ed4a28c46b9cc | /myapp/migrations/0005_auto_20190715_0002.py | 40687135b9101501cf7760cc140318001d77274f | [] | no_license | VincentMarx/upload_file_to_GCS_with_django | 15bf0ab8b3ad420557f6f25082aada49532cfa45 | 797fab4020c9d6dda199389e05ed6d2a8adb1a20 | refs/heads/master | 2022-06-19T10:11:35.946483 | 2019-08-02T13:10:40 | 2019-08-02T13:10:40 | 195,653,835 | 0 | 0 | null | 2022-05-25T03:38:27 | 2019-07-07T13:22:38 | Python | UTF-8 | Python | false | false | 513 | py | # Generated by Django 2.2.3 on 2019-07-14 16:02
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: alters two fields of the 'document'
    # model in app 'myapp' to the generic models.Field type.
    # NOTE(review): a bare models.Field() is unusual in a generated migration;
    # a concrete field class (e.g. CharField) would normally appear here —
    # confirm against the current myapp models.
    dependencies = [
        ('myapp', '0004_auto_20190713_2326'),
    ]
    operations = [
        migrations.AlterField(
            model_name='document',
            name='filename',
            field=models.Field(),
        ),
        migrations.AlterField(
            model_name='document',
            name='uploadedby',
            field=models.Field(),
        ),
    ]
| [
"q1w2e3r4"
] | q1w2e3r4 |
6ee20a4a8435db0bfc40638cceef71df51f88e65 | 4e4c5827ed94024d499982279ce611b893c03572 | /Azure Firewall/Script - Migrate Checkpoint config to Azure Firewall Policy/chkp2azfw.py | 3d99c0cb6d834b73264997c9c8125e14c234c1a6 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/Azure-Network-Security | 19a51076e5eda76e9808845792421b82ea5afb84 | 32141bb734518d5ae51bed5f7ca824a01b04ab49 | refs/heads/master | 2023-08-30T20:53:07.435480 | 2023-08-28T15:55:12 | 2023-08-28T15:55:12 | 215,905,001 | 690 | 264 | MIT | 2023-09-11T06:38:17 | 2019-10-17T23:46:28 | Python | UTF-8 | Python | false | false | 43,780 | py | import argparse
import json
import re
import os
import sys
import copy
# https://docs.python.org/3/library/ipaddress.html
import ipaddress
# Helper functions
# Arguments
parser = argparse.ArgumentParser(description='Generate an ARM template to create a Rule Collection Group from a Checkpoint ruleset exported with the Show Package Tool (https://support.checkpoint.com/results/sk/sk120342).')
# Input file and Azure Firewall Policy identity.
parser.add_argument('--json-index-file', dest='json_index_file', action='store',
                    default="./index.json",
                    help='Local file containing in JSON the links to the rest of the exported JSON files. The default is "./index.json"')
parser.add_argument('--policy-name', dest='policy_name', action='store',
                    default="azfwpolicy",
                    help='Name for the Azure Firewall Policy. The default is "azfwpolicy"')
parser.add_argument('--policy-sku', dest='policy_sku', action='store',
                    default="Standard",
                    help='SKU for the Azure Firewall Policy. Possible values: Standard, Premium (default: Standard)')
parser.add_argument('--do-not-create-policy', dest='dont_create_policy', action='store_true',
                    default=False,
                    help='If specified, do not include ARM code for the policy, only for the rule collection group. Use if the policy already exists.')
# Rule Collection Group settings. Note the priority is kept as a string.
parser.add_argument('--rcg-name', dest='rcg_name', action='store',
                    default="importedFromCheckpoint",
                    help='Name for the Rule Collection Group to create in the Azure Firewall Policy. The default is "importedFromCheckpoint"')
parser.add_argument('--rcg-priority', dest='rcg_prio', action='store',
                    default="10000",
                    help='Priority for the Rule Collection Group to create in the Azure Firewall Policy. The default is "10000"')
# Conversion behavior toggles (the --no-* flags store False when present).
parser.add_argument('--no-ip-groups', dest='use_ipgroups', action='store_false',
                    default=True,
                    help='Whether some address groups should be converted to Azure IP Groups (default: True)')
parser.add_argument('--no-app-rules', dest='use_apprules', action='store_false',
                    default=True,
                    help='Whether it will be attempted to convert network rules using HTTP/S to application rules. Note that this might be a problem if a explicit network deny exists (default: True)')
parser.add_argument('--max-ip-groups', dest='max_ipgroups', action='store', type=int, default=50,
                    help='Optional, maximum number of IP groups that will be created in Azure')
parser.add_argument('--rule-uid-to-name', dest='rule_id_to_name', action='store_true',
                    default=False,
                    help='Includes the UID of the Checkpoint rule in the name of the Azure rule, useful for troubleshooting (default: False)')
parser.add_argument('--remove-explicit-deny', dest='remove_explicit_deny', action='store_true',
                    default=False,
                    help='If a deny any/any is found, it will not be converted to the Azure Firewall syntax. Useful if using application rules (default: False)')
# Output control.
parser.add_argument('--output', dest='output', action='store',
                    default="none",
                    help='Output format. Possible values: json, none')
parser.add_argument('--pretty', dest='pretty', action='store_true',
                    default=False,
                    help='Print JSON in pretty mode (default: False)')
parser.add_argument('--log-level', dest='log_level_string', action='store',
                    default='warning',
                    help='Logging level (valid values: error/warning/info/debug/all/none. Default: warning)')
args = parser.parse_args()
# Variables: mutable module-level state shared by the conversion helpers below.
az_app_rcs = []        # Azure application rule collections
az_net_rcs = []        # Azure network rule collections
ipgroups = []          # IP group candidates found while inspecting layers
discarded_rules = []   # UIDs of Checkpoint rules that could not be converted
rcg_name = args.rcg_name
rcg_prio = args.rcg_prio
rc_net_name = 'from-chkp-net'
rc_net_prio_start = "10000"
rc_app_name = 'from-chkp-app'
rc_app_prio_start = "11000"
# Statistics counters. Fixed: cnt_apprules was assigned twice in the original.
cnt_apprules = 0
cnt_allow = 0
cnt_deny = 0
cnt_disabledrules = 0
cnt_netrules_ip = 0
cnt_netrules_fqdn = 0
cnt_chkp_rules = 0
# Returns true if the string is a number
def is_number(value):
    """Return True if *value* represents a (possibly signed) integer.

    Fixed: the original returned True when ANY character was a digit
    (e.g. "7x"), contradicting its stated contract. Callers guard the
    subsequent int() with try/except, so the stricter check is safe.
    """
    try:
        int(value)
    except (ValueError, TypeError):
        return False
    return True
# Returns a string formatted to be used as a name in Azure
def format_to_arm_name(name):
    """Sanitize a Checkpoint object name so it is usable as an ARM resource name."""
    # '.' and '/' are replaced with '-', spaces with '_'.
    translation = str.maketrans({'.': '-', '/': '-', ' ': '_'})
    return name.translate(translation)
# Returns true if the string is a UID
def is_uid(value):
    """Return True if *value* looks like a Checkpoint UID (8-4-4-4-12 dashed form).

    Fixed: the original returned None (implicitly) instead of False on a
    mismatch; callers only use truthiness, so returning a real bool is
    backward compatible and clearer.
    """
    return (len(value) == 36 and value[8] == '-' and value[13] == '-'
            and value[18] == '-' and value[23] == '-')
# Finds an object in a list by its UID
def find_uid(object_list, uid):
    """Return the first object whose 'uid' matches, or None when absent."""
    return next((candidate for candidate in object_list if candidate['uid'] == uid), None)
# Returns true if there is an IP group with the same chkp id
def is_ipgroup(ipgroup_list, uid):
    """Return True when an IP group with the given Checkpoint id already exists."""
    return any(group['id'] == uid for group in ipgroup_list)
# Returns IP Group corresponding to the chkp id
def find_ipgroup(ipgroup_list, uid):
    """Return the IP group dict matching the Checkpoint id, or None when absent."""
    return next((group for group in ipgroup_list if group['id'] == uid), None)
def is_fqdn(str_var):
    """True if the parameter is a valid FQDN per RFC 952/1123 (length and label rules)."""
    pattern = r"(?=^.{4,253}$)(^((?!-)[a-zA-Z0-9-]{1,63}(?<!-)\.)+[a-zA-Z]{2,4}$)"
    return re.match(pattern, str(str_var)) is not None
def is_ipv4(str_var):
    """True if the parameter looks like a dotted-quad IPv4 address, optionally with /mask.

    Deliberately loose (999.999.999.999/99 would match): addresses in the
    exported policy are assumed to be valid already.
    """
    pattern = r"^([0-9]{1,3}\.){3}[0-9]{1,3}($|/[0-9]{1,2}$)"
    return re.match(pattern, str(str_var)) is not None
# Perform some checks on the rule to add, and append it to the list of rules provided in the 2nd argument
# Some rules need to be broken down in multiple ones, so the function adds a suffix to the created rules in this case
def append_rule(rule_to_be_appended, rules_to_append_to):
    """Append an Azure Firewall rule, splitting it per (source, destination) field pair.

    One input rule becomes one output rule for every non-empty combination of
    a source field and a destination field.  When more than one rule results,
    a numeric suffix is appended to the rule name.  The auxiliary
    sourceServiceTags/destinationServiceTags fields are folded back into
    sourceAddresses/destinationAddresses.  Returns the (mutated) second argument.
    """
    if log_level >= 8:
        print("DEBUG: appending to rules:", str(rule_to_be_appended), file=sys.stderr)
    src_fields = ('sourceAddresses', 'sourceIpGroups', 'sourceServiceTags')
    dst_fields = ('destinationAddresses', 'destinationIpGroups', 'destinationFqdns', 'destinationServiceTags')
    all_fields = src_fields + dst_fields
    # Count how many rules we will be splitting (to avoid unnecessary suffixes if there is only one rule)
    total_rule_no = 0
    for src_field in src_fields:
        for dst_field in dst_fields:
            if len(rule_to_be_appended[src_field]) > 0 and len(rule_to_be_appended[dst_field]) > 0:
                total_rule_no += 1
    # Process the rule
    split_rule_counter = 0
    for src_field in src_fields:
        for dst_field in dst_fields:
            # Only look at combinations where the src_field and dst_field are non-zero
            if len(rule_to_be_appended[src_field]) > 0 and len(rule_to_be_appended[dst_field]) > 0:
                # Should we split a rule that contains both IP addresses and service tags in either sourceAddresses or destinationAddresses?
                # Shallow copy: list values are shared with the input rule, but only
                # whole-field reassignments happen below, so the input stays intact.
                temp_rule = copy.copy(rule_to_be_appended)
                split_rule_counter += 1
                if total_rule_no > 1:
                    temp_rule['name'] = temp_rule['name'] + '-' + str(split_rule_counter)
                else:
                    temp_rule['name'] = temp_rule['name']
                # Blank all the rest fields
                for blank_field in all_fields:
                    if blank_field != src_field and blank_field != dst_field:
                        temp_rule [blank_field] = []
                rules_to_append_to.append(temp_rule)
                # The fields 'sourceServiceTags' and 'destinationServiceTags' are not supported in Azure Firewall, so we need to change them to 'sourceAddresses' and 'destinationAddresses'
                # NOTE: this mutation intentionally happens after append() —
                # temp_rule is the same object already stored in the list.
                if src_field == 'sourceServiceTags':
                    temp_rule['sourceAddresses'] = temp_rule['sourceServiceTags']
                    temp_rule.pop('sourceServiceTags')
                if dst_field == 'destinationServiceTags':
                    temp_rule['destinationAddresses'] = temp_rule['destinationServiceTags']
                    temp_rule.pop('destinationServiceTags')
    if split_rule_counter > 1:
        if log_level >= 7:
            print("DEBUG: Checkpoint rule {0} has been split in {1} Azure Firewall rules".format(rule_to_be_appended['name'], split_rule_counter), file=sys.stderr)
    return rules_to_append_to
# Recursively finds all members of objects by their UID
def find_members(object_group_list, uid_list, member_list=None, debug=False, mode='ip'):
    """Recursively resolve Checkpoint UIDs into concrete member values.

    object_group_list: list of Checkpoint object dicts (each carrying 'uid').
    uid_list: a single UID or a list of UIDs to resolve.
    member_list: recursion accumulator; leave as None for a fresh call.
        Fixed: the original used a mutable default argument ([]), which
        leaked resolved members across independent top-level calls.
    mode: 'ip' yields address strings ('a.b.c.d/len', FQDNs, service tags,
        '*' for any); any other value yields (protocol, port) tuples.
    Returns a de-duplicated list (ordering is not guaranteed).
    """
    if member_list is None:
        member_list = []
    # Make sure that the uid is a list
    if not isinstance(uid_list, list):
        uid_list = [uid_list]
    # Loop through all objects
    for object_group in object_group_list:
        if object_group['uid'] in uid_list:
            if 'members' in object_group:
                # Group object: recurse into each member UID.
                if len(object_group['members']) > 0:
                    for member in object_group['members']:
                        if is_uid(member):
                            member_list = find_members(object_group_list, member, member_list=member_list)
                else:
                    if debug:
                        print('DEBUG: object group {0} has no members.'.format(str(object_group['name'])), file=sys.stderr)
            elif object_group['type'] == 'network':
                member_list.append(object_group['subnet4'] + '/' + str(object_group['mask-length4']))
            elif object_group['type'] == 'host':
                member_list.append(object_group['ipv4-address'] + '/32')
            elif object_group['type'] == 'dns-domain':
                member_list.append(str(object_group['name'])[1:]) # In checkpoint syntax, fqdn starts with a dot
            elif object_group['type'] == 'dynamic-object': # Service Tag "AVDServiceRanges"
                if debug:
                    print('DEBUG: adding dynamic-object {0}'.format(object_group['name']), str(object_group), file=sys.stderr)
                if object_group['name'] == 'AVDServiceRanges':
                    member_list.append('WindowsVirtualDesktop')
                else:
                    if log_level >= 3:
                        print('ERROR: dynamic-object {0} cannot be mapped to an Azure service tag'.format(object_group['name']), file=sys.stderr)
            elif object_group['type'] == 'service-tcp':
                member_list.append(('tcp', object_group['port']))
            elif object_group['type'] == 'service-udp':
                member_list.append(('udp', object_group['port']))
            elif object_group['type'] == 'service-icmp':
                member_list.append(('icmp', '*'))
            elif object_group['type'] == 'CpmiAnyObject':
                if (mode == 'ip'):
                    member_list.append('*')
                else:
                    member_list.append(('any', '*'))
            elif object_group['type'] == 'RulebaseAction':
                member_list.append(object_group['name'])
            elif object_group['type'] in ('CpmiGatewayCluster', 'CpmiClusterMember', 'CpmiHostCkp', 'simple-cluster', 'Global'):
                if debug:
                    print('DEBUG: ignoring object type', object_group['type'], file=sys.stderr)
            else:
                if debug:
                    print('DEBUG: unknown object type', object_group['type'], file=sys.stderr)
    return list(set(member_list))
# Set log_level: translate the --log-level argument into a numeric level.
if is_number(args.log_level_string):
    try:
        log_level = int(args.log_level_string)
    except:
        log_level = 4
else:
    # Named levels; unknown names fall back to 'warning' (4).
    # 'all' maps to 7 because the debug branch matched it first in the
    # original if/elif chain; 'debugplus' is therefore the only name for 8.
    _named_levels = {
        'error': 3,
        'warning': 4,
        'notice': 5,
        'info': 6,
        'debug': 7,
        'all': 7,
        'debugplus': 8,
        'none': 0,
    }
    log_level = _named_levels.get(args.log_level_string, 4)
# Get JSON index file list from the specified folder
if log_level > 7:
    print ("DEBUG: Loading file {0}...".format(args.json_index_file), file=sys.stderr)
try:
    with open(args.json_index_file) as f:
        json_index = json.load(f)
except Exception as e:
    if log_level >= 3:
        print("ERROR: Error when opening JSON index file", args.json_index_file, "-", str(e), file=sys.stderr)
    # Fixed: exit with a non-zero status so scripts can detect the failure
    # (the original exited with 0, signalling success on error).
    sys.exit(1)
# Go through the files and create the objects
# Each policy package can reference an objects file, several access/threat
# layers and one NAT layer; each referenced file is loaded after swapping
# its extension for '.json'. Load failures are logged and skipped.
access_layers = []
threat_layers = []
nat_layers = []
for package in json_index['policyPackages']:
    if 'objects' in package:
        if log_level >= 7:
            print ("DEBUG: Objects section found, file {0}...".format(package['objects']['htmlObjectsFileName']), file=sys.stderr)
        filename = package['objects']['htmlObjectsFileName']
        try:
            # Try to open the file with JSON extension
            filename = os.path.splitext(package['objects']['htmlObjectsFileName'])[0]+'.json'
            with open(filename) as f:
                # NOTE(review): policy_objects keeps only the LAST package's
                # objects when several packages are present — confirm intended.
                policy_objects = json.load(f)
                if log_level >= 7:
                    print ("DEBUG: File {0} loaded successfully".format(filename), file=sys.stderr)
        except Exception as e:
            if log_level >= 4:
                print("WARNING: Error when opening JSON file", filename, "-", str(e), file=sys.stderr)
            pass
    if 'accessLayers' in package:
        for layer in package['accessLayers']:
            if 'htmlFileName' in layer:
                if log_level >= 7:
                    print ("DEBUG: Access layer found, file {0}...".format(layer['htmlFileName']), file=sys.stderr)
                filename = layer['htmlFileName']
                try:
                    # Try to open the file with JSON extension
                    filename = os.path.splitext(layer['htmlFileName'])[0]+'.json'
                    with open(filename) as f:
                        access_layers.append(json.load(f))
                        if log_level >= 7:
                            print ("DEBUG: File {0} loaded successfully".format(filename), file=sys.stderr)
                except Exception as e:
                    if log_level >= 4:
                        print("WARNING: Error when opening JSON file for access layer", filename, "-", str(e), file=sys.stderr)
                    pass
    if 'threatLayers' in package:
        for layer in package['threatLayers']:
            if 'htmlFileName' in layer:
                if log_level >= 7:
                    print ("DEBUG: Threat layer found, file {0}...".format(layer['htmlFileName']), file=sys.stderr)
                filename = layer['htmlFileName']
                try:
                    filename = os.path.splitext(layer['htmlFileName'])[0] + '.json'
                    with open(filename) as f:
                        threat_layers.append(json.load(f))
                        if log_level >= 7:
                            print ("DEBUG: File {0} loaded successfully".format(filename), file=sys.stderr)
                except Exception as e:
                    if log_level >= 4:
                        print("WARNING: Error when opening JSON file for threat layer", filename, "-", str(e), file=sys.stderr)
                    pass
    if 'natLayer' in package:
        layer = package['natLayer']
        if 'htmlFileName' in layer:
            if log_level >= 7:
                print ("DEBUG: NAT layer found, file {0}...".format(layer['htmlFileName']), file=sys.stderr)
            filename = layer['htmlFileName']
            try:
                # Try to open the file with JSON extension
                filename = os.path.splitext(layer['htmlFileName'])[0]+'.json'
                with open(filename) as f:
                    # nat_layer = json.load(f)
                    nat_layers.append(json.load(f))
                    if log_level >= 7:
                        print ("DEBUG: File {0} loaded successfully".format(filename), file=sys.stderr)
            except Exception as e:
                if log_level >= 4:
                    print("WARNING: Error when opening JSON file for NAT layer", filename, "-", str(e), file=sys.stderr)
                pass
# Inspect the imported objects
# policy_object_types = []
# for policy_object in policy_objects:
# if 'type' in policy_object:
# if not policy_object['type'] in policy_object_types:
# policy_object_types.append(policy_object['type'])
# if log_level >= 7:
# print('Policy object types found:', str(policy_object_types))
# Policy object types found: ['vpn-community-star', 'RulebaseAction', 'CpmiAnyObject', 'service-group', 'group', 'Track', 'Global', 'service-tcp', 'network', 'dynamic-object', 'host', 'CpmiHostCkp', 'service-icmp', 'service-other', 'threat-profile', 'ThreatExceptionRulebase', 'service-udp', 'dns-domain', 'simple-cluster', 'CpmiClusterMember']
# Inspect the imported access layers
def inspect_access_layers(layer_list):
    """First pass over the access layers: collect Azure IP Group candidates.

    For every source/destination object UID referenced by an access rule, an
    entry (id, resolved members, member count, Checkpoint object name) is
    recorded in the global `ipgroups` list, so later passes can reference
    IP Groups instead of inline address lists.

    Fixed: removed several locals (rule_name, action/src/dst/svc display
    strings, rule_svc_members) that were computed but never used.
    """
    for layer in layer_list:
        for rule in layer:
            # Check rule is a dictionary and contains a type key
            if isinstance(rule, dict) and 'type' in rule:
                if rule['type'] == 'access-rule':
                    rule_src_members = find_members(policy_objects, rule['source'], member_list=[], mode='ip')
                    rule_dst_members = find_members(policy_objects, rule['destination'], member_list=[], mode='ip')
                    # NOTE(review): each referenced object UID is stored with the
                    # union of ALL the rule's source (or destination) members,
                    # not only its own members — confirm this is intended.
                    if len(rule_src_members) > 0:
                        for src in rule['source']:
                            if not is_ipgroup(ipgroups, src):
                                ipgroups.append({'id': src, 'members': rule_src_members, 'member_count': len(rule_src_members), 'name': find_uid(policy_objects, src)['name']})
                    if len(rule_dst_members) > 0:
                        for dst in rule['destination']:
                            if not is_ipgroup(ipgroups, dst):
                                ipgroups.append({'id': dst, 'members': rule_dst_members, 'member_count': len(rule_dst_members), 'name': find_uid(policy_objects, dst)['name']})
                elif rule['type'] == 'nat-rule':
                    if log_level >= 7:
                        print('DEBUG: processing NAT rule', rule['rule-number'], file=sys.stderr)
                elif rule['type'] == 'threat-rule':
                    if log_level >= 7:
                        print('DEBUG: processing Threat rule', rule['rule-number'], file=sys.stderr)
                else:
                    if log_level >= 7:
                        print('DEBUG: ignoring rule of type', rule['type'], file=sys.stderr)
            else:
                print('ERROR: Rule is not a dictionary or does not contain a type key:', str(rule), file=sys.stderr)
def print_access_layer_rule(layer_list, rule_id_list, debug=False):
    """Dump the access rules whose UID is in rule_id_list to stderr as a table."""
    row_format = '{0:<40}{1:<40}{2:<40}{3:<40}{4:<40}'

    def shorten(value):
        # Clip column values to 38 characters so the table stays aligned.
        return str(value)[:38]

    for layer in layer_list:
        if log_level >= 7:
            print(row_format.format('Name', 'Action', 'Source', 'Destination', 'Service'), file=sys.stderr)
        for rule in layer:
            # Skip malformed entries and rules that were not requested.
            if not (isinstance(rule, dict) and 'type' in rule):
                continue
            if rule['type'] != 'access-rule' or rule['uid'] not in rule_id_list:
                continue
            action_text = str(find_members(policy_objects, rule['action'], member_list=[])[0])
            source_members = find_members(policy_objects, rule['source'], member_list=[], mode='ip', debug=debug)
            destination_members = find_members(policy_objects, rule['destination'], member_list=[], mode='ip', debug=debug)
            service_members = find_members(policy_objects, rule['service'], member_list=[], mode='svc', debug=debug)
            if log_level >= 7:
                print(row_format.format(shorten(rule['name']), action_text, shorten(source_members), shorten(destination_members), shorten(service_members)), file=sys.stderr)
# Process the imported access layers. inspect_access_layers needs to have run first to create the list of IP groups
def process_access_layers(layer_list, ipgroups):
    """Convert Checkpoint access rules into Azure Firewall network rule collections.

    Walks every access rule, resolves its action/source/destination/service
    objects and appends the converted rule (via append_rule) to the last
    collection in the global az_net_rcs list.  Because an Azure rule
    collection has a single action, a new collection is started every time
    the rule action changes.  Rules with an empty source, destination or
    service set are recorded in discarded_rules instead.
    """
    global cnt_netrules_ip, cnt_netrules_fqdn, cnt_chkp_rules
    last_action = None
    for layer in layer_list:
        for rule in layer:
            # Check rule is a dictionary and contains a type key
            if isinstance(rule, dict) and 'type' in rule:
                if rule['type'] == 'access-rule':
                    cnt_chkp_rules += 1
                    # Rule Name and action
                    rule_name = rule['name']
                    rule_action = str(find_members(policy_objects, rule['action'], member_list=[])[0])
                    # If there is a change from deny to allow, or from allow to deny, or if this is the first rule, we need to create a rule collection
                    if rule_action != last_action:
                        rule_collection = {
                            'name': rc_net_name + '-' + rule_action + '-' + str(len(az_net_rcs)),
                            'action': rule_action,
                            'rules': []
                        }
                        # Append the rule collection to the list of rule collections and set last_action to the new value
                        az_net_rcs.append(rule_collection)
                        last_action = rule_action
                    # action/src/dst/svc object Members
                    rule_src_members = find_members(policy_objects, rule['source'], member_list=[], mode='ip')
                    rule_dst_members = find_members(policy_objects, rule['destination'], member_list=[], mode='ip')
                    rule_svc_members = find_members(policy_objects, rule['service'], member_list=[], mode='svc')
                    if len(rule_src_members) > 0 and len(rule_dst_members) > 0 and len(rule_svc_members) > 0:
                        # 'sourceServiceTags' and 'destinationServiceTags' are auxiliary fields, since the service tags go actually in the 'sourceAddresses' and 'destinationAddresses' fields
                        # The fields will be removed in the function append_rule
                        new_rule = {
                            'name': rule['name'] + '-' + str(rule['uid']),
                            'ruleType': 'NetworkRule',
                            'sourceAddresses': [],
                            'sourceIpGroups': [],
                            'destinationAddresses': [],
                            'destinationFqdns': [],
                            'destinationIpGroups': [],
                            'sourceServiceTags': [],
                            'destinationServiceTags': []
                        }
                        if not args.rule_id_to_name:
                            new_rule['name'] = rule['name']
                        if len(rule_src_members) == 1 and is_ipgroup(ipgroups, rule_src_members[0]):
                            # Fixed: list.append() returns None; the original indexed
                            # ['name'] on that None instead of on the found IP group.
                            new_rule['sourceIpGroups'].append(find_ipgroup(ipgroups, rule_src_members[0])['name'])
                        else:
                            for src in rule_src_members:
                                if src == 'any' or src == '*' or 'any' in src or src[0] == 'any':
                                    new_rule['sourceAddresses'] = [ '*' ]
                                elif is_ipv4(src):
                                    if src not in new_rule['sourceAddresses']:
                                        new_rule['sourceAddresses'].append(src)
                                # If not an IP address, it must be a service tag
                                elif src not in new_rule['sourceAddresses']:
                                    if src not in new_rule['sourceServiceTags']:
                                        new_rule['sourceServiceTags'].append(src)
                        if len(rule_dst_members) == 1 and is_ipgroup(ipgroups, rule_dst_members[0]):
                            # Fixed: same append()['name'] bug as the source branch.
                            new_rule['destinationIpGroups'].append(find_ipgroup(ipgroups, rule_dst_members[0])['name'])
                        else:
                            for dst in rule_dst_members:
                                if dst == 'any' or dst == '*' or 'any' in dst:
                                    cnt_netrules_ip += 1
                                    new_rule['destinationAddresses'] = [ '*' ]
                                elif is_fqdn(dst):
                                    # Fixed: the counter was incremented twice per FQDN;
                                    # count only when the FQDN is actually added.
                                    if dst not in new_rule['destinationFqdns']:
                                        cnt_netrules_fqdn += 1
                                        new_rule['destinationFqdns'].append(dst)
                                elif is_ipv4(dst):
                                    if dst not in new_rule['destinationAddresses']:
                                        cnt_netrules_ip += 1
                                        new_rule['destinationAddresses'].append(dst)
                                # If not an IP address or a domain name, it must be a service tag
                                else:
                                    if dst not in new_rule['destinationServiceTags']:
                                        new_rule['destinationServiceTags'].append(dst)
                        # Services are in an array of 2-tuples (protocol, port)
                        if 'any' in rule_svc_members:
                            new_rule['ipProtocols'] = ['Any']
                            new_rule['destinationPorts'] = [ '*' ]
                        else:
                            new_rule['ipProtocols'] = []
                            new_rule['destinationPorts'] = []
                            for svc in rule_svc_members:
                                protocol = svc[0]
                                port = svc[1]
                                if protocol == 'tcp' or protocol == 'udp':
                                    if protocol not in new_rule['ipProtocols']:
                                        new_rule['ipProtocols'].append(protocol)
                                    if port not in new_rule['destinationPorts']:
                                        # Checkpoint accepts the syntax >1024, but Azure does not
                                        if port[0] == '>':
                                            new_rule['destinationPorts'].append(str(int(port[1:]) + 1) + '-65535')
                                        else:
                                            new_rule['destinationPorts'].append(port)
                                elif protocol == 'icmp':
                                    if protocol not in new_rule['ipProtocols']:
                                        new_rule['ipProtocols'].append(protocol)
                                    new_rule['destinationPorts'] = [ '*' ]
                                elif protocol == 'any':
                                    new_rule['ipProtocols'] = ['Any']
                                    new_rule['destinationPorts'] = [ '*' ]
                                else:
                                    print('ERROR: Unknown service protocol', protocol, 'in rule', rule_name, file=sys.stderr)
                        # Add new rule to the latest rule collection (the one we are working on)
                        if args.remove_explicit_deny and rule_action == 'Drop' and new_rule['sourceAddresses'] == [ '*' ] and new_rule['destinationAddresses'] == [ '*' ] and new_rule['destinationPorts'] == [ '*' ] and new_rule['ipProtocols'] == ['Any']:
                            discarded_rules.append(rule['uid'])
                            if log_level >= 6:
                                print('INFO: Skipping rule "{0}" as it is an explicit catch-all deny rule'.format(rule_name), file=sys.stderr)
                        else:
                            az_net_rcs[-1]['rules'] = append_rule(new_rule, az_net_rcs[-1]['rules'])
                    # If one of the objects was empty, add to the discarded rules
                    else:
                        discarded_rules.append(rule['uid'])
# Inspect the imported NAT layers (debug helper; rows only print at log level 7+)
def inspect_nat_layers(layer_list):
    """Print a fixed-width table of the NAT rules contained in layer_list.

    Each layer is a list of rule dictionaries as imported from the Checkpoint
    policy; only entries of type 'nat-rule' are displayed. Source/destination/
    service object references are expanded through the module-level
    find_members()/policy_objects and truncated to 38 characters for display.
    All output goes to stderr.

    NOTE(review): the header row is printed unconditionally, while the data
    rows are gated on log_level >= 7 - confirm this asymmetry is intended.
    """
    for layer in layer_list:
        # Column header for this layer's table
        print('{0:<5}{1:<20}{2:<20}{3:<20}{4:<20}{5:<20}{6:<20}'.format('ID', 'Original Src', 'Translated Src', 'Original Dst', 'Translated Dst', 'Original Svc', 'Translated Svc'), file=sys.stderr)
        for rule in layer:
            # Check rule is a dictionary and contains a type key
            if isinstance(rule, dict) and 'type' in rule:
                if rule['type'] == 'nat-rule':
                    if log_level >= 7:
                        # Rule ID
                        rule_id = rule['rule-number']
                        # src/dst/svc object Members (expanded to concrete IPs/services)
                        rule_osrc_members = find_members(policy_objects, rule['original-source'], member_list=[], mode='ip')
                        rule_osrc_members_str = str(rule_osrc_members) if len(str(rule_osrc_members)) <= 38 else str(rule_osrc_members)[:38]
                        rule_tsrc_members = find_members(policy_objects, rule['translated-source'], member_list=[], mode='ip')
                        rule_tsrc_members_str = str(rule_tsrc_members) if len(str(rule_tsrc_members)) <= 38 else str(rule_tsrc_members)[:38]
                        rule_odst_members = find_members(policy_objects, rule['original-destination'], member_list=[], mode='ip')
                        rule_odst_members_str = str(rule_odst_members) if len(str(rule_odst_members)) <= 38 else str(rule_odst_members)[:38]
                        rule_tdst_members = find_members(policy_objects, rule['translated-destination'], member_list=[], mode='ip')
                        rule_tdst_members_str = str(rule_tdst_members) if len(str(rule_tdst_members)) <= 38 else str(rule_tdst_members)[:38]
                        rule_osvc_members = find_members(policy_objects, rule['original-service'], member_list=[], mode='svc')
                        rule_osvc_members_str = str(rule_osvc_members) if len(str(rule_osvc_members)) <= 38 else str(rule_osvc_members)[:38]
                        rule_tsvc_members = find_members(policy_objects, rule['translated-service'], member_list=[], mode='svc')
                        rule_tsvc_members_str = str(rule_tsvc_members) if len(str(rule_tsvc_members)) <= 38 else str(rule_tsvc_members)[:38]
                        # Print one table row per NAT rule
                        print('{0:<5}{1:<20}{2:<20}{3:<20}{4:<20}{5:<20}{6:<20}'.format(rule_id, rule_osrc_members_str, rule_tsrc_members_str, rule_odst_members_str, rule_tdst_members_str, rule_osvc_members_str, rule_tsvc_members_str), file=sys.stderr)
                else:
                    if log_level >= 7:
                        print('DEBUG: ignoring rule of type', rule['type'])
            else:
                print('ERROR: Rule is not a dictionary or does not contain a type key:', str(rule))
if log_level >= 7:
    print('DEBUG: Access layers found:', file=sys.stderr)
inspect_access_layers(access_layers)
# Other types of layers (not required)
# if log_level >= 7:
#     print('DEBUG: Threat layers found:')
# inspect_access_layers(threat_layers)
# if log_level >= 7:
#     print('DEBUG: NAT layer found:')
# inspect_nat_layers(nat_layers)
# Remove ipgroups that contain FQDNs: Azure IP Groups may only hold IP addresses.
# Iterate over a shallow copy so that ipgroups.remove() below is safe.
ipgroups_copy = ipgroups.copy()
for ipgroup in ipgroups_copy:
    for x in ipgroup['members']:
        if is_fqdn(x):
            if log_level >= 7:
                print('DEBUG: Removing IP group', ipgroup['name'], 'because it contains FQDN', x, '(IP Groups can only contain IP addresses)', file=sys.stderr)
            ipgroups.remove(ipgroup)
            break
if log_level >= 6:
    print('INFO: {0} out of {1} IP Groups remain after removing FQDNs'.format(len(ipgroups), len(ipgroups_copy)), file=sys.stderr)
# Show ipgroups, largest first, capped to the CLI-configured maximum
ipgroups = sorted(ipgroups, key=lambda d: d['member_count'], reverse=True)
if log_level >= 6:
    print('INFO: {0} IP groups found, capping them to the top {1}'.format(len(ipgroups), args.max_ipgroups), file=sys.stderr)
ipgroups = ipgroups[:args.max_ipgroups]
if log_level >= 8:
    print('{0:<50}{1:<38}{2:<5}{3:<80}'.format('IP group name', 'CHKP ID', 'Count', 'IP addresses'), file=sys.stderr)
    for ipgroup in ipgroups:
        # Truncate the member list to 80 characters for tabular display
        ipgroup_members = str(ipgroup['members']) if len(str(ipgroup['members'])) <= 80 else str(ipgroup['members'])[:80]
        print('{0:<50}{1:<38}{2:<5}{3:<50}'.format(ipgroup['name'], ipgroup['id'], str(ipgroup['member_count']), ipgroup_members), file=sys.stderr)
# Check whether any IP group is repeated (duplicate IDs/names would collide in the ARM template)
if len(list(set([x['id'] for x in ipgroups]))) != len(ipgroups):
    if log_level >= 4:
        print('ERROR: IP groups with repeated IDs found', file=sys.stderr)
if len(list(set([x['name'] for x in ipgroups]))) != len(ipgroups):
    if log_level >= 4:
        print('ERROR: IP groups with repeated names found', file=sys.stderr)
# Process rules: fills the module-level az_net_rcs rule collections
process_access_layers(access_layers, ipgroups)
if log_level >= 6:
    print('INFO: {0} network rules found, spread across {1} rule collections ({2} allow rules, {3} deny rules)'.format(sum([len(x['rules']) for x in az_net_rcs]), len(az_net_rcs), sum([len(x['rules']) for x in az_net_rcs if x['action'] == 'Accept']), sum([len(x['rules']) for x in az_net_rcs if x['action'] == 'Drop'])), file=sys.stderr)
# Now we should have all rules stored as network rule collections. Check whether
# any can be transformed into an application rule.
# App rules need to go into their own rule collections.
def create_app_rules(net_rcs):
    """Move HTTP/HTTPS FQDN rules from network rule collections into app rule collections.

    A network rule qualifies as an application rule when its destination
    ports are exactly 80 and/or 443 and it has at least one FQDN destination.
    Qualifying rules are removed from their network rule collection,
    converted in place to Azure Firewall application-rule format, and grouped
    into new application rule collections (one per consecutive run of the
    same allow/deny action).

    Returns the (modified) network rule collections and the new list of
    application rule collections.
    """
    last_action = None
    app_rcs = []
    for net_rc in net_rcs:
        # BUGFIX: iterate over a snapshot of the rule list. Qualifying rules
        # are removed from net_rc['rules'] inside the loop, and removing from
        # the list being iterated silently skips the element that follows each
        # removed rule. (The previous net_rcs.copy() was shallow and did not
        # protect the inner per-collection rule lists.)
        for net_rule in list(net_rc['rules']):
            # Check whether the rule is for ports 80/443, and whether the target is a FQDN
            if set(net_rule['destinationPorts']) in ({'80', '443'}, {'80'}, {'443'}) and len(net_rule['destinationFqdns']) > 0:
                if log_level >= 7:
                    print('DEBUG: Transforming rule', net_rule['name'], 'to an application rule', file=sys.stderr)
                if net_rc['action'] != last_action:
                    # Start a new application rule collection whenever the action changes.
                    # NOTE(review): the index suffix uses the module-level az_app_rcs,
                    # not the local app_rcs - confirm this is intended.
                    rule_collection = {
                        'name': rc_app_name + '-' + net_rc['action'] + '-' + str(len(az_app_rcs)),
                        'action': net_rc['action'],
                        'rules': []
                    }
                    # Append the rule collection to the list of rule collections and set last_action to the new value
                    app_rcs.append(rule_collection)
                    last_action = net_rc['action']
                # Remove the rule from the network rule collection
                net_rc['rules'].remove(net_rule)
                # Change the rule type
                net_rule['ruleType'] = 'applicationRule'
                # Translate ipProtocols/destinationPorts into app-rule protocol entries
                net_rule.pop('ipProtocols')
                net_rule['protocols'] = []
                if '80' in net_rule['destinationPorts']:
                    net_rule['protocols'].append({'protocolType': 'Http', 'port': 80})
                if '443' in net_rule['destinationPorts']:
                    net_rule['protocols'].append({'protocolType': 'Https', 'port': 443})
                net_rule['terminateTls'] = False
                net_rule.pop('destinationPorts')
                # Set the remaining app rule attributes
                net_rule['targetFqdns'] = net_rule['destinationFqdns']
                net_rule.pop('destinationFqdns')
                net_rule['targetUrls'] = []
                net_rule['webCategories'] = []
                net_rule['fqdnTags'] = []
                # Add the rule to the last (current) app rule collection
                app_rcs[-1]['rules'].append(net_rule)
    # Finished
    return net_rcs, app_rcs
# Inspect both allow and deny network rules for candidates to transform into application rules
if args.use_apprules:
    if log_level >= 7:
        print('DEBUG: Checking whether any network rule can be transformed to an application rule', file=sys.stderr)
    # az_net_rules_allow, az_app_rules_allow = create_app_rules(az_net_rules_allow, az_app_rules_allow)
    # az_net_rules_deny, az_app_rules_deny = create_app_rules(az_net_rules_deny, az_app_rules_deny)
    az_net_rcs, az_app_rcs = create_app_rules(az_net_rcs)
##########
# Output #
##########
# Generate JSON would be creating an object and serialize it
if args.output == "json":
    api_version = "2021-08-01"
    azfw_policy_name = args.policy_name
    # ARM deployment template skeleton; resources are appended below
    arm_template = {
        '$schema': 'https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#',
        'contentVersion': '1.0.0.0',
        'parameters': {},
        'variables': {
            'location': '[resourceGroup().location]'
        },
        'resources': []
    }
    # Optionally create the firewall policy resource itself (otherwise only
    # the rule collection group is emitted, targeting an existing policy)
    if not args.dont_create_policy:
        resource_policy = {
            'type': 'Microsoft.Network/firewallPolicies',
            'apiVersion': api_version,
            'name': azfw_policy_name,
            'location': '[variables(\'location\')]',
            'properties': {
                'sku': {
                    'tier': args.policy_sku
                },
                'dnsSettings': {
                    'enableProxy': 'true'
                },
                'threatIntelMode': 'Alert'
            }
        }
        arm_template['resources'].append(resource_policy)
    # Rule collection group that will hold all generated rule collections
    resource_rcg = {
        'type': 'Microsoft.Network/firewallPolicies/ruleCollectionGroups',
        'apiVersion': api_version,
        'name': azfw_policy_name + '/' + rcg_name,
        'dependsOn': [],
        'location': '[variables(\'location\')]',
        'properties': {
            'priority': rcg_prio,
            'ruleCollections': []
        }
    }
    if not args.dont_create_policy:
        # NOTE(review): the trailing comma makes this statement a throwaway
        # one-element tuple - harmless, but confusing; candidate for cleanup.
        resource_rcg['dependsOn'].append('[resourceId(\'Microsoft.Network/firewallPolicies\', \'' + azfw_policy_name +'\')]'),
    if args.use_ipgroups:
        # Emit one ipGroups resource per surviving IP group and make the rule
        # collection group depend on all of them
        for ip_grp in ipgroups:
            resource_ipgroup = {
                'type': 'Microsoft.Network/ipGroups',
                'apiVersion': api_version,
                'name': format_to_arm_name(ip_grp['name']),
                'location': '[variables(\'location\')]',
                'properties': {
                    'ipAddresses': ip_grp['members']
                }
            }
            arm_template['resources'].append(resource_ipgroup)
            resource_rcg['dependsOn'].append("[resourceId('Microsoft.Network/ipGroups', '{0}')]".format(format_to_arm_name(ip_grp['name'])))
    # Add network rule collections to the template (priorities spaced by 10)
    rc_net_prio = int(rc_net_prio_start)
    for net_rc in az_net_rcs:
        resource_rcg['properties']['ruleCollections'].append({
            'ruleCollectionType': 'FirewallPolicyFilterRuleCollection',
            'name': net_rc['name'],
            'priority': str(rc_net_prio),
            'action': {
                'type': 'deny' if net_rc['action'] == 'Drop' else 'allow'
            },
            'rules': net_rc['rules']
        })
        rc_net_prio += 10
    # Add application rule collections to the template (priorities spaced by 10)
    rc_app_prio = int(rc_app_prio_start)
    for app_rc in az_app_rcs:
        resource_rcg['properties']['ruleCollections'].append({
            'ruleCollectionType': 'FirewallPolicyFilterRuleCollection',
            'name': app_rc['name'],
            'priority': str(rc_app_prio),
            'action': {
                'type': 'deny' if app_rc['action'] == 'Drop' else 'allow'
            },
            'rules': app_rc['rules']
        })
        rc_app_prio += 10
    # if len(az_net_rules_allow) > 0:
    #     resource_rcg['properties']['ruleCollections'].append(resource_net_rc_allow)
    # if len(az_net_rules_deny) > 0:
    #     resource_rcg['properties']['ruleCollections'].append(resource_net_rc_deny)
    # if len(az_app_rules_allow) > 0:
    #     resource_rcg['properties']['ruleCollections'].append(resource_app_rc_allow)
    # if len(az_app_rules_deny) > 0:
    #     resource_rcg['properties']['ruleCollections'].append(resource_app_rc_deny)
    arm_template['resources'].append(resource_rcg)
    if args.pretty:
        print(json.dumps(arm_template, indent=4, sort_keys=True))
    else:
        print(json.dumps(arm_template))
elif args.output == "none":
    if log_level >= 6:
        print('INFO: No output type selected', file=sys.stderr)
else:
    if log_level >= 3:
        print ("ERROR: Output type", args.output, "not recognized!", file=sys.stderr)
# Last info message: conversion summary (note: 'analized' typo is in runtime
# output text, kept unchanged here)
if log_level >= 6:
    print('INFO: Summary:', file=sys.stderr)
    print('INFO: {0} Checkpoint rules analized'.format(str(cnt_chkp_rules)), file=sys.stderr)
    print('INFO: {0} Azure Firewall network rules, spread across {1} rule collections ({2} allow rules, {3} deny rules)'.format(sum([len(x['rules']) for x in az_net_rcs]), len(az_net_rcs), sum([len(x['rules']) for x in az_net_rcs if x['action'] == 'Accept']), sum([len(x['rules']) for x in az_net_rcs if x['action'] == 'Drop'])), file=sys.stderr)
    print('INFO: {0} Azure Firewall application rules, spread across {1} rule collections ({2} allow rules, {3} deny rules)'.format(sum([len(x['rules']) for x in az_app_rcs]), len(az_app_rcs), sum([len(x['rules']) for x in az_app_rcs if x['action'] == 'Accept']), sum([len(x['rules']) for x in az_app_rcs if x['action'] == 'Drop'])), file=sys.stderr)
    print('INFO: {0} Checkpoint discarded rules:'.format(len(discarded_rules)), file=sys.stderr)
    print_access_layer_rule(access_layers, discarded_rules, debug=True)
| [
"noreply@github.com"
] | noreply@github.com |
203001c61b060edefb7a98c2ccc7daaa1ee84d37 | 8027e4740f16ff7467c5d86ce39ce4582e33a03c | /settings_local.py | a5ac64ef73c30affd72f7577ee991e50dc925f2d | [
"MIT"
] | permissive | johmathe/deep-visualization-toolbox | 992acffeadeae97607c1acfb93c9e5fb65fa7092 | da2b0e2abd34844cb4d7a655d894d3c96167ca00 | refs/heads/master | 2020-06-21T16:41:32.783190 | 2016-11-25T20:02:29 | 2016-11-25T20:02:29 | 74,780,020 | 0 | 0 | null | 2016-11-25T18:24:43 | 2016-11-25T18:24:42 | null | UTF-8 | Python | false | false | 1,481 | py | # Define critical settings and/or override defaults specified in
# settings.py. Copy this file to settings_local.py in the same
# directory as settings.py and edit. Any settings defined here
# will override those defined in settings.py
# Set this to point to your compiled checkout of caffe
caffevis_caffe_root = '/root/caffe/'
# Load model: caffenet-yos
# Path to caffe deploy prototxt file. Minibatch size should be 1.
# Base directory holding the RHD classifier model files referenced below.
PATH = '/home/johmathe/code/enmi/rhd'
caffevis_deploy_prototxt = PATH + '/rhd_classifier_deploy.prototxt'
# Path to network weights to load.
caffevis_network_weights = PATH + '/rhd_classifier.caffemodel'
# Other optional settings; see complete documentation for each in settings.py.
# Mean image (numpy .npy) and class label list for the classifier.
caffevis_data_mean = PATH + '/rhd_classifier_mean.npy'
caffevis_labels = PATH + '/rhd_classifier_labels.txt'
# Layers whose activations map onto the label list / probability output.
caffevis_label_layers = ('fc8_rhd', 'prob')
caffevis_prob_layer = 'prob'
# Pre-rendered unit visualisation images (%DVT_ROOT% is expanded by the toolbox).
caffevis_unit_jpg_dir = '%DVT_ROOT%/models/caffenet-yos/unit_jpg_vis'
caffevis_jpgvis_layers = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fc6_rhd', 'fc7_rhd', 'fc8_rhd', 'prob']
# Pooling layers reuse the jpg visualisations of their preceding conv layer.
caffevis_jpgvis_remap = {'pool1': 'conv1', 'pool2': 'conv2', 'pool5': 'conv5'}
def caffevis_layer_pretty_name_fn(name):
    """Shorten layer names for display: 'pool' -> 'p', 'norm' -> 'n'."""
    for long_form, short_form in (('pool', 'p'), ('norm', 'n')):
        name = name.replace(long_form, short_form)
    return name
# Use GPU? Default is True.
caffevis_mode_gpu = True
# Display tweaks.
# (Both remain commented out, i.e. the toolbox defaults of 1.0 apply.)
# Scale all window panes in UI by this factor
#global_scale = 1.0
# Scale all fonts by this factor
#global_font_size = 1.0
| [
"johan.mathe@gmail.com"
] | johan.mathe@gmail.com |
aa884db62255ca147c6e5f1127cfa1c35728050e | fca6677041d0b3d074d958f45cb5ca4fc36cecc7 | /EvenOdd seperator.py | f75e229b244e883bb3efc919bd0c0511813462d5 | [] | no_license | Eshh/Python-Language | c3d0f280f58067d9350a2b50a2e25bf99fb2422b | 8dfbbffcdb7186642cbe1fa153b60ba3ac84fc61 | refs/heads/master | 2020-04-13T02:40:54.892309 | 2018-12-23T17:18:59 | 2018-12-23T17:18:59 | 162,909,726 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | # Accept a list and place all even numbers on the left and odd numbers on the right
# Read integers until the user enters 0 (the terminating 0 is not stored).
l = []
while True:
    n = int(input("Please enter a number,press 0 to stop: "))
    if n == 0:
        break
    l.append(n)
print(f"Given list of numbers is: {l[:]}")
print()
# Partition the list: evens keep their relative order on the left,
# odds keep theirs on the right.
evens = [value for value in l if value % 2 == 0]
odds = [value for value in l if value % 2 != 0]
l = evens + odds
print("The modified list with even numbers on the left and odd numbers on the right is")
print(l[:])
| [
"noreply@github.com"
] | noreply@github.com |
c1873a773be89f1211eb3e4c066198116c6ee23f | 1f01958585a0c0f10872d7f65181729e56db284f | /Methods/Extra_Functions.py | bc5f24f141c222b9427ca7d3d9bd672d5c694105 | [] | no_license | Lyngsaae/Individual-user-tracking---MscThesis_DTU_Spring2020 | 1d36adb22c7be4a06d4dd0c6b3737004b3601c9b | 55f602be03086bdfdce2264943efa6c0be50f795 | refs/heads/master | 2022-11-15T08:56:36.691455 | 2020-07-06T10:35:17 | 2020-07-06T10:35:17 | 265,823,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,726 | py | import cv2
import numpy as np
# Draw bounding box and pose
def drawTracking(frame, c1, c2, PoseNet):
    """Annotate ``frame`` with a tracked person's bounding box and a "Tracking" label.

    c1/c2 are the (x, y) top-left / bottom-right corners of the detection box.
    PoseNet.check_signal is run on a horizontally padded crop of the box to
    extract profile/pose information for the person.

    Returns a tuple of (annotated frame, signal flags of the first profile).
    """
    # Pad the crop horizontally by 50% of the box width, clamped to the frame edges.
    x_offset = int(abs(c1[0] - c2[0]) * 0.5)
    if c1[0] - x_offset < 0:
        x_offset = c1[0]
    if c2[0] + x_offset >= frame.shape[1]:
        x_offset = frame.shape[1] - c2[0] - 1
    profiles = PoseNet.check_signal(frame[int(c1[1]):int(c2[1]), int(c1[0] - x_offset):int(c2[0] + x_offset)], (c1[1], c1[0] - x_offset))
    if profiles:
        profile_signal = profiles[0]
    else:
        # Fallback when no profile is detected.
        # NOTE(review): assumed flag order [lost, ...five pose flags] - confirm.
        profile_signal = [[True, False, False, False, False, False]]
    # Thin blue bounding box around the detection.
    frame = cv2.rectangle(frame, c1, c2, (255,0,0), 1)
    # NOTE(review): 'and False' makes this pose-drawing branch dead code
    # (presumably disabled on purpose); drop 'and False' to re-enable it.
    if profiles and False:
        if profile_signal[0][0]:
            color_pos = (0, 0, 255)
        else:
            color_pos = (0, 255, 0)
        frame = cv2.polylines(frame, profile_signal[3], isClosed=False, color=color_pos)
        frame = cv2.drawKeypoints(frame, profile_signal[2], outImage=np.array([]), color=color_pos, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    # Draw a filled label box centred above the bounding box, then the text.
    t_size = cv2.getTextSize("Tracking", cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
    c1 = tuple((int((c1[0] + c2[0] - t_size[0]) / 2), c1[1] - (t_size[1] + 4)))
    c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
    frame = cv2.rectangle(frame, c1, c2, (255,0,0), -1)
    frame = cv2.putText(frame, "Tracking", (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225, 255, 255],1);
    return frame, profile_signal[0]
# Calculate IoU between two axis-aligned bounding boxes
def bb_intersection_over_union(boxA, boxB, change_format=True):
    """Return the intersection-over-union (IoU) of two axis-aligned boxes.

    Args:
        boxA, boxB: sequences of four numbers. With ``change_format=True``
            (default) they are interpreted as ``[x, y, width, height]`` and
            converted to corner form; with ``change_format=False`` they are
            already ``[x1, y1, x2, y2]`` corner coordinates.

    Returns:
        IoU in [0, 1]; 0 when the boxes do not overlap.

    BUGFIX: the previous version converted x/y/w/h to corners with in-place
    ``+=`` on the caller's lists, silently corrupting the caller's boxes (and
    giving wrong results if the same list was passed twice). The conversion
    is now done on local copies.
    """
    if change_format:
        # Convert [x, y, w, h] -> [x1, y1, x2, y2] on copies, leaving the
        # caller's lists untouched.
        boxA = [boxA[0], boxA[1], boxA[0] + boxA[2], boxA[1] + boxA[3]]
        boxB = [boxB[0], boxB[1], boxB[0] + boxB[2], boxB[1] + boxB[3]]
    # determine the (x, y)-coordinates of the intersection rectangle
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])
    # compute the area of the intersection rectangle (zero when disjoint)
    interArea = max(xB - xA, 0) * max(yB - yA, 0)
    if interArea == 0:
        return 0
    # compute the area of both the prediction and ground-truth rectangles
    boxAArea = abs((boxA[2] - boxA[0]) * (boxA[3] - boxA[1]))
    boxBArea = abs((boxB[2] - boxB[0]) * (boxB[3] - boxB[1]))
    # IoU = intersection / union (union = sum of areas minus intersection)
    iou = interArea / float(boxAArea + boxBArea - interArea)
    # return the intersection over union value
    return iou
# Find new bounding box, with optimal overlap
def findNewBox(boxA, boxB, change_format=True):
    """Trim boxB along one axis so it no longer overlaps boxA.

    Both boxes are corner-format ``[x1, y1, x2, y2]`` lists. Two trimmed
    candidates are built - one shaving half the overlap width horizontally,
    one shaving half the overlap height vertically - and the candidate with
    the larger remaining area is returned. If neither trim changes boxB,
    boxB itself is returned.

    NOTE(review): ``change_format`` is accepted (mirroring
    bb_intersection_over_union's signature) but never used here - confirm
    callers always pass corner coordinates.
    """
    # Corners of the intersection rectangle of boxA and boxB.
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])
    # Half of the overlap extent along each axis (0 when disjoint).
    width = int(abs(max((xB - xA, 0)))/2)
    height = int(abs(max((yB - yA), 0))/2)
    # Candidate 1: shrink boxB horizontally away from boxA.
    newBox1 = boxB.copy()
    if boxA[0] <= boxB[0] and boxA[2] >= boxB[0]:
        if boxA[2] <= boxB[2]:
            newBox1[0] += width
    elif boxA[0] <= boxB[2] and boxA[2] >= boxB[2]:
        if boxA[0] >= boxB[0]:
            newBox1[2] -= width
    # Candidate 2: shrink boxB vertically away from boxA.
    newBox2 = boxB.copy()
    if boxA[1] <= boxB[1] and boxA[3] >= boxB[1]:
        if boxA[3] <= boxB[3]:
            newBox2[1] += height
    elif boxA[1] <= boxB[3] and boxA[3] >= boxB[3]:
        if boxA[1] >= boxB[1]:
            newBox2[3] -= height
    # Pick whichever modified candidate keeps the larger area; fall back to
    # the unmodified boxB when neither trim applied.
    if newBox1 != boxB and newBox2 != boxB:
        return newBox1 if (newBox1[2] - newBox1[0]) * (newBox1[3] - newBox1[1]) > (newBox2[2] - newBox2[0]) * (newBox2[3] - newBox2[1]) else newBox2
    elif newBox1 != boxB:
        return newBox1
    elif newBox2 != boxB:
        return newBox2
    else:
        return boxB
| [
"noreply@github.com"
] | noreply@github.com |
94762fd3d7df399a13b4ec413e8824568a94be8d | 021f3512b248ead3887b7464a4f32fd157853f98 | /panama-papers/wikipedia-1.py | 03f4c105745c1c23c6ef0977b8d7872044e1e6ba | [
"MIT"
] | permissive | jtmorgan/ds4ux | c1953dca84bfa108392bd25ffe2cb1fb21d0a261 | 14c4ece59b367fe7c8db09a126161693b9a640b3 | refs/heads/master | 2020-12-24T21:01:04.186781 | 2017-10-04T01:03:11 | 2017-10-04T01:03:11 | 59,071,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,071 | py | # Copyright (C) 2016 Ben Lewis, and Morten Wang
# Licensed under the MIT license, see ../LICENSE
# Question: When was the first edit to the panama papers wikipedia article?
import requests
# MediaWiki Action API endpoint for English Wikipedia.
ENDPOINT = 'https://en.wikipedia.org/w/api.php'
page_title = 'Panama_Papers'
# Ask the "query" module for the single oldest revision of the page, as JSON.
p = {'action': 'query',
     'prop': 'revisions',
     'titles': page_title,
     'format': 'json',
     'rvlimit': 1,
     'rvdir': 'newer',
     'continue': ''}
# explain what the parameters mean:
'''
This is documented in the API sandbox. Don't worry about remembering it.
Use the reference.
'action' : 'query' -- don't worry about this.
'prop' : 'revisions' -- this means we are asking for information about edits.
'titles' : 'Panama_Papers' -- this means we want information about the page called "Panama Papers".
'format' : 'json' -- get the response in json, we won't change this.
'rvlimit' : 1 -- get one revision
'rvdir' : 'newer' -- this means get the oldest revision first. use 'older' to get the newest edit first.
'continue' : '' -- we will cover this later!
'''
# Fire the request and decode the JSON body.
wp_call = requests.get(ENDPOINT, params=p)
response = wp_call.json()
# Inspect the raw response.
print(response)
# The 'query' dictionary holds the answer to our "query" action.
query = response['query']
print(query)
# 'pages' maps page IDs to per-page results; we only asked about one page.
pages = query['pages']
print(pages)
# Grab the single page entry without caring what its page-ID key is.
page = pages[next(iter(pages))]
# We requested exactly one revision, so take the first list entry.
revision = page['revisions'][0]
revid = revision['revid']
revuser = revision['user']
revdate = revision['timestamp']
title = page['title']
print(f'First edit to {title} was revision ID {revid} by {revuser} on {revdate}')
| [
"jonnymorgan.esq@gmail.com"
] | jonnymorgan.esq@gmail.com |
9a3319bcfa18e3b934550bb8f706d239a6bc2c5b | 00cf2ec634b6dda5de9171b424447d148ca34ebc | /lesson_2/5.py | 2b7fd0676848c454a79839aca2c1498862a8e9b1 | [] | no_license | coffin5257/pylearning | d2831d81af3d67d1a55fd3c3408380606de9b41d | 30ac5e12e8cc599529a398c53216b3078786bc58 | refs/heads/master | 2020-06-04T04:12:17.694747 | 2014-04-28T22:56:00 | 2014-04-28T22:56:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | #!/user/bin/env python
# Build a list of well-known service port numbers, echoing the list after
# every append so each intermediate state is visible (Python 2 print syntax).
port = []
print port
port.append(22)
print port
port.append(80)
print port
port.append(23)
print port
port.append(8080)
print port
port.append(139)
print port
port.append(445)
print port
port.append(3389)
print port
# Sort the ports ascending in place; the sorted list is printed below.
port.sort()
print port | [
"coffin5257@gmail.com"
] | coffin5257@gmail.com |
c4890060c5757c30353d0ca108171f30b92bf7b6 | 564d2af5ebb68f8a9d6b89638a053bdf2e2e6bd3 | /Python/Problem-7/dictionary.py | a23ebce83a9fd26ca15b3918d7f415b052b160d4 | [] | no_license | Dev-SumitPatil/SumitPatil | ac69dc8360109dc29003fb053df6a8c200bc0ea1 | d05a6cd170e078a83acadda2cc1bf9ed513beb43 | refs/heads/master | 2023-06-14T12:48:50.633086 | 2021-06-29T16:34:37 | 2021-06-29T16:34:37 | 271,215,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | n=int(input())
# Map each integer 1..n to its square; n was read from stdin above.
d = {g: g * g for g in range(1, n + 1)}
print(d,end="") | [
"sumit.patil6291@gmail.com"
] | sumit.patil6291@gmail.com |
405d8bcb0b31d77a0d6e4e204408ddbb95b0786f | 44dcb2d759917401e1c7008810d9a8efcd50457f | /others/decorators/closures-example.py | f4711ae0bf61b81198c01f62786c7560a5e47826 | [] | no_license | raghav18gupta/python-advance | 85deaf2e5aaf7474111ec794d1f49a92d6db0f83 | 751d5881a958aaea978dbabb21195a3fb12ff7b0 | refs/heads/master | 2021-05-03T06:20:36.687805 | 2018-12-28T09:10:53 | 2018-12-28T09:10:53 | 120,592,480 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | import logging
# Log INFO and above to a file. NOTE(review): the logs/ directory must already
# exist or basicConfig raises FileNotFoundError - confirm it is created elsewhere.
logging.basicConfig(filename='logs/closures-example.log', level=logging.INFO)
def logger(func):
    """Decorator factory: log and print each call of ``func``.

    The wrapper logs the function name and positional arguments at INFO
    level and prints the return value (as before). Improvement: the wrapper
    now also *returns* the wrapped function's result instead of discarding
    it, so decorated functions remain usable in expressions.
    """
    def log_func(*args):
        logging.info("Running '{}' with arguments '{}'".format(func.__name__, args))
        result = func(*args)
        print(result)
        return result
    return log_func
def add(x, y):
    """Return the sum of x and y."""
    total = x + y
    return total
def sub(x, y):
    """Return x minus y."""
    difference = x - y
    return difference
# Wrap add/sub in the logging decorator (manual application, no @ syntax,
# to demonstrate that a decorator is just a function returning a closure).
add_logger = logger(add)
sub_logger = logger(sub)
# Each call below writes an INFO line to the log file and prints the result.
add_logger(3, 3)
add_logger(4, 5)
sub_logger(10, 5)
sub_logger(20, 10)
| [
"18raghavgupta@gmail.com"
] | 18raghavgupta@gmail.com |
d6b2a25ddb12afec7062cff3eb7df5acd5126835 | 2d7b4a57aca05fae79bf75266339b0913f9c361f | /ore/organizations/forms.py | 702fcd565163cb2c3caf5c697f4d1d5d7e0fb38f | [
"MIT"
] | permissive | maxov/ore-old | 8aa6bbece458cbdce281f42695fe8b6b14c1e378 | 1d1c73795406fa52ae969726feb89f7aedbc4afc | refs/heads/master | 2021-01-19T21:12:51.100761 | 2016-03-06T03:21:40 | 2016-03-06T03:21:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,225 | py | from django import forms
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.urlresolvers import reverse
from django.forms import widgets
from django.utils.html import escape
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Field, Submit, Hidden, HTML
from crispy_forms.bootstrap import StrictButton, FieldWithButtons
from io import BytesIO
from PIL import Image
from . import models
class AvatarFileInput(widgets.ClearableFileInput):
    """ClearableFileInput that previews the current avatar as an inline <img>."""
    # Same placeholders as the stock widget template, but the initial value is
    # rendered as an image tag instead of a bare link to the file.
    template_with_initial = (
        '%(initial_text)s: <img src="%(initial_url)s" alt="Current avatar"> %(clear_template)s<br />%(input_text)s: %(input)s'
    )
class BaseOrganizationForm(forms.ModelForm):
    """Base ModelForm for editing an Organization's public profile (name + avatar).

    The crispy-forms layout carries hidden avatar_width/height/x/y fields that
    the client-side cropper widget (js-crop-field) fills in; clean() then
    applies the crop and normalises the uploaded image server-side.
    """
    def __init__(self, *args, **kwargs):
        super(BaseOrganizationForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.layout = Layout(
            Field('name'),
            # data-* attributes tell the cropper widget which hidden inputs to
            # write the selected crop rectangle into, and cap the accepted
            # source image at 800x800 on the client side too.
            Field('avatar_image', css_class='js-crop-field',
                  data_width_field="input[name='avatar_width']",
                  data_height_field="input[name='avatar_height']",
                  data_x_field="input[name='avatar_x']",
                  data_y_field="input[name='avatar_y']",
                  data_max_width="800",
                  data_max_height="800"
                  ),
            Hidden('avatar_width', ''),
            Hidden('avatar_height', ''),
            Hidden('avatar_x', ''),
            Hidden('avatar_y', ''),
            Submit('submit', 'Update profile'),
        )
        self.fields['avatar_image'].widget = AvatarFileInput()
        # Crop coordinates are optional: missing or invalid values simply mean
        # "no crop requested" (see crop_avatar_if_necessary below).
        self.fields['avatar_width'] = forms.IntegerField(required=False)
        self.fields['avatar_height'] = forms.IntegerField(required=False)
        self.fields['avatar_x'] = forms.IntegerField(required=False)
        self.fields['avatar_y'] = forms.IntegerField(required=False)
    def clean(self):
        """Run the avatar crop/normalise step, surfacing failures on the field."""
        try:
            self.crop_avatar_if_necessary()
        except ValidationError as ex:
            # NOTE(review): ex.message only exists on older Django versions
            # (this file imports django.core.urlresolvers) - confirm before upgrading.
            self.add_error('avatar_image', ex.message)
    def crop_avatar_if_necessary(self):
        """Validate the uploaded avatar and crop/scale it to a 200x200 square.

        Raises ValidationError when the image is oversized or malformed.
        Rewrites cleaned_data['avatar_image'] with a re-encoded PNG only when
        the image actually had to be cropped or resized.
        """
        avatar_f = self.cleaned_data.get('avatar_image')
        if not avatar_f:
            # user is probably trying to clear, or not submitting with avatar
            return
        # although the avatar is on the .image attribute of avatar_f, we can't use it
        # because the file handle has imploded at this point(?)
        if hasattr(avatar_f, 'temporary_file_path'):
            avatar_fp = avatar_f.temporary_file_path()
        elif hasattr(avatar_f, 'read'):
            avatar_fp = BytesIO(avatar_f.read())
        else:
            # presumably a dict-like upload with raw bytes under 'content'
            # (e.g. test fixtures) - TODO confirm
            avatar_fp = BytesIO(avatar_f['content'])
        avatar = Image.open(avatar_fp)
        touched = False
        if avatar.width > 800 or avatar.height > 800:
            raise ValidationError(
                'This image is too large - avatars can be at most 800x800 pixels.')
        try:
            # Force-decode the image data now so corrupt uploads fail here.
            avatar.load()
        except Exception:
            raise ValidationError(
                'Upload a valid image. The image you uploaded appears to be malformed or invalid.')
        avcrop = None
        try:
            avcrop_width = int(self.cleaned_data['avatar_width'])
            avcrop_height = int(self.cleaned_data['avatar_height'])
            avcrop_x = int(self.cleaned_data['avatar_x'])
            avcrop_y = int(self.cleaned_data['avatar_y'])
            # Accept the client-supplied crop only if it is a square that fits
            # entirely inside the image.
            if (
                avcrop_width == avcrop_height and
                avcrop_width > 0 and
                avcrop_height > 0 and
                avcrop_x >= 0 and
                avcrop_y >= 0 and
                avcrop_x < avatar.width and
                avcrop_y < avatar.height and
                (avcrop_x + avcrop_width) <= avatar.width and
                (avcrop_y + avcrop_height) <= avatar.height
            ):
                avcrop = (
                    avcrop_x, avcrop_y,
                    avcrop_x + avcrop_width,
                    avcrop_y + avcrop_height,
                )
        except Exception:
            # Missing/non-numeric crop fields: fall through to auto-crop below.
            pass
        # we want to ensure that this image is square.
        # make the image square.
        if avatar.width != avatar.height or avcrop:
            if not avcrop:
                # No usable client crop: take the largest top-left square.
                new_dimension = min(avatar.width, avatar.height)
                avcrop = (0, 0, new_dimension, new_dimension)
            avatar = avatar.crop(box=avcrop)
            touched = True
        if avatar.width > 200:
            avatar = avatar.resize((200, 200))
            touched = True
        if touched:
            # Re-encode the processed avatar as PNG and swap it into cleaned_data.
            avatar_bytes = BytesIO()
            avatar.save(avatar_bytes, format='PNG')
            self.cleaned_data['avatar_image'] = InMemoryUploadedFile(
                file=avatar_bytes, field_name='avatar_image', name='avatar.png', content_type='image/png',
                size=len(avatar_bytes.getbuffer()), charset=None
            )
    class Meta:
        model = models.Organization
        fields = ['name', 'avatar_image']
class OrganizationSettingsForm(BaseOrganizationForm):
    """Profile-settings variant of the organization form.

    The name field is rendered read-only: the form posts back to the
    organization's settings URL, and clean_name() discards any submitted
    value in favour of the instance's current name (renames go through the
    dedicated rename flow instead).
    """
    def __init__(self, *args, **kwargs):
        super(OrganizationSettingsForm, self).__init__(*args, **kwargs)
        self.helper.form_action = reverse(
            'organizations-settings', kwargs={'namespace': self.instance.name},
        )
        # Display-only: prevents casual edits; clean_name() enforces it server-side.
        self.helper['name'].update_attributes(readonly=True)
    def clean_name(self):
        # Ignore whatever was submitted; the name cannot be edited here.
        return self.instance.name
class OrganizationCreateForm(forms.ModelForm):
    """Form for creating a new Organization; only the name is collected."""
    def __init__(self, *args, **kwargs):
        super(OrganizationCreateForm, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.layout = Layout(
            Field('name'),
            Submit('submit', 'Create organization'),
        )
        self.helper = helper
    def clean_name(self):
        """Reject names that are already taken by another organization."""
        name = self.cleaned_data['name']
        taken = models.Organization.objects.filter(name=name).exists()
        if taken:
            raise ValidationError(
                "Sorry, but this name is already in use. Try another?")
        return name
    class Meta:
        model = models.Organization
        fields = ['name']
class OrganizationDeleteForm(forms.Form):
    """Confirmation form for irreversibly deleting an organization.

    The user must retype the organization name into the ``lock`` field; the
    delete button stays disabled client-side until it matches, and
    clean_lock() re-checks the value server-side.
    """
    lock = forms.CharField(max_length=64)
    def __init__(self, *args, **kwargs):
        # The target organization is passed in as 'instance' (this is a plain
        # Form, not a ModelForm, so we handle the instance ourselves).
        self.instance = kwargs.pop('instance')
        super(OrganizationDeleteForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.field_template = 'bootstrap3/layout/inline_field.html'
        self.helper.form_action = reverse(
            'organizations-delete', kwargs={'namespace': self.instance.name},
        )
        self.helper.form_show_labels = False
        # data-* attributes drive the client-side "type the exact name to
        # unlock the button" behaviour (js-lock-form).
        self.helper.form_class = "js-lock-form"
        self.helper.attrs = {
            'data-confirm': self.instance.name,
            'data-input': 'input[name="lock"]',
            'data-locks': 'button',
        }
        self.helper.layout = Layout(
            HTML("""
            <p>Deleting removes all data, including projects and files, related to this organization forever and is <em>not reversible</em>.</p>
            <p>Please type the name of the organization (<tt>{}</tt>) to confirm deletion.</p>
            """.format(escape(self.instance.name))),
            FieldWithButtons(
                Field('lock'), StrictButton('<i class="fa fa-times"></i> Delete', css_class='btn-danger', type='submit')),
        )
    def clean_lock(self):
        """Server-side confirmation: the typed value must match the name exactly."""
        lock = self.cleaned_data['lock']
        if lock != self.instance.name:
            raise ValidationError(
                'You must type the organization name exactly, including any capitalisation.')
        return lock
class OrganizationRenameForm(forms.ModelForm):
    """Form for renaming an organization, with a warning about the consequences.

    The warning text adapts to how many projects the organization contains.
    """
    def __init__(self, *args, **kwargs):
        super(OrganizationRenameForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.field_template = 'bootstrap3/layout/inline_field.html'
        self.helper.form_action = reverse(
            'organizations-rename', kwargs={'namespace': self.instance.name},
        )
        self.helper.form_show_labels = False
        # Build a grammatically-correct clause describing how many projects
        # are affected by the rename ("", " and its project", or
        # " and the N projects it contains").
        and_the_projects_it_contains = ""
        project_count = self.instance.projects.count()
        if project_count > 1:
            and_the_projects_it_contains = " and the {} projects it contains".format(
                project_count)
        elif project_count == 1:
            and_the_projects_it_contains = " and its project"
        # The template below has exactly one placeholder; the previous code
        # also passed escape(self.instance.name) as a second argument, which
        # str.format silently ignored - that dead argument has been removed.
        self.helper.layout = Layout(
            HTML("""
            <p>Are you sure you wish to rename this organization?</p>
            <p>While this operation is reversible, no redirects of any kind are set up and former links to your organization{} may not work as expected.</p>
            <p>In addition, no reservations are made, so the old name will be made available for other users immediately.</p>
            """.format(escape(and_the_projects_it_contains))),
            FieldWithButtons(
                Field('name'), StrictButton('<i class="fa fa-edit"></i> Rename', css_class='btn-warning', type='submit')),
        )
    class Meta:
        model = models.Organization
        fields = ['name']
| [
"git@lukegb.com"
] | git@lukegb.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.