blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3f049a829e4f528690569a3ddb76c01f8186474e | aae95f673759e71d8859b05973eb563d15200114 | /M3HW1_AgeClassifier_NOVAK.py | b22943545cce9fc94576bf8756876bc736c2a747 | [] | no_license | tjnovak58/cti110 | 74ae3cb355c1bbde8a51ba9bdc943c172f109885 | 1edec8b428f5aa5858662219ddd2a4dd93a91a9c | refs/heads/master | 2021-05-15T00:47:08.440056 | 2017-11-13T20:18:03 | 2017-11-13T20:18:03 | 103,067,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | # CTI-110
# M3HW1 - Age Classifier
# Timothy Novak
# 09/24/17
#
# This program takes an age input and outputs a classification.
#
age = float(input('Enter age: '))
def main():
if age >= 20:
print('The person is an Adult')
if age >= 13 and age < 20:
print('The person is a Teenager')
if age > 1 and age < 13:
print('The person is a Child')
if age <= 1:
print('The person is an Infant')
main()
| [
"noreply@github.com"
] | noreply@github.com |
76a0dc84b832d50eb22547d7ae8063a44087c8f3 | 7f7593bad603e3fe9142707269dd3ad81cd581e7 | /dataset_parser/translator.py | eb214130326180fa5115aea580290947e7598c3b | [] | no_license | kadircet/NeVA | e6eee87d17de0c4d3b7579bd6359f473c3c873f0 | fbcc8163675243f0c5b0e6aae55f4695f91a3e8a | refs/heads/master | 2020-03-27T09:10:46.840256 | 2018-06-01T21:46:41 | 2018-06-01T21:46:41 | 146,319,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | import urllib
import urllib2
# NOTE(review): the Yandex API key is hard-coded and checked into source
# control -- it should come from an environment variable or config file.
api_key = "trnsl.1.1.20171114T121819Z.260cc24adf90daa9.d0f6b72d715f1ec6369a4ff855eb257aeaab16d6"
# Yandex Translate REST endpoint (JSON flavour).
url = "https://translate.yandex.net/api/v1.5/tr.json/translate"
def translate( text ):
    """POST *text* to the Yandex Translate API (English -> Turkish) and
    return the raw JSON response body as a string.

    Python 2 only: relies on urllib.urlencode and urllib2.
    The caller is responsible for parsing the JSON payload.
    """
    data = {
        "lang" : "en-tr",
        "text" : text,
        "key" : api_key,
        "format" : "plain"
    }
    # Form-encode the parameters; passing a body makes this a POST request.
    data = urllib.urlencode(data)
    request = urllib2.Request(url, data)
    response = urllib2.urlopen(request)
    return response.read()
"e2035566@ceng.metu.edu.tr"
] | e2035566@ceng.metu.edu.tr |
8bc175401c234330dcca0e841f43babb1b91a34e | e831c22c8834030c22c54b63034e655e395d4efe | /Strings/409-LongestPalindrome.py | a7c78ae605311f965fabd78f56853df5f5a2ed97 | [] | no_license | szhmery/leetcode | a5eb1a393422b21f9fd4304b3bdc4a9db557858c | 9fcd1ec0686db45d24e2c52a7987d58c6ef545a0 | refs/heads/master | 2023-08-16T00:27:56.866626 | 2021-10-23T07:35:37 | 2021-10-23T07:35:37 | 331,875,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | from collections import Counter
class Solution:
    # https://leetcode.com/problems/longest-palindrome/solution/
    def longestPalindrome(self, s: str) -> int:
        """Return the length of the longest palindrome that can be built
        from the letters of ``s`` (case sensitive)."""
        counts = Counter(s)
        # Every pair of identical letters can flank the palindrome.
        paired = sum(c // 2 * 2 for c in counts.values())
        # If any letter was left over (odd count), one copy may sit in
        # the middle of the palindrome.
        return paired + 1 if paired < len(s) else paired
solution = Solution()
# Print the longest-palindrome length for each sample input.
for text in ('abccb', 'ccc', 'cccaaadde'):
    print(solution.longestPalindrome(text))
| [
"szhmery@gmail.com"
] | szhmery@gmail.com |
1666cfe1c060fb01ca77d7386ee78fdec9cfdcc8 | e9cf85d3d2cfc1b0ed1f890679b098fe343438b2 | /Scripts/venv/Scripts/easy_install-3.6-script.py | 6e2835f1e41cc6036851b42df91870e2720fd94e | [] | no_license | jakowalski/filmCrawler | 8be9b54f337bf25352d5ffb37edbbaa61c940aa8 | bd75c5d96b1efd5d3248e8b3f6730a79e74ff892 | refs/heads/master | 2020-04-08T17:21:56.182900 | 2019-01-20T19:01:00 | 2019-01-20T19:01:00 | 159,563,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | #!H:\Projects\FilmCrawler\Scripts\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install-3.6'
# Auto-generated setuptools console-script wrapper -- do not edit by hand.
__requires__ = 'setuptools==28.8.0'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the '-script.py' / '.exe' suffix so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install-3.6')()
    )
| [
"sidrox@o2.pl"
] | sidrox@o2.pl |
b28a6e9427e27b1ccb8fa350686110b8a21e74e3 | 68c4805ad01edd612fa714b1e0d210115e28bb7d | /venv/Lib/site-packages/numba/cuda/tests/cudapy/test_print.py | 59513d127a2aebd9b1461428f48b504cac50b75b | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Happy-Egg/redesigned-happiness | ac17a11aecc7459f4ebf0afd7d43de16fb37ae2c | 08b705e3569f3daf31e44254ebd11dd8b4e6fbb3 | refs/heads/master | 2022-12-28T02:40:21.713456 | 2020-03-03T09:04:30 | 2020-03-03T09:04:30 | 204,904,444 | 2 | 1 | Apache-2.0 | 2022-12-08T06:19:04 | 2019-08-28T10:18:05 | Python | UTF-8 | Python | false | false | 1,919 | py | from __future__ import print_function
import numpy as np
from numba import cuda
from numba import unittest_support as unittest
from numba.cuda.testing import captured_cuda_stdout, SerialMixin
def cuhello():
    # Kernel: each thread prints its global 1-D index plus two constants;
    # device-side print() is forwarded to the host stdout.
    i = cuda.grid(1)
    print(i, 999)
    print(-42)


def printfloat():
    # Kernel: exercises mixed int/float argument formatting.
    i = cuda.grid(1)
    print(i, 23, 34.75, 321)


def printstring():
    # Kernel: exercises string-literal arguments to device print().
    i = cuda.grid(1)
    print(i, "hop!", 999)


def printempty():
    # Kernel: a bare print() should emit a single newline.
    print()
class TestPrint(SerialMixin, unittest.TestCase):
    """Verify that device-side print() output from CUDA kernels (or the
    simulator) reaches the host's captured stdout with the expected text."""

    def test_cuhello(self):
        jcuhello = cuda.jit('void()', debug=False)(cuhello)
        with captured_cuda_stdout() as stdout:
            # 2 blocks x 3 threads = 6 threads in total.
            jcuhello[2, 3]()
        # The output of GPU threads is intermingled, but each print()
        # call is still atomic
        out = stdout.getvalue()
        lines = sorted(out.splitlines(True))
        expected = ['-42\n'] * 6 + ['%d 999\n' % i for i in range(6)]
        self.assertEqual(lines, expected)

    def test_printfloat(self):
        jprintfloat = cuda.jit('void()', debug=False)(printfloat)
        with captured_cuda_stdout() as stdout:
            jprintfloat()
        # CUDA and the simulator use different formats for float formatting
        self.assertIn(stdout.getvalue(), ["0 23 34.750000 321\n",
                                          "0 23 34.75 321\n"])

    def test_printempty(self):
        cufunc = cuda.jit('void()', debug=False)(printempty)
        with captured_cuda_stdout() as stdout:
            cufunc()
        self.assertEqual(stdout.getvalue(), "\n")

    def test_string(self):
        cufunc = cuda.jit('void()', debug=False)(printstring)
        with captured_cuda_stdout() as stdout:
            # Single block of 3 threads; one line per thread.
            cufunc[1, 3]()
        out = stdout.getvalue()
        lines = sorted(out.splitlines(True))
        expected = ['%d hop! 999\n' % i for i in range(3)]
        self.assertEqual(lines, expected)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
"yangyang4910709@163.com"
] | yangyang4910709@163.com |
2aa62736bf9b102bfaafa6329a5283ff80764c52 | 64b562f5bc27e20e267f20c66d6c5212c746eaa1 | /student/urls.py | ad988ad5ac054e15a8affacfde6bec190589d42f | [] | no_license | ykugb/PerfectCRM | 9419e7f6ff14e300f29cabaf99327503ac823add | 0342b6cf1db3d5911257d84e357e3f522cb0fd7f | refs/heads/master | 2022-12-02T16:29:48.675924 | 2018-04-14T00:59:53 | 2018-04-14T01:10:07 | 129,470,846 | 0 | 1 | null | 2022-11-21T14:56:19 | 2018-04-14T01:42:19 | Python | UTF-8 | Python | false | false | 126 | py |
from django.conf.urls import url
from student import views
# Route the student app's root URL to its index view.
urlpatterns = [
    url(r'^$', views.index,name="stu_index"),
]
| [
"402173349@qq.com"
] | 402173349@qq.com |
eadd064afcb20f96f92a1dd01fffdcfba42712a5 | 24dd3c272457110b2b51bb783715d1245afcd9ce | /eth_dev/infura.py | 73181d7325cfb92aa1ccb3a2719e9daa434c82ab | [] | no_license | fubuloubu/eth-dev | 81761da7942927a97830c426cccf650046e6db74 | 383e51bba0b4471ef1c7a5d6ee2d1ff6a0562f8a | refs/heads/master | 2020-04-30T04:24:29.606074 | 2019-03-19T23:15:56 | 2019-03-19T23:15:56 | 176,610,133 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | import os
import sys
from importlib import import_module
def get_web3(network: str, project_id: str):
    """Return an auto-connected web3 instance for *network* via Infura.

    The project id must be exported to the environment *before* the
    autoloader module is imported, because web3's Infura modules read
    WEB3_INFURA_PROJECT_ID at import time.
    """
    # Infura websocket API requires Project ID token as of March 23rd
    print("Setting Infura Project ID to", project_id, file=sys.stderr)
    os.environ['WEB3_INFURA_PROJECT_ID'] = project_id
    # Dynamically load the correct autoloader (based on network)
    print("Connecting to the", network, "network (using Infura)", file=sys.stderr)
    infura_module = import_module("web3.auto.infura.%s" % network)
    # Return w3 autoloader for network
    return getattr(infura_module, 'w3')
| [
"fubuloubu@gmail.com"
] | fubuloubu@gmail.com |
8ef6afb127f1e1660480057972b67363ca5217de | b83bec9a4bfb7f9905f52cbe65e56b3cebaaac1d | /bikeshare.py | 825f3bed9ffc2ecc7130854741edd6c383a3d76d | [] | no_license | saramckellop/pdsnd_github | f0fc13c15a3ac07f267560d39312d96619ffd9b4 | 0fa2a5580fe67ec4a9f390f858d78d59afaad484 | refs/heads/master | 2020-12-11T11:07:55.570068 | 2020-01-15T16:02:15 | 2020-01-15T16:02:15 | 233,831,972 | 0 | 0 | null | 2020-01-14T12:02:30 | 2020-01-14T12:02:30 | null | UTF-8 | Python | false | false | 7,437 | py | import time
import pandas as pd
import numpy as np
# Map of supported city names (lower case) to their trip-data CSV files.
CITY_DATA = { 'chicago': 'chicago.csv',
              'new york city': 'new_york_city.csv',
              'washington': 'washington.csv' }
def get_filters():
    """
    Ask the user for a city, month, and day to analyze, re-prompting on
    invalid input.

    Fixes the original validation, which compared the input *string* to a
    *tuple* (``city == ('chicago', ...)`` is always False) inside try/except
    blocks that could never fire -- so every input, valid or not, was
    silently accepted.

    Returns:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    """
    print('Hello! Let\'s explore some US bikeshare data!')

    cities = ('chicago', 'new york city', 'washington')
    months = ('all', 'january', 'february', 'march', 'april', 'may', 'june')
    days = ('all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday',
            'saturday', 'sunday')

    # Loop until each answer is one of the supported values.
    while True:
        city = input('What city would you like to explore first? \nPlease enter Chicago, New York City or Washington.: ').lower().strip()
        if city in cities:
            break
        print('That\'s not a valid city, please try again.')

    while True:
        month = input('What month would you like to analyze?: ').lower().strip()
        if month in months:
            break
        print('That is an invalid input.')

    while True:
        day = input('What day would you like to investigate?: ').lower().strip()
        if day in days:
            break
        print('That is not a valid day, please try again.')

    print('-'*40)
    return city, month, day
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day
    """
    # Look up the CSV path for the chosen city (KeyError for unknown city).
    df = pd.read_csv(CITY_DATA[city])

    # Derive month number and weekday name for filtering below.
    df['Start Time'] = pd.to_datetime(df['Start Time'])
    df['month'] = df['Start Time'].dt.month
    # NOTE(review): Series.dt.weekday_name was removed in pandas 1.0
    # (replaced by dt.day_name()); this code assumes an older pandas.
    df['day_of_week'] = df['Start Time'].dt.weekday_name

    if month != 'all':
        # Convert the month name to its 1-based number (January..June only).
        months = ['january', 'february', 'march', 'april', 'may', 'june']
        month = months.index(month) + 1
        df = df[df['month'] == month]

    if day != 'all':
        # day_of_week holds capitalized names, hence .title().
        df = df[df['day_of_week'] == day.title()]

    return df
def time_stats(df):
    """Display the most frequent month, day of month, and hour of travel.

    Fixes over the original:
    * ``Series.mode()`` returns a Series; the original printed the Series
      repr for day and hour instead of the value -- index with ``[0]``.
    * 'Start Time' was converted to datetime three separate times and the
      intermediate columns were written back into the caller's DataFrame;
      convert once into a local Series instead.

    Args:
        df - DataFrame with a 'Start Time' column (string or datetime).
    """
    print('\nCalculating The Most Frequent Times of Travel...\n')
    start_time = time.time()

    # Convert once; works whether the column is still text or already datetime.
    start = pd.to_datetime(df['Start Time'])

    popular_month = start.dt.month.mode()[0]
    print('{} is the most popular Start Month.'.format(popular_month))

    # NOTE: day of the *month* (1-31), matching the original behaviour.
    popular_day = start.dt.day.mode()[0]
    print('{} is the most popular Start Day.'.format(popular_day))

    popular_hour = start.dt.hour.mode()[0]
    print('{} is the most popular Start Hour.'.format(popular_hour))

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def station_stats(df):
    """Display the most popular start station, end station, and trip.

    Fix: the original computed ``df[['Start Station', 'End Station']].mode().loc[0]``,
    which is the *column-wise* mode of each column independently -- not the
    most frequent (start, end) pair. Count actual pairs with a groupby.

    Args:
        df - DataFrame with 'Start Station' and 'End Station' columns.
    """
    print('\nCalculating The Most Popular Stations and Trip...\n')
    start_time = time.time()

    start_stations = df['Start Station'].mode()[0]
    print('The count of the most commonly used Start Station is:', start_stations)

    end_stations = df['End Station'].mode()[0]
    print('The count of the most commonly used End Station is:', end_stations)

    # Most frequent actual (start, end) pairing.
    most_frequent_combo = df.groupby(['Start Station', 'End Station']).size().idxmax()
    print('The most frequent combination of start and end stations is: {} & {}.'.format(most_frequent_combo[0], most_frequent_combo[1]))

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def trip_duration_stats(df):
    """Print the total and mean of the 'Trip Duration' column."""
    print('\nCalculating Trip Duration...\n')
    start_time = time.time()

    durations = df['Trip Duration']
    # Total travel time across all (filtered) trips.
    print(durations.sum())
    # Average trip length.
    print(durations.mean())

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def user_stats(df):
    """Display user-type counts and, when the columns exist, gender counts
    and birth-year statistics (earliest, most recent, most common).

    Fixes over the original:
    * "Most common birth year" used ``mean()``; the most common value is
      the mode -- use ``mode()[0]``.
    * The 'Birth Year' existence check was copy-pasted three times (and
      the "does not exist" message printed three times); check once.

    Args:
        df - DataFrame with a 'User Type' column and, for some cities,
             'Gender' and 'Birth Year' columns.
    """
    print('\nCalculating User Stats...\n')
    start_time = time.time()

    # Counts of each user type (Subscriber / Customer / ...).
    print(df['User Type'].value_counts())

    # Washington's dataset lacks Gender and Birth Year, hence the guards.
    if 'Gender' in df.columns:
        print(df['Gender'].value_counts())
    else:
        print('Gender column does not exist for this city.')

    if 'Birth Year' in df.columns:
        birth_years = df['Birth Year']
        print('Earliest birth year recorded:', birth_years.min())
        print('Most recent birth year recorded:', birth_years.max())
        print('Most common birth year recorded:', birth_years.mode()[0])
    else:
        print('Birth Year column does not exist for this city.')

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def display_data(df):
""" Allows the user to choose to see more data in groups of 5 rows.
"""
raw_data = input('Would you like to analyze 5 more rows of data? \nType yes or no.').lower()
index1 = 0
index2 = 5
while True:
if raw_data == 'yes':
print(df[df.columns[0: ]].iloc[index1:index2])
index1 += 5
index2 += 5
more_columns = input('Would like like to analyze 5 more rows?').lower()
if more_columns not in ('yes', 'y'):
break
else:
break
def main():
    """Drive the interactive analysis loop: gather filters, load data,
    print all statistics, offer raw-data paging, and repeat until the
    user declines to restart.
    """
    while True:
        city, month, day = get_filters()
        df = load_data(city, month, day)

        time_stats(df)
        station_stats(df)
        trip_duration_stats(df)
        user_stats(df)
        display_data(df)

        restart = input('\nWould you like to restart? Enter yes or no.\n')
        if restart.lower() != 'yes':
            break


if __name__ == "__main__":
    main()
| [
"saramckellop@gmail.com"
] | saramckellop@gmail.com |
9d5286a5880894b85b64c677592c9c21f3670174 | d4f6b2f0ff0300f7a5a2570280d1348fca5e48a2 | /product/views.py | 9132ff348b83ed580b987abbffd91e8f721c211f | [] | no_license | pixies/beraca.org | 92f0671767d1e5a090613135a4808105f7db7b99 | a71d932464399ae4bfccd8022588f51912c87894 | refs/heads/master | 2022-12-09T23:56:07.196093 | 2018-06-28T15:15:14 | 2018-06-28T15:15:14 | 137,068,948 | 0 | 0 | null | 2022-11-22T02:07:44 | 2018-06-12T12:32:53 | CSS | UTF-8 | Python | false | false | 2,469 | py | from django.db.models import Q
from rest_framework import generics, mixins, permissions, response
from accounts.permissions import IsClientOrAdmin, IsAdmin, IsManagerOrAdmin
from .serializers import ProductSerializer, ProductSearchSerializer
from .models import Product
class ProductList(mixins.ListModelMixin,
generics.GenericAPIView):
http_method_names = ['get']
queryset = Product.objects.all()
serializer_class = ProductSerializer
permission_classes = [permissions.AllowAny]
#IsClientOrAdmin]
def get(self, request, *args, **kwargs):
if 'names' in request.GET:
queryset = self.get_queryset()
queryset = queryset.values('id', 'scientific_name', 'common_name')
serializer = ProductSearchSerializer(queryset, many=True)
return response.Response(serializer.data)
return self.list(request, *args, **kwargs)
class ProductCreate(mixins.CreateModelMixin,
generics.GenericAPIView):
http_method_names = ['post']
queryset = Product.objects.all()
serializer_class = ProductSerializer
permission_classes = [ permissions.AllowAny,] #permissions.IsAuthenticated,
# IsAdmin]
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class ProductDetail(mixins.RetrieveModelMixin,
generics.GenericAPIView):
http_method_names = ['get']
queryset = Product.objects.all()
serializer_class = ProductSerializer
permission_classes = [permissions.AllowAny,]
#IsClientOrAdmin]
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
class ProductUpdate(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
generics.GenericAPIView):
http_method_names = ['get', 'put', 'delete']
queryset = Product.objects.all()
serializer_class = ProductSerializer
permission_classes = [permissions.AllowAny,]
#IsManagerOrAdmin]
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
| [
"pixies@riseup.net"
] | pixies@riseup.net |
4ddcc5e2081faa2bf1cfbed6af652a7d0dd75730 | b343b3eb399eeee735ce4f7e12d3ecfbe31f2666 | /regist.py | 87e2605b244fce8b23925904897ffb2a13870611 | [] | no_license | LiXinKing/python_timer | 8ab091b86ecb27d7574e3ca44e3a7be767548868 | 954486f4ed7a655d57086fe76824106d8351102a | refs/heads/master | 2020-06-13T00:26:04.832039 | 2016-12-13T16:02:49 | 2016-12-13T16:02:49 | 75,472,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | from globalDef import g_relative_timer_regist_dict
from globalDef import g_absolute_timer_regist_dict
from globalDef import g_round_timer_regist_dict
from event import *
def regist_relative_timer(timer_name, timer_stamp, timer_func, func_para):
    # Register a relative timer entry (per its name) in the shared
    # registry dict; entries are (timestamp, callback, callback args).
    g_relative_timer_regist_dict[timer_name] = (timer_stamp, timer_func, func_para)


def regist_absolute_timer(timer_name, timer_stamp, timer_func, func_para):
    # Same shape as above, but in the absolute-timer registry.
    g_absolute_timer_regist_dict[timer_name] = (timer_stamp, timer_func, func_para)


def regist_round_timer(timer_name, timer_stamp, timer_func, func_para):
    # Same shape as above, but in the round (presumably repeating) registry
    # -- TODO confirm semantics against the consumer of these dicts.
    g_round_timer_regist_dict[timer_name] = (timer_stamp, timer_func, func_para)
| [
"444989527@qq.com"
] | 444989527@qq.com |
e298d1cd83cc89ad0620c397e0a13cede3f39494 | cc23059aa361c4ab3ec7ef5d8ad10e521ded1da1 | /thema16-spieltheorie/monty-hall.py | 279db4f67d134555f2734a81b8768ce1bfc39c5e | [] | no_license | iblech/mathezirkel-kurs | 747574d51a5b874526436bfd5cc03aa735760dc2 | 42537d975752999e1d77bcac9e04844b3c64e629 | refs/heads/master | 2023-01-20T13:35:17.414250 | 2022-12-22T16:21:39 | 2022-12-22T16:21:39 | 14,818,826 | 11 | 7 | null | 2022-10-19T08:59:50 | 2013-11-30T10:02:27 | TeX | UTF-8 | Python | false | false | 1,297 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import random
numDoors = 3
shouldSwitch = False
numSimulations = 1000
numWins = 0
# Liste der Türen -- also insgesamt numDoors viele Zahlen
doors = range(0,numDoors)
for i in range(numSimulations):
# Tür, hinter der sich das Auto befindet -- natürlich unbekannt für den
# Kandidaten
carDoor = random.choice(doors)
# Tür, die der Kandidat zu Beginn auswählt
selectedDoor = random.choice(doors)
# Liste der Türen, unter denen der Moderator eine auswählen und öffnen wird
showableDoors = [ x for x in doors if x != carDoor and x != selectedDoor ]
# Vom Moderator ausgewählte Tür
uncoveredDoor = random.choice(showableDoors)
print "Auto: %d, ausgewählt: %d, gezeigt: %d" % (carDoor, selectedDoor, uncoveredDoor)
if shouldSwitch:
# Liste der Türen, die zur neuen Auswahl stehen
choosableDoors = [ x for x in doors if x != selectedDoor and x != uncoveredDoor ]
selectedDoor = random.choice(choosableDoors)
# Zähler hochsetzen
if selectedDoor == carDoor:
numWins = numWins + 1
print "*** Von %d Spielen insgesamt %d gewonnen, %.1f %%." % \
(numSimulations, numWins, numWins / numSimulations * 100)
| [
"iblech@web.de"
] | iblech@web.de |
b1ac8ccb27daa0ee5ed8560892cafa40d792b723 | 3b7bfe318f264800245b60894634b5cfb1a9f19f | /manage.py | 9a0699ec56418ef63d652bbedf0df110d5ae82d7 | [] | no_license | aviyan171/Text-Utils | 6fe3b7d1e56423c7bb271a672be3a091b5a63e6e | e25c0e840d563e4a6dd62e070e61a7b001cf4669 | refs/heads/master | 2023-07-02T22:39:58.075190 | 2021-08-04T04:19:20 | 2021-08-04T04:19:20 | 390,938,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings module unless already set.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'newtextutils.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"Abhiyanupreti76@gmail.com"
] | Abhiyanupreti76@gmail.com |
71f71829867a94e2a7dfe8ba5fd81f25492ed3a5 | b011ff74ed9737b0a8ab6e043200adeacb0ed40e | /regression_x/gradient_linear.py | 4ed7ea8002a4ba27726ec0b0a3abb573408ae26e | [] | no_license | lisz1012/linear_regression | d6836c10113972456f93bbc63e3d0cbca2e2c751 | 2f27bd35eafce30e8f26805c4e71bc2920375a56 | refs/heads/master | 2022-12-25T12:06:33.185605 | 2020-10-12T06:09:27 | 2020-10-12T06:09:27 | 303,294,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
# Python 2-only hack: reload(sys) re-exposes setdefaultencoding so the
# process-wide default string encoding can be forced to UTF-8.
reload(sys)
sys.setdefaultencoding('utf-8')
import random
def get_data(w, num):
    """Draw `num` points x ~ Uniform(0, 5) and pair each with its exact
    (noise-free) label w*x."""
    xs = [random.uniform(0, 5) for _ in range(0, num)]
    ys = [w * x for x in xs]
    return zip(xs, ys)
def train_step_pow(data, w, rate=0.03):
    """One gradient-descent step for the mean-squared-error loss."""
    grad = sum((w * x - y) * x for x, y in data) / len(data)
    return w - rate * grad
def train_step_abs(data, w, rate=0.03):
    """One (sub)gradient step for the mean-absolute-error loss."""
    signs = [x if w * x - y > 0 else -1 * x for x, y in data]
    return w - rate * (sum(signs) / len(data))
def cal_data_error(data, w):
    """Per-sample squared error of the prediction w*x against the target y."""
    errors = []
    for x, y in data:
        residual = w * x - y
        errors.append(residual * residual)
    return errors
# first argument is w, second is the sample count
data=get_data(10,10) +get_data(6,2)
w1=w2=7
#pre_errors=cal_data_error(data,w)
for i in range(0,5000):
    w1=train_step_pow(data,w1)# standard (squared) MSE training
    w2=train_step_abs(data,w2)# absolute-error (MAE) training
    if i%50==0:
        #errors=cal_data_error(data,w)
        #mse_delta=[ "%.3f"%(e2-e1) for [e1,e2] in zip(errors,pre_errors)]
        #pre_errors=errors
        print "{},{}".format(w1,w2)
        #print " ".join(mse_delta)
| [
"lisz1012@163.com"
] | lisz1012@163.com |
ee13fe72a65e189340348da65a80f1eee53e3cb6 | 90904785370340ed6b98c24d852ac9211c0960c5 | /taskone/urls.py | 576c9fee891d5b16c5e96f0a66f2cfaff639da20 | [] | no_license | alhulail/task1 | 92b72df8998e917b3e53d5214bd2c0ce290fbcc4 | df9bb908e8f60f2e3fb03b6be2e374d89e1940cf | refs/heads/master | 2020-03-22T09:34:15.013407 | 2018-07-05T12:37:00 | 2018-07-05T12:37:00 | 139,846,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | """taskone URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
    # Only the Django admin site is wired up; no app routes yet.
    path('admin/', admin.site.urls),
]
| [
"hadeel@Hadeels-MacBook-Pro.local"
] | hadeel@Hadeels-MacBook-Pro.local |
4ad18edeba3a472fa88ee13931a6c5ad42d6a3dc | d7779408c44502a0cb8da4e3923e1b68492b1610 | /apps/organization/forms.py | ccfe66e97ba094e2b0233dc63e529b03fbcc07b3 | [
"MIT"
] | permissive | codelieche/moocweb | 5c4429d3ebee43452d42db63fdd364935e2d6eee | 0e25efa597a79a38066ec41559334be604388f30 | refs/heads/master | 2021-01-13T11:49:08.444658 | 2017-02-26T16:36:08 | 2017-02-26T16:36:08 | 81,343,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | # _*_ coding:utf-8 _*_
import re
from django import forms
from operation.models import UserAsk
class UserAskForm(forms.ModelForm):
    """ModelForm for a user course enquiry (name, mobile, course name)."""

    class Meta:
        model = UserAsk
        fields = ['name', 'mobile', 'course_name']

    def clean_mobile(self):
        """Validate that the submitted mobile number is well-formed.

        Fixes over the original:
        * The hook was misspelled ``clean_mobil``, so Django's form
          validation never invoked it (hooks must be named
          ``clean_<fieldname>``).
        * Invalid input returned ``False`` instead of raising, and the
          trailing ``raise`` was unreachable dead code.

        :return: the cleaned mobile number string.
        :raises forms.ValidationError: if the number does not match.
        """
        mobile = self.cleaned_data['mobile']
        REGEX_MOBILE = '^1[358]\d{9}$|^147\d{8}$|^176\d{8}$'
        if re.match(REGEX_MOBILE, mobile):
            return mobile
        raise forms.ValidationError("手机号码非法", code="mobile_invalid")
| [
"codelieche@gmail.com"
] | codelieche@gmail.com |
e4fbfcf43719e760b30a0d7f710d90c100782c5f | 9926cb4c9b30739e1c735465414d6ceb021ece22 | /accounts/migrations/0001_initial.py | e49cbaf2bdff3637b9a4af681bfbb07631ae0763 | [
"MIT"
] | permissive | thetruefuss/thefacebook | 401b31253f786fd485291dcba2498a3ddf182e9f | 0ebbbe535679a2f8f96614398ef43c1396d7bacc | refs/heads/master | 2023-07-23T19:27:38.815985 | 2018-07-04T23:54:08 | 2018-07-04T23:54:08 | 139,773,013 | 11 | 7 | MIT | 2023-07-17T04:58:38 | 2018-07-04T23:18:28 | HTML | UTF-8 | Python | false | false | 4,966 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-06-23 20:41
from __future__ import unicode_literals
import accounts.managers
import accounts.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.12: creates the custom User model and
    # its one-to-one Profile. Applied migrations should not be hand-edited;
    # fix issues in the models and add a new migration instead.

    initial = True

    dependencies = [
        ('auth', '0008_alter_user_username_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('name', models.CharField(max_length=30, verbose_name='Name')),
                ('status', models.CharField(choices=[('student', 'Student'), ('alumnus', 'Alumnus/Alumna'), ('faculty', 'Faculty'), ('staff', 'Staff')], default='student', max_length=10, verbose_name='Status')),
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='Email')),
                ('member_since', models.DateTimeField(auto_now_add=True, verbose_name='Member Since')),
                ('is_active', models.BooleanField(default=True, verbose_name='Active')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'User',
                'verbose_name_plural': 'Users',
            },
            managers=[
                ('objects', accounts.managers.UserManager()),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('picture', models.ImageField(blank=True, null=True, upload_to=accounts.models.get_picture_filename, verbose_name='Upload a picture')),
                ('concentration', models.CharField(blank=True, max_length=100, verbose_name='Concentration')),
                ('relationship_status', models.CharField(blank=True, choices=[('empty', ''), ('single', 'Single'), ('relationship', 'In a Relationship'), ('engaged', 'Engaged'), ('married', 'Married'), ('complicated', "It's Complicated")], default='empty', max_length=20, verbose_name='Relationship Status')),
                ('sex', models.CharField(blank=True, choices=[('empty', ''), ('male', 'Male'), ('female', 'Female'), ('other', 'Other')], default='empty', max_length=20, verbose_name='Sex')),
                ('dob', models.DateField(blank=True, null=True)),
                # NOTE(review): verbose_name 'Concentration' on phone_number
                # looks like a copy-paste slip in the model (fix there + new migration).
                ('phone_number', models.CharField(blank=True, max_length=100, verbose_name='Concentration')),
                ('high_school', models.CharField(blank=True, max_length=100, verbose_name='High School')),
                ('screen_name', models.CharField(blank=True, max_length=100, verbose_name='Screen Name')),
                ('political_views', models.TextField(blank=True, max_length=1000, verbose_name='Political Views')),
                ('interests', models.CharField(blank=True, max_length=256, verbose_name='Interests')),
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                ('updated', models.DateTimeField(default=django.utils.timezone.now)),
                ('friend', models.ManyToManyField(blank=True, related_name='friends', to=settings.AUTH_USER_MODEL)),
                ('pending_request', models.ManyToManyField(blank=True, related_name='pending_requests', to=settings.AUTH_USER_MODEL)),
                ('poke', models.ManyToManyField(blank=True, related_name='pokes', to=settings.AUTH_USER_MODEL)),
                ('sent_request', models.ManyToManyField(blank=True, related_name='sent_requests', to=settings.AUTH_USER_MODEL)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Profile',
                'verbose_name_plural': 'Profiles',
                'ordering': ('-created',),
            },
        ),
    ]
| [
"alisheikh1114@gmail.com"
] | alisheikh1114@gmail.com |
ec75cbcec9ed9d1ce713094f1f6a425eda95710e | 0724193d3ddd21a428aabbdd16d84e944cf082dc | /atsim/potentials/config/_pymath.py | fe8b6890e9de404638a1b2bbc908ed037be09a9a | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | fengnianzhuang/atsim-potentials | 305b5aa33b9e7df7765abfda3a73d7f31241ba69 | 566020dc0d2df4b701b9c8cd00319a1c9461f56e | refs/heads/master | 2023-06-15T21:05:00.608645 | 2021-05-20T19:00:22 | 2021-05-20T19:00:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,436 | py | import math
import sys
# Thin pass-throughs to the corresponding math functions; aliasing keeps
# the public names while delegating entirely to the standard library.
ceil = math.ceil
copysign = math.copysign
fabs = math.fabs
floor = math.floor
fmod = math.fmod


def fsum(*args):
    """Accurate floating-point sum of the positional arguments."""
    return math.fsum(args)


def ldexp(a, b):
    """Return ``a * 2**b``; the exponent is truncated to an int first."""
    return math.ldexp(a, int(b))


trunc = math.trunc
exp = math.exp
log = math.log
log1p = math.log1p
if hasattr(math, "log2"):
    log2 = math.log2
else:
    # Pre-3.3 Pythons lack math.log2: fall back to a change of base.
    def log2(x):
        return math.log(x, 2)
def log10(x):
return math.log10(x)
def pow(x,a):
return math.pow(x,a)
def sqrt(x):
return math.sqrt(x)
def acos(x):
return math.acos(x)
def atan(x):
return math.atan(x)
def atan2(x,y):
return math.atan2(x,y)
def cos(x):
return math.cos(x)
def hypot(x,y):
return math.hypot(x,y)
def sin(x):
return math.sin(x)
def tan(x):
return math.tan(x)
def radians(x):
return math.radians(x)
def degrees(x):
return math.degrees(x)
def acosh(x):
return math.acosh(x)
def asinh(x):
return math.asinh(x)
def atanh(x):
return math.atanh(x)
def cosh(x):
return math.cosh(x)
def sinh(x):
return math.sinh(x)
def tanh(x):
return math.tanh(x)
def factorial(x):
return math.factorial(x)
if hasattr(math, "gcd"):
_gcd = math.gcd
else:
import fractions
_gcd = fractions.gcd
def gcd(a,b):
return _gcd(int(a),int(b))
| [
"m.rushton@imperial.ac.uk"
] | m.rushton@imperial.ac.uk |
83f5b76bf9b55dea682c92b0b6a41c45c844e9b9 | 3341f5d682b06b4a7ff8963c0f1b88d937fae3df | /setup.py | a7a6a8a907b1bad631daca56ff1cb24f08e2bed0 | [] | no_license | jmrodrig/Alicat_control | 83772700a7a9b2ca2045e0487aede28060bb2927 | aced19313da0371b8e5ef6d509c2b4473f995dc4 | refs/heads/master | 2021-01-19T18:53:38.909786 | 2013-07-10T09:30:31 | 2013-07-10T09:30:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | from distutils.core import setup
import py2exe
# Build a Windows console executable from start_alicat.py via py2exe.
setup(console=['start_alicat.py'])
| [
"jose.neves.rodrigues@gmail.com"
] | jose.neves.rodrigues@gmail.com |
49bffe80d5dc1bd97ce084a22875362795285f16 | a7e89bc0436f67e2160905e7d1becd681acc42c1 | /manage.py | ea86dc2ce3543cb9faa1c82848b799de048a0cc7 | [] | no_license | supermanfeng/shengxianproject | 6cc718a99d17054a959af264aae88c02d75be10b | dba59227e918653c5e6b5d4dd892afc4477eccd1 | refs/heads/master | 2020-03-10T13:30:47.214241 | 2018-04-26T13:20:34 | 2018-04-26T13:20:34 | 129,401,909 | 1 | 0 | null | 2018-04-27T12:26:08 | 2018-04-13T12:52:08 | JavaScript | UTF-8 | Python | false | false | 810 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django command-line entry point for the vueshengxian project.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vueshengxian.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
"2252506855@qq.com"
] | 2252506855@qq.com |
3b05ad30d907a8027648557e26ffae9634bd2c22 | 57c55954c4e7691a5bb7e58094ce8d68a90ac068 | /GRC1/Library/nrnoble.py | da34894b3b31e0c8bb6a0b7063513d66629764f0 | [] | no_license | nrnoble/masterdirectory | 1dea7213220fac8b0bdfcf32bb45e4a645a1d0b5 | 5de73ee6d6945974083767c7bcbb650de08356de | refs/heads/master | 2020-12-30T14:45:33.227000 | 2017-06-09T10:33:12 | 2017-06-09T10:33:12 | 91,083,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | import random, time, sys
# Colour palettes used by the graphics exercises.  ``Colors2`` is the same
# set with 'Black' replaced by 'Silver'.
Colors =['Red', 'Green', 'Blue', 'White', 'Black', 'Yellow', 'Orange', 'Pink', 'Purple', 'Gold', 'Gray', 'Maroon']
Colors2 =['Red', 'Green', 'Blue', 'White', 'Yellow', 'Orange', 'Pink', 'Purple', 'Gold', 'Silver', 'Gray', 'Maroon']
def pause(win):
    # Refresh the window, then block until the user clicks in it.
    # NOTE(review): assumes a graphics.py-style window object with
    # update()/waitForClick() -- confirm against the calling code.
    #win.redraw()
    win.update()
    win.waitForClick()
def getNewColor(Color):
    """Return a random entry from ``Colors`` that differs from *Color*.

    Draws repeatedly (rejection sampling), so the result is uniform over
    the remaining colours.
    """
    while True:
        candidate = random.choice(Colors)
        if candidate != Color:
            return candidate
def sleep(sec):
    # Pass-through to time.sleep, kept for a friendlier local name.
    time.sleep(sec)
"nrnoble@hotmail.com"
] | nrnoble@hotmail.com |
4f12b2cc59d6c1796f624bc5b10d8d35fa779390 | 22749c6a569661b2637233cc0aebdc1701033b26 | /src/python/pants/backend/codegen/protobuf/python/python_protobuf_module_mapper_test.py | d1a882ed3ab9459719226cada03e3667f28f2afd | [
"Apache-2.0"
] | permissive | akk5597/pants | 2eceb226c39b8ef7f603dfa96684b7522e1a9065 | 7ad295f71d2990eebbbe9c778bbf70f7d9e66584 | refs/heads/main | 2023-08-27T02:40:54.753545 | 2021-11-10T03:42:18 | 2021-11-10T03:42:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,124 | py | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import pytest
from pants.backend.codegen.protobuf.python import additional_fields, python_protobuf_module_mapper
from pants.backend.codegen.protobuf.python.python_protobuf_module_mapper import (
PythonProtobufMappingMarker,
)
from pants.backend.codegen.protobuf.target_types import ProtobufSourcesGeneratorTarget
from pants.backend.codegen.protobuf.target_types import rules as python_protobuf_target_types_rules
from pants.backend.python.dependency_inference.module_mapper import FirstPartyPythonMappingImpl
from pants.core.util_rules import stripped_source_files
from pants.engine.addresses import Address
from pants.testutil.rule_runner import QueryRule, RuleRunner
from pants.util.frozendict import FrozenDict
@pytest.fixture
def rule_runner() -> RuleRunner:
    # Pants test harness wired with the protobuf->Python module-mapper rules
    # and the QueryRule the test needs in order to request the mapping.
    return RuleRunner(
        rules=[
            *additional_fields.rules(),
            *stripped_source_files.rules(),
            *python_protobuf_module_mapper.rules(),
            *python_protobuf_target_types_rules(),
            QueryRule(FirstPartyPythonMappingImpl, [PythonProtobufMappingMarker]),
        ],
        target_types=[ProtobufSourcesGeneratorTarget],
    )
def test_map_first_party_modules_to_addresses(rule_runner: RuleRunner) -> None:
    """End-to-end check of the protobuf -> Python module mapping.

    Verifies that generated ``*_pb2`` (and ``*_pb2_grpc`` for grpc targets)
    module names map back to the owning proto file addresses, and that a
    module name owned by two protos is reported as ambiguous instead.
    """
    rule_runner.set_options(["--source-root-patterns=['root1', 'root2', 'root3']"])
    rule_runner.write_files(
        {
            "root1/protos/f1.proto": "",
            "root1/protos/f2.proto": "",
            "root1/protos/BUILD": "protobuf_sources()",
            # These protos would result in the same module name, so neither should be used.
            "root1/two_owners/f.proto": "",
            "root1/two_owners/BUILD": "protobuf_sources()",
            "root2/two_owners/f.proto": "",
            "root2/two_owners/BUILD": "protobuf_sources()",
            # A file with grpc. This also uses the `python_source_root` mechanism, which should be
            # irrelevant to the module mapping because we strip source roots.
            "root1/tests/f.proto": "",
            "root1/tests/BUILD": "protobuf_sources(grpc=True, python_source_root='root3')",
        }
    )
    result = rule_runner.request(FirstPartyPythonMappingImpl, [PythonProtobufMappingMarker()])
    assert result == FirstPartyPythonMappingImpl(
        mapping=FrozenDict(
            {
                "protos.f1_pb2": (Address("root1/protos", relative_file_path="f1.proto"),),
                "protos.f2_pb2": (Address("root1/protos", relative_file_path="f2.proto"),),
                "tests.f_pb2": (Address("root1/tests", relative_file_path="f.proto"),),
                "tests.f_pb2_grpc": (Address("root1/tests", relative_file_path="f.proto"),),
            }
        ),
        ambiguous_modules=FrozenDict(
            {
                "two_owners.f_pb2": (
                    Address("root1/two_owners", relative_file_path="f.proto"),
                    Address("root2/two_owners", relative_file_path="f.proto"),
                )
            }
        ),
    )
| [
"noreply@github.com"
] | noreply@github.com |
bf6f236aa05ce0ae841dd0b933b1930625d39351 | a75d4e8ff5e2d0641e539af3980768c10298dfb9 | /main.py | d8058f25dc7e3b8e940ce79bf28746f3235b9492 | [] | no_license | vitvara/tk-space-1 | c70942af4c235ebabc8648d7d49efc9c31feb961 | 57f668f3137ce893d576f03c8f7c6ffc0cb794c3 | refs/heads/main | 2023-03-23T23:35:07.711907 | 2021-03-24T23:17:02 | 2021-03-24T23:17:02 | 351,682,625 | 1 | 0 | null | 2021-03-26T06:20:18 | 2021-03-26T06:20:17 | null | UTF-8 | Python | false | false | 5,424 | py | import math
from random import randint, random
import tkinter as tk
from gamelib import Sprite, GameApp, Text
from consts import *
from elements import Ship, Bullet, Enemy
from utils import random_edge_position, normalize_vector, direction_to_dxdy, vector_len, distance
class SpaceGame(GameApp):
    """Tk-based space shooter.

    The player's ship sits on the canvas and can turn, fire bullets, and
    detonate a rechargeable bomb; enemies spawn from the screen edges or in
    radial "star" bursts and the game ends on ship/enemy contact.
    """

    def init_game(self):
        # Create the ship in the canvas centre and reset all per-game state.
        self.ship = Ship(self, CANVAS_WIDTH // 2, CANVAS_HEIGHT // 2)

        self.level = 1
        self.level_text = Text(self, '', 100, 580)
        self.update_level_text()

        self.score = 0
        self.score_wait = 0  # frames elapsed since the last score tick
        self.score_text = Text(self, '', 100, 20)
        self.update_score_text()

        self.bomb_power = BOMB_FULL_POWER
        self.bomb_wait = 0  # frames elapsed since the last recharge tick
        self.bomb_power_text = Text(self, '', 700, 20)
        self.update_bomb_power_text()

        self.elements.append(self.ship)
        self.enemies = []
        self.bullets = []

    def add_enemy(self, enemy):
        self.enemies.append(enemy)

    def add_bullet(self, bullet):
        self.bullets.append(bullet)

    def bullet_count(self):
        return len(self.bullets)

    def bomb(self):
        # Detonate only at full charge: reset power, flash a circle for
        # 200 ms, and mark every enemy within BOMB_RADIUS for deletion.
        if self.bomb_power == BOMB_FULL_POWER:
            self.bomb_power = 0

            self.bomb_canvas_id = self.canvas.create_oval(
                self.ship.x - BOMB_RADIUS,
                self.ship.y - BOMB_RADIUS,
                self.ship.x + BOMB_RADIUS,
                self.ship.y + BOMB_RADIUS
            )
            self.after(200, lambda: self.canvas.delete(self.bomb_canvas_id))

            for e in self.enemies:
                if self.ship.distance_to(e) <= BOMB_RADIUS:
                    e.to_be_deleted = True

            self.update_bomb_power_text()

    def update_score_text(self):
        self.score_text.set_text('Score: %d' % self.score)

    def update_bomb_power_text(self):
        self.bomb_power_text.set_text('Power: %d%%' % self.bomb_power)

    def update_level_text(self):
        self.level_text.set_text('Level: %d' % self.level)

    def update_score(self):
        # Award one point every SCORE_WAIT frames of survival.
        self.score_wait += 1
        if self.score_wait >= SCORE_WAIT:
            self.score += 1
            self.score_wait = 0
            self.update_score_text()

    def update_bomb_power(self):
        # Recharge the bomb by 1% every BOMB_WAIT frames until full.
        self.bomb_wait += 1
        if (self.bomb_wait >= BOMB_WAIT) and (self.bomb_power != BOMB_FULL_POWER):
            self.bomb_power += 1
            self.bomb_wait = 0
            self.update_bomb_power_text()

    def create_enemy_star(self):
        # Spawn 18 enemies radiating every 20 degrees from a random point
        # that is at least 200 px away from the ship.
        enemies = []
        x = randint(100, CANVAS_WIDTH - 100)
        y = randint(100, CANVAS_HEIGHT - 100)
        while vector_len(x - self.ship.x, y - self.ship.y) < 200:
            x = randint(100, CANVAS_WIDTH - 100)
            y = randint(100, CANVAS_HEIGHT - 100)
        for d in range(18):
            dx, dy = direction_to_dxdy(d * 20)
            enemy = Enemy(self, x, y, dx * ENEMY_BASE_SPEED, dy * ENEMY_BASE_SPEED)
            enemies.append(enemy)
        return enemies

    def create_enemy_from_edges(self):
        # Spawn one enemy on a random screen edge, heading at the ship.
        x, y = random_edge_position()
        vx, vy = normalize_vector(self.ship.x - x, self.ship.y - y)
        vx *= ENEMY_BASE_SPEED
        vy *= ENEMY_BASE_SPEED
        enemy = Enemy(self, x, y, vx, vy)
        return [enemy]

    def create_enemies(self):
        # 20% chance of a star burst, otherwise a single edge spawn.
        if random() < 0.2:
            enemies = self.create_enemy_star()
        else:
            enemies = self.create_enemy_from_edges()
        for e in enemies:
            self.add_enemy(e)

    def pre_update(self):
        # Each frame there is a 10% chance of spawning new enemies.
        if random() < 0.1:
            self.create_enemies()

    def process_bullet_enemy_collisions(self):
        for b in self.bullets:
            for e in self.enemies:
                if b.is_colliding_with_enemy(e):
                    b.to_be_deleted = True
                    e.to_be_deleted = True

    def process_ship_enemy_collision(self):
        # Any ship/enemy contact ends the game.
        for e in self.enemies:
            if self.ship.is_colliding_with_enemy(e):
                self.stop_animation()

    def process_collisions(self):
        self.process_bullet_enemy_collisions()
        self.process_ship_enemy_collision()

    def update_and_filter_deleted(self, elements):
        # Advance and redraw every element, dropping the ones flagged
        # to_be_deleted; returns the surviving elements as a new list.
        new_list = []
        for e in elements:
            e.update()
            e.render()
            if e.to_be_deleted:
                e.delete()
            else:
                new_list.append(e)
        return new_list

    def post_update(self):
        self.process_collisions()
        self.bullets = self.update_and_filter_deleted(self.bullets)
        self.enemies = self.update_and_filter_deleted(self.enemies)
        self.update_score()
        self.update_bomb_power()

    def on_key_pressed(self, event):
        # Arrows steer, space fires, 'z' triggers the bomb.
        if event.keysym == 'Left':
            self.ship.start_turn('LEFT')
        elif event.keysym == 'Right':
            self.ship.start_turn('RIGHT')
        elif event.char == ' ':
            self.ship.fire()
        elif event.char.upper() == 'Z':
            self.bomb()

    def on_key_released(self, event):
        if event.keysym == 'Left':
            self.ship.stop_turn('LEFT')
        elif event.keysym == 'Right':
            self.ship.stop_turn('RIGHT')
if __name__ == "__main__":
    root = tk.Tk()
    root.title("Space Fighter")
    # do not allow window resizing
    root.resizable(False, False)
    # UPDATE_DELAY is presumably the frame interval used by GameApp's
    # update loop -- see gamelib.GameApp.
    app = SpaceGame(root, CANVAS_WIDTH, CANVAS_HEIGHT, UPDATE_DELAY)
    app.start()
    root.mainloop()
| [
"jittat@gmail.com"
] | jittat@gmail.com |
f91fe8efd301c91d87c7cc5119bec68f6456f407 | 05fb5c55ef7c7dde7e7709783fd71f141d3f4d37 | /lesson1/clientMath.py | e5b1640adf2a1fc44d0602066ff1f7c7b66fa7e4 | [] | no_license | binaryfun/Internet-Programming-Class | 292ec0b8c591c9bbb9c4a604bd78a0a2246d0a36 | 5efda0bd6f3dfb03f22af3aaa6d0cf636d17e3ee | refs/heads/master | 2021-01-23T07:20:42.811277 | 2011-03-08T04:29:29 | 2011-03-08T04:29:29 | 1,258,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | #!/usr/bin/python
# Echo client program
import socket
import sys
# NOTE: this is Python 2 code (print statement below).
if len(sys.argv) != 3: # the program name and the two arguments
    # stop the program and print an error message
    sys.exit("Must provide two positive numbers\n \
Example: %s 123 1213" % sys.argv[0] )
# Grab the two arguments; they stay strings because they are only joined
# into the text message sent to the server (no local arithmetic is done).
x =sys.argv[1]
y =sys.argv[2]
HOST = ''    # The remote host
PORT = 5008  # The same port as used by the server
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
# Send "x y" as one space-separated message, then print the server's reply.
fullTextToSend = "%s %s" % (x,y)
s.send(fullTextToSend)
data = s.recv(1024)
s.close()
print 'Received', repr(data)
| [
"directorymanager@gmail.com"
] | directorymanager@gmail.com |
16559e513ed34f05a32fd05f3bb1afa075465a9f | ed111fd8e49676a07ef456602a57c61a6644c748 | /models/pizza_toppings.py | 60add0e50fc0684c141b4a9a267735f3a7524076 | [] | no_license | Heidenmason/RabbitRanch | 54290b4a85fa2119a02bc96676962c493eb4ec97 | 41b75584a3c1270145886954d04f8d18e3d877b7 | refs/heads/master | 2020-12-04T05:11:45.321732 | 2020-01-16T06:43:29 | 2020-01-16T06:43:29 | 231,626,892 | 0 | 0 | null | 2020-01-16T06:43:31 | 2020-01-03T16:41:52 | Python | UTF-8 | Python | false | false | 686 | py | class PizzaToppings:
    def __init__(self, pizza_topping_id, type_id, sauce, meat, cheese, veggies):
        # Plain data holder for one topping configuration; all attributes
        # are name-mangled private and exposed via the getters below.
        self.__pizza_topping_id = pizza_topping_id
        self.__type_id = type_id
        self.__sauce = sauce
        self.__meat = meat
        self.__cheeses = cheese
        self.__veggies = veggies

    # getters
    def get_pizza_topping_id(self):
        return self.__pizza_topping_id

    def get_type_id_top(self):
        return self.__type_id

    def get_sauce(self):
        return self.__sauce

    def get_meat(self):
        return self.__meat

    def get_cheese(self):
        # NOTE: backing attribute is spelled ``__cheeses`` (plural).
        return self.__cheeses

    def get_veggies(self):
        return self.__veggies
| [
"heidenmason@gmail.com"
] | heidenmason@gmail.com |
313285dd7c238502f8bc4ba4af3bfb4143144e8a | 159ab045e72fe1639d8bc7d4e34774bed85c1ea6 | /python101_course_server_checker.py | d2e52e37f61fdf24f5f1257264807ae8e0d1bbf6 | [] | no_license | Interligo/py101-server-checker | 08408226ee0deb9ae42a53e4d1ec8fa56e439d68 | c930f029cbbbce9f72d67d11b2c75723769e2c3f | refs/heads/main | 2023-01-13T17:26:16.152492 | 2020-11-14T10:06:16 | 2020-11-14T10:06:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | import requests
from logger_settings import logging
def course_server_checker():
    """Ping the python101 course site and report whether it is up.

    Logs the final URL (after any redirects) and the HTTP status code,
    then returns ``response.ok`` (True for status codes below 400).

    Raises:
        requests.exceptions.RequestException: on connection failure or
            when the request exceeds the timeout.
    """
    # A timeout keeps the checker from hanging forever when the host is
    # unreachable; without one, requests.get can block indefinitely.
    response = requests.get('https://python101.online', timeout=10)
    logging.info(f'response.url: {response.url}')
    logging.info(f'response.status_code: {response.status_code}')
    return response.ok
| [
"noreply@github.com"
] | noreply@github.com |
0d3897723c75c8c6541b18a612fb5a3fdc091199 | 5a4fe2ad0023743291cda1ea8749f07c8a5984bc | /nagiosunity/commands/battery.py | 9013308a6345414feb2390eec8566146b30c212c | [
"Apache-2.0"
] | permissive | dan-and/nagios-unity | 5686b6abf96b3bd2c9c48d5acd865cc89e40c1f1 | 3df800d22a48f875e1afdafd945bf4b245e8703b | refs/heads/master | 2021-05-08T23:16:22.804951 | 2017-12-01T10:15:22 | 2017-12-01T10:15:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,822 | py | # coding=utf-8
# Copyright (c) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import division
import logging
from nagiosunity.lib import unity
from nagiosunity.lib import utils
_log = logging.getLogger(__name__)
class Battery(unity.UnityWrapper):
    """Nagios check command reporting the health of Unity array batteries."""
    name = 'battery'

    def __init__(self, options, **kwargs):
        super(Battery, self).__init__(options)
        self.options = options
        self.kwargs = kwargs
        self._batteries = None  # optional cache for the battery list

    @property
    def batteries(self):
        # Returns the cached list if one was stored, otherwise queries the
        # array.  NOTE(review): nothing in this class ever populates
        # _batteries, so every access currently hits the array.
        return self._batteries if self._batteries else self.unity.get_battery()

    def check(self):
        """Print the Nagios status line/details and return the status code."""
        all_status = ok, warning, critical, unknown = utils.get_all_status(
            self.batteries)
        # Pick the worst status across all batteries; entries appear to be
        # (code, name) tuples, so max by the code then unwrap it.
        code = utils.max_if_not_empty(ok + warning + critical + unknown,
                                      key=lambda i: i[0])
        code = code[0]
        status_mark = utils.get_status_mark("BATTERY", code)
        first_line = "Total Batteries #{}, Failed batteries: {}".format(
            len(ok + warning + critical + unknown), [c[1] for c in critical])
        # Status line
        print(status_mark + first_line + " | ")
        # Failed details
        utils.print_if_failure(all_status[code], self.batteries)
        return code
| [
"peter.wang13@emc.com"
] | peter.wang13@emc.com |
bacb06400d052ba719278fc6b64451f272f98072 | 6100a1906b8199b164b2ebe866b7cba149dcf9db | /util/__init__.py | c09c5c20c48f33bb6e3abbe70c32b73ccf4d4023 | [
"MIT"
] | permissive | LucasRibeiroRJBR/SQL_Console | bcfd3c54d448c01f380d6f688858ec7393a10790 | 511580b5e48b3ac9dc47d5f918426f9e2bfb52d8 | refs/heads/main | 2023-01-24T11:20:15.832921 | 2020-12-12T22:56:22 | 2020-12-12T22:56:22 | 320,940,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,476 | py | import mysql.connector
# Connection to the local "escola" MySQL database.
# NOTE(review): credentials are hard-coded (root, empty password) -- fine
# for a local exercise, but should come from configuration in real code.
mydb = mysql.connector.connect(
    host="localhost",
    user="root",
    password="",
    database="escola"
)
cursor = mydb.cursor()  # shared module-level cursor used by the queries below
def select():
    """Interactively filter the ``aluno`` table and print the matches.

    Shows a small menu in a loop; options 1-4 prompt for a value and run a
    filtered SELECT on the corresponding column, option 5 returns to the
    caller.  Each result set is rendered as a fixed-width ASCII table.
    """
    while True:
        print('\n1- ID\n2- NACIONALIDADE\n3- IDIOMA\n4- CURSO TÉCNICO\n5- VOLTAR')
        op_select = input('\nFiltrar por -> ')
        # Queries are parameterized (%s placeholders): the original built
        # the WHERE clause with f-strings, which allowed SQL injection
        # through the user-supplied value.
        if op_select == '1':
            cond = input('\nID -> ')
            cursor.execute('SELECT * FROM aluno where id = %s', (cond,))
        elif op_select == '2':
            cond = input('\nNACIONALIDADE -> ')
            cursor.execute('SELECT * FROM aluno where nacionalidade = %s', (cond,))
        elif op_select == '3':
            cond = input('\nIDIOMA -> ')
            cursor.execute('SELECT * FROM aluno where idioma = %s', (cond,))
        elif op_select == '4':
            cond = input('\nCURSO TÉCNICO -> ')
            cursor.execute('SELECT * FROM aluno where cur_tec = %s', (cond,))
        elif op_select == '5':
            break
        print()
        myresult = cursor.fetchall()
        # Render the result set as a fixed-width table (columns 0,1,3,4,5,8).
        print('+' + '-'*4 + '+' + '-'*45 + '+' + '-'*45 + '+' + '-'*15 + '+' + '-'*10 + '+' + '-'*15 + '+')
        print(f'|{"ID":^4}|{"NOME":^45}|{"E-MAIL":^45}|{"NACIONALIDADE":^15}|{"IDIOMA":^10}|{"CURSO TÉCNICO":^15}|')
        print('+' + '-'*4 + '+' + '-'*45 + '+' + '-'*45 + '+' + '-'*15 + '+' + '-'*10 + '+' + '-'*15 + '+')
        for x in myresult:
            print(f'|{x[0]:^4}|{x[1]:^45}|{x[3]:^45}|{x[4]:^15}|{x[5]:^10}|{x[8]:^15}|')
        print('+' + '-'*4 + '+' + '-'*45 + '+' + '-'*45 + '+' + '-'*15 + '+' + '-'*10 + '+' + '-'*15 + '+')
def select_personalizado():
    # Free-form query console: executes whatever SQL the user types.
    # WARNING: intentional for this "SQL console" exercise, but arbitrary
    # statements run with the connection's full privileges.
    cond = input('\nCOMANDO -> ')
    cursor.execute(f'{cond}')
    print()
    myresult = cursor.fetchall()
    # Same fixed-width table renderer as select(); assumes aluno-shaped rows
    # (indexes 0,1,3,4,5,8) -- other result shapes will raise IndexError.
    print('+' + '-'*4 + '+' + '-'*45 + '+' + '-'*45 + '+' + '-'*15 + '+' + '-'*10 + '+' + '-'*15 + '+')
    print(f'|{"ID":^4}|{"NOME":^45}|{"E-MAIL":^45}|{"NACIONALIDADE":^15}|{"IDIOMA":^10}|{"CURSO TÉCNICO":^15}|')
    print('+' + '-'*4 + '+' + '-'*45 + '+' + '-'*45 + '+' + '-'*15 + '+' + '-'*10 + '+' + '-'*15 + '+')
    for x in myresult:
        # NOTE(review): xx/xxx are computed but never used.
        xx = str(x[2])
        xxx = str(xx[14:23])
        print(f'|{x[0]:^4}|{x[1]:^45}|{x[3]:^45}|{x[4]:^15}|{x[5]:^10}|{x[8]:^15}|')
    print('+' + '-'*4 + '+' + '-'*45 + '+' + '-'*45 + '+' + '-'*15 + '+' + '-'*10 + '+' + '-'*15 + '+')
"lucasribeirorjbr@gmail.com"
] | lucasribeirorjbr@gmail.com |
9ab3889ed4bf1f930b6df7ed4a537cf575bc487d | 04ee83c352281739c8b3f79f2503723449bdcc01 | /venv/lib/python3.6/site-packages/pde/fields/vectorial.py | 918afbde04329da497ddb5f206a1089019b5fd4f | [] | no_license | pretentious7/LTCURECA2020 | 5b7e88e3c2791fd1827a6a9ee5c0b72da9baf897 | 1654b1b89b93959c4e029c636cff170d0fdcb59b | refs/heads/master | 2022-11-10T09:06:47.901715 | 2020-06-30T22:05:23 | 2020-06-30T22:05:23 | 267,156,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,138 | py | '''
Defines a vectorial field over a grid
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
'''
from typing import (Callable, Optional, Union, Any, Dict, List, Sequence,
TYPE_CHECKING)
import numpy as np
import numba as nb
from .base import DataFieldBase
from .scalar import ScalarField
from ..grids.base import GridBase, DimensionError
from ..tools.numba import jit
from ..tools.docstrings import fill_in_docstring
if TYPE_CHECKING:
from ..grids.boundaries.axes import BoundariesData # @UnusedImport
from .tensorial import Tensor2Field # @UnusedImport
class VectorField(DataFieldBase):
    """ Single vector field on a grid

    Attributes:
        grid (:class:`~pde.grids.base.GridBase`):
            The underlying grid defining the discretization
        data (:class:`numpy.ndarray`):
            Vector components at the support points of the grid
        label (str):
            Name of the field
    """

    rank = 1  # vector field: one tensorial index

    @classmethod
    def from_scalars(cls, fields: List[ScalarField], label: str = None) \
            -> "VectorField":
        """ create a vector field from a list of ScalarFields

        Note that the data of the scalar fields is copied in the process

        Args:
            fields (list):
                The list of (compatible) scalar fields
            label (str, optional):
                Name of the returned field

        Returns:
            :class:`VectorField`: the resulting vector field
        """
        grid = fields[0].grid

        if grid.dim != len(fields):
            raise DimensionError('Grid dimension and number of scalar fields '
                                 f'differ ({grid.dim} != {len(fields)})')

        data = []
        for field in fields:
            assert field.grid.compatible_with(grid)
            data.append(field.data)

        return cls(grid, data, label)

    @classmethod
    @fill_in_docstring
    def from_expression(cls, grid: GridBase, expressions: Sequence[str],
                        label: str = None) -> "VectorField":
        """ create a vector field on a grid from given expressions

        Warning:
            {WARNING_EXEC}

        Args:
            grid (:class:`~pde.grids.GridBase`):
                Grid defining the space on which this field is defined
            expressions (list of str):
                A list of mathematical expression, one for each component of the
                vector field. The expressions determine the values as a function
                of the position on the grid. The expressions may contain
                standard mathematical functions and they may depend on the axes
                labels of the grid.
            label (str, optional):
                Name of the field
        """
        from ..tools.expressions import ScalarExpression

        if isinstance(expressions, str) or len(expressions) != grid.dim:
            axes_names = grid.axes + grid.axes_symmetric
            raise ValueError(f'Expected {grid.dim} expressions for the '
                             f'coordinates {axes_names}.')

        # obtain the coordinates of the grid points
        points = {name: grid.cell_coords[..., i]
                  for i, name in enumerate(grid.axes)}

        # evaluate all vector components at all points
        data = []
        for expression in expressions:
            expr = ScalarExpression(expression=expression, signature=grid.axes)
            values = np.broadcast_to(expr(**points), grid.shape)
            data.append(values)

        # create vector field from the data
        return cls(grid=grid,  # lgtm [py/call-to-non-callable]
                   data=data,
                   label=label)

    def __getitem__(self, key: int) -> ScalarField:
        """ extract a component of the VectorField """
        if not isinstance(key, int):
            raise IndexError('Index must be an integer')
        return ScalarField(self.grid, self.data[key])

    def dot(self, other: Union["VectorField", "Tensor2Field"],
            out: Optional[Union[ScalarField, "VectorField"]] = None,
            label: str = 'dot product') -> Union[ScalarField, "VectorField"]:
        """ calculate the dot product involving a vector field

        This supports the dot product between two vectors fields as well as the
        product between a vector and a tensor. The resulting fields will be a
        scalar or vector, respectively.

        Args:
            other (VectorField or Tensor2Field):
                the second field
            out (ScalarField or VectorField, optional):
                Optional field to which the result is written.
            label (str, optional):
                Name of the returned field

        Returns:
            ScalarField or VectorField: the result of applying the dot operator
        """
        from .tensorial import Tensor2Field  # @Reimport

        # check input
        self.grid.assert_grid_compatible(other.grid)
        if isinstance(other, VectorField):
            result_type = ScalarField
        elif isinstance(other, Tensor2Field):
            result_type = VectorField  # type: ignore
        else:
            raise TypeError('Second term must be a vector or tensor field')

        if out is None:
            out = result_type(self.grid)
        else:
            assert isinstance(out, result_type)
            self.grid.assert_grid_compatible(out.grid)

        # calculate the result by contracting the first (component) axis
        np.einsum('i...,i...->...', self.data, other.data, out=out.data)
        if label is not None:
            out.label = label

        return out

    __matmul__ = dot  # support python @-syntax for matrix multiplication

    def get_dot_operator(self) -> Callable:
        """ return operator calculating the dot product involving vector fields

        This supports both products between two vectors as well as products
        between a vector and a tensor.

        Warning:
            This function does not check types or dimensions.

        Returns:
            function that takes two instance of :class:`numpy.ndarray`, which
            contain the discretized data of the two operands. An optional third
            argument can specify the output array to which the result is
            written. Note that the returned function is jitted with numba for
            speed.
        """
        dim = self.grid.dim

        @jit
        def inner(a, b, out):
            """ calculate dot product between fields `a` and `b` """
            out[:] = a[0] * b[0]  # overwrite potential data in out
            for i in range(1, dim):
                out[:] += a[i] * b[i]
            return out

        if nb.config.DISABLE_JIT:
            # plain-python fallback when numba jitting is disabled
            def dot(a: np.ndarray, b: np.ndarray, out: np.ndarray = None) \
                    -> np.ndarray:
                """ wrapper deciding whether the underlying function is called
                with or without `out`. """
                if out is None:
                    out = np.empty(b.shape[1:])
                return inner(a, b, out)

        else:
            # compile-time dispatch: pick the implementation based on the
            # numba type of `out` when the function is specialized
            @nb.generated_jit
            def dot(a: np.ndarray, b: np.ndarray, out: np.ndarray = None) \
                    -> np.ndarray:
                """ wrapper deciding whether the underlying function is called
                with or without `out`. """
                if isinstance(a, nb.types.Number):
                    # simple scalar call -> do not need to allocate anything
                    raise RuntimeError('Dot needs to be called with fields')
                elif isinstance(out, (nb.types.NoneType, nb.types.Omitted)):
                    # function is called without `out`
                    def f_with_allocated_out(a, b, out):
                        """ helper function allocating output array """
                        return inner(a, b, out=np.empty(b.shape[1:]))

                    return f_with_allocated_out
                else:
                    # function is called with `out` argument
                    return inner

        return dot

    def outer_product(self, other: "VectorField", out: "Tensor2Field" = None,
                      label: str = None) -> "Tensor2Field":
        """ calculate the outer product of this vector field with another

        Args:
            other (:class:`VectorField`):
                The second vector field
            out (:class:`pde.fields.tensorial.Tensor2Field`, optional):
                Optional tensorial field to which the result is written.
            label (str, optional):
                Name of the returned field
        """
        from .tensorial import Tensor2Field  # @Reimport
        self.assert_field_compatible(other)

        if out is None:
            out = Tensor2Field(self.grid)
        else:
            self.grid.assert_grid_compatible(out.grid)

        # calculate the result
        np.einsum('i...,j...->ij...', self.data, other.data, out=out.data)
        if label is not None:
            out.label = label

        return out

    @fill_in_docstring
    def divergence(self, bc: "BoundariesData",
                   out: Optional[ScalarField] = None,
                   label: str = 'divergence') -> ScalarField:
        """ apply divergence operator and return result as a field

        Args:
            bc:
                The boundary conditions applied to the field.
                {ARG_BOUNDARIES}
            out (ScalarField, optional):
                Optional scalar field to which the result is written.
            label (str, optional):
                Name of the returned field

        Returns:
            ScalarField: the result of applying the operator
        """
        divergence = self.grid.get_operator('divergence', bc=bc)
        if out is None:
            out = ScalarField(self.grid, divergence(self.data), label=label)
        else:
            assert isinstance(out, ScalarField)
            self.grid.assert_grid_compatible(out.grid)
            divergence(self.data, out=out.data)
        return out

    @fill_in_docstring
    def gradient(self, bc: "BoundariesData",
                 out: Optional['Tensor2Field'] = None,
                 label: str = 'gradient') -> 'Tensor2Field':
        """ apply (vector) gradient operator and return result as a field

        Args:
            bc:
                The boundary conditions applied to the field.
                {ARG_BOUNDARIES}
            out (Tensor2Field, optional):
                Optional tensorial field to which the result is written.
            label (str, optional):
                Name of the returned field

        Returns:
            Tensor2Field: the result of applying the operator
        """
        vector_gradient = self.grid.get_operator('vector_gradient', bc=bc)
        if out is None:
            from .tensorial import Tensor2Field  # @Reimport
            out = Tensor2Field(self.grid, vector_gradient(self.data),
                               label=label)
        else:
            # NOTE(review): this checks VectorField although `out` is
            # documented as a Tensor2Field -- looks suspicious; confirm
            # against upstream py-pde.
            assert isinstance(out, VectorField)
            self.grid.assert_grid_compatible(out.grid)
            vector_gradient(self.data, out=out.data)
        return out

    @fill_in_docstring
    def laplace(self, bc: "BoundariesData",
                out: Optional['VectorField'] = None,
                label: str = 'vector laplacian') -> 'VectorField':
        """ apply vector Laplace operator and return result as a field

        Args:
            bc:
                The boundary conditions applied to the field.
                {ARG_BOUNDARIES}
            out (VectorField, optional):
                Optional vector field to which the result is written.
            label (str, optional):
                Name of the returned field

        Returns:
            VectorField: the result of applying the operator
        """
        if out is not None:
            assert isinstance(out, VectorField)
        laplace = self.grid.get_operator('vector_laplace', bc=bc)
        return self.apply(laplace, out=out, label=label)

    @property
    def integral(self) -> np.ndarray:
        """ :class:`numpy.ndarray`: integral of each component over space """
        return self.grid.integrate(self.data)

    def to_scalar(self, scalar: str = 'auto',
                  label: Optional[str] = 'scalar `{scalar}`') -> ScalarField:
        """ return a scalar field by applying `method`

        Args:
            scalar (str):
                Choose the method to use. Possible choices are `norm` (the
                default), `max`, `min`, or `squared_sum`.
            label (str, optional):
                Name of the returned field

        Returns:
            :class:`pde.fields.scalar.ScalarField`: the scalar field after
            applying the operation
        """
        if scalar == 'auto':
            scalar = 'norm'

        if scalar == 'norm':
            data = np.linalg.norm(self.data, axis=0)

        elif scalar == 'max':
            data = np.max(self.data, axis=0)

        elif scalar == 'min':
            data = np.min(self.data, axis=0)

        elif scalar == 'squared_sum' or scalar == 'norm_squared':
            data = np.sum(self.data**2, axis=0)

        else:
            raise ValueError(f'Unknown method `{scalar}` for `to_scalar`')

        if label is not None:
            label = label.format(scalar=scalar)
        return ScalarField(self.grid, data, label=label)

    def get_line_data(self, scalar: str = 'auto',  # type: ignore
                      extract: str = 'auto') -> Dict[str, Any]:
        """ return data for a line plot of the field

        Args:
            method (str or int):
                The method for extracting scalars as described in
                `self.to_scalar`.

        Returns:
            dict: Information useful for performing a line plot of the field
        """
        return self.to_scalar(scalar=scalar).get_line_data(extract=extract)

    def get_image_data(self, scalar: str = 'auto', **kwargs) -> Dict[str, Any]:
        r""" return data for plotting an image of the field

        Args:
            scalar (str or int):
                The method for extracting scalars as described in
                `self.to_scalar`.
            \**kwargs: Additional parameters are forwarded to `get_image_data`

        Returns:
            dict: Information useful for plotting an image of the field
        """
        return self.to_scalar(scalar=scalar).get_image_data(**kwargs)

    def get_vector_data(self, **kwargs) -> Dict[str, Any]:
        r""" return data for a vector plot of the field

        Args:
            \**kwargs: Additional parameters are forwarded to
                `grid.get_image_data`

        Returns:
            dict: Information useful for plotting an vector field
        """
        # TODO: Handle Spherical and Cartesian grids, too. This could be
        # implemented by adding a get_vector_data method to the grids
        if self.grid.dim == 2:
            vx = self[0].get_image_data(**kwargs)
            vy = self[1].get_image_data(**kwargs)
            vx['data_x'] = vx.pop('data')
            vx['data_y'] = vy['data']
            vx['title'] = self.label
            return vx

        else:
            raise NotImplementedError()
| [
"abhicherath@gmail.com"
] | abhicherath@gmail.com |
5a8c329ae43244aced63ae8867c913d3c03e6fd4 | 096fb90533ac914bdefaa91c1bf835c9d8c0d08b | /q22.py | 66b0e073cfacb1692433d2c652e29b557de5bf09 | [] | no_license | hasilrahman/assignment1 | 32640e29c4ee04353a2c241dab205be674fd1cb7 | d2e59e7213d61fc28361fd0d77020604739197c4 | refs/heads/main | 2023-03-05T09:00:35.647183 | 2021-02-15T04:49:09 | 2021-02-15T04:49:09 | 338,969,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | dic={"eng":50,"maths":33}
dic["eng"]=60
dic["maths"]=80
print(dic) | [
"noreply@github.com"
] | noreply@github.com |
9c9abfffb870fcf7a1f3784ecc5e2ecbc9ac49c4 | 8c248a2ca0a8939be7f5f9fc88e1ee1af0dfefd1 | /com.systemincloud.examples.tasks.pythontask/src/test/py/tasks/data/DataInt32.py | 82d86bf354d02fe9fa09b952ffa1c5e950b44f0c | [
"Apache-2.0"
] | permissive | systemincloud/sic-examples | e3971dbbd846285c9a517d27cb4a25f4be0a7771 | b82d5d672f515b1deb5ddb35c5a93c003e03c030 | refs/heads/master | 2020-12-29T02:39:44.074782 | 2016-01-06T14:32:34 | 2016-01-06T14:32:34 | 16,419,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | from sicpythontask.PythonTaskInfo import PythonTaskInfo
from sicpythontask.PythonTask import PythonTask
from sicpythontask.InputPort import InputPort
from sicpythontask.OutputPort import OutputPort
from sicpythontask.data.Int32 import Int32
@PythonTaskInfo
class DataInt32(PythonTask):
    """Task that adds two Int32 inputs and emits the sum on its output port."""

    def __init_ports__(self):
        """Declare two Int32 input ports and one Int32 output port."""
        self.in1 = InputPort(name="in1", data_type=Int32)
        self.in2 = InputPort(name="in2", data_type=Int32)
        self.out = OutputPort(name="out", data_type=Int32)

    def execute(self, grp):
        """Read one value from each input and put their sum on the output."""
        lhs = self.in1.get_data(Int32)
        rhs = self.in2.get_data(Int32)
        total = Int32(lhs.value + rhs.value)
        self.out.put_data(total)
| [
"marek.jagielski@gmail.com"
] | marek.jagielski@gmail.com |
f00d8d4e9c9bc00c3e059a357d9a41dce49b213b | 7355a81397876c75a52da63eacd72f0e540b3fa2 | /planner/apps/dashboard/forms.py | 9b806e328467908aafe643d950b02ee153b1a57b | [] | no_license | reeson46/Planner | 7fbf98a07e3637b7e9228728440e104cbe7a6717 | 261c30bb1796d3defa53b9bc39b67a8232fb101a | refs/heads/main | 2023-08-11T07:34:40.352524 | 2021-09-14T22:27:25 | 2021-09-14T22:27:25 | 373,234,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | from django import forms
from django.forms import ModelForm
from .models import Board, Category
class BoardForm(ModelForm):
    """ModelForm for Board: styles the name input, hides its label and the
    created_by field."""

    class Meta:
        model = Board
        fields = "__all__"

    def __init__(self, *args, **kwargs):
        super(BoardForm, self).__init__(*args, **kwargs)
        # Style the name input and suppress its label.
        name_field = self.fields["name"]
        name_field.widget.attrs.update({"class": "card", "placeholder": "Name"})
        name_field.label = False
        # created_by is filled in programmatically, so hide it from the user.
        self.fields["created_by"].widget = forms.HiddenInput()
class CategoryForm(ModelForm):
    """ModelForm for Category: exposes only the name field, styled as a card."""

    class Meta:
        model = Category
        fields = ["name"]

    def __init__(self, *args, **kwargs):
        super(CategoryForm, self).__init__(*args, **kwargs)
        # Apply the card styling and drop the empty-choice label.
        name_field = self.fields["name"]
        name_field.widget.attrs.update({"class": "card"})
        name_field.empty_label = None
"luka.loncaric@gmail.com"
] | luka.loncaric@gmail.com |
7b512c468b007c8b2f336f735e4eb125dfc4082e | a03eba726a432d8ef133f2dc55894ba85cdc4a08 | /config/hostsconf/views.py | f6ec260509c9276a8b978a80bd4cf61bb22bcbaf | [
"MIT"
] | permissive | mansonul/events | 2546c9cfe076eb59fbfdb7b4ec8bcd708817d59b | 4f6ca37bc600dcba3f74400d299826882d53b7d2 | refs/heads/master | 2021-01-15T08:53:22.442929 | 2018-01-30T16:14:20 | 2018-01-30T16:14:20 | 99,572,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | from django.http import HttpResponseRedirect
from django_hosts.resolvers import reverse as host_reverse
def www_root_redirect(request, path=None):
    """Redirect the request to the same location on the ``www`` host.

    Args:
        request: The incoming HttpRequest (required by the view signature;
            not otherwise used).
        path: Optional path suffix appended to the www host's home URL.

    Returns:
        HttpResponseRedirect to the ``www`` host's ``home`` URL, with
        ``path`` appended when one was supplied.
    """
    url_ = host_reverse("home", host='www')
    if path is not None:
        url_ = url_ + path
    # Bug fix: the computed ``url_`` (www host + optional path) was previously
    # ignored and the redirect always went to host_reverse('home').
    return HttpResponseRedirect(url_)
| [
"contact@dragosnicu.com"
] | contact@dragosnicu.com |
823144a692b2926cab21554df910419d0eff5e50 | 0e7e690435972e74d96626bbe3e3355301059aee | /py/facerec/scripts/fisherfaces_example.py | f7c46cadf391a48801b14af40f2143e2091c8da2 | [
"BSD-3-Clause"
] | permissive | phretor/facerec | 53a9079a1f43b4dc2298c9de7694e13e649047ed | fcc631065eb8da9806c0904283f63116d4588741 | refs/heads/master | 2021-01-18T06:00:07.823424 | 2012-02-09T22:43:22 | 2012-02-09T22:43:22 | 3,402,120 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,413 | py | from facerec.dataset import DataSet
from facerec.feature import LBP,Fisherfaces
from facerec.distance import EuclideanDistance, CosineDistance
from facerec.classifier import NearestNeighbor
from facerec.model import PredictableModel
from facerec.validation import KFoldCrossValidation
from facerec.visual import plot_eigenvectors
from facerec.preprocessing import MinMaxNormalizePreprocessing
from facerec.operators import ChainOperator
import numpy as np
import logging,sys
# set up a handler for logging
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add handler to facerec modules
logger = logging.getLogger("facerec")
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
# load a dataset
dataSet = DataSet("/home/philipp/facerec/data/c1")
# define a 1-NN classifier with Euclidean Distance
classifier = NearestNeighbor(dist_metric=EuclideanDistance())
# define Fisherfaces as feature extraction method
feature = Fisherfaces()
# now stuff them into a PredictableModel
model = PredictableModel(feature=feature, classifier=classifier)
# show fisherfaces
model.compute(dataSet.data,dataSet.labels)
plot_eigenvectors(model.feature.eigenvectors, 9, sz=dataSet.data[0].shape)
# perform a 5-fold cross validation
cv = KFoldCrossValidation(model, k=5)
cv.validate(dataSet.data, dataSet.labels)
| [
"bytefish@gmx.de"
] | bytefish@gmx.de |
edf41798fa0e01ff97c9f048dd79ff4eb088c77a | a7d5fad9c31dc2678505e2dcd2166ac6b74b9dcc | /dlkit/authz_adapter/learning/managers.py | 89a47c8f2c81a8efae575f3c791d4a82dc5e30e3 | [
"MIT"
] | permissive | mitsei/dlkit | 39d5fddbb8cc9a33e279036e11a3e7d4fa558f70 | 445f968a175d61c8d92c0f617a3c17dc1dc7c584 | refs/heads/master | 2022-07-27T02:09:24.664616 | 2018-04-18T19:38:17 | 2018-04-18T19:38:17 | 88,057,460 | 2 | 1 | MIT | 2022-07-06T19:24:50 | 2017-04-12T13:53:10 | Python | UTF-8 | Python | false | false | 62,360 | py | """AuthZ Adapter implementations of learning managers."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from . import sessions
from ..osid import managers as osid_managers
from ..osid.osid_errors import Unimplemented
from ..osid.osid_errors import Unimplemented, OperationFailed, Unsupported
from ..primitives import Id
from ..utilities import raise_null_argument
from dlkit.manager_impls.learning import managers as learning_managers
class LearningProfile(osid_managers.OsidProfile, learning_managers.LearningProfile):
    """Adapts underlying LearningProfile methods with authorization checks.

    Every ``supports_*`` capability query and ``get_*_record_types`` accessor
    delegates straight to the wrapped provider manager; authorization is
    enforced in the session adapters, not in these profile queries.
    (Each delegating method follows the azosid template for
    osid.resource.ResourceProfile.)
    """

    def __init__(self):
        osid_managers.OsidProfile.__init__(self)

    def _get_hierarchy_session(self, proxy=None):
        """Return the provider's ObjectiveBankHierarchySession, or ``None``.

        Prefers the proxy-aware variant when a proxy is given; falls back to
        ``None`` whenever the provider raises ``Unimplemented``.
        """
        if proxy is not None:
            try:
                return self._provider_manager.get_objective_bank_hierarchy_session(proxy)
            except Unimplemented:
                return None
        try:
            return self._provider_manager.get_objective_bank_hierarchy_session()
        except Unimplemented:
            return None

    # -- Capability flags: straight delegation to the provider manager --

    def supports_objective_lookup(self):
        return self._provider_manager.supports_objective_lookup()

    def supports_objective_query(self):
        return self._provider_manager.supports_objective_query()

    def supports_objective_admin(self):
        return self._provider_manager.supports_objective_admin()

    def supports_objective_hierarchy(self):
        return self._provider_manager.supports_objective_hierarchy()

    def supports_objective_hierarchy_design(self):
        return self._provider_manager.supports_objective_hierarchy_design()

    def supports_objective_sequencing(self):
        return self._provider_manager.supports_objective_sequencing()

    def supports_objective_objective_bank(self):
        return self._provider_manager.supports_objective_objective_bank()

    def supports_objective_objective_bank_assignment(self):
        return self._provider_manager.supports_objective_objective_bank_assignment()

    def supports_objective_requisite(self):
        return self._provider_manager.supports_objective_requisite()

    def supports_objective_requisite_assignment(self):
        return self._provider_manager.supports_objective_requisite_assignment()

    def supports_activity_lookup(self):
        return self._provider_manager.supports_activity_lookup()

    def supports_activity_query(self):
        return self._provider_manager.supports_activity_query()

    def supports_activity_admin(self):
        return self._provider_manager.supports_activity_admin()

    def supports_activity_objective_bank(self):
        return self._provider_manager.supports_activity_objective_bank()

    def supports_activity_objective_bank_assignment(self):
        return self._provider_manager.supports_activity_objective_bank_assignment()

    def supports_proficiency_lookup(self):
        return self._provider_manager.supports_proficiency_lookup()

    def supports_proficiency_query(self):
        return self._provider_manager.supports_proficiency_query()

    def supports_proficiency_admin(self):
        return self._provider_manager.supports_proficiency_admin()

    def supports_proficiency_objective_bank_assignment(self):
        return self._provider_manager.supports_proficiency_objective_bank_assignment()

    def supports_objective_bank_lookup(self):
        return self._provider_manager.supports_objective_bank_lookup()

    def supports_objective_bank_admin(self):
        return self._provider_manager.supports_objective_bank_admin()

    def supports_objective_bank_hierarchy(self):
        return self._provider_manager.supports_objective_bank_hierarchy()

    def supports_objective_bank_hierarchy_design(self):
        return self._provider_manager.supports_objective_bank_hierarchy_design()

    # -- Record-type accessors: straight delegation, each mirrored as a
    # read-only property (azosid template:
    # osid.resource.ResourceProfile.get_resource_record_types) --

    def get_objective_record_types(self):
        return self._provider_manager.get_objective_record_types()

    objective_record_types = property(fget=get_objective_record_types)

    def get_objective_search_record_types(self):
        return self._provider_manager.get_objective_search_record_types()

    objective_search_record_types = property(fget=get_objective_search_record_types)

    def get_activity_record_types(self):
        return self._provider_manager.get_activity_record_types()

    activity_record_types = property(fget=get_activity_record_types)

    def get_activity_search_record_types(self):
        return self._provider_manager.get_activity_search_record_types()

    activity_search_record_types = property(fget=get_activity_search_record_types)

    def get_proficiency_record_types(self):
        return self._provider_manager.get_proficiency_record_types()

    proficiency_record_types = property(fget=get_proficiency_record_types)

    def get_proficiency_search_record_types(self):
        return self._provider_manager.get_proficiency_search_record_types()

    proficiency_search_record_types = property(fget=get_proficiency_search_record_types)

    def get_objective_bank_record_types(self):
        return self._provider_manager.get_objective_bank_record_types()

    objective_bank_record_types = property(fget=get_objective_bank_record_types)

    def get_objective_bank_search_record_types(self):
        return self._provider_manager.get_objective_bank_search_record_types()

    objective_bank_search_record_types = property(fget=get_objective_bank_search_record_types)
class LearningManager(osid_managers.OsidManager, LearningProfile, learning_managers.LearningManager):
"""Adapts underlying LearningManager methodswith authorization checks."""
def __init__(self):
LearningProfile.__init__(self)
def initialize(self, runtime):
osid_managers.OsidManager.initialize(self, runtime)
config = self._my_runtime.get_configuration()
parameter_id = Id('parameter:learningProviderImpl@authz_adapter')
provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
self._provider_manager = runtime.get_manager('LEARNING', provider_impl)
# need to add version argument
def get_objective_lookup_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_objective_query_session()
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ObjectiveLookupSession')(
provider_session=self._provider_manager.get_objective_lookup_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
objective_lookup_session = property(fget=get_objective_lookup_session)
@raise_null_argument
def get_objective_lookup_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_objective_query_session_for_objective_bank(objective_bank_id)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ObjectiveLookupSession')(
provider_session=self._provider_manager.get_objective_lookup_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
def get_objective_query_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_objective_query_session()
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ObjectiveQuerySession')(
provider_session=self._provider_manager.get_objective_query_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
objective_query_session = property(fget=get_objective_query_session)
@raise_null_argument
def get_objective_query_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_objective_query_session_for_objective_bank(objective_bank_id)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ObjectiveQuerySession')(
provider_session=self._provider_manager.get_objective_query_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
def get_objective_admin_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveAdminSession')(
provider_session=self._provider_manager.get_objective_admin_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_admin_session = property(fget=get_objective_admin_session)
@raise_null_argument
def get_objective_admin_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveAdminSession')(
provider_session=self._provider_manager.get_objective_admin_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_objective_hierarchy_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveHierarchySession')(
provider_session=self._provider_manager.get_objective_hierarchy_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_hierarchy_session = property(fget=get_objective_hierarchy_session)
@raise_null_argument
def get_objective_hierarchy_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveHierarchySession')(
provider_session=self._provider_manager.get_objective_hierarchy_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_objective_hierarchy_design_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveHierarchyDesignSession')(
provider_session=self._provider_manager.get_objective_hierarchy_design_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_hierarchy_design_session = property(fget=get_objective_hierarchy_design_session)
@raise_null_argument
def get_objective_hierarchy_design_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveHierarchyDesignSession')(
provider_session=self._provider_manager.get_objective_hierarchy_design_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_objective_sequencing_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveSequencingSession')(
provider_session=self._provider_manager.get_objective_sequencing_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_sequencing_session = property(fget=get_objective_sequencing_session)
@raise_null_argument
def get_objective_sequencing_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveSequencingSession')(
provider_session=self._provider_manager.get_objective_sequencing_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_objective_objective_bank_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveObjectiveBankSession')(
provider_session=self._provider_manager.get_objective_objective_bank_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_objective_bank_session = property(fget=get_objective_objective_bank_session)
def get_objective_objective_bank_assignment_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveObjectiveBankAssignmentSession')(
provider_session=self._provider_manager.get_objective_objective_bank_assignment_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_objective_bank_assignment_session = property(fget=get_objective_objective_bank_assignment_session)
def get_objective_requisite_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveRequisiteSession')(
provider_session=self._provider_manager.get_objective_requisite_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_requisite_session = property(fget=get_objective_requisite_session)
@raise_null_argument
def get_objective_requisite_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveRequisiteSession')(
provider_session=self._provider_manager.get_objective_requisite_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_objective_requisite_assignment_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveRequisiteAssignmentSession')(
provider_session=self._provider_manager.get_objective_requisite_assignment_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_requisite_assignment_session = property(fget=get_objective_requisite_assignment_session)
@raise_null_argument
def get_objective_requisite_assignment_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveRequisiteAssignmentSession')(
provider_session=self._provider_manager.get_objective_requisite_assignment_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_activity_lookup_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_activity_query_session()
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ActivityLookupSession')(
provider_session=self._provider_manager.get_activity_lookup_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
activity_lookup_session = property(fget=get_activity_lookup_session)
@raise_null_argument
def get_activity_lookup_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_activity_query_session_for_objective_bank(objective_bank_id)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ActivityLookupSession')(
provider_session=self._provider_manager.get_activity_lookup_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
def get_activity_query_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_activity_query_session()
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ActivityQuerySession')(
provider_session=self._provider_manager.get_activity_query_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
activity_query_session = property(fget=get_activity_query_session)
@raise_null_argument
def get_activity_query_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_activity_query_session_for_objective_bank(objective_bank_id)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ActivityQuerySession')(
provider_session=self._provider_manager.get_activity_query_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
def get_activity_admin_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ActivityAdminSession')(
provider_session=self._provider_manager.get_activity_admin_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
activity_admin_session = property(fget=get_activity_admin_session)
@raise_null_argument
def get_activity_admin_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ActivityAdminSession')(
provider_session=self._provider_manager.get_activity_admin_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_activity_objective_bank_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ActivityObjectiveBankSession')(
provider_session=self._provider_manager.get_activity_objective_bank_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
activity_objective_bank_session = property(fget=get_activity_objective_bank_session)
def get_activity_objective_bank_assignment_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ActivityObjectiveBankAssignmentSession')(
provider_session=self._provider_manager.get_activity_objective_bank_assignment_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
activity_objective_bank_assignment_session = property(fget=get_activity_objective_bank_assignment_session)
def get_proficiency_lookup_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_proficiency_query_session()
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ProficiencyLookupSession')(
provider_session=self._provider_manager.get_proficiency_lookup_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
proficiency_lookup_session = property(fget=get_proficiency_lookup_session)
@raise_null_argument
def get_proficiency_lookup_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_proficiency_query_session_for_objective_bank(objective_bank_id)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ProficiencyLookupSession')(
provider_session=self._provider_manager.get_proficiency_lookup_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
def get_proficiency_query_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_proficiency_query_session()
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ProficiencyQuerySession')(
provider_session=self._provider_manager.get_proficiency_query_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
proficiency_query_session = property(fget=get_proficiency_query_session)
@raise_null_argument
def get_proficiency_query_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_proficiency_query_session_for_objective_bank(objective_bank_id)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ProficiencyQuerySession')(
provider_session=self._provider_manager.get_proficiency_query_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
def get_proficiency_admin_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ProficiencyAdminSession')(
provider_session=self._provider_manager.get_proficiency_admin_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
proficiency_admin_session = property(fget=get_proficiency_admin_session)
@raise_null_argument
def get_proficiency_admin_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ProficiencyAdminSession')(
provider_session=self._provider_manager.get_proficiency_admin_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_proficiency_objective_bank_assignment_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ProficiencyObjectiveBankAssignmentSession')(
provider_session=self._provider_manager.get_proficiency_objective_bank_assignment_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
proficiency_objective_bank_assignment_session = property(fget=get_proficiency_objective_bank_assignment_session)
def get_objective_bank_lookup_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveBankLookupSession')(
provider_session=self._provider_manager.get_objective_bank_lookup_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_bank_lookup_session = property(fget=get_objective_bank_lookup_session)
def get_objective_bank_admin_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveBankAdminSession')(
provider_session=self._provider_manager.get_objective_bank_admin_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_bank_admin_session = property(fget=get_objective_bank_admin_session)
def get_objective_bank_hierarchy_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveBankHierarchySession')(
provider_session=self._provider_manager.get_objective_bank_hierarchy_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_bank_hierarchy_session = property(fget=get_objective_bank_hierarchy_session)
def get_objective_bank_hierarchy_design_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveBankHierarchyDesignSession')(
provider_session=self._provider_manager.get_objective_bank_hierarchy_design_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_bank_hierarchy_design_session = property(fget=get_objective_bank_hierarchy_design_session)
def get_learning_batch_manager(self):
raise Unimplemented()
learning_batch_manager = property(fget=get_learning_batch_manager)
class LearningProxyManager(osid_managers.OsidProxyManager, LearningProfile, learning_managers.LearningProxyManager):
"""Adapts underlying LearningProxyManager methodswith authorization checks."""
def __init__(self):
LearningProfile.__init__(self)
def initialize(self, runtime):
osid_managers.OsidProxyManager.initialize(self, runtime)
config = self._my_runtime.get_configuration()
parameter_id = Id('parameter:learningProviderImpl@authz_adapter')
provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
self._provider_manager = runtime.get_proxy_manager('LEARNING', provider_impl)
# need to add version argument
@raise_null_argument
def get_objective_lookup_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_objective_query_session(proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ObjectiveLookupSession')(
provider_session=self._provider_manager.get_objective_lookup_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_objective_lookup_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_objective_query_session_for_objective_bank(objective_bank_id, proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ObjectiveLookupSession')(
provider_session=self._provider_manager.get_objective_lookup_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_objective_query_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_objective_query_session(proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ObjectiveQuerySession')(
provider_session=self._provider_manager.get_objective_query_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_objective_query_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_objective_query_session_for_objective_bank(objective_bank_id, proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ObjectiveQuerySession')(
provider_session=self._provider_manager.get_objective_query_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_objective_admin_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveAdminSession')(
provider_session=self._provider_manager.get_objective_admin_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_admin_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveAdminSession')(
provider_session=self._provider_manager.get_objective_admin_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_hierarchy_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveHierarchySession')(
provider_session=self._provider_manager.get_objective_hierarchy_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_hierarchy_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveHierarchySession')(
provider_session=self._provider_manager.get_objective_hierarchy_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_hierarchy_design_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveHierarchyDesignSession')(
provider_session=self._provider_manager.get_objective_hierarchy_design_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_hierarchy_design_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveHierarchyDesignSession')(
provider_session=self._provider_manager.get_objective_hierarchy_design_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_sequencing_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveSequencingSession')(
provider_session=self._provider_manager.get_objective_sequencing_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_sequencing_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveSequencingSession')(
provider_session=self._provider_manager.get_objective_sequencing_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_objective_bank_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveObjectiveBankSession')(
provider_session=self._provider_manager.get_objective_objective_bank_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_objective_bank_assignment_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveObjectiveBankAssignmentSession')(
provider_session=self._provider_manager.get_objective_objective_bank_assignment_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_requisite_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveRequisiteSession')(
provider_session=self._provider_manager.get_objective_requisite_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_requisite_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveRequisiteSession')(
provider_session=self._provider_manager.get_objective_requisite_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_requisite_assignment_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveRequisiteAssignmentSession')(
provider_session=self._provider_manager.get_objective_requisite_assignment_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_requisite_assignment_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveRequisiteAssignmentSession')(
provider_session=self._provider_manager.get_objective_requisite_assignment_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_activity_lookup_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_activity_query_session(proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ActivityLookupSession')(
provider_session=self._provider_manager.get_activity_lookup_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_activity_lookup_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_activity_query_session_for_objective_bank(objective_bank_id, proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ActivityLookupSession')(
provider_session=self._provider_manager.get_activity_lookup_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_activity_query_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_activity_query_session(proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ActivityQuerySession')(
provider_session=self._provider_manager.get_activity_query_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_activity_query_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_activity_query_session_for_objective_bank(objective_bank_id, proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ActivityQuerySession')(
provider_session=self._provider_manager.get_activity_query_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_activity_admin_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ActivityAdminSession')(
provider_session=self._provider_manager.get_activity_admin_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_activity_admin_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ActivityAdminSession')(
provider_session=self._provider_manager.get_activity_admin_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_activity_objective_bank_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ActivityObjectiveBankSession')(
provider_session=self._provider_manager.get_activity_objective_bank_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_activity_objective_bank_assignment_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ActivityObjectiveBankAssignmentSession')(
provider_session=self._provider_manager.get_activity_objective_bank_assignment_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_proficiency_lookup_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_proficiency_query_session(proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ProficiencyLookupSession')(
provider_session=self._provider_manager.get_proficiency_lookup_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_proficiency_lookup_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_proficiency_query_session_for_objective_bank(objective_bank_id, proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ProficiencyLookupSession')(
provider_session=self._provider_manager.get_proficiency_lookup_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_proficiency_query_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_proficiency_query_session(proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ProficiencyQuerySession')(
provider_session=self._provider_manager.get_proficiency_query_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_proficiency_query_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_proficiency_query_session_for_objective_bank(objective_bank_id, proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ProficiencyQuerySession')(
provider_session=self._provider_manager.get_proficiency_query_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_proficiency_admin_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ProficiencyAdminSession')(
provider_session=self._provider_manager.get_proficiency_admin_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_proficiency_admin_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ProficiencyAdminSession')(
provider_session=self._provider_manager.get_proficiency_admin_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_proficiency_objective_bank_assignment_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ProficiencyObjectiveBankAssignmentSession')(
provider_session=self._provider_manager.get_proficiency_objective_bank_assignment_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_bank_lookup_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveBankLookupSession')(
provider_session=self._provider_manager.get_objective_bank_lookup_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_bank_admin_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveBankAdminSession')(
provider_session=self._provider_manager.get_objective_bank_admin_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_bank_hierarchy_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveBankHierarchySession')(
provider_session=self._provider_manager.get_objective_bank_hierarchy_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_bank_hierarchy_design_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveBankHierarchyDesignSession')(
provider_session=self._provider_manager.get_objective_bank_hierarchy_design_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
def get_learning_batch_proxy_manager(self):
raise Unimplemented()
learning_batch_proxy_manager = property(fget=get_learning_batch_proxy_manager)
| [
"cjshaw@mit.edu"
] | cjshaw@mit.edu |
b3198f33c305f528c35c7946bf193d81970e7867 | 0ee21652c5d4b41be13c8db871261ab48e5f6878 | /test/functional/p2p_feefilter.py | a7d3235a8c8e69bd5d4acb4d3efeeb4aad3128b3 | [
"MIT"
] | permissive | The-Bitcoin-Phantom/The-bitcoin-Phantom | 60f4a8f85e1a5235dc40b84b7c04bcd0e846c52d | c914b51924932f07026eb6ba057c6e375e4dcdac | refs/heads/master | 2020-08-25T06:46:47.745015 | 2019-10-23T05:59:27 | 2019-10-23T05:59:27 | 216,977,357 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,092 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2019 The bitphantom Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of feefilter messages."""
from decimal import Decimal
import time
from test_framework.messages import msg_feefilter
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import bitphantomTestFramework
def hashToHex(hash):
return format(hash, '064x')
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
for x in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
return True
time.sleep(1)
return False
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.txinvs = []
def on_inv(self, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
with mininode_lock:
self.txinvs = []
class FeeFilterTest(bitphantomTestFramework):
def set_test_params(self):
self.num_nodes = 2
# We lower the various required feerates for this test
# to catch a corner-case where feefilter used to slightly undercut
# mempool and wallet feerate calculation based on GetFee
# rounding down 3 places, leading to stranded transactions.
# See issue #16499
self.extra_args = [["-minrelaytxfee=0.00000100", "-mintxfee=0.00000100"]]*self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
self.sync_blocks()
self.nodes[0].add_p2p_connection(TestP2PConn())
# Test that invs are received by test connection for all txs at
# feerate of .2 sat/byte
node1.settxfee(Decimal("0.00000200"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Set a filter of .15 sat/byte on test connection
self.nodes[0].p2p.send_and_ping(msg_feefilter(150))
# Test that txs are still being received by test connection (paying .15 sat/byte)
node1.settxfee(Decimal("0.00000150"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Change tx fee rate to .1 sat/byte and test they are no longer received
# by the test connection
node1.settxfee(Decimal("0.00000100"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
self.sync_mempools() # must be sure node 0 has received all txs
# Send one transaction from node0 that should be received, so that we
# we can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.00020000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Remove fee filter and check that txs are received again
self.nodes[0].p2p.send_and_ping(msg_feefilter(0))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
if __name__ == '__main__':
FeeFilterTest().main()
| [
"dinesh@xiornis.com"
] | dinesh@xiornis.com |
c8970539fdd5c192186f560b26c1f90428a84e38 | 6bd48af1413be83b021c5aa00c56d1e2a7e86d3b | /pii_recognition/pipelines/pii_validation_pipeline.py | 5a5cd28868a79209ff5a74bcda5e91bc8224833a | [] | no_license | gabechu/pii_recognition | 56ca28d29512f22a20982d53692f98c4c7a2ebea | 501d94042f4469aa1797657bc7db32308d28b593 | refs/heads/master | 2021-02-21T19:45:37.105435 | 2020-11-26T07:06:42 | 2020-11-26T07:06:42 | 245,364,610 | 2 | 0 | null | 2020-11-26T07:06:43 | 2020-03-06T08:17:06 | Python | UTF-8 | Python | false | false | 9,130 | py | from typing import Dict, FrozenSet, List, Mapping, Optional, Set, Union
from pakkr import Pipeline, returns
from pii_recognition.data_readers.data import Data
from pii_recognition.data_readers.presidio_fake_pii_reader import PresidioFakePiiReader
from pii_recognition.evaluation.character_level_evaluation import (
EntityPrecision,
EntityRecall,
TextScore,
build_label_mapping,
compute_entity_precisions_for_prediction,
compute_entity_recalls_for_ground_truth,
compute_pii_detection_fscore,
)
from pii_recognition.recognisers import registry as recogniser_registry
from pii_recognition.recognisers.entity_recogniser import EntityRecogniser
from pii_recognition.utils import dump_to_json_file, load_yaml_file, stringify_keys
from tqdm import tqdm
@returns(Data)
def read_benchmark_data(benchmark_data_file: str) -> Data:
reader = PresidioFakePiiReader()
data = reader.build_data(benchmark_data_file)
# remove empty items
data.items = list(filter(lambda item: item.text != "", data.items))
return data
@returns(Data)
def identify_pii_entities(
data: Data, recogniser_name: str, recogniser_params: Dict
) -> Data:
recogniser: EntityRecogniser = recogniser_registry.create_instance(
recogniser_name, recogniser_params
)
for item in tqdm(data.items):
item.pred_labels = recogniser.analyse(item.text, recogniser.supported_entities)
return data
@returns(scores=List)
def calculate_precisions_and_recalls(
data: Data,
grouped_targeted_labels: List[Set[str]],
nontargeted_labels: Optional[Set[str]] = None,
) -> Dict[str, List[TextScore]]:
label_mapping = build_label_mapping(grouped_targeted_labels, nontargeted_labels)
scores = []
for item in data.items:
if item.pred_labels:
pred_labels = item.pred_labels
else: # pred_labels could be None
pred_labels = []
ent_precisions = compute_entity_precisions_for_prediction(
len(item.text), item.true_labels, pred_labels, label_mapping
)
ent_recalls = compute_entity_recalls_for_ground_truth(
len(item.text), item.true_labels, pred_labels, label_mapping
)
ticket_score = TextScore(
text=item.text, precisions=ent_precisions, recalls=ent_recalls
)
scores.append(ticket_score)
return {"scores": scores}
@returns()
def log_predictions_and_ground_truths(
predictions_dump_path: str, scores: List[TextScore]
):
results = dict()
for score in scores:
text = score.text
predictions = {
text[p.entity.start : p.entity.end]: {
"type": p.entity.entity_type,
"score": round(p.precision, 2),
"start": p.entity.start,
}
for p in score.precisions
}
ground_truths = {
text[r.entity.start : r.entity.end]: {
"type": r.entity.entity_type,
"score": round(r.recall, 2),
"start": r.entity.start,
}
for r in score.recalls
}
results.update(
{text: {"predicted": predictions, "ground_truth": ground_truths}}
)
dump_to_json_file(results, predictions_dump_path)
@returns(Dict)
def calculate_aggregate_metrics(
scores: List[TextScore],
grouped_targeted_labels: List[Set[str]],
fbeta: float = 1.0,
) -> Dict[Union[str, FrozenSet[str]], float]:
results: Dict[Union[str, FrozenSet[str]], float] = dict()
results["exact_match_f1"] = get_rollup_fscore_on_pii(
scores, fbeta, recall_threshold=None)
results["partial_match_f1_threshold_at_50%"] = get_rollup_fscore_on_pii(
scores, fbeta, recall_threshold=0.5
)
type_scores: Mapping = get_rollup_metrics_on_types(
grouped_targeted_labels, scores, fbeta
)
results.update(type_scores)
return results
@returns()
def report_results(results: Dict, scores_dump_path: str):
results = stringify_keys(results)
dump_to_json_file(results, scores_dump_path)
def get_rollup_fscore_on_pii(
scores: List[TextScore], fbeta: float, recall_threshold: Optional[float]
) -> float:
"""Calculate f score on PII recognition.
A single score, f score, will be calculate to indicate how a system did on
predicting PII entities. Recall thresholding is supported, if the system can
recognise a certain portion of an entity greater than the threshold, that
entity then will be considered identified.
Args:
scores: a list of text scores providing info including precisions and recalls.
fbeta: beta value for f score.
recall_threshold: a float between 0 and 1. Any recall value that is greater
than or equals to the threshold would be rounded up to 1.
Returns:
A f score represents performance of a system.
"""
fscores = []
for text_score in scores:
precisions = [p.precision for p in text_score.precisions]
recalls = [r.recall for r in text_score.recalls]
f = compute_pii_detection_fscore(precisions, recalls, recall_threshold, fbeta)
fscores.append(f)
if fscores:
return round(sum(fscores) / len(fscores), 4)
else:
# The only possibility to have empty fscores is that argument "scores"
# is empty. In this case, we assign f score to 0.
return 0.0
def _update_table(
table: Dict[FrozenSet[str], Dict], new_item: Union[EntityPrecision, EntityRecall]
) -> Dict[FrozenSet, Dict]:
"""A helper function to log fscores."""
entity_label = new_item.entity.entity_type
for label_set in table.keys():
if entity_label in label_set:
if isinstance(new_item, EntityPrecision):
table[label_set]["precisions"].append(new_item.precision)
elif isinstance(new_item, EntityRecall):
table[label_set]["recalls"].append(new_item.recall)
return table
def regroup_scores_on_types(
grouped_labels: List[Set[str]], scores: List[TextScore]
) -> Dict[FrozenSet[str], Dict]:
"""Regroup scores according to parameter grouped_labels.
Prediction scores (precisions and recalls) are collected for each of the example
texts and stored in scores parameter. We need to regroup those scores on entity
types to obtain, for example, all precisions and recalls for the group
{"PER", "PERSON"}.
Args:
grouped_llabels: entity labels separated as sets of groups, for example,
[{"PER", "PERSON"}, {"ORG"}].
scores: a list of text scores providing info including precisions and recalls
for each prediction and ground truth within a text.
Returns:
A dictionary that key is a group of entities and value precisions and recalls
for that group.
"""
score_table: Dict[FrozenSet, Dict] = {
frozenset(label_set): {"precisions": [], "recalls": []}
for label_set in grouped_labels
}
# update score table
for text_score in scores:
for precision in text_score.precisions:
score_table = _update_table(score_table, precision)
for recall in text_score.recalls:
score_table = _update_table(score_table, recall)
return score_table
def get_rollup_metrics_on_types(
grouped_labels: List[Set[str]], scores: List[TextScore], fbeta: float,
) -> Dict[FrozenSet[str], Dict[str, Union[float, str]]]:
"""Calculate f1, average precision and average recall for every group in the
grouped labels.
"""
score_table = regroup_scores_on_types(grouped_labels, scores)
metrics = dict()
for key, value in score_table.items():
f1 = round(
compute_pii_detection_fscore(
value["precisions"], value["recalls"], beta=fbeta
),
4,
)
if value["precisions"]:
ave_precision = round(
sum(value["precisions"]) / len(value["precisions"]), 4
)
else:
ave_precision = "undefined"
if value["recalls"]:
ave_recall = round(sum(value["recalls"]) / len(value["recalls"]), 4)
else:
ave_recall = "undefined"
metrics.update(
{key: {"f1": f1, "ave-precision": ave_precision, "ave-recall": ave_recall}}
)
return metrics
def exec_pipeline(config_yaml_file: str):
pipeline = Pipeline(
read_benchmark_data,
identify_pii_entities,
calculate_precisions_and_recalls,
log_predictions_and_ground_truths,
calculate_aggregate_metrics,
report_results,
name="pii_validation_pipeline",
)
config = load_yaml_file(config_yaml_file)
if config:
# conversions to meet requirements on type checks
config["grouped_targeted_labels"] = [
set(item) for item in config["grouped_targeted_labels"]
]
config["nontargeted_labels"] = set(config["nontargeted_labels"])
return pipeline(**config)
else:
raise ValueError("Config YAML is empty.")
| [
"noreply@github.com"
] | noreply@github.com |
cc8cec707c2592a054a96cf064f412c9b86e713a | a4d955af89d12010963f775aa5688a6156714afa | /qlearningAgents.py | 2cb6ec7ec33b8b44000fe11ee092e5550779de47 | [] | no_license | mobbistcn/pacman | 140d35b3681403cd9c49e66fdac39cb0eab0ae91 | 2ab695e803904c5417f663082d00bbc768613dd2 | refs/heads/master | 2021-01-17T23:41:45.039039 | 2017-03-07T18:47:34 | 2017-03-07T18:47:34 | 84,233,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,299 | py | # qlearningAgents.py
# ------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from game import *
from learningAgents import ReinforcementAgent
from featureExtractors import *
import random,util,math
class QLearningAgent(ReinforcementAgent):
"""
Q-Learning Agent
Functions you should fill in:
- computeValueFromQValues
- computeActionFromQValues
- getQValue
- getAction
- update
Instance variables you have access to
- self.epsilon (exploration prob)
- self.alpha (learning rate)
- self.discount (discount rate)
Functions you should use
- self.getLegalActions(state)
which returns legal actions for a state
"""
def __init__(self, **args):
"You can initialize Q-values here..."
ReinforcementAgent.__init__(self, **args)
"*** YOUR CODE HERE ***"
self.qvalue = util.Counter()
def getQValue(self, state, action):
"""
Returns Q(state,action)
Should return 0.0 if we have never seen a state
or the Q node value otherwise
"""
"*** YOUR CODE HERE ***"
qValue = self.qvalue[(state, action)]
if qValue == None:
return 0.0
return qValue
def computeValueFromQValues(self, state):
"""
Returns max_action Q(state,action)
where the max is over legal actions. Note that if
there are no legal actions, which is the case at the
terminal state, you should return a value of 0.0.
"""
"*** YOUR CODE HERE ***"
qValue = [self.getQValue(state, action) for action in self.getLegalActions(state)]
if len(self.getLegalActions(state)) ==0:
return 0
return max(qValue)
def computeActionFromQValues(self, state):
"""
Compute the best action to take in a state. Note that if there
are no legal actions, which is the case at the terminal state,
you should return None.
"""
"*** YOUR CODE HERE ***"
if self.getLegalActions(state):
bestValue = -99999
bestAction = None
for action in self.getLegalActions(state):
curr = self.getQValue(state, action)
if curr > bestValue:
bestValue = curr
bestAction = action
return bestAction
def getAction(self, state):
"""
Compute the action to take in the current state. With
probability self.epsilon, we should take a random action and
take the best policy action otherwise. Note that if there are
no legal actions, which is the case at the terminal state, you
should choose None as the action.
HINT: You might want to use util.flipCoin(prob)
HINT: To pick randomly from a list, use random.choice(list)
"""
# Pick Action
legalActions = self.getLegalActions(state)
action = None
"*** YOUR CODE HERE ***"
if util.flipCoin(self.epsilon):
action = random.choice(legalActions)
else:
action = self.getPolicy(state)
return action
def update(self, state, action, nextState, reward):
"""
The parent class calls this to observe a
state = action => nextState and reward transition.
You should do your Q-Value update here
NOTE: You should never call this function,
it will be called on your behalf
"""
"*** YOUR CODE HERE ***"
self.qvalue[(state, action)] = (1 - self.alpha)*self.qvalue[(state, action)] + self.alpha*( reward + self.discount*self.getValue(nextState))
def getPolicy(self, state):
return self.computeActionFromQValues(state)
def getValue(self, state):
return self.computeValueFromQValues(state)
class PacmanQAgent(QLearningAgent):
"Exactly the same as QLearningAgent, but with different default parameters"
def __init__(self, epsilon=0.05,gamma=0.8,alpha=0.2, numTraining=0, **args):
"""
These default parameters can be changed from the pacman.py command line.
For example, to change the exploration rate, try:
python pacman.py -p PacmanQLearningAgent -a epsilon=0.1
alpha - learning rate
epsilon - exploration rate
gamma - discount factor
numTraining - number of training episodes, i.e. no learning after these many episodes
"""
args['epsilon'] = epsilon
args['gamma'] = gamma
args['alpha'] = alpha
args['numTraining'] = numTraining
self.index = 0 # This is always Pacman
QLearningAgent.__init__(self, **args)
def getAction(self, state):
"""
Simply calls the getAction method of QLearningAgent and then
informs parent of action for Pacman. Do not change or remove this
method.
"""
action = QLearningAgent.getAction(self,state)
self.doAction(state,action)
return action
class ApproximateQAgent(PacmanQAgent):
"""
ApproximateQLearningAgent
You should only have to overwrite getQValue
and update. All other QLearningAgent functions
should work as is.
"""
def __init__(self, extractor='IdentityExtractor', **args):
self.featExtractor = util.lookup(extractor, globals())()
PacmanQAgent.__init__(self, **args)
self.weights = util.Counter()
def getWeights(self):
return self.weights
def getQValue(self, state, action):
"""
Should return Q(state,action) = w * featureVector
where * is the dotProduct operator
"""
"*** YOUR CODE HERE ***"
features = self.featExtractor.getFeatures(state, action)
return self.weights * features
def update(self, state, action, nextState, reward):
"""
Should update your weights based on transition
"""
"*** YOUR CODE HERE ***"
correction = reward + self.discount*self.getValue(nextState) - self.getQValue(state, action)
features = self.featExtractor.getFeatures(state, action)
for feature in features:
self.weights[feature] += self.alpha * correction * features[feature]
def final(self, state):
"Called at the end of each game."
# call the super-class final method
PacmanQAgent.final(self, state)
# did we finish training?
if self.episodesSoFar == self.numTraining:
# you might want to print your weights here for debugging
"*** YOUR CODE HERE ***"
pass
| [
"noreply@github.com"
] | noreply@github.com |
75c788a648281903bff2bd13fe24752d7911f635 | c0a536c29ffaf5583479afefd29b6c13068536da | /tic_tac.py | 4a1b0276ad5fafb2c4025ae54c3e4a0211c8374a | [] | no_license | harishpolagani/tic_tac_toe_using_python_basic1 | b6543685d0ca9f09579030ba36b8ae01b20e3cf6 | be8b8df573ad4778cb16382474e8ceaa3b12bdf5 | refs/heads/master | 2020-03-18T02:10:26.876581 | 2018-05-20T19:13:01 | 2018-05-20T19:13:01 | 134,177,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,768 | py | import numpy as np
import sys
class Tick_Tac_Toe_class():
game_state_info = ["GAME_NOT_STARTED","GAME_STARTED","GAME_RESUMED","GAME_COMPLETED"]
def __init__(self,game_square_matrix_shape=3):
self.game_state = 0 #state_machine of Game current state: 0 - game not started 1 - game started 2 - game resumed 3 - game completed
self.user_input_position = 0 #actual position chosen by Player
self.no_of_player = 2 # Restricted to 2
self.user_input_taken_Flag = False #to ensure player has given proper input
self.exit_flag = False #flag to exit from game
self.count = 0 #player info Even number - PLAYER:A Odd number - PLAYER:B
self.game_square_matrix_shape = game_square_matrix_shape #restricted to 3 now
self.matrix_tic=np.zeros((game_square_matrix_shape,game_square_matrix_shape),dtype=np.int32)
def get_matrix_dim(self):
return self.matrix_tic.shape
def get_current_player_name(self):
#restricting no of players 2 for now
if self.count % 2 == 0: return "PLAYER1"
else: return "PLAYER2"
def get_current_player_id(self):
#restricting no of players 2 for now
if self.count % 2 == 0: return 1 #"PLAYER1"
else: return 2 #"PLAYER2"
def get_matrix_printable_format_string(self):
matrix_data="\n"
row_size1 = 0
column_size1 =0
x_row,y_column = self.get_matrix_dim()
while row_size1 < x_row :
while column_size1 < y_column:
matrix_data += "{0:3}".format(self.matrix_tic[row_size1,column_size1])
column_size1 += 1
row_size1 += 1
column_size1 = 0
matrix_data += "\n"
return matrix_data
def set_game_state(self,state):
#state_machine of Game current state
# 0 - game not started
# 1 - game started
# 2 - game resumed
# 3 - game completed
self.game_state = state
return True
def get_game_state(self):
#state_machine of Game current state
# 0 - game not started
# 1 - game started
# 2 - game resumed
# 3 - game completed
state = self.game_state
return Tick_Tac_Toe_class().game_state_info[state]
def validate_user_input(self,user_matrix_rows,user_matrix_columns):
if user_matrix_rows == 'EXIT' or user_matrix_columns == 'EXIT':
self.exit_flag = True
sys.exit()
return False
else:
if user_matrix_rows.isdigit() and user_matrix_columns.isdigit():
user_matrix_rows = int(user_matrix_rows)
user_matrix_columns = int(user_matrix_columns)
max_matrix_rows,max_matrix_columns =self.get_matrix_dim()
if type(user_matrix_rows) == int and type(user_matrix_columns) == int:
if user_matrix_rows < max_matrix_rows and user_matrix_columns < max_matrix_columns:
return True
else:
print("please enter row/column with in range of matrix")
return False
else:
print("please enter only valid digits for row/column")
return False
def get_position_from_user(self):
#validating user_input of dimension
valid_user_input = False
while not valid_user_input:
user_matrix_rows = input("enter the row value / enter EXIT to exit : ")
if user_matrix_rows != "EXIT":
#if user wants to exit while reading row value then there is no point in asking column value
user_matrix_columns = input("enter the column value / enter EXIT to exit : ")
else:
# to avoid UnboundLocalError: local variable 'user_matrix_columns' referenced before assignment
user_matrix_columns = "EXIT"
if self.validate_user_input(user_matrix_rows,user_matrix_columns):
valid_user_input = True
return int(user_matrix_rows),int(user_matrix_columns)
def set_position_chosen_by_user(self,user_matrix_rows,user_matrix_columns,player):
self.matrix_tic[user_matrix_rows,user_matrix_columns]=player
self.count += 1 #increase count for next player turn
return True
def check_win_condition(self,player_id):
# Matrix 'A' is:
# 1 2 3
# 4 5 6
# 7 8 9
# Matrix of A[0] : row0 ==> array([1, 2, 3])
# Matrix of A[1] : row1 ==> array([4, 5, 6])
# Matrix of A[2] : row2 ==> array([7, 8, 9])
# Diagonal matrix of A is: numpy.diag(A) ==> array([1, 5, 9])
# cross diagonal Matrix of A: numpy.diag(numpy.fliplr(A)) ==> array([3, 5, 7])
# matrix B = numpy.transpose(A)
# 1 4 7
# 2 5 8
# 3 6 9
# Matrix of B[0] : Column0 ==> array([1, 4, 7])
# Matrix of B[1] : Column1 ==> array([2, 5, 8])
# Matrix of B[2] : Column2 ==> array([3, 6, 9])
#
# required matrix of player1 = [1, 1, 1]
# required matrix of player1 = [2, 2, 2]
# winning condition check if A[0] or A[1] or A[2] or B[0]or B[1] or B[2] or Diag and cross-diag are == player1 or player2
# ====================================================================================
#
#here base matrice restricted to 3X3 matrice
player1_check_matrice = np.full((1,self.game_square_matrix_shape),1,dtype=np.int32)
player2_check_matrice = np.full((1,self.game_square_matrix_shape),2,dtype=np.int32)
row,column=self.matrix_tic.shape
row_base = 0
column_base =0
Temp_matrice = self.matrix_tic
Temp_transpose_matrice = np.transpose(Temp_matrice)
diag_matrice = np.diag(Temp_matrice)
cross_diag_matrice = np.diag(np.fliplr(Temp_matrice))
if player_id == 1:
if np.array_equal( diag_matrice , player1_check_matrice[0]):
return True
if np.array_equal( cross_diag_matrice , player1_check_matrice[0]):
return True
if player_id == 2:
if np.array_equal( diag_matrice , player2_check_matrice[0]):
return True
if np.array_equal( cross_diag_matrice , player2_check_matrice[0]):
return True
# compare A[n] rows
while row_base < row:
if player_id == 1:
if np.array_equal( Temp_matrice[row_base] , player1_check_matrice[0]):
return True
if player_id == 2:
if np.array_equal( Temp_matrice[row_base] , player2_check_matrice[0]):
return True
row_base += 1
# compare A[n] columns using transpose matrice
while column_base < column:
if player_id == 1:
if np.array_equal( Temp_transpose_matrice[column_base] , player1_check_matrice[0]):
return True
if player_id == 2:
if np.array_equal( Temp_transpose_matrice[column_base] , player2_check_matrice[0]):
return True
column_base += 1
return False
# playing Tic_Tac_Toe game
def play_game(self):
print("Game status= ",self.get_game_state())
print("No. of players = ",self.no_of_player)
#set game status to 1 i.e.. "game started"
self.set_game_state(1)
while not self.exit_flag:
player_name=self.get_current_player_name()
player=self.get_current_player_id()
print(self.get_matrix_printable_format_string())
print("Its",player_name,"Turn")
row,column = self.get_position_from_user()
self.set_position_chosen_by_user(row,column,player)
if self.count >= self.game_square_matrix_shape-1:
if self.check_win_condition(player):
print("**************************")
print("__________HURRAY__________")
print("Game over:")
print("winner is: ",player_name)
print("**************************")
self.exit_flag = True
def __str__(self):
return " {0} ".format(self.get_matrix_printable_format_string())
if __name__ == "__main__":
Tick_Tac_Toe_class().play_game()
| [
"harishpolagani@gmail.com"
] | harishpolagani@gmail.com |
90e783ea257a3f30cbf5ecd45264e3e1bfb0f5e5 | dc221edce0ad617aac3b9ad8f4f347ff84f56bf9 | /.history/client_20200807180109.py | 54c6da2132910d7f0425fdabfa0c1da205eccabc | [] | no_license | zlm05170/cacontroller | 310014c83ecf130643230eba87990e635fe1575f | e76d2eb5d58d6adfe7823e0dcd0059027c52b6bc | refs/heads/master | 2022-12-21T08:05:58.315017 | 2020-09-23T11:45:07 | 2020-09-23T11:45:07 | 284,527,141 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,720 | py | import asyncio
import websockets
import time
import json
import traceback
def view_actor_data(actor, port_type, port_name):
pass
def get_port_value_by_name(port_list, name):
for port in port_list:
if port['port']['name'] == name:
return port['value']
def find_port_index_by_name(actor, port_type, port_name):
port_list = actor[port_type]
num_port = len(port_list)
for i in range(num_port):
if port_list[i]['port']['name'] == port_name:
return i
def print_port_data_by_index(actor, port_type, index):
print(actor[port_type][index]['port']['name'] + ': ' + actor[port_type][index]['port']['value'])
async def start():
uri = "ws://192.168.114.18:8887"
actor_info = {
'clazz' : '',
'name' : '',
'uuid' : None,
'parent_uuid' : None
}
gps_gunnerus = actor_info.copy()
gps_gunnerus['clazz'] = 'GPSController'
gps_gunnerus['name'] = 'GPS1'
gps_target_ship_1 = actor_info.copy()
gps_target_ship_1['clazz'] = 'GPSController'
gps_target_ship_1['name'] = 'Target Ship 1'
gps_target_ship_2 = actor_info.copy()
gps_target_ship_2['clazz'] = 'GPSController'
gps_target_ship_2['name'] = 'Target Ship 2'
gunnerus_thruster_port = actor_info.copy()
gunnerus_thruster_port['clazz'] = 'ThrusterActor'
gunnerus_thruster_port['name'] = 'Port'
gunnerus_thruster_starboard = actor_info.copy()
gunnerus_thruster_starboard['clazz'] = 'ThrusterActor'
gunnerus_thruster_starboard['name'] = 'Starboard'
actor_info_list = [gps_gunnerus, gps_target_ship_1, gps_target_ship_2, gunnerus_thruster_port, gunnerus_thruster_starboard]
actor_list = [None for i in range(5)]
async with websockets.connect(uri, ping_timeout=None) as websocket:
while True:
# name = f"luman!"
# await websocket.send(name)
# #print(f"> {name}")
#await sendmessage(websocket)
gunnerus = None
ts1 = None
ts2 = None
if not websocket.open:
print('reconnecting')
websocket = await websockets.connect(uri)
else:
resp = await websocket.recv()
try:
data_dic = json.loads(resp[resp.index('{'):])
evaluate(data_dic)
except:
traceback.print_exc()
await sendmessage()
# async def sendmessage():
# name = f"luman"
# return websocket.send(name)
async def evaluate(data_dic, clazz, name):
x = False if data_dic['clazz'].find(clazz) == -1 else True
y = (data_dic['name'] == name)
for i in range(len(actor_list)):
actor_info = actor_info_list[i]
actor = await evaluate(resp, actor_info['clazz'], actor_info['name'])
if actor != None:
actor_info['uuid'] = actor['uuid']
actor_info['parent_uuid'] = get_port_value_by_name(actor['output'],'PARENT')
print_port_data_by_index(find_port_index_by_name(actor_list[0], 'output', 'longitude'.upper()))
#print(print_port_data_by_index)
if x and y:
return data_dic
def clazz_ls(data_dic):
#print(data_dic['output']) # list
lon, lat, east, north, course, speed, rpm, alpha = 0.0, 0.0, 0.0, 0.0, 0.0, [], [], []
for message in data_dic['output']:
port = message['port']['name']
if port == "longitude".upper():
lon = message['value']['value']
elif port == "latitude".upper():
lat = message['value']['value']
elif port == "easting".upper():
east = message['value']['value']
elif port == "northing".upper():
north = message['value']['value']
elif port == "bearing".upper():
course = message['value']['value']
elif port == "WORLD_VELOCITY".upper():
value_ls = message['value']['valueObjects']
for v in value_ls:
speed.append(v['value'])
elif port == "ACTUAL_RPM".upper():
rpm = message['value']['value']
elif port == "ANGLE".upper():
alpha = message['value']['value']
else:
pass
all_data = [lon, lat, east, north, course, speed, rpm, alpha]
#return all_data
print(all_data)
async def savefile(receivedata):
#time.sleep(5)
with open('serverdata.json', 'w') as json_file:
json_file.writelines(receivedata)
if __name__=='__main__':
#rospy.init_node("simulator_drl")
asyncio.get_event_loop().run_until_complete(start())
asyncio.get_event_loop().run_forever()
| [
"angelxx05170@gmail.com"
] | angelxx05170@gmail.com |
6ffb802354785ccf06e1e714cbc18922392c69de | 3d1f2c12c95b81fd5129a5f13f9b4f35eff50d37 | /Tkinter_Chatbot.py | a1779142813832dbe1ba955d3e23a1b73b31bab2 | [] | no_license | AdityaKansal/Machine-Learning-Experiments | 0126a937a4581b70611f2ecd9d78e570296a15c2 | e0844bc3707257d2547d13189edf3ec6ffd54fec | refs/heads/master | 2020-03-21T10:32:34.814542 | 2018-11-01T08:18:42 | 2018-11-01T08:18:42 | 138,456,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,329 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 28 11:45:17 2018
@author: akansal2
"""
from chatterbot import ChatBot
import tkinter as tk
try:
import ttk as ttk
import ScrolledText
except ImportError:
import tkinter.ttk as ttk
import tkinter.scrolledtext as ScrolledText
import time
class TkinterGUIExample(tk.Tk):
def __init__(self, *args, **kwargs):
"""
Create & set window variables.
"""
tk.Tk.__init__(self, *args, **kwargs)
self.chatbot = ChatBot(
"GUI Bot",
storage_adapter="chatterbot.storage.SQLStorageAdapter",
logic_adapters=[
"chatterbot.logic.BestMatch"
],
input_adapter="chatterbot.input.VariableInputTypeAdapter",
output_adapter="chatterbot.output.OutputAdapter",
#database_uri="../database.db"
)
self.title("Chatterbot")
self.initialize()
def initialize(self):
"""
Set window layout.
"""
self.grid()
self.respond = ttk.Button(self, text='Get Response', command=self.get_response)
self.respond.grid(column=0, row=0, sticky='nesw', padx=3, pady=3)
self.usr_input = ttk.Entry(self, state='normal')
self.usr_input.grid(column=1, row=0, sticky='nesw', padx=3, pady=3)
self.conversation_lbl = ttk.Label(self, anchor=tk.E, text='Conversation:')
self.conversation_lbl.grid(column=0, row=1, sticky='nesw', padx=3, pady=3)
self.conversation = ScrolledText.ScrolledText(self, state='disabled')
self.conversation.grid(column=0, row=2, columnspan=2, sticky='nesw', padx=3, pady=3)
def get_response(self):
"""
Get a response from the chatbot and display it.
"""
user_input = self.usr_input.get()
self.usr_input.delete(0, tk.END)
response = self.chatbot.get_response(user_input)
self.conversation['state'] = 'normal'
self.conversation.insert(
tk.END, "Human: " + user_input + "\n" + "ChatBot: " + str(response.text) + "\n"
)
self.conversation['state'] = 'disabled'
time.sleep(0.5)
gui_example = TkinterGUIExample()
gui_example.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
2383da4f3157b6df7d4081cad9ec8c16e5a45b8b | 123fe9cac97e52845416745ec2f0888f40a2a644 | /setup.py | 6a4e1b7fa10a76b3357a870258c07113d9429c7e | [] | no_license | kevlened/SQLementary | 49ed78bc5b7fbfb12aae462833755baf16b52324 | 7f5430a2ace8269cda8bbc3496690ecbe5055a2e | refs/heads/master | 2016-09-09T18:22:10.304382 | 2013-04-01T23:10:23 | 2013-04-01T23:10:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,249 | py | import sys
import os
from cx_Freeze import setup, Executable
# Loops through a directory, grabbing relative file path names
def files_in_dir(directory_name):
f = []
for (dirpath, dirname, filenames) in os.walk(directory_name):
for filename in filenames:
relpath = os.path.join(dirpath, filename)
f.append(relpath)
return f
# Dependencies are automatically detected, but it might need fine tuning.
packages = ["os","flask_sqlalchemy","flask_admin","flask_login","flask_wtf","sqlalchemy.dialects.sqlite","werkzeug","jinja2.ext","email"]
include_files = ['README.txt']
include_files.extend(files_in_dir("static"))
include_files.extend(files_in_dir("templates"))
build_exe_options = {"packages":packages,"include_files":include_files}
# GUI applications require a different base on Windows (the default is for a
# console application).
base = None
#if sys.platform == "win32":
# base = "Win32GUI"
setup( name = "SQLementary",
version = "0.1",
description = "An easy SQL generator",
options = {"build_exe": build_exe_options},
executables = [Executable("web.py", base=base, targetName="SQLementary.exe", shortcutName="SQLementary", shortcutDir='ProgramMenuFolder')]) | [
"boyettel@gmail.com"
] | boyettel@gmail.com |
67ff4516b957eac77cb2d9c3c3cdd7977bf99823 | 14480e08c80c17abf8d436d600e44461d826c2f5 | /app/status/urls.py | abce2eae987c491cc7cba1e72e78094c7c49edd9 | [] | no_license | baidang201/mycat_backend | 9fece7632794a10c35541ef32cdef21a2084b7ee | bac18b960d648143afc1704fc79aec4069e4e8e0 | refs/heads/master | 2020-03-20T04:25:09.137888 | 2018-06-09T13:10:57 | 2018-06-09T13:10:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | from flask import Blueprint
import app.status.controller as ctrl
status = Blueprint('status', __name__)
status.add_url_rule('/token/<user>', view_func=ctrl.token, methods=['GET'])
status.add_url_rule('/point/<user>', view_func=ctrl.point, methods=['GET'])
| [
"psy2848048@gmail.com"
] | psy2848048@gmail.com |
c5dbea9ccf836413ec434ef98dbb19109f8484fe | 67acb19a994b72cd9f6d8f833f147d03dae8f86c | /index.py | b8a371da9417bbfd21955c55aa1de13c0df03611 | [] | no_license | Mathewolakunle/cscfinal | 039ca35fe489b5bb1614ab5ec99859278f495186 | a057276dcc3cbdc75681affdb4666a4c4e20da13 | refs/heads/main | 2023-04-26T01:58:19.976855 | 2021-04-30T04:09:37 | 2021-04-30T04:09:37 | 363,012,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,292 | py | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objs as go
import pandas as pd
url_confirmed = 'time_series_covid19_confirmed_global.csv'
url_deaths = 'time_series_covid19_deaths_global.csv'
url_recovered = 'time_series_covid19_recovered_global.csv'
confirmed = pd.read_csv(url_confirmed)
deaths = pd.read_csv(url_deaths)
recovered = pd.read_csv(url_recovered)
# Unpivot data frames
date1 = confirmed.columns[4:]
total_confirmed = confirmed.melt(id_vars=['Province/State', 'Country/Region', 'Lat', 'Long'], value_vars=date1, var_name='date', value_name='confirmed')
date2 = deaths.columns[4:]
total_deaths = deaths.melt(id_vars=['Province/State', 'Country/Region', 'Lat', 'Long'], value_vars=date2, var_name='date', value_name='death')
date3 = recovered.columns[4:]
total_recovered = recovered.melt(id_vars=['Province/State', 'Country/Region', 'Lat', 'Long'], value_vars=date3, var_name='date', value_name='recovered')
# Merging data frames
covid_data = total_confirmed.merge(right=total_deaths, how='left', on=['Province/State', 'Country/Region', 'date', 'Lat', 'Long'])
covid_data = covid_data.merge(right=total_recovered, how='left', on=['Province/State', 'Country/Region', 'date', 'Lat', 'Long'])
# Converting date column from string to proper date format
covid_data['date'] = pd.to_datetime(covid_data['date'])
# Check how many missing value naN
covid_data.isna().sum()
# Replace naN with 0
covid_data['recovered'] = covid_data['recovered'].fillna(0)
# Calculate new column
covid_data['active'] = covid_data['confirmed'] - covid_data['death'] - covid_data['recovered']
covid_data_1 = covid_data.groupby(['date'])[['confirmed', 'death', 'recovered', 'active']].sum().reset_index()
covid_data_2 = covid_data.groupby(['date', 'Country/Region'])[['confirmed', 'death', 'recovered', 'active']].sum().reset_index()
# create dictionary of list
covid_data_dict = covid_data[['Country/Region', 'Lat', 'Long']]
list_locations = covid_data_dict.set_index('Country/Region')[['Lat', 'Long']].T.to_dict('dict')
app = dash.Dash(__name__, meta_tags=[{"name": "viewport", "content": "width=device-width"}])
server = app.server
app.layout = html.Div([
html.Div([
html.Div([
html.Img(src=app.get_asset_url('corona-logo-1.jpg'),
id='corona-image',
style={
"height": "60px",
"width": "auto",
"margin-bottom": "25px",
},
)
],
className="one-third column",
),
html.Div([
html.Div([
html.H3("Covid - 19", style={"margin-bottom": "0px", 'color': 'white'}),
html.H5("Track Covid - 19 Cases", style={"margin-top": "0px", 'color': 'white'}),
])
], className="one-half column", id="title"),
html.Div([
html.H6('Last Updated: ' + str(covid_data_1['date'].iloc[-1].strftime("%B %d, %Y")) + ' 00:01 (UTC)',
style={'color': 'orange'}),
], className="one-third column", id='title1'),
], id="header", className="row flex-display", style={"margin-bottom": "25px"}),
html.Div([
html.Div([
html.H6(children='Global Cases',
style={
'textAlign': 'center',
'color': 'white'}
),
html.P(f"{covid_data_1['confirmed'].iloc[-1]:,.0f}",
style={
'textAlign': 'center',
'color': 'orange',
'fontSize': 40}
),
html.P('new: ' + f"{covid_data_1['confirmed'].iloc[-1] - covid_data_1['confirmed'].iloc[-2]:,.0f} "
+ ' (' + str(round(((covid_data_1['confirmed'].iloc[-1] - covid_data_1['confirmed'].iloc[-2]) /
covid_data_1['confirmed'].iloc[-1]) * 100, 2)) + '%)',
style={
'textAlign': 'center',
'color': 'orange',
'fontSize': 15,
'margin-top': '-18px'}
)], className="card_container three columns",
),
html.Div([
html.H6(children='Global Deaths',
style={
'textAlign': 'center',
'color': 'white'}
),
html.P(f"{covid_data_1['death'].iloc[-1]:,.0f}",
style={
'textAlign': 'center',
'color': '#dd1e35',
'fontSize': 40}
),
html.P('new: ' + f"{covid_data_1['death'].iloc[-1] - covid_data_1['death'].iloc[-2]:,.0f} "
+ ' (' + str(round(((covid_data_1['death'].iloc[-1] - covid_data_1['death'].iloc[-2]) /
covid_data_1['death'].iloc[-1]) * 100, 2)) + '%)',
style={
'textAlign': 'center',
'color': '#dd1e35',
'fontSize': 15,
'margin-top': '-18px'}
)], className="card_container three columns",
),
html.Div([
html.H6(children='Global Recovered',
style={
'textAlign': 'center',
'color': 'white'}
),
html.P(f"{covid_data_1['recovered'].iloc[-1]:,.0f}",
style={
'textAlign': 'center',
'color': 'green',
'fontSize': 40}
),
html.P('new: ' + f"{covid_data_1['recovered'].iloc[-1] - covid_data_1['recovered'].iloc[-2]:,.0f} "
+ ' (' + str(round(((covid_data_1['recovered'].iloc[-1] - covid_data_1['recovered'].iloc[-2]) /
covid_data_1['recovered'].iloc[-1]) * 100, 2)) + '%)',
style={
'textAlign': 'center',
'color': 'green',
'fontSize': 15,
'margin-top': '-18px'}
)], className="card_container three columns",
),
html.Div([
html.H6(children='Global Active',
style={
'textAlign': 'center',
'color': 'white'}
),
html.P(f"{covid_data_1['active'].iloc[-1]:,.0f}",
style={
'textAlign': 'center',
'color': '#e55467',
'fontSize': 40}
),
html.P('new: ' + f"{covid_data_1['active'].iloc[-1] - covid_data_1['active'].iloc[-2]:,.0f} "
+ ' (' + str(round(((covid_data_1['active'].iloc[-1] - covid_data_1['active'].iloc[-2]) /
covid_data_1['active'].iloc[-1]) * 100, 2)) + '%)',
style={
'textAlign': 'center',
'color': '#e55467',
'fontSize': 15,
'margin-top': '-18px'}
)], className="card_container three columns")
], className="row flex-display"),
html.Div([
html.Div([
html.P('Select Country:', className='fix_label', style={'color': 'white'}),
dcc.Dropdown(id='w_countries',
multi=False,
clearable=True,
value='US',
placeholder='Select Countries',
options=[{'label': c, 'value': c}
for c in (covid_data['Country/Region'].unique())], className='dcc_compon'),
html.P('New Cases : ' + ' ' + ' ' + str(covid_data_2['date'].iloc[-1].strftime("%B %d, %Y")) + ' ', className='fix_label', style={'color': 'white', 'text-align': 'center'}),
dcc.Graph(id='confirmed', config={'displayModeBar': False}, className='dcc_compon',
style={'margin-top': '20px'},
),
dcc.Graph(id='death', config={'displayModeBar': False}, className='dcc_compon',
style={'margin-top': '20px'},
),
dcc.Graph(id='recovered', config={'displayModeBar': False}, className='dcc_compon',
style={'margin-top': '20px'},
),
dcc.Graph(id='active', config={'displayModeBar': False}, className='dcc_compon',
style={'margin-top': '20px'},
),
], className="create_container three columns", id="cross-filter-options"),
html.Div([
dcc.Graph(id='pie_chart',
config={'displayModeBar': 'hover'}),
], className="create_container four columns"),
html.Div([
dcc.Graph(id="line_chart")
], className="create_container five columns"),
], className="row flex-display"),
html.Div([
html.Div([
dcc.Graph(id="map")], className="create_container1 twelve columns"),
], className="row flex-display"),
], id="mainContainer",
style={"display": "flex", "flex-direction": "column"})
@app.callback(
Output('confirmed', 'figure'),
[Input('w_countries', 'value')])
def update_confirmed(w_countries):
covid_data_2 = covid_data.groupby(['date', 'Country/Region'])[['confirmed', 'death', 'recovered', 'active']].sum().reset_index()
value_confirmed = covid_data_2[covid_data_2['Country/Region'] == w_countries]['confirmed'].iloc[-1] - covid_data_2[covid_data_2['Country/Region'] == w_countries]['confirmed'].iloc[-2]
delta_confirmed = covid_data_2[covid_data_2['Country/Region'] == w_countries]['confirmed'].iloc[-2] - covid_data_2[covid_data_2['Country/Region'] == w_countries]['confirmed'].iloc[-3]
return {
'data': [go.Indicator(
mode='number+delta',
value=value_confirmed,
delta={'reference': delta_confirmed,
'position': 'right',
'valueformat': ',g',
'relative': False,
'font': {'size': 15}},
number={'valueformat': ',',
'font': {'size': 20},
},
domain={'y': [0, 1], 'x': [0, 1]})],
'layout': go.Layout(
title={'text': 'New Confirmed',
'y': 1,
'x': 0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(color='orange'),
paper_bgcolor='#1f2c56',
plot_bgcolor='#1f2c56',
height=50
),
}
@app.callback(
Output('death', 'figure'),
[Input('w_countries', 'value')])
def update_confirmed(w_countries):
covid_data_2 = covid_data.groupby(['date', 'Country/Region'])[['confirmed', 'death', 'recovered', 'active']].sum().reset_index()
value_death = covid_data_2[covid_data_2['Country/Region'] == w_countries]['death'].iloc[-1] - covid_data_2[covid_data_2['Country/Region'] == w_countries]['death'].iloc[-2]
delta_death = covid_data_2[covid_data_2['Country/Region'] == w_countries]['death'].iloc[-2] - covid_data_2[covid_data_2['Country/Region'] == w_countries]['death'].iloc[-3]
return {
'data': [go.Indicator(
mode='number+delta',
value=value_death,
delta={'reference': delta_death,
'position': 'right',
'valueformat': ',g',
'relative': False,
'font': {'size': 15}},
number={'valueformat': ',',
'font': {'size': 20},
},
domain={'y': [0, 1], 'x': [0, 1]})],
'layout': go.Layout(
title={'text': 'New Death',
'y': 1,
'x': 0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(color='#dd1e35'),
paper_bgcolor='#1f2c56',
plot_bgcolor='#1f2c56',
height=50
),
}
@app.callback(
Output('recovered', 'figure'),
[Input('w_countries', 'value')])
def update_confirmed(w_countries):
covid_data_2 = covid_data.groupby(['date', 'Country/Region'])[['confirmed', 'death', 'recovered', 'active']].sum().reset_index()
value_recovered = covid_data_2[covid_data_2['Country/Region'] == w_countries]['recovered'].iloc[-1] - covid_data_2[covid_data_2['Country/Region'] == w_countries]['recovered'].iloc[-2]
delta_recovered = covid_data_2[covid_data_2['Country/Region'] == w_countries]['recovered'].iloc[-2] - covid_data_2[covid_data_2['Country/Region'] == w_countries]['recovered'].iloc[-3]
return {
'data': [go.Indicator(
mode='number+delta',
value=value_recovered,
delta={'reference': delta_recovered,
'position': 'right',
'valueformat': ',g',
'relative': False,
'font': {'size': 15}},
number={'valueformat': ',',
'font': {'size': 20},
},
domain={'y': [0, 1], 'x': [0, 1]})],
'layout': go.Layout(
title={'text': 'New Recovered',
'y': 1,
'x': 0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(color='green'),
paper_bgcolor='#1f2c56',
plot_bgcolor='#1f2c56',
height=50
),
}
@app.callback(
Output('active', 'figure'),
[Input('w_countries', 'value')])
def update_confirmed(w_countries):
covid_data_2 = covid_data.groupby(['date', 'Country/Region'])[['confirmed', 'death', 'recovered', 'active']].sum().reset_index()
value_active = covid_data_2[covid_data_2['Country/Region'] == w_countries]['active'].iloc[-1] - covid_data_2[covid_data_2['Country/Region'] == w_countries]['active'].iloc[-2]
delta_active = covid_data_2[covid_data_2['Country/Region'] == w_countries]['active'].iloc[-2] - covid_data_2[covid_data_2['Country/Region'] == w_countries]['active'].iloc[-3]
return {
'data': [go.Indicator(
mode='number+delta',
value=value_active,
delta={'reference': delta_active,
'position': 'right',
'valueformat': ',g',
'relative': False,
'font': {'size': 15}},
number={'valueformat': ',',
'font': {'size': 20},
},
domain={'y': [0, 1], 'x': [0, 1]})],
'layout': go.Layout(
title={'text': 'New Active',
'y': 1,
'x': 0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(color='#e55467'),
paper_bgcolor='#1f2c56',
plot_bgcolor='#1f2c56',
height=50
),
}
# Create pie chart (total casualties)
@app.callback(Output('pie_chart', 'figure'),
[Input('w_countries', 'value')])
def update_graph(w_countries):
covid_data_2 = covid_data.groupby(['date', 'Country/Region'])[['confirmed', 'death', 'recovered', 'active']].sum().reset_index()
new_confirmed = covid_data_2[covid_data_2['Country/Region'] == w_countries]['confirmed'].iloc[-1]
new_death = covid_data_2[covid_data_2['Country/Region'] == w_countries]['death'].iloc[-1]
new_recovered = covid_data_2[covid_data_2['Country/Region'] == w_countries]['recovered'].iloc[-1]
new_active = covid_data_2[covid_data_2['Country/Region'] == w_countries]['active'].iloc[-1]
colors = ['orange', '#dd1e35', 'green', '#e55467']
return {
'data': [go.Pie(labels=['Confirmed', 'Death', 'Recovered', 'Active'],
values=[new_confirmed, new_death, new_recovered, new_active],
marker=dict(colors=colors),
hoverinfo='label+value+percent',
textinfo='label+value',
textfont=dict(size=13),
hole=.7,
rotation=45
# insidetextorientation='radial',
)],
'layout': go.Layout(
# width=800,
# height=520,
plot_bgcolor='#1f2c56',
paper_bgcolor='#1f2c56',
hovermode='closest',
title={
'text': 'Total Cases : ' + (w_countries),
'y': 0.93,
'x': 0.5,
'xanchor': 'center',
'yanchor': 'top'},
titlefont={
'color': 'white',
'size': 20},
legend={
'orientation': 'h',
'bgcolor': '#1f2c56',
'xanchor': 'center', 'x': 0.5, 'y': -0.07},
font=dict(
family="sans-serif",
size=12,
color='white')
),
}
# Create bar chart (show new cases)
@app.callback(Output('line_chart', 'figure'),
[Input('w_countries', 'value')])
def update_graph(w_countries):
# main data frame
covid_data_2 = covid_data.groupby(['date', 'Country/Region'])[['confirmed', 'death', 'recovered', 'active']].sum().reset_index()
# daily confirmed
covid_data_3 = covid_data_2[covid_data_2['Country/Region'] == w_countries][['Country/Region', 'date', 'confirmed']].reset_index()
covid_data_3['daily confirmed'] = covid_data_3['confirmed'] - covid_data_3['confirmed'].shift(1)
covid_data_3['Rolling Ave.'] = covid_data_3['daily confirmed'].rolling(window=7).mean()
return {
'data': [go.Bar(x=covid_data_3[covid_data_3['Country/Region'] == w_countries]['date'].tail(30),
y=covid_data_3[covid_data_3['Country/Region'] == w_countries]['daily confirmed'].tail(30),
name='Daily confirmed',
marker=dict(
color='orange'),
hoverinfo='text',
hovertext=
'<b>Date</b>: ' + covid_data_3[covid_data_3['Country/Region'] == w_countries]['date'].tail(30).astype(str) + '<br>' +
'<b>Daily confirmed</b>: ' + [f'{x:,.0f}' for x in covid_data_3[covid_data_3['Country/Region'] == w_countries]['daily confirmed'].tail(30)] + '<br>' +
'<b>Country</b>: ' + covid_data_3[covid_data_3['Country/Region'] == w_countries]['Country/Region'].tail(30).astype(str) + '<br>'
),
go.Scatter(x=covid_data_3[covid_data_3['Country/Region'] == w_countries]['date'].tail(30),
y=covid_data_3[covid_data_3['Country/Region'] == w_countries]['Rolling Ave.'].tail(30),
mode='lines',
name='Rolling average of the last seven days - daily confirmed cases',
line=dict(width=3, color='#FF00FF'),
# marker=dict(
# color='green'),
hoverinfo='text',
hovertext=
'<b>Date</b>: ' + covid_data_3[covid_data_3['Country/Region'] == w_countries]['date'].tail(30).astype(str) + '<br>' +
'<b>Rolling Ave.(last 7 days)</b>: ' + [f'{x:,.0f}' for x in covid_data_3[covid_data_3['Country/Region'] == w_countries]['Rolling Ave.'].tail(30)] + '<br>'
)],
'layout': go.Layout(
plot_bgcolor='#1f2c56',
paper_bgcolor='#1f2c56',
title={
'text': 'Last 30 Days Confirmed Cases : ' + (w_countries),
'y': 0.93,
'x': 0.5,
'xanchor': 'center',
'yanchor': 'top'},
titlefont={
'color': 'white',
'size': 20},
hovermode='x',
margin = dict(r = 0),
xaxis=dict(title='<b>Date</b>',
color='white',
showline=True,
showgrid=True,
showticklabels=True,
linecolor='white',
linewidth=2,
ticks='outside',
tickfont=dict(
family='Arial',
size=12,
color='white'
)
),
yaxis=dict(title='<b>Daily confirmed Cases</b>',
color='white',
showline=True,
showgrid=True,
showticklabels=True,
linecolor='white',
linewidth=2,
ticks='outside',
tickfont=dict(
family='Arial',
size=12,
color='white'
)
),
legend={
'orientation': 'h',
'bgcolor': '#1f2c56',
'xanchor': 'center', 'x': 0.5, 'y': -0.3},
font=dict(
family="sans-serif",
size=12,
color='white'),
)
}
# Create scattermapbox chart
@app.callback(Output('map', 'figure'),
[Input('w_countries', 'value')])
def update_graph(w_countries):
covid_data_3 = covid_data.groupby(['Lat', 'Long', 'Country/Region'])[['confirmed', 'death', 'recovered', 'active']].max().reset_index()
covid_data_4 = covid_data_3[covid_data_3['Country/Region'] == w_countries]
if w_countries:
zoom = 2
zoom_lat = list_locations[w_countries]['Lat']
zoom_lon = list_locations[w_countries]['Long']
return {
'data': [go.Scattermapbox(
lon=covid_data_4['Long'],
lat=covid_data_4['Lat'],
mode='markers',
marker=go.scattermapbox.Marker(
size=covid_data_4['confirmed'] / 1500,
color=covid_data_4['confirmed'],
colorscale='hsv',
showscale=False,
sizemode='area',
opacity=0.3),
hoverinfo='text',
hovertext=
'<b>Country</b>: ' + covid_data_4['Country/Region'].astype(str) + '<br>' +
'<b>Longitude</b>: ' + covid_data_4['Long'].astype(str) + '<br>' +
'<b>Latitude</b>: ' + covid_data_4['Lat'].astype(str) + '<br>' +
'<b>Confirmed</b>: ' + [f'{x:,.0f}' for x in covid_data_4['confirmed']] + '<br>' +
'<b>Death</b>: ' + [f'{x:,.0f}' for x in covid_data_4['death']] + '<br>' +
'<b>Recovered</b>: ' + [f'{x:,.0f}' for x in covid_data_4['recovered']] + '<br>' +
'<b>Active</b>: ' + [f'{x:,.0f}' for x in covid_data_4['active']] + '<br>'
)],
'layout': go.Layout(
margin={"r": 0, "t": 0, "l": 0, "b": 0},
# width=1820,
# height=650,
hovermode='closest',
mapbox=dict(
accesstoken='pk.eyJ1IjoicXM2MjcyNTI3IiwiYSI6ImNraGRuYTF1azAxZmIycWs0cDB1NmY1ZjYifQ.I1VJ3KjeM-S613FLv3mtkw',
center=go.layout.mapbox.Center(lat=zoom_lat, lon=zoom_lon),
# style='open-street-map',
style='dark',
zoom=zoom
),
autosize=True,
)
}
if __name__ == '__main__':
app.run_server(debug=True)
| [
"mathew.alaba@coyotes.usd.edu"
] | mathew.alaba@coyotes.usd.edu |
5bbb829967936246b03da49dbe2c58f523568660 | 6959d1dec46a490ac1469e21c68506157efea0ee | /Pandora/apps.py | 02ebc2445ba9bb709fd38d97417e67ff5d5ceeb7 | [] | no_license | andyshu6/Nuclear | 305f589a7d81cd90f7f19c3b28cb50d5d7867af3 | 88c68e50dc7506f495120313536e3cfa48329e8f | refs/heads/master | 2020-03-08T18:29:33.430204 | 2018-07-11T16:27:18 | 2018-07-11T16:27:18 | 107,992,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from django.apps import AppConfig
class PandoraConfig(AppConfig):
name = 'Pandora'
| [
"john@sample.com"
] | john@sample.com |
5eaa14e11dd89e64149ecac16a37c91c7920acbb | b9429fed4685c6eacb9e2965384a64274e4e7003 | /community_detection/profiling_operators/cohesion/set_cohesion.py | a72fe4bb7345d26beeb42cbf12f12320130a3822 | [] | no_license | JoGreen/twitterProfiling | 904141f053bd71f77ac9df1e9d42213c00c895f0 | f6a49ee6d6cece73b852d1640b0c6f9214a3f522 | refs/heads/master | 2022-12-09T03:36:57.900332 | 2018-02-27T19:36:39 | 2018-02-27T19:36:39 | 108,031,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,045 | py | from itertools import combinations
from community_detection.profiling_operators.similarity import jaccard
from community_detection.profiling_operators.means import geometric_mean
def compute_cohesion(users):
pass
def retweet_cohesion():
pass
def topic_cohesion():
pass
def profiles_cohesion(profiles):
#type:(list[dict])->float
try:
profiles = list(profiles)
profiles_with_display_name = []
mean_profile_length = 0
for p in profiles:
interests = []
try:
interests = [i for i in p['info']['interests']['all'].keys()]
# interests.append(p['info']['language']['primary']['name'])
except KeyError: pass
try:
interests.append(p['info']['location']['country']['name'])
print 'countr name added in profile interests'
except KeyError: pass
except Exception as e:
print 'generic exception ->it s not key error exception',
print e
if len(interests) > 0:
profiles_with_display_name.append(interests)
mean_profile_length = mean_profile_length + len(interests)
print profiles_with_display_name
mean_profile_length = mean_profile_length/len(profiles)
except TypeError :
print 'input for profile cohesion has to be an iterable'
profiles_indexes = range(0,len(profiles_with_display_name) )
print profiles_indexes
profiles_couples = combinations(profiles_indexes,2)
#cohesion_vector = [jaccard(list(couple)[0], list(couple)[1]) for couple in profiles_couples]
jaccard_vector = []
intersection_vector = []
for index in profiles_couples:
#jaccars similarities
jaccard_vector.append(jaccard(profiles_with_display_name[index[0] ], profiles_with_display_name[index[1] ]) )
max_length = max(len(profiles_with_display_name[index[0] ]), len(profiles_with_display_name[index[1] ]) )
#similarities based on intersection cardinality divided the mean of profiles length
common_interests = len(set(profiles_with_display_name[index[0] ]).intersection(set(profiles_with_display_name[index[1] ]) ) )
intersection_vector.append(float(common_interests)) #/mean_profile_length)
denominatore = max(max(intersection_vector), mean_profile_length) # or interests number in clique profile but ..
# it risks to produce some value of intersection_vector > 1 anyway it seems not enough rapresentitive.
intersection_vector = [e/denominatore for e in intersection_vector]
print jaccard_vector
print intersection_vector
#geometric mean of cohesion
jaccard_cohesion = geometric_mean(jaccard_vector)
intesection_cohesion = geometric_mean(intersection_vector)
print 'jaccard cohesion =', jaccard_cohesion
print 'intersection cohesion', intesection_cohesion
#return jaccard_cohesion
return intesection_cohesion
def _get_followers_count(user_id):
pass
| [
"giorgiomaria@live.it"
] | giorgiomaria@live.it |
927abca95b4c49ca979cdcfe7c8e51381094bb8f | 7c48279b15dc98c0620834ad6a40a86e4ba9ee59 | /projects/Casino.py | 8da2bc719951963ddc75c73ec5b3962903848acb | [] | no_license | loganwyas/oasisdigital | b7942d62d8ea501b8a822b562562365340107ed1 | 5faf392ce7570be06a0e224b914c90ae1094b74d | refs/heads/master | 2023-01-23T14:31:35.762923 | 2020-03-24T15:49:00 | 2020-03-24T15:49:00 | 220,025,283 | 0 | 0 | null | 2023-01-07T16:19:07 | 2019-11-06T15:11:48 | JavaScript | UTF-8 | Python | false | false | 18,296 | py | # -*- coding: utf-8 -*-
# Logan W. and Zayn A.
import random
import time
import sys
cards = [2, 3, 4, 5, 6, 7, 8, 9, 10, 'J', 'Q', 'K', 'A']
chips = 1000
wheels = []
dollars = 0
slots = ['-', '-', '-', '=', '=', '=', '≡', '≡', '≡', 'ꭥ', 'ꭥ', '♥', '♥', '$', '7']
originalbet = 0
def ATM():
global chips
global dollars
print("Would you like to exchange chips or dollars?")
answer = raw_input().lower()
if 'chip' in answer:
print("You have decided to exchange chips for money.")
time.sleep(2)
NumberofChips(3)
print("How much do you want to exchange? (Each chip is equal to 5 dollars)")
betString = raw_input()
bet = [int(s) for s in betString.split() if s.isdigit()][0]
if bet > chips:
print("You don't have enough chips for that. You have " + str(chips) + " chips.")
time.sleep(3)
ATM()
elif bet == 0:
print("You can't exchange 0 chips!")
ATM()
else:
dollars += (5 * bet)
chips -= bet
elif 'dollar' in answer or 'money' in answer:
print("You have decided to exchange money for chips.")
time.sleep(2)
NumberofChips(3)
print("How much do you want to exchange? (Each chip is equal to 5 dollars)")
betString = raw_input()
bet = [int(s) for s in betString.split() if s.isdigit()][0]
if bet > dollars:
print("You don't have enough money for that. You have " + str(dollars) + " dollars.")
time.sleep(3)
ATM()
elif bet == 0:
print("You can't exchange 0 dollars!")
ATM()
elif bet % 5 != 0:
print("Error: Every chip is worth 5 dollars. You entered a value not divisable by 5.")
time.sleep(5)
ATM()
else:
dollars -= bet
chips += (bet / 5)
else:
print("Unknown answer. Going back to menu.")
time.sleep(2)
Menu()
def Food():
    # Snack bar: sell one food or drink item for dollars, then offer to repeat.
    # Prompts, prices, and menu text are unchanged from the original; the
    # ten near-identical "not enough money" branches are collapsed into one.
    global dollars
    NoChips()
    NumberofChips(2)
    print("What would you like to buy? (food or drinks)")
    answer = raw_input()
    if 'f' in answer:
        kind = "food"
        items = [("Hot Dog", 5), ("Hamburger", 7), ("Nachos", 4), ("Wings", 6), ("Salad", 3)]
    elif 'd' in answer:
        kind = "drink"
        items = [("Water", 0), ("Coca-Cola", 5), ("Diet Coke", 4), ("Sprite", 5), ("Tea", 3)]
    else:
        print("Unknown answer. Going back to menu.")
        time.sleep(2)
        Menu()
        return
    print("Type the number of the " + kind + " you want.")
    for number, (name, price) in enumerate(items, 1):
        print(str(number) + ". " + name + " - " + ("FREE" if price == 0 else "$" + str(price)))
    answer = raw_input()
    choice = None
    # Match the first menu number that appears in the reply, as the original did.
    for number in range(1, len(items) + 1):
        if str(number) in answer:
            choice = number
            break
    if choice is None:
        print("Unknown answer. Going back to menu.")
        time.sleep(3)
        Menu()
        return
    price = items[choice - 1][1]
    if price > dollars:
        print("You don't have enough money for that. Going back to menu.")
        time.sleep(3)
        Menu()
        return  # BUGFIX: the original still charged the player once Menu() returned
    dollars -= price
    print("Buy anything else?")
    answer = raw_input().lower()
    if 'y' in answer and 'n' not in answer:
        Food()
    elif 'n' in answer:
        Menu()
    else:
        print("Unknown answer. Going back to menu.")
        time.sleep(2)
        Menu()
def Menu():
    # Main hub: dispatch to the games, the ATM, the snack bar, or exit.
    NoChips()
    NumberofChips(3)
    print("Play Blackjack (b or j), play slot machine (slot), use ATM (atm), buy food (buy or food), or quit (q)?")
    answer = raw_input().lower()
    # BUGFIX: test the specific keywords ('slot', 'atm', 'buy'/'food', 'q')
    # before the single letters -- "buy" contains 'b', so the original sent
    # anyone typing "buy" to the Blackjack table instead of the snack bar.
    if 'slot' in answer:
        print("Easy (easy), medium (m), or hard (h) slots?")
        answer = raw_input().lower()
        if 'easy' in answer:
            easyWheels()
        elif 'm' in answer:
            mediumWheels()
        elif 'h' in answer:
            hardWheels()
        else:
            print("Unknown answer.")
            Menu()
    elif 'atm' in answer:
        ATM()
    elif 'buy' in answer or 'food' in answer:
        Food()
    elif 'q' in answer:
        Leave()
    elif 'b' in answer or 'j' in answer:
        Blackjack()
    else:
        print("Unknown answer.")
        Menu()
def NumberofChips(o):
    # Show the player's current balance; `o` selects the wording:
    # 1 = chips only, 2 = dollars only, 3 = both, 4 = "chips left" summary.
    # Any other value prints nothing. Always pauses two seconds.
    messages = {
        1: lambda: "You have " + str(chips) + " chips",
        2: lambda: "You have " + str(dollars) + " dollars",
        3: lambda: "You have " + str(dollars) + " dollars and " + str(chips) + " chips",
        4: lambda: str(chips) + " chips left",
    }
    if o in messages:
        print(messages[o]())
    time.sleep(2)
def NoChips():
    # Kick the player out of the casino once they are completely broke.
    global chips
    global dollars
    if chips != 0 or dollars != 0:
        return
    print("You have run out of chips. You must leave the casino.")
    time.sleep(3)
    Leave()
def Leave():
    # End the program with a farewell message (sys.exit raises SystemExit,
    # so raising it directly is behaviorally identical).
    raise SystemExit("You have left the casino.")
def easyWheels():
    # Easy slot machine: three reels drawn from the cheap symbols only
    # (slots[0:9]), with small multipliers (2x/3x/5x) for a triple.
    global wheels
    global bet
    global chips
    global originalbet
    NoChips()
    NumberofChips(1)
    print("How much would you like to bet? (Remember: this will be the bet per spin of the slot machine)")
    betString = raw_input()
    bet = [int(s) for s in betString.split() if s.isdigit()][0]
    originalbet = bet
    if bet > chips:
        print("You don't have enough chips for that. You have " + str(chips) + " chips.")
        time.sleep(3)
        easyWheels()  # BUGFIX: the original restarted hardWheels() here
        return
    elif bet == 0:
        print("You can't bet 0 chips!")
        easyWheels()  # BUGFIX: the original restarted hardWheels() here
        return
    print("After each spin, you can either spin again (Just press enter), quit (q), or change bet (c). Press enter after each input.")
    time.sleep(4)
    while True:
        wheels = []
        chips -= bet  # the spin itself costs the bet
        for x in range(3):
            number = random.choice(slots[0:9])
            wheels += [number]
        print(wheels[0] + " " + wheels[1] + " " + wheels[2])
        if wheels[0] == wheels[1] == wheels[2]:
            # Payout table for the easy machine.
            if wheels[0] == '-':
                print("+" + str(bet * 2) + " chips")
                chips += (bet * 2)
            elif wheels[0] == '=':
                print("+" + str(bet * 3) + " chips")
                chips += (bet * 3)
            elif wheels[0] == '≡':
                print("+" + str(bet * 5) + " chips")
                chips += (bet * 5)
        NoChips()
        NumberofChips(4)
        answer = raw_input().lower()
        if answer == '':
            continue
        elif answer == 'q':
            Menu()
            return  # BUGFIX: don't resume spinning after the menu unwinds
        elif answer == 'c':
            NumberofChips(1)
            print("How much would you like to bet? (Remember: this will be the bet per spin of the slot machine)")
            betString = raw_input()
            bet = [int(s) for s in betString.split() if s.isdigit()][0]
            if bet > chips:
                print("You don't have enough chips for that. You have " + str(chips) + " chips.")
                time.sleep(3)
                print("Bet will stay as current bet.")
                time.sleep(2)
                bet = originalbet
                continue
            elif bet == 0:
                print("You can't bet 0 chips!")
                time.sleep(2)
                print("Bet will stay as current bet.")
                time.sleep(2)
                bet = originalbet
                continue
        # Remember the bet now in force so a rejected change can restore it.
        originalbet = bet
def mediumWheels():
    # Medium slot machine: reels drawn from slots[0:13] (adds 'ꭥ' and '♥'),
    # with mid-range multipliers (5x-50x) for a triple.
    global wheels
    global bet
    global chips
    global originalbet
    NoChips()
    NumberofChips(1)
    print("How much would you like to bet? (Remember: this will be the bet per spin of the slot machine)")
    betString = raw_input()
    bet = [int(s) for s in betString.split() if s.isdigit()][0]
    originalbet = bet
    if bet > chips:
        print("You don't have enough chips for that. You have " + str(chips) + " chips.")
        time.sleep(3)
        mediumWheels()  # BUGFIX: the original restarted hardWheels() here
        return
    elif bet == 0:
        print("You can't bet 0 chips!")
        mediumWheels()  # BUGFIX: the original restarted hardWheels() here
        return
    print("After each spin, you can either spin again (Just press enter), quit (q), or change bet (c). Press enter after each input.")
    time.sleep(4)
    while True:
        wheels = []
        chips -= bet  # the spin itself costs the bet
        for x in range(3):
            number = random.choice(slots[0:13])
            wheels += [number]
        print(wheels[0] + " " + wheels[1] + " " + wheels[2])
        if wheels[0] == wheels[1] == wheels[2]:
            # Payout table for the medium machine.
            if wheels[0] == '-':
                print("+" + str(bet * 5) + " chips")
                chips += (bet * 5)
            elif wheels[0] == '=':
                print("+" + str(bet * 10) + " chips")
                chips += (bet * 10)
            elif wheels[0] == '≡':
                print("+" + str(bet * 15) + " chips")
                chips += (bet * 15)
            elif wheels[0] == 'ꭥ':
                print("+" + str(bet * 30) + " chips")
                chips += (bet * 30)
            elif wheels[0] == '♥':
                print("+" + str(bet * 50) + " chips")
                chips += (bet * 50)
        NoChips()
        NumberofChips(4)
        answer = raw_input().lower()
        if answer == '':
            continue
        elif answer == 'q':
            Menu()
            return  # BUGFIX: don't resume spinning after the menu unwinds
        elif answer == 'c':
            NumberofChips(1)
            print("How much would you like to bet? (Remember: this will be the bet per spin of the slot machine)")
            betString = raw_input()
            bet = [int(s) for s in betString.split() if s.isdigit()][0]
            if bet > chips:
                print("You don't have enough chips for that. You have " + str(chips) + " chips.")
                time.sleep(3)
                print("Bet will stay as current bet.")
                time.sleep(2)
                bet = originalbet
                continue
            elif bet == 0:
                print("You can't bet 0 chips!")
                time.sleep(2)
                print("Bet will stay as current bet.")
                time.sleep(2)
                bet = originalbet
                continue
        # Remember the bet now in force so a rejected change can restore it.
        originalbet = bet
def hardWheels():
    # Hard slot machine: reels drawn from the full symbol list (including
    # '$' and '7'), with the largest multipliers (10x-500x) for a triple.
    global wheels
    global bet
    global chips
    global originalbet
    NoChips()
    NumberofChips(1)
    print("How much would you like to bet? (Remember: this will be the bet per spin of the slot machine)")
    betString = raw_input()
    bet = [int(s) for s in betString.split() if s.isdigit()][0]
    originalbet = bet
    if bet > chips:
        print("You don't have enough chips for that. You have " + str(chips) + " chips.")
        time.sleep(3)
        hardWheels()
        return  # BUGFIX: don't fall through into this frame after the retry
    elif bet == 0:
        print("You can't bet 0 chips!")
        hardWheels()
        return  # BUGFIX: same fall-through problem as above
    print("After each spin, you can either spin again (Just press enter), quit (q), or change bet (c). Press enter after each input.")
    time.sleep(4)
    while True:
        wheels = []
        chips -= bet  # the spin itself costs the bet
        for x in range(3):
            number = random.choice(slots)
            wheels += [number]
        print(wheels[0] + " " + wheels[1] + " " + wheels[2])
        if wheels[0] == wheels[1] == wheels[2]:
            # Payout table for the hard machine.
            if wheels[0] == '-':
                print("+" + str(bet * 10) + " chips")
                chips += (bet * 10)
            elif wheels[0] == '=':
                print("+" + str(bet * 20) + " chips")
                chips += (bet * 20)
            elif wheels[0] == '≡':
                print("+" + str(bet * 30) + " chips")
                chips += (bet * 30)
            elif wheels[0] == 'ꭥ':
                print("+" + str(bet * 50) + " chips")
                chips += (bet * 50)
            elif wheels[0] == '♥':
                print("+" + str(bet * 100) + " chips")
                chips += (bet * 100)
            elif wheels[0] == '$':
                print("+" + str(bet * 250) + " chips")
                chips += (bet * 250)
            elif wheels[0] == '7':
                print("+" + str(bet * 500) + " chips")
                chips += (bet * 500)
        NoChips()
        NumberofChips(4)
        answer = raw_input().lower()
        if answer == '':
            continue
        elif answer == 'q':
            Menu()
            return  # BUGFIX: don't resume spinning after the menu unwinds
        elif answer == 'c':
            NumberofChips(1)
            print("How much would you like to bet? (Remember: this will be the bet per spin of the slot machine)")
            betString = raw_input()
            bet = [int(s) for s in betString.split() if s.isdigit()][0]
            if bet > chips:
                print("You don't have enough chips for that. You have " + str(chips) + " chips.")
                time.sleep(3)
                print("Bet will stay as current bet.")
                time.sleep(2)
                bet = originalbet
                continue
            elif bet == 0:
                print("You can't bet 0 chips!")
                time.sleep(2)
                print("Bet will stay as current bet.")
                time.sleep(2)
                bet = originalbet
                continue
        # BUGFIX: easyWheels/mediumWheels update originalbet after a valid bet
        # change but hardWheels never did, so a later rejected change restored
        # a stale bet instead of the most recent one.
        originalbet = bet
def Blackjack():
    """Interactive blackjack round against a simulated dealer (Python 2).

    Reads the bet from stdin, deals cards until the player stands,
    hits 21 or busts, then simulates a dealer hand and settles the
    module-level `chips` balance. Recurses into itself to replay and
    falls back to Menu() otherwise.

    BUG FIX: the indentation of this function was destroyed (every line
    flattened to column 0), making the module a SyntaxError; the nesting
    below is reconstructed from the control-flow keywords.
    """
    global chips
    global cards
    NoChips()
    NumberofChips(1)
    aces = 0       # total aces dealt
    softaces = 0   # aces already demoted from 11 to 1
    hand = []
    total = 0
    bjstart = True  # True until the opening two-card deal is complete
    print("How much do you want to bet?")
    betString = raw_input()
    bet = [int(s) for s in betString.split() if s.isdigit()][0]
    if bet > chips:
        print("You don't have enough chips for that. You have " + str(chips) + " chips.")
        time.sleep(3)
        Blackjack()
    elif bet == 0:
        print("You can't bet 0 chips!")
        Blackjack()
    while total < 21:
        card = random.choice(cards)
        hand += [card]
        if card == 'J' or card == 'Q' or card == 'K':
            total += 10
        elif card == 'A':
            total += 11
            aces += 1
        else:
            total += int(card)
        if bjstart == True:
            # Opening deal: draw the second card immediately.
            bjstart = False
            card = random.choice(cards)
            hand += [card]
            if card == 'J' or card == 'Q' or card == 'K':
                total += 10
            elif card == 'A':
                total += 11
                aces += 1
            else:
                total += int(card)
        else:
            # Soft-ace handling: demote one ace from 11 to 1 if we
            # would otherwise bust.
            if 'A' in hand:
                if total > 21:
                    if aces > softaces:
                        total -= 10
                        softaces += 1
        print("Your cards are " + str(hand) + " (" + str(total) + ")")
        time.sleep(2)
        if total == 21:
            print("You got blackjack!")
            time.sleep(2)
            break
        elif total > 21:
            print("Sorry, you busted.")
            chips -= bet
            time.sleep(2)
            print("Play again?")
            answer = raw_input().lower()
            if 'y' in answer and 'n' not in answer:
                Blackjack()
            elif 'n' in answer:
                Menu()
            else:
                print("Unknown answer. Going back to menu.")
                time.sleep(2)
                Menu()
        else:
            print("Hit or stay?")
            answer = raw_input().lower()
            if 's' in answer:
                print("You have decided to stay.")
                time.sleep(2)
                break
            elif 'h' in answer:
                print("You have decided to hit.")
                time.sleep(2)
            elif total > 17:
                print("Unknown answer. Assuming you will stay.")
                time.sleep(2)
                break
            else:
                print("Unknown answer. Assuming you will hit.")
                time.sleep(2)
    print("Let's see what the dealer has.")
    time.sleep(2)
    # Dealer total is simulated relative to the player's total.
    dealerhand = random.randint(total, total + 2)
    if dealerhand > 21:
        print("The dealer busted! You win " + str(bet * 2) + " chips!")
        chips += bet
    elif dealerhand == total:
        print("The dealer got the same total as you. You get your original bet back (" + str(bet) + ")")
    else:
        print("The dealer beat your hand. You don't win anything")
        chips -= bet
    time.sleep(2)
    print("Play again?")
    answer = raw_input().lower()
    if 'y' in answer and 'n' not in answer:
        Blackjack()
    elif 'n' in answer:
        Menu()
    else:
        print("Unknown answer. Going back to menu.")
        time.sleep(2)
        Menu()
Menu() | [
"loganw9999@gmail.com"
] | loganw9999@gmail.com |
47c4e431686bd9948b48577adbf24aab230585ff | 0ee8ea963a8fa0aa9951696682ffdaa29e26745c | /back-end/apps/products/apiGraphQl/schema.py | dbb6e5d7e16fb6fc57c9a4e8c92f3bf49855ad8e | [] | no_license | acorrea-B/KarlaCommerce | ddfdabb74eb4d5f9f5e5a96cedc368477c6a18e7 | f180fcf58c49a1bfce60ed6bcbefb8902c42c268 | refs/heads/main | 2023-07-08T02:01:51.901763 | 2021-08-08T03:40:04 | 2021-08-08T03:40:04 | 390,139,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | import graphene
from .nodes import ProductNode
from ..models import Product
class ProductQuery(graphene.ObjectType):
    """Root GraphQL query type exposing the shop's product catalogue."""
    # Unfiltered, unpaginated list of every Product.
    list_products = graphene.List( ProductNode )
    def resolve_list_products(root, info, **kwargs):
        """
        Return the full list of the shop's products, with no
        filtering or pagination applied.
        """
        return Product.objects.all()
"aljyque95@gmail.com"
] | aljyque95@gmail.com |
f2e88da8862e71c22ad12aa32a41af1c3f6d44ae | 11398711ca0ec8e5e699a9a87d79ad1fea6a1bf3 | /venv/bin/easy_install | 4d374412ec4b07f4397fda5ecc845193c05bf97b | [] | no_license | ThatGuyNamedTim/CapitalOneAirbnb | d86e62f359343af157d1bb484919aa6bab7d118e | 507e6ab6277ea3097a28fc56a5483c1a0ada7f89 | refs/heads/master | 2018-12-23T05:04:10.941995 | 2018-10-10T12:55:27 | 2018-10-10T12:55:27 | 109,745,663 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | #!/home/tim/Documents/CapitalOneAirbnb/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
# Generated console-script entry point: strip any "-script.pyw"/".exe"
# suffix from argv[0] (Windows launcher artefact), then delegate to
# setuptools' easy_install main().
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"tim@arch.umd.edu"
] | tim@arch.umd.edu | |
43de55f5e7ff24beb62ff9ef00a46e24edd4bae1 | b384480f7e3fd81ab7d1ab743581c91a0e786f75 | /dplib/dp_mechanisms.py | 69a0b9721eba5dec524652d1f4c718de31f1aa31 | [] | no_license | kamathakshay/dp-deep-learning | 6f653a5d18f8f380d850ab707a1a9c65babfc39a | 6d652a7a86964e23d0837b14c1b350f7b6c27752 | refs/heads/master | 2022-11-01T04:23:23.894232 | 2018-12-05T01:21:13 | 2018-12-05T01:21:13 | 154,505,334 | 0 | 1 | null | 2022-10-09T19:39:55 | 2018-10-24T13:23:19 | Python | UTF-8 | Python | false | false | 3,557 | py | from __future__ import division
import tensorflow as tf
from per_example_gradients import PerExampleGradients
def clipped_gradients(loss, params, clipbound = None, epsilon = None, delta = None) :
    """Compute per-example gradients of `loss` w.r.t. `params`, sanitized
    for differential privacy (per-example L2 clipping + Gaussian noise).

    Args:
      loss: scalar loss tensor.
      params: iterable of variables/tensors to differentiate against.
      clipbound: per-example L2-norm clip bound (passed to sanitize()).
      epsilon: eps of (eps, delta)-DP.
      delta: delta of (eps, delta)-DP.

    Returns:
      A list of sanitized gradient tensors, one per entry of `params`.

    BUG FIXES: the body referenced the undefined names `var_list`
    (should be `params`), `per_example_gradients` (the import brings in
    `PerExampleGradients` directly), `self._sanitizer` (this is a free
    function; the module-level sanitize() is used), and the misspelled
    keyword `clipboud`.
    """
    xs = [tf.convert_to_tensor(x) for x in params]
    px_grads = PerExampleGradients(loss, xs)
    sanitized_grads = []
    for px_grad, v in zip(px_grads, xs):
        sanitized_grad = sanitize(px_grad, clipbound = clipbound,
                                  epsilon = epsilon, delta = delta)
        sanitized_grads.append(sanitized_grad)
    return sanitized_grads
def sanitize(x, clipbound = None, epsilon=None, delta= None):
    """Sanitize a batch of per-example values for differential privacy.

    Clips each dimension-0 slice of `x` to L2 norm `clipbound` (when
    given), sums over the batch, and adds Gaussian noise calibrated to
    (epsilon, delta)-DP.

    Args:
      x: tensor of per-example values, shape [num_examples, ...].
      clipbound: L2-norm bound for per-example clipping; if None, no
        clipping and no noise is applied (the plain sum is returned).
      epsilon: eps for (eps, delta)-DP; must be > 0.
      delta: delta for (eps, delta)-DP; must be > 0.

    Returns:
      A sanitized tensor: the (optionally clipped and noised) batch sum.
    """
    eps, delta = (epsilon, delta)
    with tf.control_dependencies(
        [tf.Assert(tf.greater(eps, 0),
                   ["eps needs to be greater than 0"]),
         tf.Assert(tf.greater(delta, 0),
                   ["delta needs to be greater than 0"])]):
        # Gaussian-mechanism noise scale, from Dwork and Roth, The
        # Algorithmic Foundations of Differential Privacy, Appendix A.
        # http://www.cis.upenn.edu/~aaroth/Papers/privacybook.pdf
        sigma = tf.sqrt(2.0 * tf.log(1.25 / delta)) / eps
        l2norm_bound = clipbound
        if l2norm_bound is not None:
            x = BatchClipByL2norm(x, l2norm_bound)
            saned_x = AddGaussianNoise(tf.reduce_sum(x, 0),
                                       sigma * l2norm_bound)
        else:
            # BUG FIX: this branch was commented out, so `saned_x` was
            # unbound (UnboundLocalError) whenever clipbound was None.
            saned_x = tf.reduce_sum(x, 0)
        return saned_x
#TAKEN FROM UTILS OF OP
#TAKEN FROM UTILS OF OP
def AddGaussianNoise(t, sigma):
    """Add i.i.d. Gaussian noise (0, sigma^2) to every entry of t.
    Args:
      t: the input tensor.
      sigma: the stddev of the Gaussian noise.
    Returns:
      the noisy tensor, same shape as t.
    """
    # name_scope only labels the ops in the graph; the bound `name` is
    # not otherwise used.
    with tf.name_scope(values=[t, sigma],
                       default_name="add_gaussian_noise") as name:
        noisy_t = t + tf.random_normal(tf.shape(t), stddev=sigma)
        return noisy_t
#TAKEN FROM UTILS OF OP
def BatchClipByL2norm(t, upper_bound):
"""Clip an array of tensors by L2 norm.
Shrink each dimension-0 slice of tensor (for matrix it is each row) such
that the l2 norm is at most upper_bound. Here we clip each row as it
corresponds to each example in the batch.
Args:
t: the input tensor.
upper_bound: the upperbound of the L2 norm.
Returns:
the clipped tensor.
"""
assert upper_bound > 0
with tf.name_scope(values=[t, upper_bound], name=name,
default_name="batch_clip_by_l2norm") as name:
saved_shape = tf.shape(t)
batch_size = tf.slice(saved_shape, [0], [1])
t2 = tf.reshape(t, tf.concat(axis=0, values=[batch_size, [-1]]))
upper_bound_inv = tf.fill(tf.slice(saved_shape, [0], [1]),
tf.constant(1.0/upper_bound))
# Add a small number to avoid divide by 0
l2norm_inv = tf.rsqrt(tf.reduce_sum(t2 * t2, [1]) + 0.000001)
scale = tf.minimum(l2norm_inv, upper_bound_inv) * upper_bound
clipped_t = tf.matmul(tf.diag(scale), t2)
clipped_t = tf.reshape(clipped_t, saved_shape, name=name)
return clipped_t
| [
"kamath.akshay@gmail.com"
] | kamath.akshay@gmail.com |
1476a08e4cc839c82fff0b0b997018ff4664d677 | 3ff20e2271e422806f78d13bb64b813e3817eb43 | /app.py | de2b32950dfab0c188e42b94c41090ebf4be63b3 | [] | no_license | daniel2346/Endpoints_Python_Finkargo | 1a4b2ffdd92858d3a8a121aed00e2c5b091a763e | ddef132c8c289068ecbb1fe31dedcfd1c9c608a6 | refs/heads/main | 2023-07-16T08:57:49.052337 | 2021-09-06T12:53:20 | 2021-09-06T12:53:20 | 403,467,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,528 | py | from flask import Flask, jsonify, request, make_response
import mysql.connector
from mysql.connector import errorcode
from flask_httpauth import HTTPBasicAuth
import MySQLdb
from flask_sqlalchemy import SQLAlchemy
import re
import collections
import json
USER_DATA ={
"admin":"root"
}
app = Flask(__name__)
db = SQLAlchemy(app)
auth = HTTPBasicAuth()
@auth.verify_password
def verify(username, password):
    """Check HTTP basic-auth credentials against the in-memory USER_DATA map."""
    if not username or not password:
        return False
    return USER_DATA.get(username) == password
@app.route('/sortlist/<int_list>', methods=['GET'])
@auth.login_required
def sortArray(int_list):
    """Sum a comma-separated list of integers, then report the digits of
    that sum both unsorted and "classified" (unique digits in ascending
    order, followed by any digits that appeared more than once).

    Returns 400 when the path segment is not a comma-separated integer
    list.
    """
    if not re.match(r'^\d+(?:,\d+)*,?$', int_list):
        return "Por favor colocar una serie de números únicamente", 400
    # BUG FIX: the regex accepts a trailing comma, but the original sum
    # then called int('') and raised ValueError; empty fragments are
    # skipped here.
    digit_sum = sum(int(i) for i in int_list.split(',') if i)
    digits = [int(d) for d in str(digit_sum)]
    # Digits occurring more than once, in first-occurrence order.
    repeated = [d for d, count in collections.Counter(digits).items() if count > 1]
    unique_sorted = sorted(set(digits))
    return jsonify({"sin clasificar": digits},
                   {"clasificado": unique_sorted + repeated})
@app.route('/getbalances', methods=['POST'])
def balancePerMonth():
    """Compute the per-month balance (ventas - gastos) from a JSON body.

    Expects {"mes": [...], "ventas": [...], "gastos": [...]} with three
    equal-length arrays; returns one {mes, ventas, gastos, balance}
    object per month, or 400 on malformed input.
    """
    if not request.is_json:
        return jsonify({"errorMessage": "No se encontró objeto JSON en la solicitud"}), 400
    jsonRequest = json.loads(request.data)
    months = jsonRequest['mes']
    sales = jsonRequest['ventas']
    expenses = jsonRequest['gastos']
    response = []
    # The three arrays must line up month by month.
    if len(sales) != len(months) or len(expenses) != len(months) :
        return jsonify({"errorMessage": "Error en la estructura del JSON, por favor verificar"}), 400
    for i in range(len(months)):
        # NOTE(review): isinstance(..., int) rejects float amounts (and
        # would accept bools) — confirm integer-only amounts are intended.
        if not isinstance(sales[i], int) or not isinstance(expenses[i], int) :
            return jsonify({"errorMessage": "Error en la estructura del JSON, por favor verificar"}), 400
        response.append({"mes": months[i],"ventas":sales[i],"gastos":expenses[i], "balance": sales[i] -expenses[i] })
    return jsonify(response)
@app.route('/database', methods=['GET','POST'])
@auth.login_required
def handleDatabase():
    """List (GET) or create (POST) rows of the `usuarios` table.

    GET returns every row as JSON; POST expects a JSON body with
    Nombres/Apellidos/Edad/Nacionalidad and inserts a new row.
    Creates the table on first use.
    """
    try:
        db_connection = mysql.connector.connect(
            host= "localhost",
            user= "root",
            passwd= "",
            database="db_finkargo_daag"
        )
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            return "Usuario o clave incorrectos", 405
        else:
            return "No se pudo conectar al servidor", 500
    else:
        cursor = db_connection.cursor()
        cursor.execute("CREATE TABLE IF NOT EXISTS `usuarios` (`id` INT AUTO_INCREMENT PRIMARY KEY,`Nombres` VARCHAR(20), `Apellidos` VARCHAR(20),"
                       "`Edad` int(3), `Nacionalidad` VARCHAR(20))")
        if request.method == 'GET':
            cursor.execute("SELECT * FROM usuarios")
            row_headers = [x[0] for x in cursor.description]
            myresult = cursor.fetchall()
            response = [dict(zip(row_headers, row)) for row in myresult]
            return jsonify(response)
        if request.method == 'POST':
            if not request.is_json:
                return jsonify({"errorMessage": "No se encontró objeto JSON en la solicitud"}), 400
            r_nombres = request.json['Nombres']
            r_apellidos = request.json['Apellidos']
            r_edad = request.json['Edad']
            r_nacionalidad = request.json['Nacionalidad']
            # SECURITY FIX: the INSERT was built with %-string formatting
            # from request data (SQL injection); use a parameterized
            # query so the driver escapes the values.
            sql = ("insert into usuarios (Nombres, Apellidos, Edad, Nacionalidad) "
                   "values (%s, %s, %s, %s)")
            try:
                cursor.execute(sql, (r_nombres, r_apellidos, r_edad, r_nacionalidad))
                db_connection.commit()
            except (MySQLdb.Error, MySQLdb.Warning) as e:
                db_connection.rollback()
                return e, 400
            return "Proceso realizado con exito"
@app.route('/database/<id>', methods=['PUT','DELETE'])
@auth.login_required
def handleDatabaseUpdateDelete(id):
    """Update (PUT) or delete (DELETE) one row of `usuarios` by id.

    PUT expects a JSON body with Nombres/Apellidos/Edad/Nacionalidad.
    Creates the table on first use.
    """
    try:
        db_connection = mysql.connector.connect(
            host= "localhost",
            user= "root",
            passwd= "",
            database="db_finkargo_daag"
        )
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            return "Usuario o clave incorrectos", 405
        else:
            return "No se pudo conectar al servidor", 500
    else:
        cursor = db_connection.cursor()
        cursor.execute("CREATE TABLE IF NOT EXISTS `usuarios` (`id` INT AUTO_INCREMENT PRIMARY KEY,`Nombres` VARCHAR(20), `Apellidos` VARCHAR(20),"
                       "`Edad` int(3), `Nacionalidad` VARCHAR(20))")
        if request.method == 'PUT':
            if not request.is_json:
                return jsonify({"errorMessage": "No se encontró objeto JSON en la solicitud"}), 400
            r_nombres = request.json['Nombres']
            r_apellidos = request.json['Apellidos']
            r_edad = request.json['Edad']
            r_nacionalidad = request.json['Nacionalidad']
            try:
                cursor.execute("UPDATE usuarios SET Nombres=%s,Apellidos=%s,Edad=%s,Nacionalidad=%s WHERE id=%s",
                               (r_nombres, r_apellidos, r_edad, r_nacionalidad, id))
                db_connection.commit()
            except Exception:
                # Narrowed from a bare `except:` so SystemExit etc. are
                # not swallowed.
                db_connection.rollback()
                return "error en la solicitud", 400
            return "Proceso realizado con exito"
        if request.method == 'DELETE':
            try:
                # SECURITY FIX: the DELETE was built by string
                # concatenation with the URL path segment (SQL
                # injection); use a parameterized query instead.
                cursor.execute("DELETE FROM usuarios WHERE id = %s", (id,))
                db_connection.commit()
            except (MySQLdb.Error, MySQLdb.Warning) as e:
                db_connection.rollback()
                return e, 400
            return "Proceso realizado con exito"
if __name__ == '__main__':
app.run(debug=True, port=4000) | [
"noreply@github.com"
] | noreply@github.com |
769a920462f74093deebe33e6db9ca5f4ce57734 | bc6e2056500afdd5d11a28a613d6d73f5dd05447 | /moneycash/produccion/admin.py | 28741262c044e84e45e8db2088d83ef264941422 | [] | no_license | xangcastle/respaldo | d0540fabc089f947f052019431d55a9c3c85f131 | 48c5f53b2a2bce0bfa79b1fcc657aa40268e702b | refs/heads/master | 2021-01-10T01:52:48.102689 | 2015-12-16T15:42:02 | 2015-12-16T15:42:02 | 48,118,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,700 | py | from django.contrib import admin
from .models import *
from moneycash.entidad import entidad_admin
from moneycash.documento import documento_admin
from django.template.context import RequestContext
from django.shortcuts import render_to_response
class contadores_tabular(admin.TabularInline):
    """Inline editor for per-period equipment counters (equipo_periodo),
    embedded in the Periodo admin."""
    model = equipo_periodo
    extra = 0
    classes = ('grp-collapse grp-open',)  # grappelli collapsible group
    #fields = ('equipo', 'contador_inicial', 'contador_final')
class detalle_recibo_tabular(admin.TabularInline):
    """Inline editor for receipt line items (recibo_detalle), embedded
    in the Recibo admin."""
    model = recibo_detalle
    extra = 0
    classes = ('grp-collapse grp-open',)  # grappelli collapsible group
class recibo_admin(documento_admin):
    """Admin for Recibo documents: printable receipts with inline line
    items, plus bulk print/invoice actions."""
    list_display = ('numero', 'fecha', 'area', 'copias', 'importe')
    inlines = [detalle_recibo_tabular]
    fieldsets = (('Datos del Recibo',
        {'classes': ('grp-collapse grp-open',),
        'fields': (('numero', 'fecha'), 'area'), }),
        ("Detalle Inlines", {"classes":
        ("placeholder recibo_detalle_set-group",), "fields": ()}),
        ('Datos de Facturacion',
        {'classes': ('grp-collapse grp-open',),
        'fields': (('copias', 'importe', 'tc'),), }),)
    actions = ['generar_imprimir', 'facturar']
    list_filter = ('periodo', 'area')
    def generar_imprimir(self, request, queryset):
        # Mark each receipt as printed, then render the print template
        # (single-receipt layout when exactly one is selected).
        for r in queryset:
            r.impreso = True
            r.save()
        id_unico = False
        if queryset.count() == 1:
            id_unico = True
        ctx = {'queryset': queryset, 'id_unico': id_unico}
        return render_to_response('moneycash/produccion/recibo.html', ctx,
            context_instance=RequestContext(request))
    generar_imprimir.short_description = "Imprimir recibos selecionados"
    def facturar(self, request, queryset):
        # NOTE(review): inside the method, `facturar` resolves to the
        # module-level helper pulled in by `from .models import *` — not
        # to this action. Confirm models exports `facturar`; the action
        # also lacks a short_description.
        facturar(queryset)
class periodo_admin(admin.ModelAdmin):
    """Admin for production periods, with bulk actions that delegate to
    the module-level helpers (crear_recibos, cargar_copias,
    activar_equipos, cerrar) imported from .models."""
    list_display = ('short_name', 'inicio_produccion', 'fin_produccion',
        'copias_equipos', 'copias_areas', 'importe_produccion', 'cerrado')
    inlines = [contadores_tabular]
    fieldsets = (('Datos del Periodo', {'classes': ('grp-collapse grp-open',),
        'fields': (('fecha_inicial', 'fecha_final'),
        ('inicio_produccion', 'fin_produccion'),)}),)
    def generar_recibos(self, request, queryset):
        for p in queryset:
            crear_recibos(p)
    generar_recibos.short_description = \
        'generar recibos de los periodos seleccionados'
    def cargar_copias(self, request, queryset):
        # `cargar_copias(p)` resolves to the module-level helper, not
        # this method (methods are not in scope inside their own body).
        for p in queryset:
            cargar_copias(p)
    cargar_copias.short_description = \
        'cargar copias de los periodos seleccionados'
    def activar_equipos(self, request, queryset):
        for p in queryset:
            activar_equipos(p)
    activar_equipos.short_description = \
        'activar equipos de los periodos seleccionados'
    def cerrar_(self, request, queryset):
        for p in queryset:
            cerrar(p)
    # BUG FIX: the label was assigned to the module-level `cerrar`
    # helper instead of this action method, so the admin showed the
    # default caption for the action.
    cerrar_.short_description = \
        'cerrar periodos seleccionados'
    actions = [generar_recibos, cargar_copias, activar_equipos, cerrar_]
class equipo_admin(entidad_admin):
    """Admin for copier/printer equipment: inventory data plus the
    counters and cost figures used for billing."""
    list_display = ('code', 'modelo', 'serie', 'marca', 'contador_inicial',
        'contador_actual', 'vida_util', 'costo_compra', 'depreciacion_copia',
        'valor_depreciado', 'precio_venta', 'activo',)
    search_fields = ('code', 'name', 'modelo', 'serie')
    list_filter = ('activo', 'marca', 'ubicacion')
    fieldsets = (('Datos Generales',
        {'classes': ('grp-collapse grp-open',),
        'fields': (('code', 'modelo'), ('serie', 'marca'),
        ('velocidad', 'ubicacion')), }),
        ('Datos de Facturacion',
        {'classes': ('grp-collapse grp-open',),
        'fields': (('contador_inicial', 'contador_actual', 'vida_util'),
        ('costo_compra', 'depreciacion_copia', 'valor_depreciado'),
        ('precio_venta', 'activo'), ('costo_copia',
        'precio_copia')), }),)
    ordering = ['code']
class cliente_admin(entidad_admin):
    """Admin for clients: contact and identification data."""
    list_display = ('code', 'name', 'identificacion', 'telefono', 'direccion',
        'activo')
    search_fields = ('code', 'name', 'telefono')
    list_filter = ('activo', )
    fieldsets = (('Datos Generales',
        {'classes': ('grp-collapse grp-open',),
        'fields': (('code', 'name'), ('identificacion', 'telefono'),
        ('direccion',), ('contacto', 'nombre_area'), 'activo'), }),)
class area_admin(entidad_admin):
    """Admin for client areas (departments) where equipment is placed."""
    list_display = ('code', 'name', 'encargado', 'unidad_ejecutora',
        'ubicacion', 'activo')
    search_fields = ('code', 'name', 'encargado')
    list_filter = ('activo', 'cliente', 'ubicacion')
    fieldsets = (('Datos del Area',
        {'classes': ('grp-collapse grp-open',),
        'fields': (('code', 'name'), ('encargado', 'unidad_ejecutora'),
        ('equipos', 'activo'), ('ubicacion', 'cliente'), 'item'), }),)
class factura_detalle_admin(admin.TabularInline):
    """Inline editor for invoice line items (factura_detalle), embedded
    in the Factura admin."""
    model = factura_detalle
    extra = 0
    classes = ('grp-collapse grp-open',)  # grappelli collapsible group
class factura_admin(documento_admin):
    """Admin for invoices: totals/taxes display, inline line items and a
    bulk print action."""
    list_display = ('numero', 'fecha', 'cliente', 'subtotal', 'descuento',
        'iva', 'total', 'total', 'tc', 'ir', 'al', 'impreso')
    fieldsets = (
        ('Datos de la Factura',
        {'classes': ('grp-collapse grp-open',),
        'fields': (('numero', 'fecha'), 'cliente',
        ('exento_iva', 'exento_ir', 'exento_al')), }),
        ("Detalle Inlines",
        {"classes": ("placeholder factura_detalle_set-group",),
        'fields': ()}),
        ('Totales de la Factura',
        {'classes': ('grp-collapse grp-open',),
        'fields': (('subtotal', 'descuento'),
        ('iva', 'total'), ('ir', 'al'), 'tc'), }),
        )
    inlines = [factura_detalle_admin]
    actions = ['generar_imprimir']
    def generar_imprimir(self, request, queryset):
        # Render the invoice print template, marking every selected
        # invoice as printed (single-invoice layout when one selected).
        id_unico = False
        if queryset.count() == 1:
            id_unico = True
        ctx = {'queryset': queryset, 'id_unico': id_unico}
        queryset.update(impreso=True)
        return render_to_response('moneycash/produccion/factura.html', ctx,
            context_instance=RequestContext(request))
    generar_imprimir.short_description = "Imprimir Facturas Selecionadas"
generar_imprimir.short_description = "Imprimir Facturas Selecionadas"
# Wire each model to its ModelAdmin (plain entidad_admin where no
# customisation is needed).
admin.site.register(Marca, entidad_admin)
admin.site.register(Equipo, equipo_admin)
admin.site.register(Area, area_admin)
admin.site.register(Ubicacion, entidad_admin)
admin.site.register(Cliente, cliente_admin)
admin.site.register(Periodo, periodo_admin)
admin.site.register(Recibo, recibo_admin)
admin.site.register(Factura, factura_admin)
admin.site.register(Item, entidad_admin)
admin.site.register(Categoria, entidad_admin)
| [
"cesarabel@johnmay.com.ni"
] | cesarabel@johnmay.com.ni |
6641e64590f5d5a87376b0da7dd60cb4abca9df8 | 381c0c5080ca97ffa2ef2fafea236c723869e1cd | /0_Python_scripts/10.1_T_codon_enrichment.py | 7a5695fa98bba4046cf0150b953e4a0577cad8ab | [] | no_license | ath32/ASCs | 051d60510c4ed09d986f5cd30eddad66a573b5d3 | 33b0ee4df0cab2f0e2a6d06a32fd7231fb5799a2 | refs/heads/master | 2020-04-22T15:31:53.858680 | 2019-08-12T11:55:37 | 2019-08-12T11:55:37 | 170,480,341 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,686 | py | ### IMPORTS ###
import os
import csv
import numpy as np
### CHOOSE SOURCE FOLDER (EMBL) ###
source = '2_FASTA_Eubacteria_cds_TT11'
#source = '2_HEGs_fasta'
#source = '2_LEGs_fasta'
#source = '2_FASTA_Eubacteria_cds_TT11_TAA'
#source = '2_FASTA_Eubacteria_cds_TT11_TGA'
#source = '2_FASTA_Eubacteria_cds_TT11_TAG'
### FUNCTIONS ###
def main():
    """Walk every FASTA file under `source`, compute T-starting-codon
    frequencies at 3' UTR position 1 vs the mean of positions 3-6, and
    write one CSV row per genome (accession, per-codon frequencies and
    enrichment ratios, GC, GC3)."""
    CSV_total = []
    for root, dirs, filenames in os.walk(source):
        for f in filenames:
            path = os.path.join(source, f)
            raw = open(path).read()
            if '>' in raw:
                #Split into gene list for use in functions
                genes = raw.strip().split('>')
                genes_c = list(filter(None, genes))
                #Obtain accession (first ';'-field of the first header)
                a = raw.split('>')
                a1 = a[1].split(';')
                accession = a1[0]
                #Obtain GC and GC3 from the "GC=..." / "GC3=..." header fields
                a2 = a1[1].strip()
                a3 = a2.split("=")
                GC = float(a3[1])
                a4 = a1[2].strip()
                a5 = a4.split("=")
                GC3 = float(a5[1])
                #Get list of codon-split UTR sequences
                utr_list = get_utrs(genes_c)
                if len(utr_list) > 0:
                    #Get position 1 frequencies
                    nested_1, codons = get_frequencies(utr_list, 1)
                    #Get positions 3-6
                    nested_3, codons = get_frequencies(utr_list, 3)
                    nested_4, codons = get_frequencies(utr_list, 4)
                    nested_5, codons = get_frequencies(utr_list, 5)
                    nested_6, codons = get_frequencies(utr_list, 6)
                    #Calculate average frequencies for 3-6
                    averages = get_averages(nested_3, nested_4, nested_5, nested_6)
                    #Only use genomes where average > 0 (ratio is undefined otherwise)
                    if zero_check(averages) == True:
                        #Combine data in format ready for CSV, and calculate differences
                        freq_pos1 = [i[1] for i in nested_1]
                        combined = [freq_pos1, averages]
                        output = [list(i) for i in zip(*combined)]
                        for i in output:
                            # Enrichment ratio: pos-1 freq relative to pos-3-6 mean
                            ratio = (i[0] / i[1]) - 1
                            i.append(ratio)
                        #Flatten to create CSV line for each genome
                        output_flattened = [item for sublist in output for item in sublist]
                        #Add accession, GC and GC3
                        output_with_info = [[accession], output_flattened, [GC], [GC3]]
                        CSV_line = [item for sublist in output_with_info for item in sublist]
                        CSV_total.append(CSV_line)
    headers = ['Accession', 'taa_1', 'taa_3-6', 'taa_ratio',
                'tga_1', 'tga_3-6', 'tga_ratio',
                'tag_1', 'tag_3-6', 'tag_ratio',
                'ttt_1', 'ttt_3-6', 'ttt_ratio',
                'tta_1', 'tta_3-6', 'tta_ratio',
                'ttc_1', 'ttc_3-6', 'ttc_ratio',
                'ttg_1', 'ttg_3-6', 'ttg_ratio',
                'tat_1', 'tat_3-6', 'tat_ratio',
                'tac_1', 'tac_3-6', 'tac_ratio',
                'tca_1', 'tca_3-6', 'tca_ratio',
                'tct_1', 'tct_3-6', 'tct_ratio',
                'tcc_1', 'tcc_3-6', 'tcc_ratio',
                'tcg_1', 'tcg_3-6', 'tcg_ratio',
                'tgt_1', 'tgt_3-6', 'tgt_ratio',
                'tgc_1', 'tgc_3-6', 'tgc_ratio',
                'tgg_1', 'tgg_3-6', 'tgg_ratio',
                'GC', 'GC3']
    create_csv(headers, CSV_total)
def get_utrs(chunks):
    """Split each '>'-delimited FASTA record into its codon list.

    Each chunk is 'header\\nsequence'; the sequence (second line) is cut
    into consecutive 3-character codons (a trailing partial codon is
    kept as-is).
    """
    codon_lists = []
    for record in chunks:
        seq = record.split('\n')[1]
        codon_lists.append([seq[j:j + 3] for j in range(0, len(seq), 3)])
    return codon_lists
def get_frequencies(utr_list, position):
    """Frequency of each T-starting codon at `position` across the UTRs.

    Returns ([[codon, frequency], ...] in fixed codon order, codons).
    """
    # Fixed ordering of the 16 T-starting codons (stops first).
    codons = ['taa', 'tga', 'tag', 'ttt', 'tta', 'ttc', 'ttg',
              'tat', 'tac', 'tca', 'tct', 'tcc', 'tcg', 'tgt', 'tgc',
              'tgg']
    counts = dict.fromkeys(codons, 0)
    for utr in utr_list:
        observed = utr[position]
        if observed in counts:
            counts[observed] += 1
    # Convert raw counts to per-UTR frequencies.
    output = [[codon, counts[codon] / len(utr_list)] for codon in codons]
    return output, codons
def get_averages(nested_3, nested_4, nested_5, nested_6):
    """Mean frequency per codon across UTR positions 3-6.

    Each argument is the [[codon, frequency], ...] list produced by
    get_frequencies(); the result is a list of per-codon means in the
    same codon order.
    """
    # Strip the codon labels, keeping one frequency list per position.
    per_position = [[pair[1] for pair in nested]
                    for nested in (nested_3, nested_4, nested_5, nested_6)]
    # Regroup by codon and average over the four positions.
    return [np.mean(freqs) for freqs in zip(*per_position)]
def zero_check(averages):
    """True iff every average is strictly positive (vacuously True for
    an empty list); guards against division by zero in the ratios."""
    return all(value > 0 for value in averages)
def create_csv(headers, csv_total):
    """Write the per-genome rows to 4_Outputs/CSVs/<filename>.

    `headers` is the column-name row; `csv_total` is a list of rows.
    The output filename is chosen by (un)commenting the variants below
    to match the dataset selected at the top of the script.
    """
    filename = "10.4_T_codons_all.csv"
    #filename = "10.4_T_codons_hegs.csv"
    #filename = "10.4_T_codons_legs.csv"
    #filename = "10.4_T_codons_taa.csv"
    #filename = "10.4_T_codons_tga.csv"
    #filename = "10.4_T_codons_tag.csv"
    subdir = "4_Outputs/CSVs"
    filepath = os.path.join(subdir, filename)
    with open(filepath, 'w') as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerow(headers)
        for j in csv_total:
            writer.writerow(j)
### RUN ###
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | noreply@github.com |
1672c0561222f541e78f07447bb4d5b574ce8122 | ab1b3afffbdd05b8b9da6fe5204a4dd6e0d2b873 | /123.py | a23f96d5963e952a8dfd7d7c9cde31ae5affc47f | [] | no_license | fkgogo123/CIFAR10 | 5f6b4cffa2539d6cd8e1b09e8e7bbcd9b825d086 | 9c4c6b73c91d3da0b6f714442a539c662f8bdc77 | refs/heads/master | 2022-12-30T23:59:34.815589 | 2020-10-15T06:14:57 | 2020-10-15T06:14:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | import torch
x = torch.rand(3, 32, 32)
print(x.size())
print(x.size(0))
a = torch.randint(1, 10, [5])
b = torch.randint(2, 11, [5])
print(a)
print(b)
c = torch.eq(a, b).float().sum().item()
print(type(c))
# print(c.item())
| [
"1982614192@qq.com"
] | 1982614192@qq.com |
9883545bfdc6f01d8752f12f5121a3bab3b56b21 | 908cb47174dbc50091c315e6ca90e4746b716c6e | /apps/base_user/forms.py | bc20e2d186893363e56edb8b7a3d5911ff377daa | [] | no_license | azer1999/markatekstil | b4d24b26e531f46022bb2f1e953118fe51b9a379 | e84c4efd95a97c1f97a772b195f9de6efe10ab4f | refs/heads/master | 2023-06-16T06:09:02.511193 | 2021-07-15T12:47:01 | 2021-07-15T12:47:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,631 | py | from django import forms
from django.contrib.auth import get_user_model
from django.forms import ModelForm
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.forms import ReadOnlyPasswordHashField, AuthenticationForm
from django.contrib.auth import authenticate
from PIL import Image
# get custom user
User = get_user_model()
class MyUserCreationForm(forms.ModelForm):
    """
    A form that creates a user, with no privileges, from the given email and
    password.
    """
    error_messages = {
        'password_mismatch': _("The two password fields didn't match."),
    }
    password1 = forms.CharField(label=_("Password"),
        widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password confirmation"),
        widget=forms.PasswordInput,
        help_text=_("Enter the same password as above, for verification."))

    class Meta:
        model = User
        fields = ("email", "username","first_name", "last_name")

    def clean_password2(self):
        """Validate that the two password entries match."""
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'],
                code='password_mismatch',
            )
        return password2

    def save(self, commit=True):
        """Create the user, hashing the password via set_password()."""
        user = super(MyUserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class MyUserChangeForm(forms.ModelForm):
    """Profile-edit form for the custom user (names and photo)."""

    class Meta:
        model = User
        fields = ("first_name", "last_name","profile_photo",)
        widgets = {
            "email": forms.EmailInput(attrs={
                "class": ""
            }),
            "first_name": forms.TextInput(attrs={
                "class": ""
            }),
            "last_name": forms.TextInput(attrs={
                "class": ""
            }),
        }

    def __init__(self, *args, **kwargs):
        super(MyUserChangeForm, self).__init__(*args, **kwargs)
        # NOTE(review): 'user_permissions' is not in Meta.fields, so this
        # lookup normally finds nothing — confirm whether it is leftover
        # from a copy of Django's UserChangeForm.
        f = self.fields.get('user_permissions', None)
        if f is not None:
            f.queryset = f.queryset.select_related('content_type')

    def clean_password(self):
        # Regardless of what the user provides, return the initial value.
        # This is done here, rather than on the field, because the
        # field does not have access to the initial value.
        # NOTE(review): "password" is not among this form's fields, so
        # self.initial may not contain it — confirm this method is ever
        # invoked.
        return self.initial["password"]
"elxjd.2014@gmail.com"
] | elxjd.2014@gmail.com |
d2a2148478a07f208203d379d258d6deca8f63ff | a250ca3a58c5b6b5f4e463496cb40e6e48619c01 | /code/model.py | 98d5a7d46a1d92d1c4aa6bf64842658f3e677082 | [] | no_license | pmk2109/Patently | 6105d4037a5c752d2abecb843893b45986643692 | 98ad39721a68989933dbb15784d53664fd0bbd6f | refs/heads/master | 2021-01-24T17:34:39.355188 | 2016-09-06T18:21:31 | 2016-09-06T18:21:31 | 63,189,382 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,336 | py | import pandas as pd
import numpy as np
import string
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.stem.snowball import SnowballStemmer
from sklearn.metrics.pairwise import linear_kernel
import cPickle as pickle
import time
import msgpack
def load_data(path=None):
'''
DOCSTRING: load_data
Given the 'subset' or 'total' parameter, find the respective .csv file,
read into a csv, parse out relevant fields and pickle the relevant objects.
Returns: Dataframe object, abstracts array, descriptions array, claims array
'''
if path == 'subset':
path = '../data/total_parsed_data_subset.csv'
elif path == 'total':
path = '../data/total_parsed_data.csv'
else:
print "ERROR: pass a valid path to data as a parameter"
return
df = pd.read_csv(path)
df.fillna("", inplace=True)
abstracts = df.abstract.values
descriptions = df.description.values
claims = df.claims.values
return df, abstracts, descriptions, claims
def vectorize(text, tfidf=None):
    '''
    DOCSTRING: vectorize
    TF-IDF vectorize raw text. With a fitted `tfidf` supplied, reuse
    its vocabulary and only transform; otherwise fit a fresh
    TfidfVectorizer on the text.
    Returns: transformed matrix when `tfidf` given, otherwise
    (fit-transformed matrix, fitted vectorizer)
    '''
    if tfidf:
        return tfidf.transform(text)
    if tfidf is None:
        vectorizer = TfidfVectorizer(stop_words='english')
        return vectorizer.fit_transform(text), vectorizer
def get_similarity(vocab, idea, n_items=5):
    '''
    DOCSTRING: get_similarity
    Given `vocab` as a TF-IDF sparse matrix and a vectorized input
    `idea`, verify the column spaces match, compute cosine similarity
    (dot product of L2-normalized TF-IDF rows) and return the
    `n_items` highest scores with their row indices (unsorted).
    Returns: (scores array, indices array), or None on shape mismatch.

    BUG FIX: the mismatch message used the Python-2-only print
    statement; print() works identically on 2 and 3 here.
    '''
    if vocab.shape[1] != idea.shape[1]:
        print('ERROR: shape mismatch')
        return
    cosine_similarity = vocab * idea.T
    cs_dense = np.array(cosine_similarity.todense())
    cs_array = np.array([float(i) for i in cs_dense])
    # argpartition puts the n_items largest values in the last
    # n_items slots (unsorted) in O(n).
    ind = np.argpartition(cs_array, -n_items)[-n_items:]
    scores = cs_array[ind]
    indices = ind
    return scores, indices
def main(path, pkl):
tic = time.clock()
try:
path = sys.argv[1]
except:
print "ERROR: Specify data type [subset/total]"
return
try:
pkl = sys.argv[2]
except:
print "ERROR: Specify pickle behavior [True/False]"
return
if pkl == 'True':
try:
print 'Loading data...'
df, abstracts, descriptions, claims = load_data(path)
abstracts_tfidf, tfidf = vectorize(abstracts)
except:
print 'Error loading data!'
return
print 'Pickling data...'
# think about writing a pickle function that loops
# over a set of items passed in
pickle.dump(abstracts_tfidf, open('../data/abstracts_tfidf.p', 'wb'))
pickle.dump(tfidf, open('../data/tfidf.p', 'wb'))
df.to_msgpack('../data/dataframe.p')
print 'Finished pickling...'
elif pkl == 'False':
print 'Unpickling data...'
abstracts_tfidf = pickle.load(open('../data/abstracts_tfidf.p', 'rb'))
tfidf = pickle.load(open('../data/tfidf.p', 'rb'))
df = pd.read_msgpack('../data/dataframe.p')
else:
print "Second argument to pickle must be [True/False]"
return
toc = time.clock()
print 'User input (hardcoded)'
text = ['Blood coagulation cold plasma device that kills bacteria']
new_text_tfidf = vectorize(text, tfidf)
print 'Getting similarity...'
scores, indices = get_similarity(abstracts_tfidf, new_text_tfidf, 5)
'''
[Index([u'doc_number', u'date', u'publication_type', u'patent_length', u'title',
u'abstract', u'description', u'claims']
'''
print df.loc[indices][['doc_number', 'date', 'title', 'abstract']]
print time.clock() - tic
return
if __name__ == '__main__':
main()
| [
"pmk2109@gmail.com"
] | pmk2109@gmail.com |
2dfc3c9e0e90b30322d87f19a0f5e8464b0abd70 | b952929285c4f518d260bf6c9f70ab19ee9de9e0 | /q2e3/tests/test_chaining.py | 1716896114cf874aeda8a5088abd8e9d54a9b23e | [] | no_license | vitalwarley/ia1 | f5b31bcaaff7df6da1fe21964e0f00df8c2e717d | d4ae1243c97c31f97b57caba42951da2dae17f7f | refs/heads/master | 2021-09-15T16:42:40.187162 | 2018-06-07T02:20:43 | 2018-06-07T02:20:43 | 126,322,382 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,095 | py | import unittest
from ia1.q2e3.inference.chaining import ChainingStrategy
from ia1.q2e3.inference.rules import Rule
from ia1.q2e3.inference.utils import RulesUtils
class ChainingStrategyTest(unittest.TestCase):
def test_01_backward(self):
RulesUtils.get_rules_from_file('../rules_base')
goal = 'green'
goal_false = 'canary'
self.assertTrue(ChainingStrategy.backward(goal))
self.assertFalse(ChainingStrategy.backward(goal_false))
def test_02_backward_easy(self):
RulesUtils.get_rules_from_file('../rules_base_02')
goal = 'd'
goal_false = 'e' # Not in rules_base_02
self.assertTrue(ChainingStrategy.backward(goal))
self.assertFalse(ChainingStrategy.backward(goal_false))
def test_03_backward_less_easy(self):
RulesUtils.get_rules_from_file('../rules_base_03')
goal = 'x'
goal_false = 'y' # In rules_base_03 as csq, but need f, which can be proved True.
self.assertTrue(ChainingStrategy.backward(goal))
self.assertFalse(ChainingStrategy.backward(goal_false))
def test_04_backward_almost_not_easy(self):
RulesUtils.get_rules_from_file('../rules_base_04')
goal = 'goal'
goal_false = 'm' # Need p and q, which can't be proved.
self.assertTrue(ChainingStrategy.backward(goal))
self.assertFalse(ChainingStrategy.backward(goal_false))
def test_05_backward_medium_maybe(self):
RulesUtils.get_rules_from_file('../rules_base_05')
goal = 'goal'
goal_false = 'nao_tem' # In some rule's antecedent only
self.assertTrue(ChainingStrategy.backward(goal))
self.assertFalse(ChainingStrategy.backward(goal_false))
def test_06_forward(self):
RulesUtils.get_rules_from_file('../rules_base')
new_facts = ['frog', 'green']
self.assertEqual(ChainingStrategy.forward(), new_facts) # TODO: use RuleUtils.get_new_facts()
def test_07_forward(self):
RulesUtils.get_rules_from_file('../rules_base_02')
new_facts = ['c', 'd']
self.assertEqual(ChainingStrategy.forward(), new_facts) # TODO: use RuleUtils.get_new_facts()
def test_08_forward_left_one(self):
RulesUtils.get_rules_from_file('../rules_base_03')
new_facts = ['z', 'w', 'x']
self.assertEqual(ChainingStrategy.forward(), new_facts) # ...
def test_09_forward_duplicate_csq(self):
RulesUtils.get_rules_from_file('../rules_base_05')
new_facts = ['v', 'u', 'y', 'x', 'z', 'w', 'goal']
self.assertEqual(ChainingStrategy.forward(), new_facts)
def test_10_forward_duplicate_csq_but_satisfiable(self):
RulesUtils.get_rules_from_file('../rules_base_06')
# Same as before, but notice in rules_base_06 a rule with goal as csq again, and satisfiable.
new_facts = ['v', 'u', 'y', 'x', 'z', 'w', 'goal'] # Therefore, we test for no duplicate of goal in new_facts.
self.assertEqual(ChainingStrategy.forward(), new_facts)
def tearDown(self):
RulesUtils.clear_running_database()
| [
"wvb@ic.ufal.br"
] | wvb@ic.ufal.br |
9be1a3be6e0bda3711aa96d5d4d568c1c09b73c8 | 68a110fc825decc06d1102d381febebdbbf0f88f | /test.py | a4b26638f4fa516dac8718c3e76cb1af4634a9d1 | [] | no_license | tracefinder/TestServRepo | 15e90c8343c5bcbaac8781a374b10ed98f5204d6 | 62c2d1f1f16ad1b3e490957fadd3a5130c6e2b75 | refs/heads/master | 2021-01-22T18:23:07.432458 | 2012-08-19T14:03:51 | 2012-08-19T14:03:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | """Test server.py."""
import hashlib
import json
import requests
import sys
import threading
import time
import tserver
import unittest
class TestServer(unittest.TestCase):
"""Test class."""
def setUp(self):
"""Start the server in new thread."""
if not tserver.DEBUG:
print "Debug mode is turned off. Set tserver.DEBUG True."
sys.exit(0)
print "Trying to start server."
p1 = threading.Thread(target=tserver.run, name="Server")
p1.start()
def test_server(self):
"""Send POST request containing some commit info."""
repo = 'Repo_Test'
branch = 'Branch_Test'
commit = 'Commit_Test'
t = time.time()
tm = time.asctime()
hash_ = hashlib.sha256()
hash_.update(commit + str(t))
h = hash_.hexdigest()
info = json.dumps({'hash': h, 'repo': repo, 'branch': branch, 'commit': commit, 'time': tm})
payload = {'info': info}
r = requests.post('http://localhost:13000', data=payload)
self.assertEqual(tserver.MyHandler.debug_info, ' START: Server Time: ' + time.asctime() + '\nClient time: %s\
\nHash: %s\nRepo: %s, Branch: %s\nCommit: %s\nEND\n' % (tm, h, repo, branch, commit))
def tearDown(self):
"""Shutdown the server using command 'stop'."""
r = requests.post('http://localhost:13000', data={'command': 'stop'})
if __name__ == '__main__':
unittest.main()
sys.exit(0)
| [
"tracefinder@gmail.com"
] | tracefinder@gmail.com |
b0ed908027f0e57408b65f8aa5e9073396394613 | e77e194dfee468b0ab8db6fc51af5ad45a0cb014 | /whatstatts_v5.py | 1a789db7f6a54c77c1dfaa85b36e92f39ffbd00e | [
"MIT"
] | permissive | ausathe/whatsapp-statistics | eafa560e832e38990bb49a8a3506707ed2266c00 | 8a3bf0b800765cb3f1310f138b8611bb7f9e573b | refs/heads/master | 2020-03-22T10:02:51.756973 | 2018-07-05T17:28:45 | 2018-07-05T17:28:45 | 139,877,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,992 | py | import re, math, os
from bokeh.plotting import figure, output_file, show
from bokeh.layouts import gridplot, row, layout
from bokeh.models import ColumnDataSource, ranges, LabelSet, Div, SingleIntervalTicker, LinearAxis
from datetime import datetime, timedelta, date
from dateutil.relativedelta import relativedelta
class Chat(object):
def __init__(self, file):
"""Initialises the object with file-name and fetches the content into self.chat_cntnt"""
self.file = file
with open(self.file, "r") as chat:
self.chat_cntnt = chat.read()
def number_of_messages(self):
"""Finds and returns self.tot_num_msgs and self.num_media"""
pttrn_num_msgs = re.compile(r'\b\d*/\d*/\d*, \d*:\d* [AP]M - (.*?): ')
matches = pttrn_num_msgs.findall(self.chat_cntnt)
self.tot_num_msgs = len(matches)
pttrn_num_media = re.compile(r'\s<Media omitted>')
matches = pttrn_num_media.findall(self.chat_cntnt)
self.num_media = len(matches)
return self.tot_num_msgs, self.num_media
def number_of_contributing_members(self):
"""Finds and returns self.num_mem and self.member_list"""
members = re.findall(r'\b\d*/\d*/\d*, \d*:\d* [AP]M - (.*?): ', self.chat_cntnt)
self.member_list = list(set(members))
self.member_list_set = set(members)
self.num_mem = len(self.member_list)
for idx, peep in enumerate(self.member_list):
if u'\u202a' in peep:
self.member_list[idx] = peep.strip(u'\u202a')
if u'\u202c' in peep:
self.member_list[idx] = self.member_list[idx].strip(u'\u202c')
return self.num_mem, self.member_list
# Analysis point 3, 3a and 3b.
# (dependent on self.number_of_contributing_members())
def message_by_member_splitup(self):
# self.number_of_contributing_members()
self.mem_msg_splitup = {}
for peep in self.member_list_set:
pttrn_mem_by_msg = re.compile(r'\b\d*/\d*/\d*, \d*:\d* [AP]M - '+re.escape(peep)+r': ')
matches = pttrn_mem_by_msg.findall(self.chat_cntnt)
self.mem_msg_splitup[peep.strip(u'\u202a').strip(u'\u202c')] = len(matches)
self.max_msg_peep = max(self.mem_msg_splitup, key=self.mem_msg_splitup.get)
self.numMsgs_by_max_msg_peep = max(self.mem_msg_splitup.values())
self.min_msg_peep = min(self.mem_msg_splitup, key=self.mem_msg_splitup.get)
self.numMsgs_by_min_msg_peep = min(self.mem_msg_splitup.values())
return self.mem_msg_splitup, {self.max_msg_peep:self.numMsgs_by_max_msg_peep}, {self.min_msg_peep:self.numMsgs_by_min_msg_peep}
# Analysis point 5 and 5a.
# (dependent on self.number_of_contributing_members())
def media_by_member_splitup(self):
# self.number_of_contributing_members()
self.mem_media_splitup = {}
for peep in self.member_list_set:
pttrn_media_by_mem = re.compile(r'\b\d*/\d*/\d*, \d*:\d* [AP]M - '+re.escape(peep)+r':\s<Media omitted>')
matches = pttrn_media_by_mem.findall(self.chat_cntnt)
self.mem_media_splitup[peep.strip(u'\u202a').strip(u'\u202c')] = len(matches)
self.max_media_peep = max(self.mem_media_splitup, key=self.mem_media_splitup.get)
self.numMedia_by_max_media_peep = max(self.mem_media_splitup.values())
self.min_media_peep = min(self.mem_media_splitup, key=self.mem_media_splitup.get)
self.numMedia_by_min_media_peep = min(self.mem_media_splitup.values())
return self.mem_media_splitup, {self.max_media_peep:self.numMedia_by_max_media_peep}, {self.min_media_peep:self.numMedia_by_min_media_peep}
def time_stats(self):
'''Returns (time-span of chat, time of first message, time of second message, msg timestamp list, msg datestamp list, msg hourstamp list, msg monthstamp list)'''
self.msg_timestamps = []
self.msg_datestamps = []
self.msg_hourstamps = []
self.msg_monthstamps = []
self.media_timestamps = []
self.media_datestamps = []
self.media_hourstamps = []
self.media_monthstamps = []
for msg_ln in self.chat_cntnt.splitlines():
pttrn_new_datetime = re.compile(r'(\b\d*/\d*/\d*, \d*:\d* [AP]M) - .*?:')
pttrn_new_date = re.compile(r'(\b\d*/\d*/\d*), \d*:\d* [AP]M - .*?:')
pttrn_new_hour = re.compile(r'(\b\d*/\d*/\d*), (\d*):\d* ([AP]M) - .*?:')
pttrn_new_month = re.compile(r'\b\d*/(\d*/\d*), \d*:\d* [AP]M - .*?:')
pttrn_new_datetime_media = re.compile(r'(\b\d*/\d*/\d*, \d*:\d* [AP]M) - .*?: <Media omitted>')
pttrn_new_date_media = re.compile(r'(\b\d*/\d*/\d*), \d*:\d* [AP]M - .*?: <Media omitted>')
pttrn_new_hour_media = re.compile(r'(\b\d*/\d*/\d*), (\d*):\d* ([AP]M) - .*?: <Media omitted>')
pttrn_new_month_media = re.compile(r'\b\d*/(\d*/\d*), \d*:\d* [AP]M - .*?: <Media omitted>')
datetime_matches = pttrn_new_datetime.findall(msg_ln)
date_matches = pttrn_new_date.findall(msg_ln)
hour_matches = pttrn_new_hour.findall(msg_ln)
month_matches = pttrn_new_month.findall(msg_ln)
datetime_matches_media = pttrn_new_datetime_media.findall(msg_ln)
date_matches_media = pttrn_new_date_media.findall(msg_ln)
hour_matches_media = pttrn_new_hour_media.findall(msg_ln)
month_matches_media = pttrn_new_month_media.findall(msg_ln)
if len(datetime_matches) == 1:
self.msg_timestamps.append(datetime_matches[0])
self.msg_datestamps.append(date_matches[0])
self.msg_hourstamps.append(' '.join(hour_matches[0]))
self.msg_monthstamps.append(month_matches[0])
if len(datetime_matches_media) == 1:
self.media_timestamps.append(datetime_matches_media[0])
self.media_datestamps.append(date_matches_media[0])
self.media_hourstamps.append(' '.join(hour_matches_media[0]))
self.media_monthstamps.append(month_matches_media[0])
self.chat_timeLength = datetime.strptime(self.msg_timestamps[-1], '%d/%m/%y, %I:%M %p') - datetime.strptime(self.msg_timestamps[0], '%d/%m/%y, %I:%M %p')
return (self.chat_timeLength,
self.msg_timestamps[0], self.msg_timestamps[-1], self.msg_timestamps,
self.msg_datestamps,
self.msg_hourstamps,
self.msg_monthstamps,
self.media_timestamps,
self.media_datestamps,
self.media_hourstamps,
self.media_monthstamps
)
def dash_it_up(self):
# print("DASH IT UP BEGIN:\t\t" + datetime.strftime(datetime.now(), '%I:%M:%S'))
total_num_messages, total_num_media = self.number_of_messages()
# print("self.number_of_messages() executed")
num_members, member_list = self.number_of_contributing_members()
# print("self.number_of_contributing_members() executed")
member_numMsg_dict, max_msg_peep_dict, min_msg_peep_dict = self.message_by_member_splitup()
# print("self.message_by_member_splitup() executed")
member_numMedia_dict, max_media_peep_dict, min_media_peep_dict = self.media_by_member_splitup()
# print("self.media_by_member_splitup() executed")
chat_timespan, msg_one_t, msg_last_t, all_times, all_dates, all_hours, all_months, all_times_media, all_dates_media, all_hours_media, all_months_media= self.time_stats()
# print("self.time_stats() executed")
output_file("./HTMLs/_STATISTICS_{}.html".format(os.path.basename(os.path.splitext(self.file)[0])))
#PLOT 0: TITLE OF THE PAGE===========================================================================================================================#
title_plot = figure(plot_height=30, logo=None)
title_plot.title.text = "{} ({} participants)".format(os.path.basename(os.path.splitext(self.file)[0]), num_members)
# title_plot.title.text_font = "SF Pro Display"
title_plot.title.text_font_size = "55px"
title_plot.title.text_font_style = "bold"
title_plot.title.align = "center"
#DISTRIBUTION PLOT SETTINGS====================================================#
title_text_font_size = "40px"
xtick_font_size_value = (-1/7*num_members + 152/7) if num_members>=20 else 16
xtick_text_font_size = "{}px".format(xtick_font_size_value)
individual_bar_label_size = "{}px".format(xtick_font_size_value)
colors = [""]
#PLOT 1: MESSAGE DISTRIBUTION===========================================================================================================================#
source = ColumnDataSource(dict(x=list(self.mem_msg_splitup.keys()), y=list(self.mem_msg_splitup.values())))
plot1 = figure(x_range=list(self.mem_msg_splitup.keys()), logo=None, sizing_mode="scale_width", plot_height=400)
plot1.title.text = "Messages: {}".format(total_num_messages)
plot1.title.text_font_size = title_text_font_size
# plot1.title.text_font = page_font
labels = LabelSet(x='x', y='y', text='y', level='glyph',
x_offset=-xtick_font_size_value/2, y_offset=0, source=source, render_mode='canvas',
text_font_size=individual_bar_label_size,
# text_font=page_font
)
plot1.vbar(source=source,
x='x',
top='y',
width=0.8)
plot1.add_layout(labels)
plot1.xgrid.grid_line_color = None
plot1.y_range.start = 0
plot1.xaxis.major_label_orientation = math.pi/2
plot1.xaxis.major_label_text_font_size = xtick_text_font_size
# plot1.xaxis.major_label_text_font = page_font
plot1.yaxis.axis_label = "#messages"
plot1.yaxis.major_label_orientation = math.pi/2
# plot1.yaxis.major_label_text_font = page_font
plot1.yaxis.major_label_text_font_size = "16px"
plot1.yaxis.axis_label_text_font_size = "16px"
# plot1.yaxis.axis_label_text_font = page_font
#PLOT 2: MEDIA DISTRIBUTION===========================================================================================================================#
source = ColumnDataSource(dict(x=list(self.mem_media_splitup.keys()), y=list(self.mem_media_splitup.values())))
plot2 = figure(x_range=list(self.mem_media_splitup.keys()), logo=None, sizing_mode="scale_width", plot_height=400)
plot2.title.text = "Media: {}".format(total_num_media)
plot2.title.text_font_size = title_text_font_size
# plot2.title.text_font = page_font
labels = LabelSet(x='x', y='y', text='y', level='glyph',
x_offset=-xtick_font_size_value/2, y_offset=0, source=source, render_mode='canvas',
text_font_size=individual_bar_label_size,
# text_font=page_font
)
plot2.vbar(source=source,
x='x',
top='y',
width=0.8, color="firebrick")
plot2.add_layout(labels)
plot2.xgrid.grid_line_color = None
plot2.y_range.start = 0
plot2.xaxis.major_label_orientation = math.pi/2
plot2.xaxis.major_label_text_font_size = xtick_text_font_size
# plot2.xaxis.major_label_text_font = page_font
plot2.yaxis.axis_label = "#media"
plot2.yaxis.major_label_orientation = math.pi/2
# plot2.yaxis.major_label_text_font = page_font
# plot2.yaxis.major_label_text_font_size = "16px"
plot2.yaxis.axis_label_text_font_size = "16px"
# plot2.yaxis.axis_label_text_font = page_font
#PLOT 3: MEMBER LIST & (TOTAL NUMBER OF MEMBERS)===========================================================================================================================#
plot3 = figure(plot_height=13, logo=None, sizing_mode="scale_width")
name_str = ''
for x in member_list:
if name_str == '':
name_str += x
else:
name_str += ', '+x
plot3.title.text = "Participants ({}): {}".format(num_members, name_str)
plot3.title.text_font_size = "18px"
# plot3.title.text_font = page_font
plot3.title.text_font_style = "normal"
plot3.title.align = "center"
#TIME DISTRIBUTION PLOTS' LOCAL FUNCTIONS===========================================================#
def perdelta(start, end, delta):
curr = start
while curr<end:
yield curr
curr += delta
def timeBlockSpan(first, last):
"""
Returns: 1 ==> minutes (very new chat)
2 ==> hours (relatively new chat)
3 ==> days (relatively old chat)
4 ==> months (established chat)(cancelled)
"""
t_delta = last - first
if t_delta.total_seconds() <= 3600:
return 1
elif 3600 < t_delta.total_seconds() <= 259200:
return 2
elif 259200 < t_delta.total_seconds() and t_delta.days <= 91:
return 3
elif t_delta.days > 91:
return 4
#PLOT 4: MESSAGE TIME DISTRIBUTION===========================================================================================================================#
# print("Begin" + datetime.strftime(datetime.now(), '%I:%M:%S'))
all_dates_dtObjs = []
for stamp in all_dates:
all_dates_dtObjs.append(datetime.strptime(stamp, '%d/%m/%y').date())
all_times_dtObjs = []
for stamp in all_times:
all_times_dtObjs.append(datetime.strptime(stamp, '%d/%m/%y, %I:%M %p'))
all_hours_dtObjs = []
for stamp in all_hours:
all_hours_dtObjs.append(datetime.strptime(stamp, '%d/%m/%y %I %p'))
all_months_dtObjs = []
for stamp in all_months:
all_months_dtObjs.append(datetime.strptime(stamp, '%m/%y'))
# print("created all dtObjs" + datetime.strftime(datetime.now(), '%I:%M:%S'))
first_date, last_date = all_dates_dtObjs[0], all_dates_dtObjs[-1]
first_dt, last_dt = all_times_dtObjs[0], all_times_dtObjs[-1]
first_hour, last_hour = all_hours_dtObjs[0], all_hours_dtObjs[-1]
first_month, last_month = all_months_dtObjs[0], all_months_dtObjs[-1]
timeBlockSpan_decision = timeBlockSpan(first_dt, last_dt)
# print("TBS decision generated" + datetime.strftime(datetime.now(), '%I:%M:%S'))
if timeBlockSpan_decision == 1:
all_times_msgs_distr = {}
for i in perdelta(first_dt, last_dt+timedelta(seconds=60), timedelta(seconds=60)):
all_times_msgs_distr[i] = all_times_dtObjs.count(i)
xLabels = [datetime.strftime(x, "%I:%M %p") for x in all_times_msgs_distr.keys()]
y = list(all_times_msgs_distr.values())
elif timeBlockSpan_decision == 2:
all_hours_msgs_distr = {}
for i in perdelta(first_hour, last_hour+timedelta(hours=1), timedelta(hours=1)):
all_hours_msgs_distr[i] = all_hours_dtObjs.count(i)
xLabels = [datetime.strftime(x, "%d/%m, %H-{} hours".format(x.hour+1)) for x in all_hours_msgs_distr.keys()]
y = list(all_hours_msgs_distr.values())
elif timeBlockSpan_decision == 3:
all_dates_msgs_distr = {}
for i in perdelta(first_date, last_date+timedelta(days=1), timedelta(days=1)):
all_dates_msgs_distr[i] = all_dates_dtObjs.count(i)
xLabels = [datetime.strftime(x, "%d %B '%y") for x in all_dates_msgs_distr.keys()]
y = list(all_dates_msgs_distr.values())
elif timeBlockSpan_decision == 4:
all_months_msgs_distr = {}
for i in perdelta(first_month, last_month+relativedelta(months=+1), relativedelta(months=+1)):
all_months_msgs_distr[i] = all_months_dtObjs.count(i)
xLabels = [datetime.strftime(x, "%B '%y") for x in all_months_msgs_distr.keys()]
y = list(all_months_msgs_distr.values())
# print(datetime.strftime(datetime.now(), '%I:%M:%S'))
num_bars_on_plot = len(xLabels)
xtick_font_size_value = (-1/7*num_bars_on_plot + 152/5.5) if num_bars_on_plot>=40 else 16
xtick_text_font_size = "{}px".format(xtick_font_size_value)
source = ColumnDataSource(dict(x=xLabels, y=y))
plot4 = figure(plot_height=180, logo=None, sizing_mode="scale_width", x_range=xLabels)
plot4.title.text = "Messages time distribution [{} - {} (~{} days)]".format(msg_one_t, msg_last_t, chat_timespan.days+1)
plot4.title.text_font_size = title_text_font_size
labels = LabelSet(x='x', y='y', text='y', level='glyph',
x_offset=-6, y_offset=0, source=source, render_mode='canvas',
text_font_size=xtick_text_font_size,
# text_font=page_font
)
plot4.vbar(source=source, x='x', top='y', width=0.9, color="#9EA09E")
plot4.add_layout(labels)
plot4.xaxis.major_label_orientation = math.pi/2
plot4.xaxis.major_label_text_font_size = xtick_text_font_size
plot4.yaxis.axis_label = "Activity (#messages)"
plot4.yaxis.axis_label_text_font_size = "16px"
#PLOT 5: MEDIA TIME DISTRIBUTION===========================================================================================================================#
all_dates_media_dtObjs = []
for stamp in all_dates_media:
all_dates_media_dtObjs.append(datetime.strptime(stamp, '%d/%m/%y').date())
all_times_media_dtObjs = []
for stamp in all_times_media:
all_times_media_dtObjs.append(datetime.strptime(stamp, '%d/%m/%y, %I:%M %p'))
all_hours_media_dtObjs = []
for stamp in all_hours_media:
all_hours_media_dtObjs.append(datetime.strptime(stamp, '%d/%m/%y %I %p'))
all_months_media_dtObjs = []
for stamp in all_months_media:
all_months_media_dtObjs.append(datetime.strptime(stamp, '%m/%y'))
# print("created all dtObjs" + datetime.strftime(datetime.now(), '%I:%M:%S'))
first_date_media, last_date_media = all_dates_media_dtObjs[0], all_dates_media_dtObjs[-1]
first_dt_media, last_dt_media = all_times_media_dtObjs[0], all_times_media_dtObjs[-1]
first_hour_media, last_hour_media = all_hours_media_dtObjs[0], all_hours_media_dtObjs[-1]
first_month_media, last_month_media = all_months_media_dtObjs[0], all_months_media_dtObjs[-1]
timeBlockSpan_decision = timeBlockSpan(first_dt_media, last_dt_media)
# print("TBS decision generated" + datetime.strftime(datetime.now(), '%I:%M:%S'))
if timeBlockSpan_decision == 1:
all_times_media_distr = {}
for i in perdelta(first_dt_media, last_dt_media+timedelta(seconds=60), timedelta(seconds=60)):
all_times_media_distr[i] = all_times_media_dtObjs.count(i)
xLabels = [datetime.strftime(x, "%I:%M %p") for x in all_times_media_distr.keys()]
y = list(all_times_media_distr.values())
elif timeBlockSpan_decision == 2:
all_hours_media_distr = {}
for i in perdelta(first_hour_media, last_hour_media+timedelta(hours=1), timedelta(hours=1)):
all_hours_media_distr[i] = all_hours_media_dtObjs.count(i)
xLabels = [datetime.strftime(x, "%d/%m, %H-{} hours".format(x.hour+1)) for x in all_hours_media_distr.keys()]
y = list(all_hours_media_distr.values())
elif timeBlockSpan_decision == 3:
all_dates_media_distr = {}
for i in perdelta(first_date_media, last_date_media+timedelta(days=1), timedelta(days=1)):
all_dates_media_distr[i] = all_dates_media_dtObjs.count(i)
xLabels = [datetime.strftime(x, "%d %B '%y") for x in all_dates_media_distr.keys()]
y = list(all_dates_media_distr.values())
elif timeBlockSpan_decision == 4:
all_months_media_distr = {}
for i in perdelta(first_month_media, last_month_media+relativedelta(months=+1), relativedelta(months=+1)):
all_months_media_distr[i] = all_months_media_dtObjs.count(i)
xLabels = [datetime.strftime(x, "%B '%y") for x in all_months_media_distr.keys()]
y = list(all_months_media_distr.values())
# print(datetime.strftime(datetime.now(), '%I:%M:%S'))
num_bars_on_plot = len(xLabels)
xtick_font_size_value = (-1/7*num_bars_on_plot + 152/5.5) if num_bars_on_plot>=40 else 16
xtick_text_font_size = "{}px".format(xtick_font_size_value)
source = ColumnDataSource(dict(x=xLabels, y=y))
plot5 = figure(plot_height=180, logo=None, sizing_mode="scale_width", x_range=xLabels)
plot5.title.text = "Media time distribution [{} - {} (~{} days)]".format(msg_one_t, msg_last_t, chat_timespan.days+1)
plot5.title.text_font_size = title_text_font_size
labels = LabelSet(x='x', y='y', text='y', level='glyph',
x_offset=-6, y_offset=0, source=source, render_mode='canvas',
text_font_size=xtick_text_font_size,
# text_font=page_font
)
plot5.vbar(source=source, x='x', top='y', width=0.9, color="#FFC300")
plot5.add_layout(labels)
plot5.xaxis.major_label_orientation = math.pi/2
plot5.xaxis.major_label_text_font_size = xtick_text_font_size
plot5.yaxis.axis_label = "Activity (#media)"
plot5.yaxis.axis_label_text_font_size = "16px"
#DASHBOARD ASSIMILATION===========================================================================================================================#
dashboard = layout(
children=[
[title_plot],
[plot3],
[plot1, plot2],
[plot4],
[plot5]
],
sizing_mode="scale_width"
)
show(dashboard)
# print("DASH IT UP END:\t\t" + datetime.strftime(datetime.now(), '%I:%M:%S'))
def main():
chat = Chat("./chats/WhatsApp Chat with xyz.txt")
chat.dash_it_up()
# print("{} done at {}".format(C, datetime.strftime(datetime.now(), '%I:%M:%S %p')))
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | noreply@github.com |
6bf0e432bf6a9ac21ec7bfa42a25fbbf1ac73a1b | d965666c0bcae4290d4189b5f539696651b23fd8 | /venv/lib/python2.7/site-packages/pyshark/capture/capture.py | 5f759f289bf529559d905b039c75128d2e691b1b | [] | no_license | anditangguhf/Skripsi-Deteksi-DGA | 5de556959f72ed837da1f9c43cbcda8a5e3f479f | 81ce1f13a0679d969375c24c3a7116d63587b956 | refs/heads/master | 2022-12-03T04:59:05.843833 | 2020-08-12T04:59:08 | 2020-08-12T04:59:08 | 254,344,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,145 | py | from __future__ import unicode_literals
import os
import threading
import logbook
import sys
import trollius as asyncio
from logbook import StreamHandler
from trollius import From, subprocess, Return
from trollius.executor import TimeoutError
from trollius.py33_exceptions import ProcessLookupError
from pyshark.tshark.tshark import get_process_path, get_tshark_display_filter_flag, \
tshark_supports_json, TSharkVersionException
from pyshark.tshark.tshark_json import packet_from_json_packet
from pyshark.tshark.tshark_xml import packet_from_xml_packet, psml_structure_from_xml
class TSharkCrashException(Exception):
    """Raised when the underlying tshark process dies unexpectedly."""
class UnknownEncyptionStandardException(Exception):
    """Raised when an unsupported (or missing) encryption standard is requested."""
class RawMustUseJsonException(Exception):
    """Raised when include_raw is requested without use_json also being True."""
class StopCapture(Exception):
    """Raise anywhere inside a packet-handling callback to stop the capture process."""
class Capture(object):
"""
Base class for packet captures.
"""
DEFAULT_BATCH_SIZE = 2 ** 16
SUMMARIES_BATCH_SIZE = 64
DEFAULT_LOG_LEVEL = logbook.CRITICAL
SUPPORTED_ENCRYPTION_STANDARDS = ['wep', 'wpa-pwk', 'wpa-pwd', 'wpa-psk']
    def __init__(self, display_filter=None, only_summaries=False, eventloop=None,
                 decryption_key=None, encryption_type='wpa-pwd', output_file=None,
                 decode_as=None, disable_protocol=None, tshark_path=None,
                 override_prefs=None, capture_filter=None, use_json=False, include_raw=False):
        """
        Base constructor for capture objects.

        :param display_filter: tshark display filter applied to captured packets.
        :param only_summaries: if True, only packet summaries are produced.
        :param eventloop: optional pre-built event loop; a new one is created
            (via _setup_eventloop) when omitted.
        :param decryption_key: key used to decrypt encrypted traffic.
        :param encryption_type: one of SUPPORTED_ENCRYPTION_STANDARDS
            (matched case-insensitively).
        :param output_file: path the capture should also be written to.
        :param decode_as: tshark "decode as" mapping.
        :param disable_protocol: protocol to disable in tshark.
        :param tshark_path: explicit path to the tshark binary.
        :param override_prefs: tshark preference overrides.
        :param capture_filter: capture (BPF) filter.
        :param use_json: parse tshark's JSON output instead of XML.
        :param include_raw: include raw packet data (requires use_json=True).
        :raises RawMustUseJsonException: if include_raw is set without use_json.
        :raises UnknownEncyptionStandardException: if encryption_type is falsy
            or not one of the supported standards.
        """
        self.loaded = False
        self.tshark_path = tshark_path
        self._override_prefs = override_prefs
        self.debug = False
        self.use_json = use_json
        self.include_raw = include_raw
        self._packets = []
        self._current_packet = 0
        self._display_filter = display_filter
        self._capture_filter = capture_filter
        self._only_summaries = only_summaries
        self._output_file = output_file
        self._running_processes = set()
        self._decode_as = decode_as
        self._disable_protocol = disable_protocol
        self._log = logbook.Logger(self.__class__.__name__, level=self.DEFAULT_LOG_LEVEL)

        # Raw packet data is only available through tshark's JSON output mode.
        if include_raw and not use_json:
            raise RawMustUseJsonException("use_json must be True if include_raw")

        self.eventloop = eventloop
        if self.eventloop is None:
            self._setup_eventloop()
        # NOTE: a falsy encryption_type (e.g. None) also takes the error branch;
        # callers must pass one of the supported standards.
        if encryption_type and encryption_type.lower() in self.SUPPORTED_ENCRYPTION_STANDARDS:
            self.encryption = (decryption_key, encryption_type.lower())
        else:
            raise UnknownEncyptionStandardException("Only the following standards are supported: %s."
                                                    % ', '.join(self.SUPPORTED_ENCRYPTION_STANDARDS))
def __getitem__(self, item):
"""
Gets the packet in the given index.
:param item: packet index
:return: Packet object.
"""
return self._packets[item]
def __len__(self):
return len(self._packets)
def next(self):
return self.next_packet()
# Allows for child classes to call next() from super() without 2to3 "fixing"
# the call
def next_packet(self):
if self._current_packet >= len(self._packets):
raise StopIteration()
cur_packet = self._packets[self._current_packet]
self._current_packet += 1
return cur_packet
def clear(self):
"""
Empties the capture of any saved packets.
"""
self._packets = []
self._current_packet = 0
def reset(self):
"""
Starts iterating packets from the first one.
"""
self._current_packet = 0
def load_packets(self, packet_count=0, timeout=None):
"""
Reads the packets from the source (cap, interface, etc.) and adds it to the internal list.
If 0 as the packet_count is given, reads forever
:param packet_count: The amount of packets to add to the packet list (0 to read forever)
:param timeout: If given, automatically stops after a given amount of time.
"""
initial_packet_amount = len(self._packets)
def keep_packet(pkt):
self._packets.append(pkt)
if packet_count != 0 and len(self._packets) - initial_packet_amount >= packet_count:
raise Return()
try:
self.apply_on_packets(keep_packet, timeout=timeout)
self.loaded = True
except TimeoutError:
pass
def set_debug(self, set_to=True):
"""
Sets the capture to debug mode (or turns it off if specified).
"""
if set_to:
StreamHandler(sys.stdout).push_application()
self._log.level = logbook.DEBUG
self.debug = set_to
    def _setup_eventloop(self):
        """
        Sets up a new eventloop as the current one according to the OS.

        On Windows a ProactorEventLoop is used (required for subprocess pipe
        support); elsewhere a default new event loop is created. On POSIX the
        child watcher is attached only when running on the main thread, since
        that is where trollius allows it.
        """
        if os.name == 'nt':
            self.eventloop = asyncio.ProactorEventLoop()
            if sys.version_info <= (3, 0):
                # FIXME: There appears to be a bug in the 2.7 version of trollius, wherein the selector retrieves an
                # object of value 0 and attempts to look for it in the weakref set, which raises an exception.
                # This hack sidesteps this issue, but does not solve it. If a proper fix is found, apply it!
                self.eventloop._selector._stopped_serving = set()
        else:
            self.eventloop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.eventloop)
        # Child-process exit notifications require a watcher bound to this
        # loop; only possible from the main thread on POSIX.
        if os.name == 'posix' and isinstance(threading.current_thread(), threading._MainThread):
            asyncio.get_child_watcher().attach_loop(self.eventloop)
@classmethod
def _get_json_separator(cls):
return ("}%s%s ," % (os.linesep, os.linesep)).encode()
@classmethod
def _extract_packet_json_from_data(cls, data, got_first_packet=True):
tag_start = 0
if not got_first_packet:
tag_start = data.find(b"{")
if tag_start == -1:
return None, data
closing_tag = cls._get_json_separator()
tag_end = data.find(closing_tag)
if tag_end == -1:
closing_tag = ("}%s%s]" % (os.linesep, os.linesep)).encode()
tag_end = data.find(closing_tag)
if tag_end != -1:
# Include closing parenthesis but not comma
tag_end += len(closing_tag) - 1
return data[tag_start:tag_end], data[tag_end + 1:]
return None, data
@staticmethod
def _extract_tag_from_data(data, tag_name=b'packet'):
"""
Gets data containing a (part of) tshark xml.
If the given tag is found in it, returns the tag data and the remaining data.
Otherwise returns None and the same data.
:param data: string of a partial tshark xml.
:return: a tuple of (tag, data). tag will be None if none is found.
"""
opening_tag = b'<' + tag_name + b'>'
closing_tag = opening_tag.replace(b'<', b'</')
tag_end = data.find(closing_tag)
if tag_end != -1:
tag_end += len(closing_tag)
tag_start = data.find(opening_tag)
return data[tag_start:tag_end], data[tag_end:]
return None, data
    def _packets_from_tshark_sync(self, packet_count=None, existing_process=None):
        """
        Returns a generator of packets.
        This is the sync version of packets_from_tshark. It waits for the completion of each coroutine and
        reimplements reading packets in a sync way, yielding each packet as it arrives.

        :param packet_count: If given, stops after this amount of packets is captured.
        :param existing_process: Optional already-running tshark process to
            read from instead of spawning a new one.
        """
        # NOTE: This has code duplication with the async version, think about how to solve this
        tshark_process = existing_process or self.eventloop.run_until_complete(self._get_tshark_process())
        psml_structure, data = self.eventloop.run_until_complete(self._get_psml_struct(tshark_process.stdout))
        packets_captured = 0

        # NOTE(review): this discards any bytes left over from PSML parsing
        # (the `data` returned just above) -- confirm that is intended.
        data = b''
        try:
            while True:
                try:
                    packet, data = self.eventloop.run_until_complete(
                        self._get_packet_from_stream(tshark_process.stdout, data, psml_structure=psml_structure,
                                                     got_first_packet=packets_captured > 0))

                except EOFError:
                    self._log.debug('EOF reached (sync)')
                    break

                # _get_packet_from_stream may return None when the buffer does
                # not yet hold a complete packet.
                if packet:
                    packets_captured += 1
                    yield packet
                if packet_count and packets_captured >= packet_count:
                    break
        finally:
            # Always tear down the subprocess, even if the consumer abandons
            # the generator or an error escapes the loop.
            self.eventloop.run_until_complete(self._cleanup_subprocess(tshark_process))
def apply_on_packets(self, callback, timeout=None, packet_count=None):
    """
    Read packets and invoke *callback* for each one as it arrives.

    For a live (infinite) capture this runs forever; for a finite source it
    returns once every packet has been processed. When *timeout* (seconds) is
    given, an asyncio Timeout error is raised if processing does not finish
    in time.

    Example usage:
        def print_callback(pkt):
            print pkt
        capture.apply_on_packets(print_callback)
    """
    packet_coro = self.packets_from_tshark(callback, packet_count=packet_count)
    if timeout is None:
        return self.eventloop.run_until_complete(packet_coro)
    return self.eventloop.run_until_complete(asyncio.wait_for(packet_coro, timeout))
@asyncio.coroutine
def packets_from_tshark(self, packet_callback, packet_count=None, close_tshark=True):
    """
    A coroutine which creates a tshark process, runs the given callback on each packet that is received from it and
    closes the process when it is done.

    Do not use interactively. Can be used in order to insert packets into your own eventloop.
    """
    tshark_process = yield From(self._get_tshark_process(packet_count=packet_count))
    try:
        yield From(self._go_through_packets_from_fd(tshark_process.stdout, packet_callback,
                                                    packet_count=packet_count))
    except StopCapture:
        # Raised from inside the callback to stop capturing early; not an error.
        pass
    finally:
        if close_tshark:
            # Tear down every subprocess owned by this capture object.
            yield From(self._close_async())
            #yield From(self._cleanup_subprocess(tshark_process))
@asyncio.coroutine
def _go_through_packets_from_fd(self, fd, packet_callback, packet_count=None):
    """
    A coroutine which goes through a stream and calls a given callback for each XML packet seen in it.
    """
    packets_captured = 0
    self._log.debug('Starting to go through packets')

    # Read the PSML summary structure first (if summaries are enabled).
    psml_struct, data = yield From(self._get_psml_struct(fd))

    while True:
        try:
            packet, data = yield From(self._get_packet_from_stream(fd, data,
                                                                   got_first_packet=packets_captured > 0,
                                                                   psml_structure=psml_struct))
        except EOFError:
            self._log.debug('EOF reached')
            break

        if packet:
            packets_captured += 1
            try:
                packet_callback(packet)
            except StopCapture:
                # The callback asked us to stop; exit the read loop cleanly.
                self._log.debug('User-initiated capture stop in callback')
                break
        if packet_count and packets_captured >= packet_count:
            break
@asyncio.coroutine
def _get_psml_struct(self, fd):
    """
    Gets the current PSML (packet summary xml) structure in a tuple ((None, leftover_data)),
    only if the capture is configured to return it, else returns (None, leftover_data).

    A coroutine.
    """
    data = b''
    psml_struct = None

    if self._only_summaries:
        # If summaries are read, we need the psdml structure which appears on top of the file.
        while not psml_struct:
            new_data = yield From(fd.read(self.SUMMARIES_BATCH_SIZE))
            data += new_data
            psml_struct, data = self._extract_tag_from_data(data, b'structure')
            if psml_struct:
                psml_struct = psml_structure_from_xml(psml_struct)
            elif not new_data:
                # EOF before a <structure> tag was ever seen.
                raise Return(None, data)
        raise Return(psml_struct, data)
    else:
        # Not in summaries mode: nothing to parse, hand back an empty buffer.
        raise Return(None, data)
@asyncio.coroutine
def _get_packet_from_stream(self, stream, existing_data, got_first_packet=True,
                            psml_structure=None):
    """
    A coroutine which returns a single packet if it can be read from the given StreamReader.

    :return a tuple of (packet, remaining_data). The packet will be None if there was not enough XML data to create
    a packet. remaining_data is the leftover data which was not enough to create a packet from.
    :raises EOFError if EOF was reached.
    """
    # yield each packet in existing_data
    if self.use_json:
        packet, existing_data = self._extract_packet_json_from_data(existing_data,
                                                                    got_first_packet=got_first_packet)
    else:
        packet, existing_data = self._extract_tag_from_data(existing_data)

    if packet:
        # Parse the raw bytes into a packet object of the configured flavor.
        if self.use_json:
            packet = packet_from_json_packet(packet)
        else:
            packet = packet_from_xml_packet(packet, psml_structure=psml_structure)
        raise Return(packet, existing_data)

    # Not enough buffered data for a whole packet yet — read another batch.
    new_data = yield From(stream.read(self.DEFAULT_BATCH_SIZE))
    existing_data += new_data

    if not new_data:
        # Reached EOF
        raise EOFError()
    raise Return(None, existing_data)
def _get_tshark_path(self):
    """Return the resolved filesystem path of the tshark executable to run."""
    resolved_path = get_process_path(self.tshark_path)
    return resolved_path
def _stderr_output(self):
    """Return the stderr target for spawned tshark subprocesses."""
    # In debug mode, inherit the parent's stderr so tshark messages reach the console.
    if self.debug:
        return None
    # Otherwise discard stderr entirely.
    return open(os.devnull, "w")
@asyncio.coroutine
def _get_tshark_process(self, packet_count=None, stdin=None):
    """
    Returns a new tshark process with previously-set parameters.
    """
    if self.use_json:
        output_type = 'json'
        if not tshark_supports_json(self.tshark_path):
            raise TSharkVersionException("JSON only supported on Wireshark >= 2.2.0")
    else:
        # PSML = summaries only, PDML = full packet dissection XML.
        output_type = 'psml' if self._only_summaries else 'pdml'
    # -l: line-buffered output, -n: no name resolution, -T: output format.
    parameters = [self._get_tshark_path(), '-l', '-n', '-T', output_type] + \
                 self.get_parameters(packet_count=packet_count)

    self._log.debug('Creating TShark subprocess with parameters: ' + ' '.join(parameters))
    tshark_process = yield From(asyncio.create_subprocess_exec(*parameters,
                                                               stdout=subprocess.PIPE,
                                                               stderr=self._stderr_output(),
                                                               stdin=stdin))
    # Registers the process for cleanup and raises if it crashed immediately.
    self._created_new_process(parameters, tshark_process)
    raise Return(tshark_process)
def _created_new_process(self, parameters, process, process_name="TShark"):
    """Register a freshly spawned subprocess and fail fast if it already exited badly."""
    self._log.debug('%s subprocess created', process_name)

    crashed_on_startup = process.returncode is not None and process.returncode != 0
    if crashed_on_startup:
        raise TSharkCrashException(
            '%s seems to have crashed. Try updating it. (command ran: "%s")' % (
                process_name, ' '.join(parameters)))
    # Track the process so close()/_close_async() can clean it up later.
    self._running_processes.add(process)
@asyncio.coroutine
def _cleanup_subprocess(self, process):
    """
    Kill the given process and properly closes any pipes connected to it.
    """
    if process.returncode is None:
        # Still running: kill it and wait briefly for it to die.
        try:
            process.kill()
            # NOTE(review): plain ``yield`` here, unlike the ``yield From(...)``
            # used elsewhere in this class — confirm this still awaits correctly
            # under the compatibility shim in use.
            yield asyncio.wait_for(process.wait(), 1)
        except TimeoutError:
            self._log.debug('Waiting for process to close failed, may have zombie process.')
        except ProcessLookupError:
            # Process died between the returncode check and kill(); nothing to do.
            pass
        except OSError:
            # On Windows, kill() on an already-dead process raises OSError; ignore it there.
            if os.name != 'nt':
                raise
    elif process.returncode > 0:
        raise TSharkCrashException('TShark seems to have crashed (retcode: %d). Try rerunning in debug mode [ capture_obj.set_debug() ] or try updating tshark.' % process.returncode)
def close(self):
    """Synchronously terminate every tshark subprocess owned by this capture."""
    cleanup_coro = self._close_async()
    self.eventloop.run_until_complete(cleanup_coro)
@asyncio.coroutine
def _close_async(self):
    # Kill every subprocess we spawned, then forget about all of them.
    for process in self._running_processes:
        yield From(self._cleanup_subprocess(process))
    self._running_processes.clear()
def __del__(self):
    """Best-effort cleanup of leftover tshark subprocesses on garbage collection."""
    if not self._running_processes:
        return
    self.close()
def get_parameters(self, packet_count=None):
    """
    Returns the special tshark parameters to be used according to the configuration of this class.
    """
    params = []
    if self._capture_filter:
        # BPF capture filter (applied by the capture engine).
        params += ['-f', self._capture_filter]
    if self._display_filter:
        # Display filter flag differs between tshark versions (-R vs -Y).
        params += [get_tshark_display_filter_flag(self.tshark_path), self._display_filter]
    # Raw is only enabled when JSON is also enabled.
    if self.include_raw:
        params += ["-x"]
    if packet_count:
        params += ['-c', str(packet_count)]
    if all(self.encryption):
        # NOTE(review): uat:80211_keys is built as "<encryption[1]>","<encryption[0]>" —
        # confirm the (key, type) ordering of the encryption pair.
        params += ['-o', 'wlan.enable_decryption:TRUE', '-o', 'uat:80211_keys:"' + self.encryption[1] + '","' +
                   self.encryption[0] + '"']
    if self._override_prefs:
        for preference_name, preference_value in self._override_prefs.items():
            if all(self.encryption) and preference_name in ('wlan.enable_decryption', 'uat:80211_keys'):
                continue  # skip if override preferences also given via --encryption options
            params += ['-o', '{0}:{1}'.format(preference_name, preference_value)]
    if self._output_file:
        # Also write the capture to a file.
        params += ['-w', self._output_file]
    if self._decode_as:
        # "Decode as": force a protocol dissector for matching traffic.
        for criterion, decode_as_proto in self._decode_as.items():
            params += ['-d', ','.join([criterion.strip(), decode_as_proto.strip()])]
    if self._disable_protocol:
        params += ['--disable-protocol', self._disable_protocol.strip()]
    return params
def __iter__(self):
    """Iterate cached packets when already loaded, else stream them live from tshark."""
    if not self.loaded:
        return self._packets_from_tshark_sync()
    return iter(self._packets)
def __repr__(self):
    """Short debug representation: concrete class name and packet count."""
    return '<{0} ({1:d} packets)>'.format(self.__class__.__name__, len(self._packets))
| [
"anditangguhf@gmail.com"
] | anditangguhf@gmail.com |
762c5f01dc26bf85b36b2cda337b1e05fd67f44e | 22f96e07b22e3ca89ee757badd1f35ed9efcc034 | /docs/conf.py | d4b5fe98e2c13e6412c9c4feeec2f5eaf200fdf8 | [
"MIT"
] | permissive | Duc98f/MangAdventure | 83e341ecbdb6592c947f77e32848346dcc23e861 | fe69c850f6adce1d9a8755e5aa63db358a6084f6 | refs/heads/master | 2023-06-09T23:08:25.595545 | 2021-06-13T10:55:16 | 2021-06-13T11:16:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,077 | py | # -- Setup Django --
from os import environ as env
from os.path import dirname, join
from sys import path

# Make the project root and the local Sphinx extension dir importable,
# then boot Django so autodoc can import the project's modules.
path.insert(0, dirname(dirname(__file__)))
path.insert(1, join(dirname(__file__), '_ext'))
env['DJANGO_SETTINGS_MODULE'] = 'MangAdventure.tests.settings'
__import__('django').setup()

# -- Project information --
import MangAdventure as MA  # noqa: E402

project = 'MangAdventure'
author = MA.__author__
release = MA.__version__
copyright = f'2018-2021, {project}, {MA.__license__} license'

# -- General configuration --
extensions = [
    'sphinx.ext.autodoc',
    'mangadventure_patches',
    'sphinx_autodoc_typehints',
    'sphinx.ext.intersphinx',
    'sphinx.ext.extlinks',
    'sphinx.ext.viewcode',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = 'en'
pygments_style = 'manni'
needs_sphinx = '3.3'

# -- InterSphinx & extlinks configuration --
_django = 'https://docs.djangoproject.com/en/3.2/'
_mdn = 'https://developer.mozilla.org/en-US/docs/Web/'

intersphinx_mapping = {
    'django': (_django, f'{_django}_objects/'),
    'python': ('https://docs.python.org/3.6/', None),
}

# Shorthand roles for commonly-linked external documentation.
extlinks = {
    'setting': (f'{_django}ref/settings/#std:setting-%s', ''),
    'tag': (f'{_django}ref/templates/builtins/#%s', ''),
    'auth': ('https://django-allauth.rtfd.io/en/latest/%s', ''),
    'csp': (f'{_mdn}HTTP/Headers/Content-Security-Policy/%s', ''),
    'status': (f'{_mdn}HTTP/Status/%s', ''),
    'header': (f'{_mdn}HTTP/Headers/%s', ''),
    'schema': ('https://schema.org/%s', ''),
}

# -- Autodoc configuration --
autodoc_default_options = {
    'member-order': 'bysource',
    'special-members': True,
    'undoc-members': True,
    # Dunders that add noise rather than documentation value.
    'exclude-members': ','.join((
        '__new__',
        '__dict__',
        '__repr__',
        '__init__',
        '__slots__',
        '__module__',
        '__weakref__',
        '__slotnames__',
        '__annotations__',
    ))
}
autodoc_mock_imports = ['pytest']
autodoc_inherit_docstrings = True
always_document_param_types = True
set_type_checking_flag = True
typehints_fully_qualified = False
typehints_document_rtype = True
# disable sphinx.ext.autodoc.typehints
autodoc_typehints = 'none'

# -- Options for HTML output --
html_theme = 'sphinx_rtd_theme'
html_theme_path = [__import__(html_theme).get_html_theme_path()]
html_theme_options = {
    'logo_only': True,
    'display_version': False,
    'collapse_navigation': True,
}
html_static_path = ['_static']
html_logo = '_static/logo.png'
# html_sidebars = {}

# -- Options for HTMLHelp output --
htmlhelp_basename = f'{project}Doc'

# -- Options for LaTeX output --
latex_elements = {}
latex_documents = [(
    master_doc, f'{project}.tex',
    f'{project} Documentation', author, 'manual'
)]

# -- Options for manual page output --
man_pages = [(
    master_doc, project.lower(),
    f'{project} Documentation', author.split(', '), 7
)]

# -- Options for Texinfo output --
texinfo_documents = [(
    master_doc, project, f'{project} Documentation',
    author, project, MA.__doc__, 'Miscellaneous'
)]
| [
"chronobserver@disroot.org"
] | chronobserver@disroot.org |
452d6a1116be732f045e520d350dc705407e2c81 | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/eve/client/script/ui/shared/fitting/panels/offensePanel.py | 2f426e8e743c0e2dd09191bd7a22606f6464d826 | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 3,079 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\shared\fitting\panels\offensePanel.py
from carbonui import const as uiconst
from carbonui.primitives.container import Container
from carbonui.primitives.sprite import Sprite
from eve.client.script.ui.control.eveLabel import EveLabelMedium
from eve.client.script.ui.station.fitting.fittingTooltipUtils import SetFittingTooltipInfo
from eve.client.script.ui.shared.fitting.panels.basePanel import BaseMenuPanel
from localization import GetByLabel
import uthread
class OffensePanel(BaseMenuPanel):
    """Fitting-window panel showing turret/drone/missile DPS for the fitted ship."""

    # One entry per damage source: (stats key, icon texture, tooltip label path, tooltip name).
    damageStats = (('turretDps', 'res:/UI/Texture/Icons/26_64_1.png', 'UI/Fitting/FittingWindow/TurretDpsTooltip', 'DamagePerSecondTurrets'), ('droneDps', 'res:/UI/Texture/Icons/drones.png', 'UI/Fitting/FittingWindow/DroneDpsTooltip', 'DamagePerSecondDrones'), ('missileDps', 'res:/UI/Texture/Icons/81_64_16.png', 'UI/Fitting/FittingWindow/MissileDpsTooltip', 'DamagePerSecondMissiles'))
    # Pixel size of each damage-source icon.
    iconSize = 26

    def ApplyAttributes(self, attributes):
        BaseMenuPanel.ApplyAttributes(self, attributes)

    def LoadPanel(self, initialLoad=False):
        """(Re)build the panel UI: one icon + label cell per damage source."""
        self.Flush()
        self.ResetStatsDicts()
        self.display = True
        parentGrid = self.GetValueParentGrid(columns=len(self.damageStats))
        for dps, texturePath, hintPath, tooltipName in self.damageStats:
            hint = GetByLabel(hintPath)
            c = self.GetValueCont(self.iconSize)
            parentGrid.AddCell(cellObject=c)
            icon = Sprite(texturePath=texturePath, parent=c, align=uiconst.CENTERLEFT, pos=(0,
             0,
             self.iconSize,
             self.iconSize), state=uiconst.UI_DISABLED)
            SetFittingTooltipInfo(targetObject=c, tooltipName=tooltipName)
            c.hint = hint
            label = EveLabelMedium(text='', parent=c, state=uiconst.UI_DISABLED, align=uiconst.CENTERLEFT)
            # Keep handles so _UpdateOffenseStats can update the widgets later.
            self.statsLabelsByIdentifier[dps] = label
            self.statsIconsByIdentifier[dps] = icon
            self.statsContsByIdentifier[dps] = c

        BaseMenuPanel.FinalizePanelLoading(self, initialLoad)

    def UpdateOffenseStats(self):
        # Recompute on a worker tasklet to keep the UI responsive.
        uthread.new(self._UpdateOffenseStats)

    def _UpdateOffenseStats(self):
        """Query dogma for per-source DPS and push the numbers into the labels."""
        itemID = self.controller.GetItemID()
        turretDps, missileDps = self.dogmaLocation.GetTurretAndMissileDps(itemID)
        dpsText = GetByLabel('UI/Fitting/FittingWindow/DpsLabel', dps=turretDps)
        self.SetLabel('turretDps', dpsText)
        missileText = GetByLabel('UI/Fitting/FittingWindow/DpsLabel', dps=missileDps)
        self.SetLabel('missileDps', missileText)
        droneDps, drones = self.dogmaLocation.GetOptimalDroneDamage(itemID)
        droneText = GetByLabel('UI/Fitting/FittingWindow/DpsLabel', dps=droneDps)
        self.SetLabel('droneDps', droneText)
        # Status line shows the combined total of all three sources.
        totalDps = turretDps + missileDps + droneDps
        totalDpsText = GetByLabel('UI/Fitting/FittingWindow/DpsLabel', dps=totalDps)
        self.SetStatusText(totalDpsText)
| [
"masaho.shiro@gmail.com"
] | masaho.shiro@gmail.com |
24b1dd003a704844352756f3fd2812733e0fd7d8 | 6955cf08b26ddce910f4932374d9b5242680009f | /tasks/toplevel.py | e3710f3b479bba0c705d153c244b825c31850b05 | [
"MIT"
] | permissive | ADicksonLab/wepy | 6c4cea39dacecf4597e0278a0a7e4a50aa3641e5 | 3a029510114db6e66db6a264bd213c9f06559b41 | refs/heads/master | 2023-04-30T03:26:22.365330 | 2023-04-21T15:50:39 | 2023-04-21T15:50:39 | 101,077,926 | 43 | 21 | MIT | 2023-08-31T04:01:04 | 2017-08-22T15:24:10 | Python | UTF-8 | Python | false | false | 88 | py | """User editable top-level commands"""
from invoke import task
from .config import *
| [
"samuel.lotz@salotz.info"
] | samuel.lotz@salotz.info |
43f27c1adc4b1af940ae9d90439fd9baa7553be4 | cefbd23ef2b043517299534b9dc1c75907cf233a | /bameditor/deal_mut/dealHaplotype.py | 507d9f6720cbc140551bfd0c68c1503447f21034 | [
"MIT"
] | permissive | fangshuangsang/BamEditor | ee27b40d02e52519fce354ec9c603a6502173932 | 5702c3bcfd1e93139096432c7bd537a50852cdca | refs/heads/master | 2020-03-26T22:59:48.343459 | 2018-08-21T09:41:54 | 2018-08-21T09:41:54 | 145,502,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,123 | py | from bameditor.deal_mut.readEditor import editRead
from bameditor.common.methods import count_coverage, getReadStrand, find_mate
from bameditor.common.bamconvert import remap
from bameditor.common.bamobject import Read
from collections import OrderedDict
import random
import pysam
from multiprocessing import Pool
import os
def deal_haplotype(bam_file, haplotype, reffasta, haplotype_prefix, mindepth, minmutreads, minmapq, diffcover,
                   is_single, is_multmapfilter, aligner, aligner_index, **kwargs):
    """Select reads covering one haplotype, edit the mutations into them and remap.

    Returns (my_chosen_reads, my_mate_reads, real_mut_reads_num, depth) on
    success, or (False, reason) when the depth / mutation-read-count /
    coverage-difference constraints are not met.

    NOTE(review): ``depth`` counts fetched read records overlapping the region,
    not per-base coverage — confirm this matches the intended depth definition.
    """
    reads_dict = OrderedDict()
    bam = pysam.AlignmentFile(bam_file, 'rb')
    reads = bam.fetch(reference=haplotype.chrom, start=haplotype.start, end=haplotype.end + 1)
    depth = 0
    for read in reads:
        depth += 1
        # Keep only mapped, primary, non-supplementary (flag 2048) alignments.
        if read.reference_start is not None and not read.is_secondary and bin(read.flag & 2048) != bin(2048):
            if read.query_name not in reads_dict:
                reads_dict[read.query_name] = {}
            strand = getReadStrand(read)
            reads_dict[read.query_name][strand] = read

    # judge depth and mut reads whether qualified
    if depth < int(mindepth):
        print "depth less than min depth!"
        return False, "haplotype in position %s:%s-%s: depth less than min depth(%s)" % (
            haplotype.chrom, haplotype.start, haplotype.end, mindepth)
    else:
        # Number of reads that must carry the mutation to reach the target frequency.
        mut_reads_num = int(depth * haplotype.freq)
        if mut_reads_num < int(minmutreads):
            print "mutation reads num less than minmutreads!"
            return False, "haplotype in position %s:%s-%s: mut reads less than min mut reads(%s)" % (
                haplotype.chrom, haplotype.start, haplotype.end, minmutreads)
    print "start pick reads"
    res = pick_reads(bam, reads_dict, mut_reads_num, is_single, minmapq, is_multmapfilter)
    if res[0] is False:
        return False, "haplotype in position %s:%s-%s: %s" % (haplotype.chrom, haplotype.start, haplotype.end, res[1])
    chosen_reads, mate_reads = res
    print "end pick reads"
    # edit
    my_chosen_reads = {}
    my_mate_reads = {}
    tmp_bam_file = haplotype_prefix + ".chosen.edited.bam"
    tmp_bam = pysam.AlignmentFile(tmp_bam_file, 'wb', template=bam)
    chosen_reads_num = 0
    real_mut_reads_num = 0
    for readName, readInfo in chosen_reads.items():
        my_chosen_reads[readName] = {}
        tmp_dict = {}
        tmp_dict2 = {}
        for strand, read in readInfo.items():
            # Apply the haplotype's mutations to a copy of the read sequence.
            my_read = Read(read)
            res = editRead(my_read, reffasta, haplotype.mutList)
            if res is False:
                continue
            real_mut_reads_num += 1
            sequence, quality, shift = res
            read.query_sequence = sequence
            read.query_qualities = quality
            tmp_dict[strand] = my_read
            tmp_dict2[strand] = read
        if is_single:
            for strand in tmp_dict:
                my_chosen_reads[readName][strand] = tmp_dict[strand]
                tmp_bam.write(tmp_dict2[strand])
                chosen_reads_num += 1
        else:
            # Paired mode: write either the complete edited pair, or one edited
            # read plus its unedited mate (so remapping still sees proper pairs).
            if len(tmp_dict) == 0:
                continue
            elif len(tmp_dict) == 1 and readName in mate_reads:
                for strand in tmp_dict:
                    my_chosen_reads[readName][strand] = tmp_dict[strand]
                    tmp_bam.write(tmp_dict2[strand])
                    chosen_reads_num += 1
                mate_read = mate_reads[readName]
                my_mate_reads[readName] = Read(mate_read)
                tmp_bam.write(mate_read)
            elif len(tmp_dict) == 2:
                for strand in tmp_dict:
                    my_chosen_reads[readName][strand] = tmp_dict[strand]
                    tmp_bam.write(tmp_dict2[strand])
                    chosen_reads_num += 1

    tmp_bam.close()
    # alignment and judge coverdiff whether qualified
    chosen_bam_file = haplotype_prefix + ".chosen.remap.bam"
    genome_index = aligner_index
    remap(genome_index, tmp_bam_file, chosen_bam_file, aligner, is_single)
    chosen_bam = pysam.AlignmentFile(chosen_bam_file)
    if judge_coverdiff(bam, depth, chosen_bam, chosen_reads_num, haplotype, float(diffcover)):
        return my_chosen_reads, my_mate_reads, real_mut_reads_num, depth
    else:
        return False, "haplotype in position %s:%s-%s: coverdiff is less than minDiffCover" % (
            haplotype.chrom, haplotype.start, haplotype.end)
def pick_reads(bam, reads_dict, choose_num, is_single, minmapq, is_multmapfilter):
    """Randomly select reads (or read pairs) for mutation editing.

    :param bam: open pysam.AlignmentFile; only used to look up mates in paired mode
    :param reads_dict: mapping of read name -> {strand: read record}
    :param choose_num: number of read records (not read names) to select
    :param is_single: True for single-end data
    :param minmapq: minimum mapping quality for a read to be eligible
    :param is_multmapfilter: if True, reject reads carrying an XA (alternate hit) tag
    :return: (chosen_reads, mate_reads) on success, or (False, message) when
             there are not enough eligible reads.

    Fixes over the previous version:
    - the single-end branch tracked only *accepted* indices, so reads that
      failed the quality filters were retried forever -> infinite loop when
      fewer eligible reads exist than ``choose_num``; both branches now track
      every *tried* index in a set and bail out once all candidates are seen;
    - ``dict.keys()`` is materialized with ``list()`` so indexing works on
      Python 3 as well as Python 2.
    """
    total_reads_num = len(reads_dict)
    if total_reads_num == 0:
        # Avoid random.randint(0, -1) blowing up on an empty region.
        return False, 'cannot find enough reads for mutation, need reads: %s, find reads: %s' % (
            choose_num, 0)
    chosen_reads = {}
    keys = list(reads_dict.keys())  # O(1) indexing, works on py2 and py3
    chosen_ids = set()  # indices accepted into chosen_reads
    tried_ids = set()   # every index examined so far (accepted or rejected)
    mate_reads = {}
    num = 0
    if is_single:
        while num < choose_num:
            choose_id = random.randint(0, total_reads_num - 1)
            if choose_id in tried_ids:
                # All candidates examined and still short of choose_num: give up.
                if len(tried_ids) == total_reads_num:
                    return False, 'cannot find enough reads for mutation, need reads: %s, find reads: %s' % (
                        choose_num, num)
                continue
            tried_ids.add(choose_id)
            read_name = keys[choose_id]
            strand = list(reads_dict[read_name].keys())[0]
            read = reads_dict[read_name][strand]
            # Quality filters: mapping quality and a minimum read length of 250bp.
            if int(read.mapping_quality) < minmapq or len(read.query_sequence) < 250:
                continue
            chosen_reads[read_name] = {strand: read}
            chosen_ids.add(choose_id)
            num += 1
    else:
        while num < choose_num:
            choose_id = random.randint(0, total_reads_num - 1)
            if choose_id in tried_ids:
                if len(tried_ids) == total_reads_num:
                    return False, 'cannot find enough reads for mutation, need reads: %s, find reads: %s' % (
                        choose_num, num)
                continue
            tried_ids.add(choose_id)
            read_name = keys[choose_id]
            strand_keys = list(reads_dict[read_name].keys())
            if len(reads_dict[read_name]) == 1:
                # Only one end of the pair overlaps the region.
                read = reads_dict[read_name][strand_keys[0]]
                if int(read.mapping_quality) < minmapq:
                    continue
                if is_multmapfilter and read.has_tag("XA"):
                    continue
                chosen_reads[read_name] = {strand_keys[0]: read}
                if not read.mate_is_unmapped:
                    # Fetch the mate so the pair stays intact after remapping.
                    mate_read = find_mate(read, bam)
                    if mate_read is not None:
                        mate_reads[read_name] = mate_read
                    else:
                        print("no mate read found!")
            if len(reads_dict[read_name]) == 2:
                read1 = reads_dict[read_name][strand_keys[0]]
                read2 = reads_dict[read_name][strand_keys[1]]
                if int(read1.mapping_quality) < minmapq or int(read2.mapping_quality) < minmapq:
                    continue
                if is_multmapfilter and (read1.has_tag("XA") or read2.has_tag("XA")):
                    continue
                chosen_reads[read_name] = {strand_keys[0]: read1,
                                           strand_keys[1]: read2}
            # num counts read records, so a full pair advances it by two.
            num += len(reads_dict[read_name])
            chosen_ids.add(choose_id)
    return chosen_reads, mate_reads
def judge_coverdiff(bam, depth, chosen_bam, chosen_reads_num, haplotype, min_diff_cover):
    """Check that coverage after read replacement stays close enough to the original."""
    region = (haplotype.chrom, haplotype.start, haplotype.end)
    old_coverage = count_coverage(bam, *region)
    remapped_coverage = count_coverage(chosen_bam, *region)
    # Reads fetched but not re-edited still contribute to the new coverage.
    new_coverage = remapped_coverage + (depth - chosen_reads_num)
    return new_coverage * 1.0 / old_coverage > min_diff_cover
def deal_haplotype_multi(bam_file, haplotype_list, out_dir, reffasta, process, mindepth,
                         minmutreads, minmapq, diffcover, is_single, is_multmapfilter,
                         aligner, aligner_index, invalid_log, success_list):
    """Run deal_haplotype for every haplotype in a process pool and merge results.

    Writes a per-haplotype summary to <out_dir>/haplotype.txt and the list of
    successfully processed haplotypes to *success_list*; failures are logged to
    *invalid_log*. Returns (total_chosen_reads, total_chosen_reads_muts).
    """
    haplotype_pool = Pool(processes=int(process))
    haplotype_res = []
    haplotype_temp_out_dir = os.path.join(out_dir, "haplotype_out")
    if not os.path.exists(haplotype_temp_out_dir):
        os.mkdir(haplotype_temp_out_dir)
    for haplotype in haplotype_list:
        haplotype_prefix = os.path.join(haplotype_temp_out_dir,
                                        "%s_%s_%s" % (haplotype.chrom, haplotype.start, haplotype.end))
        haplotype_res.append(
            haplotype_pool.apply_async(deal_haplotype,
                                       args=(bam_file, haplotype, reffasta, haplotype_prefix, mindepth,
                                             minmutreads, minmapq, diffcover, is_single, is_multmapfilter,
                                             aligner, aligner_index)))

    haplotype_pool.close()
    haplotype_pool.join()
    # step3: merge mut_list of each read
    total_chosen_reads = {}
    total_chosen_reads_muts = {}
    total_chosen_mate_reads = {}
    haplotype_out_file = os.path.join(out_dir, "haplotype.txt")
    hap_out = open(haplotype_out_file, 'w')
    fout_s = open(success_list, 'w')
    for each_res, haplotype in zip(haplotype_res, haplotype_list):
        res = each_res.get()
        # NOTE(review): a success whose chosen-reads dict is empty is also falsy
        # and would be logged as invalid here — confirm that is acceptable.
        if not res[0]:
            invalid_log.info(res[1])
            continue
        fout_s.write(haplotype.mutinfo() + "\n")
        chosen_reads, mate_reads, real_mut_reads_num, depth = res
        # Summary line: haplotype, depth, mutated reads, achieved frequency.
        hap_out.write(
            "\t".join(
                [str(haplotype), str(depth), str(real_mut_reads_num),
                 str(real_mut_reads_num * 1.0 / depth)]) + "\n")
        # invalid_log.info(invalid)
        for read_name in chosen_reads:
            if read_name not in total_chosen_reads:
                total_chosen_reads[read_name] = {}
                total_chosen_reads_muts[read_name] = {}
            for strand in chosen_reads[read_name]:
                if strand not in total_chosen_reads[read_name]:
                    total_chosen_reads[read_name][strand] = chosen_reads[read_name][strand]
                    total_chosen_reads_muts[read_name][strand] = []
                # A read may be picked by several haplotypes; accumulate all mutations.
                total_chosen_reads_muts[read_name][strand].extend(haplotype.mutList)
        for read_name in mate_reads:
            if read_name not in total_chosen_mate_reads:
                total_chosen_mate_reads[read_name] = mate_reads[read_name]

    fout_s.close()
    hap_out.close()
    return total_chosen_reads, total_chosen_reads_muts
| [
"fangshuangsang@gene.ac"
] | fangshuangsang@gene.ac |
f3a43ef0015900475f2c2da760ba827c2fe933df | 923f1c7bd149d37c23c5b2f067baab3f5b95a4cf | /setup.py | 309502c9f88be647a041ae202762971497a89441 | [
"BSD-2-Clause"
] | permissive | Lokeshburade007/python-mammoth | 7467d08ad906e932fbdba720557ee5fd8d862c28 | f8eb2e1214b7ef1749f2cf73a91b09c9f3adf6a8 | refs/heads/master | 2023-08-21T00:35:06.783844 | 2021-10-12T18:52:31 | 2021-10-12T18:52:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,514 | py | #!/usr/bin/env python
import os
import sys
from setuptools import setup
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's directory.

    Used to load the README for ``long_description``. The file is opened via a
    context manager so the handle is closed deterministically (the previous
    version leaked it until garbage collection).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
setup(
    name='mammoth',
    version='1.4.17',
    description='Convert Word documents from docx to simple and clean HTML and Markdown',
    # README contents become the PyPI long description.
    long_description=read("README"),
    author='Michael Williamson',
    author_email='mike@zwobble.org',
    url='http://github.com/mwilliamson/python-mammoth',
    packages=['mammoth', 'mammoth.docx', 'mammoth.html', 'mammoth.styles', 'mammoth.styles.parser', 'mammoth.writers'],
    # Installs a `mammoth` command-line converter.
    entry_points={
        "console_scripts": [
            "mammoth=mammoth.cli:main"
        ]
    },
    keywords="docx word office clean html markdown md",
    install_requires=[
        "cobble>=0.1.3,<0.2",
    ],
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
    license="BSD-2-Clause",
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
)
| [
"mike@zwobble.org"
] | mike@zwobble.org |
21e654ff366601ad396ad938ac9ea2c375d5d094 | 5c008dad5881eaefe36501628d8d3c16e122b9c0 | /Week_01/examples/num_iterate_q2.py | 9dfcab74432181feb0efee201324778fc017b752 | [] | no_license | xeusteerapat/MIT-6.00.1x-EDX | 846787898b06cd5223fa825cf9757f2e92449b02 | cae706564efdc41e435b309493484ea9348c908d | refs/heads/master | 2020-07-18T22:30:43.502441 | 2019-11-04T16:16:51 | 2019-11-04T16:16:51 | 206,325,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | divisor = 2
# Walk the even numbers 0, 2, 4, 6, 8; true division by `divisor`
# yields floats, so this prints 0.0, 1.0, 2.0, 3.0, 4.0.
for num in range(0, 10, 2):
    print(num/divisor)
| [
"xeus085@gmail.com"
] | xeus085@gmail.com |
824450c7fa223ebcedb7ecc9358f30483f01e5d4 | 235c6af1174c36b66c8212a5216fe16fcf333e3b | /hr_gpb/tests/migrations/0004_testing_type.py | 2254a005a3f706af385fc6bace861f1f607759fb | [] | no_license | vmoshikov/cp_final_mco | 21e5081ac9f2b0f4576224fe18a0ac9a9a9af616 | e37b4f455c6561835dcc14f919a765c354bca398 | refs/heads/master | 2023-01-18T23:50:34.510086 | 2020-11-29T05:49:33 | 2020-11-29T05:49:33 | 316,544,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | # Generated by Django 3.1.3 on 2020-11-28 10:37
from django.db import migrations, models
class Migration(migrations.Migration):

    # Depends on the migration that introduced the TestingSolution model.
    dependencies = [
        ('tests', '0003_testingsolution'),
    ]

    operations = [
        # Add a task-type selector to Testing: 0 = code, 1 = text,
        # 2 = creative (display labels are in Russian); defaults to code.
        migrations.AddField(
            model_name='testing',
            name='type',
            field=models.IntegerField(choices=[(0, 'Код'), (1, 'Текст'), (2, 'Творческая')], default=0),
        ),
    ]
"vmoshikov@gmail.com"
] | vmoshikov@gmail.com |
5c5135fbae58089e6508a9f29d16bb8b0b6d7268 | d0227639cbe42193209ed83bbadd58fc344e5a5c | /SUTD_IEEE_Kivy_Workshop/Kivy_workshop_2/Examples/button.py | 29257aeade80fd6d66d346af1e208eea4ed66e71 | [] | no_license | SUTD-IEEE/workshop-resources | de438ab85c71b51ef0f8ca68b3ebcf1f1c920b66 | e2131672880351b446b2edcbaea33f6e381917a8 | refs/heads/master | 2023-05-11T19:55:31.219026 | 2019-10-16T14:34:33 | 2019-10-16T14:34:33 | 158,399,332 | 0 | 1 | null | 2023-05-01T21:16:16 | 2018-11-20T14:07:58 | Python | UTF-8 | Python | false | false | 805 | py | from kivy.app import App
from kivy.uix.boxlayout import BoxLayout # put the parent object you want your window to inherit when the app starts here
class mainWindow(BoxLayout):  # the root window widget; inherits the layout imported from kivy
    # No Python-side logic: the whole widget tree is declared in the matching .kv file.
    pass
# create your Application object which inherits the App feature in kivy library
class buttonApp(App):  # the class name "<kivy_file_name>App" must match the kivy file name ("button.kv")
    # build() is called by kivy at startup and must return the root widget.
    def build(self):
        return mainWindow()
if __name__ == "__main__":
    # Instantiate the app and start the kivy event loop.
    buttonApp().run()
| [
"xmliszt@gmail.com"
] | xmliszt@gmail.com |
b81540805d7dd0c978d6e2c6ae2b0d43270153f2 | c883edee8089b6b6d63fe09122a490e4f24e775f | /safarirally/urls.py | a6d27b0a29230b073b550de2211b3f5be604dc3f | [] | no_license | AlexWanyoike/SafariRally--Capstone | eb91186b9ea749c58c0afa6631dc1fefe72d6474 | f927106796c554ca043341ee3d5560f43d3c32ee | refs/heads/main | 2023-05-29T13:47:51.548286 | 2021-06-11T00:29:02 | 2021-06-11T00:29:02 | 375,275,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,605 | py | """safarirally URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path , include
from django_registration.backends.one_step.views import RegistrationView
from django.contrib.auth import views as auth_views
from django.contrib.auth.views import LoginView,LogoutView
from django.conf import settings
from django.conf.urls.static import static
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
    path('admin/', admin.site.urls),
    # Main site routes.
    path('', include('rally.urls')),
    path('accounts/register/', RegistrationView.as_view(success_url='/welcome'), name='django_registration_register'),
    path('logout/', auth_views.LogoutView.as_view(next_page='/')),
    # NOTE(review): 'accounts/' is included twice (auth urls here, registration
    # urls below); resolution is first-match in order — confirm this is intended.
    path('accounts/', include('django.contrib.auth.urls')),
    # NOTE(review): the extra {"next_page": '/'} dict is passed to LoginView as
    # view kwargs; verify this Django version's LoginView actually honours it.
    path('login/', LoginView.as_view(), {"next_page": '/'}),
    path('accounts/', include('django_registration.backends.one_step.urls')),
    path('tinymce/', include('tinymce.urls')),
    # DRF token endpoint for API clients.
    path('api-token-auth/', obtain_auth_token)
]
| [
"alex.wanyoike12@gmail.com"
] | alex.wanyoike12@gmail.com |
44e1cc6abfc82d0901e764a12160ec58254b3fc3 | f461843969e18cbe693485fc6e204b9d75b12e19 | /multi_robot_master_slave_communication/devel/lib/python2.7/dist-packages/hello_protocol/srv/__init__.py | e6ecfec9743f6686dd4325c48fb48e5bc43a8f7c | [] | no_license | PubuduC/Master-Slave-Communication-ROS | cbd45d8e0242675feaacfc6d8d765c4f89db3685 | 0b79804592f1deb903a0738bdeb398612fdbe4b1 | refs/heads/master | 2020-03-29T13:53:31.040116 | 2018-09-23T13:37:02 | 2018-09-23T13:37:02 | 149,987,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | from ._Connection import *
from ._Wave import *
| [
"pubuduc.14@cse.mrt.ac.lk"
] | pubuduc.14@cse.mrt.ac.lk |
0f7eff054630379b7d3bb9aad4e78e03d1842976 | eff453a069766e0913cd7cc211a5a9de1ab65535 | /django_todo/settings.py | a06e4d0cf59925e03a79126ef10348fb0c52350b | [] | no_license | saffiya/ci-fullstackframerworks-django | 63ff4fbaa485f667f798c7d06b9fafc3f4418bbd | 75be29377e2a4a5210fade4f87b4aa5fb3ce1bfd | refs/heads/master | 2023-08-11T21:47:49.983276 | 2020-11-09T19:08:04 | 2020-11-09T19:08:04 | 302,541,654 | 0 | 0 | null | 2021-09-22T19:38:09 | 2020-10-09T05:28:50 | HTML | UTF-8 | Python | false | false | 3,411 | py | """
Django settings for django_todo project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
import dj_database_url
development = os.environ.get('DEVELOPMENT', False)
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', '')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = development
if development:
ALLOWED_HOSTS = ['localhost']
else:
ALLOWED_HOSTS = [os.environ.get('HEROKU_HOSTNAME')]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'todo',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_todo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
if development:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
else:
DATABASES = {
'default': dj_database_url.parse(os.environ.get('DATABASE_URL'))
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"saffiyax@icloud.com"
] | saffiyax@icloud.com |
bee86f6052648fe66498190667983e309d90b6fd | 33527330cb02279266f49ae2a82416a57cd5266f | /mytestsite/catalog/views.py | 28bbada4b8d67bf59a1db1a2f921b3eceea17adf | [] | no_license | poiuy098/django_library | 9dff8546afcd9eabebd05c89917d08ea09ad868b | acbf0a3e2f96e63b6b44c10fe2017a71936f48fc | refs/heads/master | 2022-08-04T16:59:23.328116 | 2020-05-13T05:49:44 | 2020-05-13T05:49:44 | 263,524,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,091 | py | from django.shortcuts import render
from .models import Book, Author, BookInstance, Genre
# Create your views here.
def index(request):
"""View function for home page of site."""
# Generate counts of some of the main objects
num_books = Book.objects.all().count()
num_instances = BookInstance.objects.all().count()
# Available books (status = 'a')
num_instances_available = BookInstance.objects.filter(status__exact='a').count()
# The 'all()' is implied by default.
num_authors = Author.objects.count()
# Number of visits to this view, as counted in the session variable.
num_visits = request.session.get('num_visits', 0)
# Set a session value
request.session['num_visits'] = num_visits + 1
context = {
'num_books': num_books,
'num_instances': num_instances,
'num_instances_available': num_instances_available,
'num_authors': num_authors,
'num_visits': num_visits,
}
# Render the HTML template index.html with the data in the context variable
return render(request, 'index.html', context=context)
from django.views.generic import ListView
from django.views.generic import DetailView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import permission_required
# @permission_required('catalog.can_mark_returned')
class BookListView(ListView):
model = Book
# context_object_name = 'my_book_list' # your own name for the list as a template variable
# queryset = Book.objects.filter(author_icontains='hsu') #get 5 books containing the title war
# template_name = 'books/my_arbitrary_template_name_list.html' # Specify your own template name/location
class BookDetailView(DetailView):
model = Book
paginate_by = 10
# class AuthorListView(ListView):
# model = Author
class AuthorListView(ListView):
model = Author
class AuthorDetailView(DetailView):
model = Author
class LoanedBooksByUserListView(LoginRequiredMixin,ListView):
"""Generic class-based view listing books on loan to current user."""
model = BookInstance
template_name ='catalog/bookinstance_list_borrowed_user.html'
paginate_by = 10
def get_queryset(self):
return BookInstance.objects.filter(borrower=self.request.user).filter(status__exact='o').order_by('due_back')
from django.contrib.auth.mixins import PermissionRequiredMixin
class LoanedBooksAllListView(PermissionRequiredMixin,ListView):
"""Generic class-based view listing all books on loan. Only visible to users with can_mark_returned permission."""
model = BookInstance
permission_required = 'catalog.can_mark_returned'
template_name = 'catalog/bookinstance_list_borrowed_all.html'
paginate_by = 10
def get_queryset(self):
return BookInstance.objects.filter(status__exact='o').order_by('due_back')
from django.contrib.auth.decorators import permission_required
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
import datetime
from .forms import RenewBookForm
@permission_required('catalog.can_mark_returned')
def renew_book_librarian(request, pk):
"""
View function for renewing a specific BookInstance by librarian
"""
book_inst=get_object_or_404(BookInstance, pk = pk)
# If this is a POST request then process the Form data
if request.method == 'POST':
# Create a form instance and populate it with data from the request (binding):
form = RenewBookForm(request.POST or None)
# Check if the form is valid:
if form.is_valid():
# process the data in form.cleaned_data as required (here we just write it to the model due_back field)
book_inst.due_back = form.cleaned_data['renewal_date']
book_inst.save()
# redirect to a new URL:
return HttpResponseRedirect(reverse('all-borrowed') )
# If this is a GET (or any other method) create the default form.
else:
proposed_renewal_date = datetime.date.today() + datetime.timedelta(weeks=3)
form = RenewBookForm(initial={'renewal_date': proposed_renewal_date,})
return render(request, 'catalog/book_renew_librarian.html', {'form': form, 'bookinst':book_inst})
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from .models import Author
class AuthorCreate(CreateView):
model = Author
fields = '__all__'
initial={'date_of_death':'05/01/2018',}
class AuthorUpdate(UpdateView):
model = Author
fields = ['first_name','last_name','date_of_birth','date_of_death']
class AuthorDelete(DeleteView):
model = Author
success_url = reverse_lazy('authors')
class BookCreate(CreateView):
model = Book
fields = '__all__'
class BookUpdate(UpdateView):
model = Book
fields = ['title','author','summary','isbn','genre']
class BookDelete(DeleteView):
model = Book
success_url = reverse_lazy('books') | [
"chiayuhsu35@gmail.com"
] | chiayuhsu35@gmail.com |
90d61a45791a4c4fca451ce3958912b1271ff667 | f71d67025b732e66e1a37c02c05392c3dd116d65 | /Lessons/ITP1/08_Character/d.py | 81a5619fd4674529f96b237cb3fef6f221b7ee12 | [] | no_license | clarinet758/aoj | 2829f92137dd1a93734445e1e92513f8e3e0b5c0 | 21787ffee1a6dd60c717d7b880b63107187e4710 | refs/heads/main | 2023-06-25T12:04:40.127040 | 2023-06-19T16:36:38 | 2023-06-19T16:36:38 | 34,978,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import time
import sys
import io
import re
import math
import itertools
#sys.stdin=file('input.txt')
#sys.stdout=file('output.txt','w')
#10**9+7
mod=1000000007
#mod=1777777777
pi=3.141592653589
xy=[(1,0),(-1,0),(0,1),(0,-1)]
bs=[(-1,-1),(-1,1),(1,1),(1,-1)]
#start = time.clock()
n=raw_input()*2
print'Yes' if raw_input() in n else'No'
ans=chk=0
#end = time.clock()
#print end - start
| [
"clarinet758@gmail.com"
] | clarinet758@gmail.com |
c7e87cf056a58a6c73ad62703787f7056eccda69 | e05f2cd35de3ec146608bcdcde0daf51ec5b60e3 | /plot_radar_rms.py3 | 14889020b4ab182ae9e713493fa0edc7d3773160 | [] | no_license | louiswicker/DART_example | ceccfdb1607ef74da42c6973ec117cef70771685 | c74ddefc501d9daf614a8c830815a382fed9c06a | refs/heads/master | 2022-02-12T16:24:09.740083 | 2019-07-19T20:59:20 | 2019-07-19T20:59:20 | 197,837,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,689 | py3 |
import pandas as pd
import numpy as np
from netcdftime import utime
import matplotlib.pyplot as plt
import sys, os, glob
from optparse import OptionParser
import datetime as dtime
import xarray as xr
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
from pltbook import nice_mxmnintvl, nice_clevels
time_format = "%Y-%m-%d_%H:%M:%S"
day_utime = utime("days since 1601-01-01 00:00:00")
sec_utime = utime("seconds since 1970-01-01 00:00:00")
# definitions for the plot layout with multiple panels
_cmin, _cmax, _cinc = -5., 25., 2.0
auto_clevels = False
left, width = 0.1, 0.5
bottom, height = 0.1, 0.5
bottom_h = left_h = left+width+0.03
rectC = [left, bottom, width, height]
rectX = [left, bottom_h, width, 0.2]
rectY = [left_h, bottom, 0.2, height]
mpl.rcParams['figure.figsize'] = (12,10)
_obs_error = [7.5, 3.0]
# Create 15 min bins for a 9 hour period (9x4=36). Each bin will average for analysis times
time_bins = [ (15*t,15*(t+3)) for t in range(36)]
# Create uneven height bins because of MRMS and other radar scans.
height_bins = [(0.0, 1000.), (1000., 2000.), (2000., 3000.), (3000., 4000.),
(4000., 5000.), (5000., 6000.), (7000.,8000.), (8000.,9000.), (9000.,10000.)]
#-------------------------------------------------------------------------------
#
def obs_seq_read_netcdf(filename, retFileAttr = False):
if retFileAttr == False:
try:
return xr.open_dataset(filename).to_dataframe()
except IOError:
print(" \n ----> netCDF obs_seq_final file not found! \n")
sys.exit(-1)
else:
try:
xa = xr.open_dataset(filename)
return xa.to_dataframe(), xa.attrs
except IOError:
print(" \n ----> netCDF obs_seq_final file not found! \n")
sys.exit(-1)
#-------------------------------------------------------------------------------
#
def obs_seq_get_obtype(df, kind=None, name=None):
if kind:
idx = df['kind'] == kind
return df[idx]
if name:
idx = df['name'] == name
return df[idx]
print("\n OBS_SEQ_GET_OBTYPE: no kind or name specified, exiting \n")
sys.exit(-1)
#-------------------------------------------------------------------------------
#
def obs_seq_2D_bin(df, variable, time=None, height=None, threshold=None, dart_qc=True):
# Create the data structures needed for bin information
fbins = np.zeros((len(height),len(time)))
bins = np.zeros((len(height),len(time)))
spread = np.zeros((len(height),len(time)))
num_obs = np.zeros((len(height),len(time)))
min = []
hgt = []
for n, t in enumerate(time):
# Create coordinate list for time in minutes
min.append(t[0])
# This string is used to bin data in time
time_string = '%d <= anal_min <= %d' % (t[0], t[1])
# Pandas dataframe query: This query string returns a new dataframe with only those
# rows that match the string.
cut0_df = df.query(time_string)
for m, z in enumerate(height):
# This string is used to bin data in height
height_string = '%f < height <= %f' % (z[0], z[1])
# Pandas dataframe query: This query string returns a new dataframe with only those
# rows that match the string.
cut1_df = cut0_df.query(height_string)
# Create coordinate list for heights
if n == 0:
hgt.append(0.5*(z[0]+z[1]))
# Remove all DART_QC indices != 0.0 because those are the only ones assimilated...
if dart_qc:
cut2_df = cut1_df.query("dart_qc < 0.1")
else:
cut2_df = cut1_df
if threshold != None: # threshold is a string, like "heights > 2000."
cut3_df = cut2_df.query(threshold)
bins[m,n] = np.sqrt((cut3_df[variable]**2).mean())
spread[m,n] = cut3_df['sdHxf'].mean()
num_obs[m,n] = np.sum(cut3_df[variable] != 0.0)
else:
bins[m,n] = np.sqrt((cut2_df[variable]**2).mean())
spread[m,n] = cut2_df['sdHxf'].mean()
num_obs[m,n] = np.sum(cut2_df[variable] != 0.0)
if threshold != None:
del cut0_df, cut1_df, cut2_df, cut3_df
else:
del cut0_df, cut1_df, cut2_df
return {'spread': spread, 'bin2d': bins, 'num_obs': num_obs,
'mins': np.array(min), 'hgts': np.array(hgt)}
#-------------------------------------------------------------------------------
#
def obs_seq_TimeHeightRMS(data_dict, plotlabel=None, otype="RMS", ptype = "Prior", obs_error=7.5):
fig = plt.figure(figsize=(12,12))
fig.text(0.68, 0.75, "\nBlack Line: RMSI\nBlue Line: %s Spread\nRed: 10*Consist Ratio\nGreen: # of obs" % ptype,
size=16, va="baseline", ha="left", multialignment="left")
if plotlabel != None:
fig.text(0.68, 0.68, plotlabel, size=20, va="baseline", ha="left", multialignment="center")
zmin = 0.0
zmax = 10.
# Decouple data_dict
spread = np.ma.masked_invalid(data_dict['spread'])
anal_min = np.ma.masked_invalid(data_dict['mins'])
z = np.ma.masked_invalid(data_dict['hgts'])
data = np.ma.masked_invalid(data_dict['bin2d'])
num_obs = np.ma.masked_invalid(data_dict['num_obs'])
datebins = []
minutes_from = dtime.datetime.strptime("2017-05-16_18:00:00", time_format)
for min in anal_min:
datebins.append(minutes_from + dtime.timedelta(0,int(min)*60))
# 2D plot
axC = plt.axes(rectC)
if auto_clevels:
cmin, cmax, cint, clevels = nice_clevels(-30, 30, outside=False, cint = 5.0)
else:
clevels = np.arange(_cmin, _cmax+_cinc, _cinc)
cs1=axC.contourf(datebins, z/1000., data, clevels, cmap=cm.get_cmap('YlOrRd'))
cs2=axC.contour(datebins, z/1000., data, cs1.levels, colors='k')
start = datebins[0].strftime("%Y%m%d%H%M%S")
end = datebins[-1].strftime("%Y%m%d%H%M%S")
s = dtime.datetime.strptime(start, "%Y%m%d%H%M%S")
e = dtime.datetime.strptime(end, "%Y%m%d%H%M%S")
axC.set_xlim(s, e)
axC.set_ylim(zmin,zmax)
maj_loc = mdates.MinuteLocator(interval=30)
axC.xaxis.set_major_locator(maj_loc)
dateFmt = mdates.DateFormatter('%H:%M')
axC.xaxis.set_major_formatter(dateFmt)
min_loc = mdates.MinuteLocator(interval=15)
axC.xaxis.set_minor_locator(min_loc)
labels = axC.get_xticklabels()
plt.setp(labels, rotation=40, fontsize=10)
axC.clabel(cs2, inline=1, fontsize=10, fmt="%1.1f")
axC.set_ylabel("Height (km)")
axC.set_xlabel("Time")
# 1D time series plot
axX = plt.axes(rectX)
time_data = data.mean(axis=0)
time_spread = spread.mean(axis=0)
time_consist= 10*(time_spread**2 + obs_error**2) / (time_data**2)
# time_consist= np.sqrt((time_spread**2 + obs_error**2))
start = datebins[0].strftime("%Y%m%d%H%M%S")
end = datebins[-1].strftime("%Y%m%d%H%M%S")
s = dtime.datetime.strptime(start, "%Y%m%d%H%M%S")
e = dtime.datetime.strptime(end, "%Y%m%d%H%M%S")
axX.plot(datebins, time_data, lw=2.0, color='k')
axX.plot(datebins, time_spread, lw=1.0, color='b')
axX.plot(datebins, time_consist, lw=1.0, color='r')
# Twin the x-axis to create a double y axis for num_obs
axX2 = axX.twinx()
axX2.plot(datebins, num_obs.mean(axis=0), lw=1.0, color='g')
axX2.set_ylabel('No. of Obs', color='g')
axX2.tick_params('y', colors='g')
axX.set_xlim(s, e)
axX.set_ylim(_cmin, _cmax)
axX.set_xticklabels([])
axX.set_ylabel("RMSI/Spread/Consist")
min_loc = mdates.MinuteLocator(interval=15)
axX.xaxis.set_minor_locator(min_loc)
axX.grid(True)
# 1D Height Plotting Plotting
axY = plt.axes(rectY)
height_data = data.mean(axis=1)
height_spread = spread.mean(axis=1)
height_consist= (height_spread**2 + obs_error**2) / (height_data**2)
# height_consist= np.sqrt((height_spread**2 + obs_error**2))
axY.plot(height_data, z/1000., lw=2.0, color='k')
axY.plot(height_spread, z/1000., lw=1.0, color='b')
axY.plot(height_consist, z/1000., lw=1.0, color='r')
# Twin the y-axis to create a double x axis for num_obs
axY2 = axY.twiny()
axY2.plot(num_obs.mean(axis=1), z/1000., lw=1.0, color='g')
axY2.set_xlabel('No. of Obs', color='g')
axY2.tick_params('x', colors='g')
axY.set_ylim(0.0,z.max()/1000.)
axY.set_xlim(-5.,15)
axY.set_yticklabels([])
axY.set_xlabel("RMSI/Spread/Consist")
# major ticks every 20, minor ticks every 5
major_ticks = np.arange(0,16,4)
minor_ticks = np.arange(0,16,2)
axY.set_xticks(major_ticks)
axY.set_xticks(minor_ticks, minor=True)
axY.grid(True)
#=========================================================================================
# Plot the innovations from an obs_seq_file.nc file created by dart_cc.py
#-------------------------------------------------------------------------------
def main(argv=None):
if argv is None:
argv = sys.argv
# Command line interface for DART_cc
parser = OptionParser()
parser.add_option("-f", "--file", dest="file", default=None, type="string",
help = "netCDF4 obs_seq_final to process")
parser.add_option("-v", "--variable", dest="var", default="REF", type="string",
help = "radar variable to process [REF, VR], default is REF")
parser.add_option("-p", "--plotfile", dest="plotfilename", default=None, type="string",
help = "name of output pdf file")
parser.add_option( "--show", dest="show", default=False, action="store_true", help="Turn off screen plotting")
parser.add_option( "--thres", dest="thres", default=None, help="use this to threshold calculations using an obs floor")
parser.add_option("--dir", dest="dir", default="./", type="string", help = "full pathname where to put image")
(options, args) = parser.parse_args()
if options.file == None:
print("\n NO FILE IS SUPPLIED, EXITING.... \n ")
parser.print_help()
print()
sys.exit(1)
# Read in the data
dataset, fileAttrs = obs_seq_read_netcdf(options.file, retFileAttr = True)
# Get the radar variable out of file fileAttrs has a dictionary for the DART ob type kinds
if options.var == "VR":
kind = fileAttrs['DOPPLER_RADIAL_VELOCITY']
_cmin, _cmax, _cinc = -15., 15., 1.0
obs_error = _obs_error[1]
else:
kind = fileAttrs['RADAR_REFLECTIVITY']
obs_error = _obs_error[0]
field = obs_seq_get_obtype(dataset, kind=kind)
# Construct a output pdf filename
if options.plotfilename == None:
file = os.path.split(options.file)[-1]
file_time = dtime.datetime.strptime(file[-11:-3], "%Y%m%d")
if file.find("obs") > 0:
plotfilename = "%s/%s_%sRMS_%s" % (options.dir, options.var, file[0:file.find("obs")], file[-11:-3])
else:
plotfilename = "%s/%s_RMS_%s" % (options.dir, options.var, file[-11:-3])
plotlabel = "REALTIME\nOBTYPE: %s" % (options.var)
else:
plotfilename = options.plotfilename
plotlabel = "%s\nDART QC ON" % (options.var)
if options.thres != None:
plotlabel = "%s\nDART QC ON\n%s" % (plotlabel, options.thres)
# Bin data
data_dict = obs_seq_2D_bin(field, 'innov', time=time_bins, height=height_bins, threshold=options.thres)
# Plot data
if options.file.find("final") != 0:
obs_seq_TimeHeightRMS(data_dict, plotlabel=plotlabel, ptype="Prior", obs_error=obs_error)
else:
obs_seq_TimeHeightRMS(data_dict, plotlabel=plotlabel, obs_error=obs_error)
# Saving and plotting
plt.savefig(plotfilename+".png")
if options.show:
plt.show()
#-------------------------------------------------------------------------------
# Main program for testing...
#
if __name__ == "__main__":
sys.exit(main())
# End of file
| [
"Louis.Wicker@noaa.gov"
] | Louis.Wicker@noaa.gov |
bbd6937df6f4f79ac032e28715614e30d4335ddf | 16fa0fd1cc6fa4f8996a626ab4cc625e5e33207c | /Comment/apps.py | 8cae2737e81a4fb05520c4a75c281f9e866f748a | [] | no_license | Arash3f/zoro_blog | ead1fba404f8140f4f7b13b23515fa280063072f | 9157afe5352481b8befa15bdd2ef093297b52cf0 | refs/heads/master | 2023-07-19T04:31:27.348756 | 2021-08-29T00:30:45 | 2021-08-29T00:30:45 | 400,917,468 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | from django.apps import AppConfig
class CommentConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'Comment'
| [
"arash.alfooneh@gmail.com"
] | arash.alfooneh@gmail.com |
36e5c2eb9d63b44287a1631df6b90e4a75fc9927 | bcd23137a7284513821435f30fb5c516d742349e | /ordersapp/migrations/0001_initial.py | b7f8bd8179e005c8c82e40eee9991987621ae156 | [] | no_license | FlintVorselon51/Geekshop2 | da6bd6237172c7ffe2ec9534fa2b1e5e55d77199 | b81061cf02ed5749c8b3fbb933515dba5b054bea | refs/heads/master | 2023-06-29T15:15:38.538699 | 2021-07-19T13:10:14 | 2021-07-19T13:10:14 | 378,866,204 | 0 | 0 | null | 2021-07-23T15:34:31 | 2021-06-21T08:51:01 | Python | UTF-8 | Python | false | false | 1,961 | py | # Generated by Django 3.2.4 on 2021-07-01 15:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('mainapp', '0003_product_is_active'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='создан')),
('updated', models.DateTimeField(auto_now=True, verbose_name='обновлен')),
('is_active', models.BooleanField(default=True)),
('status', models.CharField(choices=[('FM', 'Формируется'), ('STP', 'Отправлен в обработку'), ('PRD', 'Обработан'), ('PD', 'Оплачен'), ('RD', 'Готов к выдаче'), ('CNC', 'Отменен'), ('DVD', 'Выдан')], default='FM', max_length=3, verbose_name='статус')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.PositiveSmallIntegerField(default=0, verbose_name='количество')),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orderitems', to='ordersapp.order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.product', verbose_name='продукт')),
],
),
]
| [
"arapov14861@gmail.com"
] | arapov14861@gmail.com |
804535c7be0e830cc0a6354de5218716a95b2ba7 | 6ad93f33b9fe5d595648d36d74e23e7f0ddeb124 | /Models/NB/ARXIV/train.py | 7638b300f4725c605ea507617c2a581a39e9ffc0 | [] | no_license | nimitpatel26/NLP_Project | 3d481ba513756b4db287e7233620e05fe3ce8a91 | 540e09031243bc755178a3e0547ee21d0c2773ce | refs/heads/master | 2020-09-07T16:56:14.947651 | 2019-12-18T23:46:09 | 2019-12-18T23:46:09 | 220,852,005 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,200 | py |
import pickle
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.naive_bayes import MultinomialNB
from scipy.sparse import dok_matrix,vstack
from collections import OrderedDict
from multiprocessing import Pool, Process
from sklearn import metrics
import multiprocessing as mp
import time
import numpy as np
import math
LABELS = OrderedDict({'math': 0, 'physics': 1, 'nlin': 2, 'q-bio': 3,
'cs': 4, 'stat': 5, 'q-fin': 6, 'econ': 7, 'eess': 8})
def main():
mainData = pickle.load(open("../../../Data/XY_ARXIV.p","rb"))
X = mainData[0]
Y = mainData[1]
X_test = mainData[2]
Y_test = mainData[3]
del mainData
nb = MultinomialNB()
# Fit the model
print("FITTING THE DATA")
nb.fit(X,Y)
with open("../../../Data/nbARXIVModel.p","wb") as handle:
pickle.dump(nb,handle)
# prevent recursive multiprocessing in windows
if __name__ == '__main__':
main()
| [
"jacob.barnett@appliedis.com"
] | jacob.barnett@appliedis.com |
ab8138777508b8e8d77eae92daa1bf55dad2b21c | f06893da365d6fc8d7ccb94b5b4091ad046ecc4c | /pytorch-yolo-v3/camera.py | 26f8ae4e22a79d23985cf76822c678be56503a64 | [
"MIT"
] | permissive | Shiyipaisizuo/target_detection | ca9e77829893a88045b1f68f93ef64364bb4cf8f | 59bb626021f6e850ef3e1b817189e6dd4eaedbac | refs/heads/master | 2020-03-27T03:00:50.412150 | 2018-08-23T09:32:34 | 2018-08-23T09:32:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,135 | py | from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
from util import *
from darknet import Darknet
from preprocess import prep_image, inp_to_image
import pandas as pd
import random
import argparse
import pickle as pkl
def prep_image(img, inp_dim):
"""
Prepare image for inputting to the neural network.
Returns a Variable
"""
orig_im = img
dim = orig_im.shape[1], orig_im.shape[0]
img = cv2.resize(orig_im, (inp_dim, inp_dim))
img_ = img[:, :, ::-1].transpose((2, 0, 1)).copy()
img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)
return img_, orig_im, dim
def write(x, img):
c1 = tuple(x[1:3].int())
c2 = tuple(x[3:5].int())
cls = int(x[-1])
label = "{0}".format(classes[cls])
color = random.choice(colors)
cv2.rectangle(img, c1, c2, color, 1)
t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
cv2.rectangle(img, c1, c2, color, -1)
cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4),
cv2.FONT_HERSHEY_PLAIN, 1, [225, 255, 255], 1)
return img
def arg_parse():
"""
Parse arguements to the detect module
"""
parser = argparse.ArgumentParser(description='YOLO v3 Cam Demo')
parser.add_argument("--confidence", dest="confidence",
help="Object Confidence to filter predictions", default=0.25)
parser.add_argument("--nms_thresh", dest="nms_thresh",
help="NMS Threshhold", default=0.4)
parser.add_argument("--reso", dest='reso', help="Input resolution of the network. Increase to increase accuracy. Decrease to increase speed",
default="160", type=str)
return parser.parse_args()
if __name__ == '__main__':
cfgfile = "./cfg/yolov3.cfg"
weightsfile = "./weights/yolov3.weights"
num_classes = 80
args = arg_parse()
confidence = float(args.confidence)
nms_thesh = float(args.nms_thresh)
start = 0
CUDA = torch.cuda.is_available()
num_classes = 80
bbox_attrs = 5 + num_classes
model = Darknet(cfgfile)
model.load_weights(weightsfile)
model.net_info["height"] = args.reso
inp_dim = int(model.net_info["height"])
assert inp_dim % 32 == 0
assert inp_dim > 32
if CUDA:
model.cuda()
model.eval()
videofile = 'video.avi'
cap = cv2.VideoCapture(0)
assert cap.isOpened(), 'Cannot capture source'
frames = 0
start = time.time()
while cap.isOpened():
ret, frame = cap.read()
if ret:
img, orig_im, dim = prep_image(frame, inp_dim)
# im_dim = torch.FloatTensor(dim).repeat(1,2)
if CUDA:
im_dim = im_dim.cuda()
img = img.cuda()
output = model(Variable(img), CUDA)
output = write_results(
output, confidence, num_classes, nms=True, nms_conf=nms_thesh)
if type(output) == int:
frames += 1
print("FPS of the video is {:5.2f}".format(
frames / (time.time() - start)))
cv2.imshow("frame", orig_im)
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
continue
output[:, 1:5] = torch.clamp(
output[:, 1:5], 0.0, float(inp_dim))/inp_dim
# im_dim = im_dim.repeat(output.size(0), 1)
output[:, [1, 3]] *= frame.shape[1]
output[:, [2, 4]] *= frame.shape[0]
classes = load_classes('data/coco.names')
colors = pkl.load(open("pallete", "rb"))
list(map(lambda x: write(x, orig_im), output))
cv2.imshow("frame", orig_im)
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
frames += 1
print("FPS of the video is {:5.2f}".format(
frames / (time.time() - start)))
else:
break
| [
"shiyipaisizuo@gmail.com"
] | shiyipaisizuo@gmail.com |
649f08992e51522902cc6c5842f273b6b76b591f | 21f599250699a8bfe5e2f8be2ec9855e0e04b2cd | /Visualization/PlottingScripts/Figure_361_ChargedPionRAA.py | 3d5a8d6b6c0686b9ad5c0bb63ebd3a1520eb3833 | [] | no_license | amitkr2410/HPC_data_analysis | 36f313968c2667e0d30c1d5ad7381973e91dbcf1 | 3ae572b354ea8a95eb5d6e5f828487e1137f62ff | refs/heads/main | 2023-05-31T02:10:02.814571 | 2023-05-15T05:03:48 | 2023-05-15T05:03:48 | 209,374,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,880 | py | import matplotlib.pyplot as plt
import numpy as np
import matplotlib.pyplot as plt
# define format for the plots
import matplotlib as mpl
#serif
# --- Global matplotlib style: bold Times font rendered through LaTeX ---
mpl.rc('font', family='times', weight='bold')
mpl.rcParams['text.usetex'] = True
# LaTeX preamble: amsmath for math, lmodern fonts, \boldmath for bold math symbols
params = {'text.latex.preamble' : r'\usepackage{amsmath} \usepackage{lmodern} \boldmath'}
mpl.rcParams.update(params)
mpl.rcParams["font.weight"]="bold"
mpl.rcParams["axes.labelweight"]="bold"
# Canvas size (inches) and default line/marker/axes-frame widths
mpl.rcParams['figure.figsize'] = [6., 4.5]
mpl.rcParams['lines.linewidth'] = 2
mpl.rcParams['lines.markersize'] = 7
mpl.rcParams['axes.linewidth'] = 1.5
# x-axis ticks: mirrored on the top edge, pointing inward, minor ticks shown
mpl.rcParams['xtick.top'] = True
mpl.rcParams['xtick.labelsize'] = 15
mpl.rcParams['xtick.major.size'] = 8
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['xtick.minor.size'] = 5
mpl.rcParams['xtick.minor.width'] = 1.5
mpl.rcParams['xtick.minor.visible'] = True
mpl.rcParams['xtick.direction'] = "in"
# y-axis ticks: mirrored on the right edge, pointing inward, minor ticks hidden
mpl.rcParams['ytick.right'] = True
mpl.rcParams['ytick.labelsize'] = 15
mpl.rcParams['ytick.major.size'] = 8
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['ytick.minor.size'] = 5
mpl.rcParams['ytick.minor.width'] = 1.5
mpl.rcParams['ytick.minor.visible'] = False
mpl.rcParams['ytick.direction'] = "in"
# Legend/global font sizes and default figure save format
mpl.rcParams['legend.fontsize'] = 15
mpl.rcParams['legend.numpoints'] = 1
mpl.rcParams['font.size'] = 15
# mpl.rcParams['font.weight'] = 'black'
# mpl.rcParams['axes.labelweight'] = 'black'
mpl.rcParams['savefig.format'] = "pdf"
########################################################################################################################################################
# Set Color, Line Style, and Markers
##--
# Colors and line styles for the theory curves (one entry per theory file)
color_theory = ['red','blue','green']
line_theory = ['solid','dashed','dotted']
# Colors and marker shapes for the experimental data sets
color_data = ['black','magenta']
marker_data = ['o','s','^','X'] ## o: circle, s: square, ^:triangle, X:cross
########################################################################################################################################################
# In[4]:
########################################################################################################################################################
# Set File Path and labels
##--
# p+p baseline spectrum (JETSCAPE MATTER calculation, 200 GeV)
file_pp_theory = '../200GeV/ColorlessSingleHadronYield_pp_Jetscape_STAR_200GeV_EtaMax_0p35_FS_ChargedHadron_Cent_0-10_MATTER_ppAuAu_BGS0.txt'
# Medium-modified (A+A) spectra; one file and one legend label per theory curve
files_aa_theory = ['../200GeV/ColorlessSingleHadronYield_PbPb_Jetscape_STAR_200GeV_EtaMax_0p35_FS_ChargedHadron_Cent_0-10_MatterLBT_withRecoil_Type5_alphas_0p3_Q0_2GeV_BGS1.txt']
label_theory = [r'$\hat{q}=\hat{q}^{\mathrm{run}}_{\mathrm{HTL}}f(Q^{2}), \mathrm{JS(MATTER+LBT) }$']
# Experimental R_AA measurement files and their legend labels
files_data = ['../../ExperimentData_All/ExpData_RHIC_200_Raa_Centrality_0_10.dat']
label_data = ['$\mathrm{PHENIX~[PRC~87,~034911~(2013)]},~\pi^{0}$']
# Destination of the final figure
OutputFilename='../Plots/ChargedPion_RAA_200GeV_PHENIX_y0p35.pdf'
# Axis ranges: transverse momentum (GeV) on x, R_AA on y
x_min =8.0
x_max =21
y_min = 0
y_max = 1.0
########################################################################################################################################################
# In[5]:
########################################################################################################################################################
# Set Multiplication Factors
##--
# Per-curve multiplicative scaling factors (1 = no scaling)
factor_theory = [1,1,1]
# NOTE: factor_data aliases the *same* list object as factor_theory,
# so mutating one mutates the other — presumably intentional; confirm.
factor_data = factor_theory
########################################################################################################################################################
# In[6]:
########################################################################################################################################################
# Functions to load ASCII-format data files
##--
def GetTheory(filename):
    """Load a theory curve from a whitespace-separated ascii file.

    Columns are: x, y, x-error, y-error.  Besides the raw columns this
    also returns the step-plot arrays (bin edges plus the last value
    repeated) used by axs.step().
    """
    table = np.loadtxt(filename, comments='#')
    x, y = table[:, 0], table[:, 1]
    xerr, yerr = table[:, 2], table[:, 3]
    # Bin edges for a step plot: all left edges plus the final right edge.
    xstep = np.append(x - xerr, x[-1] + xerr[-1])
    # Repeat the last value so the final bin is actually drawn.
    ystep = np.append(y, y[-1])
    return x, y, xerr, yerr, xstep, ystep
def GetExp(filename):
    """Load experimental data points from a whitespace-separated ascii file.

    Columns are: x, y, x-error-low, x-error-high, y-stat-error,
    y-sys-error-low, y-sys-error-high.  Systematic errors and the step
    arrays are padded with the last value so they line up with the
    step-plot bin edges used by fill_between().
    """
    table = np.loadtxt(filename, comments='#')
    x, y = table[:, 0], table[:, 1]
    xerrl, xerrh = table[:, 2], table[:, 3]
    yerr = table[:, 4]
    # Pad systematic bands with the last point to match the step edges.
    ysysl = np.append(table[:, 5], table[-1, 5])
    ysysh = np.append(table[:, 6], table[-1, 6])
    xstep = np.append(x - xerrl, x[-1] + xerrh[-1])
    ystep = np.append(y, y[-1])
    # Symmetrise the horizontal error as the mean of low/high.
    return x, y, 0.5 * (xerrl + xerrh), yerr, xstep, ystep, ysysl, ysysh
##---
def GetRatioTheoryToTheory(num_file, den_file, error=False ):
    """Ratio of two theory curves (numerator / denominator).

    When *error* is True the statistical errors of both curves are
    propagated via RatioError(); otherwise only the numerator error is
    scaled by the denominator values.
    """
    x_n, y_n, xerr_n, yerr_n, xstep_n, ystep_n = GetTheory(num_file)
    den = GetTheory(den_file)
    y_d, yerr_d, ystep_d = den[1], den[3], den[5]
    if error:
        yerr = RatioError(y_n, yerr_n, y_d, yerr_d)
    else:
        yerr = yerr_n / y_d
    return x_n, y_n / y_d, xerr_n, yerr, xstep_n, ystep_n / ystep_d
##---
def GetRatioExpToTheory(num_file, den_file):
    """Ratio of experimental data over a theory curve (data / theory)."""
    x_e, y_e, xerr_e, yerr_e, xstep_e, ystep_e, ysysl_e, ysysh_e = GetExp(num_file)
    theory = GetTheory(den_file)
    y_t, ystep_t = theory[1], theory[5]
    return (x_e, y_e / y_t, xerr_e, yerr_e / y_t,
            xstep_e, ystep_e / ystep_t, ysysl_e / ystep_t, ysysh_e / ystep_t)
##---
def RatioError(v1,e1,v2,e2):
    """Propagate uncorrelated errors for the ratio v1 / v2.

    v1, e1: numerator value and error.
    v2, e2: denominator value and error.
    """
    term_num = e1 / v2
    term_den = (v1 / v2) * (e2 / v2)
    # Quadrature sum of the two independent contributions.
    return np.sqrt(term_num * term_num + term_den * term_den)
########################################################################################################################################################
# In[7]:
########################################################################################################################################################
# Fucntions to add JETSCAPE Logo
##--
from reportlab.pdfgen import canvas
from PyPDF2 import PdfFileWriter, PdfFileReader
import sys
def add_jetscape_logo(filename, x0, y0, dx, tag=''):
    """Overlay the JETSCAPE logo onto the first page of *filename*.pdf.

    A temporary one-page PDF ('temp.pdf') containing just the logo image
    is created, merged onto the first page of the input, and the result
    is written to *filename* + *tag* + '.pdf'.  The caller is expected
    to remove 'temp.pdf' afterwards (as the main script does).

    x0, y0: lower-left corner of the logo in PDF points.
    dx:     logo width; height follows the image's 100:67 aspect ratio.
    """
    input_filename = filename+'.pdf'
    # Keep the 100:67 aspect ratio of JetscapeLogo.jpg.
    dy = dx*(67.0/100.0)
    c = canvas.Canvas('temp.pdf')
    c.drawImage('JetscapeLogo.jpg', x0,y0,dx,dy)
    c.save()
    output = PdfFileWriter()
    output_filename = filename+tag+'.pdf'
    # Bug fix: the input and watermark files were opened but never closed,
    # leaking file handles on every call.  Context managers release them
    # even on error.  Both readers must stay open until output.write()
    # finishes, because PyPDF2 reads page content lazily.
    with open(input_filename, "rb") as in_fp, open("temp.pdf", "rb") as wm_fp:
        input1 = PdfFileReader(in_fp)
        watermark = PdfFileReader(wm_fp)
        input_page = input1.getPage(0)
        input_page.mergePage(watermark.getPage(0))
        output.addPage(input_page)
        # finally, write "output" to document-output.pdf
        with open(output_filename, "wb") as outputStream:
            output.write(outputStream)
########################################################################################################################################################
# In[34]:
########################################################################################################################################################
# Main Plot Code RAA
##--
import os
fig, axs = plt.subplots(1, 1, figsize=(6,4.5), sharex='col', sharey='row', gridspec_kw={'hspace': 0, 'wspace': 0})
## Log Scale for xy axes
#axs.set_xscale('log')
# axs.set_yscale('log')
axs.set_xlim(x_min,x_max)
axs.set_ylim(y_min,y_max)
## horizontal line at y=1
axs.axhline(1, color = "black", linewidth=0.2, alpha=0.5)
axs.yaxis.set_major_locator(mpl.ticker.MultipleLocator(0.2))
axs.yaxis.set_minor_locator(mpl.ticker.MultipleLocator(0.1))
## Label for xy axes
axs.set_xlabel(r'$p_{\mathrm{T}}~\mathrm{(GeV)}$', fontsize=18)
axs.set_ylabel(r'$R^{\pi}_{\mathrm{AA}}$', fontsize=16)
## Plot Theory Lines
for i, file in enumerate(files_aa_theory):
x, y, xerr, yerr, xstep, ystep = GetRatioTheoryToTheory(file,file_pp_theory,error=True)
# Steps
axs.step(xstep, factor_theory[i]*ystep, where='post', color=color_theory[i],linestyle=line_theory[i])
# Error bars for stat errors
axs.errorbar(x, factor_theory[i]*y, factor_theory[i]*yerr, marker="", linestyle="none", color=color_theory[i])
# Plot Exp Data Points
for i, file in enumerate(files_data):
x, y, xerr, yerr, xstep, ystep, ysysl, ysysh = GetExp(file)
# Markers with error bars for stat errors
axs.errorbar(x, factor_theory[i]*y, yerr=factor_theory[i]*yerr, linestyle="none", color = color_data[i], marker=marker_data[i])
# Shades for sys errors
axs.fill_between(xstep, factor_theory[i]*(ystep-ysysl), factor_theory[i]*(ystep+ysysh), step='post', alpha=0.2, color = color_data[i])
## Legentds
handles = []
labels = []
for i, file in enumerate(files_data):
dp = axs.errorbar(0, 0, 0, linestyle="none", color = color_data[i], marker=marker_data[i])
dsys = axs.fill(np.NaN, np.NaN, alpha=0.2, color = color_data[i])
handles.append((dp[0],dsys[0]))
labels.append(label_data[i])
for i, file in enumerate(files_aa_theory):
tl = axs.errorbar(0, 0, color=color_theory[i],linestyle=line_theory[i])
handles.append(tl[0])
labels.append(label_theory[i])
axs.legend(handles,labels,ncol=1,loc='center left',edgecolor='none', frameon=True, facecolor='none', handletextpad=0.4, handleheight=1.8, labelspacing=0.2, bbox_to_anchor=(0, 0.65), borderaxespad=0.8, handlelength=1.6, fontsize=14)
## Text
axs.text(x_min+0.6, y_max-0.2, '$\mathrm{AuAu~(0\\text{-}10\%),~\sqrt{\it{s}_{\mathrm{NN}}}=200~GeV}$\n'+ '$|y|<0.35$' , horizontalalignment='left', verticalalignment='bottom')
axs.text(x_min+1.5, y_max-0.51,'$\mathrm{\\alpha^{\mathrm{fix}}_{\mathrm{s}}=0.3},~Q_{\mathrm{sw}}=2~\mathrm{GeV},~(\pi^{+}+\pi^{-})/2$',horizontalalignment='left', verticalalignment='bottom')
## Generate PDF File
plt.tight_layout()
plt.savefig('temp2')
## Add Logos
add_jetscape_logo('temp2',320,240,80,'_logo')
os.remove('temp.pdf')
RenameFileName='mv temp2_logo.pdf' + ' ' + OutputFilename
os.system(RenameFileName)
os.remove('temp2.pdf')
CommandOpenFile='open ' + OutputFilename
os.system(CommandOpenFile)
########################################################################################################################################################
# In[ ]:
| [
"amitkr2410@gmail.com"
] | amitkr2410@gmail.com |
438b696275ec2df4858fda817feab0936b95b4a3 | d4613cdafada5d47e45c42e344787cefa8b46f26 | /code/models/slowfast/utils/distributed.py | 9f7996203fe85e59154688b9a2c9eb992c7a7034 | [] | no_license | subburajs/ChaLearn-2021-ISLR-Challenge | 3b27106a71b5d7d4b1ef50f5547d032a01bc4b3c | b401a487a613b6e9dcfb42ce9ca134e7bda8b4f8 | refs/heads/main | 2023-03-28T03:37:48.120375 | 2021-03-19T01:17:07 | 2021-03-19T01:17:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,516 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Distributed helpers."""
import functools
import logging
import pickle
import torch
import torch.distributed as dist
from models.slowfast.utils.env import setup_dist_environment
setup_dist_environment()
def all_gather(tensors):
    """
    All gathers the provided tensors from all processes across machines.
    Args:
        tensors (list): tensors to perform all gather across all processes in
        all machines.
    Returns:
        list: one tensor per input; each is the corresponding tensors from
        all ranks concatenated along dim 0 (dim 0 grows by the world size).
    """
    gather_list = []
    output_tensor = []
    world_size = dist.get_world_size()
    for tensor in tensors:
        # One receive buffer per rank; dist.all_gather fills them in
        # rank order (the initial contents are irrelevant).
        tensor_placeholder = [
            torch.ones_like(tensor) for _ in range(world_size)
        ]
        dist.all_gather(tensor_placeholder, tensor, async_op=False)
        gather_list.append(tensor_placeholder)
    for gathered_tensor in gather_list:
        output_tensor.append(torch.cat(gathered_tensor, dim=0))
    return output_tensor
def all_reduce(tensors, average=True):
    """
    All reduce the provided tensors from all processes across machines.
    Args:
        tensors (list): tensors to perform all reduce across all processes in
        all machines.
        average (bool): scales the reduced tensor by the number of overall
        processes across all machines.
    """
    for t in tensors:
        dist.all_reduce(t, async_op=False)
    if average:
        scale = 1.0 / dist.get_world_size()
        for t in tensors:
            # In-place scaling so callers see the averaged values.
            t.mul_(scale)
    return tensors
def init_process_group(
    local_rank,
    local_world_size,
    shard_id,
    num_shards,
    init_method,
    dist_backend="nccl",
):
    """
    Initializes the default process group.
    Args:
        local_rank (int): the rank on the current local machine.
        local_world_size (int): the world size (number of processes running) on
        the current local machine.
        shard_id (int): the shard index (machine rank) of the current machine.
        num_shards (int): number of shards for distributed training.
        init_method (string): supporting three different methods for
            initializing process groups:
            "file": use shared file system to initialize the groups across
            different processes.
            "tcp": use tcp address to initialize the groups across different
            processes.
        dist_backend (string): backend to use for distributed training. Options
            includes gloo, mpi and nccl, the details can be found here:
            https://pytorch.org/docs/stable/distributed.html
    """
    # Sets the GPU to use.
    torch.cuda.set_device(local_rank)
    # Initialize the process group.
    # Global rank = rank within this machine + per-machine offset.
    proc_rank = local_rank + shard_id * local_world_size
    world_size = local_world_size * num_shards
    dist.init_process_group(
        backend=dist_backend,
        init_method=init_method,
        world_size=world_size,
        rank=proc_rank,
    )
def is_master_proc(num_gpus=8):
    """
    Determines if the current process is the master process.

    Outside of distributed training every process counts as master.
    """
    if not torch.distributed.is_initialized():
        return True
    return dist.get_rank() % num_gpus == 0
def get_world_size():
    """
    Get the size of the world.

    Returns 1 when distributed training is unavailable or uninitialized.
    """
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def get_rank():
    """
    Get the rank of the current process.

    Returns 0 when distributed training is unavailable or uninitialized.
    """
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training
    """
    # No-op unless a multi-process group is actually running.
    if not (dist.is_available() and dist.is_initialized()):
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
@functools.lru_cache()
def _get_global_gloo_group():
    """
    Return a process group based on gloo backend, containing all the ranks
    The result is cached.
    Returns:
        (group): pytorch dist group.
    """
    # nccl groups cannot gather CPU tensors, so build a gloo twin.
    backend = dist.get_backend()
    if backend == "nccl":
        return dist.new_group(backend="gloo")
    return dist.group.WORLD
def _serialize_to_tensor(data, group):
    """
    Serialize the tensor to ByteTensor. Note that only `gloo` and `nccl`
    backend is supported.
    Args:
        data (data): data to be serialized.
        group (group): pytorch dist group.
    Returns:
        tensor (ByteTensor): tensor that serialized.
    """
    backend = dist.get_backend(group)
    assert backend in ["gloo", "nccl"]
    # gloo gathers CPU tensors; nccl requires the payload on GPU.
    device = torch.device("cpu" if backend == "gloo" else "cuda")
    buffer = pickle.dumps(data)
    # Warn on payloads above 1 GB: all-gathering them is slow and can
    # exhaust device memory.
    if len(buffer) > 1024 ** 3:
        logger = logging.getLogger(__name__)
        logger.warning(
            "Rank {} trying to all-gather {:.2f} GB of data on device {}".format(
                get_rank(), len(buffer) / (1024 ** 3), device
            )
        )
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to(device=device)
    return tensor
def _pad_to_largest_tensor(tensor, group):
    """
    Padding all the tensors from different GPUs to the largest ones.
    Args:
        tensor (tensor): tensor to pad.
        group (group): pytorch dist group.
    Returns:
        list[int]: size of the tensor, on each rank
        Tensor: padded tensor that has the max size
    """
    world_size = dist.get_world_size(group=group)
    assert (
        world_size >= 1
    ), "comm.gather/all_gather must be called from ranks within the given group!"
    local_size = torch.tensor(
        [tensor.numel()], dtype=torch.int64, device=tensor.device
    )
    size_list = [
        torch.zeros([1], dtype=torch.int64, device=tensor.device)
        for _ in range(world_size)
    ]
    # First exchange the per-rank payload sizes.
    dist.all_gather(size_list, local_size, group=group)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    if local_size != max_size:
        padding = torch.zeros(
            (max_size - local_size,), dtype=torch.uint8, device=tensor.device
        )
        tensor = torch.cat((tensor, padding), dim=0)
    return size_list, tensor
def all_gather_unaligned(data, group=None):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors).
    Args:
        data: any picklable object
        group: a torch process group. By default, will use a group which
        contains all ranks on gloo backend.
    Returns:
        list[data]: list of data gathered from each rank
    """
    # Fast path: single process, nothing to exchange.
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group) == 1:
        return [data]
    # Pickle the object, pad to the largest payload so all_gather can
    # use equally-shaped buffers, then exchange.
    tensor = _serialize_to_tensor(data, group)
    size_list, tensor = _pad_to_largest_tensor(tensor, group)
    max_size = max(size_list)
    # receiving Tensor from all ranks
    tensor_list = [
        torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)
        for _ in size_list
    ]
    dist.all_gather(tensor_list, tensor, group=group)
    data_list = []
    # Strip each rank's padding before unpickling.
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
| [
"zhouhaocomm@qq.com"
] | zhouhaocomm@qq.com |
28e38d1acbd30bd2f6f1d9c19626c68e8d9108bf | f6c7a98908b01e934d8fd12255e33dfecafbce2a | /eemi_3adev_cours_cryptologie/Diffie–Hellman.py | 88c6271bae392a1557b45322c6fd7b719bc8fe73 | [] | no_license | Noelierx/eemi_3adev | c447d1607fafb77b02e032a013679bc29feff69d | f3e8969a7ac3b84d5797451b5035cdf9366d84ca | refs/heads/master | 2023-08-03T00:32:21.727083 | 2023-07-20T08:21:29 | 2023-07-20T08:21:29 | 167,063,732 | 0 | 0 | null | 2023-08-30T10:56:36 | 2019-01-22T20:38:50 | PHP | UTF-8 | Python | false | false | 1,039 | py | # Variables Utilisées
sharedPrime = 23 # p Prime = principale
sharedBase = 5 # g Base = secondaire
aliceSecret = 6 # a clé secrète d'Alice
bobSecret = 15 # b clé secrète de Bob
# Début
print( "Variables partagées publiquement:")
print( " Publicly Shared Prime: " , sharedPrime )
print( " Publicly Shared Base: " , sharedBase )
# Alice envoie à Bob A = g^a mod p
A = (sharedBase**aliceSecret) % sharedPrime
print( "Alice envoie sur le channel public: " , A )
# Bob envoie à Alice B = g^b mod p
B = (sharedBase ** bobSecret) % sharedPrime
print( "Bob envoie sur le channel public: ", B )
print( "\n------------\n" )
print( "Secret partagé confidentiellement calculé:" )
# Alice calcule le secret partagé: s = B^a mod p
aliceSharedSecret = (B ** aliceSecret) % sharedPrime
print( " Le secret partagé par Alice: ", aliceSharedSecret )
# Bob calcule le secret partagé: s = A^b mod p
bobSharedSecret = (A**bobSecret) % sharedPrime
print( " Le secret partagé par Bob: ", bobSharedSecret )
| [
"noelieroux11@gmail.com"
] | noelieroux11@gmail.com |
7410d04c9172221be25de2d4e2d1d87dad4df20f | 8189ec7435293519b17adf3a6e869fa87ef0c343 | /prediction.py | bf773722ad282ed2e10c74c0554579acf1e8a75d | [] | no_license | martinagalletti/LM | a16c631d2c4db871820b13a733c888e6c30fed1e | 779cfcb7fd478ef3b4144ad41454f162514b33ba | refs/heads/master | 2023-02-08T04:19:30.740596 | 2020-09-18T13:01:11 | 2020-09-18T13:01:11 | 296,615,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,480 | py | from main import *
# Assuming that we have some input (words) compute the final output (word predicted)
def predict(device, net, words, n_vocab, vocab_to_int, int_to_vocab, top_k=5):
net.eval()
state_h, state_c = net.zero_state(1)
state_h = state_h.to(device)
state_c = state_c.to(device)
for w in words:
ix = torch.tensor([[vocab_to_int[w]]]).to(device)
output, (state_h, state_c) = net(ix, (state_h, state_c))
_, top_ix = torch.topk(output[0], k=top_k)
choices = top_ix.tolist()
choice = np.random.choice(choices[0])
words.append(int_to_vocab[choice])
for _ in range(100):
ix = torch.tensor([[choice]]).to(device)
output, (state_h, state_c) = net(ix, (state_h, state_c))
_, top_ix = torch.topk(output[0], k=top_k)
choices = top_ix.tolist()
choice = np.random.choice(choices[0])
words.append(int_to_vocab[choice])
print(' '.join(words))
# NOTE(review): a copy of the sampling loop used to sit here at module
# level, outside predict().  It referenced names (`choice`, `net`,
# `state_h`, ...) that only exist inside the function, so merely
# importing this module raised NameError.  The duplicated loop has been
# removed; the generation logic lives entirely in predict() above.
"martinagalletti@icloud.com"
] | martinagalletti@icloud.com |
e2931dfc55162bc47e12d78b686337ca5d4767de | 6a61dc0110ff8fb9a3d9847a2f68f271bc2569a6 | /test/__init__.py | 6c80434f1375720a3056e1d9263825752301489a | [
"Apache-2.0"
] | permissive | matthagy/sputil | d1df47de043de3c233df99c333921da54ac72ad9 | 8feb0a219138884cf9e861e4a2f0d96fdfea909f | refs/heads/master | 2021-01-23T13:22:38.787147 | 2013-10-12T01:03:21 | 2013-10-12T01:03:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,568 | py |
import unittest
import numpy as np
import scipy.sparse as sp
import sputil as spu
def create_unique_random_number(mn, mx, N):
    """Return a set of N distinct random integers drawn from [mn, mx).

    Raises AssertionError when N exceeds the number of available values.
    """
    # Bug fix: N == mx - mn is feasible (every value used exactly once);
    # the previous `N < mx - mn` assert rejected it needlessly.
    assert N <= mx - mn
    acc = set()
    # Rejection sampling: fine while N is small relative to the range.
    while len(acc) < N:
        acc.add(np.random.randint(mn, mx))
    return acc
def make_sparse_matrix(N_rows=1, N_cols=1, fill_frac=0.01,
                       dtype=float, format='csr'):
    """Build a random sparse matrix with ~fill_frac of entries populated."""
    dtype = np.dtype(dtype)
    m = sp.dok_matrix((N_rows, N_cols), dtype=dtype)
    total = N_rows * N_cols
    n_fill = int(round(fill_frac * total))
    # Draw distinct flat indices, then scatter them as (row, col) pairs.
    for flat_inx in create_unique_random_number(0, total, n_fill):
        row_i, col_i = divmod(flat_inx, N_cols)
        m[row_i, col_i] = np.random.randint(-1000000, 1000000)
    assert m.getnnz() == n_fill
    return m.asformat(format)
def make_sparse_row(N_cols, format='csr', **kwds):
    # Convenience wrapper: a 1 x N_cols random sparse row vector.
    return make_sparse_matrix(N_rows=1, N_cols=N_cols, format=format, **kwds)
def make_sparse_col(N_rows, format='csc', **kwds):
    # Convenience wrapper: an N_rows x 1 random sparse column vector.
    return make_sparse_matrix(N_rows=N_rows, N_cols=1, format=format, **kwds)
def assert_eq_sparse_matrices(m1, m2):
    """Assert that two sparse matrices have the same shape and contents."""
    assert m1.shape == m2.shape
    # Compare densified contents element-wise.
    assert (m1.toarray() == m2.toarray()).all()
def make_hstack_mat_set(N=100, N_rows=25, min_n_cols=20, max_n_cols=50, format='csc', **kwds):
    """Make N random matrices sharing a row count, for hstack tests."""
    mats = []
    for _ in xrange(N):
        n_cols = np.random.randint(min_n_cols, max_n_cols)
        mats.append(make_sparse_matrix(N_rows, n_cols, format=format, **kwds))
    return mats
def make_vstack_mat_set(N=100, N_cols=25, min_n_rows=20, max_n_rows=50, format='csr', **kwds):
    """Make N random matrices sharing a column count, for vstack tests."""
    mats = []
    for _ in xrange(N):
        n_rows = np.random.randint(min_n_rows, max_n_rows)
        mats.append(make_sparse_matrix(n_rows, N_cols, format=format, **kwds))
    return mats
class TestHstackCSCCols(unittest.TestCase):
    def testhstack_csc_cols(self):
        # The specialised hstack must agree with scipy's generic hstack.
        cols = [make_sparse_col(1000) for _ in xrange(1000)]
        assert_eq_sparse_matrices(sp.hstack(cols), spu.hstack_csc_cols(cols))
class TestVstackCSRRows(unittest.TestCase):
    def testvstack_csr_rows(self):
        # The specialised vstack must agree with scipy's generic vstack.
        rows = [make_sparse_row(1000) for _ in xrange(1000)]
        assert_eq_sparse_matrices(sp.vstack(rows), spu.vstack_csr_rows(rows))
class TestHstackCSCMats(unittest.TestCase):
    def testhstack_csc_mats(self):
        mats = make_hstack_mat_set(format='csc')
        # Compare against scipy's reference implementation.
        assert_eq_sparse_matrices(sp.hstack(mats), spu.hstack_csc_mats(mats))
class TestVstackCSRMats(unittest.TestCase):
    def testvstack_csr_mats(self):
        mats = make_vstack_mat_set(format='csr')
        # Compare against scipy's reference implementation.
        assert_eq_sparse_matrices(sp.vstack(mats), spu.vstack_csr_mats(mats))
class TestVstackCSCCols(unittest.TestCase):
    def testvstack_csc_mats(self):
        # Single-column CSC matrices stacked vertically.
        mats = make_vstack_mat_set(format='csc', N_cols=1)
        assert_eq_sparse_matrices(sp.vstack(mats), spu.vstack_csc_cols(mats))
class TestVstackCSCMats(unittest.TestCase):
    def testvstack_csc_mats(self):
        mats = make_vstack_mat_set(format='csc')
        # Compare against scipy's reference implementation.
        assert_eq_sparse_matrices(sp.vstack(mats), spu.vstack_csc_mats(mats))
class TestGetSelectCSCCols(unittest.TestCase):
    def testget_select_csc_cols(self):
        m = make_sparse_matrix(N_rows=100, N_cols=200, format='csc')
        indices = np.random.randint(0, m.shape[1], 50)
        cols = spu.get_select_csc_cols(m, indices)
        assert len(cols) == len(indices)
        # Each extracted column must equal the one scipy extracts.
        for inx, col in zip(indices, cols):
            assert_eq_sparse_matrices(col, m.getcol(inx))
class TestGetSelectCSCRRows(unittest.TestCase):
    def testget_select_csr_rows(self):
        m = make_sparse_matrix(N_rows=200, N_cols=100, format='csr')
        indices = np.random.randint(0, m.shape[0], 50)
        rows = spu.get_select_csr_rows(m, indices)
        assert len(rows) == len(indices)
        # Each extracted row must equal the one scipy extracts.
        for inx, row in zip(indices, rows):
            assert_eq_sparse_matrices(row, m.getrow(inx))
class TestGetCSCCols(unittest.TestCase):
    def testget_csc_cols(self):
        m = make_sparse_matrix(N_rows=100, N_cols=200, format='csc')
        cols = spu.get_csc_cols(m)
        # Must return every column, in order.
        assert len(cols) == m.shape[1]
        for inx, col in enumerate(cols):
            assert_eq_sparse_matrices(col, m.getcol(inx))
class TestGetCSCRRows(unittest.TestCase):
    def testget_csr_rows(self):
        m = make_sparse_matrix(N_rows=200, N_cols=100, format='csr')
        rows = spu.get_csr_rows(m)
        # Must return every row, in order.
        assert len(rows) == m.shape[0]
        for inx, row in enumerate(rows):
            assert_eq_sparse_matrices(row, m.getrow(inx))
class TestRemoveCSRRows(unittest.TestCase):
    def testremove_csr_rows(self):
        N_rows = 1000
        remove_frac = 0.1
        rows = [make_sparse_row(1000) for _ in xrange(N_rows)]
        # Pick a random 10% of the row indices to drop.
        shuffled = np.arange(N_rows)
        np.random.shuffle(shuffled)
        remove_indices = np.sort(shuffled[:int(remove_frac * N_rows)])
        removed = set(remove_indices)
        mat = spu.vstack_csr_rows(rows)
        # Expected result: stack only the rows we keep.
        expected = spu.vstack_csr_rows([r for i, r in enumerate(rows)
                                        if i not in removed])
        assert_eq_sparse_matrices(expected, spu.remove_csr_rows(mat, remove_indices))
if __name__ == '__main__':
    unittest.main()
| [
"mhagy3@greengate.chemistry.gatech.edu"
] | mhagy3@greengate.chemistry.gatech.edu |
1160fe2c4176a9a8392411959eb0d17929231848 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-gaussdbforopengauss/huaweicloudsdkgaussdbforopengauss/v3/model/gauss_d_bfor_open_gauss_user_for_list.py | ffed016aea17b6dc6eec320863498e523cbc368d | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,944 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
# NOTE: auto-generated Huawei Cloud SDK model class; the structure
# (openapi_types / attribute_map / to_dict / to_str) follows the SDK's
# code-generation conventions and should stay consistent with sibling
# models.
class GaussDBforOpenGaussUserForList:
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attributes whose values must be masked as "****" in to_dict().
    sensitive_list = []
    openapi_types = {
        'name': 'str',
        'attributes': 'GaussDBforOpenGaussUserForListAttributes',
        'memberof': 'str'
    }
    attribute_map = {
        'name': 'name',
        'attributes': 'attributes',
        'memberof': 'memberof'
    }
    def __init__(self, name=None, attributes=None, memberof=None):
        """GaussDBforOpenGaussUserForList

        The model defined in huaweicloud sdk

        :param name: Account name.
        :type name: str
        :param attributes:
        :type attributes: :class:`huaweicloudsdkgaussdbforopengauss.v3.GaussDBforOpenGaussUserForListAttributes`
        :param memberof: The user's default permissions.
        :type memberof: str
        """
        self._name = None
        self._attributes = None
        self._memberof = None
        self.discriminator = None
        self.name = name
        if attributes is not None:
            self.attributes = attributes
        if memberof is not None:
            self.memberof = memberof
    @property
    def name(self):
        """Gets the name of this GaussDBforOpenGaussUserForList.

        Account name.

        :return: The name of this GaussDBforOpenGaussUserForList.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this GaussDBforOpenGaussUserForList.

        Account name.

        :param name: The name of this GaussDBforOpenGaussUserForList.
        :type name: str
        """
        self._name = name
    @property
    def attributes(self):
        """Gets the attributes of this GaussDBforOpenGaussUserForList.

        :return: The attributes of this GaussDBforOpenGaussUserForList.
        :rtype: :class:`huaweicloudsdkgaussdbforopengauss.v3.GaussDBforOpenGaussUserForListAttributes`
        """
        return self._attributes
    @attributes.setter
    def attributes(self, attributes):
        """Sets the attributes of this GaussDBforOpenGaussUserForList.

        :param attributes: The attributes of this GaussDBforOpenGaussUserForList.
        :type attributes: :class:`huaweicloudsdkgaussdbforopengauss.v3.GaussDBforOpenGaussUserForListAttributes`
        """
        self._attributes = attributes
    @property
    def memberof(self):
        """Gets the memberof of this GaussDBforOpenGaussUserForList.

        The user's default permissions.

        :return: The memberof of this GaussDBforOpenGaussUserForList.
        :rtype: str
        """
        return self._memberof
    @memberof.setter
    def memberof(self, memberof):
        """Sets the memberof of this GaussDBforOpenGaussUserForList.

        The user's default permissions.

        :param memberof: The memberof of this GaussDBforOpenGaussUserForList.
        :type memberof: str
        """
        self._memberof = memberof
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask values declared sensitive; emit everything else as-is.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, GaussDBforOpenGaussUserForList):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
32696f6860af40942b55cdf636030b11405dc3b1 | 46702ef7c5b2e0565c370fe3d2ed6b614e35090c | /python/stick.py | 3b8967d7447c1946ddd40888aac22b5d1d57ed28 | [] | no_license | Imperyall/examples | eda944c1e719dbd89254671c733669b90dbf7201 | 6df7ce73cb26045a586a66de4e349d24ae69b8c4 | refs/heads/master | 2020-07-08T09:47:13.965334 | 2019-08-21T18:34:09 | 2019-08-21T18:34:09 | 203,636,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,761 | py | import time
from opentron.util import asyncio, Util
from opentron.constants import MAX_STICKS
class WorkerStick(Util):
    """Periodic worker that fetches candlestick ("stick") data from the
    configured exchanges, averages it across them and stores it in the DB.
    """
    def __init__(self, name, db, ready_action,
                 stick_action, stocks, symbol,
                 asset, timeframe_id, timeframe,
                 period):
        super().__init__()
        self.name = name
        self.db = db
        self.ready_action = ready_action
        self.stick_action = stick_action
        self.stocks = stocks
        self.symbol = symbol
        self.asset = asset
        self.timeframe_id = timeframe_id
        self.timeframe = timeframe
        self.period = period
        # Set to True externally to start the run() loop.
        self.enable = False
        self.log.info(f'{self.name} | Init Worker')
    # Worker loop
    async def run(self):
        self.log.info(f'{self.name} | Start run stick')
        await self.maintenance()
        # Align the first iteration with the next period boundary.
        step = time.time() % self.period
        await asyncio.sleep(self.period - step)
        while self.enable:
            try:
                response = await self.get_average()
                if len(response) > 0:
                    data = await self.db.get_last_stick(
                        self.asset, self.timeframe_id)
                    if len(data) == 0:
                        # DB was emptied while running: re-backfill.
                        self.log.warning(f'{self.name} | Database is empty')
                        await self.maintenance()
                    else:
                        # First element of each candle is its timestamp.
                        await self.update(
                            [{'time': i.pop(0), 'data': i} for i in response])
                else:
                    self.log.warning(f'{self.name} | Response is empty')
            except Exception as e:
                self.log.error(f'{self.name} | Work error: {e}')
            finally:
                # Sleep one period in 1s slices so disabling is prompt.
                for i in range(self.period):
                    if self.enable:
                        await asyncio.sleep(1)
    # Fetch candles averaged across all configured exchanges
    async def get_average(self, limit=1):
        kwargs = dict(
            limit=limit,
            symbol=self.symbol,
            timeframe=self.timeframe
        )
        average = []
        master = []
        # The master exchange seeds the averages and fixes the timestamps.
        for stock in self.stocks:
            if stock.master:
                master = await stock.get_stock_data(kwargs)
                for i in range(limit):
                    average.append(stock.create_average(master[i]))
                break
        # Fold the remaining exchanges into the running averages.
        for stock in self.stocks:
            if stock.master:
                continue
            current = await stock.get_stock_data(kwargs)
            for i in range(limit):
                if len(current) != 0 and len(current) > i:
                    # Use the master's timestamp for alignment.
                    current[i][0] = master[i][0]
                    stock.create_average(current[i], average[i])
                else:
                    # Missing candle: fall back to the master's data.
                    stock.create_average(master[i], average[i])
        return average
    # Database check / backfill
    async def maintenance(self):
        self.log.info(f'{self.name} | Start DB Maintenance')
        task_id = self.ready_action()
        task_id2 = self.stick_action()
        response = await self.get_average(MAX_STICKS)
        if len(response) > 0:
            data = await self.db.get_last_stick(
                self.asset, self.timeframe_id, MAX_STICKS)
            if len(data) == 0:
                response = [{'time': i.pop(0), 'data': i}
                            for i in response]
            else:
                # Keep only candles newer than the latest stored one.
                last = data[0]
                new_data = []
                for i in response:
                    # NOTE(review): this local `time` shadows the `time`
                    # module inside the rest of this loop.
                    time = i.pop(0)
                    if time > last:
                        new_data.append({'time': time, 'data': i})
                response = new_data
            await self.update(response)
        else:
            self.log.warning(f'{self.name} | Response is empty')
        self.log.info(f'{self.name} | End DB Maintenance')
        self.stick_action(task_id2)
        self.ready_action(task_id)
    # Insert candle data into the database
    async def update(self, values):
        if len(values) > 0:
            try:
                values = [{
                    'asset': self.asset,
                    'timeframe': self.timeframe_id,
                    **i,
                } for i in values]
                if len(values) == 1:
                    # Single candle: upsert through the dedicated helper.
                    values = values[0]
                    await self.db.upd(
                        asset=self.asset,
                        timeframe=self.timeframe_id,
                        time=values['time'],
                        data=values['data']
                    )
                else:
                    # Bulk insert for a backfill batch.
                    await self.db.insert().gino.all(values)
            except Exception as e:
                self.log.error(f'{self.name} | Insert data error: {e}')
| [
"noreply@github.com"
] | noreply@github.com |
2cc804dfdab688bf362e74ce26d321cd757a91f0 | ff30245fbc0f1db3189b11bec501007d5abdfa20 | /app/bot.py | 9ec708a01737e9ae715a1dc3895e31e581c6f839 | [] | no_license | hirotaka-s/Sprint-2016-Advanced | 9ee7a4801ef28d84f9d07f390189ee6ed7270fe8 | 792d2ca2f6176cc365ebdc824ebdbde91b64089f | refs/heads/master | 2020-07-09T14:40:32.081434 | 2016-06-03T09:50:32 | 2016-06-03T09:50:32 | 74,016,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,739 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from tinydb import TinyDB, Query
import json
import requests
import re
import time
class Bot(object):
    """Dispatches chat messages of the form 'bot <cmd> [args]' to BotCommand.

    Command names starting with an underscore are treated as private and
    cannot be invoked through chat.
    """
    def __init__(self):
        self.__commands = BotCommand()
    def is_bot_command(self, recv_message):
        """Return True when the message is addressed to the bot."""
        # Only the first token matters.
        return recv_message.split(' ', 1)[0] == 'bot'
    def command(self, recv_message):
        """Execute a bot command and return its textual reply.

        recv_message looks like 'bot <cmd> [argument string]'.
        """
        # ['bot', 'cmd', 'data']
        split_message = recv_message.split(' ', 2)
        # Bug fix: a bare 'bot' message used to raise an uncaught
        # IndexError on split_message[1]; report a usage hint instead.
        if len(split_message) < 2:
            return 'No command given. Usage: bot <command> [args]'
        cmd_name = split_message[1]
        try:
            if cmd_name.startswith('_'):
                # Hide private helpers from chat users.
                raise AttributeError
            func = getattr(self.__commands, cmd_name)
            params = split_message[2:]
            return func(*params)
        except AttributeError:
            return 'No such command: ' + cmd_name
        except TypeError:
            # NOTE(review): this also catches TypeErrors raised inside the
            # command itself, not only bad-arity calls.
            return 'Arguments for command:' + cmd_name + ' is invalid'
class TodoForBot(object):
    """Simple TODO list persisted in a TinyDB json file."""
    def __init__(self):
        self.__todo_db = TinyDB('todo_for_bot_db.json')
        self.__query = Query()
    def add(self, todo_name, todo_detail):
        """Insert a new todo; returns a human-readable status string."""
        try:
            self.__todo_db.insert({'todo_name' : todo_name, 'todo_detail' :todo_detail})
            return 'todo added'
        except Exception:
            # Bug fix: the bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; catch only real errors.
            return 'todo add faild'
    def delete(self, todo_name):
        """Remove todos matching *todo_name*; reports whether any matched."""
        try:
            if self.__todo_db.remove(self.__query.todo_name == todo_name):
                return 'todo deleted'
            else:
                return 'todo delete faild. no such todo: ' + todo_name
        except Exception:
            return 'todo delete failed'
    def list(self):
        """Return all todos, one 'name detail' pair per line, or 'todo empty'."""
        todo_list = []
        for todo in self.__todo_db.all():
            # Name and detail joined with a single space, as before.
            todo_list.append(' '.join([todo['todo_name'], todo['todo_detail']]))
        if todo_list:
            return '\n'.join(todo_list)
        else:
            return 'todo empty'
class TranslatorForBot(object) :
    """Wrapper around the Microsoft Translator HTTP API.

    Obtains an OAuth access token on construction (credentials read from
    'secret.json') and transparently refreshes it once the reported token
    lifetime has elapsed.
    """
    def __init__(self):
        # Strips XML/HTML tags from the API response (the translation is
        # returned wrapped in an XML element).
        self.__html_tag_re = re.compile(r'<[^>]*?>')
        self.__access_token_last_update_time = None
        # Fallback lifetime (seconds); overwritten by the token response.
        self.__expires_in = 600
        secrets_fp = open('secret.json')
        secrets = json.load(secrets_fp)
        self.__get_token_payload = {'client_secret': secrets['client_secret'],
                                    'client_id': secrets['client_id'],
                                    'scope': 'http://api.microsofttranslator.com',
                                    'grant_type': 'client_credentials'
                                    }
        secrets_fp.close()
        self.__access_token = self.__get_access_token()
    def __get_access_token(self):
        """Fetch a fresh OAuth token and record its lifetime."""
        try:
            res = requests.post('https://datamarket.accesscontrol.windows.net/v2/OAuth2-13', data=self.__get_token_payload)
            self.__access_token_last_update_time = time.time()
            res_json = json.loads(res.text)
            self.__expires_in = int(res_json['expires_in'])
            return res_json['access_token']
        except Exception as e:
            raise e
    def __generate_headers(self):
        # Bearer auth header for the translate endpoint.
        return {'Authorization': 'Bearer ' + self.__access_token}
    def __generate_request_params(self, to, text):
        # NOTE(review): source language is omitted; presumably the API
        # auto-detects it -- verify against the API docs.
        return {'to': to, 'text': text, 'oncomplete':'translated'}
    def translate(self, to, text):
        """Translate *text* into language *to*; returns a 'bot: ...' reply."""
        # if access_token is expired, get new access_token
        try:
            if time.time() - self.__access_token_last_update_time > self.__expires_in:
                self.__access_token = self.__get_access_token()
            res = requests.get('https://api.microsofttranslator.com/v2/Http.svc/Translate', params=self.__generate_request_params(to, text), headers=self.__generate_headers())
            if res.status_code != requests.codes.ok:
                return 'bot: Invlid request! Check your params.'
            # Strip the XML wrapper before returning the translation.
            return 'bot: ' + self.__html_tag_re.sub('', res.text)
        except Exception as e:
            print(e)
            return 'bot: Some error occord!'
class WordCheckerForBot(object):
def __init__(self):
self.__is_enable = True
self.__dict_db = TinyDB('dict_for_bot_db.json')
self.__query = Query()
def wordcheck(self, text):
if self.__is_enable:
for word in self.__dict_db.all():
text = text.replace(word['bad_word'], ' [検閲により削除] ')
return text
def add(self, word):
try:
if self.__dict_db.search(self.__query.bad_word == word):
return 'bot wordchecker: The word is already added'
self.__dict_db.insert({'bad_word' : word})
return 'bot wordchecker: Add word: ' + word
except Exception as e:
return 'bot wordchecker: Add failed'
def delete(self, word):
try:
if self.__dict_db.remove(self.__query.bad_word == word):
return 'bot wordchecker: word deleted'
else:
return 'bot wordchecker: Word delete faild. no such word: ' + todo_name
except Exception as e:
return 'bot wordchecker: Delete failed'
def is_enable(self):
if self.__is_enable:
return 'bot wordchecker: Enabled'
else:
return 'bot wordchecker: Disabled'
def enable(self):
if self.__is_enable:
return 'bot wordchecker: Already enabled'
else:
self.__is_enable = True
return 'bot wordchecker: Enabled'
def disable(self):
if self.__is_enable:
self.__is_enable = False
return 'bot wordchecker: Disabled'
else:
return 'bot wordchecker: Already disabled'
def list(self):
word_list = []
for word in self.__dict_db.all():
word_list.append(word['bad_word'])
if word_list:
return '[ ' + ', '.join(word_list) + ' ]'
else:
return 'bot wordchecker: Dictionary is empty'
class AliasForBot(object):
def __init__(self, bot_command):
self.__alias_db = TinyDB('alias_for_bot_db.json')
self.__query = Query()
self.__bot_command = bot_command
[self.__register_function(a['command_name'], a['alias_name']) for a in self.__alias_db.all()]
def __register_function(self, command_name, alias_name):
func = getattr(self.__bot_command, command_name)
setattr(self.__bot_command, alias_name, func)
return True
def alias(self, command_name, alias_name):
try:
if self.__alias_db.search(self.__query.alias_name == alias_name):
return 'bot alias: Alias:' + alias_name + ' is already exist.'
if self.__alias_db.search(self.__query.alias_name == command_name):
return 'bot alias: '+ command_name + ' is alias. Not regist alias.'
func = getattr(self.__bot_command, alias_name)
return 'bot alias: Already exist command:' + alias_name + ' is not using as alias.'
except AttributeError as e:
None
try:
if command_name.startswith('_'):
raise AttributeError
if self.__register_function(command_name, alias_name):
self.__alias_db.insert({'command_name' : command_name, 'alias_name' : alias_name})
return 'bot alias: Set alias ' + command_name + ' -> ' + alias_name
except AttributeError as e:
return 'bot alias: No such command: ' + command_name
def unalias(self, alias_name):
if self.__alias_db.search(self.__query.alias_name == alias_name):
remove_alias = self.__alias_db.search(self.__query.alias_name == alias_name)[0]
if remove_alias:
delattr(self.__bot_command, alias_name)
command_name = remove_alias['command_name']
self.__alias_db.remove(self.__query.alias_name == alias_name)
return 'bot unalias: Alias ' + command_name + ' -> ' + alias_name + ' is deleted'
return 'bot unalias: Does not exit. No such alias: ' + alias_name
def aliases(self):
alias_list = [a['command_name']+' -> '+a['alias_name'] for a in self.__alias_db.all()]
return '[' + ', '.join(alias_list) + ']'
class BotCommand(object):
def __init__(self):
self.__todo = TodoForBot()
self.__translator = TranslatorForBot()
self.__alias = AliasForBot(self)
self.__wordchecker = WordCheckerForBot()
def ping(self):
return 'pong'
def todo(self, data):
command_and_data = data.split(' ', 2)
try:
if command_and_data[0].startswith('_'):
raise AttributeError
func = getattr(self.__todo, command_and_data[0])
# first element is command
params = command_and_data[1:]
return func(*params)
except AttributeError as e:
return 'bot todo: No such command: ' + command_and_data[0]
except TypeError as e:
return 'bot todo: Arguments for command: "' + command_and_data[0] + '" is invalid or not require params'
def translate(self, data):
to_and_text = data.split(' ', 1)
if len(to_and_text) < 2:
return 'bot tnraslate: Invalid param. bot translate [lang] [text]'
to = to_and_text[0]
text = to_and_text[1]
return self.__translator.translate(to, text)
def clap(self):
return 'bot: \U0001F44F'
def thanks(self):
return 'bot: You are welcome :)'
def alias(self, data):
command_and_alias = data.split(' ', 1)
if len(command_and_alias) < 2:
return 'bot alias: Invalid param. bot alias [command_name] [alias_name]'
command = command_and_alias[0]
alias = command_and_alias[1]
return self.__alias.alias(command, alias)
def unalias(self, data):
return self.__alias.unalias(data)
def aliases(self):
return self.__alias.aliases()
def wordchecker(self, data):
command_and_data = data.split(' ', 1)
try:
if command_and_data[0].startswith('_'):
raise AttributeError
func = getattr(self.__wordchecker, command_and_data[0])
# first element is command
params = command_and_data[1:]
return func(*params)
except AttributeError as e:
return 'bot wordchecker: No such command: ' + command_and_data[0]
except TypeError as e:
return 'bot wordchecker: Arguments for command: "' + command_and_data[0] + '" is invalid or not require params'
| [
"hirotaka.suzuki@kofearistokrat.com"
] | hirotaka.suzuki@kofearistokrat.com |
60ad1528aa7163d2075791060ed97402da5e72c4 | ca49b9d81690c0ca98fe29bbef526e9ae84a5610 | /userbot/plugins/_helper.py | b3c76e21cfdc4e2cae912a40885f0537c3209613 | [
"MIT"
] | permissive | SourcePrivet/HardcoreUserbot | 885a50c9a7eb0c7ff1ed61361d6ba1175346cb81 | 3ded5be106f59a066c7a73ab559fcc22d4d1e4ea | refs/heads/master | 2022-08-01T15:41:54.102072 | 2020-05-22T05:46:31 | 2020-05-22T05:46:31 | 265,158,230 | 0 | 0 | null | 2020-05-19T05:55:23 | 2020-05-19T05:55:23 | null | UTF-8 | Python | false | false | 2,008 | py | from userbot import CMD_LIST
@command(pattern="^.help ?(.*)")
async def cmd_list(event):
if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
tgbotusername = Var.TG_BOT_USER_NAME_BF_HER
input_str = event.pattern_match.group(1)
if tgbotusername is None or input_str == "text":
string = ""
for i in CMD_LIST:
string += "✨ " + i + "\n"
for iter_list in CMD_LIST[i]:
string += " `" + str(iter_list) + "`"
string += "\n"
string += "\n"
if len(string) > 4095:
with io.BytesIO(str.encode(string)) as out_file:
out_file.name = "cmd.txt"
await bot.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption="**COMMANDS**",
reply_to=reply_to_id
)
await event.delete()
else:
await event.edit(string)
elif input_str:
if input_str in CMD_LIST:
string = "Commands found in {}:".format(input_str)
for i in CMD_LIST[input_str]:
string += " " + i
string += "\n"
await event.edit(string)
else:
await event.edit(input_str + " is not a valid plugin!")
else:
help_string = """Userbot Helper.. \n Provided by @visa4bin\n
`Helper to reveal all the commands`"""
results = await bot.inline_query( # pylint:disable=E0602
tgbotusername,
help_string
)
await results[0].click(
event.chat_id,
reply_to=event.reply_to_msg_id,
hide_via=True
)
await event.delete()
| [
"noreply@github.com"
] | noreply@github.com |
9b07f904a6d50b662b51570f13709328bce6958b | 7a884ce8dd90828bf7a6e29a2bab1a85810334d7 | /OPENiapp/OPENiapp/Providers/Google/connector.py | 6de201ab09ab2f1218d9b625643e0e1c199c6f23 | [] | no_license | OPENi-ict/api-framework | d724047f4d0157c91914dbdbf553076b8c073125 | b667236d6a9fb243d2c69f3efc1d15a6aa11f3fc | refs/heads/master | 2020-06-02T05:25:04.286876 | 2015-06-24T12:42:02 | 2015-06-24T12:42:02 | 20,104,796 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,630 | py | from googleplaces import GooglePlaces, types, lang
# https://github.com/slimkrazy/python-google-places
from OPENiapp.Providers.baseConnector import basicProvider
import json
from _goactivity import goActivity
from _golocation import goLocation
from _gomedia import goMedia
from _goproductsServices import goProductsServices
from _goprofiles import goProfiles
class GOPprovider(basicProvider, goActivity, goLocation, goMedia, goProductsServices, goProfiles):
''' This class is used to:
1. Make the connection to the Google Places API
2. Get user's Photos
3. Get OPENi album Photos
4. Post Photos to OPENi album
'''
def __init__(self):
''' Initiate the connector '''
YOUR_API_KEY = 'AIzaSyDoZ455JKv5GS2DgmK1jQc7R8Oj5JVjEnI'
self.connector = GooglePlaces(YOUR_API_KEY)
def get_nearby_places(self, data):
""" EXTRA!!! Find nearby places """
raw_datas = self.connector.nearby_search(location='London, England', keyword='Fish and Chips', radius=20000, types=[types.TYPE_FOOD])
fields = ['id', 'type', 'service', 'url', 'user.id', 'user.username', 'website', 'name', 'details.formatted_address', 'details.formatted_address.number', 'geo_location.lat', 'geo_location.lng', 'created_time', 'types']
alternatives = ['', 'place', 'openi', '', '', '', '', '', '', '', '', '', '', '']
response = {
'meta':
{
'total_count': 'blah',
'next': 'bla'
},
'data': []
}
for raw_data in raw_datas.places:
data = self.get_fields(raw_data, fields, alternatives)
response['data'].append(self.format_place_response(data))
return response
def add_a_place(self, data):
""" EXTRA!!! Add a new place """
# Returns a detailed instance of googleplaces.Place
raw_data = self.connector.add_place(name=data['name'],
lat_lng={'lat': data['lat'], 'lng': data['lng']},
accuracy=data['accuracy'],
types=data['type'],
language=data['lang'])
response = {
'added_place_reference': raw_data.reference,
'added_place_id': raw_data.id
}
return response
def delete_a_place(self, data):
""" DELETE API_PATH/[PLACE_ID] """
# Returns a detailed instance of googleplaces.Place
raw_data = self.connector.delete_place(data['reference'])
return { 'status': 'OK' } | [
"rtsouroplis@epu.ntua.gr"
] | rtsouroplis@epu.ntua.gr |
8c806cff81eecfd1f41bf8c68f5c3440ea4c6be4 | 45b1ad47b387c9328df8fa3f830ff741d4735045 | /Bubble Sort.py | 5802d51964e8a809504631250394f18821bdf4bc | [] | no_license | Richardkaran-74/Algorithms | 0828df4590e05bb211418216ae3bb3b00f2e7637 | cd2904558a60cb9c30ec2f54f530df50a4a97212 | refs/heads/master | 2022-12-10T19:37:55.732263 | 2020-09-04T06:17:07 | 2020-09-04T06:17:07 | 292,609,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | def Bubblesort(a):
b = len(a) -1
swaps = 0
for x in range(b):
for y in range(b-x):
if a[y] > a[y+1]:
a[y],a[y+1] = a[y+1],a[y]
swaps += 1
return a,swaps
n = int(input())
a = list(map(int, input().rstrip().split()))
sorts = Bubblesort(a)
print("The sorted Array is:", a)
print("Array is sorted in" ,sorts[1],"swaps.")
print("First Element:",a[0])
print("Last Element:",a[-1])
| [
"noreply@github.com"
] | noreply@github.com |
262a46b28e0f81a173486d6faa14c8be88a61e79 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2738/60598/281309.py | b37b2756d6e576c77a144d36933054d39da07823 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | input()
matrix = []
while 1:
s = input().replace(" ","")
if s == "]":
break
if s[-1] == ',':
matrix.append(s[1:-2].split(","))
else:
matrix.append(s[1:-1].split(","))
row = len(matrix)
col = len(matrix[0])
result = 0
are = []
for i in range(row):
for j in range(col):
if matrix[i][j] == "\"1\"":
high = 0
wides = []
for h in range(i, row):
high += 1
wide = 0
for s in range(j, col):
if matrix[h][s] == "\"1\"":
wide += 1
else:
break
wides.append(wide)
tempAre = high * min(wides)
if tempAre == 0:
break
are.append(tempAre)
print(max(are))
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
9bc8cc5ec83168a559335537809291c4aa378374 | 0226180e1b9dc926b2c025a9f467f46f7ffb8b13 | /classes.py | 7bb8a61ac287d46b6b2aac353ccaa220c0544046 | [] | no_license | fallisd/validate3 | ce5460083f03b76b9a66df22e91c48179970c423 | 1266c5f305f59e84487bf48aa648130779749068 | refs/heads/master | 2021-01-10T12:07:48.485563 | 2016-03-07T21:20:40 | 2016-03-07T21:20:40 | 53,359,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,905 | py | import numpy as np
import scipy as sp
from scipy import stats as spstats
import cdo; cdo = cdo.Cdo()
import matplotlib.pyplot as plt
import matplotlib as mpl
from math import ceil
import datetime
from mpl_toolkits.basemap import Basemap, addcyclic
from netCDF4 import Dataset, num2date, date2num
import os
import datetime
from pcolor import default_pcolor_args
from taylor import TaylorDiagram
class Plot(object):
    """Base class for a single map plot of one climate variable.

    Chains CDO operations (variable selection, masking, time averaging or
    trend fitting, remapping) over a NetCDF input file and draws the result
    on a Basemap projection.  Intermediate CDO products are cached in the
    ``netcdf/`` directory and reused on later runs.  Most attributes are
    lazily computed properties backed by ``_``-prefixed caches that can be
    pre-seeded through keyword arguments.
    """
    def __init__(self, **kwargs):
        # Input file and experiment description.
        self.ifile = kwargs.pop('ifile', '')
        self.runID = kwargs.pop('runID', None)
        self.experiment = kwargs.pop('experiment', 'historical')
        self.variable = kwargs.pop('variable', None)
        self.data_type = kwargs.pop('data_type', 'climatology')
        self.projection = kwargs.pop('projection', 'global_map')
        # Date window (defaults depend on the experiment, see helpers below).
        self.start_date = str(kwargs.pop('start_date', self._default_start_date()))
        self.end_date = str(kwargs.pop('end_date', self._default_end_date()))
        # CDO remapping options.
        self.remap = kwargs.pop('remap', 'remapdis')
        self.remap_grid = kwargs.pop('remap_grid', 'r360x180')
        self.scale = kwargs.pop('scale', 1)
        self.shift = kwargs.pop('shift', 0)
        self.frequency = kwargs.pop('frequency', 'mon')
        self.idealdepth = kwargs.pop('depth', None)
        self.realm = kwargs.pop('realm', 'atmos')
        self.realization = kwargs.pop('realization', 'r1i1p1')
        self.divergent = kwargs.pop('divergent', False)
        # Plot styling.
        self.pcolor_args = dict(kwargs.pop('pcolor_args', {}))
        self.ax_args = dict(kwargs.pop('ax_args', {}))
        self.fig = kwargs.pop('fig', None)
        self.ax = kwargs.pop('ax', None)
        self.seasons = kwargs.pop('seasons', ['DJF', 'MAM', 'JJA', 'SON'])
        self.rmse = kwargs.pop('rmse', False)
        self.depthneeded = kwargs.pop('depthneeded', None)
        self.cdostring = kwargs.pop('cdostring', None)
        self.png = kwargs.pop('png', False)
        self.pdf = kwargs.pop('pdf', True)
        self.cbaxis = kwargs.pop('cbaxis', None)
        self.alpha = kwargs.pop('alpha', None)
        # Caches for lazily-computed properties (may be pre-seeded).
        self._flatdata = kwargs.pop('flatdata', None)
        self._dataset = kwargs.pop('dataset', None)
        self._data = kwargs.pop('data', None)
        self._full_data = kwargs.pop('fulldata', None)
        self._full_dataset = kwargs.pop('full_dataset', None)
        self._full_ncvar = kwargs.pop('full_ncvar', None)
        self._ffile = kwargs.pop('ffile', None)
        self._units = kwargs.pop('units', None)
        self._lat = kwargs.pop('lat', None)
        self._lon = kwargs.pop('lon', None)
        self._slat = kwargs.pop('slat', None)
        self._slon = kwargs.pop('slon', None)
        self._ncvar = kwargs.pop('ncvar', None)
        self._realm_cat = kwargs.pop('realm_cat', None)
        self._plotname = kwargs.pop('plotname', None)
        self._plotdata = kwargs.pop('plotdata', None)
        self._unscaleddata = kwargs.pop('unscaleddata', None)
        self._truedepth = kwargs.pop('truedepth', None)
        self._ofile = kwargs.pop('ofile', None)
        self.sister = kwargs.pop('sister', None)
        self._statstring = kwargs.pop('statstring', None)
        self._pvalues = kwargs.pop('pvalues', None)
        self._plotstd = kwargs.pop('plotstd', None)
        self._std = kwargs.pop('std', None)
        self._time_averaged = None
        self._fulldepths = None
        self._stats = None
        self._standard_deviation = None
        self._trendscale = None
        self._nobs = None
        # Must come last: _fill_plot_args reads self.units.
        self.plot_args = self._fill_plot_args(**kwargs.pop('plot_args', {}))

    def _get_datetime(self, datestring):
        """Parse 'YYYY-MM-DD', 'YYYY-MM' or 'YYYY' into a datetime.

        Bug fix: formats previously used '%M' (minute) instead of '%m'
        (month), so e.g. '1900-06' parsed as January 1900 at minute 6.
        """
        datelist = ['%Y-%m-%d', '%Y-%m', '%Y']
        for form in datelist:
            try:
                obj = datetime.datetime.strptime(datestring, form)
            except:
                continue
            else:
                return obj

    @property
    def nobs(self):
        """Number of observations in the date window, based on frequency."""
        if self._nobs is None:
            sd = self._get_datetime(self.start_date)
            ed = self._get_datetime(self.end_date)
            number_of_days = (ed - sd).days
            # Explicit floor division: preserves Python 2 int-division
            # semantics and is correct under Python 3 as well.
            if self.frequency == 'day':
                self._nobs = number_of_days
            elif self.frequency == 'mon':
                self._nobs = number_of_days // 30
            else:
                self._nobs = number_of_days // 365
        return self._nobs

    @property
    def plotstd(self):
        """Standard deviation field at the plotted depth, scaled like the data."""
        if self._plotstd is None:
            self._plotstd = self._removedepth(self.std) * self.scale
        return self._plotstd

    @property
    def std(self):
        """Temporal standard deviation of the variable (computed via CDO)."""
        if self._std is None:
            dataset = Dataset(self._calculate_std(self.ifile), 'r')
            ncvar = dataset.variables[self.variable]
            self._std = ncvar[:].squeeze()
        return self._std

    @property
    def slon(self):
        """Longitudes of significant grid points (p < alpha, every 2nd column)."""
        if self._slon is None:
            self._slon = [self.lon[lo] for (la, lo), value in np.ndenumerate(self.pvalues) if abs(value) < self.alpha and lo % 2 == 0]
        return self._slon

    @property
    def slat(self):
        """Latitudes of significant grid points (p < alpha, every 2nd column)."""
        if self._slat is None:
            self._slat = [self.lat[la] for (la, lo), value in np.ndenumerate(self.pvalues) if abs(value) < self.alpha and lo % 2 == 0]
        return self._slat

    @property
    def pvalues(self):
        """Welch's t-test p-values comparing this plot against its sister."""
        if self._pvalues is None:
            t, self._pvalues = sp.stats.ttest_ind_from_stats(self.plotdata, self.plotstd, self.nobs, self.sister.plotdata, self.sister.plotstd, self.sister.nobs, equal_var=False)
            # t, self._pvalues = sp.stats.ttest_ind(self.full_data, self.sister.full_data, axis=0, equal_var=False)
            print(self._pvalues.shape)
            print(self.pvalues[0])
        return self._pvalues

    @property
    def trendscale(self):
        """Factor converting per-timestep trend slopes to per-decade units."""
        if self._trendscale is None:
            if self.data_type == 'trends':
                if self.frequency == 'day':
                    self._trendscale = 3650
                if self.frequency == 'mon':
                    self._trendscale = 120
                if self.frequency == 'yr':
                    self._trendscale = 10
            else:
                self._trendscale = 1
        return self._trendscale

    @property
    def realm_cat(self):
        """Coarse realm category: 'atmos', 'land' or 'ocean' (used for masking).

        Bug fix: the cache guard was inverted (``is not None``), so the
        property always returned the unset ``None`` cache.
        """
        if self._realm_cat is None:
            if self.realm == 'aerosol' or self.realm == 'atmos' or self.realm == 'seaIce':
                self._realm_cat = 'atmos'
            elif self.realm == 'land' or self.realm == 'landIce':
                self._realm_cat = 'land'
            else:
                self._realm_cat = 'ocean'
        return self._realm_cat

    def _default_start_date(self):
        """Default window start, depending on the experiment type."""
        if 'piControl' in self.experiment:
            return '2900-01'
        elif 'rcp' in self.experiment:
            return '2070-01'
        return '1900-01'

    def _default_end_date(self):
        """Default window end, depending on the experiment type."""
        if 'piControl' in self.experiment:
            return '3000-01'
        elif 'rcp' in self.experiment:
            return '2100-01'
        return '2005-01'

    def _fill_plot_args(self, **kwargs):
        """Merge user plot args over the defaults (user values win)."""
        d_args = {'fill_continents': False,
                  'draw_parallels': False,
                  'draw_meridians': False,
                  'cblabel': self.units,
                  }
        for key in d_args:
            if key not in kwargs:
                kwargs[key] = d_args[key]
        return kwargs

    def _fill_pcolor_args(self):
        """Fill pcolor args; share the color range with the sister plot if any."""
        if self.data_type == 'trends' or self.divergent:
            d1pca = default_pcolor_args(self.plotdata, anom=True)
        else:
            d1pca = default_pcolor_args(self.plotdata)
        if self.sister is not None:
            if self.data_type == 'trends' or self.divergent:
                d2pca = default_pcolor_args(self.sister.plotdata, anom=True)
            else:
                d2pca = default_pcolor_args(self.sister.plotdata)
            # Use a common vmin/vmax so the two panels are comparable.
            vmin = np.min([d1pca['vmin'], d2pca['vmin']])
            vmax = np.max([d1pca['vmax'], d2pca['vmax']])
            d1pca['vmin'] = vmin
            d1pca['vmax'] = vmax
        for key in self.pcolor_args:
            d1pca[key] = self.pcolor_args[key]
        self.pcolor_args = d1pca

    def _default_ax_args(self):
        """Default axes arguments (title)."""
        return {'title': self.variable + ' Model: ' + self.runID}

    def _fill_ax_args(self):
        """Merge user axes args over the defaults (user values win)."""
        args = self._default_ax_args()
        for key in self.ax_args:
            args[key] = self.ax_args[key]
        self.ax_args = args

    @property
    def plotname(self):
        """Output file name stem (without extension) under ``plots/``."""
        if self._plotname is None:
            self._plotname = ('plots/' + self.variable + '_' + self.runID + '_' + self.projection + '_' +
                              self.data_type + '_' + self.start_date + '-' + self.end_date +
                              '_' + ''.join(self.seasons))
            try:
                # Bug fix: was ``self._plotname.append(...)`` which always
                # raised AttributeError (str has no append) and was silently
                # swallowed, so the depth suffix was never added.
                self._plotname += '_' + str(self.truedepth)
            except:
                # truedepth may be unavailable (e.g. missing data file).
                pass
        return self._plotname

    def _savefigures(self):
        """Save the current figure as PDF and/or PNG as configured."""
        pdfname = self.plotname + '.pdf'
        pngname = self.plotname + '.png'
        if self.png:
            plt.savefig(pngname, bbox_inches='tight')
        if self.pdf:
            plt.savefig(pdfname, bbox_inches='tight')

    @property
    def statstring(self):
        """Human-readable 'key: value' summary of :attr:`stats`."""
        if self._statstring is None:
            vals = self.stats.values()
            ss = [s + ': ' for s in self.stats.keys()]
            val = [s + str(v) for s, v in zip(ss, vals)]
            self._statstring = ' '.join(val)
        return self._statstring

    @property
    def stats(self):
        """Summary statistics of the plotted field (min/max and mean or rmse)."""
        if self._stats is None:
            if self.rmse:
                vals = [str(np.round(self.plotdata.min(), 1)), str(np.round(self.plotdata.max(), 1)), str(np.round(np.sqrt(np.mean(np.square(self.plotdata))), 1))]
                self._stats = {'rmse': float(vals[2]),
                               'min': float(vals[0]),
                               'max': float(vals[1]),
                               }
            else:
                vals = [str(np.round(self.plotdata.min(), 1)), str(np.round(self.plotdata.max(), 1)), str(np.round(self.plotdata.mean(), 1))]
                self._stats = {'mean': float(vals[2]),
                               'min': float(vals[0]),
                               'max': float(vals[1]),
                               }
        return self._stats

    @property
    def fulldepths(self):
        """Depth coordinate values of the variable, or 'surface' if 2-D."""
        if self._fulldepths is None:
            self._fulldepths = 'surface'
            for dimension in self.ncvar.dimensions:
                try:
                    if self.dataset.variables[dimension].axis == 'Z':
                        self._fulldepths = self.dataset.variables[dimension][:]
                        break
                except:
                    pass  # keep looping if the dimension doesn't have an 'axis' attribute
        return self._fulldepths

    def plot(self):
        """Fill in plot arguments, draw the figure and save it to disk."""
        self._fill_pcolor_args()
        self._fill_ax_args()
        self._makeplot()
        self._savefigures()

    def _makeplot(self):
        self._draw(**self.plot_args)

    @property
    def time_averaged(self):
        """True when the input file has a single (or no) time step.

        Bug fix: the cache guard was inverted (``is not None``) and the
        file name was a bare ``ifile`` (NameError); the property never
        actually inspected the file.
        """
        if self._time_averaged is None:
            nc = Dataset(self.ifile, 'r')
            try:
                time = nc.variables['time'][:].squeeze()
            except:
                self._time_averaged = True
            else:
                self._time_averaged = time.size == 1
        return self._time_averaged

    @property
    def flatdata(self):
        """1-D view of the plotted field."""
        if self._flatdata is None:
            self._flatdata = self.plotdata.flatten()
        return self._flatdata

    @property
    def standard_deviation(self):
        """Spatial standard deviation of the plotted field."""
        if self._standard_deviation is None:
            self._standard_deviation = float(self.plotdata.std())
        return self._standard_deviation

    @property
    def full_plotdata(self):
        # Placeholder: not implemented in the base class.
        pass

    @property
    def full_data(self):
        """Full (time-resolved) data array from the processed file."""
        if self._full_data is None:
            self._full_data = self.full_ncvar[:].squeeze()
        return self._full_data

    @property
    def full_ncvar(self):
        """netCDF variable object of the full (time-resolved) file."""
        if self._full_ncvar is None:
            self._full_ncvar = self.full_dataset.variables[self.variable]
        return self._full_ncvar

    @property
    def full_dataset(self):
        """netCDF dataset of the full (time-resolved) file."""
        if self._full_dataset is None:
            self._full_dataset = Dataset(self.ffile, 'r')
        return self._full_dataset

    @property
    def ffile(self):
        """Path to the full (time-resolved, remapped) intermediate file."""
        if self._ffile is None:
            self._ffile = self._calculate_full(self.ifile)
        return self._ffile

    @property
    def data(self):
        """Processed (time-averaged or trend) data array."""
        if self._data is None:
            self._data = self.ncvar[:].squeeze()
        return self._data

    @property
    def ncvar(self):
        """netCDF variable object of the processed file."""
        if self._ncvar is None:
            self._ncvar = self.dataset.variables[self.variable]
        return self._ncvar

    @property
    def units(self):
        """Variable units; '/ decade' is appended for trend plots."""
        if self._units is None:
            self._units = self.ncvar.units
            if self.data_type == 'trends':
                self._units += ' / decade'
        return self._units

    @property
    def lat(self):
        """Latitude coordinate array."""
        if self._lat is None:
            self._lat = self.dataset.variables['lat'][:].squeeze()
        return self._lat

    @property
    def lon(self):
        """Longitude coordinate array."""
        if self._lon is None:
            self._lon = self.dataset.variables['lon'][:].squeeze()
        return self._lon

    @property
    def truedepth(self):
        """Depth level actually plotted: the available level closest to the
        requested one (``depthneeded`` first, then ``idealdepth``)."""
        if self._truedepth is None:
            if self.fulldepths == 'surface':
                self._truedepth = 'surface'
            else:
                try:
                    self._truedepth = min(self.fulldepths, key=lambda x: abs(x - self.depthneeded))
                except:
                    try:
                        self._truedepth = min(self.fulldepths, key=lambda x: abs(x - self.idealdepth))
                    except:
                        self._truedepth = self.fulldepths
        return self._truedepth

    @property
    def plotdata(self):
        """Final field to plot: shifted, scaled and (for trends) per decade."""
        if self._plotdata is None:
            self._plotdata = (self.unscaleddata + self.shift) * self.scale * self.trendscale
        return self._plotdata

    def _removedepth(self, idata):
        """Select the 2-D slice at :attr:`truedepth` from a 3-D array."""
        if len(idata.shape) < 3:
            return idata
        else:
            depth_index = np.where(np.round(self.fulldepths) == np.round(self.truedepth))[0][0]
            return idata[depth_index, :, :]

    @property
    def unscaleddata(self):
        """2-D field at the plotted depth, before shift/scale."""
        if self._unscaleddata is None:
            self._unscaleddata = self._removedepth(self.data)
        return self._unscaleddata

    def _calculate(self, ifile):
        """Dispatch to the right CDO pipeline for this plot's data_type."""
        if self.cdostring is not None:
            return self._calculate_cdo(ifile)
        elif self.data_type == 'trends':
            return self._calculate_trends(ifile)
        else:
            return self._calculate_climatology(ifile)

    @property
    def ofile(self):
        """Path to the processed intermediate file for this plot."""
        if self._ofile is None:
            self._ofile = self._calculate(self.ifile)
        return self._ofile

    @property
    def dataset(self):
        """netCDF dataset of the processed file."""
        if self._dataset is None:
            self._dataset = Dataset(self.ofile, 'r')
        return self._dataset

    def _calculate_cdo(self, filename):
        """Pipeline for a raw user-supplied CDO command string."""
        return self._cdos(filename)

    def _calculate_std(self, filename):
        """Pipeline producing the temporal standard deviation field."""
        return self._remap(self._setc(self._time_std(self._mask(self._season(self._sel_var(filename))))))

    def _calculate_climatology(self, filename):
        """Pipeline producing the time-mean (climatology) field."""
        return self._remap(self._setc(self._time_mean(self._mask(self._season(self._sel_var(filename))))))

    def _calculate_trends(self, filename):
        """Pipeline producing the linear-trend (slope) field."""
        return self._remap(self._trend(self._setc(self._season(self._sel_var(filename)))))

    def _calculate_full(self, filename):
        """Pipeline producing the full time-resolved field for the window."""
        return self._remap(self._setc(self._sel_date(self._mask(self._season(self._sel_var(filename))))))

    def _split(self, name):
        """Return just the file name component of a path."""
        path, filename = os.path.split(name)
        return filename

    def _sel_date(self, name):
        """CDO: select the configured date window (cached)."""
        if self.time_averaged:
            return name
        out = 'netcdf/seldate_' + self.start_date + '_' + self.end_date + '_' + self._split(name)
        if not os.path.isfile(out):
            datestring = self.start_date + ',' + self.end_date
            cdo.seldate(datestring, input=name, output=out)
        return out

    def _sel_var(self, name):
        """CDO: extract just this plot's variable (cached)."""
        out = 'netcdf/sel_' + self._split(name)
        if not os.path.isfile(out):
            cdo.selvar(self.variable, input=name, output=out)
        return out

    def _mask(self, name):
        """CDO: apply the land/ocean mask matching :attr:`realm_cat` (cached).

        Falls back to the unmasked file (with a log warning) when the mask
        file cannot be applied.
        """
        out = 'netcdf/masked_' + self._split(name)
        if not os.path.isfile(out):
            if self.realm_cat == 'ocean':
                try:
                    cdo.ifthen(input='mask/ocean ' + name, output=out)
                except:
                    with open('logs/log.txt', 'a') as outfile:
                        outfile.write('WARNING: Land data was not masked\n')
                    return name
            elif self.realm_cat == 'land':
                try:
                    cdo.ifthen(input='mask/land ' + name, output=out)
                except:
                    with open('logs/log.txt', 'a') as outfile:
                        outfile.write('WARNING: Ocean data was not masked\n')
                    return name
            else:
                out = name
        return out

    def _time_std(self, name):
        """CDO: temporal standard deviation over the date window (cached)."""
        if self.time_averaged:
            return name
        out = 'netcdf/std_' + self.start_date + '_' + self.end_date + '_' + self._split(name)
        if not os.path.isfile(out):
            seldatestring = '-seldate,' + self.start_date + ',' + self.end_date
            cdo.timstd(input=seldatestring + ' ' + name, output=out)
        return out

    def _time_mean(self, name):
        """CDO: temporal mean over the date window (cached)."""
        if self.time_averaged:
            return name
        out = 'netcdf/climate_' + self.start_date + '_' + self.end_date + '_' + self._split(name)
        if not os.path.isfile(out):
            seldatestring = '-seldate,' + self.start_date + ',' + self.end_date
            cdo.timmean(input=seldatestring + ' ' + name, output=out)
        return out

    def _trend(self, name):
        """CDO: linear trend over the date window; returns the slope file."""
        out = 'netcdf/slope_' + self.start_date + '_' + self.end_date + '_' + self._split(name)
        outintercept = 'netcdf/intercept_' + self._split(name)
        if not os.path.isfile(out):
            seldatestring = '-seldate,' + self.start_date + ',' + self.end_date
            cdo.trend(input=seldatestring + ' ' + name, output=outintercept + ' ' + out)
        return out

    def _detrend(self, name):
        """CDO: remove the linear trend over the date window (cached)."""
        out = 'netcdf/detrend_' + self._split(name)
        if not os.path.isfile(out):
            seldatestring = '-seldate,' + self.start_date + ',' + self.end_date
            cdo.detrend(input=seldatestring + ' ' + name, output=out)
        return out

    def _setc(self, name):
        """CDO: set exact zeros to missing for non-atmosphere fields (cached)."""
        if self.realm_cat == 'atmos':
            return name
        out = 'netcdf/setc_' + self._split(name)
        if not os.path.isfile(out):
            cdo.setctomiss(0, input=name, output=out)
        return out

    def _remap_function(self, remap):
        """ Returns a cdo function from string of the same name.
        """
        def cdoremap(r):
            return {'remapbil': cdo.remapbil,
                    'remapbic': cdo.remapbic,
                    'remapdis': cdo.remapdis,
                    'remapnn': cdo.remapnn,
                    'remapcon': cdo.remapcon,
                    'remapcon2': cdo.remapcon2,
                    'remapplaf': cdo.remaplaf,
                    }[r]
        return cdoremap(remap)

    def _remap(self, name):
        """CDO: remap to the configured grid (cached)."""
        out = 'netcdf/' + self.remap + '-' + self.remap_grid + self._split(name)
        if not os.path.isfile(out):
            remap = self._remap_function(self.remap)
            remap(self.remap_grid, input=name, output=out)
        return out

    def _field_mean(self, name):
        """CDO: area-weighted field mean over the date window (cached)."""
        out = 'netcdf/fldmean_' + self.start_date + '_' + self.end_date + '_' + self._split(name)
        if not os.path.isfile(out):
            seldatestring = '-seldate,' + self.start_date + ',' + self.end_date
            cdo.fldmean(options='-L', input=seldatestring + ' ' + name, output=out)
        return out

    def _zonal_mean(self, name):
        """CDO: zonal mean (cached)."""
        out = 'netcdf/zonmean_' + self._split(name)
        if not os.path.isfile(out):
            cdo.zonmean(input=name, output=out)
        return out

    def _season(self, name):
        """CDO: restrict to the configured seasons (no-op for all seasons)."""
        if self.seasons is None or self.seasons == ['DJF', 'MAM', 'JJA', 'SON']:
            return name
        seasonstring = ','.join(self.seasons)
        outputstring = ''.join(self.seasons)
        out = 'netcdf/selseason-' + outputstring + '_' + self._split(name)
        if not os.path.isfile(out):
            cdo.selseas(seasonstring, input=name, output=out)
        return out

    def _depthstring(self, depthlist):
        """Format a depth (or list of depths) as a comma-joined '%.2f' string."""
        try:
            depthlist = list(depthlist)
        except:
            # Scalar depth: wrap it in a one-element list.
            depthlist = [float(depthlist)]
        depthneed = ["%.2f" % number for number in depthlist]
        return ','.join(depthneed)

    def _intlevel(self, name):
        """CDO: interpolate onto the needed depth level(s) (cached).

        Returns the input file unchanged when no depth is needed or when
        interpolation fails (e.g. 2-D data).
        """
        if self.depthneeded is None or self.depthneeded == 'surface':
            return name
        depth = self._depthstring(self.depthneeded)
        depthname = depth.replace(' ', '')
        # Keep the cache file name a manageable length.
        if len(depthname) > 100:
            depthname = depthname[:99]
        out = 'netcdf/level-' + str(depthname) + '_' + self._split(name)
        if depth:
            if not os.path.isfile(out):
                try:
                    cdo.intlevelx(str(depth), input=name, output=out)
                except:
                    return name
        else:
            return name
        return out

    def _cdos(self, name):
        """Run the raw user-supplied CDO command string on the file (cached).

        Bug fix: previously referenced a bare ``split`` and a bare
        ``string`` (both NameErrors) instead of ``self._split`` and
        ``self.cdostring``.
        """
        out = 'netcdf/cdo_' + self._split(name)
        if not os.path.isfile(out):
            s = 'cdo ' + self.cdostring + ' ' + name + ' ' + out
            os.system(s)
        return out

    def _draw(self, latmin=-80, latmax=80, lonmin=0, lonmax=360, lon_0=0,
              fill_continents=False, draw_parallels=False, draw_meridians=False, cblabel='',
              **kwargs):
        """Draw the field on the configured Basemap projection."""
        if not self.ax:
            self.fig, self.ax = plt.subplots(1, 1, figsize=(8, 8))
        else:
            self.fig = self.ax.get_figure()

        # Pick the projection and the anchor point for the stats label.
        if self.projection == 'global_map':
            m = Basemap(projection='kav7', llcrnrlat=latmin, urcrnrlat=latmax, llcrnrlon=lonmin, urcrnrlon=lonmax, lon_0=-180, resolution='c', ax=self.ax)
            labx, laby = m(10, -88)
        if self.projection == 'polar_map':
            m = Basemap(projection='npstere', boundinglat=latmin, lon_0=lon_0, resolution='c', ax=self.ax)
            labx, laby = m(-135, 12)
        if self.projection == 'polar_map_south':
            m = Basemap(projection='spstere', boundinglat=latmax, lon_0=lon_0, resolution='c', ax=self.ax)
            labx, laby = m(-45, -12)
        if self.projection == 'mercator':
            m = Basemap(projection='merc', llcrnrlat=latmin, urcrnrlat=latmax, llcrnrlon=lonmin, urcrnrlon=lonmax, lat_ts=20, resolution='c', ax=self.ax)
            labx, laby = m(lonmin + 1, latmin + 1)

        lons, lats = np.meshgrid(self.lon, self.lat)
        x, y = m(lons, lats)
        cot = m.pcolor(x, y, self.plotdata, **self.pcolor_args)

        plt.setp(self.ax, **self.ax_args)

        # Mark statistically significant points when a significance level
        # has been configured.
        if self.alpha is not None:
            a, b = m(self.slon, self.slat)
            m.plot(a, b, '.', markersize=0.2, color='k', zorder=1)
        m.drawcoastlines(linewidth=1.25, ax=self.ax)

        if fill_continents:
            m.fillcontinents(color='0.8', ax=self.ax, zorder=2)
        if draw_parallels:
            m.drawparallels(np.arange(-80, 81, 20), labels=[1, 0, 0, 0], linewidth=0, ax=self.ax)
        if draw_meridians:
            m.drawmeridians(np.arange(0, 360, 90), labels=[0, 0, 0, 1], linewidth=0, yoffset=0.5e6, ax=self.ax)

        m.colorbar(mappable=cot, location='right', label=cblabel)
        self.ax.text(labx, laby, self.statstring, fontsize=8)
class Section(Plot):
    """Latitude-depth section plot (zonal mean) of a 3-D field."""
    def __init__(self, **kwargs):
        super(Section, self).__init__(**kwargs)
        # Vertical axis scale for the depth coordinate ('log' or 'linear').
        self.set_yscale = kwargs.pop('set_yscale', 'log')

    @property
    def unscaleddata(self):
        """Zonal mean (average over longitude) of 3-D data; 2-D passes through."""
        if self._unscaleddata is None:
            if self.data.ndim == 3:
                self._unscaleddata = self.data.mean(axis=2)
            else:
                self._unscaleddata = self.data
        return self._unscaleddata

    def _draw(self, latmin=-80, latmax=80, lonmin=0, lonmax=360, lon_0=0, cblabel='',
              **kwargs):
        """Draw the latitude-depth panel with filled colors and contours."""
        if not self.ax:
            self.fig, self.ax = plt.subplots(1, 1, figsize=(8, 3))
        else:
            self.fig = self.ax.get_figure()

        cot = self.ax.pcolormesh(self.lat, self.fulldepths, self.plotdata, **self.pcolor_args)
        self.ax.contour(self.lat, self.fulldepths, self.plotdata, colors=['k'],
                        vmin=self.pcolor_args['vmin'], vmax=self.pcolor_args['vmax'])

        # Depth increases downward; clamp the axes to the data extent.
        self.ax.invert_yaxis()
        self.ax.autoscale(True, axis='both', tight='both')
        self.ax.set_yscale(self.set_yscale)

        if self.ax_args:
            plt.setp(self.ax, **self.ax_args)

        box = self.ax.get_position()
        if self.cbaxis:
            # Bug fix: 'cbaxis' was referenced as a bare name (NameError);
            # use the configured instance attribute.
            self.fig.colorbar(cot, cax=self.cbaxis, label=cblabel)
        else:
            tl = self.fig.add_axes([box.x1 + box.width * 0.05, box.y0, 0.02, box.height])
            self.fig.colorbar(cot, cax=tl, label=cblabel)
class Compare(Plot):
    """Mixin that interpolates results onto a sister plot's depth level.

    NOTE: the `super(self.__class__, self)` calls intentionally dispatch
    relative to the *runtime* class so that a subclass such as CompareLine
    (MRO: CompareLine -> Line -> Compare -> Plot) reaches Line's
    implementation; do not change them to `super(Compare, self)` without
    checking every subclass's MRO.
    """

    def __init__(self, **kwargs):
        super(Compare, self).__init__(**kwargs)
        # Depth to interpolate to; defaults to the sister plot's true depth.
        # (The default is evaluated eagerly, so `sister` must be supplied.)
        self.depthneeded = kwargs.pop('depthneeded', self.sister.truedepth)

    def _calculate_climatology(self, filename):
        return self._intlevel(super(self.__class__, self)._calculate_climatology(filename))

    def _calculate_trends(self, filename):
        # BUG FIX: previously delegated to _calculate_climatology, so trend
        # comparisons silently plotted climatologies instead of trends.
        return self._intlevel(super(self.__class__, self)._calculate_trends(filename))

    def _default_ax_args(self):
        return {'title': self.variable + ' Model: ' + self.runID}
class CompareSection(Section, Compare):
    """Section plot interpolated onto a sister plot's depth grid.

    NOTE(review): calling _calculate_climatology/_calculate_trends on this
    class looks like it recurses infinitely -- Compare's implementations use
    `super(self.__class__, self)`, and since neither CompareSection nor
    Section overrides those methods the MRO lookup lands back on Compare.
    Confirm against the full Plot base class before relying on this class.
    """

    def __init__(self, **kwargs):
        super(CompareSection, self).__init__(**kwargs)
class Line(Plot):
    """1-D line plot: a field-mean time series or a zonal mean vs. latitude.

    self.projection selects the mode: 'time_series' plots against the
    file's decoded time coordinate; any other value plots against latitude.
    """

    def __init__(self, **kwargs):
        super(Line, self).__init__(**kwargs)
        # Cached x-axis values; computed lazily by the `xaxis` property.
        self._xaxis = kwargs.pop('xaxis', None)

    def _gettime(self, dataset):
        """Decode the netCDF time coordinate into an array of datetimes."""
        nc_time = dataset.variables['time']
        try:
            cal = nc_time.calendar
        except:
            # Files without an explicit calendar attribute use 'standard'.
            cal = 'standard'
        date = num2date(nc_time[:], nc_time.units, cal)
        # Convert netcdftime objects to plain datetimes for matplotlib.
        x = [datetime.datetime(*item.timetuple()[:6]) for item in date]
        return np.array(x)

    def _removedepth(self, idata):
        """Select the level nearest self.truedepth from 2-D data."""
        # 1-D data carries no depth axis to strip.
        if len(idata.shape) < 2:
            return idata
        else:
            depth_index = np.where(np.round(self.fulldepths) == np.round(self.truedepth))[0][0]
            # Time series appear as (time, depth); zonal means as (depth, lat)
            # -- presumably; confirm against the cdo post-processing chain.
            if self.projection == 'time_series':
                return idata[:, depth_index]
            else:
                return idata[depth_index, :]

    @property
    def xaxis(self):
        # Lazy x-axis: decoded time for time series, latitude otherwise.
        if self._xaxis is None:
            if self.projection == 'time_series':
                self._xaxis = self._gettime(self.dataset)
            else:
                self._xaxis = self.lat
        return self._xaxis

    def _calculate_climatology(self, filename):
        # Time series: field mean of the masked, season-selected data.
        # Zonal mean: additionally time-average, remap, and collapse longitude.
        if self.projection == 'time_series':
            return self._field_mean(self._setc(self._mask(self._season(self._sel_var(filename)))))
        else:
            return self._zonal_mean(self._remap(self._time_mean(self._setc(self._mask(self._season(self._sel_var(filename)))))))

    def _calculate_trends(self, filename):
        # Same split as the climatology, but with a trend fit in place of
        # (or in addition to) the time average.
        if self.projection == 'time_series':
            return self._field_mean(self._trend(self._setc(self._mask(self._season(self._sel_var(filename))))))
        else:
            return self._zonal_mean(self._remap(self._trend(self._setc(self._mask(self._season(self._sel_var(filename)))))))

    def _default_ax_args(self):
        """Build default axes labels appropriate to the plot mode."""
        args = {'title': self.variable + ' Model: ' + self.runID}
        args['ylabel'] = str(self.units)
        if self.projection == 'time_series':
            args['xlabel'] = 'Time'
        else:
            args['xlabel'] = 'Latitude'
        return args

    def _draw(self, **kwargs):
        if not self.ax:
            self.fig, self.ax = plt.subplots(1, 1, figsize=(8, 8))
        else:
            self.fig = self.ax.get_figure()
        self.ax.plot(self.xaxis, self.plotdata, label=self.runID, zorder=10)
        plt.setp(self.ax, **self.ax_args)
        # Place the legend just outside the right edge of the axes.
        plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
class CompareLine(Line, Compare):
    """Line plot interpolated onto a sister plot's depth level.

    Unlabelled instances are drawn grey with the placeholder legend label
    'remove', which is filtered out of the final legend so that many
    background lines collapse into no legend entry.
    """

    def __init__(self, **kwargs):
        super(CompareLine, self).__init__(**kwargs)
        # Truthy -> draw in colour with a legend entry; falsy -> grey background.
        self.labelled = kwargs.pop('labelled', None)

    def _calculate_climatology(self, filename):
        # Route explicitly through Compare so the depth interpolation runs.
        return Compare._calculate_climatology(self, filename)

    def _calculate_trends(self, filename):
        return Compare._calculate_trends(self, filename)

    def _draw(self, **kwargs):
        if not self.ax:
            self.fig, self.ax = plt.subplots(1, 1, figsize=(8, 8))
        else:
            self.fig = self.ax.get_figure()
        if self.labelled:
            self.ax.plot(self.xaxis, self.plotdata, label=self.runID, zorder=3)
        else:
            self.ax.plot(self.xaxis, self.plotdata, color='0.5', label='remove', zorder=1)
        # De-duplicate legend entries and drop the 'remove' placeholder.
        handles, labels = self.ax.get_legend_handles_labels()
        by_label = dict(zip(labels, handles))
        # IMPROVED: dict.pop with a default replaces the bare `except: pass`,
        # which also swallowed unrelated exceptions.
        by_label.pop('remove', None)
        plt.legend(by_label.values(), by_label.keys())
class Figure(Plot):
    """Map-style figure whose data pipeline ends before any axis collapse."""

    def __init__(self, **kwargs):
        super(Figure, self).__init__(**kwargs)

    def _calculate_climatology(self, filename):
        """Select, season-filter, mask, time-average, then clean and remap."""
        selected = self._sel_var(filename)
        seasonal = self._season(selected)
        masked = self._mask(seasonal)
        averaged = self._time_mean(masked)
        return self._remap(self._setc(averaged))

    def _calculate_trends(self, filename):
        """Select, season-filter, fit a trend, remap, then clean."""
        selected = self._sel_var(filename)
        trended = self._trend(self._season(selected))
        return self._setc(self._remap(trended))
class Taylor(Figure, Compare):
def __init__(self, **kwargs):
super(Taylor, self).__init__(**kwargs)
self.plot_args = {}
self.labelledifiles = kwargs.pop('labelledifiles', {})
self.unlabelledifiles = kwargs.pop('unlabelledifiles', [])
self._stdrange = kwargs.pop('stdrange', None)
self._labelledflatdata = kwargs.pop('labelledflatdata', None)
self._unlabelledflatdata = kwargs.pop('unlabelledflatdata', None)
self._labelledstandard_deviations = kwargs.pop('labelledstandard_deviations', None)
self._unlabelledstandard_deviations = kwargs.pop('unlabelledstandard_deviations', None)
self._labelledcorrelation_coefficients = kwargs.pop('labelledcorrelation_coefficients', None)
self._unlabelledcorrelation_coefficients = kwargs.pop('unlabelledcorrelation_coefficients', None)
self._labelledplotdata = kwargs.pop('labelledplotdata', None)
self._unlabelledplotdata = kwargs.pop('unlabelledplotdata', None)
self._unlabelledunscaleddata = kwargs.pop('unlabelledunscaleddata', None)
self._labelledunscaleddata = kwargs.pop('labelledunscaleddata', None)
self._labelleddata = kwargs.pop('labelleddata', None)
self._unlabelleddata = kwargs.pop('unlabelleddata', None)
self._labelledncvar = kwargs.pop('labelledncvar', None)
self._unlabelledncvar = kwargs.pop('unlabelledncvar', None)
self._labelleddatasets = kwargs.pop('labelleddatasets', None)
self._unlabelleddatasets = kwargs.pop('unlabelleddatasets', None)
self._labelledofiles = kwargs.pop('labelledofiles', None)
self._unlabelledofiles = kwargs.pop('unlabelledofiles', None)
self._labelledsamples = kwargs.pop('labelledsamples', None)
self._unlabelledsamples = kwargs.pop('unlabelledsamples', None)
def plot(self):
self._fill_ax_args()
self._makeplot()
self._savefigures()
@property
def stdrange(self):
if self._stdrange is None:
combined_stds = list(self.unlabelledstandard_deviations)
combined_stds.extend(self.labelledstandard_deviations.values())
self._stdrange = max(combined_stds) * 1.3 / self.sister.standard_deviation
if self._stdrange <= 1.5:
self._stdrange = 1.5
return self._stdrange
@property
def labelledflatdata(self):
if self._labelledflatdata is None:
self._labelledflatdata = {label: data.flatten() for label, data in self.labelledplotdata.iteritems()}
return self._labelledflatdata
@property
def unlabelledflatdata(self):
if self._unlabelledflatdata is None:
self._unlabelledflatdata = [data.flatten() for data in self.unlabelledplotdata]
return self._unlabelledflatdata
@property
def labelledstandard_deviations(self):
if self._labelledstandard_deviations is None:
self. _labelledstandard_deviations = {label: float(data.std()) for label, data in self.labelledplotdata.iteritems()}
return self._labelledstandard_deviations
@property
def unlabelledstandard_deviations(self):
if self._unlabelledstandard_deviations is None:
self._unlabelledstandard_deviations = [float(data.std()) for data in self.unlabelledplotdata]
return self._unlabelledstandard_deviations
@property
def labelledcorrelation_coefficients(self):
if self._labelledcorrelation_coefficients is None:
self._labelledcorrelation_coefficients = {label: np.ma.corrcoef(self.sister.flatdata, data)[0, 1] for label, data in self.labelledflatdata.iteritems()}
return self._labelledcorrelation_coefficients
@property
def unlabelledcorrelation_coefficients(self):
if self._unlabelledcorrelation_coefficients is None:
self._unlabelledcorrelation_coefficients = [np.ma.corrcoef(self.sister.flatdata, data)[0, 1] for data in self.unlabelledflatdata]
return self._unlabelledcorrelation_coefficients
@property
def labelledplotdata(self):
if self._labelledplotdata is None:
self._labelledplotdata = {label: (data + self.shift) * self.scale * self.trendscale for label, data in self.labelledunscaleddata.iteritems()}
return self._labelledplotdata
@property
def unlabelledplotdata(self):
if self._unlabelledplotdata is None:
self._unlabelledplotdata = [(data + self.shift) * self.scale * self.trendscale for data in self.unlabelledunscaleddata]
return self._unlabelledplotdata
@property
def labelledunscaleddata(self):
if self._labelledunscaleddata is None:
self._labelledunscaleddata = {label: self._removedepth(data) for label, data in self.labelleddata.iteritems()}
return self._labelledunscaleddata
@property
def unlabelledunscaleddata(self):
if self._unlabelledunscaleddata is None:
self._unlabelledunscaleddata = [self._removedepth(data) for data in self.unlabelleddata]
return self._unlabelledunscaleddata
@property
def labelleddata(self):
if self._labelleddata is None:
self._labelleddata = {label: ncvar[:].squeeze() for label, ncvar in self.labelledncvar.iteritems()}
return self._labelleddata
@property
def unlabelleddata(self):
if self._unlabelleddata is None:
self._unlabelleddata = [ncvar[:].squeeze() for ncvar in self.unlabelledncvar]
return self._unlabelleddata
@property
def labelledncvar(self):
if self._labelledncvar is None:
self._labelledncvar = {label: dataset.variables[self.variable] for label, dataset in self.labelleddatasets.iteritems()}
return self._labelledncvar
@property
def unlabelledncvar(self):
if self._unlabelledncvar is None:
self._unlabelledncvar = [dataset.variables[self.variable] for dataset in self.unlabelleddatasets]
return self._unlabelledncvar
@property
def labelleddatasets(self):
if self._labelleddatasets is None:
self._labelleddatasets = {label: Dataset(ofile, 'r') for label, ofile in self.labelledofiles.iteritems()}
return self._labelleddatasets
@property
def unlabelleddatasets(self):
if self._unlabelleddatasets is None:
self._unlabelleddatasets = [Dataset(ofile, 'r') for ofile in self.unlabelledofiles]
return self._unlabelleddatasets
@property
def labelledofiles(self):
if self._labelledofiles is None:
self._labelledofiles = {}
for label in self.labelledifiles:
try:
self.labelledofiles[label] = self._calculate(self.labelledifiles[label])
except:
print 'could not append: ' + label + ': ' + ifile
return self._labelledofiles
@property
def unlabelledofiles(self):
if self._unlabelledofiles is None:
ofiles = []
for ifile in self.unlabelledifiles:
try:
ofiles.append(self._calculate(ifile))
except:
print 'could not append: ' + ifile
self._unlabelledofiles = ofiles
return self._unlabelledofiles
@property
def labelledsamples(self):
if self._labelledsamples is None:
self._labelledsamples = {label: (self.labelledstandard_deviations[label], self.labelledcorrelation_coefficients[label]) for label in self.labelledstandard_deviations}
return self._labelledsamples
@property
def unlabelledsamples(self):
if self._unlabelledsamples is None:
self._unlabelledsamples = zip(self.unlabelledstandard_deviations, self.unlabelledcorrelation_coefficients)
return self._unlabelledsamples
def _draw(self, **kwargs):
if not self.ax:
self.fig = plt.figure()
else:
self.fig = self.ax.get_figure()
dia = TaylorDiagram(self.sister.standard_deviation, fig=self.fig, label='obs', srange=(0, self.stdrange))
colors = plt.matplotlib.cm.jet(np.linspace(0,1, len(self.labelledsamples)))
for i, (label, (stddev, corrcoef)) in enumerate(self.labelledsamples.iteritems()):
dia.add_sample(stddev, corrcoef,
marker='.', ms=12, ls='',
mfc=colors[i], mec=colors[i],
label=label, zorder=2)
self.fig.legend(dia.samplePoints,
[p.get_label() for p in dia.samplePoints],
numpoints=1, prop=dict(size='small'), loc='upper right')
for (stddev, corrcoef) in self.unlabelledsamples:
dia.add_sample(stddev, corrcoef,
marker='.', ms=12, ls='',
mfc='grey', mec='grey',
label=None, zorder=1)
dia.add_grid()
contours = dia.add_contours(colors='0.5')
plt.clabel(contours, inline=1, fontsize=10)
plt.title(self.variable)
class Histogram(Plot):
    """Histogram-style plot built from field means of each realization."""

    def __init__(self, **kwargs):
        super(Histogram, self).__init__(**kwargs)

    def _default_ax_args(self):
        axis_labels = {'title': self.variable + ' Model: ' + self.runID}
        axis_labels['ylabel'] = '# Realizations'
        axis_labels['xlabel'] = self.units
        return axis_labels

    def _removedepth(self, idata):
        # Surface fields carry no depth axis; pass them through untouched.
        if self.fulldepths == 'surface':
            return idata
        rounded_depths = np.round(self.fulldepths)
        depth_index = np.where(rounded_depths == np.round(self.truedepth))[0][0]
        return idata[depth_index]

    def _calculate_climatology(self, filename):
        """Select, season-filter, mask, clean, time-average, remap, field-mean."""
        seasonal = self._season(self._sel_var(filename))
        prepared = self._setc(self._mask(seasonal))
        return self._field_mean(self._remap(self._time_mean(prepared)))

    def _calculate_trends(self, filename):
        """Select, season-filter, clean, fit a trend, remap, field-mean."""
        seasonal = self._season(self._sel_var(filename))
        trended = self._trend(self._setc(seasonal))
        return self._field_mean(self._remap(trended))
class HistogramCompare(Histogram, Taylor):
    """Histogram of the unlabelled ensemble with labelled runs as vlines.

    Inherits the labelled/unlabelled data pipeline from Taylor and the
    field-mean calculations from Histogram.
    """

    def __init__(self, **kwargs):
        super(HistogramCompare, self).__init__(**kwargs)

    def _draw(self, **kwargs):
        if not self.ax:
            self.fig, self.ax = plt.subplots(1, 1, figsize=(8,8))
        else:
            self.fig = self.ax.get_figure()
        # 10-bin grey histogram of the anonymous ensemble values.
        n, bins, patches = plt.hist(self.unlabelledplotdata, 10, facecolor='grey', alpha=0.75)
        # Leave 20% headroom above the tallest bar.
        # NOTE(review): `ceil` is assumed to come from a `from math import
        # ceil` (or similar) elsewhere in the file -- confirm.
        ymax = int(ceil(1.2 * max(n)))
        self.ax.set_ylim(0, ymax)
        # One coloured vertical line per labelled run.
        # NOTE(review): `_get_lines.color_cycle` is private matplotlib API
        # that was removed in newer releases -- fragile across versions.
        for key, value in self.labelledplotdata.iteritems():
            plt.axvline(value, label=key, linewidth=4,
                        color=next(self.ax._get_lines.color_cycle))
        plt.setp(self.ax, **self.ax_args)
        plt.title(self.variable)
        self.ax.legend(loc='best')
if __name__ == "__main__":
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
s = Plot(ifile='/raid/rc40/data/ncs/historical-cvu/mon/atmos/ta/r1i1p1/ta_Amon_DevAM4-1_historical-cvu_r1i1p1_185001-200512.nc',
runID='cvu',
experiment='historical',
variable='ta',
depth=100000,
data_type='climatology',
projection='mercator',
realm='ocean',
)
q = Compare(ifile='/raid/rc40/data/ncs/historical-edr/mon/atmos/ta/r1i1p1/ta_Amon_DevAM4-2_historical-edr_r1i1p1_185001-200012.nc',
runID='edr',
experiment='historical',
variable='ta',
data_type='climatology',
projection='mercator',
realm='atmos',
sister=s
)
s.sister = q
r = Plot(plotdata = s.plotdata - q.plotdata,
runID='comp',
experiment='historical',
variable='ta',
data_type='climatology',
projection='mercator',
realm='atmos',
alpha=0.05,
pvalues = s.pvalues,
divergent=True,
dataset = s.dataset
)
r.plot()
"""
s = Plot(ifile='/raid/rc40/data/ncs/historical-cvu/mon/atmos/ta/r1i1p1/ta_Amon_DevAM4-1_historical-cvu_r1i1p1_185001-200512.nc',
runID='cvu',
experiment='historical',
variable='ta',
data_type='climatology',
projection='time_series',
realm='ocean',
)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
q = Line(ifile='/raid/rc40/data/ncs/historical-edr/mon/atmos/ta/r1i1p1/ta_Amon_DevAM4-2_historical-edr_r1i1p1_185001-200012.nc',
runID='edr',
experiment='historical',
variable='ta',
depth=100000,
data_type='climatology',
projection='zonal_mean',
realm='atmos',
ax=ax,
)
s = CompareLine(ifile='/raid/rc40/data/ncs/historical-cvu/mon/atmos/ta/r1i1p1/ta_Amon_DevAM4-1_historical-cvu_r1i1p1_185001-200512.nc',
runID='cvu',
experiment='historical',
variable='ta',
data_type='climatology',
projection='zonal_mean',
realm='ocean',
sister=q,
ax=ax,
labelled=True
)
r = Plot(runID='comparison',
experiment='historical',
variable='ta',
data_type='climatology',
projection='global_map',
realm='atmos',
plotdata=s.plotdata - q.plotdata,
dataset=s.dataset,
rmse=True,
divergent=True
)
s.plot()
q.plot()
r.plot()
q = Plot(ifile='/raid/rc40/data/ncs/historical-edr/mon/atmos/ta/r1i1p1/ta_Amon_DevAM4-2_historical-edr_r1i1p1_185001-200012.nc',
runID='edr',
experiment='historical',
variable='ta',
depth=100000,
data_type='climatology',
projection='taylor',
realm='atmos',)
r = Taylor(ifile='/raid/rc40/data/ncs/historical-edr/mon/atmos/ta/r1i1p1/ta_Amon_DevAM4-2_historical-edr_r1i1p1_185001-200012.nc',
runID= 'edr',
unlabelledifiles = ['/raid/rc40/data/ncs/historical-cvu/mon/atmos/ta/r1i1p1/ta_Amon_DevAM4-1_historical-cvu_r1i1p1_185001-200512.nc'],
experiment='historical',
variable='ta',
data_type='climatology',
projection='taylor',
realm='atmos',
sister=q)
r.plot()
q = Histogram(ifile='/raid/rc40/data/ncs/historical-edr/mon/atmos/ta/r1i1p1/ta_Amon_DevAM4-2_historical-edr_r1i1p1_185001-200012.nc',
runID='edr',
experiment='historical',
variable='ta',
depth=100000,
data_type='climatology',
projection='taylor',
realm='atmos',)
r = HistogramCompare(ifile='/raid/rc40/data/ncs/historical-edr/mon/atmos/ta/r1i1p1/ta_Amon_DevAM4-2_historical-edr_r1i1p1_185001-200012.nc',
runID= 'edr',
labelledifiles = {'edr':'/raid/rc40/data/ncs/historical-edr/mon/atmos/ta/r1i1p1/ta_Amon_DevAM4-2_historical-edr_r1i1p1_185001-200012.nc'},
unlabelledifiles = ['/raid/rc40/data/ncs/historical-cvu/mon/atmos/ta/r1i1p1/ta_Amon_DevAM4-1_historical-cvu_r1i1p1_185001-200512.nc','/raid/rc40/data/ncs/historical-edr/mon/atmos/ta/r1i1p1/ta_Amon_DevAM4-2_historical-edr_r1i1p1_185001-200012.nc'],
experiment='historical',
variable='ta',
data_type='trends',
projection='taylor',
realm='atmos',
sister=q)
r.plot()
"""
| [
"dfallis@uvic.ca"
] | dfallis@uvic.ca |
4fa11bf0bf80e1c45ba384816c50e106b6e37996 | 63eb05febaac75f781a266d48d1cfff2debe64ea | /the_tale/game/actions/tests/test_meta_action_arena_pvp_1x1.py | cc1908c6fc5a58626d98e062129badc099331957 | [
"BSD-2-Clause-Views"
] | permissive | MadRiw/the-tale | 185ca33e410a59de63a594daf15fc8a5701338d2 | 1801beab2ed149556c0b3380e8adaaa976f74e6c | refs/heads/master | 2021-01-15T23:45:34.873857 | 2015-06-17T13:06:12 | 2015-06-17T13:06:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,832 | py | # coding: utf-8
import mock
from the_tale.common.utils import testcase
from the_tale.accounts.prototypes import AccountPrototype
from the_tale.accounts.logic import register_user
from the_tale.game.logic_storage import LogicStorage
from the_tale.game.logic import create_test_map
from the_tale.game.prototypes import TimePrototype
from the_tale.game.balance import constants as c
from the_tale.game.actions.meta_actions import MetaActionArenaPvP1x1Prototype
from the_tale.game.actions.models import MetaAction, MetaActionMember
from the_tale.game.pvp.models import Battle1x1, Battle1x1Result
from the_tale.game.pvp.relations import BATTLE_1X1_STATE
from the_tale.game.pvp.prototypes import Battle1x1Prototype
from the_tale.game.pvp.tests.helpers import PvPTestsMixin
from the_tale.game.pvp.abilities import ABILITIES, Flame
class ArenaPvP1x1MetaActionTest(testcase.TestCase, PvPTestsMixin):
def setUp(self):
super(ArenaPvP1x1MetaActionTest, self).setUp()
create_test_map()
result, account_1_id, bundle_id = register_user('test_user_1')
result, account_2_id, bundle_id = register_user('test_user_2')
self.account_1 = AccountPrototype.get_by_id(account_1_id)
self.account_2 = AccountPrototype.get_by_id(account_2_id)
self.storage = LogicStorage()
self.storage.load_account_data(self.account_1)
self.storage.load_account_data(self.account_2)
self.hero_1 = self.storage.accounts_to_heroes[self.account_1.id]
self.hero_2 = self.storage.accounts_to_heroes[self.account_2.id]
# for test data reset
self.hero_1.health = self.hero_1.max_health / 2
self.hero_1.pvp.set_advantage(1)
self.hero_1.pvp.set_effectiveness(0.5)
# for test data reset
self.hero_2.pvp.set_advantage(1)
self.hero_2.pvp.set_effectiveness(0.5)
self.battle_1 = self.pvp_create_battle(self.account_1, self.account_2, BATTLE_1X1_STATE.PROCESSING)
self.battle_1.calculate_rating = True
self.battle_1.save()
self.battle_2 = self.pvp_create_battle(self.account_2, self.account_1, BATTLE_1X1_STATE.PROCESSING)
self.battle_2.calculate_rating = True
self.battle_2.save()
self.bundle_id = 666
self.meta_action_battle = MetaActionArenaPvP1x1Prototype.create(self.storage, self.hero_1, self.hero_2, bundle_id=self.bundle_id)
self.meta_action_battle.set_storage(self.storage)
def test_initialization(self):
self.assertEqual(MetaAction.objects.all().count(), 1)
self.assertEqual(MetaActionMember.objects.all().count(), 2)
self.assertEqual(len(self.meta_action_battle.members), 2)
self.assertEqual(len(self.meta_action_battle.members_by_roles), 2)
self.assertTrue(self.meta_action_battle.storage)
self.assertEqual(self.meta_action_battle.hero_1, self.hero_1)
self.assertEqual(self.meta_action_battle.hero_2, self.hero_2)
# test reset of pvp_data
self.assertEqual(self.meta_action_battle.hero_1.health, self.hero_1.max_health)
self.assertEqual(self.meta_action_battle.hero_1.pvp.advantage, 0)
self.assertEqual(self.meta_action_battle.hero_1.pvp.effectiveness, c.PVP_EFFECTIVENESS_INITIAL)
self.assertEqual(self.meta_action_battle.hero_1.pvp.energy, 0)
self.assertEqual(self.meta_action_battle.hero_1.pvp.energy_speed, 1)
self.assertEqual(self.meta_action_battle.hero_1.pvp.turn_advantage, 0)
self.assertEqual(self.meta_action_battle.hero_1.pvp.turn_effectiveness, c.PVP_EFFECTIVENESS_INITIAL)
self.assertEqual(self.meta_action_battle.hero_1.pvp.turn_energy, 0)
self.assertEqual(self.meta_action_battle.hero_1.pvp.turn_energy_speed, 1)
self.assertTrue(self.meta_action_battle.hero_1_context.pvp_advantage_strike_damage > 0)
self.assertEqual(self.meta_action_battle.hero_2.health, self.hero_2.max_health)
self.assertEqual(self.meta_action_battle.hero_2.pvp.advantage, 0)
self.assertEqual(self.meta_action_battle.hero_2.pvp.effectiveness, c.PVP_EFFECTIVENESS_INITIAL)
self.assertEqual(self.meta_action_battle.hero_2.pvp.energy, 0)
self.assertEqual(self.meta_action_battle.hero_2.pvp.energy_speed, 1)
self.assertEqual(self.meta_action_battle.hero_2.pvp.turn_advantage, 0)
self.assertEqual(self.meta_action_battle.hero_2.pvp.turn_effectiveness, c.PVP_EFFECTIVENESS_INITIAL)
self.assertEqual(self.meta_action_battle.hero_2.pvp.turn_energy, 0)
self.assertEqual(self.meta_action_battle.hero_2.pvp.turn_energy_speed, 1)
self.assertTrue(self.meta_action_battle.hero_2_context.pvp_advantage_strike_damage > 0)
def test_one_hero_killed(self):
current_time = TimePrototype.get_current_time()
self.hero_1.health = 0
self.meta_action_battle.process()
self.assertEqual(self.meta_action_battle.state, MetaActionArenaPvP1x1Prototype.STATE.BATTLE_ENDING)
current_time.increment_turn()
self.meta_action_battle.process()
self.assertEqual(self.meta_action_battle.state, MetaActionArenaPvP1x1Prototype.STATE.PROCESSED)
self.assertTrue(self.hero_1.is_alive and self.hero_2.is_alive)
self.assertEqual(self.hero_1.health, self.hero_1.max_health / 2)
self.assertEqual(self.hero_2.health, self.hero_2.max_health)
def check_hero_pvp_statistics(self, hero, battles, victories, draws, defeats):
self.assertEqual(hero.statistics.pvp_battles_1x1_number, battles)
self.assertEqual(hero.statistics.pvp_battles_1x1_victories, victories)
self.assertEqual(hero.statistics.pvp_battles_1x1_draws, draws)
self.assertEqual(hero.statistics.pvp_battles_1x1_defeats, defeats)
def _end_battle(self, hero_1_health, hero_2_health):
self.hero_1.health = hero_1_health
self.hero_2.health = hero_2_health
current_time = TimePrototype.get_current_time()
self.meta_action_battle.process()
current_time.increment_turn()
self.meta_action_battle.process()
def test_hero_1_win(self):
self._end_battle(hero_1_health=self.hero_1.max_health, hero_2_health=0)
self.assertEqual(Battle1x1Prototype._model_class.objects.all().count(), 0)
self.check_hero_pvp_statistics(self.hero_1, 1, 1, 0, 0)
self.check_hero_pvp_statistics(self.hero_2, 1, 0, 0, 1)
def test_hero_2_win(self):
self._end_battle(hero_1_health=0, hero_2_health=self.hero_2.max_health)
self.assertEqual(Battle1x1Prototype._model_class.objects.all().count(), 0)
self.check_hero_pvp_statistics(self.hero_1, 1, 0, 0, 1)
self.check_hero_pvp_statistics(self.hero_2, 1, 1, 0, 0)
def test_draw(self):
self._end_battle(hero_1_health=0, hero_2_health=0)
self.assertEqual(Battle1x1Prototype._model_class.objects.all().count(), 0)
self.check_hero_pvp_statistics(self.hero_1, 1, 0, 1, 0)
self.check_hero_pvp_statistics(self.hero_2, 1, 0, 1, 0)
@mock.patch('the_tale.game.pvp.prototypes.Battle1x1Prototype.calculate_rating', False)
def test_hero_1_win_no_stats(self):
self._end_battle(hero_1_health=self.hero_1.max_health, hero_2_health=0)
self.check_hero_pvp_statistics(self.hero_1, 0, 0, 0, 0)
self.check_hero_pvp_statistics(self.hero_2, 0, 0, 0, 0)
@mock.patch('the_tale.game.pvp.prototypes.Battle1x1Prototype.calculate_rating', False)
def test_hero_2_win_no_stats(self):
self._end_battle(hero_1_health=0, hero_2_health=self.hero_1.max_health)
self.check_hero_pvp_statistics(self.hero_1, 0, 0, 0, 0)
self.check_hero_pvp_statistics(self.hero_2, 0, 0, 0, 0)
@mock.patch('the_tale.game.pvp.prototypes.Battle1x1Prototype.calculate_rating', False)
def test_draw_no_stats(self):
self._end_battle(hero_1_health=0, hero_2_health=0)
self.check_hero_pvp_statistics(self.hero_1, 0, 0, 0, 0)
self.check_hero_pvp_statistics(self.hero_2, 0, 0, 0, 0)
def test_second_process_call_in_one_turn(self):
with mock.patch('the_tale.game.actions.meta_actions.MetaActionArenaPvP1x1Prototype._process') as meta_action_process_counter:
self.meta_action_battle.process()
self.meta_action_battle.process()
self.assertEqual(meta_action_process_counter.call_count, 1)
def test_update_hero_pvp_info(self):
self.hero_2.pvp.set_effectiveness(50)
self.meta_action_battle.update_hero_pvp_info(self.hero_2)
self.assertTrue(self.hero_2.pvp.energy > self.hero_1.pvp.energy)
self.assertTrue(0 < self.hero_2.pvp.effectiveness < 50)
def test_advantage_after_turn(self):
self.hero_1.pvp.set_effectiveness(50)
self.hero_2.pvp.set_effectiveness(25)
self.meta_action_battle.process()
self.assertTrue(self.hero_1.pvp.advantage > 0)
self.assertTrue(self.hero_2.pvp.advantage < 0)
def test_full_battle(self):
current_time = TimePrototype.get_current_time()
self.assertEqual(Battle1x1.objects.filter(state=BATTLE_1X1_STATE.PROCESSING).count(), 2)
while self.meta_action_battle.state != MetaActionArenaPvP1x1Prototype.STATE.PROCESSED:
self.meta_action_battle.process()
current_time.increment_turn()
self.assertEqual(self.meta_action_battle.state, MetaActionArenaPvP1x1Prototype.STATE.PROCESSED)
self.assertTrue(self.hero_1.is_alive and self.hero_2.is_alive)
self.assertEqual(self.hero_1.health, self.hero_1.max_health / 2)
self.assertEqual(self.hero_2.health, self.hero_2.max_health)
self.assertEqual(Battle1x1.objects.all().count(), 0)
self.assertEqual(Battle1x1Result.objects.all().count(), 1)
battle_result = Battle1x1Result.objects.all()[0]
self.assertNotEqual(battle_result.participant_1_id, battle_result.participant_2_id)
def test_remove(self):
self.assertEqual(MetaAction.objects.all().count(), 1)
self.assertEqual(MetaActionMember.objects.all().count(), 2)
self.meta_action_battle.remove()
self.assertEqual(MetaAction.objects.all().count(), 0)
self.assertEqual(MetaActionMember.objects.all().count(), 0)
def test_get_bot_pvp_properties(self):
properties = self.meta_action_battle.get_bot_pvp_properties()
self.meta_action_battle.save()
self.meta_action_battle.reload()
self.assertEqual(set(properties.keys()), set(('ability_chance', 'priorities')))
self.assertTrue('bot_pvp_properties' in self.meta_action_battle.data)
self.assertEqual(set(properties.keys()), set(self.meta_action_battle.data['bot_pvp_properties']))
self.assertTrue(0 <properties['ability_chance'] <= 1)
self.assertEqual(set(properties['priorities']), set(ABILITIES.keys()))
self.assertEqual(properties, self.meta_action_battle.get_bot_pvp_properties())
for ability_priority in properties['priorities']:
self.assertTrue(ability_priority > 0)
def test_process_bot_called__hero_1(self):
self.hero_1._model.is_bot = True
self.meta_action_battle.reload()
with mock.patch('the_tale.game.actions.meta_actions.MetaActionArenaPvP1x1Prototype.process_bot') as process_bot:
self.meta_action_battle.process()
self.assertEqual(process_bot.call_count, 1)
self.assertEqual(process_bot.call_args[1]['bot'].id, self.hero_1.id )
self.assertEqual(process_bot.call_args[1]['enemy'].id, self.hero_2.id )
def test_process_bot_called__hero_2(self):
self.hero_2._model.is_bot = True
self.meta_action_battle.reload()
with mock.patch('the_tale.game.actions.meta_actions.MetaActionArenaPvP1x1Prototype.process_bot') as process_bot:
self.meta_action_battle.process()
self.assertEqual(process_bot.call_count, 1)
self.assertEqual(process_bot.call_args[1]['bot'].id, self.hero_2.id )
self.assertEqual(process_bot.call_args[1]['enemy'].id, self.hero_1.id )
def test_process_bot_called__use_ability(self):
self.hero_1._model.is_bot = True
self.hero_1.pvp.set_energy(10)
properties = self.meta_action_battle.get_bot_pvp_properties()
properties['ability_chance'] = 1.0
self.hero_2.pvp.set_energy_speed(2) # flame abilitie will not be used, if enemy energy speed is 1
self.meta_action_battle.process()
self.assertTrue(self.hero_1.pvp.energy in (1, 2))
def test_initialize_bots__bot_is_second(self):
result, account_1_id, bundle_id = register_user('test_user_3')
result, account_2_id, bundle_id = register_user('bot', 'bot@bot.bot', '111111', is_bot=True)
account_1 = AccountPrototype.get_by_id(account_1_id)
account_2 = AccountPrototype.get_by_id(account_2_id)
storage = LogicStorage()
storage.load_account_data(account_1)
storage.load_account_data(account_2)
hero_1 = storage.accounts_to_heroes[account_1.id]
hero_2 = storage.accounts_to_heroes[account_2.id]
hero_1._model.level = 50
self.assertEqual(hero_2.level, 1)
MetaActionArenaPvP1x1Prototype.create(storage, hero_1, hero_2, bundle_id=self.bundle_id+1)
self.assertEqual(hero_2.level, 50)
self.assertTrue(len(hero_2.abilities.all) > 1)
self.assertEqual(hero_2.health, hero_2.max_health)
def test_initialize_bots__bot_is_first(self):
result, account_1_id, bundle_id = register_user('bot', 'bot@bot.bot', '111111', is_bot=True)
result, account_2_id, bundle_id = register_user('test_user_3')
account_1 = AccountPrototype.get_by_id(account_1_id)
account_2 = AccountPrototype.get_by_id(account_2_id)
storage = LogicStorage()
storage.load_account_data(account_1)
storage.load_account_data(account_2)
hero_1 = storage.accounts_to_heroes[account_1.id]
hero_2 = storage.accounts_to_heroes[account_2.id]
hero_2._model.level = 50
self.assertEqual(hero_1.level, 1)
MetaActionArenaPvP1x1Prototype.create(storage, hero_1, hero_2, bundle_id=self.bundle_id+1)
self.assertEqual(hero_1.level, 50)
self.assertTrue(len(hero_1.abilities.all) > 1)
self.assertEqual(hero_1.health, hero_1.max_health)
def test_initialize_bots__second_create(self):
result, account_1_id, bundle_id = register_user('test_user_3')
result, account_2_id, bundle_id = register_user('bot', 'bot@bot.bot', '111111', is_bot=True)
account_1 = AccountPrototype.get_by_id(account_1_id)
account_2 = AccountPrototype.get_by_id(account_2_id)
storage = LogicStorage()
storage.load_account_data(account_1)
storage.load_account_data(account_2)
hero_1 = storage.accounts_to_heroes[account_1.id]
hero_2 = storage.accounts_to_heroes[account_2.id]
hero_1._model.level = 50
self.assertEqual(hero_2.level, 1)
self.pvp_create_battle(account_1, account_2, BATTLE_1X1_STATE.PROCESSING)
self.pvp_create_battle(account_2, account_1, BATTLE_1X1_STATE.PROCESSING)
meta_action = MetaActionArenaPvP1x1Prototype.create(storage, hero_1, hero_2, bundle_id=self.bundle_id+1)
meta_action.process_battle_ending()
MetaActionArenaPvP1x1Prototype.create(storage, hero_1, hero_2, bundle_id=self.bundle_id+2)
self.assertEqual(hero_2.level, 50)
self.assertTrue(len(hero_2.abilities.all) > 1)
self.assertEqual(hero_2.health, hero_2.max_health)
    def test_process_bot__flame_ability_not_used(self):
        """The bot never uses Flame while the enemy's pvp energy_speed is at base.

        get_bot_pvp_properties is patched so Flame is the only ability and is
        always chosen; even then, over 100 bot turns Flame.use must not be
        called because the opponent's energy_speed is 1.
        NOTE(review): presumably Flame lowers the enemy's energy regeneration
        and is pointless at the base rate -- confirm against ability code.
        """
        result, account_1_id, bundle_id = register_user('bot', 'bot@bot.bot', '111111', is_bot=True)
        result, account_2_id, bundle_id = register_user('test_user_3')
        account_1 = AccountPrototype.get_by_id(account_1_id)
        account_2 = AccountPrototype.get_by_id(account_2_id)
        storage = LogicStorage()
        storage.load_account_data(account_1)
        storage.load_account_data(account_2)
        hero_1 = storage.accounts_to_heroes[account_1.id]
        hero_2 = storage.accounts_to_heroes[account_2.id]
        MetaActionArenaPvP1x1Prototype.create(storage, hero_1, hero_2, bundle_id=self.bundle_id+1)
        # Precondition: the human opponent's energy regeneration is at base rate.
        self.assertEqual(hero_2.pvp.energy_speed, 1)
        with mock.patch('the_tale.game.actions.meta_actions.MetaActionArenaPvP1x1Prototype.get_bot_pvp_properties',
                        lambda a: {'priorities': {Flame.TYPE: 1}, 'ability_chance': 1}):
            with mock.patch('the_tale.game.pvp.abilities.Flame.use') as use:
                for i in xrange(100):
                    self.meta_action_battle.process_bot(hero_1, hero_2)
        self.assertEqual(use.call_count, 0)
| [
"a.eletsky@gmail.com"
] | a.eletsky@gmail.com |
a34346580111533bbd2b1ba6ea905d5f5ef6b8e9 | 77d8af02305d0add9930ffc287c66b9fa080d9e4 | /활성화함수/시그모이드함수/sigmoid1.py | 8b081de068c242d7c3b09edb310431130d1b15be | [] | no_license | jaemo0321/woi | ffd496fc537bd96a66be671218dca2456e72a57d | ed29b2cb803c8d0f60456655774f596d5c20933c | refs/heads/main | 2023-08-11T10:38:22.694406 | 2021-09-21T05:30:17 | 2021-09-21T05:30:17 | 408,530,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | import numpy as np
import matplotlib.pylab as plt
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + e^-x), applied elementwise to numpy input."""
    exp_term = np.exp(-x)
    return 1 / (1 + exp_term)
# Plot the sigmoid over [-5.0, 5.0) sampled in 0.1 steps.
x = np.arange(-5.0,5.0,0.1)
y = sigmoid(x)
plt.plot(x,y)
# Pad the y-axis slightly beyond the function's (0, 1) range.
plt.ylim(-0.1,1.1)
plt.show()
"jaemo0321@gmail.com"
] | jaemo0321@gmail.com |
344f9662afb40ad4e593118502c860a3bc15bc9e | acf90501ba4f8b6c1db52f28746449b4bf3e2d2c | /cloudland/cloudland/wsgi.py | 019864acf6d28faa76c34b2575a2710a9e84b4bf | [] | no_license | michaelpermyashkin/Cloudland | 64729c7a2676eb02ddb55aba7571f7462492ce79 | 5c9d1a32eb6117ec106ca6e79c02e396342f4d34 | refs/heads/master | 2022-11-25T18:51:00.345507 | 2020-08-04T13:33:12 | 2020-08-04T13:33:12 | 264,047,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for Stitch project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'stitch.settings')
application = get_wsgi_application()
| [
"mpermyashkin@gmail.com"
] | mpermyashkin@gmail.com |
eb35b2c2e51714228552809f270f084565a0632e | 85b0c95499b67aeed5885d0d5ad7fcef94d982e7 | /helper/data_logger.py | 874630f4beb2b2eddaaac45d638d0464b7e5cda4 | [
"MIT"
] | permissive | georgezouq/Personae | becd3c7a41f8c1d37ecb1fbd6c28a26ea24dc522 | 1a0d67b04d9b692aceb4f499207751829cafba9f | refs/heads/master | 2020-03-27T19:02:46.091638 | 2018-09-19T02:49:50 | 2018-09-19T02:49:50 | 146,962,267 | 1 | 0 | MIT | 2018-09-01T02:58:51 | 2018-09-01T02:58:51 | null | UTF-8 | Python | false | false | 1,507 | py | # coding=utf-8
import logging
import os
from datetime import datetime
from static import LOGS_DIR
DATETIME_NOW = datetime.now().strftime("%Y%m%d%H%M%S")
def generate_market_logger(model_name):
    """Create the stock-market logger for a run of *model_name*.

    Returns a DEBUG-level logger named 'stock_market_logger' that writes
    WARNING+ records to stderr and everything to
    LOGS_DIR/<model>-<timestamp>-stock_market.log.
    """
    market_log_path = '{}-{}-{}'.format(model_name, DATETIME_NOW, 'stock_market.log')
    market_logger = logging.getLogger('stock_market_logger')
    market_logger.setLevel(logging.DEBUG)
    # logging.getLogger returns a process-wide singleton, so calling this
    # factory more than once used to stack duplicate handlers and emit every
    # record several times. Only attach handlers on the first call.
    if not market_logger.handlers:
        market_log_sh = logging.StreamHandler()
        market_log_sh.setLevel(logging.WARNING)
        market_log_fh = logging.FileHandler(os.path.join(LOGS_DIR, market_log_path))
        market_log_fh.setLevel(logging.DEBUG)
        market_log_fh.setFormatter(logging.Formatter('[{}] {}'.format('%(asctime)s', '%(message)s')))
        market_logger.addHandler(market_log_sh)
        market_logger.addHandler(market_log_fh)
    return market_logger
def generate_algorithm_logger(model_name):
    """Create the algorithm logger for a run of *model_name*.

    Returns a DEBUG-level logger named 'algorithm_logger' that writes
    WARNING+ records to stderr and everything to
    LOGS_DIR/<model>-<timestamp>-algorithm.log.
    """
    algorithm_log_path = '{}-{}-{}'.format(model_name, DATETIME_NOW, 'algorithm.log')
    algorithm_logger = logging.getLogger('algorithm_logger')
    algorithm_logger.setLevel(logging.DEBUG)
    # Guard against duplicate handlers: the named logger is a process-wide
    # singleton, so re-running this factory would otherwise double every record.
    if not algorithm_logger.handlers:
        algorithm_log_sh = logging.StreamHandler()
        algorithm_log_sh.setLevel(logging.WARNING)
        algorithm_log_fh = logging.FileHandler(os.path.join(LOGS_DIR, algorithm_log_path))
        algorithm_log_fh.setLevel(logging.DEBUG)
        algorithm_log_fh.setFormatter(logging.Formatter('[{}] {}'.format('%(asctime)s', '%(message)s')))
        algorithm_logger.addHandler(algorithm_log_sh)
        algorithm_logger.addHandler(algorithm_log_fh)
    return algorithm_logger
| [
"ceruleanacg@gmail.com"
] | ceruleanacg@gmail.com |
af85091132f201033888c47fc10b43a4b7e8d32d | a8b37bd399dd0bad27d3abd386ace85a6b70ef28 | /airbyte-integrations/connectors/source-aircall/setup.py | 25b830a1e3cce6526bed07734eb77ef89e7f7d8b | [
"MIT",
"LicenseRef-scancode-free-unknown",
"Elastic-2.0"
] | permissive | thomas-vl/airbyte | 5da2ba9d189ba0b202feb952cadfb550c5050871 | 258a8eb683634a9f9b7821c9a92d1b70c5389a10 | refs/heads/master | 2023-09-01T17:49:23.761569 | 2023-08-25T13:13:11 | 2023-08-25T13:13:11 | 327,604,451 | 1 | 0 | MIT | 2021-01-07T12:24:20 | 2021-01-07T12:24:19 | null | UTF-8 | Python | false | false | 628 | py | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from setuptools import find_packages, setup
# Runtime dependencies of the connector.
MAIN_REQUIREMENTS = [
    "airbyte-cdk~=0.1",
]

# Extra dependencies installed only for the test suite (pip install '.[tests]').
TEST_REQUIREMENTS = [
    "requests-mock~=1.9.3",
    "pytest~=6.2",
    "pytest-mock~=3.6.1",
]
setup(
    name="source_aircall",
    description="Source implementation for Aircall.",
    author="Airbyte",
    author_email="contact@airbyte.io",
    packages=find_packages(),
    install_requires=MAIN_REQUIREMENTS,
    # Ship JSON/YAML stream schemas alongside the package code.
    package_data={"": ["*.json", "*.yaml", "schemas/*.json", "schemas/shared/*.json"]},
    # Test-only dependencies, installed via the 'tests' extra.
    extras_require={
        "tests": TEST_REQUIREMENTS,
    },
)
| [
"noreply@github.com"
] | noreply@github.com |
8c944888521251fc7283418e0320a1de6fc264f4 | 56398fccc60c404fbcd0770056e21c1522cf3807 | /pages/urls.py | a51a7c2a3103e156bfe9fd2e3940dace195a5723 | [] | no_license | agnesimbs/movers | 72efb5a8d79a615ce4f3f0d2168f8aee4bd7b37c | 3fe03b1d4942c5f0b1b2986a79fba9f7cad6dafd | refs/heads/master | 2020-03-27T15:37:10.534867 | 2018-09-03T08:16:35 | 2018-09-03T08:16:35 | 146,728,729 | 0 | 0 | null | 2018-09-03T08:16:36 | 2018-08-30T09:42:23 | Python | UTF-8 | Python | false | false | 189 | py | from django.urls import path
from .views import HomePageView,AboutPageView
urlpatterns=[
path('',HomePageView.as_view(),name='home'),
path('about/',AboutPageView.as_view(),name='about'),
] | [
"agneswambui504@gmail.com"
] | agneswambui504@gmail.com |
78cd6795ea18e69c2d486a85ec2aaeccd5af59d2 | 181d138383f19d4427a525b713d270b9a567fdb9 | /tests/test_csv.py | 96599288ea36fdcc63457339745f0cdcbf96401d | [
"MIT"
] | permissive | openelections/electionware | 2f07f6cf5cc1b077a1e8262546237915767095ab | d3998027223d848b9cb6172e224e62096b3b5916 | refs/heads/main | 2022-12-27T19:40:35.054496 | 2020-10-10T21:13:13 | 2020-10-10T21:13:13 | 314,941,407 | 1 | 1 | null | 2020-11-22T02:07:40 | 2020-11-22T02:07:39 | null | UTF-8 | Python | false | false | 2,374 | py | import os
from unittest import TestCase
from unittest.mock import patch
from electionware.csv import get_output_file_path, get_output_header, \
write_electionware_pdf_to_csv
class TestOutputFilePath(TestCase):
    """Tests for get_output_file_path."""

    def test__output_file_path(self):
        description = {
            'yyyymmdd': '20001231',
            'state_abbrev': 'AA',
            'type': 'test',
            'county': 'County Name',
        }
        expected_path = os.path.join(
            '..', '2000', '20001231__aa__test__county_name__precinct.csv')
        self.assertEqual(expected_path, get_output_file_path(description))
class TestOutputHeader(TestCase):
    """Tests for get_output_header."""

    # Columns that precede the vote-type columns in every output header.
    BASE_COLUMNS = ['county', 'precinct', 'office', 'district', 'party',
                    'candidate']

    def test__single_vote_type_header(self):
        header = get_output_header({'openelections_mapped_header': ['votes']})
        self.assertEqual(self.BASE_COLUMNS + ['votes'], header)

    def test__multiple_vote_type_header(self):
        mapped = ['votes', 'election_day', 'absentee']
        header = get_output_header({'openelections_mapped_header': mapped})
        # Extra vote types come first; the 'votes' total is always last.
        self.assertEqual(
            self.BASE_COLUMNS + ['election_day', 'absentee', 'votes'], header)
class TestCSV(TestCase):
    """Tests for the public write_electionware_pdf_to_csv wrapper."""

    def test__pdf_to_csv(self):
        # Patch the private writer so no PDF parsing or file I/O happens;
        # only the arguments derived by the public wrapper are checked.
        with patch('electionware.csv._write_electionware_pdf_to_csv') as mock:
            configuration = {
                'election_description': {
                    'yyyymmdd': '20001231', 'state_abbrev': 'AA',
                    'type': 'test', 'county': 'County Name'},
                'table_processing': {
                    'openelections_mapped_header': ['votes']}
            }
            # Pass a copy so the later comparison against `configuration`
            # proves the wrapper did not need to mutate its input.
            write_electionware_pdf_to_csv(configuration.copy())
        expected_filepath = os.path.join(
            '..', '2000', '20001231__aa__test__county_name__precinct.csv')
        expected_header = ['county', 'precinct', 'office', 'district', 'party',
                           'candidate', 'votes']
        # The private writer receives (filepath, header, parser-like object).
        self.assertEqual(mock.call_args[0][0], expected_filepath)
        self.assertEqual(mock.call_args[0][1], expected_header)
        self.assertEqual(mock.call_args[0][2]._configuration, configuration)
| [
"rbierbryer@gmail.com"
] | rbierbryer@gmail.com |
f060ef31d43c3220db23ba2d5f5b9638358bec69 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_nitpicked.py | c24415023c45b6fa685872e33916c3f83b705177 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py |
#calss header
class _NITPICKED():
def __init__(self,):
self.name = "NITPICKED"
self.definitions = nitpick
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['nitpick']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
14b9908178c8743363eb1a49b403aac42ecccffd | 040a49be9a254020899c67152094b7c6bd5e2a36 | /release/ca_descriptions/templates/forestsim_incinerator_wind_SW_3.5.py | e6d644d79d3d7e2e6f21c94d0e769ef6d723b699 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"BSD-2-Clause"
] | permissive | Leon-Singleton/Forest-Fire-Simulation-Using-Cellular-Automata-Python | 51e0b1f64d79855b506603479e423fbe7771efc8 | 6c890b53dce152bf03cbb325d285b06d2264d597 | refs/heads/master | 2021-06-16T21:31:24.730398 | 2021-04-29T11:56:33 | 2021-04-29T11:56:33 | 199,534,962 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,722 | py | # Name: NAME
# Dimensions: 2
import inspect
import math
import random
# --- Set up executable path, do not edit ---
import sys
import numpy as np
this_file_loc = (inspect.stack()[0][1])
main_dir_loc = this_file_loc[:this_file_loc.index('ca_descriptions')]
sys.path.append(main_dir_loc)
sys.path.append(main_dir_loc + 'capyle')
sys.path.append(main_dir_loc + 'capyle/ca')
sys.path.append(main_dir_loc + 'capyle/guicomponents')
# ---
import capyle.utils as utils
from capyle.ca import Grid2D, Neighbourhood, randomise2d
# NOTE(review): `global` at module level is a no-op; these statements only
# document that the names are mutated from inside functions below.
global water_counter
# Generations elapsed since the start; used to time the water-drop drying out.
water_counter = 0
DIRECTIONS = {"NW", "N", "NE", "W", "E", "SW", "S", "SE"}
# Cell states 0..7.
CHAPARRAL, DENSE_FORREST, LAKE, CANYON, BURNING, BURNT, START_BURN, END_BURN = range(
    8)
global initial_grid
# 20x20 terrain template, later upscaled; default 0 is chaparral.
base_initial_grid = np.zeros((20, 20), dtype=int)
base_initial_grid[12:16, 6:10] = 1  # adding forrest
base_initial_grid[4:6, 2:8] = 2  # adding lake
base_initial_grid[2:14, 13] = 3  # adding canyon
# Each template cell becomes a scale x scale block (Kronecker product).
scale = 10
grid_size = 20 * scale
initial_grid = np.kron(base_initial_grid, np.ones((scale, scale)))
initial_grid[0, (len(initial_grid) - 1)] = 4  # fire starting point
global ignition_grid
# Accumulated ignition factor per cell, advanced each generation.
ignition_grid = np.zeros((grid_size, grid_size))
# Time/space resolution used to convert per-km burn durations into
# per-generation decay counters (generation in hours? cell_size in km --
# TODO confirm units against the report).
generation = 0.5
cell_size = 0.25
chaparral_decay_km = 168
forest_decay_km = 600
canyon_decay_km = 8
chaparral_decay = chaparral_decay_km * cell_size * (1 / generation)
forest_decay = forest_decay_km * cell_size * (1 / generation)
canyon_decay = canyon_decay_km * cell_size * (1 / generation)
global decay_values
# Decay counters indexed by terrain state 0..4 (lake entry is a dummy 1;
# lakes never burn).
decay_values = [
    chaparral_decay, forest_decay, 1, canyon_decay, chaparral_decay
]
def r_value(combust_index, wind_speed, psi):
    """Fire spread rate for one neighbour direction.

    combust_index: combustibility multiplier of the terrain.
    wind_speed: wind speed in m/s.
    psi: angle in radians between the wind direction and the fire
         spread direction.
    Returns the spread rate r = r0 * kp * combust_index.
    """
    temp = 35      # highest temp in Texas in July (deg C)
    humidity = 70  # average Texas humidity (%); refine later
    # Beaufort wind level: B = floor((v / 0.836) ** (2/3)).
    # The original floored *before* the exponent, which is not the Beaufort
    # inversion of v = 0.836 * B^(3/2); floor the final level instead.
    w = math.floor((wind_speed / 0.836) ** (2 / 3))
    kp = math.exp(0.1783 * wind_speed * math.cos(psi))
    r0 = 0.03 * temp + 0.05 * w + 0.01 * (100 - humidity) - 0.3
    r = r0 * kp * combust_index
    return r
def next_state(current_state, neighbours,
               l):  # l = cell size. not sure what this means
    # NOTE(review): this function is never called anywhere in the file and,
    # as written, cannot run:
    #  - `for neighbours in neighbours` shadows its own iterable,
    #  - `neighbour_states` is read before assignment (the initialised name
    #    is `neighbours_states`),
    #  - `delta_t = (1 / 8)(l / r_max)` attempts to *call* the float 1/8;
    #    a `*` is presumably missing,
    #  - r_max stays 0 (this_r_value is hard-coded 0), so the division
    #    would raise ZeroDivisionError anyway.
    # Left byte-for-byte as found; fix or delete before wiring it in.
    neighbours_states = 0
    r_max = 0
    for neighbours in neighbours:
        this_r_value = 0  # + r_value(neighbour) -- put in info for each neighbour
        if this_r_value > r_max:
            r_max = this_r_value
        neighbour_states = neighbour_states  # + this_r_value
    delta_t = (1 / 8)(l / r_max)
    new_state = (current_state + neighbour_states * delta_t) / l
    return new_state
def setup(args):
    """Set up the CAPyLE config object used to interact with the GUI.

    args[0] is the path of the config file to load; when a second argument
    is present the config is saved and the process exits (the GUI round-trip
    described at the bottom of this function).
    """
    # chaparral,denseForest,lake,canyon,burning,burnt = neighbours
    config_path = args[0]
    config = utils.load(config_path)
    # -- THE CA MUST BE RELOADED IN THE GUI IF ANY OF THE BELOW ARE CHANGED --
    config.title = "Forest Fire"
    config.dimensions = 2
    # One entry per cell state 0..7 (see the module-level constants).
    config.states = \
        (
            CHAPARRAL,
            DENSE_FORREST,
            LAKE,
            CANYON,
            BURNING,
            BURNT,
            START_BURN,
            END_BURN
        )
    # ------------ -------------------------------------------------------------
    # RGB colour per state, in the same order as config.states.
    config.state_colors = \
        [
            (0.6,0.6,0), #chaparral
            (0,0.4,0), #dense forrest
            (0,0.5,1), #lake
            (0.5,0.5,0.5), #canyon
            (1,0,0), #burning
            (0.25,0.25,0.25), #burnt
            (1,0.7,0), #starting to burn
            (0.8,0,0.2) #ending burn
        ]
    config.grid_dims = (grid_size, grid_size)
    config.num_generations = 1000
    config.set_initial_grid(initial_grid)
    config.wrap = False
    # --------------------------------------------------------------------
    # the GUI calls this to pass the user defined config
    # into the main system with an extra argument
    # do not change
    if len(args) == 2:
        config.save()
        sys.exit()
    return config
def transition_function(grid, neighbourstates, neighbourcounts, decay_grid,
                        water_decay_grid):
    """function that transitions cells in the grid to the next state.
    the function runs through several processes to
    transition the states correctly. calling calling the ignite function
    on each cell. After this this is used to see by how much the ignition
    grid is incremented.
    Args:
        grid: the grid of states representing the forrest to be transitioned
        neighbourstates: the neighbouring states of each of the cells
                         this is a 2d array with an array for each direction
                         from the cell. (N, NE, NW, etc)
        neighbourcounts: an array of arrays for each cell which gives the counts
                         of each of the states neighbouring the cell
        decay_grid: grid of values which decrease by 1 for each generation
        water_decay_grid: like decay_grid, but driving the water-drop cells
    """
    global water_counter
    global ignition_grid
    neighbourstates = np.array(neighbourstates)
    init_grid = initial_grid.astype(int)
    ig_grid = np.array(ignition_grid)
    # NOTE(review): the wind here is ("NE", 3.5) while the file name says
    # "wind_SW_3.5" -- confirm which scenario this variant is meant to model.
    windspeed_ignition_modifiers = wind_speed_rvalue("NE", 3.5)
    # Pass 1: per-cell ignition increments from burning neighbours.
    new_ig_grid = []
    for i, row in enumerate(grid):
        new_ig_grid.append([
            ignite(cell, neighbourstates[:, i, j],
                   windspeed_ignition_modifiers) for j, cell in enumerate(row)
        ])
    new_ig_grid = np.array(new_ig_grid)
    # Pass 2: cells whose ignition factor just became non-zero start burning.
    started_to_burn = []
    for i, row in enumerate(grid):
        started_to_burn.append([
            started_burning(cell, ig_grid[i, j], new_ig_grid[i, j])
            for j, cell in enumerate(row)
        ])
    grid[started_to_burn] = START_BURN
    ig_grid = np.add(new_ig_grid, ig_grid)
    # Pass 3: START_BURN cells whose accumulated ignition reaches the decay
    # counter become fully BURNING.
    full_burn = []
    for i, row in enumerate(grid):
        full_burn.append([
            fully_burning(cell, ig_grid[i, j], decay_grid[i, j])
            for j, cell in enumerate(row)
        ])
    grid[full_burn] = BURNING
    # Pass 4: BURNING cells that have used up half their original decay value
    # transition to END_BURN.
    end_burning = []
    for i, row in enumerate(grid):
        end_burning.append([
            ending_burn(cell, decay_grid[i, j], decay_values[int(
                initial_grid[i, j])]) for j, cell in enumerate(row)
        ])
    grid[end_burning] = END_BURN
    # Burning cells consume one unit of fuel per generation.
    decay_grid[(grid == BURNING) | (grid == END_BURN)] -= 1
    burnt_out = (decay_grid == 0)  # find those which have decayed to 0
    grid[(decay_grid == 0
          )] = BURNT  # set all that have decayed to zero to BURNT(7)
    water_counter += 1
    if (water_counter == 100):  # time taken for water to dry up
        # Restore the doused region to its original terrain once dry.
        grid[120:160, 80:120] = initial_grid[120:160, 80:120]
    water_decay_grid[(grid != LAKE)] -= 1  # take one off their decay value
    grid[(water_decay_grid == 0)] = BURNT  # switch their state to 5
    ignition_grid = ig_grid
    return grid
def ignite(cell, neighbours, wind):
    """ generates an ignition factor for a given cell
    the function checks if the cell is eligable to have an ignition factor added
    if it can then the function iterates through each of the cells neighbours
    which have an asociated burning threshhold this number is the multiplied by
    the wind speed modifier. a number is then random generated, if this
    random number is less product of the windspeed modifier and the threshold
    then the ignition factor is increased by the ignition factor multiplied by
    the wind speed modifier. If the cell is in the START_BURN state then
    an adition 10 is added to the ignition factor
    Args:
        cell: the cell in the grid that is having an ignition factor added to it
        neighbours: the neighbour states of the cells
        wind: the windspeed ignition modifiers
    Returns:
        An ignition factor which is generated by the above process. This
        ignition factor determines whether a non burning state starts burning,
        as well as whether a state that has started burning has been completely
        consumed by the fire.
    """
    cell = int(cell)
    ignition_factor = 0
    # Water and already-burnt/dying cells never accumulate ignition.
    if cell in [LAKE, BURNING, BURNT, END_BURN]: return ignition_factor
    neighbours = neighbours.astype(int)
    # Per-state probabilities/increments, indexed by cell state 0..6.
    # States 7 (END_BURN) never index these lists: they are filtered above.
    fully_burning_threshhold = [0.04, 0.01, 0, 0.1, 0, 0, 0.04]
    fully_burning_factor = 20
    start_burning_threshhold = [0.02, 0.005, 0, 0.05, 0, 0, 0.04]
    start_burning_factor = 10
    # add to cell ignition factor by multiplying
    # windspeed modifier and the cells burning threshhold
    # if a random number is less than the resulting number add
    # the burning factor multiplied by the wind speed modifier
    for index, neighbour in enumerate(neighbours):
        if neighbour == BURNING:
            if fully_burning_threshhold[cell] * wind[index] >= random.uniform(
                    0, 1):
                ignition_factor += int(
                    math.floor(wind[index] * fully_burning_factor))
        # Short-circuit keeps the RNG draw conditional on the state check.
        if neighbour in [START_BURN,END_BURN] and \
                start_burning_threshhold[cell] * wind[index] >= random.uniform(0,1):
            ignition_factor += int(
                math.floor(wind[index] * start_burning_factor))
    # if the cell is has already started to burn then a burning factor is
    # automatically applied
    if cell == START_BURN: ignition_factor += start_burning_factor
    return int(ignition_factor)
def started_burning(cell, prev_ig, new_ig):
    """True when a cell should be in the START_BURN state this generation.

    A cell keeps START_BURN once entered; any flammable cell whose ignition
    factor has just gone from zero to positive starts burning.
    """
    state = int(cell)
    if state == START_BURN:
        return True
    non_flammable = (LAKE, BURNING, BURNT, END_BURN)
    newly_ignited = int(prev_ig) == 0 and int(new_ig) > 0
    return state not in non_flammable and newly_ignited
def fully_burning(cell, new_ig, decay):
    """True when a cell should be in the fully BURNING state.

    A BURNING cell stays burning; a START_BURN cell transitions once its
    accumulated ignition factor reaches its remaining decay counter.
    """
    state = int(cell)
    if state == BURNING:
        return True
    return state == START_BURN and int(new_ig) >= int(decay)
def ending_burn(cell, decay, initial):
    """True when a cell should be in the END_BURN state.

    An END_BURN cell stays there; a BURNING cell transitions once at least
    half of its original decay value has been consumed
    (initial >= 2 * decay  <=>  decay <= initial / 2).
    """
    state = int(cell)
    if state == END_BURN:
        return True
    half_consumed = int(initial) >= 2 * int(decay)
    return state == BURNING and half_consumed
def wind_speed_rvalue(direction, speed):
    """Eight per-neighbour ignition modifiers for a given wind.

    direction: compass direction, one of DIRECTIONS.
    speed: wind speed in m/s.
    Returns a numpy array of 8 weights reordered to match the CA neighbour
    order used by ignite() (NW, N, NE, W, E, SW, S, SE).
    Raises ValueError for an unknown direction. (The original silently fell
    off the end and returned None, which crashed later inside ignite().)
    """
    if direction not in DIRECTIONS:
        raise ValueError('unknown wind direction: {!r}'.format(direction))
    list_directions = np.array(["N", "NE", "E", "SE", "S", "SW", "W", "NW"])
    item_index = np.where(list_directions == direction)[0]
    listWeights = np.zeros(8)
    angle_interval = 45
    angle = 0  # angle between the wind and each successive compass direction
    # Walk the compass clockwise from the wind direction, wrapping around,
    # weighting each direction by k_wind(speed, angle).
    for x in range(8):
        listWeights[(x + item_index) % len(list_directions)] = k_wind(
            speed, angle)
        angle = angle + angle_interval
    # Rearrange from compass order into the CA neighbour order.
    rearranged_index = [7, 0, 1, 6, 2, 5, 4, 3]
    return listWeights[rearranged_index]
def k_wind(speed, angle):
    """Wind coefficient exp(0.1783 * v * cos(theta)); *angle* is in degrees."""
    angle_rad = np.deg2rad(angle)
    return np.exp(0.1783 * speed * np.cos(angle_rad))
def main():
    """Build the CA grids from the GUI config, run it, and save the timeline."""
    config = setup(sys.argv[1:])
    # Sentinel decay so untreated cells are never driven to BURNT by the
    # water grid within the run's generation count.
    s = -10000
    # Per-cell fuel counters, indexed by the underlying terrain state.
    decay_grid = [[decay_values[i] for i in row]
                  for row in initial_grid.astype(int)]
    decay_grid = np.array(decay_grid)
    water_decay_values = [s, s, s, s, s]
    water_decay_grid = np.array([[water_decay_values[i] for i in row]
                                 for row in initial_grid.astype(int)])
    # Select section of grid to drop water
    water_decay_grid[120:160, 80:120] = 0  # drop water after this time
    # NOTE(review): this local shadows the module-level `ignition_grid`
    # that transition_function actually mutates via `global` -- the two
    # happen to be initialised identically, so behaviour is unchanged,
    # but the local is effectively unused.
    ignition_grid = np.zeros((grid_size, grid_size))
    ignition_grid = ignition_grid.astype(int)
    grid = Grid2D(config, (transition_function, decay_grid, water_decay_grid))
    # Create grid object using parameters from config + transition function
    # Run the CA, save grid state every generation to timeline
    timeline = grid.run()
    config.save()  # Save updated config to file
    utils.save(timeline, config.timeline_path)  # Save timeline to file


if __name__ == "__main__":
    main()
"noreply@github.com"
] | noreply@github.com |
98491800978764c42bde1d1d36a77b8dc13c9ef3 | 1e249067ab2dabc17cb7ebda46f9f23a5cfad552 | /tests/test_processor.py | 149ae513f1fdcfc1b09bbec275c63aac1d55b556 | [
"BSD-2-Clause"
] | permissive | STIRLIN6/indra_cogex | 6e4cba84ee1ce82a404154e7370f88fc340400cb | 552cefd71431b08b8118b2cc0428fd8681e6fc83 | refs/heads/main | 2023-08-14T01:28:14.852108 | 2021-09-14T04:18:13 | 2021-09-14T04:18:13 | 377,100,238 | 0 | 0 | BSD-2-Clause | 2021-06-15T09:01:23 | 2021-06-15T09:01:23 | null | UTF-8 | Python | false | false | 184 | py | from indra_cogex.representation import norm_id
def test_norm_id():
    """norm_id maps a (namespace, id) pair to a lowercase CURIE.

    The namespace is translated to its registry prefix (UP -> uniprot), and
    identifiers that already embed the prefix (CHEBI:12345) are not doubled.
    """
    assert norm_id("UP", "P12345") == "uniprot:P12345"
    assert norm_id("CHEBI", "CHEBI:12345") == "chebi:12345"
"ben.gyori@gmail.com"
] | ben.gyori@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.