blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2e1b14b5791d705897342227ca9a919f4399bccf | 73f1075c99338984795f4bd7bd7b9563ecc36d87 | /Binary_Search/74.Search_a_2D_Matrix.py | 033d554ced3b5db33c3e0b08155a2a7e62fb0138 | [] | no_license | grg909/LCtrip | 314bd173f87ec98ff13234bdd148c76482db2df7 | 96836da905526b47f0cdee8c0bb4790c4cdd6c79 | refs/heads/master | 2020-09-12T13:38:52.486189 | 2020-08-28T14:22:50 | 2020-08-28T14:22:50 | 222,442,472 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | # -*- coding: UTF-8 -*-
# @Date : 2019/12/11
# @Author : WANG JINGE
# @Email : wang.j.au@m.titech.ac.jp
# @Language: python 3.7
"""
"""
# Approach 1: flatten the 2-D matrix and binary-search it as a 1-D sorted array
class Solution:
    """Search a target in a row-major sorted 2-D matrix (LeetCode 74).

    The matrix is viewed as one flat sorted sequence of n*m values, so a
    single binary search over the virtual index range [0, n*m - 1] suffices.
    """

    def searchMatrix(self, matrix, target):
        """Return True if target occurs in matrix, else False.

        :param matrix: list of equal-length sorted rows whose flattened
            concatenation is non-decreasing
        :param target: value to search for
        :rtype: bool
        """
        # Defensive: non-list input or an empty outer list -> not found.
        try:
            n, m = len(matrix), len(matrix[0])
        except (TypeError, IndexError):
            return False
        # Guard against [[]]: the original code divided by m and crashed
        # with ZeroDivisionError when the rows were empty.
        if m == 0:
            return False
        start, end = 0, n * m - 1
        # "start + 1 < end" binary search: the loop exits with the answer
        # narrowed down to the two flat indices start and end.
        while start + 1 < end:
            mid = (start + end) // 2
            # Integer division is required here: the original "mid / m"
            # produces a float on Python 3 and fails as a list index.
            x, y = divmod(mid, m)
            if matrix[x][y] > target:
                end = mid
            else:
                start = mid
        # Check the two remaining candidate positions.
        for idx in (start, end):
            x, y = divmod(idx, m)
            if matrix[x][y] == target:
                return True
        return False
| [
"grg909@foxmail.com"
] | grg909@foxmail.com |
87b9509ca6d5640d6e420dda4d191a0168607c51 | 45a917403e0865dcf161b24d2742f9cb766c1d69 | /floyd.py | 2021118f5f3f7e57549db3d5a4009445f37dbadc | [] | no_license | wkentdag/algorithms | 47daefa91ff0d0f39f8f885711ca667b9ee6a01b | a80d5d2df2906b82e4d2f916cfddcb8fddd82b02 | refs/heads/master | 2021-01-10T19:44:25.910009 | 2015-03-06T17:46:00 | 2015-03-06T17:46:00 | 31,781,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,130 | py | # Will Kent-Daggett + Scott Hurlow
# COMP-221 Algorithms
# Programming 5, Question 4 (Floyd's Algorithm)
###################################################
import random
import timeit
def floyd(a, p):
    """Run the Floyd-Warshall all-pairs shortest-path algorithm in place.

    Arguments:
        a: square adjacency matrix (list of lists); a[i][j] is the edge
           weight from i to j, float("inf") where there is no edge.  The
           matrix is updated in place to shortest-path distances.
        p: if True, print the matrix before and after plus the elapsed time.

    Returns:
        float: seconds elapsed while computing the shortest paths.

    Note: the original Python 2 print statements were a SyntaxError on
    Python 3; they are converted to the print() function here.
    """
    if p:
        print('a:')
        for elem in a:
            print("\t".join([str(val) for val in elem]))
        print("\n")
    start = timeit.default_timer()
    n = len(a)
    # Standard triple loop: allow vertex k as an intermediate hop and
    # relax every pair (i, j) through it.
    for k in range(n):
        for i in range(n):
            for j in range(n):
                a[i][j] = min(a[i][j], a[i][k] + a[k][j])
    stop = timeit.default_timer()
    if p:
        print('after applying floyds algorithm:')
        for elem in a:
            print("\t".join([str(val) for val in elem]))
        print("\n")
        print('Found shortest pairs in', stop - start, 'seconds\n')
    return stop - start
def random2dGraph(a, b):
    """Build an a-by-a adjacency matrix of random edge weights.

    Diagonal entries are 0.  Every other entry is drawn uniformly from
    1..b; a draw equal to b is converted to float("inf") (no edge), so
    roughly 1/b of the off-diagonal cells end up unreachable.
    """
    graph = []
    for row in range(a):
        current = []
        for col in range(a):
            if row == col:
                current.append(0)
            else:
                weight = random.randint(1, b)
                current.append(float("inf") if weight == b else weight)
        graph.append(current)
    return graph
def bigTests(a, b, c):
    """Benchmark floyd() on random graphs.

    Runs floyd() a times on b-by-b matrices populated with random values
    in the range 1..c (see random2dGraph).  floyd()'s print flag is
    passed as False so the runs stay quiet.

    Returns:
        float: the average completion time in seconds (also printed).
    """
    s = 0
    for i in range(a):
        s += floyd(random2dGraph(b, c), False)
    # Converted to the print() function: the original Python 2 print
    # statement is a SyntaxError on Python 3.
    print('Average time for', a, b, 'x', b, 'graphs:\t', s / a)
    return s / a
################################################### main:
# Driver code: run the homework example with printing enabled, then time
# progressively larger random graphs.  Prints converted to the Python 3
# print() function (the Python 2 print statement is a SyntaxError on
# Python 3).
inf = float("inf")
hwExample = [[0, inf, 8, 20, inf], [5, 0, inf, 9, inf], [10, inf, 0, 6, 12], [20, inf, 6, 0, 3], [inf, 2, 12, 3, 0]]
print("An example from the homework:\n")
floyd(hwExample, True)
print("Average results from several large trials:\n")
bigTests(10, 10, 10)
bigTests(10, 20, 10)
bigTests(10, 40, 10)
bigTests(10, 80, 10)
bigTests(10, 160, 10)
| [
"willkentdaggett@gmail.com"
] | willkentdaggett@gmail.com |
490cc5c17c7b44c4c6e43f0bad18fa2d38711e6e | bc8cd08c03e22625da04357fa95ca93f7f3dee26 | /backend/app/demo/__init__.py | a8e508d8ce4b43ad669d14edb367970e4f72289f | [] | no_license | huanglesi0605/room-seeker | 182149d2df725965d1e08ca007e73cf9bf56d556 | 9b004dba24f0ec9216d4fb0fa9ede2fe626f9f0e | refs/heads/master | 2023-02-06T04:07:24.623518 | 2019-12-08T13:24:33 | 2019-12-08T13:24:33 | 226,670,464 | 0 | 1 | null | 2023-02-02T05:11:20 | 2019-12-08T13:17:11 | JavaScript | UTF-8 | Python | false | false | 356 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from flask import Flask
from flask_cors import CORS
import v1
def create_app():
app = Flask(__name__, static_folder='static')
CORS(app)
app.register_blueprint(
v1.bp,
url_prefix='/v1')
return app
if __name__ == '__main__':
create_app().run(debug=True)
| [
"huanglesi0605@gmail.com"
] | huanglesi0605@gmail.com |
0f276a9b40c35cb921b2f49748656afb5c5442d9 | 0f0a7adfae45e07a896c5cd5648ae081d4ef7790 | /python数据结构/慕课测试题/打印实心矩形.py | f31f5eb66436884a6fbfd6372e3042c933196836 | [] | no_license | renlei-great/git_window- | e2c578544c7a8bdd97a7a9da7be0464d6955186f | 8bff20a18d7bbeeaf714aa49bf15ab706153cc28 | refs/heads/master | 2021-07-19T13:09:01.075494 | 2020-06-13T06:14:37 | 2020-06-13T06:14:37 | 227,722,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | args = input().split()
alist = [int(i) for i in args]
# min_number = min(alist)
min_number = alist[0]
for i in alist:
if min_number > i:
min_number = i
print(min_number)
| [
"1415977534@qq.com"
] | 1415977534@qq.com |
43aba86defd8b4fc8685a46d9e5ee312c9c10898 | 7e0b93b996c7eb89c450a65ea7c69b48780cbbc3 | /dingomata/cogs/bedtime/commands.py | d993e424754d7d0bc39e3c75a91253a3d41bce45 | [
"MIT"
] | permissive | KaiSkiFox/discord-dingomata | fcff446d26052abd0d21e8f69d0773f6e15f6357 | 79ffba2eff8a711c7bb6cf417d0ef7848d230738 | refs/heads/main | 2023-08-23T19:38:33.011227 | 2021-10-07T15:33:59 | 2021-10-07T15:33:59 | 410,990,784 | 0 | 0 | MIT | 2021-09-27T18:01:59 | 2021-09-27T18:01:58 | null | UTF-8 | Python | false | false | 8,882 | py | import logging
from datetime import datetime, timedelta
from random import choice
from typing import Optional, Dict
import pytz
import parsedatetime
from discord import Message, Forbidden
from discord.ext.commands import Bot, Cog
from discord_slash import SlashContext
from discord_slash.utils.manage_commands import create_option
from sqlalchemy import select, delete
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
from sqlalchemy.orm import sessionmaker
from dingomata.config import service_config
from .models import BedtimeModel, Bedtime
from ...decorators import subcommand
from ...exceptions import DingomataUserError
# Module-level logger and a shared parsedatetime calendar used to parse
# free-form time strings such as "11pm" or "23:00".
_log = logging.getLogger(__name__)
_calendar = parsedatetime.Calendar()
class BedtimeSpecificationError(DingomataUserError):
    """Raised when a bedtime cannot be set because the given time or timezone is invalid."""
    pass
class BedtimeCog(Cog, name='Bedtime'):
    """Remind users to go to bed."""
    # Guilds in which the /bedtime commands are registered.
    _GUILDS = service_config.get_command_guilds('bedtime')
    # Common kwargs shared by every /bedtime subcommand decorator.
    _BASE_COMMAND = dict(base='bedtime', guild_ids=_GUILDS)
    # In-memory cache of user_id -> Bedtime row; misses (None) are cached too.
    _BEDTIME_CACHE: Dict[int, Bedtime] = {}
    # If a message contains any of these substrings the author is presumably
    # already heading to bed, so no reminder is sent (see on_message).
    _BEDTIME_KWDS = {'bed', 'sleep', 'bye', 'cya', 'see y', 'night', 'nini', 'nite'}

    def __init__(self, bot: Bot, engine: AsyncEngine):
        """Store the bot and DB engine and build an async session factory."""
        self._bot = bot
        self._engine = engine
        self._session = sessionmaker(engine, expire_on_commit=False, class_=AsyncSession)

    @Cog.listener()
    async def on_ready(self):
        """Create the bedtime tables on startup if they do not already exist."""
        async with self._engine.begin() as conn:
            await conn.run_sync(BedtimeModel.metadata.create_all)

    @subcommand(
        name='set',
        description='Set your own bed time.',
        options=[
            create_option(name='time', description='When do you go to sleep? e.g. 12:00am',
                          option_type=str, required=True),
            create_option(name='timezone', description='Time zone you are in', option_type=str, required=True),
        ],
        **_BASE_COMMAND,
    )
    async def bedtime_set(self, ctx: SlashContext, time: str, timezone: str) -> None:
        """Parse and store the caller's bedtime and timezone, then confirm."""
        # Convert user timezone to UTC
        try:
            tz = pytz.timezone(timezone.strip()) # test if timezone is valid
        except pytz.UnknownTimeZoneError as e:
            raise BedtimeSpecificationError(
                f'Could not set your bedtime because timezone {timezone} is not recognized. Please use one of the '
                f'"TZ Database Name"s listed here: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones'
            ) from e
        # parse_status == 2 means parsedatetime recognised the string as a time of day.
        datetime_obj, parse_status = _calendar.parseDT(time, tzinfo=tz)
        if parse_status != 2:
            raise BedtimeSpecificationError(
                f"Can't interpret {time} as a valid time. Try using something like '11:00pm', '23:00', '11pm'")
        time_obj = datetime_obj.time()
        # Upsert the row, then drop any stale cache entry for this user.
        bedtime = Bedtime(user_id=ctx.author.id, bedtime=time_obj, timezone=str(tz))
        async with self._session() as session:
            async with session.begin():
                await session.merge(bedtime)
                await session.commit()
        self._BEDTIME_CACHE.pop(ctx.author.id, None)
        await ctx.reply(f"Done! I've saved your bedtime as {time_obj} {tz}.", hidden=True)

    @subcommand(name='off', description='Clears your bed time.', **_BASE_COMMAND)
    async def bedtime_off(self, ctx: SlashContext) -> None:
        """Delete the caller's stored bedtime and invalidate the cache entry."""
        async with self._session() as session:
            async with session.begin():
                statement = delete(Bedtime).filter(Bedtime.user_id == ctx.author.id)
                await session.execute(statement)
                await session.commit()
        self._BEDTIME_CACHE.pop(ctx.author.id, None)
        await ctx.reply("Done! I've removed your bedtime preferences.", hidden=True)

    @subcommand(name='get', description='Get your current bed time.', **_BASE_COMMAND)
    async def bedtime_get(self, ctx: SlashContext) -> None:
        """Reply (ephemerally) with the caller's stored bedtime, if any."""
        async with self._session() as session:
            async with session.begin():
                stmt = select(Bedtime).filter(Bedtime.user_id == ctx.author.id)
                bedtime = (await session.execute(stmt)).scalar()
                if bedtime:
                    await ctx.reply(f'Your current bedtime is {bedtime.bedtime} in {bedtime.timezone}', hidden=True)
                else:
                    await ctx.reply('You do not have a bedtime set.', hidden=True)

    @Cog.listener()
    async def on_message(self, message: Message) -> None:
        """Nag a user who chats after their bedtime, subject to a per-user cooldown."""
        # Skip DMs, unconfigured guilds, and messages where the author is
        # already saying goodnight (any _BEDTIME_KWDS substring present).
        if not message.guild or message.guild.id not in self._GUILDS \
                or any(kwd in message.content.lower() for kwd in self._BEDTIME_KWDS):
            return
        async with self._session() as session:
            async with session.begin():
                # Grab the user's bedtime
                result = await self._get_bedtime(session, message.author.id)
                utcnow = datetime.utcnow()
                # Do nothing if the user dont have a bedtime set or if they're in cooldown
                if not result:
                    _log.debug(f'User {message.author.id} does not have a bedtime set.')
                    return
                elif result.last_notified and utcnow < result.last_notified + \
                        timedelta(minutes=service_config.servers[message.guild.id].bedtime.cooldown_minutes):
                    _log.debug(f'User {message.author.id} was last notified at {result.last_notified}, still in '
                               f'cooldown.')
                    return
                # Find the nearest bedtime before current time in user's timezone, either earlier today or yesterday.
                # Not comparing in UTC because bedtime can change due to DST
                tz = pytz.timezone(result.timezone)
                now_tz = datetime.now(tz)
                bedtime = tz.localize(datetime.combine(now_tz.date(), result.bedtime))
                if now_tz.time() < result.bedtime:
                    bedtime -= timedelta(days=1)
                _log.debug(f'User {message.author.id} has bedtime {bedtime}; it is currently {now_tz}')
                sleep_hours = service_config.servers[message.guild.id].bedtime.sleep_hours
                try:
                    # Only remind within the sleep window after bedtime.
                    if now_tz < bedtime + timedelta(hours=sleep_hours):
                        if now_tz < bedtime + timedelta(hours=sleep_hours / 2):
                            # First half of bed time interval
                            text = choice([
                                "go to bed! It's past your bedtime now.",
                                "don't stay up too late. Good sleep is important for your health!",
                                "go to sleep now, so you're not miserable in the morning.",
                                "it's time to go to bed! Unless you're an owl, then go to sleep standing up.",
                                "sleep! NOW!",
                                "your eyes are getting very heavy. You are going into a deep slumber. **Now sleep.**",
                                "go to sleep! Everyone will still be here tomorrow. You can talk to them then.",
                                f"it's now {int((now_tz - bedtime).total_seconds() / 60)} minutes after your bedtime.",
                            ])
                        else:
                            # Second half of bed time interval
                            text = choice([
                                "go back to bed! You're up way too early.",
                                "aren't you awake early today. Maybe consider catching up on those sleep hours?",
                                "you're awake! You were trying to cross the border...",
                                "you're finally awake.... You were trying to sleep, right? Walked right into this "
                                "discord server, same as us, and that furry over there.",
                            ])
                        await message.channel.send(f"Hey {message.author.mention}, {text}")
                        # Record the notification time so the cooldown applies,
                        # and refresh the cache with the updated row.
                        result.last_notified = utcnow
                        await session.commit()
                        self._BEDTIME_CACHE[message.author.id] = result
                        _log.info(f'Notified {message.author} about bedtime.')
                except Forbidden:
                    _log.warning(f'Failed to notify {message.author} in {message.guild} about bedtime. The '
                                 f"bot doesn't have permissions to post there.")

    async def _get_bedtime(self, session, user_id: int) -> Optional[Bedtime]:
        """Fetch a user's Bedtime row, memoising the result (including None misses)."""
        if user_id in self._BEDTIME_CACHE:
            return self._BEDTIME_CACHE[user_id]
        else:
            statement = select(Bedtime).filter(Bedtime.user_id == user_id)
            bedtime = (await session.execute(statement)).scalar()
            self._BEDTIME_CACHE[user_id] = bedtime
            return bedtime
| [
"12706268+snazzyfox@users.noreply.github.com"
] | 12706268+snazzyfox@users.noreply.github.com |
4479613cc55c0bb5cd58770d3eb6318b6b0d2451 | 69c4313b5339243847dfae1658b04652ce59bda8 | /Naive-Bayes Classifier/prediction of movie reviews.py | 28999127fd4c1e862da79cff33359ed8bac41202 | [] | no_license | Ritik-Arora-576/DATA-SCIENCE | 667cd25bf01445572fe6ff823de8f4164ff6f412 | 8ef0bea6dcc6c06bfb5b2dd1266722fff1de435b | refs/heads/master | 2023-02-08T08:07:03.506197 | 2020-12-19T14:40:54 | 2020-12-19T14:40:54 | 280,922,746 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,817 | py | #!/usr/bin/env python
# coding: utf-8
# # Training Dataset :
# In[1]:
# Six hand-written movie reviews used as the toy training set; y holds the
# matching labels (1 = positive review, 0 = negative review).
train_data=[
        'This was an awesome movie',
        'Great movie ! I liked it alot',
        'Happy Ending ! awesome acting by the hero',
        'Loved it ! Truly great',
        'bad not upto the mark',
        'Surely a dissapointing movie'
]
y=[1,1,1,1,0,0]
# # Cleaning of a Dataset :
# In[2]:
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
# In[3]:
# Shared NLP helpers: English stop-word list, word-only tokenizer (\w+),
# and a Porter stemmer, all used by cleaning() below.
sw=stopwords.words('english')
tokenizer=RegexpTokenizer(r'\w+')
ps=PorterStemmer()
# In[4]:
def cleaning(text):
    """Normalise a review: lowercase, drop HTML breaks, remove stop words, stem."""
    lowered = text.lower().replace('<br /><br />', ' ')
    # tokenize, filter out stop words, then stem each surviving token
    tokens = tokenizer.tokenize(lowered)
    kept = [word for word in tokens if word not in sw]
    stemmed = [ps.stem(word) for word in kept]
    return ' '.join(stemmed)
# In[5]:
# Clean every training review into a space-joined string of stems.
new_training_data=[cleaning(text) for text in train_data]
# In[6]:
# Bare expression: a notebook-cell echo, a no-op when run as a script.
new_training_data
# # Vectorizer :
# In[7]:
test_data=['I was happy by seeing the actions in a movie',
          'This movie is bad']
new_test_data=[cleaning(text) for text in test_data]
from sklearn.feature_extraction.text import CountVectorizer
cv=CountVectorizer()
# In[8]:
# Learn the vocabulary from the training set and build its count matrix.
numbers=cv.fit_transform(new_training_data).toarray()
# In[9]:
# vectorize testing datasets (transform only -- reuse the fitted vocabulary)
test_numbers=cv.transform(new_test_data).toarray()
print(test_numbers)
# # Multinomial Naive Bayes :
# In[10]:
from sklearn.naive_bayes import MultinomialNB,GaussianNB,BernoulliNB
# In[11]:
# Fit a multinomial NB classifier on the bag-of-words counts.
mnb=MultinomialNB()
mnb.fit(numbers,y)
# In[12]:
# Notebook echo of the predictions; result is discarded as a script.
mnb.predict(test_numbers)
# # Bernaulli Naive Bayes :
# In[13]:
# Fit a Bernoulli NB classifier (binarised features) for comparison.
bnb=BernoulliNB()
bnb.fit(numbers,y)
# In[14]:
bnb.predict(test_numbers)
# In[ ]:
| [
"ritikarora656@gmail.com"
] | ritikarora656@gmail.com |
cc7b250a3c9f0394d2b4a95cc17b250ac8fc17f7 | bd2a975f5f6cd771393f994ebd428e43142ee869 | /new_render_data/input/p/script/abort/back20180419/CG/C4d/process/AnalyzeC4d.py | 8701fce3cb9979a4512eb94493a2858b24657c12 | [] | no_license | sol87/Pycharm_python36 | 1a297c9432462fc0d3189a1dc7393fdce26cb501 | fa7d53990040d888309a349cfa458a537b8d5f04 | refs/heads/master | 2023-03-16T10:35:55.697402 | 2018-11-08T09:52:14 | 2018-11-08T09:52:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,585 | py | #!/usr/bin/env python
# -*- coding=utf-8 -*-
# Author: kaname
# QQ: 1394041054
""" C4d analyzer """
# RUN:
# 1. From C4Dloader.py to loading RBAnalzer.py to do it.
# 2. AnalyzeC4d.py loading C4Dloader.py to do it.
import os
import sys
import subprocess
import string
import logging
import time
import shutil
from C4d import C4d
from C4dLoader import C4dLoader
from C4dPluginManager import C4dPlugin, C4dPluginMgr
from CommonUtil import RBCommon as CLASS_COMMON_UTIL
class AnalyzeC4d(C4d):
    """Cinema 4D scene analyzer: maps network drives, configures plugins and
    drives C4dLoader over the task's scene file."""

    def __init__(self, **paramDict):
        """Initialise the base C4d state and derive the tips.json output path."""
        C4d.__init__(self, **paramDict)
        self.format_log('AnalyzeC4d.init', 'start')
        self.G_TIPS_TXT_NODE=os.path.join(self.G_WORK_RENDER_TASK_CFG, 'tips.json').replace('\\','/')
        # Dump every attribute to the debug log for troubleshooting.
        for key, value in list(self.__dict__.items()):
            self.G_DEBUG_LOG.info(key + '=' + str(value))
        self.format_log('done','end')

    def RB_MAP_DRIVE(self):# 2. overrides the base implementation ("chongxie" = rewrite)
        """Map Windows network drives: the project path as a drive letter and
        the plugin path as B: (skipped entirely when G_RENDER_OS == '0')."""
        #self.format_log('[映射盘符]','[start]'.decode('utf-8').encode('gbk'))
        self.G_DEBUG_LOG.info('[c4d.RB_MAP_DRIVE.start.....]')
        if self.G_RENDER_OS != '0':
            #delete all mappings
            CLASS_COMMON_UTIL.del_net_use()
            CLASS_COMMON_UTIL.del_subst()
            #net use
            b_flag = False
            if self.G_CG_NAME == 'C4d':
                # NOTE(review): map_root is the basename of the project path and is
                # used directly as the drive letter -- presumably the share is named
                # after a drive letter; confirm against the task configuration.
                map_root = os.path.basename(self.G_INPUT_PROJECT_PATH)
                print(map_root + '@KANADAmmmmmmm')
                map_dict = os.path.join(self.G_INPUT_PROJECT_PATH)
                print(map_root + '@KANADAnnnnnnn')
                map_cmd = 'net use %s: "%s"' % (map_root, map_dict)
                CLASS_COMMON_UTIL.cmd_python3(map_cmd,my_log=self.G_DEBUG_LOG)
            # #base RB_MAP_DRIVE
            # if self.G_CG_NAME != 'Max' and self.G_TASK_JSON_DICT['system_info'].has_key('mnt_map'):
                # map_dict = self.G_TASK_JSON_DICT['system_info']['mnt_map']
                # for key,value in map_dict.items():
                    # value = os.path.normpath(value)
                    # map_cmd = 'net use "%s" "%s"' % (key,value)
                    # CLASS_COMMON_UTIL.cmd_python3(map_cmd,my_log=self.G_DEBUG_LOG)
                    # if key.lower() == 'b:':
                        # b_flag = True
            # Fall back to mapping the plugin path as B: when nothing claimed it.
            if not b_flag:
                map_cmd_b = 'net use B: "%s"' % (os.path.normpath(self.G_PLUGIN_PATH))
                CLASS_COMMON_UTIL.cmd(map_cmd_b,my_log=self.G_DEBUG_LOG,try_count=3)
        self.G_DEBUG_LOG.info('[c4d.RB_MAP_DRIVE.end.....]')
        self.format_log('done','end')

    def RB_CONFIG(self):
        """Configure the renderer plugins (delegates to the base plugin_config)."""
        self.G_DEBUG_LOG.info('[c4d.analyze.配置插件开始]')
        self.G_DEBUG_LOG.info('[c4d.analyze.plugin.config.start......]')
        self.plugin_config()
        self.G_DEBUG_LOG.info('[c4d.analyze.配置插件完成]')
        self.G_DEBUG_LOG.info('[c4d.analyze.plugin.config.end......]')

    def RB_RENDER(self):
        """Run the analysis phase: record fee timestamps and execute C4dLoader
        over the task's scene file and JSON descriptors."""
        self.G_DEBUG_LOG.info('[c4d.RBanalyse.start.....]')
        self.G_FEE_PARSER.set('render','start_time',str(int(time.time())))
        cg_ver = self.G_CG_VERSION
        task_id = self.G_TASK_ID
        cg_file = self.G_INPUT_CG_FILE
        task_json = self.G_TASK_JSON
        asset_json = self.G_ASSET_JSON
        tips_json = self.G_TIPS_TXT_NODE
        c4d_loader = C4dLoader(cg_ver, task_id, cg_file, task_json, asset_json, tips_json)
        c4d_loader.execute()
        self.G_FEE_PARSER.set('render','end_time',str(int(time.time())))
        self.G_DEBUG_LOG.info('[c4d.RBanalyse.end.....]')
| [
"superdkk@gmail.com"
] | superdkk@gmail.com |
dcd129a74603a0715965552c23d7abc56eb7e6da | 484d73935f057756df8bc6556fc5704327443108 | /234ICPC/D_test.py | 7711282531746f22a74f0eb79cc78163db4c7043 | [] | no_license | kazuya030/CodeForces | 5d93d25f456589ad6343e1140ca27c5ecbd0d652 | 8d859c7680c7dd1c40943bb05116bf032ea5f9bd | refs/heads/master | 2021-03-12T23:45:53.592799 | 2012-12-02T06:57:30 | 2012-12-02T06:57:30 | 6,964,124 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | #coding: utf8
import sys
import StringIO
__date__ = '2012/10/20'
from D import solve
def test(input, ans):
    """Feed *input* to solve() via stdin and compare captured stdout to *ans*.

    Python 2 only (print statements and the StringIO module).  Note that the
    *input* parameter shadows the builtin of the same name.
    """
    ans = str(ans)
    # Redirect stdin/stdout so solve() reads the fixture text and its
    # printed output is captured instead of reaching the console.
    s_in = StringIO.StringIO(input)
    s_out = StringIO.StringIO()
    sys.stdin = s_in; sys.stdout = s_out
    str(solve())
    # Restore the real streams before reporting the verdict.
    sys.stdin = sys.__stdin__; sys.stdout = sys.__stdout__
    sys.stdout.flush()
    ans_tmp = s_out.getvalue()
    if ans_tmp == ans:
        print "Correct"
        print repr(input)
        print ans
        print
    else:
        print "Wrong!!! :", repr(input)
        print "TrueAnswer"
        print ans
        print "YourAnswer"
        print ans_tmp
if __name__ == '__main__':
    # One sample case: the first multi-line string is the stdin fed to
    # solve() (problem size line, then film records), the second is the
    # exact stdout expected back.
    test("""5 3
1 2 3
6
firstfilm
3
0 0 0
secondfilm
4
0 0 4 5
thirdfilm
1
2
fourthfilm
1
5
fifthfilm
1
4
sixthfilm
2
1 0
"""
    ,"""2
2
1
1
1
2
""")
| [
"minami@Retinan.local"
] | minami@Retinan.local |
3f49e0b87034d1facf0d85d5bd8aa7183250ef3e | fa00de856cea27b4425474dcdc6cb8b4654b0278 | /seprjmvp/accounts/forms.py | c44674114929a9c92fa9641d962f19de5d609e75 | [] | no_license | faryabimm/se_prj_mvp | 03a7f90e12ea6d4c12dcb6304904b24e409ba0b5 | 092fccc9f62b4faa97e717d1d27ffae6184aa251 | refs/heads/master | 2020-03-19T18:41:27.915541 | 2018-06-10T15:24:25 | 2018-06-10T15:24:25 | 136,820,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class SignUpForm(UserCreationForm):
    """Registration form: username and passwords plus required name and email.

    NOTE(review): 'name' is declared on the form and listed in Meta.fields,
    but the stock Django User model has no 'name' field -- verify how it is
    persisted (or whether it is silently dropped) against the model.
    """
    name = forms.CharField(max_length=50, required=True, help_text='required')
    email = forms.EmailField(max_length=254, required=True, help_text='required')
    # is_admin = forms.CheckboxInput(check_test=True)

    class Meta:
        model = User
        fields = ('username', 'name', 'email', 'password1', 'password2', )

    def save(self, commit=True):
        """Build the User instance; persist it only when commit is True."""
        user = super(SignUpForm, self).save(commit=False)
        # user.is_admin = self.cleaned_data["is_admin"]
        if commit:
            user.save()
        return user
| [
"faryabimohammadmahdi@gmail.com"
] | faryabimohammadmahdi@gmail.com |
c126f0f06bc1013db451d75b242298742e2ec9e8 | 389e5408ee9846219b52c68381a32f23deff6c68 | /app/core/migrations/0003_ingredient.py | 1f421c7b1e2b77927ddc2fafe9fcdd9365ec8969 | [
"MIT"
] | permissive | derwinterence04/recipe-app-api | 63703abb7b7d5e9c33a4ed5010927aee1c674ba4 | fee862987885d4dcd4878c48cb9cffa11d015ae1 | refs/heads/master | 2022-11-23T11:32:56.908589 | 2020-07-29T02:08:06 | 2020-07-29T02:08:06 | 282,761,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | # Generated by Django 3.0.8 on 2020-07-28 02:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the Ingredient model
    # (name plus owning user) on top of the tag migration.

    dependencies = [
        ('core', '0002_tag'),
    ]

    operations = [
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                # Cascade delete: removing a user removes their ingredients.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"dtcastro@ph.indra.com"
] | dtcastro@ph.indra.com |
dba40b9bb798424bbf69d92708c6cfa4e6a370f6 | 3a0464e991ecb173bc4582892a8079e6312a82c8 | /app.py | 8da2a40496d28db0bf9e519e48a1895af6e072e4 | [] | no_license | noorbakerally/sparql-server-wrapper | d3003503b2d1892f0007114992372ac9cf8634ba | a7c6612cc20295871f4371c4a32c6c5cb64f5c92 | refs/heads/master | 2021-01-10T05:41:55.952301 | 2016-03-19T20:12:59 | 2016-03-19T20:12:59 | 54,285,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | from flask import Flask,request
from easyprocess import EasyProcess
from flask.ext.cors import CORS
app = Flask(__name__)
CORS(app)
@app.route("/generate", methods=['GET', 'POST'])
def generate():
#get the query
if request.method == 'POST':
query = request.form['query']
else:
query = request.args.get('query')
query = query.replace("\"","\\\"")
command = "java -jar g.jar -qs \""+query+"\""
command_results = EasyProcess(command).call().stdout
result = command_results.split("RDF Output:")[1]
return str(result)
if __name__ == "__main__":
app.debug = True
app.run()
| [
"noorani.bakerally@gmail.com"
] | noorani.bakerally@gmail.com |
ef9dd66a281bd4a8cfff524ae8a983149449e1cd | ca17bd80ac1d02c711423ac4093330172002a513 | /binary_tree_longest_consecutive_sequence/LongestSequence_better.py | 3413f0a627ca955427b2a27755e726678c29a746 | [] | no_license | Omega094/lc_practice | 64046dea8bbdaee99d767b70002a2b5b56313112 | e61776bcfd5d93c663b247d71e00f1b298683714 | refs/heads/master | 2020-03-12T13:45:13.988645 | 2018-04-23T06:28:32 | 2018-04-23T06:28:32 | 130,649,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """Longest consecutive sequence in a binary tree (LeetCode 298): the
    longest parent-to-child path whose values increase by exactly 1."""

    def helper(self, root):
        """Post-order walk of the subtree rooted at *root*.

        Returns:
            tuple (best, down): best is the longest consecutive path found
            anywhere in this subtree; down is the longest consecutive path
            that starts at root and goes downward.
        """
        if not root:
            return 0, 0
        left_best, left_down = self.helper(root.left)
        right_best, right_down = self.helper(root.right)
        down = 1
        # Extend through a child only when its value is exactly one more
        # than the current node's value.
        if root.left and root.val + 1 == root.left.val:
            down = max(down, left_down + 1)
        if root.right and root.val + 1 == root.right.val:
            down = max(down, right_down + 1)
        best = max(down, left_best, right_best)
        return best, down

    def longestConsecutive(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        # The unused "self.longest" accumulator from an earlier approach
        # has been removed; helper() carries all the state.
        if not root:
            return 0
        return self.helper(root)[0]
| [
"zhao_j1@denison.edu"
] | zhao_j1@denison.edu |
139d90a724afb570f73766dcec13eb22481baa23 | e8bfb4565c63538b14a68b798093b79bcc934723 | /labs/lab4.py | 2edb40ca16e7d65c0d28b14f703e770d9fd217e6 | [] | no_license | ameru/cs-fundamentals-101 | 630230c1c384792da4d563dbe1990f888861cace | 2d3841e1bc21885f1f274fe70b4071152520c53e | refs/heads/main | 2023-02-03T19:32:15.527283 | 2020-12-21T07:25:00 | 2020-12-21T07:25:00 | 303,852,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,078 | py | # no docstrings needed for __eq__ and __repr__
import turtle
def euclid_distance(point1, point2) -> float:
    """Compute the Euclidean distance between two points.

    Arguments:
        point1 (Point): a point
        point2 (Point): another point

    Returns:
        float: the Euclidean distance between the points
    """
    # Fixes: the original line was a SyntaxError ("result = return ..."
    # with unbalanced parentheses), referenced undefined names a/b instead
    # of the point1/point2 parameters, and discarded its value (the
    # trailing round(result) was never returned).
    dx = point1.x - point2.x
    dy = point1.y - point2.y
    return (dx ** 2 + dy ** 2) ** 0.5
def overlap(circle1, circle2) -> bool:
    """Determine whether two circles touch or overlap each other.

    Arguments:
        circle1 (Circle): the first circle
        circle2 (Circle): the second circle

    Returns:
        bool: True if the circles touch/overlap, False if they do not
    """
    centre_gap = euclid_distance(circle1.center, circle2.center)
    combined_radii = circle1.r + circle2.r
    return centre_gap <= combined_radii
if __name__ == '__main__':
    tortoise = turtle.Turtle()

    def draw_circle(tortoise, circle) -> None:
        """Draw a circle using the turtle module.

        Arguments:
            tortoise (Turtle): the turtle that does the drawing
            circle (Circle): the circle to draw
        """
        # Move pen-up to the circle's centre, then draw.  The original body
        # referenced undefined names x, y and radius; they are now read
        # from the circle argument (assumes circle exposes .center.x,
        # .center.y and .r, matching how overlap() accesses circles --
        # TODO confirm against the Circle class definition).
        tortoise.penup()
        tortoise.setx(circle.center.x)
        tortoise.sety(circle.center.y)
        tortoise.pendown()
        tortoise.circle(circle.r)

    # NOTE(review): draw_circle is defined but never invoked here, so the
    # window shows nothing before turtle.done() hands control to the GUI
    # event loop.
    turtle.done()
| [
"noreply@github.com"
] | noreply@github.com |
166d339829928c03eae087789acaafe7f5329a46 | 267f2c09420436e97275986f825045cbe81fd3ec | /buy & sell vinyl records 3.5.3.py | 5215d6374e530fd31aa37d163087968486904c55 | [] | no_license | aiqbal-hhs/91906-7 | f1ddc21846bee6dd9dcf4f75bdabe68989390769 | 8d6aadedff8c6585c204a256b5bd3ad8294a815f | refs/heads/main | 2023-05-15T00:17:41.407536 | 2021-06-04T10:32:21 | 2021-06-04T10:32:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,680 | py | from functools import partial
from tkinter import *
import random
root = Tk()
# Seed stock: "album - artist" strings offered in the buy/sell dropdowns.
stock_list = ["Igor - Tyler The Creator",
              "Good Kid Maad City - Kendrick Lamar",
              "Demon Days - Gorillaz"]
class stock:
    """A vinyl record line held in stock.

    Instances register themselves in the module-level stock_list when
    created.  NOTE(review): stock_list is pre-seeded with plain strings,
    so after any stock() is created the list mixes str and stock objects;
    the OptionMenu calls pass those entries straight through -- confirm
    the intended element type.
    """

    def __init__(self, name, amount):
        self.name = name
        # Bug fix: the original line read "self.amount" -- a bare attribute
        # lookup that raised AttributeError on a fresh instance and never
        # stored the argument.
        self.amount = amount
        stock_list.append(self)
##########################################buy frame######################################################
# formatting variables....
background_color = "orange"

# buy page GUI
buy_frame = Frame(width=360, bg=background_color)
buy_frame.grid()

# buy title (row 0)
buy_label = Label(buy_frame, text="Buy page",
                  font=("Arial", "16", "bold"),
                  bg=background_color,
                  padx=10, pady=5)
buy_label.grid(row=0, column=0)

# buy heading (label, row 1)
buy_heading = Label(buy_frame, text="Buy heading goes here",
                    font=("Arial", "12"),
                    bg=background_color,
                    padx=10, pady=5)
buy_heading.grid(row=1, column=0)

# buy description (label, row 2).  Bug fix: tkinter Label has no "wrap"
# option (that belongs to the Text widget) -- the correct option is
# "wraplength", so the original call raised TclError at startup.
buy_text = Label(buy_frame, text="this is where you buy vinyls",
                 font="Arial 9 italic", wraplength=250, justify=LEFT,
                 bg=background_color,
                 padx=10, pady=10)
buy_text.grid(row=2, column=0)

# entry for amount of vinyls the user wants to buy
e = Entry(buy_frame, width=25)
e.insert(0, "")
e.grid(row=4, column=1)

# NOTE(review): command=help wires the button to the Python builtin
# help() -- presumably a placeholder callback; confirm intent.
myButton = Button(buy_frame, text="Enter", font=("Arial", "14"),
                  padx=10, pady=10, command=help)
myButton.grid(row=5, column=1)

# Creating the Dropdown Menu of records available to buy
chosen_option = StringVar()
option_menu = OptionMenu(buy_frame, chosen_option, stock_list[0], *stock_list)
option_menu.grid(row=1, column=1)
##########################################sell frame######################################################
# formatting variables....
sell_background_color = "blue"

# sell page GUI
sell_frame = Frame(width=360, bg=sell_background_color)
sell_frame.grid()

# sell title (row 0)
sell_label = Label(sell_frame, text="Sell page",
                   font=("Arial", "16", "bold"),
                   bg=sell_background_color,
                   padx=10, pady=5)
sell_label.grid(row=0, column=0)

# sell heading (label, row 1)
sell_heading = Label(sell_frame, text="sell heading goes here",
                     font=("Arial", "12"),
                     bg=sell_background_color,
                     padx=10, pady=5)
sell_heading.grid(row=1, column=0)

# sell description (label, row 2).  Bug fixes: tkinter Label has no
# "wrap" option (the correct option is "wraplength", the original raised
# TclError), and the copy-pasted text said "buy" on the sell page.
sell_text = Label(sell_frame, text="this is where you sell vinyls",
                  font="Arial 9 italic", wraplength=250, justify=LEFT,
                  bg=sell_background_color,
                  padx=10, pady=10)
sell_text.grid(row=2, column=0)

# entry for amount of vinyls the user wants to sell
sell_e = Entry(sell_frame, width=25)
sell_e.insert(0, "")
sell_e.grid(row=4, column=1)

# NOTE(review): command=help is the Python builtin -- presumably a
# placeholder callback; confirm intent.
sell_Button = Button(sell_frame, text="Enter", font=("Arial", "14"),
                     padx=10, pady=10, command=help)
sell_Button.grid(row=5, column=1)

# Creating the Dropdown Menu of records available to sell
sell_chosen_option = StringVar()
sell_option_menu = OptionMenu(sell_frame, sell_chosen_option, stock_list[0], *stock_list)
sell_option_menu.grid(row=1, column=1)
##########################################stock frame############################
#main routine
if __name__ == "__main__":
    # Name the window, then hand control to the Tk event loop.
    root.title("Buy & Sell Vinyl Records")
    root.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
40e657c8fd6cc4159d8d1aab9bd7146e9cb84561 | 7f8c9a91abfd776576edbefd9929b55ab249523e | /node_modules/uws/build/config.gypi | f29e879b0304e4bd03f5d2ec4d83ef0059b439f4 | [
"Zlib"
] | permissive | sam735/Direct_messaging | 6e854b195fe964c615501604ef4fc505e1968236 | e83b8cadcd29faa2fbb733250b76e8e0efe91996 | refs/heads/master | 2021-10-22T22:42:53.651443 | 2019-03-13T07:36:09 | 2019-03-13T07:36:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,235 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt60l.dat",
"icu_data_in": "..\\..\\deps/icu-small\\source/data/in\\icudt60l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "60",
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 57,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "true",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "so.57",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "C:\\Users\\ksanjeev\\.node-gyp\\8.10.0",
"standalone_static_library": 1,
"access": "",
"allow_same_version": "",
"also": "",
"always_auth": "",
"auth_type": "legacy",
"bin_links": "true",
"browser": "",
"ca": "",
"cache": "C:\\Users\\ksanjeev\\AppData\\Roaming\\npm-cache",
"cache_lock_retries": "10",
"cache_lock_stale": "60000",
"cache_lock_wait": "10000",
"cache_max": "Infinity",
"cache_min": "10",
"cert": "",
"cidr": "",
"color": "true",
"commit_hooks": "true",
"depth": "Infinity",
"description": "true",
"dev": "",
"dry_run": "",
"editor": "notepad.exe",
"engine_strict": "",
"fetch_retries": "2",
"fetch_retry_factor": "10",
"fetch_retry_maxtimeout": "60000",
"fetch_retry_mintimeout": "10000",
"force": "",
"git": "git",
"git_tag_version": "true",
"global": "",
"globalconfig": "C:\\Users\\ksanjeev\\AppData\\Roaming\\npm\\etc\\npmrc",
"globalignorefile": "C:\\Users\\ksanjeev\\AppData\\Roaming\\npm\\etc\\npmignore",
"global_style": "",
"group": "",
"ham_it_up": "",
"heading": "npm",
"https_proxy": "",
"if_present": "",
"ignore_prepublish": "",
"ignore_scripts": "",
"init_author_email": "",
"init_author_name": "",
"init_author_url": "",
"init_license": "ISC",
"init_module": "C:\\Users\\ksanjeev\\.npm-init.js",
"init_version": "1.0.0",
"json": "",
"key": "",
"legacy_bundling": "",
"link": "",
"local_address": "",
"logs_max": "10",
"long": "",
"maxsockets": "50",
"message": "%s",
"metrics_registry": "https://registry.npmjs.org/",
"node_gyp": "C:\\Program Files\\nodejs\\node_modules\\npm\\node_modules\\node-gyp\\bin\\node-gyp.js",
"node_options": "",
"node_version": "8.10.0",
"offline": "",
"onload_script": "",
"only": "",
"optional": "true",
"otp": "",
"package_lock": "true",
"package_lock_only": "",
"parseable": "",
"prefer_offline": "",
"prefer_online": "",
"prefix": "C:\\Users\\ksanjeev\\AppData\\Roaming\\npm",
"production": "",
"progress": "true",
"read_only": "",
"rebuild_bundle": "true",
"registry": "https://registry.npmjs.org/",
"rollback": "true",
"save": "true",
"save_bundle": "",
"save_dev": "true",
"save_exact": "",
"save_optional": "",
"save_prefix": "^",
"save_prod": "",
"scope": "",
"scripts_prepend_node_path": "warn-only",
"script_shell": "",
"searchexclude": "",
"searchlimit": "20",
"searchopts": "",
"searchstaleness": "900",
"send_metrics": "",
"shell": "C:\\Windows\\system32\\cmd.exe",
"shrinkwrap": "true",
"sign_git_tag": "",
"sso_poll_frequency": "500",
"sso_type": "oauth",
"strict_ssl": "true",
"tag": "latest",
"tag_version_prefix": "v",
"timing": "",
"tmp": "C:\\Users\\ksanjeev\\AppData\\Local\\Temp",
"umask": "0000",
"unicode": "",
"unsafe_perm": "true",
"usage": "",
"user": "",
"userconfig": "C:\\Users\\ksanjeev\\.npmrc",
"user_agent": "npm/5.6.0 node/v8.10.0 win32 x64",
"version": "",
"versions": "",
"viewer": "browser"
}
}
| [
"ksanjeev@zeomega.com"
] | ksanjeev@zeomega.com |
54f5e3cdf45372380069c8efe4a8c3f74a44ee5b | 637ad73004c056397e1f7ecd039c9c0ece9a1114 | /ScrapingCongress/cget.py | e830f32de64f7bef673b60a03070a049ec5f8be8 | [
"MIT"
] | permissive | Krewn/bukeVote | cf07c26dfb91e9c750b4cf7ea6435531c3cf3159 | 646ff194957cd6e4dd642ee1f61021c3be01a0d4 | refs/heads/master | 2016-09-13T17:11:01.576846 | 2016-06-07T07:25:25 | 2016-06-07T07:25:25 | 58,336,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | # $$$$$$$\ $$\
# $$ __$$\ |$$| |$$|
# $$ | $$ | |$$\ $$| $$| $$$$$$$\
# $$$$$$$\ | $$| $$| $$| $$ _____|
# $$ __$$\ $$| $$| $$| \$$$$$$\
# $$ | $$ | $$| $$| $$| \____$$\
# $$$$$$$ | $$| $$| $$| $$$$$$$ |
# \_______/ __| __| __| \_______/
from cssselect import GenericTranslator, SelectorError
def selector():
    """Translate the CSS selector 'div.content' into an XPath expression.

    Returns the XPath string, or None if the selector is invalid.
    BUGFIX: previously the translated expression was computed and then
    discarded, so the function always returned None.
    """
    try:
        expression = GenericTranslator().css_to_xpath('div.content')
    except SelectorError:
        print('Invalid selector.')
        return None
    return expression
# Sample Bill
import urllib
from lxml import html
# Fetch the page at *_root* and print the text and href of every anchor.
# (Python 2 code: urllib.urlopen and the print statement below.)
def versionTree(_root):
    page = html.fromstring(urllib.urlopen(_root).read())
    for link in page.xpath("//a"):
        print "Name", link.text, "URL", link.get("href")
# NOTE(review): versionTree requires the *_root* URL argument, so this call
# raises TypeError; it also returns None, so print() would just show None.
print(versionTree())
#data = {n : f.read() for n in }
#pickle.dump(data,"theBills.json")
"kpie314@gmail.com"
] | kpie314@gmail.com |
cf4860def85e20d2a28057aaa8e43165421b30f5 | abe88d197c288a553645a0add7ccc50f34ceb858 | /02_streamciphers/truthtable_to_anf.py | 09929b32eaaad66864d5afa71f15a82d803b1682 | [
"Unlicense"
] | permissive | StoneSwine/IMT4124-cryptology_software | eea3d8fe69c9c22929fdaa6e76cac1535f68effa | f480ad40a02802d82fdf2a3b35bc2251d5241f89 | refs/heads/main | 2023-01-24T03:13:25.562324 | 2020-11-19T07:55:35 | 2020-11-19T07:55:35 | 314,175,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | #!/usr/bin/env python3
import math
# CHANGEME: the Boolean function under analysis.
#g = 0xE7
# Truth table of the function, encoded as a bit string (output for each input
# assignment, in ascending input order).
g = int("0b11010001", 2)
g = str(bin(g)[2:])
# Number of input variables: log2 of the truth-table length.
varnum = int(math.log2(len(g)))
# Highest row index of the truth table (the all-ones input assignment).
n_rows = int("0b" + "1" * varnum, 2)
def find(s, ch):
    """Return every index in *s* where the character *ch* occurs."""
    positions = []
    for idx, letter in enumerate(s):
        if letter == ch:
            positions.append(idx)
    return positions
def get_multiple_characters(s, indexes):
    """Return *s* if every position listed in *indexes* holds '0'; else None."""
    zeros_everywhere = all(s[i] == "0" for i in indexes)
    return s if zeros_everywhere else None
# Print the truth table: each input assignment next to its output bit.
print("The truth table:")
for i, gc in zip(range(n_rows + 1), g):
    print(f"{[x for x in bin(i)[2:].zfill(varnum)]} | {gc}")
print("- " * 10)
# Pair each input assignment (as a zero-padded bit string) with its output bit.
sequence = [(bin(i)[2:].zfill(varnum), gc) for i, gc in zip(range(n_rows + 1), g)]
# The Möbius transform: the monomial indexed by u appears in the ANF iff the
# parity (XOR) of f over all inputs dominated by u is 1.
endsumvals = []
for i in range(n_rows + 1):
    print("#" * 10)
    a = []
    u = bin(i)[2:].zfill(varnum)
    print(f"u={u}")
    for x in sequence:
        for y in sequence:
            # Keep y only when it is zero at every position where u is zero,
            # i.e. y is dominated by u.
            if y[0] == get_multiple_characters(x[0], find(u, "0")):
                a.append(int(y[1]))
                print(y[0])
    # Odd parity means the monomial indexed by u occurs in the ANF.
    if sum(a) % 2 == 1:
        endsumvals.append(u)
    print("#" * 10)
print("END:", " + ".join(endsumvals))
| [
"StoneSwine@users.noreply.github.com"
] | StoneSwine@users.noreply.github.com |
2668f757293e1c34a37a46665c3e67873ae603b9 | 9e2e70257f707751fdff54f3322fad09a4029118 | /graphing.py | e909d13d3ea132022069e91b5d9e7f5f33190375 | [] | no_license | danielchen09/CS448-Project3 | eb254c848ea9aff73ddd60c4e814a05cb4150b3b | 37212dacc9696f7504117ce21f2073a1bc20ec57 | refs/heads/master | 2023-04-13T02:01:28.281860 | 2021-04-22T09:18:28 | 2021-04-22T09:18:28 | 357,829,794 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Accumulators for the parsed timing data.
x = []
# NOTE(review): y_min/y_avg/y_max are declared but never populated or plotted --
# presumably the script is unfinished.
y_min = []
y_avg = []
y_max = []
ys = []
# Input format (assumed from the read pattern): one x value per record followed
# by four lines of space-separated y samples -- TODO confirm against the file.
with open('part1test1-out/time.txt') as f:
    while (True):
        line = f.readline()
        if not line:
            break
        x.append(line)
        for i in range(4):
            ys += f.readline().split(' ')
x = np.array(x, dtype=float)
ys = np.array(ys, dtype=float)
print(x, ys)
fig, ax = plt.subplots(1)
# NOTE(review): plot() is called with no data, so nothing is drawn, and the
# figure is never shown or saved.
ax.plot()
"chen3189@purdue.edu"
] | chen3189@purdue.edu |
53a0c5631fbc7d5df21cab0051bc053183e747e3 | a6df1449b52ab0bf393b2eaabf859e89f07377e3 | /article/migrations/0003_auto_20191127_1549.py | f7de0863968eb1ec983749b2f0538bbc5d0281af | [] | no_license | adwinugroho/cryppy_blog_django | 3ddaacb0389a17e8cba4120340c33e72226a0bca | 723974469613301af7a6f7add5e67c1de4656f43 | refs/heads/master | 2021-06-30T14:08:29.771672 | 2019-12-14T08:42:14 | 2019-12-14T08:42:14 | 227,990,314 | 0 | 0 | null | 2019-12-14T08:35:19 | 2019-12-14T08:27:59 | HTML | UTF-8 | Python | false | false | 398 | py | # Generated by Django 2.2.7 on 2019-11-27 15:49
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('article', '0002_article_slug'),
]
operations = [
migrations.AlterField(
model_name='article',
name='overview',
field=ckeditor.fields.RichTextField(),
),
]
| [
"adwinnugroho16@gmail.com"
] | adwinnugroho16@gmail.com |
ac0e7dab7b113ef703d686513e93015258211bce | aae2f576442eca943a3c3c73f9ae65075419e503 | /tests/test_api.py | 7c607f1502e5bb6ec421962a680e26403d8a386f | [] | no_license | joaquin-diaz/coding-challenge-backend | 38c6fa8d8d75f642ea3580a957e9c61871d7023d | f97b8ec6041525127794af4c00bf423af4f365bd | refs/heads/master | 2023-01-28T14:11:51.786278 | 2019-09-02T00:04:40 | 2019-09-02T00:04:40 | 205,566,615 | 0 | 0 | null | 2023-01-24T00:37:30 | 2019-08-31T16:06:40 | Python | UTF-8 | Python | false | false | 1,880 | py | import unittest
from unittest.mock import patch, MagicMock
from api import FilmLocationsAPI
from .common import mock_response
class TestAPI(unittest.TestCase):
    """Unit tests for FilmLocationsAPI: construction, headers, request building."""

    def test_init_empty_api_key(self):
        # Constructing the client without a token must raise.
        with self.assertRaises(Exception) as ctx:
            FilmLocationsAPI(api_token=None)
        # BUGFIX: compare the raised exception's message via ctx.exception;
        # str() of the assertRaises context object is never the message.
        self.assertEqual(
            "Please provide a valid API token to fetch film locations",
            str(ctx.exception),
            "It should fail to initialize without an api token"
        )

    def test_get_headers(self):
        api = FilmLocationsAPI(api_token='some_token')
        headers = api._get_headers()
        self.assertEqual(headers, {
            'X-App-Token': 'some_token'
        }, "It should set authentication header")

    @patch('api.requests')
    def test_get_locations_default_qs(self, mock_requests):
        # Stub requests.get so the response's json() returns the fixture.
        mock_json = MagicMock()
        mock_json.return_value = mock_response
        mock_requests.get.return_value = MagicMock(
            json=mock_json
        )

        api = FilmLocationsAPI(api_token='some_token')
        response = api.fetch_film_locations()

        self.assertListEqual(response, mock_response)
        # BUGFIX: the bare `assert_called` attribute accesses were no-ops;
        # the mock assertion methods must actually be invoked.
        mock_requests.get.assert_called()
        mock_requests.get.assert_called_with(
            api.url,
            params={"$q": "", "$limit": "10"},
            headers={'X-App-Token': 'some_token'}
        )
        mock_json.assert_called()

    @patch('api.requests')
    def test_get_locations_given_qs(self, mock_requests):
        # Same stubbing as above, but with an explicit query and limit.
        mock_json = MagicMock()
        mock_json.return_value = mock_response
        mock_requests.get.return_value = MagicMock(
            json=mock_json
        )

        api = FilmLocationsAPI(api_token='some_token')
        response = api.fetch_film_locations('venom', 50)

        self.assertListEqual(response, mock_response)
        mock_requests.get.assert_called()
        mock_requests.get.assert_called_with(
            api.url,
            params={"$q": "venom", "$limit": 50},
            headers={'X-App-Token': 'some_token'}
        )
        mock_json.assert_called()
"joaquin_fdv@goshippo.com"
] | joaquin_fdv@goshippo.com |
43987897857cb1721c0c822bde781613f26eea07 | 6bada87a5c36d1c711bf10d6a74fbb009aa6258c | /dumbpm/shared/shared.py | 7aed571d98d2badc46a66d2231dbab8ad3e75c48 | [
"Apache-2.0"
] | permissive | poros/dumbpm | 763e6d32645fe95af846ea4d3dc38a8714c8867f | 648d73caf4e8246fe2981907959fe8cae15bc98c | refs/heads/master | 2023-02-24T18:26:47.709512 | 2022-01-24T11:23:11 | 2022-01-24T11:23:11 | 162,490,710 | 7 | 0 | Apache-2.0 | 2023-02-11T00:31:53 | 2018-12-19T21:05:38 | Python | UTF-8 | Python | false | false | 275 | py | from pandas import DataFrame
def compute_stats(duration: list[int]) -> DataFrame:
    """Statistics to visualize for the result of a Monte Carlo simulation."""
    # Wrap the simulated durations in a single-column frame, then summarize
    # with the percentiles the dashboard displays.
    frame = DataFrame(duration, columns=["Duration"])
    wanted_percentiles = [0.5, 0.75, 0.90, 0.99]
    return frame.describe(percentiles=wanted_percentiles)
| [
"noreply@github.com"
] | noreply@github.com |
4323d06509935ec626872716a0a5d1a8c8366b41 | 8feb5a91bfc7ffd267da5635a8c450f4db2da8dd | /Python Scripts/app_extractor_lib_based.py | dd70130260696bb9e07254d007e909a4396ddc4d | [] | no_license | jabhinav/Educational-Content-Enrichment | a8d8e62aed8c5a0cfa85fab1c8e26305cbe1426d | a73a466430142074e581f86ef91a5d35e22c6e76 | refs/heads/master | 2022-12-06T03:56:46.280482 | 2020-08-24T20:18:27 | 2020-08-24T20:18:27 | 213,155,826 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | import nltk
import wikipedia
import os
# Directory holding the list of Wikipedia titles and the output file.
all_wiki_titles = "/home/abhinavj/video_enrichment/"
with open(os.path.join(all_wiki_titles,"wiki_all_titles.txt"),'r') as f:
    titles = f.readlines()
# warning:: Calling `section` on a section that has subheadings will NOT return
# the full text of all of the subsections. It only gets the text between
# `section_title` and the next subheading, which is often empty.
with open(os.path.join(all_wiki_titles, "wiki_applications.txt"), 'w') as f:
    for j, page_title in enumerate(titles):
        page_title = page_title.replace('\n', '')
        # NOTE(review): the 181 threshold presumably resumes a previously
        # interrupted run; earlier titles are silently skipped -- confirm.
        if j+1>181:
            try:
                print(j + 1, " : ", page_title)
                page = wikipedia.WikipediaPage(title=page_title)
                text = page.section("Applications")
                if text:
                    # Keep only the first sentence of the Applications section.
                    sentences = nltk.sent_tokenize(text)
                    f.write(sentences[0]+"\n")
                    print("sentence founnd")
            # Ambiguous titles are skipped entirely.
            except wikipedia.exceptions.DisambiguationError:
                continue
| [
"noreply@github.com"
] | noreply@github.com |
f17adc2a15a876ed0c7c3c479bb7aa490936b59d | 3cc112b83216e39c15fe86ec067118a730a16706 | /neural_networks/python/cifar-100.py | 0d7788e2e2528e81e1e5c4d04d4ed5647bd3eb08 | [] | no_license | pixarninja/machine_learning | 1af40c7728241505ecc0cc3840ea86dc5f8e0aea | 241a52114314dfb580870622c143bd7c3492a53d | refs/heads/master | 2020-12-02T04:11:01.959313 | 2020-01-02T18:56:11 | 2020-01-02T18:56:11 | 230,883,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,386 | py | # Modeled off of "The Sequential model API" (Keras Documentation)
# Source: https://keras.io/models/sequential/
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D
import matplotlib.pyplot as plt
import tensorflow as tf
import utils as utils
# Import training and testing data from TensorFlow API.
print('Loading CIFAR-100 Dataset...')
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar100.load_data()
# Normalization
shape = (32, 32, 3)
x_train = x_train.reshape(x_train.shape[0], shape[0], shape[1], shape[2]) / 255.0
x_test = x_test.reshape(x_test.shape[0], shape[0], shape[1], shape[2]) / 255.0
print('x_train shape:', x_train.shape)
# Create a Sequential base model.
model = Sequential()
# Add each layer to the model and set the shape for the nodes accordingly.
model.add(Conv2D(64, kernel_size=(3,3), input_shape=shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation=tf.nn.relu))
model.add(Dropout(0.2))
model.add(Dense(100, activation=tf.nn.softmax))
# Compile the model and fit it with the training data.
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x=x_train,y=y_train, epochs=10)
# Evaluate the model.
evaluation = model.evaluate(x_test, y_test)
print(evaluation)
| [
"wesharris505@gmail.com"
] | wesharris505@gmail.com |
577b3103f6d545706880b49e0ca68d04a83bb05f | 25764226e0dcad2e4f8fccf712a01a29c64eba23 | /api_save_csv.py | 0b9fa15149a00e2bfbb58313504c07d538bbe126 | [] | no_license | 19-2-oss/19-2-oss-project | 24d9b1680c72aff9432fe8a85465bb7e0e5ead68 | a47330cc7a9493a0f615c9d61572d5ea0f2d4a8d | refs/heads/master | 2023-01-12T12:55:23.388124 | 2019-12-19T11:33:08 | 2019-12-19T11:33:08 | 226,592,289 | 0 | 4 | null | 2022-12-11T16:30:03 | 2019-12-08T00:06:47 | CSS | UTF-8 | Python | false | false | 980 | py | import urllib.request
import json
import pandas
# Prompt (Korean): "Please enter a city/county name in Gyeonggi-do".
name = urllib.parse.quote(input("경기도 시/군 명을 입력해주세요 : "))
# Gyeonggi-do open-data endpoint for recommended restaurants, filtered by city.
url = 'https://openapi.gg.go.kr/PlaceThatDoATasteyFoodSt?KEY=30c8bab88c6249babce184a75ce9be0f&Type=json&SIGUN_NM='+name
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
rescode = response.getcode()
data = []
if(rescode==200):
    response_body = response.read()
    # NOTE(review): `dict` shadows the builtin of the same name.
    dict = json.loads(response_body.decode('utf-8'))
    try:
        # Collect name, phone, road address and WGS84 coordinates per restaurant.
        for i in dict['PlaceThatDoATasteyFoodSt'][1]['row']:
            data.append([i['RESTRT_NM'], i['TASTFDPLC_TELNO'], i['REFINE_ROADNM_ADDR'], i['REFINE_WGS84_LAT'], i['REFINE_WGS84_LOGT']])
        print(data)
        #frame = pandas.DataFrame(data)
        #frame.to_csv(r'C:\Users\wogus\PycharmProjects\untitled1\data.csv',header=False, index=False)
    except: # exception handling: print the API's error message instead
        # NOTE(review): a bare except hides unrelated errors; KeyError from the
        # missing result key is the expected case here.
        print(dict['RESULT']['MESSAGE'])
else:
    # NOTE(review): rescode is an int, so this concatenation raises TypeError --
    # likely intended str(rescode).
    print("Error Code:" + rescode)
"wogus2838@naver.com"
] | wogus2838@naver.com |
74fd6216df35173d3380117185232b4dbee41beb | ff65468b40a4879a6bb5389bb3ec4a7d29f6b4ed | /madcore/localtemplate.py | e6b743b22a7c32b9a1a9fd10f60efa54f4af0ea4 | [
"MIT"
] | permissive | Worldie-com/cli | 178dee726ff182f7efc81afac26dabaaab3b4627 | cfa530b001014f31058d1310eab14939a3086c67 | refs/heads/master | 2023-03-25T05:29:44.280753 | 2018-08-26T17:49:08 | 2018-08-26T17:49:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,485 | py | """
MIT License
Copyright (c) 2016-2018 Madcore Ltd
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from jinja2 import Environment, PackageLoader, FileSystemLoader
import os
from prettytable import PrettyTable
import static
class Struct:
    """Lightweight object whose attributes are taken from keyword arguments."""

    def __init__(self, **entries):
        # Expose every keyword argument directly as an instance attribute.
        self.__dict__.update(entries)
class LocalTemplate(object):
    """Renders Jinja2 templates from the app template folder into the user's
    "populated" folder, optionally rewriting node selectors for minikube.

    Refactor: the render-then-write sequence was duplicated across the three
    generate_* methods; it is now shared via _write_populated. The redundant
    f.close() inside the `with` blocks was dropped (the context manager
    already closes the file).
    """

    # Injected settings object; class-level defaults keep attribute access
    # safe before __init__ runs.
    settings = None
    # Path of the most recently written output file.
    path_populated = None

    def __init__(self, in_settings):
        self.settings = in_settings

    def _write_populated(self, out_name, rendered):
        # Shared write step: remember the output path and persist the
        # rendered text as UTF-8 bytes.
        self.path_populated = "{0}/{1}".format(self.settings.folder_user_populated, out_name)
        with open(self.path_populated, "wb") as f:
            f.write(rendered.encode("UTF-8"))

    def generate_template(self, name):
        """Render template *name* with the settings object and write it out."""
        env = Environment(loader=FileSystemLoader(self.settings.folder_app_templates))
        rendered = env.get_template(name).render(settings=self.settings)
        self._write_populated(name, rendered)

    def generate_template_node(self, file_template, file_populated, ig):
        """Render *file_template* for instance group *ig* into *file_populated*."""
        env = Environment(loader=FileSystemLoader(self.settings.folder_app_templates))
        rendered = env.get_template(file_template).render(ig=ig, settings=self.settings)
        self._write_populated(file_populated, rendered)

    def generate_template_element(self, item):
        """Render *item*'s own template; when provisioning targets minikube,
        rewrite kops node selectors first."""
        env = Environment(loader=FileSystemLoader(self.settings.folder_app_templates))
        rendered = env.get_template(item.template).render(component=item, settings=self.settings)
        if self.settings.provision.cloud == "minikube":
            rendered = self.overwrite_nodeselector_for_minikube(rendered)
        self._write_populated(item.template, rendered)

    def overwrite_nodeselector_for_minikube(self, data):
        """Replace every kops instance-group selector line with the minikube
        hostname selector, preserving indentation.

        Note: the text is rebuilt line by line, so the result always ends
        with a newline even if *data* did not.
        """
        out = ''
        for line in data.split('\n'):
            if "kops.k8s.io/instancegroup:" in line:
                indent = len(line) - len(line.lstrip())
                line = "{0}kubernetes.io/hostname: minikube".format(' ' * indent)
            out += '{0}\n'.format(line)
        return out
| [
"peter@styk.tv"
] | peter@styk.tv |
6f845db5411f67f682b1111722d978af04ad23c4 | 8d9d88743c9fdbc1115e652b114de3038ed5f5cc | /keyboards.py | b785a3bd64e1a0dc67b6b869650a06e70dea1dc8 | [] | no_license | yarosha/meeting_in_restaurant_project | c2d182b776146087fe6c1b9d371ed70109add2ec | 670a3a6f1394894780aa99569f55a24c59928840 | refs/heads/master | 2020-09-21T01:44:56.670731 | 2019-12-22T08:03:26 | 2019-12-22T08:03:26 | 224,643,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,994 | py | from vk_api.keyboard import VkKeyboard, VkKeyboardColor
import vk_api
# NOTE(review): a live API token is hard-coded in the source; it should be
# moved to an environment variable or secret store and the exposed token revoked.
vk = vk_api.VkApi(token='cb6145a957442835528d14699d5601685ccd389d585ab3f55786b6ec56d0600436670449ef71f0fc70f59')
# _auth_token() is a private vk_api helper -- presumably validates/initializes
# the token session; confirm against the vk_api version in use.
vk._auth_token()
def main_keyboard():
    """One-time keyboard for the start menu: coffee request plus help button."""
    layout = VkKeyboard(one_time=True)
    layout.add_button('Хочу выпить кофе', color=VkKeyboardColor.PRIMARY)
    layout.add_line()
    layout.add_button('Что тут вообще происходит?', color=VkKeyboardColor.NEGATIVE)
    return layout.get_keyboard()
def quize_keyboard():
    """Persistent keyboard offering the survey and the help button."""
    layout = VkKeyboard(one_time=False)
    layout.add_button('Пройти опрос', color=VkKeyboardColor.PRIMARY)
    layout.add_line()
    layout.add_button('Что тут вообще происходит?', color=VkKeyboardColor.NEGATIVE)
    return layout.get_keyboard()
def help_keyboard():
    """One-time keyboard shown after the help text: coffee request plus survey."""
    layout = VkKeyboard(one_time=True)
    layout.add_button('Хочу выпить кофе', color=VkKeyboardColor.PRIMARY)
    layout.add_line()
    layout.add_button('Пройти опрос', color=VkKeyboardColor.POSITIVE)
    return layout.get_keyboard()
def days_of_week_keyboard():
    """Persistent keyboard with the six selectable weekdays, two per row."""
    layout = VkKeyboard(one_time=False)
    rows = [
        [('Понедельник', VkKeyboardColor.NEGATIVE), ('Вторник', VkKeyboardColor.POSITIVE)],
        [('Среда', VkKeyboardColor.POSITIVE), ('Четверг', VkKeyboardColor.NEGATIVE)],
        [('Пятница', VkKeyboardColor.NEGATIVE), ('Суббота', VkKeyboardColor.POSITIVE)],
    ]
    for row_index, row in enumerate(rows):
        if row_index:
            layout.add_line()
        for label, color in row:
            layout.add_button(label, color=color)
    return layout.get_keyboard()
| [
"yarosha_ponomar@mail.ru"
] | yarosha_ponomar@mail.ru |
94edc96b3d3caf3e7a2601956b85dd4d973dd127 | acdc1c11bb2b05e5a7ea0f28a071c0799cbe408e | /atuv/urls.py | 209066227336bd293e86e624f3484edfa459b759 | [] | no_license | adrianojalves/atuv2 | a8ec068c1920ae1c4fef2cdf4fdb2445e2f6777d | bf073022f02e15b438611c74476933f7d007fb4d | refs/heads/master | 2020-11-24T02:42:05.868296 | 2019-12-13T17:39:36 | 2019-12-13T17:39:36 | 227,931,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | """atuv URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    path('admin/', admin.site.urls),  # built-in Django admin site
    path('atuv/', include('app.urls')),  # delegate /atuv/ routes to the app's URLconf
]
| [
"adriano@prodados.inf.br"
] | adriano@prodados.inf.br |
c837a74da3788a0da98913e94694bbd9595db67c | f1d3274a55b8a2f059b47ab7ed812338d47617a5 | /untitled/SSW540_ASSIGNMENT/ASSIGNMENT06_AKSHAYSUNDERWANI_SSW540/Project07_AkshaySunderwani_SSW540.py | 10fd1bf2328a52ca94a670ef5f7f080135084877 | [] | no_license | akshya672222/PycharmProjects | aa04b495514c48401b75cb4c92f0bdecfb143398 | 20bddc14e164e9785ab831f06a0ada618bc627f9 | refs/heads/master | 2020-09-25T06:24:53.974135 | 2016-12-17T19:52:30 | 2016-12-17T19:52:30 | 67,933,996 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,476 | py | # SSW 540 - Assignment 07 - P7: Finding unique strings
# Akshay Sunderwani
import re
def parsefileforpath(filepath):
    """Count the distinct, syntactically valid e-mail addresses that appear
    after a "From:" tag in the file at *filepath*.

    On an I/O failure the error object itself is returned (the caller
    inspects it with isinstance), matching the original contract.
    """
    email_pattern = re.compile(
        r"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$"
    )
    try:
        unique_senders = []
        with open(filepath) as handle:
            for raw_line in handle:
                tokens = raw_line.split()
                # Only lines whose first token carries the "From:" tag count.
                if tokens and 'From:' in tokens[0]:
                    for token in tokens[1:]:
                        # Record each valid address at most once.
                        if token not in unique_senders and email_pattern.match(token):
                            unique_senders.append(token)
        return len(unique_senders)
    except EnvironmentError as error:
        return error
path = input ( "Please provide path for file to read : " ) # input path of file from user.
noofunique = parsefileforpath ( path ) # call parser method to read
# The parser returns the error object itself on I/O failure, so the result is
# type-checked here rather than wrapped in try/except.
if isinstance(noofunique, FileNotFoundError):
    print('ERROR: ', noofunique)
else:
    print ( 'There are ' , noofunique , ' email address in the file.' )
| [
"akshya672222@gmail.com"
] | akshya672222@gmail.com |
1221006e69505d6b9673acda6b899b46957c5ac5 | 9873c0b05522ea0ce574944712e6d5c6f829de2f | /diarrheapiechart.py | 1c3fe9d5dd80f77fee6f6c6035d6259251e67cab | [] | no_license | jiankaiwang/tcdc_opendata_visualization_data | 972f64606ede14355a65f8ea88b218dd1fed1978 | 2de3346ea8950d7027d6123169219d39802f09cb | refs/heads/master | 2021-05-04T16:09:56.239403 | 2018-02-05T02:45:41 | 2018-02-05T02:45:41 | 120,245,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,366 | py | # -*- coding: utf-8 -*-
"""
#
# author : jiankaiwang (https://jiankaiwang.no-ip.biz/)
# project : CKAN Visualization
# service : diarrheapiechart
#
"""
import csv
import urllib
import urllib2
import py2mysql
import json
import general
import sys
# service name (must be noticed if it is going to change)
serviceName = 'diarrheapiechart'
# checker : check mysql connection statue
py2my = py2mysql.py2mysql(\
general.mysqlServerInfo['host'], \
general.mysqlServerInfo['port'], \
general.mysqlServerInfo['user'], \
general.mysqlServerInfo['pass'], \
general.mysqlServerInfo['dbname']\
)
if py2my.checkConnectionValid()['state'] != 'success':
print general.writeOutErrorLog(serviceName, py2my.checkConnectionValid()['info'])
sys.exit()
# checker : starttimestamp
getDataDict = {'servicename' : serviceName, \
'status' : general.normalizedNote['exec'], \
'starttimestamp' : general.getCrtTimeStamp(), \
'endtimestamp' : "", \
'note' : general.normalizedNote['stmp']\
}
getWriteState = general.writeStatusIntoDB(py2my, getDataDict, general.normalizedNote['stmpflag'])
if getWriteState['state'] != 'success':
print general.writeOutErrorLog(serviceName, getWriteState['info'])
url = 'https://od.cdc.gov.tw/eic/NHI_Diarrhea.csv'
encode = 'utf8'
try:
response = urllib2.urlopen(url)
cr = csv.reader(response)
# checker : fetch data
getDataDict = {'servicename' : serviceName, \
'status' : general.normalizedNote['exec'], \
'endtimestamp' : "", \
'note' : general.normalizedNote['fdflagdesc']\
}
getWriteState = general.writeStatusIntoDB(py2my, getDataDict, general.normalizedNote['fdflag'])
if getWriteState['state'] != 'success':
print general.writeOutErrorLog(serviceName, getWriteState['info'])
except:
# checker : fetch data
getDataDict = {'servicename' : serviceName, \
'status' : general.normalizedNote['error'], \
'endtimestamp' : general.getCrtTimeStamp(), \
'note' : general.normalizedNote['fderror']\
}
getWriteState = general.writeStatusIntoDB(py2my, getDataDict, general.normalizedNote['fdflag'])
if getWriteState['state'] != 'success':
print general.writeOutErrorLog(serviceName, getWriteState['info'])
# fetch data is failure
sys.exit()
# 2016 : {'0-4' : 0, '5-14' : 0, '15-24' : 0, '25-64' : 0, '65+' : 0}
ttl = {}
#
# desc : initial the dictionary for calculating number
#
def initList(listobj, year):
    # Ensure *listobj* has an all-zero age-bucket dict for *year*;
    # an existing entry is left untouched.
    if year in listobj:
        return
    listobj[year] = {'0-4': 0, '5-14': 0, '15-24': 0, '25-64': 0, '65+': 0}
# start to parse each row on the data
# a[0].decode(encode)
header = []
for line in cr:
if len(header) < 1:
header = line
continue
year = (int)(line[0].decode(encode))
age = (line[3].decode(encode))
initList(ttl, year)
# prevent empty (or null)
if line[2].decode(encode) == u'住院' or len(line[2].decode(encode)) <= 1:
continue
ttl[year][age] += (int)(line[5].decode(encode))
# start to calculate influ ratio
yearList = ttl.keys()
# checker : fetching data positive control flag
if len(yearList) < 1:
# Fetching data is complete but data is not match
getDataDict = {'servicename' : serviceName, \
'status' : general.normalizedNote['error'], \
'endtimestamp' : general.getCrtTimeStamp(), \
'note' : general.normalizedNote['fdpcerror']\
}
getWriteState = general.writeStatusIntoDB(py2my, getDataDict, general.normalizedNote['fdflag'])
if getWriteState['state'] != 'success':
print general.writeOutErrorLog(serviceName, getWriteState['info'])
# the data is no more parsing
sys.exit()
else:
# Fetching data and checking data is complete
getDataDict = {'servicename' : serviceName, \
'status' : general.normalizedNote['exec'], \
'endtimestamp' : "", \
'note' : general.normalizedNote['fdcheckcomp']\
}
getWriteState = general.writeStatusIntoDB(py2my, getDataDict, general.normalizedNote['fdflag'])
if getWriteState['state'] != 'success':
print general.writeOutErrorLog(serviceName, getWriteState['info'])
# checker : data preparation ready
getDataDict = {'servicename' : serviceName, \
'status' : general.normalizedNote['exec'], \
'endtimestamp' : "", \
'note' : general.normalizedNote['dataready']\
}
getWriteState = general.writeStatusIntoDB(py2my, getDataDict, general.normalizedNote['fdflag'])
if getWriteState['state'] != 'success':
print general.writeOutErrorLog(serviceName, getWriteState['info'])
# checker : data preparation ready
getDataDict = {'servicename' : serviceName, \
'status' : general.normalizedNote['exec'], \
'endtimestamp' : "", \
'note' : general.normalizedNote['insertintodb']\
}
getWriteState = general.writeStatusIntoDB(py2my, getDataDict, general.normalizedNote['fdflag'])
if getWriteState['state'] != 'success':
print general.writeOutErrorLog(serviceName, getWriteState['info'])
# start to insert data into the database
for yearindex in range(0, len(yearList), 1):
ageList = ttl[yearList[yearindex]].keys()
for ageindex in range(0, len(ageList), 1):
queryData = py2my.execsql(\
"select * from diarrheapiechart where year = %s and age = %s;", \
(yearList[yearindex], ageList[ageindex]), True, True\
)
if queryData['state'] != 'success':
print queryData['info']
# skip this data
continue
syncdb = 0
if len(queryData['data']) > 0:
# aleady existing
syncdb = py2my.execsql(\
"update diarrheapiechart set diaval = %s where year = %s and age = %s;", \
(ttl[yearList[yearindex]][ageList[ageindex]], yearList[yearindex], ageList[ageindex]), False, False\
)
else:
# insert a new entity
syncdb = py2my.execsql(\
"insert into diarrheapiechart (year, age, diaval) values (%s, %s, %s);", \
(yearList[yearindex], ageList[ageindex], ttl[yearList[yearindex]][ageList[ageindex]]), False, False\
)
if syncdb['state'] != 'success':
print syncdb['info']
# checker : confirm api data
try:
apidata = urllib.urlopen(\
general.apiInfo['protocol'] \
+ general.apiInfo['host'] \
+ general.apiInfo['port'] \
+ general.apiInfo['path'] \
+ general.apiInfo['diarrheapc']
)
jsonData = json.loads(apidata.read())
if len(jsonData) < 1:
getDataDict = {'servicename' : serviceName, \
'status' : general.normalizedNote['error'], \
'endtimestamp' : general.getCrtTimeStamp(), \
'note' : general.normalizedNote['apicheckfail']\
}
getWriteState = general.writeStatusIntoDB(py2my, getDataDict, general.normalizedNote['fdflag'])
if getWriteState['state'] != 'success':
print general.writeOutErrorLog(serviceName, getWriteState['info'])
# api check is failure
sys.exit()
else:
getDataDict = {'servicename' : serviceName, \
'status' : general.normalizedNote['exec'], \
'endtimestamp' : "", \
'note' : general.normalizedNote['apicheckdesc']\
}
getWriteState = general.writeStatusIntoDB(py2my, getDataDict, general.normalizedNote['fdflag'])
if getWriteState['state'] != 'success':
print general.writeOutErrorLog(serviceName, getWriteState['info'])
except:
getDataDict = {'servicename' : serviceName, \
'status' : general.normalizedNote['error'], \
'endtimestamp' : general.getCrtTimeStamp(), \
'note' : general.normalizedNote['apicheckfail']\
}
getWriteState = general.writeStatusIntoDB(py2my, getDataDict, general.normalizedNote['fdflag'])
if getWriteState['state'] != 'success':
print general.writeOutErrorLog(serviceName, getWriteState['info'])
# fetch data is failure
sys.exit()
# checker : end time stamp
getDataDict = {'servicename' : serviceName, \
'status' : general.normalizedNote['comp'], \
'endtimestamp' : general.getCrtTimeStamp(), \
'note' : general.normalizedNote['etmp']\
}
getWriteState = general.writeStatusIntoDB(py2my, getDataDict, general.normalizedNote['etmpflag'])
if getWriteState['state'] != 'success':
print general.writeOutErrorLog(serviceName, getWriteState['info'])
| [
"s0973078@mail.ncyu.edu.tw"
] | s0973078@mail.ncyu.edu.tw |
7635383670a6850622d4fbe7ae512186c811aa78 | b2e5313f271d6e01c41081c03d86c7e54cb81766 | /users/views.py | 00ce66cb3b93464dd9b5fcddf570a001d2765928 | [] | no_license | iambajie/movie-recommend | c39e2e83996bee3245a80b4f1c4fbc6c180e38ed | e47b124116ecaa655b74b23622c331cbc04d41b6 | refs/heads/main | 2023-01-19T03:49:40.771894 | 2020-11-26T08:45:37 | 2020-11-26T08:45:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,776 | py | from django.shortcuts import render, redirect
from .forms import RegisterForm
from users.models import Resulttable,Insertposter
from users.recommend.myRecommend import *
from users.evaluate import *
from users.testRS2.experiment import *
from users.testRS2.experiment2 import *
from users.testRS3.experiment3 import *
from users.testRS4.experiment4 import *
#用户注册
def register(request):
# 只有当请求为 POST 时,才表示用户提交了注册信息
if request.method == 'POST':
form = RegisterForm(request.POST)
# 验证数据的合法性
if form.is_valid():
# 如果提交数据合法,调用表单的 save 方法将用户数据保存到数据库
form.save()
# 注册成功,跳转回首页
return redirect('/')
else:
# 请求不是 POST,表明用户正在访问注册页面,展示一个空的注册表单给用户
form = RegisterForm()
# 渲染模板
# 如果用户正在访问注册页面,则渲染的是一个空的注册表单
# 如果用户通过表单提交注册信息,但是数据验证不合法,则渲染的是一个带有错误信息的表单
return render(request, 'users/register.html', context={'form': form})
#首页页面
def index(request):
return render(request, 'users/..//index.html')
def check(request):
return render((request, 'users/..//index.html'))
# def showregist(request):
# pass
#10-22 可以随着不同用户名显示评价的电影
def showmessage(request,id):
usermovieid = []
usermovietitle = []
print(id)
this_id=str(id)+str(1000)
data=Resulttable.objects.filter(userId=this_id)
for row in data:
usermovieid.append(row.imdbId)
try:
conn = get_conn()
cur = conn.cursor()
#Insertposter.objects.filter(userId=USERID).delete()
for i in usermovieid:
cur.execute('select * from mv where imdbId = %s',i) #movie的id在moviegenre3中找到电影名
rr = cur.fetchall()
for imdbId,title,poster in rr:
usermovietitle.append(title)
# print(title)
# print(poster_result)
finally:
conn.close()
# return render(request, 'users/message.html', locals())
return render(request, 'users/message.html', {'usermovietitle':usermovietitle})
def myrecommend(request):
rec_style = 1
print(request.GET["userIdd"])
USERID = int(request.GET["userIdd"])
Insertposter.objects.filter(userId=USERID).delete()
# selectMysql()
read_mysql_to_csv('users/static', 'user') # 追加数据,提高速率
read_mysql_to_csv('users/static', 'mv') # 追加数据,提高速率
read_mysql_to_csv3('users/static','users_user') # 追加数据,提高速率
ratingfile = os.path.join('users/static', 'user.csv')
mvfile = os.path.join('users/static', 'mv.csv')
userfile = os.path.join('users/static', 'users_user.csv')
N = 3
rs = recommendSys(N)
userid=USERID-1
rs.initial_dataset(ratingfile)
rs.splitDataset(ratingfile,0,8,4)
rs.bulid_users_movies_matrix(userfile,mvfile)
rs.construct_graph()
#判断是否没有评价过电影
isEvaluate=rs.evaluate(userid)
print(isEvaluate)
if isEvaluate=="": #fFlse
rec_items=rs.cold_start() #使用冷启动
else:
if rec_style==1: #基于用户的推荐
rs.build_users_sim_matrix()
rec_items=rs.recommend_linyu_users(userid)
elif rec_style==2: #基于电影的推荐
rs.build_movies_sim_matrix()
rec_items = rs.recommend_linyu_items(userid)
elif rec_style == 3: #隐语义模型
rec_items = rs.recommend_lfm(userid)
elif rec_style == 4: #基于图的随机游走
# rs.personalrank(0.5,userid,50)
rec_items = rs.recommend_personalrank(userid)
try:
conn = get_conn()
cur = conn.cursor()
# Insertposter.objects.filter(userId=USERID).delete()
for i in rec_items:
cur.execute('select * from mv where imdbId = %s', i)
rr = cur.fetchall()
for imdbId, title, poster in rr:
# print(value) #value才是真正的海报链接
if (Insertposter.objects.filter(title=title)):
continue
else:
Insertposter.objects.create(userId=USERID, title=title, poster=poster)
# print(poster_result)
finally:
conn.close()
results = Insertposter.objects.filter(userId=USERID)
if rec_style==1:
return render(request, 'users/movieRecommend.html', {'results':results})
else:
return render(request, 'users/movieRecommend2.html', {'results':results})
#测试基于用户行为数据的推荐系统下的各项指标
def test(request):
rec_style = 1
read_mysql_to_csv('users/static', 'user') # 追加数据,提高速率
read_mysql_to_csv('users/static', 'mv') # 追加数据,提高速率
read_mysql_to_csv3('users/static','users_user') # 追加数据,提高速率
ratingfile = os.path.join('users/static', 'user.csv')
mvfile = os.path.join('users/static', 'mv.csv')
userfile = os.path.join('users/static', 'users_user.csv')
N = 3
rs = recommendSys(N)
rs.initial_dataset(ratingfile)
rs.splitDataset(ratingfile,0,8,4)
rs.bulid_users_movies_matrix(userfile, mvfile)
rs.construct_graph()
dataset=rs.user_movie_matrix
testset=rs.testset
if rec_style == 1: # 基于用户的推荐
rs.build_users_sim_matrix()
r = recall(dataset, testset, N, rs.recommend_linyu_users,rs.movie_matrix,rs.user_matrix)
p = precision(dataset, testset, N, rs.recommend_linyu_users,rs.movie_matrix,rs.user_matrix)
c = coverage(dataset, testset, N, rs.recommend_linyu_users,rs.movie_matrix,rs.user_matrix)
pop = popularity(dataset, testset, N, rs.recommend_linyu_users,rs.movie_matrix,rs.user_matrix)
print(r, p, c, pop)
# rec_items = rs.recommend_linyu_users(userid)
elif rec_style == 2: # 基于电影的推荐
rs.build_movies_sim_matrix()
r = recall(rs.user_movie_matrix, testset, N, rs.recommend_linyu_items,rs.movie_matrix,rs.user_matrix)
p = precision(dataset, testset, N, rs.recommend_linyu_items,rs.movie_matrix,rs.user_matrix)
c = coverage(dataset, testset, N, rs.recommend_linyu_items,rs.movie_matrix,rs.user_matrix)
pop = popularity(dataset, testset, N, rs.recommend_linyu_items,rs.movie_matrix,rs.user_matrix)
print(r, p, c, pop)
# rec_items = rs.recommend_linyu_items(userid)
elif rec_style == 3: # 隐语义模型
r = recall(dataset, dataset, N, rs.recommend_lfm,rs.movie_matrix,rs.user_matrix)
p = precision(dataset, dataset, N, rs.recommend_lfm,rs.movie_matrix,rs.user_matrix)
c = coverage(dataset, dataset, N, rs.recommend_lfm,rs.movie_matrix,rs.user_matrix)
pop = popularity(dataset, dataset, N, rs.recommend_lfm,rs.movie_matrix,rs.user_matrix)
print(r, p, c, pop)
# rec_items = rs.recommend_lfm(userid)
elif rec_style == 4: # 基于图的随机游走
r = recall(dataset, dataset, N, rs.recommend_personalrank,rs.movie_matrix,rs.user_matrix)
p = precision(dataset, dataset, N, rs.recommend_personalrank,rs.movie_matrix,rs.user_matrix)
c = coverage(dataset, dataset, N, rs.recommend_personalrank,rs.movie_matrix,rs.user_matrix)
pop = popularity(dataset, dataset, N, rs.recommend_personalrank,rs.movie_matrix,rs.user_matrix)
print(r, p, c, pop)
# rs.personalrank(0.5, userid, 50)
# rec_items = rs.recommend_pk(userid)
return render(request,'users/test.html',{'r':r,'p':p,'c':c,'pop':pop})
#测试基于用户标签数据的推荐系统
def test2(request):
fp='./dataset/delicious/user_taggedbookmarks.dat'
print(os.getcwd())
res='sucess'
M,N=2,10 #进行两次实验,推荐10个物品
#基于标签的推荐系统
# exp=Experiment(M, N, fp,rt='improveTag')
# exp.run()
##测试有误
#给用户推荐标签
exp=Experiment2(M, N, fp,rt='hybridPopularTags') # popularTags userPopularTags itemPopularTags hybridPopularTags
exp.run()
return render(request,'users/test2.html',{'res':res})
#测试基于时间上下文信息的推荐系统
def test3(request):
print(os.getcwd())
res='sucess'
# popularItem Titemcf Tusercf itemcf usercf
# 1. popularItem
# K = 0 # 为保持一致而设置,随便填一个值
# for site in ['www.nytimes.com', 'en.wikipedia.org']:
# for N in range(10, 110, 10):
# exp = Experiment3(K, N, site=site, rt='popularItem')
# exp.run()
# 2. Titemcf
K = 10
for site in ['www.nytimes.com', 'en.wikipedia.org']:
for N in range(10, 110, 10):
exp = Experiment3(K, N, site=site, rt='usercf')#Titemcf Tusercf itemcf usercf
exp.run()
return render(request,'users/test2.html',{'res':res})
#以上测试完成 11.20
#测试基于时间上下文信息的推荐系统
def test4(request):
print(os.getcwd())
res='sucess'
M,N=2,10 #进行两次实验,推荐10个物品
fp='./dataset/slashdot/soc-Slashdot0902.txt'
rt_list=['Out', 'In', 'In_Out', 'In_Out2']
for rt in rt_list:
print(rt)
exp = Experiment4(M, N,fp,rt)
exp.run()
return render(request,'users/test2.html',{'res':res})
#每个用户都给分配一个id,id从1开始
def insert(request):
# MOVIEID = int(request.GET["movieId"])
global USERID
USERID = int(request.GET["userId"])
# USERID = {{}}
RATING = float(request.GET["rating"])
IMDBID = int(request.GET["imdbId"])
Resulttable.objects.create(userId=USERID, rating=RATING,imdbId=IMDBID)
#print(USERID)
# return HttpResponseRedirect('/')
# return render(request, 'index.html',{'userId':USERID,'rating':RATING,'imdbId':IMDBID})
# messages.success(request, "哈哈哈")
return render(request, 'index.html',{'userId':USERID,'rating':RATING,'imdbId':IMDBID})
import os
import pymysql
import csv
import codecs
#连接mysql数据库
def get_conn():
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='xpf123321', db='moviesys', charset='utf8')
return conn
def query_all(cur, sql, args):
cur.execute(sql, args)
return cur.fetchall()
#将数据库内所有用户的评分写入excel文件中
def read_mysql_to_csv(filename,filecsv):
file=filename+"/"+filecsv+'.csv'
print(file)
with codecs.open(filename=file, mode='w', encoding='utf-8') as f:
write = csv.writer(f, dialect='excel')
conn = get_conn()
cur = conn.cursor()
cur.execute('select * from '+filecsv)
#sql = ('select * from users_resulttable WHERE userId = 1001')
rr = cur.fetchall()
#results = query_all(cur=cur, sql=sql, args=None)
for result in rr:
#print(result)
write.writerow(result[:])
def read_mysql_to_csv3(filename,filecsv):
file = filename + "/" + filecsv + '.csv'
print(file)
with codecs.open(filename=file, mode='w', encoding='utf-8') as f:
write = csv.writer(f, dialect='excel')
conn = get_conn()
cur = conn.cursor()
cur.execute('select distinct(userId) from user')
#sql = ('select * from users_resulttable WHERE userId = 1001')
rr = cur.fetchall()
#results = query_all(cur=cur, sql=sql, args=None)
for result in rr:
#print(result)
write.writerow(result[:])
def read_mysql_to_csv4(filename,filecsv):
file = filename + "/" + filecsv + '.csv'
print(file)
with codecs.open(filename=file, mode='w', encoding='utf-8') as f:
write = csv.writer(f, dialect='excel')
conn = get_conn()
cur = conn.cursor()
cur.execute('select distinct(userId) from users_resulttable')
#sql = ('select * from users_resulttable WHERE userId = 1001')
rr = cur.fetchall()
#results = query_all(cur=cur, sql=sql, args=None)
for result in rr:
#print(result)
write.writerow(result[:])
# #
# if __name__ == '__main__':
# ratingfile2 = os.path.join('static', 'users_resulttable.csv') # 一共671个用户
#
# usercf = UserBasedCF()
# userId = '1'
# # usercf.initial_dataset(ratingfile1)
# usercf.generate_dataset(ratingfile2)
# usercf.calc_user_sim()
# # usercf.evaluate()
# usercf.recommend(userId)
# # 给用户推荐10部电影 输出的是‘movieId’,兴趣度
| [
"1079248835@qq.com"
] | 1079248835@qq.com |
1677423680a3a2b5ea98689051194bfd8bc53b7b | eed071aa6ff3387cb2eafe94e3f90fe2a2c992d5 | /Task4.py | cdd3581aabd7d0e22cd52e782af594b235c0701a | [] | no_license | sanxiux/Task | 0a73429d75b6b038a6830311a2797618a4af4e33 | d9d6f92e1a81d358c7f61e4c86e6d15eae490cfc | refs/heads/master | 2020-03-07T08:50:45.572504 | 2018-04-02T02:18:52 | 2018-04-02T02:18:52 | 127,390,397 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,694 | py | """
下面的文件将会从csv文件中读取读取短信与电话记录,
你将在以后的课程中了解更多有关读取文件的知识。
"""
import csv
#这是所有的播出电话号码
zidian_first =[]
#这是接听电话的
zidian_second = []
#这个是发短信的
zidian_third = []
#这个是接收短信的
zidian_forth = []
filename2 = 'calls.csv'
with open(filename2, 'r') as f:
reader = csv.reader(f)
calls = list(reader)
for i in range(len(calls)):
arrive = calls[i]
if arrive[0] not in zidian_first:
zidian_first.append(arrive[0])
if arrive[1] not in zidian_second:
zidian_second.append(arrive[1])
filename = 'texts.csv'
with open(filename, 'r') as f:
reader = csv.reader(f)
texts = list(reader)
for i in range(len(texts)):
arrive = texts[i]
if arrive[0] not in zidian_third:
zidian_third.append(arrive[0])
if arrive[1] not in zidian_forth:
zidian_forth.append(arrive[1])
tmp = [val1 for val1 in zidian_first if val1 not in zidian_second]
tmp2 = [val2 for val2 in tmp if val2 not in zidian_third]
tmp3 = sorted([val3 for val3 in tmp2 if val3 not in zidian_forth])
print("These numbers could be telemarketers")
for tp in tmp3:
print(tp)
"""
任务4:
电话公司希望辨认出可能正在用于进行电话推销的电话号码。
找出所有可能的电话推销员:
这样的电话总是向其他人拨出电话,
但从来不发短信、接收短信或是收到来电
请输出如下内容
"These numbers could be telemarketers: "
<list of numbers>
电话号码不能重复,每行打印一条,按字典顺序排序后输出。
"""
| [
"91934382@qq.com"
] | 91934382@qq.com |
1b84850c1175fdf546f45c022e88d96e61c7b279 | 647ecca1802d5802325c9025c0dce2cbcaea2a9d | /homepage/models.py | 85961a3eabd6a08180624f31e7052c07a984f411 | [] | no_license | dukelester/flight-hotel-booking | 33cc4db535da19a5f4d4e64f98cf9f4eeb23ba0b | c8835a8ed49e4f4ee48a2acd60ad9a8c2e3fe7bc | refs/heads/master | 2023-06-23T11:23:44.489440 | 2021-07-15T11:14:46 | 2021-07-15T11:14:46 | 386,043,654 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from cloudinary.models import CloudinaryField
from ckeditor.fields import RichTextField
from django.utils import timezone
class EmailSubscribers(models.Model):
email=models.EmailField(max_length=54)
class Meta:
verbose_name_plural = "Email Subscribers"
class MarkettingEmail(models.Model):
thistime = timezone.now()
email=RichTextField()
time_created=models.CharField(default=thistime,blank=False,max_length=256)
def __str__(self):
return self.email
class Meta:
verbose_name_plural = "Marketting Emails"
| [
"dukelester4@gmail.com"
] | dukelester4@gmail.com |
588ac1a11719ecbd1b0003a30f176d8313194e4d | c98f0fa3ee2ac6c062ce349f885d5111ad4a7e58 | /practice/test.py | e972eabf27c2c2e61940bb7f3832982fbcc645fd | [] | no_license | PDeck101/NJIT_REU_Eye_Blinks | c9e5fee7fd719dd2d8fbc6a3597cad6f2ba5bedc | 65abfa6b1476fa2eba8363c30c7b82515292b35d | refs/heads/master | 2022-01-17T13:28:58.874327 | 2019-06-18T22:41:33 | 2019-06-18T22:41:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,282 | py | #import kivy # importing main package
#from kivy.app import App # required base class for your app.
#from kivy.uix.videoplayer import VideoPlayer
#from kivy.uix.gridlayout import GridLayout
#from kivy.garden.matplotlib.backend_kivyagg import FigureCanvasKivyAgg
#import numpy as np
#import matplotlib.pyplot as plt
#import matplotlib.animation as animation
#kivy.require("1.11.0") # make sure people running py file have right version
#fig, ax = plt.subplots()
#x = np.arange(0, 2*np.pi, 0.01)
#line, = ax.plot(x, np.sin(x))
#def init(): # only required for blitting to give a clean slate.
#line.set_ydata([np.nan] * len(x))
#return line,
#def animate(i):
#line.set_ydata(np.sin(x + i / 100)) # update the data.
#return line,
#ani = animation.FuncAnimation(
#fig, animate, init_func=init, interval=2, blit=True, save_count=50)
# To save the animation, use e.g.
#
# ani.save("movie.mp4")
#
# or
#
# from matplotlib.animation import FFMpegWriter
# writer = FFMpegWriter(fps=15, metadata=dict(artist='Me'), bitrate=1800)
# ani.save("movie.mp4", writer=writer)
#plt.show()
#video = VideoPlayer(source='2019-06-13 14-35-16.flv', play=True)
#class MainWindow(GridLayout):
#def __init__(self, **kwargs):
#super().__init__(**kwargs)
#self.rows = 2
#self.add_widget(video)
#self.add_widget(FigureCanvasKivyAgg(plt.gcf()))
#class EpicApp(App):
#def build(self):
#return MainWindow()
# Run the app.
#if __name__ == "__main__":
#EpicApp().run()
import matplotlib
matplotlib.use('module://kivy.garden.matplotlib.backend_kivy')
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.clock import Clock
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
canvas = fig.canvas
class MyApp(App):
def build(self):
box = BoxLayout()
self.i = 0
self.line = [self.i]
box.add_widget(canvas)
#plt.show()
Clock.schedule_interval(self.update, 1)
return box
def update(self, *args):
plt.plot(self.line, self.line)
self.i += 1
self.line.append(self.i)
canvas.draw_idle()
MyApp().run()
| [
"noreply@github.com"
] | noreply@github.com |
b9b123916eba2a46e552b8cb0e286f5b55b8e3e2 | e6f2d7e407d2b516152094d0834e78603c9eb60b | /wen_python_16/pic_1.py | 6be48cde753d4cc2948ea9632e02d8c0580a5dbd | [] | no_license | pylinx64/wen_python_16 | 5d63a44d2cbc8380e57b9f3c6887ab91578ec6cb | c9e2f9083f848d502bce2e0cf049ccba2677e981 | refs/heads/main | 2023-04-18T04:43:32.601474 | 2021-05-05T10:07:30 | 2021-05-05T10:07:30 | 336,603,250 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | import turtle
import time
t = turtle.Pen()
colors = ['lime', '#C35A62', '#9CC35A', '#5AC3B7', '#C35AB8']
turtle.bgcolor('black')
t.pencolor(colors[2])
t.circle(100)
t.left(320)
t.forward(200)
t.circle(100)
time.sleep(50)
| [
"noreply@github.com"
] | noreply@github.com |
a2c75d7e2b2e0d54e1631a4ce6785d7266097d6e | 9cd180fc7594eb018c41f0bf0b54548741fd33ba | /sdk/python/pulumi_azure_nextgen/network/v20170901/zone.py | 11bfa1976eb4864afd69e9171e7f0790cc681bd9 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | MisinformedDNA/pulumi-azure-nextgen | c71971359450d03f13a53645171f621e200fe82d | f0022686b655c2b0744a9f47915aadaa183eed3b | refs/heads/master | 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,856 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['Zone']
class Zone(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zone_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Describes a DNS zone.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] etag: The etag of the zone.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] zone_name: The name of the DNS zone (without a terminating dot).
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['etag'] = etag
if location is None:
raise TypeError("Missing required property 'location'")
__props__['location'] = location
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
if zone_name is None:
raise TypeError("Missing required property 'zone_name'")
__props__['zone_name'] = zone_name
__props__['max_number_of_record_sets'] = None
__props__['name'] = None
__props__['name_servers'] = None
__props__['number_of_record_sets'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:Zone"), pulumi.Alias(type_="azure-nextgen:network/v20150504preview:Zone"), pulumi.Alias(type_="azure-nextgen:network/v20160401:Zone"), pulumi.Alias(type_="azure-nextgen:network/v20171001:Zone"), pulumi.Alias(type_="azure-nextgen:network/v20180301preview:Zone"), pulumi.Alias(type_="azure-nextgen:network/v20180501:Zone")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Zone, __self__).__init__(
'azure-nextgen:network/v20170901:Zone',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Zone':
"""
Get an existing Zone resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Zone(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
The etag of the zone.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="maxNumberOfRecordSets")
def max_number_of_record_sets(self) -> pulumi.Output[int]:
"""
The maximum number of record sets that can be created in this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
"""
return pulumi.get(self, "max_number_of_record_sets")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nameServers")
def name_servers(self) -> pulumi.Output[Sequence[str]]:
"""
The name servers for this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
"""
return pulumi.get(self, "name_servers")
@property
@pulumi.getter(name="numberOfRecordSets")
def number_of_record_sets(self) -> pulumi.Output[int]:
"""
The current number of record sets in this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
"""
return pulumi.get(self, "number_of_record_sets")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
e258038aad904c2a62e39e78d3c0d2cf97592f7e | 7714d7fe86c99c059e339e895e265658fa3ce36e | /backend/home/migrations/0005_auto_20200807_0839.py | aa38d5dae63fac410eabc371a886dabc919134b3 | [] | no_license | crowdbotics-apps/mobile-7-aug-dev-8582 | f9454c8a9b3ca34e0b7dce328554658fd3fe02e9 | f569d0a9ae3effb99d6ee00127f87015296a4993 | refs/heads/master | 2023-07-11T13:56:39.164407 | 2020-08-07T09:01:31 | 2020-08-07T09:01:31 | 285,739,310 | 0 | 0 | null | 2021-08-03T20:03:29 | 2020-08-07T04:46:04 | JavaScript | UTF-8 | Python | false | false | 551 | py | # Generated by Django 2.2.15 on 2020-08-07 08:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0004_hjkhgkjhkjhkj'),
]
operations = [
migrations.RemoveField(
model_name='customtext',
name='hgfhgfhgf',
),
migrations.RemoveField(
model_name='customtext',
name='hjgjhgjhghjg',
),
migrations.RemoveField(
model_name='customtext',
name='kjhkjhkjh',
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
c6b1d2325848e61f3d8eca55c4365fd3c3ab4671 | f5ee654bdc121f435514149c71a2e16703d95883 | /image_manager/abstract_image_manager.py | a408c84c9cb890f54f6ee83241c200db4b28a5f5 | [] | no_license | Alexundar/ObjectDetector | d4211c29c0af108e112fa3f1629ff9ffa8ac4f2a | 1afb97d9effe18cfffe1af0f19b55281c3647d70 | refs/heads/master | 2022-11-21T07:41:05.693205 | 2020-06-30T14:13:51 | 2020-06-30T14:13:51 | 272,921,826 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | import pickle
from abc import ABC, abstractmethod
class AbstractImageManager(ABC):
def __init__(self):
super().__init__()
self.colors = pickle.load(open("../pallete", "rb"))
@abstractmethod
def read_images(self, images):
pass
@abstractmethod
def draw_bounding_boxes(self, x, results, classes):
pass
@abstractmethod
def write_images(self, image_names, images):
pass
| [
"a.lushnevsky@yandex.by"
] | a.lushnevsky@yandex.by |
1104320467509e437b8b361c7d15ee66fe93879c | 9491ec2a6ee861b4fb68065ec14c81c10806d746 | /src/pykeen/datasets/dbpedia.py | f5633c757e0cfe55836e5113f8b1e2c5d968c15a | [
"MIT"
] | permissive | Moon-xm/pykeen | 53f9693a432ad51b525c4309a1b638aca0d05c72 | eeaf1d623aa881c0c897772372988390e1d8302d | refs/heads/master | 2023-08-09T12:28:55.943173 | 2021-03-22T21:43:27 | 2021-03-22T21:43:27 | 351,089,062 | 1 | 0 | MIT | 2021-09-17T06:20:18 | 2021-03-24T13:25:50 | null | UTF-8 | Python | false | false | 1,966 | py | # -*- coding: utf-8 -*-
"""The DBpedia datasets from [shi2017b]_.
- GitHub Repository: https://github.com/bxshi/ConMask
- Paper: https://arxiv.org/abs/1711.03438
"""
from docdata import parse_docdata
from .base import UnpackedRemoteDataset
__all__ = [
'DBpedia50',
]
BASE = 'https://raw.githubusercontent.com/ZhenfengLei/KGDatasets/master/DBpedia50'
TEST_URL = f'{BASE}/test.txt'
TRAIN_URL = f'{BASE}/train.txt'
VALID_URL = f'{BASE}/valid.txt'
@parse_docdata
class DBpedia50(UnpackedRemoteDataset):
"""The DBpedia50 dataset.
---
name: DBpedia50
citation:
author: Shi
year: 2017
link: https://arxiv.org/abs/1711.03438
statistics:
entities: 24624
relations: 351
training: 32203
testing: 2095
validation: 123
triples: 34421
"""
def __init__(self, create_inverse_triples: bool = False, **kwargs):
"""Initialize the DBpedia50 small dataset from [shi2017b]_.
:param create_inverse_triples: Should inverse triples be created? Defaults to false.
:param kwargs: keyword arguments passed to :class:`pykeen.datasets.base.UnpackedRemoteDataset`.
"""
# GitHub's raw.githubusercontent.com service rejects requests that are streamable. This is
# normally the default for all of PyKEEN's remote datasets, so just switch the default here.
kwargs.setdefault('stream', False)
super().__init__(
training_url=TRAIN_URL,
testing_url=TEST_URL,
validation_url=VALID_URL,
create_inverse_triples=create_inverse_triples,
load_triples_kwargs={
# as pointed out in https://github.com/pykeen/pykeen/issues/275#issuecomment-776412294,
# the columns are not ordered properly.
'column_remapping': [0, 2, 1],
},
**kwargs,
)
if __name__ == '__main__':
DBpedia50().summarize()
| [
"noreply@github.com"
] | noreply@github.com |
bf883990f5d5a2a677f673e28b5c4877284c147d | fde186bd141ed055ba8ab915b2ad25355f8f3fb6 | /ABC/070/py/A.py | 66689bce17b2f299f4639476d5684fcfd9e35d34 | [] | no_license | Tsukumo3/Atcoder | 259ea6487ad25ba2d4bf96d3e1cf9be4a427d24e | 5f8d5cf4c0edee5f54b8e78bc14a62e23cab69cb | refs/heads/master | 2020-12-20T05:04:39.222657 | 2020-10-17T01:39:04 | 2020-10-17T01:39:04 | 235,969,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | '''
ABC070 A - Palindromic Number
https://atcoder.jp/contests/abc070/tasks/abc070_a
'''
n = input()
if n[0] == n[2]:
ans = 'Yes'
else:
ans = 'No'
print(ans)
| [
"53821328+Tsukumo3@users.noreply.github.com"
] | 53821328+Tsukumo3@users.noreply.github.com |
4b2e7d44c346f2d96b0a0cf42b48cfe1bbadc0ee | 6d6f28080019e400ea0fd8c87ec2761ea79ccd1a | /users/migrations/0001_initial.py | d59fbc7bbcc741c412ebdc0edfb73b0a06b23fc4 | [] | no_license | N01329655/Python_Django_FV_NewsP_APP | ce219f89fe50079ac714960a75296c26393b5f9e | d961bf509d08a1524d1d015551dce635e72c76bb | refs/heads/master | 2023-06-10T23:30:39.174180 | 2021-07-07T22:57:17 | 2021-07-07T22:57:17 | 380,044,009 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,956 | py | # Generated by Django 3.2.4 on 2021-06-05 00:23
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('age', models.PositiveIntegerField(blank=True, null=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| [
"georgenechyporenko@gmail.com"
] | georgenechyporenko@gmail.com |
d5f2b424d4ed273d886ee3533b078836331a62e5 | 97eac4a05c77e1b6898b84c9606afa13428e45df | /Important_Functions/fib.py | 84c34f782b19848ecb61c528a94af491a974b47a | [] | no_license | ryanmcg86/Euler_Answers | 8f71b93ea15fceeeeb6b661d7401e40b760a38e6 | 28374025448b16aab9ed1dd801aafc3d602f7da8 | refs/heads/master | 2022-08-11T13:31:11.038918 | 2022-07-28T00:35:11 | 2022-07-28T00:35:11 | 190,278,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | '''This is a O(log n) implementation of a function that retreives the nth number in Fibonacci's sequence.'''
fibs = {0: 0, 1: 1}
def fib(n):
if n in fibs: return fibs[n]
if n % 2 == 0:
fibs[n] = ((2 * fib((n / 2) - 1)) + fib(n / 2)) * fib(n / 2)
else:
fibs[n] = fib((n - 1) / 2)**2 + fib((n + 1) / 2)**2
return fibs[n]
| [
"noreply@github.com"
] | noreply@github.com |
c1c0b55b76c86f1b101dd1186d123430cec8cf5f | b9f8af3f56577c2e15b7c4a91901b831ec39b1d9 | /Fortisw_Class.py | 70806594d1d6b4cc0d3886fc51172fae3bbabaff | [] | no_license | haobinzheng/ixia_automation | 3d9efef9e63d4c93598eeeffd39a0822a8f9b3fb | 7a0aeebb6b89d014cd8b4406f54459d57d87a2c1 | refs/heads/master | 2023-05-25T00:44:13.054388 | 2023-05-23T20:20:07 | 2023-05-23T20:20:07 | 213,509,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59,642 | py | from __future__ import print_function, unicode_literals
from __future__ import absolute_import, division
import telnetlib
import sys
import time
import logging
import traceback
import paramiko
import time
from time import sleep
import re
import os
from datetime import datetime
import pdb
# import xlsxwriter
# from excel import *
# #from ixia_ngfp_lib import *
# import settings
# from console_util import *
# import pexpect
# from threading import Thread
# import subprocess
# import spur
from robot.api import logger
from local_util import *
DEBUG = True
# S524DF5018000010 # show switch interface port13
# config switch interface
# edit "port13"
# set allowed-vlans 4093
# set untagged-vlans 4093
# set snmp-index 13
# next
# end
# S524DF5018000010 # show switch interface port13
# config switch interface
# edit "port13"
# set allowed-vlans 10,4089-4093
# set untagged-vlans 4093
# set snmp-index 13
# next
# end
# def parse_allow_vlans(output):
class port_class():
def __init__(self,name):
self.name = name
self.allow_vlans = []
self.lldp_neighbor = None
def parse_port_config(self,output):
# printr("Enter parse_port_config")
for line in output:
if "allowed-vlans" in line:
regex = r'set allowed-vlans ([0-9|\-\,]+)'
matched = re.search(regex,line)
if matched:
vlans = matched.group(1)
vlan_list = vlans.split(',')
self.allow_vlans = vlan_list
# printr(vlan_list)
def update_allowed_vlan(self,allow_vlans):
pass
class switch_vlan():
def __init__(self,output):
self.sample_output = """
id : 10
private-vlan : disable
description : (null)
learning : enable
rspan-mode : disable
igmp-snooping : disable
mld-snooping : enable
mld-snooping-fast-leave: enable
mld-snooping-querier: enable
mld-snooping-querier-addr: ::
mld-snooping-proxy : enable
mld-snooping-static-group:
== [ static-group-1 ]
name: static-group-1 mcast-addr: ff3e::10 members:
== [ port1 ]
member-name: port1
dhcp-snooping : disable
dhcp6-snooping : disable
member-by-mac:
member-by-ipv4:
member-by-ipv6:
member-by-proto:
"""
self.id = None
self.private_vlan = None
self.learning = None
self.rspan_mode = None
self.igmp_snooping = None
self.mld_snooping = None
self.mld_snooping_fast_leave = None
self.mld_snooping_querier = None
self.mld_snooping_querier_addr = None
self.mld_snooping_proxy = None
self.mld_snooping_static_group = None
self.dhcp_snooping = None
self.dhcp6_snooping = None
self.member_by_mac = None
self.member_by_ipv4 = None
self.member_by_proto = None
self.config_dict = None
self.parse_vlan_get(output)
self.print_sw_vlan()
def parse_vlan_get(self,output):
if type(output) == dict:
vlan_dict = output
else:
vlan_dict = {}
for line in output:
if "::" in line:
k,v = line.split()
k = k.strip(":")
vlan_dict[k.strip()] = v.strip()
else:
words = line.split(":")
if len(words) == 2:
k = words[0]
v = words[1]
vlan_dict[k.strip()] = v.strip()
elif len(words) == 1:
k = words[0]
vlan_dict[k.strip()] = "Null"
printr("vlan_dict = {}".format(vlan_dict))
if 'id' in vlan_dict:
self.id = vlan_dict.get('id',None)
else:
self.id = None
vlan_dict['id'] = None
if 'private-vlan' in vlan_dict:
self.private_vlan = vlan_dict.get('private-vlan',None)
else:
self.private_vlan = None
vlan_dict['private-vlan'] = None
if 'learning' in vlan_dict:
self.learning = vlan_dict.get('learning',None)
else:
self.learning = None
vlan_dict['learning'] = None
if 'rspan-mode' in vlan_dict:
self.rspan_mode = vlan_dict.get('rspan-mode',None)
else:
self.rspan_mode = None
vlan_dict['rspan-mode'] = None
if 'igmp-snooping' in vlan_dict:
self.igmp_snooping = vlan_dict.get('igmp-snooping',None)
else:
self.igmp_snooping = None
vlan_dict['igmp-snooping'] = None
if 'mld-snooping' in vlan_dict:
self.igmp_snooping = vlan_dict.get('mld-snooping',None)
else:
self.mld_snooping = None
vlan_dict['mld-snooping'] = None
if 'mld-snooping-fast-leave' in vlan_dict:
self.mld_snooping_fast_leave = vlan_dict.get('mld-snooping-fast-leave',None)
else:
self.mld_snooping_fast_leave = None
vlan_dict['mld-snooping-fast-leave'] = None
if 'mld-snooping-querier' in vlan_dict:
self.mld_snooping_querier = vlan_dict.get('mld-snooping-querier',None)
else:
self.mld_snooping_querier = None
vlan_dict['mld-snooping-querier'] = None
if 'mld-snooping-querier-addr' in vlan_dict:
self.mld_snooping_querier_addr = vlan_dict.get('mld-snooping-querier-addr',None)
else:
self.mld_snooping_querier_addr = None
vlan_dict['mld-snooping-querier-addr'] = None
if 'mld-snooping-proxy' in vlan_dict:
self.mld_snooping_proxy = vlan_dict.get('mld-snooping-proxy',None)
else:
self.mld_snooping_proxy = None
vlan_dict['mld-snooping-proxy'] = None
if 'mld-snooping-static-group' in vlan_dict:
self.mld_snooping_static_group = vlan_dict.get('mld-snooping-static-group',None)
else:
self.mld_snooping_static_group = None
vlan_dict['mld-snooping-static-group'] = None
if 'dhcp-snooping' in vlan_dict:
self.dhcp_snooping = vlan_dict.get('dhcp-snooping',None)
else:
self.dhcp_snooping = None
vlan_dict['dhcp-snooping'] = None
if 'dhcp6-snooping' in vlan_dict:
self.dhcp6_snooping = vlan_dict.get('dhcp6-snooping',None)
else:
self.dhcp6_snooping = None
vlan_dict['dhcp6-snooping'] = None
if 'member-by-mac' in vlan_dict:
self.member_by_mac = vlan_dict.get('member-by-mac',None)
else:
self.member_by_mac = None
vlan_dict['member-by-mac'] = None
if 'member-by-ipv4' in vlan_dict:
self.member_by_ipv4 = vlan_dict.get('member-by-ipv4',None)
else:
self.member_by_ipv4 = None
vlan_dict['member-by-ipv4'] = None
if 'member-by-proto' in vlan_dict:
self.member_by_proto = vlan_dict.get('member-by-proto',None)
else:
self.member_by_proto = None
vlan_dict['member-by-proto'] = None
self.config_dict = vlan_dict
def print_sw_vlan(self):
printr("self.id = {}".format(self.id))
printr("self.private_vlan = {}".format(self.private_vlan))
printr("self.learning = {}".format(self.learning))
printr("self.rspan_mode = {}".format(self.rspan_mode))
printr("self.igmp_snooping ={}".format(self.igmp_snooping))
printr("self.mld_snooping = {}".format(self.mld_snooping))
printr("self.mld_snooping_fast_leave = {}".format(self.mld_snooping_fast_leave))
printr("self.mld_snooping_querier = {}".format(self.mld_snooping_querier))
printr("self.mld_snooping_querier_addr = {}".format(self.mld_snooping_querier_addr))
printr("self.mld_snooping_proxy = {}".format(self.mld_snooping_proxy))
printr("self.mld_snooping_static_group = {}".format(self.mld_snooping_static_group))
printr("self.dhcp_snooping = {}".format(self.dhcp_snooping))
printr("self.dhcp6_snooping = {}".format(self.dhcp6_snooping))
printr("self.member_by_mac = {}".format(self.member_by_mac))
printr("self.member_by_ipv4 = {}".format(self.member_by_ipv4))
printr("self.member_by_proto = {}".format(self.member_by_proto))
class lldp_class():
def __init__(self):
self.type = None
self.local_port = None
self.status = None
self.ttl = None
self.neighbor = None
self.capability = None
self.med_type = None
self.remote_port = None
def __repr__(self):
return '{self.__class__.__name__}(fsw obj)'.format(self=self)
def __str__(self):
return '{self.__class__.__name__}:{self.local_port}, {self.neighbor}, {self.med_type},{self.remote_port'.format(self=self)
def parse_stats(self,items):
self.local_port = items[0]
self.tx = items[1]
self.rx = items[2]
self.discard = items[3]
self.added = items[4]
self.neighbor_deleted = items[5]
self.aged_out = items[6]
self.unknow_tlvs = items[7]
def print_lldp(self):
printr("type = {}".format(self.type))
printr("local_port = {}".format(self.local_port))
printr("med_type = {}".format(self.med_type))
printr("capability = {}".format(self.capability))
printr("remote_port = {}".format(self.remote_port))
printr("status = {}".format(self.status))
class fortisw():
def __init__(self,instance):
self.instance = instance
self.name = None
self.user = None
self.password = None
self.console = None
self.port = None
self.telnet = None
self.lldp_obj_list = None
self.lldp_dict_list = None
self.switch_ports = []
self.tpid = None
self.sw_vlans = []
def __repr__(self):
return '{self.__class__.__name__}({self.instance}):{self.name}'.format(self=self)
def __str__(self):
return '{self.__class__.__name__}:{self.name}, {self.console}, {self.port}'.format(self=self)
#Portname Status Device-name TTL Capability MED-type Port-ID
# port17 Up S448DP3X17000253 120 BR - port17
# port17 Up S448DN3X15000026 120 BR Network port18
def find_vlan_config(self,*args,**kwargs):
k = kwargs['key']
v = kwargs['value']
vlan_id = kwargs['vlan']
for vlan_obj in self.sw_vlans:
if vlan_obj.id == vlan_id or vlan_id in vlan_obj.id:
if vlan_obj.config_dict[k] == None:
return False
elif vlan_obj.config_dict[k] == v or v in vlan_obj.config_dict[k]:
return True
else:
return False
return False
def update_switch_vlan(self,get_output):
if type(get_output) == dict:
output = get_output
else:
output = self.CollectShowCmdRobot(get_output)
printr(output)
sw_vlan = switch_vlan(output)
self.sw_vlans = []
self.sw_vlans.append(sw_vlan)
def update_lldp_stats(self,stats):
# printr(stats)
port_found = False
lldp_found = False
for line in stats:
if "port" in line:
items = line.split()
if len(items) == 0:
return False
port_name = items[0]
for port in self.switch_ports:
if port_name == port.name:
port_found = True
for lldp_obj in self.lldp_obj_list:
if port.name == lldp_obj.local_port:
port.lldp_neighbor = lldp_obj
lldp_found = True
break
if port_found and lldp_found:
port.lldp_neighbor.parse_stats(items)
else:
port = port_class(port_name)
lldp = lldp_class()
port.lldp_neighbor = lldp
port.lldp_neighbor.parse_stats(items)
return port.lldp_neighbor
def update_port_config(self,port_name,output):
# printr("Enter update_port_config")
port= port_class(port_name)
port.parse_port_config(output)
return port.allow_vlans
def update_switch_config(self,output):
if output == None or len(output) == 0:
return None
try:
for line in output:
if "fortilink-p2p-tpid" in line:
regex = r"0[xX][0-9a-fA-F]+"
matched = re.search(regex,line)
if matched:
self.tpid = matched.group()
return self.tpid
except Exception as e:
return None
return None
def switch_commands(self,cmd):
#printr("========== DiscoveryLLDP starts here ===========")
result = self.ShowCmd(cmd)
printr(result)
return result
def parse_lldp(self,output):
lldp_obj_list = []
lldp_dict_list = []
printr(output)
for line in output:
if "Portname" in line and "Status" in line and "Device-name" in line:
items = line.split()
continue
if " Up " in line:
lldp_dict = {k:v for k, v in zip(items,line.split())}
# printr(lldp_dict)
if lldp_dict["Device-name"] == "-" or lldp_dict["Capability"] == "-":
pass
else:
lldp = lldp_class()
lldp.local_port = lldp_dict["Portname"]
lldp.status = lldp_dict["Status"]
lldp.ttl = lldp_dict["TTL"]
lldp.neighbor = lldp_dict["Device-name"]
lldp.capability = lldp_dict["Capability"]
if lldp_dict["MED-type"] == "-":
lldp.med_type = "p2p".encode('utf-8')
lldp_dict["MED-type"] = "p2p".encode('utf-8')
else:
lldp.med_type = lldp_dict["MED-type"]
lldp.remote_port = lldp_dict["Port-ID"]
lldp_obj_list.append(lldp)
lldp_dict_list.append(lldp_dict)
self.lldp_dict_list = lldp_dict_list
self.lldp_obj_list = lldp_obj_list
printr(self.lldp_dict_list)
# def parse_lldp_robot(self,output):
# lldp_neighbor_list = []
# for line in output:
# if "Portname" in line and "Status" in line and "Device-name" in line:
# items = line.split()
# continue
# if " Up " in line:
# lldp_dict = {k:v for k, v in zip(items,line.split())}
# if lldp_dict["Device-name"] == '-':
# continue
# if lldp_dict["MED-type"] == "-":
# lldp_dict["MED-type"] = "p2p".encode('utf-8')
# printr(lldp_dict)
# lldp_neighbor_list.append(lldp_dict)
# self.lldp_neighbors_dict = lldp_neighbor_list
# printr(self.lldp_neighbors_dict)
# return lldp_neighbor_list
def discovery_lldp(self):
###################### debug ############################
result = """
S424DP3X16000238 #
2020-07-12 17:36:29:S424DP3X16000238=get switch lldp neighbors-summary
Capability codes:
R:Router, B:Bridge, T:Telephone, C:DOCSIS Cable Device
W:WLAN Access Point, P:Repeater, S:Station, O:Other
MED type codes:
Generic:Generic Endpoint (Class 1), Media:Media Endpoint (Class 2)
Comms:Communications Endpoint (Class 3), Network:Network Connectivity Device
Portname Status Device-name TTL Capability MED-type Port-ID
__________ _______ __________________________ ____ __________ ________ _______
port1 Up - - - - -
port2 Down - - - - -
port3 Down - - - - -
port4 Down - - - - -
port5 Down - - - - -
port6 Down - - - - -
port7 Down - - - - -
port8 Down - - - - -
port9 Down - - - - -
port10 Down - - - - -
port11 Down - - - - -
port12 Down - - - - -
port13 Down - - - - -
port14 Down - - - - -
port15 Down - - - - -
port16 Down - - - - -
port17 Down - - - - -
port18 Down - - - - -
port19 Down - - - - -
port20 Down - - - - -
port21 Down - - - - -
port22 Down - - - - -
port23 Up S224EPTF19000002 120 BR - port23
port24 Down - - - - -
port25 Down - - - - -
port26 Down - - - - -
S424DP3X16000238 #
"""
b= result.split("\n")
result = [x.encode('utf-8').strip() for x in b if x.strip()]
printr("========== DiscoveryLLDP starts here ===========")
# result = self.ShowCmd("get switch lldp neighbors-summary")
printr(result)
###################### debug ############################
self.parse_lldp(result)
def DiscoveryLLDPRobot(self,output):
# printr("========== Debug: DiscoveryLLDPRobot starts here ===========")
# printr(output)
self.parse_lldp(output)
# printr("========== Debug: DiscoveryLLDPRobot after parse_lldp method")
#printr(self.lldp_neighbors)
def CollectShowCmdRobot(self,output,**kwargs):
# printr("Entering collect_show_cmd")
# printr(output)
#string coming from Robot is unicode. So split should NOT use b'. To make things worse in this
#3to2 transformation, need to be more careful.
#out_list = output.split(b'\r\n')
out_str_list = []
out_list = output.split('\r\n')
encoding = 'utf-8'
for o in out_list:
o_str = o.strip()
#o_str = o.decode(encoding).strip(' ')
o_str = o_str.encode(encoding).strip()
if o_str:
out_str_list.append(o_str)
#printr(out_str_list)
return out_str_list
def ClearLine(self):
status = clear_console_line(self.console,str(self.port),login_pwd='Fortinet123!', exec_pwd='Fortinet123!', prompt='#')
printr(status['status'])
if status['status'] != 0:
printr('unable clear console port {}:{}'.format(self.console,self.port))
return False
else:
printr('Cleared console port {}:{}'.format(self.console,self.port))
return True
def find_p2p_port(self):
#printr("========= Enter find_p2p_port () =============")
for n in self.lldp_obj_list:
#printr("n.med_type = {}".format(n.med_type))
if n.med_type == "p2p":
return (True, n.local_port)
return (False, None)
def FindP2pPort(self):
Network = False
P2P = False
p2p_num = 0
net_port = None
p2p_port = None
port_set = set()
#printr("========= Enter FindP2pPort () =============")
for n in self.lldp_obj_list:
n.print_lldp()
if n.capability == "-" or n.neighbor == "-":
continue
if n.med_type == "Network" :
Network = True
# printr("------------Found network port")
# printr(n.local_port)
net_port = n.local_port
port_set.add(n.local_port)
if n.med_type == "p2p":
P2P = True
p2p_num += 1
p2p_port = n.local_port
port_set.add(n.local_port)
# printr("------------Found p2p port")
# printr(n.local_port)
########## DEBUG ######################
printr("p2p_num = {}".format(p2p_num))
printr("Network = {}".format(Network))
printr("P2P = {}".format(P2P))
######### DEBUG #######################
if Network and P2P and net_port == p2p_port:
return (True,p2p_port)
elif P2P and p2p_num == 2 and len(port_set) == 1:
return (True,p2p_port)
else:
return (False, None)
# def FindP2pPort(self):
# #printr("========= Enter FindP2pPort () =============")
# for n in self.lldp_obj_list:
# if n.med_type == "p2p" and len(self.lldp_obj_list) > 2:
# printr("------------Found p2p port")
# printr(n.local_port)
# return (True, n.local_port)
# return (False,None)
def FindNetworkPort(self):
Network = False
P2P = False
p2p_num = 0
net_num = 0
net_port = None
p2p_port = None
#printr("========= Enter FindP2pPort () =============")
for n in self.lldp_obj_list:
if n.capability == "-":
continue
if n.med_type == "Network" :
Network = True
# printr("------------Found network port")
# printr(n.local_port)
net_port = n.local_port
net_num += 1
if n.med_type == "p2p":
P2P = True
p2p_num += 1
p2p_port = n.local_port
# printr("------------Found p2p port")
# printr(n.local_port)
########## DEBUG ######################
printr("p2p_num = {}".format(p2p_num))
printr("Network Port ? = {}".format(Network))
printr("P2P Port ? = {}".format(P2P))
######### DEBUG #######################
if Network and P2P and net_port == p2p_port:
return (True,net_port)
elif P2P and p2p_num == 1:
return (True,p2p_port)
elif P2P and p2p_num == 2:
return (True,p2p_port)
elif Network and net_num == 1:
return (True,net_port)
else:
return (False, None)
# def FindP2pPortDict(self):
# # printr("========= Enter find_ptp_rebot () =============")
# for n in self.lldp_dict_list:
# printr("loop through.......")
# if n["MED-type"] == "p2p":
# # printr("------------Found p2p port")
# # printr(n["Portname"])
# return (True,n["Portname"])
# #return True
# return (False,None)
def UpdateSwInfo(self,name,user,password,console,port):
self.name = name
self.user = user
self.password = password
self.console = console
self.port = port
printr("Switch name = {}, user name = {}, password={},console ip= {}, console port = {}".format(self.name,self.user,self.password,self.console,self.port))
def RunTest(self):
printr("This is a test of Fortiswitch class")
def ShowDeviceInfo(self):
printr("Switch name = {}, user name = {}, password={},console ip= {}, console port = {}".format(self.name,self.user,self.password,self.console,self.port))
def Telnet(self):
# ip = "10.105.50.3"
# line = "2057"
# ip="10.105.152.2"
# line="2083"
self.ClearLine()
self.telnet = TelnetConsole(self.console,self.port)
def VerifyLldpNeighbor(self):
pass
def ShowCmd(self,cmd):
output = switch_show_cmd(self.telnet,cmd)
return output
class fortigate():
def __init__(self,instance):
self.instance = instance
self.name = None
self.user = None
self.password = None
self.console = None
self.port = None
self.telnet = None
def __repr__(self):
return '{self.__class__.__name__}({self.instance}):{self.name}'.format(self=self)
def __str__(self):
return '{self.__class__.__name__}:{self.name}, {self.console}, {self.port}'.format(self=self)
def GetLldp(self):
self.collect_show_cmd("get switch lldp summary")
def CollectShowCmdRobot(self,output,**kwargs):
# printr("Entering collect_show_cmd")
# printr(output)
#string coming from Robot is unicode. So split should not use b'. To make things worse in this
#3to2 transformation, need to be more careful.
#out_list = output.split(b'\r\n')
out_list = output.split('\r\n')
encoding = 'utf-8'
out_str_list = []
for o in out_list:
o_str = o.strip()
#o_str = o.decode(encoding).strip(' ')
o_str = o_str.encode(encoding).strip()
if o_str:
out_str_list.append(o_str)
#printr(out_str_list)
return out_str_list
def ClearLine(self):
status = clear_console_line(self.console,str(self.port),login_pwd='Fortinet123!', exec_pwd='Fortinet123!', prompt='#')
printr(status['status'])
if status['status'] != 0:
printr('unable clear console port {}:{}'.format(self.console,self.port))
return False
else:
printr('Cleared console port {}:{}'.format(self.console,self.port))
return True
def UpdateSwInfo(self,name,user,password,console,port):
self.name = name
self.user = user
self.password = password
self.console = console
self.port = port
printr("Switch name = {}, user name = {}, password={},console ip= {}, console port = {}".format(self.name,self.user,self.password,self.console,self.port))
def Telnet(self):
ip = "10.105.50.3"
line = "2057"
# ip="10.105.152.2"
# line="2083"
self.ClearLine()
self.telnet = TelnetConsole(self.console,self.port)
def ShowCmd(self,cmd):
output = switch_show_cmd(self.telnet,cmd)
return output
def RunTest(self):
printr("This is a test of Fortigate class")
def ShowDeviceInfo(self):
printr("Switch name = {}, user name = {}, password={},console ip= {}, console port = {}".format(self.name,self.user,self.password,self.console,self.port))
def verify_ptp_robot(fsw):
# printr("========= Enter find_ptp_rebot () =============")
for n in fsw.lldp_neighbors_dict:
printr("loop through.......")
if n["MED-type"] == "p2p":
printr("------------Found p2p port")
return True
return False
def find_p2p_port(fsw):
# printr("========= Enter find_p2p_port () =============")
for n in fsw.lldp_neighbors:
if n.med_type == "p2p":
return (True, n.local_port)
return (False,None)
def testing(*args,**kwargs):
name = kwargs["name"]
age = kwargs["age"]
printr("testing")
printr("testing name = {},age={}".format(name,age))
b = {"name":"mike","age":14,"location":"china"}
a = "mike"
printr("{} is a good man".format(a))
for k,v in b.items():
printr(k,v)
return a
def transform_robot_output(output,**kwargs):
# printr("Entering collect_show_cmd")
# printr(output)
#string coming from Robot is unicode. So split should not use b'. To make things worse in this
#3to2 transformation, need to be more careful.
#out_list = output.split(b'\r\n')
out_list = output.split('\r\n')
encoding = 'utf-8'
out_str_list = []
for o in out_list:
o_str = o.strip()
print("o_str = ".format(o_str))
#o_str = o.decode(encoding).strip(' ')
o_str = o_str.encode(encoding).strip()
if o_str:
out_str_list.append(o_str)
#printr(out_str_list)
return out_str_list
def robot_2_python(word):
if type(word) is list:
return word
if type(word) is int:
return str(word).encode('utf-8')
return str(word.strip().encode('utf-8').replace('"',''))
def verify_tpid_config(*args, **kwargs):
output = kwargs['output']
state = kwargs["state"]
try:
output_list = transform_robot_output(output)
except Exception as e:
return False
state = robot_2_python(state)
if state == "disable":
for line in output_list:
if "fortilink-p2p-tpid" in line:
return False
return True
if state == "enable":
for line in output_list:
if "fortilink-p2p-tpid" in line:
return True
return False
def verify_p2p_config(*args,**kwargs):
output = kwargs['output']
state = kwargs["state"]
output_list = transform_robot_output(output)
printr("debug: state = {}".format(repr(state)))
state = robot_2_python(state)
printr("debug: state = {}".format(repr(state)))
if state == "disable":
for line in output_list:
if "portlink-p2p" in line:
return False
return True
if state == "enable":
for line in output_list:
if state in line:
return True
return False
def verify_config_key_value(*args, **kwargs):
sampple_config = """
1048D-R4-40 # show switch interface port2
config switch interface
edit "port2"
set allowed-vlans 1-2000
set auto-discovery-fortilink enable
set snmp-index 2
next
end
"""
output = kwargs['output']
check_item = kwargs['key']
output_list = transform_robot_output(output)
for line in output_list:
if check_item in line:
return True
return False
def verify_commands_output_keys(*args,**kwargs):
sample = """
FS1D483Z16000018 # get switch mld-snooping globals
aging-time : 30
leave-response-timeout: 10
query-interval : 125
FS1D483Z16000018 #
"""
output = kwargs['output']
output_list = transform_robot_output(output)
total = len(args)
printr("args = {},total number of args = {}".format(args,total))
counter = 0
for item in args:
printr("args item = {}".format(item))
for line in output_list:
if item in line:
counter += 1
break
if total == counter:
return True
else:
return False
def verify_get_key_value(*args, **kwargs):
sampple_config = """
1048D-R4-40 (global) # get
auto-fortilink-discovery: enable
auto-isl : enable
auto-isl-port-group : 0
dhcp-snooping-database-export: disable
dmi-global-all : enable
flapguard-retain-trigger: disable
flood-unknown-multicast: disable
forti-trunk-dmac : 02:80:c2:00:00:02
ip-mac-binding : disable
loop-guard-tx-interval: 3
mac-aging-interval : 300
max-path-in-ecmp-group: 8
mclag-igmpsnooping-aware: disable
mclag-peer-info-timeout: 60
mclag-port-base : 0
mclag-split-brain-detect: disable
mclag-stp-aware : enable
name : (null)
packet-buffer-mode : store-forward
port-security:
link-down-auth : set-unauth
mab-reauth : disable
max-reauth-attempt : 0
quarantine-vlan : enable
reauth-period : 60
tx-period : 30
trunk-hash-mode : default
trunk-hash-unicast-src-port: disable
trunk-hash-unkunicast-src-dst: enable
virtual-wire-tpid : 0xdee5
"""
output = kwargs['output']
key = kwargs['key']
value = kwargs['value']
if type(output) == dict:
printr("output = {},key={},value={}".format(output,key,value))
if output[key] == value or value in output[key]:
return True
else:
return False
else:
output_list = transform_robot_output(output)
for line in output_list:
if key in line:
k,v = line.split(":")
if value == v.strip() or int(value) == int(v.strip()):
return True
return False
def verify_config_lines(*args, **kwargs):
output = kwargs['output']
configs = args
output_list = transform_robot_output(output)
results = []
for config in configs:
for line in output_list:
if config in line:
results.append(True)
break
if len(results) == len(configs):
return True
else:
return False
def verify_switch_interface_config(*args, **kwargs):
sampple_config = """
1048D-R4-40 # show switch interface port2
config switch interface
edit "port2"
set allowed-vlans 1-2000
set auto-discovery-fortilink enable
set snmp-index 2
next
end
"""
output = kwargs['output']
if "vlan" in kwargs:
allowed_vlan = kwargs["vlan"]
else:
allowed_vlan = None
output_list = transform_robot_output(output)
for line in output_list:
if allowed_vlan in line:
return True
return False
def verify_vlan_mld_config(*args,**kwargs):
sample_config = """
3032E-R7-19 # show switch vlan 10
config switch vlan
edit 10
set mld-snooping enable
set mld-snooping-proxy enable
config mld-snooping-static-group
edit "static-group-1"
set mcast-addr ff3e::10
set members "port1"
next
end
"""
if 'output' in kwargs:
output = kwargs['output']
else:
output = "Null"
if 'state' in kwargs:
state = kwargs["state"]
else:
state = "Null"
if 'vlan' in kwargs:
vlan = kwargs["vlan"]
else:
vlan = "Null"
if "proxy" in kwargs:
proxy = kwargs['proxy']
else:
proxy = "Null"
if "querier" in kwargs:
querier = kwargs['querier']
else:
querier = "Null"
if "querier_addr" in kwargs:
querier_addr = kwargs["querier_addr"]
else:
querier_addr = "Null"
if "static_group" in kwargs:
static_group = kwargs["static_group"]
else:
static_group = "Null"
if "member" in kwargs:
member = kwargs["member"]
else:
member = "Null"
if "members" in kwargs:
members = kwargs["members"]
else:
members = "Null"
if "group_list" in kwargs:
group_list = kwargs["group_list"]
else:
group_list = "Null"
if "config_groups_list" in kwargs:
config_groups_list = kwargs["config_groups_list"]
else:
config_groups_list = "Null"
output_list = transform_robot_output(output)
state = robot_2_python(state)
vlan = robot_2_python(vlan)
proxy = robot_2_python(proxy)
querier = robot_2_python(querier)
querier_addr = robot_2_python(querier_addr)
static_group = robot_2_python(static_group)
members= robot_2_python(members)
member= robot_2_python(member)
group_list= robot_2_python(group_list)
config_groups_list= robot_2_python(config_groups_list)
printr("debug: state = {}".format(repr(state)))
printr("debug: vlan = {}".format(repr(vlan)))
printr("debug: porxy = {}".format(repr(proxy)))
printr("debug: querier = {}".format(repr(querier)))
printr("debug: querier_addr = {}".format(repr(querier_addr)))
printr("debug: static_group = {}".format(repr(static_group)))
printr("debug: member = {}".format(repr(member)))
printr("debug: members = {}".format(repr(members)))
printr("debug: group_list = {}".format(repr(group_list)))
printr("debug: config_groups_list = {}".format(repr(config_groups_list)))
try:
if state == "enable":
for line in output_list:
if "set mld-snooping enable" in line:
return True
return False
elif state == "enable" and querier == "disable":
for line in output_list:
if "set mld-snooping-querier enable" in line:
return False
return True
elif state == "enable" and querier == "enable":
for line in output_list:
if "set mld-snooping-querier enable" in line:
return True
return False
elif state == "disable" and querier == "enable":
for line in output_list:
if "set mld-snooping-querier enable" in line:
return False
return True
elif querier == "enable" and querier_addr != "Null":
printr("I am here where querier = enable && querier_addr != Null")
for line in output_list:
printr(line)
if querier_addr in line:
return True
return False
elif querier == "disable" and querier_addr != "Null":
for line in output_list:
if querier_addr in line:
return False
return True
elif static_group != "Null" and member == "Null" :
for line in output_list:
if static_group in line:
return True
return False
elif static_group != "Null" and member != "Null" :
group_found = False
for line in output_list:
if static_group in line:
group_found = True
member_found = False
for line in output_list:
if member in line:
member_found = True
if group_found == True and member_found == True:
return True
else:
return False
elif static_group != "Null" and members != "Null" :
group_found = False
for line in output_list:
if static_group in line:
group_found = True
members_found = []
for line in output_list:
for m in line:
if m in line:
members_found.append(True)
if group_found == True and len(members_found) == len(members):
return True
else:
return False
elif group_list != "Null":
group_match = []
for group in group_list:
for line in output_list:
if group in line:
group_match.append(True)
if len(group_match) == len(group_list):
return True
else:
return False
elif config_groups_list != "Null":
group_match = []
for group in config_groups_list:
for line in output_list:
if group in line:
group_match.append(True)
if len(group_match) == len(config_groups_list):
return True
else:
return False
except Exception as e:
pass
return False
def verify_two_output_list(*args,**kwargs):
list1 = args[0]
list2 = args[1]
for i,j in zip(list1,list2):
if i != j:
return False
return True
def verify_static_group(*args, **kwargs):
sample = """
FS1D483Z16000018 # get switch mld-snooping static-group
VLAN ID Group-Name Multicast-addr Member-interface
_______ ______________ ______________ ________________
20 static-group-1 ff3e:0:0:0:0:0:0:10 port7
"""
key = kwargs['key']
value = kwargs['value']
output = kwargs['output']
key = robot_2_python(key)
value = robot_2_python(value)
output_list = transform_robot_output(output)
printr("key = {}".format(repr(key)))
printr("value = {}".format(repr(value)))
if "number" in key:
count = 0
for line in output_list:
if "static-group" in line:
count += 1
if int(value) == count:
return True
else:
return False
def verify_mld_config(*args,**kwargs):
sample_output_enable = """
3032E-R7-19 # get switch mld-snooping status
MLD-SNOOPING enabled vlans:
VLAN PROXY QUERIER VERSION
---- ----- ---------------
10 DISABLED MLDv1
Max multicast snooping groups 1022
Total MLD groups 0 (Learned 0, Static 0)
Total IGMP groups 0 (Learned 0, Static 0)
Remaining allowed mcast snooping groups: 1022
"""
sample_output_disable = """
3032E-R7-19 # get switch mld-snooping status
MLD-SNOOPING enabled vlans:
VLAN PROXY QUERIER VERSION
---- ----- ---------------
Max multicast snooping groups 1022
Total MLD groups 0 (Learned 0, Static 0)
Total IGMP groups 0 (Learned 0, Static 0)
Remaining allowed mcast snooping groups: 1022
"""
sampe_output_proxy = """
3032E-R7-19 # get switch mld-snooping status
MLD-SNOOPING enabled vlans:
VLAN PROXY QUERIER VERSION
---- ----- ---------------
10 ENABLED MLDv1
Max multicast snooping groups 1022
Total MLD groups 0 (Learned 0, Static 0)
Total IGMP groups 0 (Learned 0, Static 0)
Remaining allowed mcast snooping groups: 1022
"""
output = kwargs['output']
state = kwargs["state"]
vlan = kwargs["vlan"]
proxy = kwargs['proxy']
output_list = transform_robot_output(output)
state = robot_2_python(state)
vlan = robot_2_python(vlan)
proxy = robot_2_python(proxy)
printr("debug: state = {}".format(repr(state)))
printr("debug: vlan = {}".format(repr(vlan)))
printr("debug: porxy = {}".format(repr(proxy)))
for i in range(len(output_list)):
if "---------------" in output_list[i]:
target = output_list[i+1]
break
if state.upper() == "ENABLE" and vlan in target:
if proxy.upper() == "DISABLE" and "DISABLED" in target:
return True
elif proxy.upper() == "ENABLE" and "ENABLED" in target:
return True
else:
return False
if state.upper() == "DISABLE" and vlan in target:
return False
else:
return True
def find_access_vlan(access_vlans,vlan):
if vlan in access_vlans:
return True
return False
def verify_tpid_id(found,target):
if found == None:
return False
if target in found:
return True
else:
return False
def verify_storm_control(output):
    """Return True only when all three expected storm-control settings
    ('storm-control-mode override', 'broadcast enable', 'rate 111')
    appear somewhere in *output*."""
    seen_override = False
    seen_broadcast = False
    seen_rate = False
    for entry in output:
        if "storm-control-mode override" in entry:
            seen_override = True
        elif "broadcast enable" in entry:
            seen_broadcast = True
        elif "rate 111" in entry:
            seen_rate = True
    return seen_override and seen_broadcast and seen_rate
def compare_lldp_stats(lldp1, lldp2):
    """Return True when both the rx and tx counters strictly increased
    between snapshot *lldp1* (older) and *lldp2* (newer)."""
    rx_grew = int(lldp2.rx) > int(lldp1.rx)
    tx_grew = int(lldp2.tx) > int(lldp1.tx)
    return rx_grew and tx_grew
def stp_port_status(*args, **kwargs):
    """Return True when the given port is FORWARDING with the expected STP role.

    kwargs:
        data: lines from 'diagnose stp vlan list ...' output
        port: port name to look for (column 0)
        role: expected role (column 4), compared case-insensitively
    """
    rows = kwargs["data"]
    wanted_port = kwargs["port"]
    wanted_role = kwargs["role"].upper()
    for row in rows:
        if "FORWARDING" not in row:
            continue
        fields = row.split()
        if fields[0] != wanted_port:
            continue
        if fields[4].upper() == wanted_role:
            return True
    return False
def verify_pdu_counters(*args,**kwargs):
    """Check one counter from 'diagnose switch pdu-counters list' output.

    kwargs:
        key:    substring identifying the counter line, e.g. 'MLD Done packet'
        value:  expected count (stringified int), or "any" (MLD Done only)
        output: raw CLI output in robot format

    Returns True/False for the recognised keys.  NOTE(review): several
    branches fall through without an explicit return (unrecognised key that
    is present, or an unmatched 'MLD Done' count with value != "any") and
    therefore yield None — confirm callers treat None as a failure.
    """
    # Reference sample of the CLI output this parser expects; the variable
    # itself is never used at runtime.
    sample_output = """
    FS1D483Z16000018 # diagnose switch pdu-counters list
    primary CPU counters:
    packet receive error : 0
    Non-zero port counters:
    port1:
    LLDP packet : 128
    DHCP6 Packet : 6
    IGMP Query packet : 6
    MLD Query packet : 38
    MLDv1 Report packet : 78
    MLD Done packet : 6
    port2:
    STP packet : 3
    LLDP packet : 128
    port14:
    MLD Query packet : 12
    """
    key = kwargs['key']
    value = kwargs['value']
    output = kwargs['output']
    key = robot_2_python(key)
    value = robot_2_python(value)
    output_list = transform_robot_output(output)
    printr("key = {}".format(repr(key)))
    printr("value = {}".format(repr(value)))
    k = None
    v = None
    # Find the first line containing the key and split it into
    # counter name (k) and counter value (v).
    for item in output_list:
        if key in item:
            k,v = [x.strip() for x in item.split(":")]
            printr("k,v = {},{}".format(repr(k),repr(v)))
            break
    if v != None :
        if "MLDv1 Report packet" in key:
            # The /2 tolerance presumably allows for reports being counted
            # once instead of twice (per port/direction) — TODO confirm.
            if int(v) == int(value) or int(v) == int(value)/2 :
                return True
            else:
                return False
        elif "MLD Done packet" in key:
            if value != "any":
                # The *2 tolerance mirrors the /2 case above — TODO confirm.
                if int(v) == int(value) or int(v) == int(value)*2 :
                    return True
            elif value == "any":
                # "any": accept any non-zero Done count.
                if int(v) > 0:
                    return True
                else:
                    return False
    else:
        # Key not present in the output at all.
        return None
def get_pdu_counters(*args, **kwargs):
    """Return the integer value of the first counter line containing *key*.

    Parses 'diagnose switch pdu-counters list' output such as::

        port1:
        LLDP packet        : 128
        MLD Query packet   : 38

    kwargs:
        key:    substring identifying the counter line, e.g. 'LLDP packet'
        value:  not used for the lookup (read and logged for symmetry with
                verify_pdu_counters)
        output: raw CLI output in robot format
    Returns the counter as an int, or None when *key* is not found.
    """
    key = robot_2_python(kwargs['key'])
    value = robot_2_python(kwargs['value'])
    output_list = transform_robot_output(kwargs['output'])
    printr("key = {}".format(repr(key)))
    printr("value = {}".format(repr(value)))
    for item in output_list:
        if key in item:
            k, v = [x.strip() for x in item.split(":")]
            printr("k,v = {},{}".format(repr(k), repr(v)))
            return int(v)
    return None
def verify_mld_snooping_mrouter_port(*args, **kwargs):
    """Check learned MLD snooping entries for a given port/pattern.

    Parses 'MLD-SNOOPING learned mcast-groups' output such as::

        port     VLAN  GROUP                 Age-timeout  MLD-Version
        port1    20    querier               163          MLDv1
        port3    20    ff3e:0:0:0:0:1:1:6    186          MLDv1
        ...

    kwargs:
        key:    substring to look for (typically a port name)
        value:  expected number of matching lines, or "any" to require
                at least one match
        output: raw CLI output in robot format
    Returns True when the expectation holds, otherwise False.
    """
    key = robot_2_python(kwargs['key'])
    value = robot_2_python(kwargs['value'])
    output_list = transform_robot_output(kwargs['output'])
    printr("key = {}".format(repr(key)))
    printr("value = {}".format(repr(value)))
    if value == "any":
        return any(key in item for item in output_list)
    count = sum(1 for item in output_list if key in item)
    printr("count = {}".format(repr(count)))
    return count == int(value)
def verify_mld_snooping_group(*args, **kwargs):
    """Verify a summary counter line ('<key>: <n>') in MLD snooping output.

    Intended for lines such as 'Total MLD Hosts: 18' at the bottom of
    'MLD-SNOOPING learned mcast-groups' output.

    kwargs:
        key:    substring identifying the counter line, e.g. 'Total MLD Hosts'
        value:  expected counter value (stringified int)
        output: raw CLI output in robot format
    Returns True when the first matching line carries the expected value,
    False when the key is absent.
    """
    key = robot_2_python(kwargs['key'])
    value = robot_2_python(kwargs['value'])
    output_list = transform_robot_output(kwargs['output'])
    printr("key = {}".format(repr(key)))
    printr("value = {}".format(repr(value)))
    for item in output_list:
        if key in item:
            # Split on the LAST colon only: group lines contain IPv6
            # addresses, so the old plain split(":") raised ValueError
            # when unpacking any line with more than one colon.
            k, v = [x.strip() for x in item.rsplit(":", 1)]
            printr("k,v = {},{}".format(repr(k), repr(v)))
            return int(v) == int(value)
    return False
def clear_lines(console_ip, port):
    """Clear the given console-server line.

    Returns True when the line was cleared, False otherwise; either way
    the outcome is logged via printr.
    """
    result = clear_console_line(console_ip, str(port), login_pwd='fortinet123',
                                exec_pwd='fortinet123', prompt='#')
    printr(result['status'])
    if result['status'] == 0:
        printr('Cleared console port {}:{}'.format(console_ip, port))
        return True
    printr('unable clear console port {}:{}'.format(console_ip, port))
    return False
def verify_system_performance(*args, **kwargs):
    """Parse 'get system performance' output and verify utilisation is sane.

    Sample output::

        CPU states: 0% user 10% system 14% nice 76% idle
        Memory states: 12% used
        Average network usage: 0 kbps in 1 minute, ...
        Uptime: 1 days, 16 hours, 39 minutes

    Returns False when CPU user, CPU system or memory usage exceeds 90%,
    True otherwise (including when the lines are missing entirely).
    """
    output_list = transform_robot_output(kwargs['output'])
    cpu_regex = r'CPU states: ([0-9%]+) user ([0-9%]+) system ([0-9%]+) nice ([0-9%]+) idle'
    mem_regex = r'Memory states: ([0-9%]+) used'
    printr("key = {}".format(repr(output_list)))
    # Default every metric to 0 so a missing CPU/memory line cannot leave
    # these names unbound (the old code raised NameError in that case).
    cpu_user = cpu_sys = mem_used = 0.0
    for line in output_list:
        matched = re.match(cpu_regex, line)
        if matched:
            # Divide by 100.0: under Python 2 the old integer division by
            # 100 silently truncated every ratio to 0.
            cpu_user = int(matched.group(1).strip("%")) / 100.0
            printr("cpu_user = {}".format(repr(cpu_user)))
            cpu_sys = int(matched.group(2).strip("%")) / 100.0
            printr("cpu_sys = {}".format(repr(cpu_sys)))
            continue
        matched = re.match(mem_regex, line)
        if matched:
            mem_used = int(matched.group(1).strip("%")) / 100.0
            printr("mem_used = {}".format(repr(mem_used)))
    for metric in (cpu_user, cpu_sys, mem_used):
        if metric > 0.9:
            printr("cpu_user={},cpu_sys={},mem_used={}".format(cpu_user, cpu_sys, mem_used))
            return False
    return True
def _count_pattern_matches(regex, packet):
    """Count occurrences of *regex* in str(packet), printing each hit."""
    hits = re.findall(regex, str(packet))
    for hit in hits:
        printr(hit)
    return len(hits)
def find_packet_with_pattern(*args, **kwargs):
    """Search a captured packet dump (matched against its repr) for a pattern.

    kwargs:
        packet:  captured packet(s); the search runs over str(packet)
        key:     what to look for: "mcast_dst" or "mcast_src"
        pattern: address prefix for mcast_dst, or "allzero"/"link_local"
                 for mcast_src
        all:     when present (any value), count every match instead of
                 returning True/False for the first one

    Returns True/False in single-match mode, an int match count in
    count-all mode, or None for unsupported key/pattern combinations.
    """
    packet = kwargs['packet']
    key = robot_2_python(kwargs['key'])
    pattern = robot_2_python(kwargs['pattern'])
    # Mere presence of the 'all' kwarg selects count-all mode.
    count_all = "all" in kwargs
    printr("key = {}".format(repr(key)))
    printr("pattern = {}".format(repr(pattern)))
    if key == "mcast_dst":
        mcast_regex = r"'display_name': 'Destination', 'value': '{}'".format(
            pattern + "[0-9a-fA-F]+")
        printr("mcast_regex = {}".format(mcast_regex))
        if count_all:
            return _count_pattern_matches(mcast_regex, packet)
        matched = re.search(mcast_regex, str(packet))
        if matched:
            printr(matched.group())
            return True
        printr("Nothing matched")
        return False
    if key == "mcast_src" and pattern == "allzero" and count_all:
        # Unspecified (all-zero) IPv6 source address.
        return _count_pattern_matches(
            r"'display_name': 'Source Host', 'value': '::'", packet)
    if key == "mcast_src" and pattern == "link_local" and count_all:
        mcast_regex = r"'display_name': 'Source Host', 'value': 'fe80[:0-9a-fA-F]+'"
        printr("mcast_regex = {}".format(mcast_regex))
        return _count_pattern_matches(mcast_regex, packet)
if __name__ == "__main__":
    # Ad-hoc manual test driver.  Only the scenario up to the first exit()
    # runs; everything after it is dead code kept from earlier debugging
    # sessions.
    testing(name="steve",age=30)
    # sw = fortisw()
    # sw.collect_show_cmd("this is a test")
    # ip="10.105.152.2"
    # line="2083"
    # ip="10.105.152.52"
    # line="23"
    #wait_with_timer(10)
    # Console-server address and line for the switch under test.
    ip = "10.105.50.1"
    line = "2075"
    fsw = fortisw(1)
    fsw.console = ip
    fsw.port= line
    fsw.Telnet()
    # Active scenario: discover LLDP neighbours, then locate the
    # point-to-point and network-facing ports.
    fsw.discovery_lldp()
    result,port = fsw.FindP2pPort()
    printr("Find P2P Port result = {}".format(result))
    if result:
        printr("=============== P2P port = {}".format(port))
    result,port = fsw.FindNetworkPort()
    printr("Find Network Port result = {}".format(result))
    if result:
        printr("=============== Network port = {}".format(port))
    exit()
    # --- Dead code below: earlier one-off experiments, unreachable. ---
    output = fsw.switch_commands("show switch global")
    output = fsw.CollectShowCmdRobot(output)
    fsw.update_switch_config(output)
    exit()
    output = fsw.switch_commands("diagnose stp vlan list 10")
    result = stp_port_status(role="root",port="port17",data=output)
    print(result)
    exit()
    output = fsw.switch_commands("get switch lldp stat port17")
    lldp1 = fsw.update_lldp_stats(output)
    sleep(10)
    output = fsw.switch_commands("get switch lldp stats port17")
    lldp2 = fsw.update_lldp_stats(output)
    print(compare_lldp_stats(lldp1,lldp2))
    exit()
    output = fsw.switch_commands("show switch global")
    value = fsw.update_switch_config(output)
    print(verify_tpid_id(value,"0x8818"))
    exit(1)
    fsw.discovery_lldp()
    result,port = fsw.FindP2pPort()
    if result:
        printr("=============== P2P port = {}".format(port))
    #pdb.set_trace()
    #ftg = fortigate(1)
    # ftg.Telnet()
    # ftg.ShowCmd("get switch lldp neighbors-summary")
    # log_with_time(ftg)
    # ftg.console = ip
    # ftg.port = line
    # ftg.ClearLine()
    # TelnetConsole(ip,line)
| [
"mike.zheng2008@Mikes-MacBook-Pro-2.local"
] | mike.zheng2008@Mikes-MacBook-Pro-2.local |
a85a4f29cf11b7633524ee4c215038de0901fa93 | 5041bdc8ce649616b6dcf32aeade9ae27075ae2b | /ppapi/native_client/src/shared/ppapi_proxy/DEPS | 1b9100dcede2c314940575b5beae13e3bddb5539 | [
"BSD-3-Clause",
"LicenseRef-scancode-khronos"
] | permissive | aSeijiNagai/Readium-Chromium | a15a1ea421c797fab6e0876785f9ce4afb784e60 | 404328b0541dd3da835b288785aed080f73d85dd | refs/heads/master | 2021-01-16T22:00:32.748245 | 2012-09-24T07:57:13 | 2012-09-24T07:57:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | include_rules = [
# TODO(bradnelson): eliminate this as its actually an incorrectly rooted
# internal reference.
"+native_client/src/trusted/plugin",
"+gpu",
"+native_client/src/shared/imc",
"+native_client/src/shared/srpc",
"+native_client/src/trusted/desc",
"+native_client/src/trusted/service_runtime/include/machine",
"+native_client/src/untrusted/irt",
"+native_client/src/untrusted/pthread",
]
| [
"kerz@chromium.org@4ff67af0-8c30-449e-8e8b-ad334ec8d88c"
] | kerz@chromium.org@4ff67af0-8c30-449e-8e8b-ad334ec8d88c | |
91b70b9e7f99fd5ea8db13db13c522fb98d55d9a | ce1ccae95278fcb7ccd0a187f36074bebe1aa3c3 | /fc_community/board/models.py | a12553c9ab8a6d644f5b0a5e1b0c98b05c360098 | [] | no_license | GeonWooPaeng/Django_basic | 32369de0c4a561cf4aa2552afb1b40fa4527996d | 77708b90f5d8060bf51e344ed858b24d422f92a8 | refs/heads/master | 2022-12-23T13:30:27.769818 | 2020-09-27T07:40:31 | 2020-09-27T07:40:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | from django.db import models
# Create your models here.
class Board(models.Model):
    """A bulletin-board post: title, body text, author, tags and creation time."""
    title = models.CharField(max_length=128,
                             verbose_name='제목')
    contents = models.TextField(verbose_name='내용')  # TextField: no length limit
    writer = models.ForeignKey('fcuser.Fcuser', on_delete=models.CASCADE,
                               verbose_name='작성자')  # FK to the fcuser.Fcuser model (referenced by app label)
    # on_delete=models.CASCADE: when the user account is deleted, all of
    # that user's posts are deleted with it.
    tags = models.ManyToManyField('tag.Tag', verbose_name='태그')
    registered_dttm = models.DateTimeField(auto_now_add=True,
                                           verbose_name='등록시간')
    def __str__(self):
        return self.title
    class Meta:
        db_table = 'paeng_board'
        # NOTE(review): '사용자' means "user", which looks copy-pasted from a
        # user model — confirm the intended admin label for Board posts.
        verbose_name = '사용자'
verbose_name_plural = '사용자' | [
"gwpaeng@naver.com"
] | gwpaeng@naver.com |
6168609a1bad6daf128ab051fb5b472ecef76552 | 1c2f2db1268667c253e3d6ee013a6c5b8ace76c4 | /calculator/.~c9_invoke_PvUwro.py | 849cd6c2b20646753076ed095d690aae9f856eef | [
"MIT"
] | permissive | rosiecakes/pyp-w1-gw-extensible-calculator | 69052e164230703109cf24da5f54f09fc669e3f5 | aa103a89d38d04a069f2fa62ed3ec8fda2ab7195 | refs/heads/master | 2021-01-21T04:13:25.142484 | 2016-05-25T02:37:30 | 2016-05-25T02:37:30 | 59,613,174 | 0 | 0 | null | 2016-05-24T22:34:04 | 2016-05-24T22:34:04 | null | UTF-8 | Python | false | false | 2,817 | py | from datetime import datetime
from calculator.operations import *
from calculator.exceptions import *
def create_new_calculator(operations=None):
    """
    Creates a configuration dict for a new calculator. Optionally pre loads an
    initial set of operations. By default a calculator with no operations
    is created.

    :param operations: Dict with initial operations.
                       ie: {'sum': sum_function, ...}
    :return: dict with 'operations' (name -> callable) and 'history' (list).
    """
    # Default to a fresh empty dict: storing None here (the old behavior)
    # made perform_operation/add_new_operation fail with TypeError instead
    # of the calculator's own exceptions.
    calc = {
        'operations': operations if operations is not None else {},
        'history': [],
    }
    return calc
def perform_operation(calc, operation, params):
    """
    Executes given operation with given params. It returns the result of the
    operation execution, and records it in the calculator history as a
    (execution_time, operation_name, params, result) tuple.

    :param calc: A calculator.
    :param operation: String with the operation name. ie: 'add'
    :param params: Tuple containing the list of nums to operate with.
                   ie: (1, 2, 3, 4.5, -2)
    :raises InvalidOperation: if *operation* is not registered.
    :raises InvalidParams: if the operation rejects *params*.
    """
    # Look the operation up explicitly instead of wrapping the call in
    # 'except KeyError': a KeyError raised *inside* the operation must not
    # be misreported as an unknown operation.
    if operation not in calc['operations']:
        raise InvalidOperation('Given operation is invalid.')
    try:
        result = calc['operations'][operation](*params)
    except TypeError:
        raise InvalidParams('Given params are invalid.')
    # History entry format: ('2016-05-18 12:00:00', 'add', (1, 2, 3, 4), 10)
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    calc['history'].append((timestamp, operation, params, result))
    return result
# Dead code: leftover scratch note from development.  This bare string
# literal is evaluated and discarded at import time; it has no effect.
'''
op = 'add'
if op in operations:
    op_function = operations[op]
else:
    raise InvalidOperation
'''
def add_new_operation(calc, operation):
    """
    Adds given operation(s) to the list of supported operations for given
    calculator.

    :param calc: A calculator.
    :param operation: Dict with the single operation to be added.
                      ie: {'add': add_function}
    :return: The same calculator dict, for convenience.
    """
    # dict.update replaces the old manual key-by-key copy loop.
    calc['operations'].update(operation)
    return calc
def get_operations(calc):
    """
    Return the operations supported by the given calculator as a mapping
    of operation name -> callable.  Iterating the returned dict yields
    the operation names.
    """
    return calc['operations']
def get_history(calc):
    """
    Return the operations executed since the calculator was created or
    last reset.

    Each history item is a 4-tuple:
        (execution_time, operation_name, params, result)
    for example:
        ('2016-05-20 12:00:00', 'add', (1, 2), 3)
    """
    return calc['history']
def reset_history(calc):
    """
    Clear the calculator history, replacing it with a fresh empty list.
    """
    calc.update(history=[])
    return calc
def repeat_last_operation(calc):
    """
    Returns the result of the last operation executed in the history,
    or None when the history is empty.

    History entries have the shape
    (execution_time, operation_name, params, result).
    """
    if not calc['history']:
        return None
    # Index -1 is the result field of the entry; the old code returned
    # index 1, which is the operation *name*, not its result.
    return calc['history'][-1][-1]
| [
"naha.hana@gmail.com"
] | naha.hana@gmail.com |
5cb4b3265a4257fb238248d7885ca2ac89655b57 | a479a5773fd5607f96c3b84fed57733fe39c3dbb | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/__init__.py | a8bd8939dcba1b7eac4ab69d320c8aff3409529a | [
"Apache-2.0"
] | permissive | napalm-automation/napalm-yang | 839c711e9294745534f5fbbe115e0100b645dbca | 9148e015b086ebe311c07deb92e168ea36fd7771 | refs/heads/develop | 2021-01-11T07:17:20.226734 | 2019-05-15T08:43:03 | 2019-05-15T08:43:03 | 69,226,025 | 65 | 64 | Apache-2.0 | 2019-05-15T08:43:24 | 2016-09-26T07:48:42 | Python | UTF-8 | Python | false | false | 19,744 | py | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class undefined_subtlv(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/extended-ipv4-reachability/prefixes/prefix/undefined-subtlvs/undefined-subtlv. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Sub-TLVs that are not defined in the model or not recognised by
    system.
    """
    # NOTE: machine-generated by pyangbind.  Do not edit by hand — change
    # the YANG model and regenerate, or manual fixes will be silently lost.
    __slots__ = ("_path_helper", "_extmethods", "__type", "__state")
    _yang_name = "undefined-subtlv"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # 'type' leaf: list key (leafref), read-only operational data.
        self.__type = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="type",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            is_keyval=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="leafref",
            is_config=False,
        )
        # 'state' container: operational state of the undefined sub-TLV.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Path of this node in the YANG tree: relative to the parent when
        # attached, otherwise the absolute schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "levels",
                "level",
                "link-state-database",
                "lsp",
                "tlvs",
                "tlv",
                "extended-ipv4-reachability",
                "prefixes",
                "prefix",
                "undefined-subtlvs",
                "undefined-subtlv",
            ]
    def _get_type(self):
        """
        Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/type (leafref)

        YANG Description: A reference to a subTLV
        """
        return self.__type
    def _set_type(self, v, load=False):
        """
        Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/type (leafref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_type is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_type() directly.

        YANG Description: A reference to a subTLV
        """
        parent = getattr(self, "_parent", None)
        if parent is not None and load is False:
            raise AttributeError(
                "Cannot set keys directly when" + " within an instantiated list"
            )
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=six.text_type,
                is_leaf=True,
                yang_name="type",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                is_keyval=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="leafref",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """type must be of a type compatible with leafref""",
                    "defined-type": "leafref",
                    "generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
                }
            )
        self.__type = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_type(self):
        # Reset the leaf to a fresh, unset YANGDynClass instance.
        self.__type = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="type",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            is_keyval=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="leafref",
            is_config=False,
        )
    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state (container)

        YANG Description: State parameters of the undefined sub-TLV.
        """
        return self.__state
    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State parameters of the undefined sub-TLV.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )
        self.__state = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_state(self):
        # Reset the container to a fresh, unset YANGDynClass instance.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )
    # Read-only properties exposing the getters (operational data only).
    type = __builtin__.property(_get_type)
    state = __builtin__.property(_get_state)
    _pyangbind_elements = OrderedDict([("type", type), ("state", state)])
from . import state
class undefined_subtlv(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/extended-ipv4-reachability/prefixes/prefix/undefined-subtlvs/undefined-subtlv. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Sub-TLVs that are not defined in the model or not recognised by
system.
"""
__slots__ = ("_path_helper", "_extmethods", "__type", "__state")
_yang_name = "undefined-subtlv"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"extended-ipv4-reachability",
"prefixes",
"prefix",
"undefined-subtlvs",
"undefined-subtlv",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/type (leafref)
YANG Description: A reference to a subTLV
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/type (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: A reference to a subTLV
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError(
"Cannot set keys directly when" + " within an instantiated list"
)
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state (container)
YANG Description: State parameters of the undefined sub-TLV.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of the undefined sub-TLV.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
# Expose the generated accessors as read-only Python properties.  The name
# "type" mirrors the YANG leaf name and intentionally shadows the builtin
# within this generated class namespace.
type = __builtin__.property(_get_type)
state = __builtin__.property(_get_state)

# Ordered element map used by pyangbind for serialization order.
_pyangbind_elements = OrderedDict([("type", type), ("state", state)])
| [
"dbarrosop@dravetech.com"
] | dbarrosop@dravetech.com |
def extractMaximum(ss):
    """Return the largest non-negative integer embedded in the string *ss*.

    Runs of consecutive ASCII digits are read as decimal numbers; any
    non-digit character ends the current run.  Returns 0 when the string
    contains no digits.
    """
    num, res = 0, 0
    for ch in ss:
        # Chained comparison replaces the original two-clause test, and the
        # redundant int(int(ch) - 0) is simplified to int(ch).
        if "0" <= ch <= "9":
            num = num * 10 + int(ch)
        else:
            res = max(res, num)
            num = 0
    # The final digit run may extend to the end of the string.
    return max(res, num)
# Demo: the largest number embedded in the sample string below is 564.
ss = "100klh564abc365bg"
print(extractMaximum(ss))
| [
"noreply@github.com"
] | noreply@github.com |
# Demonstration of basic list usage (Python 2 script: note the `print`
# statements).  Variable names are Croatian: prazna_lista = "empty list",
# lista_s_podacima = "list with data", neka_lista = "some list".
prazna_lista = []
print prazna_lista
# A list may mix value types: ints, floats, strings, booleans.
lista_s_podacima = [1, 3, 3.4, "Text", True ]
print lista_s_podacima
# Lists can also be built from existing variables.
var1 = "neki tekst"
var2 = 3.4
neka_lista=[var1,var2]
print neka_lista
"skolan.viktor@gmail.com"
] | skolan.viktor@gmail.com |
6a87a8e5d278ede9d444df333d662804bf68b370 | fbd347498b4ec04440dd91da0f62d3bc8aa85bff | /ex.031.precoPassagemOnibus.py | a323c227e902c1c41edaa64acec1370c78d468cd | [
"MIT"
] | permissive | romulorm/cev-python | 254ae208b468aa4e23bf59838de389d045f7d8ef | b5c6844956c131a9e4e02355459c218739ebf8c5 | refs/heads/master | 2021-05-18T22:27:31.179430 | 2020-04-17T01:39:04 | 2020-04-17T01:39:04 | 251,455,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | # Exercício Python 031: Desenvolva um programa que pergunte a distância de uma viagem em Km. Calcule o preço da passagem,
# Exercise 031: ask for the trip distance in km and compute the ticket
# price — R$ 0.50 per km for trips up to 200 km, R$ 0.45 per km for
# longer trips.
distancia = float(input("Qual a distância da viagem, em quilômetros? "))
if distancia > 200:
    preco = distancia * 0.45
else:
    preco = distancia * 0.50
print("Você vai pagar R$ {:.2f} por uma viagem de {} quilômetros.".format(preco, int(distancia)))
| [
"62728349+romulorm@users.noreply.github.com"
] | 62728349+romulorm@users.noreply.github.com |
74dfd93a93cab894593cc99b17f0005ace2dc769 | 3e71f4d64b63e74a61447994a68f497f66c5e905 | /nnutil/model/adversarial_transformer.py | b216c878941bcd5cfa1c15722b704591dca182ec | [
"BSD-3-Clause"
] | permissive | aroig/nnutil | 40a648ec56214dbad8610ec8d9c1bdc642f136e9 | 88df41ee89f592a28c1661ee8837dd8e8ca42cf3 | refs/heads/master | 2020-03-25T18:58:01.708160 | 2019-06-18T22:00:54 | 2019-06-18T22:00:54 | 144,058,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,274 | py | import os
import numpy as np
import tensorflow as tf
import nnutil as nn
from .base_model import BaseModel
class AdversarialTransformer(BaseModel):
    """Estimator-style image-to-image model with transformer and discriminator.

    Subclasses supply the two network definitions via ``transformer_network``
    and ``discriminator_network``; this class wires them into
    ``tf.estimator`` TRAIN / EVAL / PREDICT specs.
    """

    def __init__(self, name, shape):
        """Create the model.

        name:  model name forwarded to BaseModel.
        shape: image shape (without the batch dimension) used for both
               input and output.
        """
        super(AdversarialTransformer, self).__init__(name)
        self._shape = shape
        # Networks are built lazily in model_fn, once per estimator call.
        self._transformer = None
        self._discriminator = None

    @property
    def input_shape(self):
        return self._shape

    @property
    def output_shape(self):
        # The transformer maps images onto images of the same shape.
        return self._shape

    @property
    def layers(self):
        # Layers of the transformer network (valid after model_fn has run).
        return self._transformer.layers

    def transformer_network(self, params):
        """Return the layer list for the transformer network (abstract)."""
        raise NotImplementedError

    def discriminator_network(self, params):
        """Return the layer list for the discriminator network (abstract)."""
        raise NotImplementedError

    def features_placeholder(self, batch_size=1):
        """Placeholders matching the feature dict consumed by model_fn."""
        return {
            'source': tf.placeholder(dtype=tf.float32,
                                     shape=(batch_size,) + self._shape,
                                     name='source'),
            'target': tf.placeholder(dtype=tf.float32,
                                     shape=(batch_size,) + self._shape,
                                     name='target')
        }

    def loss_function(self, tgt_image, synth_image, params):
        """L2 reconstruction loss plus a gradually-enabled regularizer.

        The regularization term is dampened by a sigmoid ramp centered at
        ``params['regularizer_step']`` so it phases in during training.
        """
        step = tf.train.get_global_step()

        # Sample weights, so that easy samples weight less.
        # NOTE: currently unused pending the importance-sampling TODO below.
        sample_bias = params.get('sample_bias', 0.0)
        sample_bias_step = params.get('sample_bias_step', 0)

        # Regularizer weight
        regularizer = params.get('regularizer', 0.0)
        regularizer_step = params.get('regularizer_step', 0)

        # Calculate total loss function
        with tf.name_scope('losses'):
            # Per-sample L2 distance between synthesized and target images.
            sample_loss = tf.norm(nn.util.flatten(synth_image - tgt_image), ord=2, axis=1)

            # TODO: perform importance sampling here
            model_loss = tf.reduce_mean(sample_loss)

            regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            regularization_dampening = tf.sigmoid(tf.cast(step - regularizer_step, dtype=tf.float32) / 10.0)
            total_loss = model_loss + regularizer * regularization_dampening * sum([l for l in regularization_losses])

            tf.summary.scalar("model_loss", model_loss)

        return total_loss

    def model_fn(self, features, labels, mode, params, config):
        """tf.estimator model function dispatching to train/eval/predict."""
        src_image = features['source']
        tgt_image = features['target']

        training = (mode == tf.estimator.ModeKeys.TRAIN)

        self._transformer = nn.layers.Segment(self.transformer_network(params), name="transformer")
        # Fix: the discriminator was previously built from
        # self.transformer_network(params), leaving discriminator_network
        # dead code and making both networks identical.
        self._discriminator = nn.layers.Segment(self.discriminator_network(params), name="discriminator")

        synth_image = self._transformer.apply(src_image, training=training)

        if mode == tf.estimator.ModeKeys.PREDICT:
            return self.prediction_estimator_spec(src_image, synth_image, params, config)

        loss = self.loss_function(tgt_image, synth_image, params)

        # Configure the training and eval phases
        if mode == tf.estimator.ModeKeys.TRAIN:
            return self.training_estimator_spec(loss, src_image, synth_image, tgt_image, params, config)
        else:
            return self.evaluation_estimator_spec(loss, src_image, synth_image, tgt_image, params, config)

    def training_estimator_spec(self, loss, src_image, synth_image, tgt_image, params, config):
        """Build the TRAIN spec: Adam updates plus image/layer summaries."""
        step = tf.train.get_global_step()

        learning_rate = params.get('learning_rate', 0.0001)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                           beta1=0, beta2=0.9)

        # Manually apply gradients. We want the gradients for summaries. We need
        # to apply them manually in order to avoid having duplicate gradient ops.
        gradients = optimizer.compute_gradients(loss)

        # Make sure we update averages on each training step
        extra_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(extra_ops):
            train_op = optimizer.apply_gradients(gradients, global_step=step)

        # Side-by-side summaries: input vs. synthesized, target vs. synthesized.
        nn.summary.image_transformation(
            "transformation",
            src_image[0, :],
            synth_image[0, :])

        nn.summary.image_transformation(
            "truth",
            tgt_image[0, :],
            synth_image[0, :])

        nn.summary.layers("layer_summary_{}".format(self._transformer.name),
                          layers=self._transformer.layers,
                          gradients=gradients,
                          activations=self._transformer.layer_activations)

        nn.summary.layers("layer_summary_{}".format(self._discriminator.name),
                          layers=self._discriminator.layers,
                          gradients=gradients,
                          activations=self._discriminator.layer_activations)

        training_hooks = []

        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.TRAIN,
            loss=loss,
            training_hooks=training_hooks,
            train_op=train_op)

    def evaluation_estimator_spec(self, loss, src_image, synth_image, tgt_image, params, config):
        """Build the EVAL spec, writing merged summaries to <model_dir>/eval."""
        eval_metric_ops = {}
        evaluation_hooks = []

        # Make sure we run update averages on each training step
        extra_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(extra_ops):
            loss = tf.identity(loss)

        eval_dir = os.path.join(config.model_dir, "eval")
        evaluation_hooks.append(
            nn.train.EvalSummarySaverHook(
                output_dir=eval_dir,
                summary_op=tf.summary.merge_all()
            )
        )

        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.EVAL,
            loss=loss,
            evaluation_hooks=evaluation_hooks,
            eval_metric_ops=eval_metric_ops)

    def prediction_estimator_spec(self, src_image, synth_image, params, config):
        """Build the PREDICT spec exposing the synthesized image."""
        predictions = {
            "synth": synth_image
        }

        exports = {}

        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs=exports)
| [
"abdo.roig@gmail.com"
] | abdo.roig@gmail.com |
df4962c536735e4684bc988e1e756f437ffe86f5 | 0aa870d4a1605dbdd3bcd1d9c0470634dba6c16b | /DD2419-PRAS/localization/scripts/scrap3.py | 029491bed25e7ab46fe3e8f0f4ab9d16d5be5531 | [] | no_license | sumodnandanwar/Crazyflie-project | ace2c6e0ff244be1933036eb23babd84f1553024 | d87e338fa4ae97fe3e97d6bcd9a6100f9c816070 | refs/heads/master | 2022-11-15T13:45:06.168175 | 2020-07-15T15:14:33 | 2020-07-15T15:14:33 | 276,144,552 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | #!/usr/bin/env python
from os.path import expanduser
import csv
def Dict_goal():
    """Load waypoint rows from the Pathcsv file into the global ``dictgoal``.

    Skips the CSV header row, converts the first two columns of every row
    to floats, prints a sample value, and returns the list.  Python 2
    script: note the ``print`` statement and the ``'rb'`` CSV open mode.
    """
    global dictgoal
    dictgoal = []
    # Build an absolute path to the CSV under the user's home directory.
    pathcsv = expanduser('~')
    pathcsv += '/dd2419_ws/src/DD2419-PRAS/localization/scripts/Pathcsv'
    with open(pathcsv, 'rb') as csv_file:
        csv_reader = csv.reader(csv_file)
        next(csv_reader)  # skip the header row
        for line in csv_reader:
            line[0] = float(line[0])
            line[1] = float(line[1])
            dictgoal.append(line)
    # NOTE(review): assumes at least one data row exists — confirm.
    print dictgoal[0][1]
    return dictgoal
def main():
    """Script entry point: load the waypoint list once."""
    Dict_goal()

if __name__ == '__main__':
    main()
"sumodnandanwar@gmail.com"
] | sumodnandanwar@gmail.com |
af441a31b3a3854e5673bc4608ee3552b6733d79 | 5f78a8878db55b3ab4db48c4629bf72f06f7a4de | /mainapp/forms.py | 1ed5707ee3e32912a24d6e25e596547f586c8ced | [] | no_license | pehks1980/test2 | cc8cbf8813aabd1e1001177bad7404d005329c5f | 1031cbdae895af7f164800a3a2a4a42b35c65a68 | refs/heads/main | 2023-01-21T09:55:05.042693 | 2020-11-29T23:15:06 | 2020-11-29T23:15:06 | 312,154,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | from django.contrib.auth.forms import AuthenticationForm
from .models import ShopUser
class ShopUserLoginForm(AuthenticationForm):
    """Login form for shop users."""

    class Meta:
        model = ShopUser
        fields = ('email', 'password')

    def __init__(self, *args, **kwargs):
        super(ShopUserLoginForm, self).__init__(*args, **kwargs)
        # Apply the Bootstrap styling class to every rendered widget.
        for field in self.fields.values():
            field.widget.attrs['class'] = 'form-control'
"pehks1980@gmail.com"
] | pehks1980@gmail.com |
69cc4c7ebca922ff9a78d85842f463ce7049491b | bb1c17cdcbe941bb1d48da26e096f942b7b91d35 | /baselines/common/transformer.py | 0cb8a66b9bbe5c00d987a7dce422fcb7a5f5315f | [] | no_license | mauxam/kaithy | 3adb62824d3264327318339494197aa5bf5580ef | fbbfb76e670bdc46dd877593401e3a21ea46f488 | refs/heads/master | 2021-09-08T15:18:10.265014 | 2017-12-03T10:31:27 | 2017-12-03T10:31:27 | 124,669,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,987 | py | import tensorflow as tf
from . import position as pos
from . import image
def transform_actions(actions, board_size):
batch_size = tf.shape(actions)[0]
return tf.concat((
actions[0: batch_size // 8],
pos.rot90(
board_size, actions[batch_size // 8: batch_size // 4], 1),
pos.rot90(
board_size, actions[batch_size // 4: batch_size // 8 * 3], 2),
pos.rot90(
board_size, actions[batch_size // 8 * 3: batch_size // 2], 3),
pos.flip_left_right_rot90(
board_size, actions[batch_size // 2: batch_size // 8 * 5], 0),
pos.flip_left_right_rot90(
board_size, actions[batch_size // 8 * 5: batch_size // 8 * 6], 1),
pos.flip_left_right_rot90(
board_size, actions[batch_size // 8 * 6: batch_size // 8 * 7], 2),
pos.flip_left_right_rot90(
board_size, actions[batch_size // 8 * 7: batch_size], 3),
), axis=0)
def transform_obses(obses):
    """Expand a batch of board observations with the eight square symmetries.

    Mirrors ``transform_actions``: the batch is split into eight equal
    slices receiving the identity, three rotations, and the four flipped
    rotations, applied per-image via ``tf.map_fn``.
    NOTE(review): assumes the batch size is divisible by 8 — confirm with
    callers.
    """
    batch_size = tf.shape(obses)[0]
    return tf.concat((
        obses[0: batch_size // 8],
        tf.map_fn(lambda obs: image.rot90(obs, 1),
                  obses[batch_size // 8: batch_size // 4]),
        tf.map_fn(lambda obs: image.rot90(obs, 2),
                  obses[batch_size // 4: batch_size // 8 * 3]),
        tf.map_fn(lambda obs: image.rot90(obs, 3),
                  obses[batch_size // 8 * 3: batch_size // 2]),
        tf.map_fn(lambda obs: tf.image.flip_left_right(obs),
                  obses[batch_size // 2: batch_size // 8 * 5]),
        tf.map_fn(lambda obs: image.rot90(tf.image.flip_left_right(obs), 1),
                  obses[batch_size // 8 * 5: batch_size // 8 * 6]),
        tf.map_fn(lambda obs: image.rot90(tf.image.flip_left_right(obs), 2),
                  obses[batch_size // 8 * 6: batch_size // 8 * 7]),
        tf.map_fn(lambda obs: image.rot90(tf.image.flip_left_right(obs), 3),
                  obses[batch_size // 8 * 7: batch_size]),
    ), axis=0)
| [
"buidoigiauco@gmail.com"
] | buidoigiauco@gmail.com |
04da6b3662e7776928386b7dc8e0924cec0cc143 | 5b2f3fc7461aae47192c2e7554c68dbe2e0f7142 | /apps/pet_app/urls.py | 7a2f70899e64658ccba7590ec2e5985cb6be7dd7 | [] | no_license | Chareesa/pet_project | 878b3efba26d3f785820fee036e758e894bd8ea1 | f5b062ab0c9fa23bdacfb1cc62d75507661f4493 | refs/heads/master | 2021-07-17T20:18:21.930155 | 2017-10-25T04:28:34 | 2017-10-25T04:28:34 | 108,219,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py |
from django.conf.urls import url
from django.contrib import admin
from . import views
# URL routes for the pet app.
urlpatterns = [
    # Routes to the user dashboard after logging in.
    url(r'^dashboard$', views.dashboard),
    # Routes to the add function in pet_app views.
    url(r'^add$', views.add),
    # Routes to the show/profile page of a certain user.
    url(r'^show/(?P<id>\d+)$', views.show),
    # Comes from the form in add.html to the createPet view.
    url(r'^createPet$', views.createPet),
    # Fix: the end-of-string anchor '$' was inside the capture group
    # ((?P<id>\d+$)); moved outside to match the sibling patterns.
    url(r'^destroy/(?P<id>\d+)$', views.destroy)
]
| [
"chareesagraham@gmail.com"
] | chareesagraham@gmail.com |
# Competitive-programming solution; reads four integers "o a f k" from stdin
# and prints how many refills are needed, or -1 if impossible.
# NOTE(review): the original file's indentation was lost in this copy; the
# nesting below is a best-effort reconstruction — verify against the source.
o, a, f, k = map(int, input().split())
count = 0
a2 = a-f
if (a2 >= 0):
    di = (o-f)*2
    for i in range (k):
        # The final leg is half the round-trip distance.
        if (i == k-1):
            di =di/ 2
        # Refill before the leg if the remaining amount is insufficient.
        if (a2 < di):
            a2 = a
            count += 1
        a2 = a2 - di
        if (a2 < 0):
            count = -1
            break
        # Alternate the leg distance for the return direction.
        di = 2*o - di
    print (count)
else:
    print (-1)
| [
"noreply@github.com"
] | noreply@github.com |
1f6cdf7fa7dc542487efb3af6de7c40aed5fc954 | 9b944c37d52912f1fc24c7209f1688b6382c6f83 | /RapMatcher.py | a292c89afc826e3deba0485bc8214781ad7110c0 | [
"Apache-2.0"
] | permissive | Rapleaf/RapMatcher | 328ddbf239ac83072414edb993a4321806768e09 | 3ec99f664cc8cd2656a2f681389fbd0244fd39f4 | refs/heads/master | 2021-01-01T06:50:28.111048 | 2011-01-06T20:16:56 | 2011-01-06T20:16:56 | 1,207,880 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,908 | py | import Levenshtein
from nltk.corpus import wordnet
import RapleafInterest
import string
class RapMatcher:
    """Fuzzy matcher between a query tag and a list of Rapleaf interests.

    The constructor receives the interest strings; ``match`` takes a tag
    and returns a dictionary mapping a subset of the interest names onto
    scores expressing how likely each interest matches the tag.
    """

    equalScore = 15  # weight awarded when two words are identical

    def __init__(self, interests):
        """Strip each interest string, wrap it in a RapleafInterest, and
        prepare an empty score table."""
        self.interests = [RapleafInterest.RapleafInterest(string.rstrip(entry))
                          for entry in interests]
        self.scores = {}

    def __get_synset(self, word):
        """Collect every WordNet lemma name (synonym) for *word*."""
        synonyms = []
        for entry in wordnet.synsets(word):
            synonyms.extend(entry.lemma_names)
        return synonyms

    def __equality_match(self, word1, word2):
        """Return equalScore when the two words are identical, else 0."""
        return RapMatcher.equalScore if word1 == word2 else 0

    def __substring_match(self, word1, word2):
        """Return the shorter word's length when it occurs inside the other."""
        if word1 in word2:
            return len(word1)
        if word2 in word1:
            return len(word2)
        return 0

    def __levenshtein_match(self, word1, word2):
        """Return the shorter word's length when the words are one edit apart."""
        if Levenshtein.distance(word1, word2) == 1:
            return min(len(word1), len(word2))
        return 0

    def __wordnet_match(self, word1, word2):
        """Score the overlap between the WordNet synonym sets of two words.

        Every pair of synonyms contributes half of its equality, substring,
        and edit-distance scores to the total.
        """
        synonyms1 = self.__get_synset(word1)
        if not synonyms1:
            return 0
        synonyms2 = self.__get_synset(word2)
        if not synonyms2:
            return 0
        total = 0
        for first in synonyms1:
            upper1 = first.upper()
            for second in synonyms2:
                upper2 = second.upper()
                total += self.__equality_match(upper1, upper2) / 2
                total += self.__substring_match(upper1, upper2) / 2
                total += self.__levenshtein_match(upper1, upper2) / 2
        return total

    def match(self, query):
        """Score every stored interest against *query* and return the table.

        NOTE(review): with a multi-word query, each query word overwrites
        the previous word's score for the same interest instead of adding
        to it — confirm this is intended.
        """
        self.scores.clear()
        query = RapleafInterest.RapleafInterest(query.rstrip())
        for query_word in query.get_words():
            query_word = query_word.upper()
            for interest in self.interests:
                total = 0
                for candidate in interest.get_words():
                    candidate = candidate.upper()
                    total += self.__equality_match(query_word, candidate)
                    total += self.__substring_match(query_word, candidate)
                    total += self.__levenshtein_match(query_word, candidate)
                    total += self.__wordnet_match(query_word, candidate)
                if total:
                    self.scores[interest.get_name()] = total
        return self.scores
| [
"gareth.aye@gmail.com"
] | gareth.aye@gmail.com |
a1067b028137e10af4a91f55ec8afdd3016ea6ce | f3c6b2f8e8612f1df71b87a7602245e9b94aa5f1 | /echelon/schema2asp.py | 2377ab74fb0641b606897691aee1282ab684e797 | [
"MIT"
] | permissive | Tubbz-alt/echelon-vis | a9a8911e5a45a938a3426831f536a5b06494b213 | 21be9ba13e622369abcdd9d7abd7c165937bff69 | refs/heads/master | 2022-11-25T01:06:11.009627 | 2020-03-29T10:46:02 | 2020-03-29T10:46:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,951 | py | #!/usr/bin/env python3
# Todo: consider using model transformation language (e.g. JSON equivalent of XSLT) to
# covert schema to ASP.
import json
import sys
def getClassNameIts(ctx, prop):
    """Derive the ASP class name for an array property's item type.

    Currently the context is ignored and the property name itself is used.
    """
    return format(prop)
def getClassNameObj(ctx, prop):
    """Derive the ASP class name for a nested object property.

    Currently the context is ignored and the property name itself is used.
    """
    return format(prop)
def deref(schema, jsonobj):
    """Resolve a JSON-Schema ``$ref`` against *schema*.

    An object carrying "$ref" with an absolute path ("#/a/b") is replaced
    by the node that path points to inside *schema*; anything else is
    returned unchanged.
    """
    # TODO: Handle non-absolute paths
    if "$ref" not in jsonobj:
        return jsonobj
    path = jsonobj["$ref"]
    if path.startswith("#/"):
        path = path[2:]  # strip leading '#/'
    node = schema
    for segment in path.split("/"):
        node = node[segment]
    return node
def process(inputjson):
    """Translate a JSON-Schema document into a list of ASP facts.

    Starting from the root object (named "graph"), every visited object
    contributes a ``class(...)`` fact, its scalar properties contribute
    ``prop``/``fieldtype`` facts, and nested arrays/objects are recorded
    as ``datarel(...)`` relations and pushed onto the worklist.
    """
    result = []
    processing_queue = [("graph", inputjson)]
    # Worklist traversal (LIFO: items are popped from the end).
    while processing_queue:
        ctx, obj = processing_queue.pop()
        obj = deref(inputjson, obj)
        result.append("class({}).".format(ctx))
        for prop, propval in obj["properties"].items():
            # Renamed from "type" to avoid shadowing the builtin.
            ptype = propval["type"]
            if ptype == "array":
                its = propval["items"]
                itsclass = getClassNameIts(ctx, prop)
                result.append("datarel({},{},hasclass).".format(ctx, itsclass))
                processing_queue.append((itsclass, its))
            elif ptype == "object":
                itclass = getClassNameObj(ctx, prop)
                # TODO: Consider turning this into single tuple
                result.append("datarel({},{},hasinst).".format(ctx, itclass))
                processing_queue.append((itclass, propval))
            else:
                # Scalar field: record its name and declared type.
                result.append("prop({0},{1}).\nfieldtype({1},{2}).".format(ctx, prop, ptype))
            if "description" in propval:
                desc = propval["description"]
                result.append("suggests_prop({},{}).".format(prop, desc))
                # A few well-known descriptions refine the field type.
                if desc == "angular":
                    result.append("fieldtype({},angular).".format(prop))
                elif desc == "latitude":
                    result.append("fieldtype({},latitude).".format(prop))
                elif desc == "longitude":
                    result.append("fieldtype({},longitude).".format(prop))
            if "enum" in propval:
                result.append("fieldtype({},enum).".format(prop))
                for val in propval["enum"]:
                    result.append("fieldval({},{}).".format(prop, val))
            if "minimum" in propval:
                result.append("fieldmin({},{}).".format(prop, propval["minimum"]))
            if "maximum" in propval:
                result.append("fieldmax({},{}).".format(prop, propval["maximum"]))
        if "description" in obj:
            result.append("suggests_class({0},{1}).".format(ctx, obj["description"]))
    return result
def main():
    """CLI entry point.

    Usage: ./schema2asp.py schema.json > rules.lp
    Falls back to a bundled example schema when no argument is given.
    """
    try:
        inputfile = sys.argv[1]
    except IndexError:
        # default
        inputfile = "asp/examples/traffic_schema_simple.json"

    # Fix: the file handle was named "input", shadowing the builtin.
    with open(inputfile) as infile:
        inputjson = json.load(infile)

    result = process(inputjson)

    # Optional filters to strip out rules for the root graph (both enabled
    # by default, i.e. nothing is stripped).
    include_graph = True
    include_graph_rel = True
    if not include_graph:
        result = [r for r in result if not r.startswith("class(graph)")]
    if not include_graph_rel:
        result = [r for r in result if not r.startswith("datarel(graph,")]

    print("\n".join(result))


if __name__ == "__main__":
    main()
| [
"a.simmons@deakin.edu.au"
] | a.simmons@deakin.edu.au |
c2a46f1f6041e0e81f8ca3ee069efd797ef9e04c | 9301b535ce8856ec4fee3ed62169f6eae0a347a1 | /nlp_module/utils.py | 1cf3f9d13d5427ac9cca46688d182eedcf2261e6 | [
"MIT"
] | permissive | L-M-Sherlock/HITszQAbot | d7c130e1f0dd56ebb2d7f003372022f5038d86fa | 556779a1957e4d21214a3afe2c959b04d96db855 | refs/heads/master | 2023-08-07T22:57:51.382665 | 2023-07-26T01:35:18 | 2023-07-26T01:35:18 | 232,526,186 | 10 | 5 | null | 2023-07-26T01:35:19 | 2020-01-08T09:24:21 | Python | UTF-8 | Python | false | false | 3,311 | py | # coding: UTF-8
import time
from datetime import timedelta
import torch
from tqdm import tqdm
PAD, CLS = '[PAD]', '[CLS]' # padding符号, bert中综合信息符号
def build_dataset(config):
def load_dataset(path, pad_size=32):
contents = []
with open(path, 'r', encoding='UTF-8') as f:
for line in tqdm(f):
lin = line.strip()
if not lin:
continue
content, label = lin.split('\t')
token = config.tokenizer.tokenize(content)
token = [CLS] + token
seq_len = len(token)
mask = []
token_ids = config.tokenizer.convert_tokens_to_ids(token)
if pad_size:
if len(token) < pad_size:
mask = [1] * len(token_ids) + [0] * (pad_size - len(token))
token_ids += ([0] * (pad_size - len(token)))
else:
mask = [1] * pad_size
token_ids = token_ids[:pad_size]
seq_len = pad_size
contents.append((token_ids, int(label), seq_len, mask))
return contents
train = load_dataset(config.train_path, config.pad_size)
dev = load_dataset(config.dev_path, config.pad_size)
test = load_dataset(config.test_path, config.pad_size)
return train, dev, test
class DatasetIterater(object):
def __init__(self, batches, batch_size, device):
self.batch_size = batch_size
self.batches = batches
self.n_batches = len(batches) // batch_size
self.residue = False # 记录batch数量是否为整数
if len(batches) % self.n_batches != 0:
self.residue = True
self.index = 0
self.device = device
def _to_tensor(self, datas):
x = torch.LongTensor([_[0] for _ in datas]).to(self.device)
y = torch.LongTensor([_[1] for _ in datas]).to(self.device)
# pad前的长度(超过pad_size的设为pad_size)
seq_len = torch.LongTensor([_[2] for _ in datas]).to(self.device)
mask = torch.LongTensor([_[3] for _ in datas]).to(self.device)
return (x, seq_len, mask), y
def __next__(self):
if self.residue and self.index == self.n_batches:
batches = self.batches[self.index * self.batch_size: len(self.batches)]
self.index += 1
batches = self._to_tensor(batches)
return batches
elif self.index >= self.n_batches:
self.index = 0
raise StopIteration
else:
batches = self.batches[self.index * self.batch_size: (self.index + 1) * self.batch_size]
self.index += 1
batches = self._to_tensor(batches)
return batches
def __iter__(self):
return self
def __len__(self):
if self.residue:
return self.n_batches + 1
else:
return self.n_batches
def build_iterator(dataset, config):
iter = DatasetIterater(dataset, config.batch_size, config.device)
return iter
def get_time_dif(start_time):
"""获取已使用时间"""
end_time = time.time()
time_dif = end_time - start_time
return timedelta(seconds=int(round(time_dif)))
| [
"jarrett.ye@outlook.com"
] | jarrett.ye@outlook.com |
e1dcaaad2a4fe69510a3a88e69f5edbf668180d9 | 9e5f891b51f3b5f0c78b0fbf97316df21ba929bb | /ArrayPartition1.py | d0ced6134c427b587150b5ddb48aa17e4a95a74a | [] | no_license | TanvirKaur17/Array-4 | cd80253b3c3970c7369b036417d9ca143e7ecbbc | f81d1520bd407c424e4234ac92f50ddd3c915df8 | refs/heads/master | 2020-08-04T13:31:04.851773 | 2019-10-01T17:04:33 | 2019-10-01T17:04:33 | 212,152,462 | 0 | 0 | null | 2019-10-01T17:03:45 | 2019-10-01T17:03:45 | null | UTF-8 | Python | false | false | 422 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 1 10:01:57 2019
@author: tanvirkaur
"""
class Solution(object):
def arrayPairSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums = sorted(nums)
summ =0
if not nums:
return None
for i in range(0,len(nums)-1,2):
summ += nums[i]
return summ | [
"noreply@github.com"
] | noreply@github.com |
883d700804d9b19145bc3f36b3590a29fd7206bc | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/eqptdiagp/rsfcoddiag.py | e9fdbd6d639608f643b2b2e047fbb1d9ba7e0857 | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 8,484 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RsFcOdDiag(Mo):
"""
A source relation to the fabric card connecting different IO cards. Note that this relation is an internal object.
"""
meta = SourceRelationMeta("cobra.model.eqptdiagp.RsFcOdDiag", "cobra.model.eqpt.FC")
meta.cardinality = SourceRelationMeta.ONE_TO_M
meta.moClassName = "eqptdiagpRsFcOdDiag"
meta.rnFormat = "rsfcOdDiag-[%(tDn)s]"
meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
meta.label = "Relation to Fabric Module"
meta.writeAccessMask = 0x800080800000001
meta.readAccessMask = 0x800080800000001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.tag.Tag")
meta.childClasses.add("cobra.model.aaa.RbacAnnotation")
meta.childClasses.add("cobra.model.tag.Annotation")
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Annotation", "annotationKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.aaa.RbacAnnotation", "rbacDom-"))
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Tag", "tagKey-"))
meta.parentClasses.add("cobra.model.eqptdiagp.SpTsOdFc")
meta.superClasses.add("cobra.model.reln.Inst")
meta.superClasses.add("cobra.model.reln.To")
meta.rnPrefixes = [
('rsfcOdDiag-', True),
]
prop = PropMeta("str", "annotation", "annotation", 37727, PropCategory.REGULAR)
prop.label = "Annotation. Suggested format orchestrator:value"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("annotation", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "extMngdBy", "extMngdBy", 39866, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "undefined"
prop._addConstant("msc", "msc", 1)
prop._addConstant("undefined", "undefined", 0)
meta.props.add("extMngdBy", prop)
prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = True
prop.defaultValueStr = "yes"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("forceResolve", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "nodeId", "nodeId", 12569, PropCategory.REGULAR)
prop.label = "None"
prop.isOper = True
prop.range = [(1, 16000)]
prop.defaultValue = 0
prop.defaultValueStr = "not-found"
prop._addConstant("not-found", "not-found", 0)
meta.props.add("nodeId", prop)
prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("local", "local", 3)
prop._addConstant("mo", "mo", 1)
prop._addConstant("service", "service", 2)
meta.props.add("rType", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
prop.label = "State"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unformed"
prop._addConstant("cardinality-violation", "cardinality-violation", 5)
prop._addConstant("formed", "formed", 1)
prop._addConstant("invalid-target", "invalid-target", 4)
prop._addConstant("missing-target", "missing-target", 2)
prop._addConstant("unformed", "unformed", 0)
meta.props.add("state", prop)
prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
prop.label = "State Qualifier"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("default-target", "default-target", 2)
prop._addConstant("mismatch-target", "mismatch-target", 1)
prop._addConstant("none", "none", 0)
meta.props.add("stateQual", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 12568, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 3205
prop.defaultValueStr = "eqptFC"
prop._addConstant("eqptFC", None, 3205)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 12567, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("tDn", prop)
prop = PropMeta("str", "tType", "tType", 105, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("all", "all", 2)
prop._addConstant("mo", "mo", 1)
prop._addConstant("name", "name", 0)
meta.props.add("tType", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
meta.namingProps.append(getattr(meta.props, "tDn"))
getattr(meta.props, "tDn").needDelimiter = True
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Path"
meta.deploymentQueryPaths.append(DeploymentPathMeta("DiagFCRelOnDemandPolToNode", "On demand diag fabric card relation to Fabric Node", "cobra.model.fabric.Node"))
def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
namingVals = [tDn]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
0d0379c91606561fd1684c3b56b5a59c7ac79ac6 | 2147b03faa984c3f82b452bfa2e44738762c0620 | /users/models.py | 0e5dba17c8ef60df08f56723c7b7cee4655f5822 | [] | no_license | crowdbotics-apps/pawn-shop-30678 | 44d485d1e4bf5540320518921750293c8649ea53 | 844572b9e385948fdfbe1c3113481bf0961e810e | refs/heads/master | 2023-07-30T16:02:19.844017 | 2021-09-19T11:07:57 | 2021-09-19T11:07:57 | 408,103,844 | 2 | 0 | null | 2021-10-06T00:15:01 | 2021-09-19T11:05:16 | Python | UTF-8 | Python | false | false | 890 | py | from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
class User(AbstractUser):
# WARNING!
"""
Some officially supported features of Crowdbotics Dashboard depend on the initial
state of this User model (Such as the creation of superusers using the CLI
or password reset in the dashboard). Changing, extending, or modifying this model
may lead to unexpected bugs and or behaviors in the automated flows provided
by Crowdbotics. Change it at your own risk.
This model represents the User instance of the system, login system and
everything that relates with an `User` is represented by this model.
"""
name = models.CharField(
null=True,
blank=True,
max_length=255,
)
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
9a945c38b5ac272314f3cc18b8d69a3004068b3d | c9500ad778b8521aaa85cb7fe3239989efaa4799 | /plugins/trendmicro_visionone/icon_trendmicro_visionone/actions/submit_file_to_sandbox/__init__.py | aff6b3ae31a70139a54adafc52b8b179ae63bb49 | [
"MIT"
] | permissive | rapid7/insightconnect-plugins | 5a6465e720f114d71b1a82fe14e42e94db104a0b | 718d15ca36c57231bb89df0aebc53d0210db400c | refs/heads/master | 2023-09-01T09:21:27.143980 | 2023-08-31T10:25:36 | 2023-08-31T10:25:36 | 190,435,635 | 61 | 60 | MIT | 2023-09-14T08:47:37 | 2019-06-05T17:05:12 | Python | UTF-8 | Python | false | false | 80 | py | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import SubmitFileToSandbox
| [
"noreply@github.com"
] | noreply@github.com |
cc3d5864edd06fb58730325f6c22ab03bf004256 | 6f30b6fb0d096e7190904715c68ef3cfec5b721e | /example_python/6-the_wisdom_class/test_erase_exported.py | 9233a843e93109163a6841de4b95098374f27a63 | [
"BSD-3-Clause"
] | permissive | jbone/examples | bf4b8bbb4efd1a9c07196701a740fb58e8f36b50 | cdd34819770fbe04f6623006147a1e9d1f52b379 | refs/heads/master | 2021-01-15T10:53:38.253162 | 2014-11-25T12:47:23 | 2014-11-25T12:47:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | #!/usr/bin/env python
"""
NLUlite is a high-level Natural Language Understanding
framework.
This file is an example of the framework. This file is released with
BSD license.
"""
__author__ = 'NLUlite'
__version__ = '0.1.4'
__license__ = 'BSD'
from NLUlite import ServerProxy, Wisdom
server = ServerProxy('localhost', 4001)
wisdom = Wisdom(server)
server.erase_exported('name_ID')
| [
"contact@nlulite.com"
] | contact@nlulite.com |
0b7abfd2850c397d76469bbbb02b3dd8687bcfde | cfc1be3ae11cfee97aac0cfc56672026f1a7c853 | /ping.py | 6f92fce1eecd55e7e8dfa301d2a64795d1f70761 | [] | no_license | AlfonsoOvalle/puertos_abiertos | 162301f57a76f0a1c2468aaa99db3b9181391fd7 | dd559475202e7dcb899112d6a677b5a0c6f152a4 | refs/heads/master | 2023-01-20T19:18:49.966281 | 2020-11-28T01:25:09 | 2020-11-28T01:25:09 | 316,634,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | import socket;
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# host = '40.76.231.100'
print("Ingrese la Ip o nombre del host:")
Host = input()
print(f"Buscando puertos abiertos en el host:, {Host}")
for puerto in range(0, 65536):
result = sock.connect_ex((Host,puerto))
if result == 0:
print ('El puerto: ', puerto, ' esta abierto en el host: ' + Host)
| [
"alfonso200925014@gmail.com"
] | alfonso200925014@gmail.com |
50561e28bfc2a862656bf2ed45c24ed34ac0cd05 | 3e659c92b20f8e08c0b60212351d74fce24d8974 | /migrations/0001_initial.py | 8c81f4886964505f16b79e4fb4ef317fd53620a5 | [] | no_license | Arpan612/Blogging-app-updated | e6d12c3fd9a690035a4f0c25ccb86a17f43b55da | 0068a53a978ff33bd5a603bc1ebe9db57f4bb3b5 | refs/heads/master | 2021-07-06T21:22:34.761440 | 2020-08-27T12:20:35 | 2020-08-27T12:20:35 | 169,705,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | # Generated by Django 2.0.9 on 2019-01-02 06:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"noreply@github.com"
] | noreply@github.com |
f55c8a4be2c1181299895c4fe33e44f6c2de40c5 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /ia95ckhN5ztgfJHe4_7.py | 3ba1f214826ddee32eec6ed44940399db61237ca | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,202 | py | """
In JavaScript, there are two types of comments:
1. Single-line comments start with `//`
2. Multi-line or inline comments start with `/*` and end with `*/`
The input will be a sequence of `//`, `/*` and `*/`. **Every`/*` must have a
`*/` that immediately follows it**. To add, there can be **no single-line
comments in between multi-line comments** in between the `/*` and `*/`.
Create a function that returns `True` if comments are properly formatted, and
`False` otherwise.
### Examples
comments_correct("//////") ➞ True
# 3 single-line comments: ["//", "//", "//"]
comments_correct("/**//**////**/") ➞ True
# 3 multi-line comments + 1 single-line comment:
# ["/*", "*/", "/*", "*/", "//", "/*", "*/"]
comments_correct("///*/**/") ➞ False
# The first /* is missing a */
comments_correct("/////") ➞ False
# The 5th / is single, not a double //
### Notes
N/A
"""
def comments_correct(txt):
if len(txt)%2 !=0:
return False
chunks = []
for n in range(0,len(txt)-2,2):
chunks.append(txt[n:n+2])
for i in range(len(chunks)-1):
if chunks[i] == '/*' and chunks[i+1] != '*/':
return False
return True
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
c7235d22a3086e2c781cd82e0f5e3d942d6c6445 | b2ffd58cb7af71b5909b516e0a4677d097db94d5 | /project01/record.py | bca9587895ff03cab4fee225492c038f3959fefe | [] | no_license | VietAnhLe2399/SpeechProcessing | 547641118eeaa4098e4822f07ce513d7a6987323 | b2be9eaf71482cd6e9dbda14c8a82699bf2db4d5 | refs/heads/master | 2022-12-25T21:05:40.590139 | 2020-10-05T09:42:46 | 2020-10-05T09:42:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,978 | py | import re
import numpy as np
import sounddevice as sd
import soundfile as sf
import time
import queue
import os, shutil
from underthesea import sent_tokenize
q = queue.Queue()
category = ['thoi_su', 'goc_nhin', 'the_gioi', 'kinh_doanh', 'giai_tri', 'the_thao', 'phap_luat', 'giao_duc', 'suc_khoe', 'doi_song',
'du_lich', 'khoa_hoc', 'so_hoa', 'xe', 'y_kien', 'tam_su']
recordingCat = category[12]
pathToData = 'data/' + recordingCat + '/'
folder = pathToData
# for filename in os.listdir(folder):
# file_path = os.path.join(folder, filename)
# try:
# if os.path.isfile(file_path) or os.path.islink(file_path):
# os.unlink(file_path)
# elif os.path.isdir(file_path):
# shutil.rmtree(file_path)
# except Exception as e:
# print('Failed to delete %s. Reason: %s' % (file_path, e))
def callback(indata, frames, time, status):
"""This is called (from a separate thread) for each audio block."""
if status:
print(status)
q.put(indata.copy())
with open('article/' + recordingCat + '.txt', 'r', encoding='utf-8') as f:
# To not to read the url in the first line
f.readline()
text = f.read()
sentences = sent_tokenize(text)
# text = re.sub('[\n]+', ' ', text)
# sentences = re.split(r' *[\.\?!][\'"\)\]]* *', text)
i = 16
for sentence in sentences[16:]:
print(str(i) + '\t' + sentence)
input('Press Enter to start recording...')
try:
fileName = pathToData + recordingCat + '_' + str(i) + '.wav'
if os.path.exists(pathToData + fileName):
os.remove(pathToData + fileName)
file = sf.SoundFile(fileName, mode='x', samplerate=44100, channels=2)
with sd.InputStream(samplerate=44100, channels=2, callback=callback):
print('press Ctrl+C to stop the recording')
while True:
file.write(q.get())
except KeyboardInterrupt:
print('Recording finished: ' + repr(fileName))
i+=1
| [
"vietanhle2399@gmail.com"
] | vietanhle2399@gmail.com |
5eb1a5ea9b7f705bd38dba510644b752d8cf4162 | a035633afbc4fa12faf5a83270be6640b61ecc4e | /328. 奇偶链表.py | 1fadc4bf4a4333eb00031ee16ea18ac32c9853df | [] | no_license | opwtryl/leetcode | cf6208628a643cb79666f0868d2de17b8110b770 | b28f6998d4153921c30e8c66c70a77681e67fe15 | refs/heads/master | 2020-04-01T03:56:06.938178 | 2018-11-13T02:06:16 | 2018-11-13T02:06:16 | 152,842,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | '''
给定一个单链表,把所有的奇数节点和偶数节点分别排在一起。请注意,这里的奇数节点和偶数节点指的是节点编号的奇偶性,而不是节点的值的奇偶性。
请尝试使用原地算法完成。你的算法的空间复杂度应为 O(1),时间复杂度应为 O(nodes),nodes 为节点总数。
示例 1:
输入: 1->2->3->4->5->NULL
输出: 1->3->5->2->4->NULL
示例 2:
输入: 2->1->3->5->6->4->7->NULL
输出: 2->3->6->7->1->5->4->NULL
说明:
应当保持奇数节点和偶数节点的相对顺序。
链表的第一个节点视为奇数节点,第二个节点视为偶数节点,以此类推。
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def oddEvenList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head is None:return None
second=head.next
odd=head
even=head.next
while odd and even and odd.next and even.next:
odd.next=even.next
odd=odd.next
even.next=odd.next
even=even.next
odd.next=second
return head
| [
"noreply@github.com"
] | noreply@github.com |
b1d6ae4dd46b033c203319c87878910bf65f15ff | c892379a87343ee326c9fd29ba573dcb6d3ef863 | /quizsite/wsgi.py | 073ec64d4925c451e2259220bfc9af4783d1459b | [] | no_license | Raffan11/quizproject | 20f853cd43f3fa764c1328e95a252013d04ca20a | 3e8e8dfc7a760869f0591b2404f13198d3242997 | refs/heads/master | 2021-05-05T13:34:23.787107 | 2017-09-29T13:31:28 | 2017-09-29T13:31:28 | 104,997,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | """
WSGI config for quizsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
# import os
# from django.core.wsgi import get_wsgi_application
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "quizsite.settings")
# application = get_wsgi_application()
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "quizsite.settings")
from whitenoise.django import DjangoWhiteNoise
application = DjangoWhiteNoise(get_wsgi_application())
| [
"32261499+Raffan11@users.noreply.github.com"
] | 32261499+Raffan11@users.noreply.github.com |
0265b67928a755c785a7fe897af01a048622a747 | 5e120b28006e054f0c524c8155598d73fd2d4281 | /base/catalog/exportViews.py | ce4969850529417ae6b999ba7a18a5fa28a542a0 | [
"MIT"
] | permissive | daavelino/vulnerability-catalog | 92bf12df36be9ab8937f6010d13b5a2eb02c819c | 61e0db9cc4656a16847ec635a4cac3e9a6c67dd4 | refs/heads/master | 2023-01-06T14:54:41.802416 | 2021-12-21T12:45:25 | 2021-12-21T12:45:25 | 80,522,058 | 12 | 7 | MIT | 2022-12-24T08:22:10 | 2017-01-31T13:19:07 | JavaScript | UTF-8 | Python | false | false | 1,525 | py | from datetime import datetime
from django.contrib.auth.decorators import login_required, permission_required
from django.core import serializers
from django.http import HttpRequest, HttpResponse, HttpResponseForbidden
from django.utils.decorators import method_decorator
from django.views.generic import ListView
from .models import Vulnerability, RiskQuestions
login_url='catalog:login'
decorators = [login_required(login_url=login_url)]
today = datetime.today().strftime('%Y-%b-%d-%H%M')
output_file = 'VC-export-' + today + '.json'
@method_decorator(decorators, name='dispatch')
@method_decorator(permission_required('catalog.detail_vulnerability', \
raise_exception=True), name='dispatch')
class JsonExportView(ListView):
'''/catalog/vulnerability/data/json/export'''
'''Exports vulnerabilities and risk questions to a JSON file.'''
def export_database(HttpRequest):
if not HttpRequest.user.is_authenticated:
return HttpResponseRedirect(reverse(login_url))
if not HttpRequest.user.has_perm('catalog.detail_vulnerability'):
return HttpResponseForbidden('<h1>403 Forbidden</h1>')
all_objects = list(Vulnerability.objects.all()) \
+ list(RiskQuestions.objects.all())
result = serializers.serialize('json', all_objects)
response = HttpResponse(result, content_type='application/json')
response['Content-Disposition'] = 'attachment; filename=' + output_file
return response
| [
"daavelino@gmail.com"
] | daavelino@gmail.com |
115080ce1f0bce0813ec4a0f2dfe786aea708513 | ae181d786a3a7f010d18a7680dc9704273d31a47 | /useful/hamming_distance.py | ca23f0fb8f55abc38d23de60eb912621df337cf6 | [
"MIT",
"Unlicense"
] | permissive | zelr0x/solutions | f1a3a1deec6be5280193ec07da7e5ec4e6402fca | e073c64e866fd9b25d9bb9ae80f92fd3bc5c604d | refs/heads/master | 2023-08-31T21:41:04.250456 | 2023-08-03T18:32:44 | 2023-08-03T18:32:44 | 175,304,414 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | from typing import Iterable, TypeVar
from itertools import chain
def s_hamming_dist_bit(a: str, b: str, encoding='utf-8') -> int:
return hamming_dist_bit(a.encode(encoding), b.encode(encoding))
def hamming_dist_bit(a: Iterable[int], b: Iterable[int]) -> int:
a_bits = to_bits(a)
b_bits = to_bits(b)
return sum(1 for (ab, bb) in zip(a_bits, b_bits) if ab != bb)
def to_bits(items: Iterable[int]) -> Iterable[int]:
return flatten(byte_bits(item) for item in items)
def byte_bits(c: int) -> Iterable[int]:
return ((c >> x) & 1 for x in range(7, -1, -1))
T = TypeVar('T')
def flatten(seq: Iterable[Iterable[T]]) -> Iterable[T]:
return chain.from_iterable(seq)
if __name__ == '__main__':
hd_t = s_hamming_dist_bit('this is a test', 'wokka wokka!!!')
print(f'Hamming distance calculated: {hd_t}, expected: 37')
| [
"noreply@github.com"
] | noreply@github.com |
dd05162bb6283bcd9dadd28dd5050c53e8467b4f | d8a39f2a8bcc41cb81dcda292ba81f65502417f2 | /deep_net/image_quality_tagging/utils.py | 75833d6ad0613bad76bef39d0c77d23453bba373 | [] | no_license | santoshgsk/DeepNet | 95d92ca9ea6db1dc13dde50b70b9657573015f3b | 00d5386b61aa105c11105db962876c5e5827412b | refs/heads/master | 2021-01-10T04:00:57.804955 | 2018-06-04T10:54:27 | 2018-06-04T10:54:27 | 44,368,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | from models import *
from random import randint
import time
import numpy as np
import keras
import os
from keras.models import model_from_json
from Crypto.Hash import MD5
from django.db.models import Min
from onetimeload import model
def encrypt(data):
data = data + str(time.time())
return MD5.new(data).hexdigest()
def get_flat_to_tag():
result = {}
tot_images = FlatImages.objects.using('housing_analytics').count()
if tot_images >= 1:
num_least_tags = int(
FlatImages.objects.using('housing_analytics').aggregate(Min('num_user_tags'))['num_user_tags__min'])
result_images = FlatImages.objects.using('housing_analytics').filter(num_user_tags=num_least_tags)
random_id = randint(0, len(result_images) - 1)
result = result_images[random_id].__dict__
if not result:
get_flat_to_tag()
return result
# git clone https://github.com/fchollet/keras.git
# cd keras
# sudo pythons setup.py install
def get_prediction(abc):
abc = np.array(abc)
abc = abc.reshape((1, 2500))
# dir_path = os.path.abspath(os.path.dirname(__file__))
# model = model_from_json(open(dir_path + '/model.json').read())
# model.load_weights(dir_path + '/model_weights.h5')
if model.predict(abc)[:,1] > 0.5:
return "Good"
else:
return "Bad"
| [
"gsk.krishna@gmail.com"
] | gsk.krishna@gmail.com |
1972d15adead71f72550f9c4ff7bfc3a1ad6a084 | 62c6884e9597d96a25d274515d6124c46daffec8 | /zvt/stats/stats.py | 7128f18fc673ad777d15adc0d5b1552ed4641b1b | [
"MIT"
] | permissive | doncat99/zvt | 0f9305442af287e63f15de11cb2e2f6b5f9b3d05 | 831183bdf7a6d0fc3acd3ea51984df590078eec6 | refs/heads/master | 2023-03-22T13:35:17.277276 | 2021-03-10T14:02:08 | 2021-03-10T14:02:08 | 284,984,720 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 22,003 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# QuantStats: Portfolio analytics for quants
# https://github.com/ranaroussi/quantstats
#
# Copyright 2019 Ran Aroussi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as _pd
import numpy as _np
from math import ceil as _ceil
from scipy.stats import (norm as _norm, linregress as _linregress)
from . import utils as _utils
# ======== STATS ========
def pct_rank(prices, window=60):
    """Percentile-rank each price against its trailing window, scaled 0-100."""
    shifted = _utils.multi_shift(prices, window)
    ranked = shifted.T.rank(pct=True).T
    return ranked.iloc[:, 0] * 100.
def compsum(returns):
    """Running (cumulative) compounded growth of a return series."""
    growth = (1 + returns).cumprod()
    return growth - 1


def comp(returns):
    """Total compounded return over the whole series."""
    return (1 + returns).prod() - 1
def expected_return(returns, aggregate=None, compounded=True):
    """
    Expected return for a period, computed as the geometric
    holding-period return of the (optionally aggregated) series.
    """
    rets = _utils._prepare_returns(returns)
    rets = _utils.aggregate_returns(rets, aggregate, compounded)
    count = len(rets)
    return _np.product(1 + rets) ** (1 / count) - 1


# NOTE: the `retruns` parameter name (sic) is kept for backward compatibility
def geometric_mean(retruns, aggregate=None, compounded=True):
    """Alias of expected_return()."""
    return expected_return(retruns, aggregate, compounded)


def ghpr(retruns, aggregate=None, compounded=True):
    """Alias of expected_return()."""
    return expected_return(retruns, aggregate, compounded)
def outliers(returns, quantile=.95):
    """Return only the observations above the given quantile."""
    cutoff = returns.quantile(quantile)
    return returns[returns > cutoff].dropna(how='all')


def remove_outliers(returns, quantile=.95):
    """Return the series with observations at/above the quantile removed."""
    cutoff = returns.quantile(quantile)
    return returns[returns < cutoff]
def best(returns, aggregate=None, compounded=True):
    """Best aggregated period return (day/week/month/quarter/year)."""
    rets = _utils._prepare_returns(returns)
    return _utils.aggregate_returns(rets, aggregate, compounded).max()


def worst(returns, aggregate=None, compounded=True):
    """Worst aggregated period return (day/week/month/quarter/year)."""
    rets = _utils._prepare_returns(returns)
    return _utils.aggregate_returns(rets, aggregate, compounded).min()
def consecutive_wins(returns, aggregate=None, compounded=True):
    """Longest streak of positive aggregated returns."""
    rets = _utils._prepare_returns(returns)
    wins = _utils.aggregate_returns(rets, aggregate, compounded) > 0
    return _utils.count_consecutive(wins).max()


def consecutive_losses(returns, aggregate=None, compounded=True):
    """Longest streak of negative aggregated returns."""
    rets = _utils._prepare_returns(returns)
    losses = _utils.aggregate_returns(rets, aggregate, compounded) < 0
    return _utils.count_consecutive(losses).max()
def exposure(returns):
    """Market exposure: share of periods with a non-zero return."""
    returns = _utils._prepare_returns(returns)

    def _exposure(ret):
        # fraction of observations that are both non-NaN and non-zero,
        # rounded *up* to two decimal places
        active = len(ret[(~_np.isnan(ret)) & (ret != 0)])
        return _ceil(active / len(ret) * 100) / 100

    if isinstance(returns, _pd.DataFrame):
        return _pd.Series(
            {col: _exposure(returns[col]) for col in returns.columns})
    return _exposure(returns)
def win_rate(returns, aggregate=None, compounded=True):
    """
    Win ratio for a period: count of positive returns divided by the
    count of non-zero returns (optionally aggregated first).

    Args:
        * returns (Series, DataFrame): input return series
        * aggregate (str): optional resample rule (e.g. 'M' for monthly)
        * compounded (bool): compound returns when aggregating
    """
    def _win_rate(series):
        try:
            return len(series[series > 0]) / len(series[series != 0])
        except ZeroDivisionError:
            # no non-zero observations at all -> win rate is undefined;
            # report 0 instead of raising (was a bare `except Exception`,
            # which also hid genuine errors)
            return 0.

    returns = _utils._prepare_returns(returns)
    if aggregate:
        returns = _utils.aggregate_returns(returns, aggregate, compounded)

    if isinstance(returns, _pd.DataFrame):
        _df = {}
        for col in returns.columns:
            _df[col] = _win_rate(returns[col])

        return _pd.Series(_df)

    return _win_rate(returns)
def avg_return(returns, aggregate=None, compounded=True):
    """Mean non-zero return for the (optionally aggregated) period."""
    rets = _utils._prepare_returns(returns)
    if aggregate:
        rets = _utils.aggregate_returns(rets, aggregate, compounded)
    return rets[rets != 0].dropna().mean()


def avg_win(returns, aggregate=None, compounded=True):
    """Mean winning (positive) return for the period."""
    rets = _utils._prepare_returns(returns)
    if aggregate:
        rets = _utils.aggregate_returns(rets, aggregate, compounded)
    return rets[rets > 0].dropna().mean()


def avg_loss(returns, aggregate=None, compounded=True):
    """Mean losing (negative) return for the period."""
    rets = _utils._prepare_returns(returns)
    if aggregate:
        rets = _utils.aggregate_returns(rets, aggregate, compounded)
    return rets[rets < 0].dropna().mean()
def volatility(returns, periods=252, annualize=True):
    """Standard deviation of returns, optionally annualized by sqrt(periods)."""
    std = _utils._prepare_returns(returns).std()
    return std * _np.sqrt(periods) if annualize else std


def implied_volatility(returns, periods=252, annualize=True):
    """
    Volatility of log returns.

    NOTE(review): the annualized branch returns a *rolling* series while the
    plain branch returns a scalar std — asymmetry kept from the original.
    """
    logret = _utils.log_returns(returns)
    if annualize:
        return logret.rolling(periods).std() * _np.sqrt(periods)
    return logret.std()
# ======= METRICS =======
def sharpe(returns, rf=0., periods=252, annualize=True):
    """
    Sharpe ratio of excess returns.

    rf is expressed as a yearly (annualized) rate; when it is non-zero,
    periods must be provided so it can be deannualized.

    Args:
        * returns (Series, DataFrame): input return series
        * rf (float): risk-free rate expressed as a yearly (annualized) return
        * periods (int): frequency of returns (252 for daily, 12 for monthly)
        * annualize (bool): annualize the resulting ratio?
    """
    if rf != 0 and periods is None:
        raise Exception('Must provide periods if rf != 0')

    excess = _utils._prepare_returns(returns, rf, periods)
    ratio = excess.mean() / excess.std()

    if not annualize:
        return ratio
    return ratio * _np.sqrt(periods if periods is not None else 1)
def sortino(returns, rf=0, periods=252, annualize=True):
    """
    Sortino ratio of excess returns (mean over downside deviation).

    rf is expressed as a yearly (annualized) rate; when it is non-zero,
    periods must be provided so it can be deannualized.

    Calculation follows the Red Rock Capital paper:
    http://www.redrockcapital.com/Sortino__A__Sharper__Ratio_Red_Rock_Capital.pdf
    """
    if rf != 0 and periods is None:
        raise Exception('Must provide periods if rf != 0')

    excess = _utils._prepare_returns(returns, rf, periods)

    # downside deviation: mean of squared negative returns
    downside = (excess[excess < 0] ** 2).sum() / len(excess)
    ratio = excess.mean() / _np.sqrt(downside)

    if not annualize:
        return ratio
    return ratio * _np.sqrt(periods if periods is not None else 1)
def cagr(returns, rf=0., compounded=True):
    """
    Compound annual growth rate (CAGR%) of excess returns.

    rf (if non-zero) is assumed to be expressed as a yearly
    (annualized) rate.
    """
    excess = _utils._prepare_returns(returns, rf)
    total = comp(excess) if compounded else _np.sum(excess)

    # calendar years spanned by the index
    years = (returns.index[-1] - returns.index[0]).days / 365.

    res = abs(total + 1.0) ** (1.0 / years) - 1

    if isinstance(returns, _pd.DataFrame):
        res = _pd.Series(res, index=returns.columns)

    return res
def rar(returns, rf=0.):
    """
    Risk-adjusted return: CAGR scaled by market exposure
    (so time out of the market is taken into account).

    rf (if non-zero) is assumed to be a yearly (annualized) rate.
    """
    excess = _utils._prepare_returns(returns, rf)
    return cagr(excess) / exposure(excess)
def skew(returns):
    """Skewness (asymmetry of the distribution around its mean)."""
    return _utils._prepare_returns(returns).skew()


def kurtosis(returns):
    """Kurtosis (peakedness relative to a normal distribution)."""
    return _utils._prepare_returns(returns).kurtosis()
def calmar(returns):
    """Calmar ratio: CAGR% divided by the absolute maximum drawdown."""
    rets = _utils._prepare_returns(returns)
    return cagr(rets) / abs(max_drawdown(rets))
def ulcer_index(returns, rf=0):
    """Ulcer index score (a downside-risk measure)."""
    rets = _utils._prepare_returns(returns, rf)
    dd = 1. - rets / rets.cummax()
    return _np.sqrt((dd ** 2).sum() / (rets.shape[0] - 1))


def ulcer_performance_index(returns, rf=0):
    """Mean return divided by the ulcer index."""
    rets = _utils._prepare_returns(returns, rf)
    dd = 1. - rets / rets.cummax()
    ulcer = _np.sqrt((dd ** 2).sum() / (rets.shape[0] - 1))
    return rets.mean() / ulcer


def upi(returns, rf=0):
    """Alias of ulcer_performance_index()."""
    return ulcer_performance_index(returns, rf)
def risk_of_ruin(returns):
    """Likelihood of losing the entire investment capital."""
    rets = _utils._prepare_returns(returns)
    wins = win_rate(rets)
    return ((1 - wins) / (1 + wins)) ** len(rets)


def ror(returns):
    """Alias of risk_of_ruin()."""
    return risk_of_ruin(returns)
def value_at_risk(returns, sigma=1, confidence=0.95):
    """Daily value-at-risk (variance-covariance method) at the given confidence."""
    rets = _utils._prepare_returns(returns)
    mu = rets.mean()
    stdev = sigma * rets.std()

    # allow confidence passed as a percentage, e.g. 95 instead of 0.95
    if confidence > 1:
        confidence = confidence / 100

    return _norm.ppf(1 - confidence, mu, stdev)


def var(returns, sigma=1, confidence=0.95):
    """Alias of value_at_risk()."""
    return value_at_risk(returns, sigma, confidence)
def conditional_value_at_risk(returns, sigma=1, confidence=0.95):
    """
    Conditional daily value-at-risk (aka expected shortfall):
    the mean return beyond the VaR cutoff, quantifying tail risk.
    """
    rets = _utils._prepare_returns(returns)
    cutoff = value_at_risk(rets, sigma, confidence)
    c_var = rets[rets < cutoff].values.mean()
    # fall back to plain VaR when no observation breaches the cutoff
    return cutoff if _np.isnan(c_var) else c_var


def cvar(returns, sigma=1, confidence=0.95):
    """Alias of conditional_value_at_risk()."""
    return conditional_value_at_risk(returns, sigma, confidence)


def expected_shortfall(returns, sigma=1, confidence=0.95):
    """Alias of conditional_value_at_risk()."""
    return conditional_value_at_risk(returns, sigma, confidence)
def tail_ratio(returns, cutoff=0.95):
    """Ratio between the right (95%) and left (5%) tails of the distribution."""
    rets = _utils._prepare_returns(returns)
    right = rets.quantile(cutoff)
    left = rets.quantile(1 - cutoff)
    return abs(right / left)
def payoff_ratio(returns):
    """Payoff ratio: average win divided by the absolute average loss."""
    rets = _utils._prepare_returns(returns)
    return avg_win(rets) / abs(avg_loss(rets))


def win_loss_ratio(returns):
    """Alias of payoff_ratio()."""
    return payoff_ratio(returns)
def profit_ratio(returns):
    """
    Profit ratio: (mean win / win count) over (mean loss / loss count).

    Returns 0. when the ratio cannot be computed (division by zero).
    """
    returns = _utils._prepare_returns(returns)
    wins = returns[returns >= 0]
    loss = returns[returns < 0]

    win_ratio = abs(wins.mean() / wins.count())
    loss_ratio = abs(loss.mean() / loss.count())
    try:
        return win_ratio / loss_ratio
    except ZeroDivisionError:
        # was a bare `except Exception`, which also hid genuine errors;
        # only a zero denominator is the expected failure mode here
        return 0.
def profit_factor(returns):
    """Profit factor: total gains divided by total losses (absolute value)."""
    rets = _utils._prepare_returns(returns)
    gains = rets[rets >= 0].sum()
    losses = rets[rets < 0].sum()
    return abs(gains / losses)


def gain_to_pain_ratio(returns):
    """Alias of profit_factor()."""
    return profit_factor(returns)
def cpc_index(returns):
    """CPC index: profit factor * win rate * win/loss ratio."""
    rets = _utils._prepare_returns(returns)
    return profit_factor(rets) * win_rate(rets) * win_loss_ratio(rets)


def common_sense_ratio(returns):
    """Common sense ratio: profit factor * tail ratio."""
    rets = _utils._prepare_returns(returns)
    return profit_factor(rets) * tail_ratio(rets)
def outlier_win_ratio(returns, quantile=.99):
    """99th-percentile return relative to the mean positive return."""
    rets = _utils._prepare_returns(returns)
    return rets.quantile(quantile).mean() / rets[rets >= 0].mean()


def outlier_loss_ratio(returns, quantile=.01):
    """1st-percentile return relative to the mean negative return."""
    rets = _utils._prepare_returns(returns)
    return rets.quantile(quantile).mean() / rets[rets < 0].mean()
def recovery_factor(returns):
    """How fast the strategy recovers from drawdowns: total return / |max DD|."""
    rets = _utils._prepare_returns(returns)
    return comp(rets) / abs(max_drawdown(rets))


def risk_return_ratio(returns):
    """Mean/std of returns (a Sharpe ratio without a risk-free rate)."""
    rets = _utils._prepare_returns(returns)
    return rets.mean() / rets.std()
def max_drawdown(prices):
    """Maximum peak-to-trough drawdown of a price series (a negative number)."""
    prices = _utils._prepare_prices(prices)
    running_max = prices.expanding(min_periods=0).max()
    return (prices / running_max).min() - 1


def to_drawdown_series(prices):
    """Convert a price series into a drawdown series."""
    prices = _utils._prepare_prices(prices)
    dd = prices / _np.maximum.accumulate(prices) - 1.
    # normalize +/-inf and negative zero artifacts to a flat 0 drawdown
    return dd.replace([_np.inf, -_np.inf, -0], 0)
def drawdown_details(drawdown):
    """
    Calculate per-drawdown-period details from a drawdown series
    (as produced by to_drawdown_series): start/valley/end dates,
    duration in days, max drawdown % and the max drawdown % after
    clipping the worst 1% of observations.

    Accepts a Series (returns one DataFrame) or a DataFrame
    (returns one detail frame per column, concatenated column-wise).
    Dates in the result are formatted as 'YYYY-MM-DD' strings.
    """
    def _drawdown_details(drawdown):
        # mark no drawdown
        no_dd = drawdown == 0

        # extract dd start dates: first bar where dd becomes non-zero
        # right after a zero bar
        starts = ~no_dd & no_dd.shift(1)
        starts = list(starts[starts].index)

        # extract end dates: first zero bar right after a non-zero bar
        ends = no_dd & (~no_dd).shift(1)
        ends = list(ends[ends].index)

        # no drawdown :)
        if not starts:
            return _pd.DataFrame(
                index=[], columns=('start', 'valley', 'end', 'days',
                                   'max drawdown', '99% max drawdown'))

        # drawdown series begins in a drawdown
        if ends and starts[0] > ends[0]:
            starts.insert(0, drawdown.index[0])

        # series ends in a drawdown fill with last date
        if not ends or starts[-1] > ends[-1]:
            ends.append(drawdown.index[-1])

        # build dataframe from results; valley = date of the deepest point,
        # "99% max drawdown" is recomputed with the worst 1% of points removed
        data = []
        for i, _ in enumerate(starts):
            dd = drawdown[starts[i]:ends[i]]
            clean_dd = -remove_outliers(-dd, .99)
            data.append((starts[i], dd.idxmin(), ends[i],
                         (ends[i] - starts[i]).days,
                         dd.min() * 100, clean_dd.min() * 100))

        df = _pd.DataFrame(data=data,
                           columns=('start', 'valley', 'end', 'days',
                                    'max drawdown',
                                    '99% max drawdown'))
        df['days'] = df['days'].astype(int)
        df['max drawdown'] = df['max drawdown'].astype(float)
        df['99% max drawdown'] = df['99% max drawdown'].astype(float)

        # render the datetime columns as plain date strings
        df['start'] = df['start'].dt.strftime('%Y-%m-%d')
        df['end'] = df['end'].dt.strftime('%Y-%m-%d')
        df['valley'] = df['valley'].dt.strftime('%Y-%m-%d')

        return df

    if isinstance(drawdown, _pd.DataFrame):
        _dfs = {}
        for col in drawdown.columns:
            _dfs[col] = _drawdown_details(drawdown[col])
        return _pd.concat(_dfs, axis=1)

    return _drawdown_details(drawdown)
def kelly_criterion(returns):
    """
    calculates the recommended maximum amount of capital that
    should be allocated to the given strategy, based on the
    Kelly Criterion (http://en.wikipedia.org/wiki/Kelly_criterion)
    """
    returns = _utils._prepare_returns(returns)
    # Kelly fraction: (b * p - q) / b, where b = payoff ratio,
    # p = win probability, q = 1 - p
    payoff = payoff_ratio(returns)
    p_win = win_rate(returns)
    p_lose = 1 - p_win
    return (payoff * p_win - p_lose) / payoff
# ==== VS. BENCHMARK ====
def r_squared(returns, benchmark):
    """ measures the straight line fit of the equity curve """
    prepared = _utils._prepare_returns(returns)
    bench = _utils._prepare_benchmark(benchmark, returns.index)
    # only the correlation coefficient of the regression is needed
    _, _, r_val, _, _ = _linregress(prepared, bench)
    return r_val ** 2
def r2(returns, benchmark):
    """Alias for :func:`r_squared`."""
    return r_squared(returns, benchmark)
def information_ratio(returns, benchmark):
    """
    calculates the information ratio
    (basically the risk return ratio of the net profits)
    """
    # active return = strategy return minus benchmark return
    active = _utils._prepare_returns(returns) - \
        _utils._prepare_benchmark(benchmark, returns.index)
    # mean active return per unit of tracking error
    return active.mean() / active.std()
def greeks(returns, benchmark, periods=252.):
    """ calculates alpha and beta of the portfolio """
    # clean up the inputs first
    port = _utils._prepare_returns(returns)
    bench = _utils._prepare_benchmark(benchmark, port.index)
    # beta = cov(port, bench) / var(bench)
    cov_matrix = _np.cov(port, bench)
    beta = cov_matrix[0, 1] / cov_matrix[1, 1]
    # annualized alpha (mean excess return over beta-adjusted benchmark)
    alpha = (port.mean() - beta * bench.mean()) * periods
    return _pd.Series({
        "beta": beta,
        "alpha": alpha,
    }).fillna(0)
def rolling_greeks(returns, benchmark, periods=252):
    """ calculates rolling alpha and beta of the portfolio """
    window = int(periods)
    frame = _pd.DataFrame(data={
        "returns": _utils._prepare_returns(returns),
        "benchmark": _utils._prepare_benchmark(benchmark, returns.index)
    })
    # rolling beta = rolling corr * ratio of rolling standard deviations
    rolling_corr = frame.rolling(window).corr().unstack()['returns']['benchmark']
    rolling_std = frame.rolling(window).std()
    beta = rolling_corr * rolling_std['returns'] / rolling_std['benchmark']
    # NOTE(review): alpha uses full-sample means, not rolling means — kept as-is
    alpha = frame['returns'].mean() - beta * frame['benchmark'].mean()
    return _pd.DataFrame(index=returns.index, data={
        "beta": beta,
        "alpha": alpha
    }).fillna(0)
def compare(returns, benchmark, aggregate=None, compounded=True,
            round_vals=None):
    """
    compare returns to benchmark on a
    day/week/month/quarter/year basis
    """
    returns = _utils._prepare_returns(returns)
    benchmark = _utils._prepare_benchmark(benchmark, returns.index)
    # aggregate both series to the requested period, expressed in percent
    bench_agg = _utils.aggregate_returns(benchmark, aggregate, compounded) * 100
    ret_agg = _utils.aggregate_returns(returns, aggregate, compounded) * 100
    data = _pd.DataFrame(data={
        'Benchmark': bench_agg,
        'Returns': ret_agg
    })
    data['Multiplier'] = data['Returns'] / data['Benchmark']
    # '+' when the strategy matched or beat the benchmark for the period
    data['Won'] = _np.where(data['Returns'] >= data['Benchmark'], '+', '-')
    if round_vals is not None:
        return _np.round(data, round_vals)
    return data
def monthly_returns(returns, eoy=True, compounded=True):
    """
    calculates monthly returns

    Returns a Year x Month table of returns; when ``eoy`` is True an
    extra EOY column holds the full-year return.
    """
    # if given a price DataFrame, reduce it to a single series
    if isinstance(returns, _pd.DataFrame):
        returns.columns = map(str.lower, returns.columns)
        if len(returns.columns) > 1 and 'close' in returns.columns:
            returns = returns['close']
        else:
            returns = returns[returns.columns[0]]
    returns = _utils._prepare_returns(returns)
    original_returns = returns.copy()
    # collapse daily returns into one value per calendar month
    returns = _pd.DataFrame(
        _utils.group_returns(returns,
                             returns.index.strftime('%Y-%m-01'),
                             compounded))
    returns.columns = ['Returns']
    returns.index = _pd.to_datetime(returns.index)
    # get returnsframe
    returns['Year'] = returns.index.strftime('%Y')
    returns['Month'] = returns.index.strftime('%b')
    # make pivot table — keyword arguments required: positional args to
    # DataFrame.pivot were deprecated in pandas 1.1 and removed in pandas 2.0
    returns = returns.pivot(index='Year', columns='Month',
                            values='Returns').fillna(0)
    # handle missing months
    for month in ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                  'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']:
        if month not in returns.columns:
            returns.loc[:, month] = 0
    # order columns by month
    returns = returns[['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                       'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']]
    if eoy:
        returns['eoy'] = _utils.group_returns(
            original_returns, original_returns.index.year).values
    returns.columns = map(lambda x: str(x).upper(), returns.columns)
    returns.index.name = None
    return returns
| [
"doncat99@gmail.com"
] | doncat99@gmail.com |
22a8a08b7f13534d6a4c9439ebb9324d7efb9f49 | 46ad34abc8681ee69bb35dc75e5aa6c9b48e79f6 | /script/post_example.py | b1956c61ed6204bf3922912785593e9b90eb53d4 | [] | no_license | dingdan539/work_script | 23275a2b92238c9c736c6a4460f20aac6ecc1753 | 2e5833aa2f31b74b07b78f06ced5db687303b5b3 | refs/heads/master | 2021-01-25T11:27:50.809653 | 2017-06-17T04:16:50 | 2017-06-17T04:16:50 | 93,924,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | import urllib
import urllib2
def getresponse(post_parm):
    """URL-encode *post_parm* (dict), POST it to the OMS notification
    endpoint with basic auth, and return the raw response body."""
    # bug fix: encode the function argument, not the module-level
    # ``post_param`` global the original body referenced by mistake
    test_data_urlencode = urllib.urlencode(post_parm)
    requrl = "http://oms.yihaodian.com.cn/api/notification/event/"
    req = urllib2.Request(url=requrl, data=test_data_urlencode,
                          headers={"Authorization": "Basic ZXZlbnRfcG9zdDpkaW5nZ28="})
    return urllib2.urlopen(req).read()
# Example payload, posted once at import time.
# NOTE(review): Python 2 script (urllib2 + print statement) — kept as-is.
post_param={"level_id":500,"source_id":12,"ip":"10.4.1.1","title":"RROR LOG","message":"you are a boy 11111111111","type_id":7}
appjson=getresponse(post_param)
print appjson
"dingdan@dingdande-MBP.lan"
] | dingdan@dingdande-MBP.lan |
25d739c64d67666b3e512fd4009ad70803619a9c | 860ee2fc11471e5d08b7576fe1b539a1efa4ff2b | /AppleTomato.py | 72785b0bb10eca2a2bc94377df993c5ef35081ea | [] | no_license | itakuto/AppleTomatoWeb | 19b7ebfe9b4de5dbf614ce0964351dfb8e3d6be1 | 96a7ff425ae84a9ce47e45f184fe13d9039f2f48 | refs/heads/master | 2020-06-11T11:16:51.613092 | 2019-06-26T16:56:59 | 2019-06-26T16:56:59 | 193,944,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,864 | py | # coding: utf-8
import os
import cv2
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import tensorflow as tf
import keras
import numpy as np
# Collect the image filenames for each class into lists
Apple_files = os.listdir('downloads/Apple')
Tomato_files = os.listdir('downloads/Tomato')
# Load every image into X: read (BGR), convert to RGB, resize to 100x100
X = []
for i in Apple_files:
    img = cv2.imread('downloads/Apple/'+i)
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    rgb = cv2.resize(rgb, dsize=(100, 100))
    X.append(rgb)
for i in Tomato_files:
    img = cv2.imread('downloads/Tomato/'+i)
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    rgb = cv2.resize(rgb, dsize=(100, 100))
    X.append(rgb)
# Store the class label for each image in Y (0 = Apple, 1 = Tomato)
Y = []
label_names = ['Apple', 'Tomato']
for i in range(len(Apple_files)):
    Y.append(0)
for i in range(len(Tomato_files)):
    Y.append(1)
# Display every loaded image with its label in a 10x40 grid
plt.figure(figsize=(10, 10))
for i in range(len(X)):
    plt.subplot(10, 40, i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(X[i], cmap=plt.cm.binary)
    plt.xlabel(label_names[Y[i]])
plt.show()
# Normalize pixel values to the [0, 1] range
for i in range(len(X)):
    X[i] = X[i]/255
# Split into training (80%) and test (20%) sets
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
X_train = np.array(X_train)
X_train = X_train.reshape(-1, 100, 100, 3)
X_test = np.array(X_test)
X_test = X_test.reshape(-1, 100, 100, 3)
# input shape is (100, 100, 3)
input_shape = X_train.shape[1:]
# Build the CNN: three conv/pool stages, then a dense classifier head
model = keras.Sequential([
    keras.layers.Conv2D(32, (3, 3), padding='same', input_shape=input_shape, activation='relu'),
    keras.layers.MaxPooling2D((2, 2)),
    keras.layers.Conv2D(64, (3, 3), padding='same', activation='relu'),
    keras.layers.MaxPooling2D((2, 2)),
    keras.layers.Conv2D(128, (3, 3), padding='same', activation='relu'),
    keras.layers.MaxPooling2D((2, 2)),
    keras.layers.Flatten(),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(512, activation='relu'),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(2, activation='softmax')
])
# sparse labels (integer class ids), so sparse_categorical_crossentropy
model.compile(optimizer=keras.optimizers.Adam(),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Run training, save weights, then evaluate on the held-out set
model.fit(X_train, Y_train, epochs=30)
model.save('AppleTomato_model.h5')
test_loss, test_acc = model.evaluate(X_test, Y_test)
print('Test accuracy:', test_acc)
predictions = model.predict(X_test)
# Helper: show test image i with predicted/true labels (blue = correct, red = wrong)
def plot_image(i, predictions_array, true_label, img):
    """Render sample *i* with its predicted label, confidence and true label."""
    probs = predictions_array[i]
    actual = true_label[i]
    picture = img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(picture, cmap=plt.cm.binary)
    guess = np.argmax(probs)
    color = 'blue' if guess == actual else 'red'
    plt.xlabel("{} {:2.0f}% ({})".format(label_names[guess],
                                         100 * np.max(probs),
                                         label_names[actual]),
               color=color)
# Helper: bar chart of class probabilities (red = predicted, blue = true)
def plot_value_array(i, predictions_array, true_label):
    """Draw the two class-probability bars for sample *i*."""
    probs = predictions_array[i]
    actual = true_label[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    bars = plt.bar(range(2), probs, color="#777777")
    plt.ylim([0, 1])
    guess = np.argmax(probs)
    bars[guess].set_color('red')
    bars[actual].set_color('blue')
# Display the results: a 2x2 grid of (image, probability-bar) pairs
num_rows = 2
num_cols = 2
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2*num_cols, 2*i+1)
    plot_image(i, predictions, Y_test, X_test)
    plt.subplot(num_rows, 2*num_cols, 2*i+2)
    plot_value_array(i, predictions, Y_test)
plt.show()
| [
"itakuto.pw4@gmail.com"
] | itakuto.pw4@gmail.com |
1ffc53b3185a5ea505a06024cb8e86d6300109ef | 7e80274e4940123c11dee20b94d5142725ab6150 | /Minesweeper_Python/src/draft_MyAI.py | e5aea03a7f9480affc79ae907e74d74c9ddf7ab8 | [] | no_license | codewitty/GaiaAI | 18fc4eb2baa143ceb8c4702e905d49a214f961c6 | 0dd365b0f1ab1567e840c71d48befcdf28191519 | refs/heads/main | 2023-05-06T19:09:51.924170 | 2021-06-06T07:59:43 | 2021-06-06T07:59:43 | 368,739,527 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,604 | py | # ==============================CS-199==================================
# FILE: MyAI.py
#
# AUTHOR: Justin Chung
#
# DESCRIPTION: This file contains the MyAI class. You will implement your
# agent in this file. You will write the 'getAction' function,
# the constructor, and any additional helper functions.
#
# NOTES: - MyAI inherits from the abstract AI class in AI.py.
#
# - DO NOT MAKE CHANGES TO THIS FILE.
# ==============================CS-199==================================
from AI import AI
from Action import Action
from typing import List
import random
class Tile:
    """Per-cell bookkeeping for the Minesweeper board model."""
    def __init__(self):
        self.val1 = '*' # Covered/Marked or covered/Unmarked or Label ('*' covered, 'B' marked bomb, int = uncovered label)
        self.val2 = 0 # Effective Label (label minus adjacent marked bombs; 9 is used as a sentinel near bombs)
        self.val3 = 8 # Number of neighbors that are covered or unmarked
        self.val4 = 0 # Probability field (accumulated mine likelihood, see __getProbability)
class MyAI( AI ):
    """Minesweeper agent.

    Board model: ``self.board[x][y]`` holds a :class:`Tile`; board indices are
    0-based while most helpers take/return 1-based game coordinates (hence the
    pervasive ``-1`` / ``+1`` conversions).
    Strategy in getAction(): uncover queued safe tiles first, then expand
    zeros, then mark forced bombs via label arithmetic, and finally fall back
    to a probability heuristic.
    """
    def __init__(self, rowDimension, colDimension, totalMines, startX, startY):
        self.zeroes = []
        self.total = 0
        self.ones = []
        self.bombs = []
        self.row = rowDimension
        self.col = colDimension
        print(self.row, self.col)
        self.x_coord = startX
        self.y_coord = startY
        # current tile in 1-based game coordinates
        self.current = (startX+1, startY+1)
        self.flag = True
        self.zeroes.append(self.current)
        self.__initializeboard()
        self.timestoUncover = (rowDimension * colDimension)
        self.__updateboardneighbors(self.current[0], self.current[1])
        # queue of covered tiles to uncover next
        self.neighbors = self.__getCoveredNeighbors(self.current[0]-1, self.current[1]-1)
    def getAction(self, number: int) -> "Action Object":
        """Record the label of the last uncovered tile and pick the next move."""
        if (self.board[self.current[0]-1][self.current[1]-1].val1 == '*'):
            self.__updateboardneighbors(self.current[0], self.current[1])
        self.__updateboard(self.current[0], self.current[1], number)
        #self.__printboard() Uncomment to use (Only if running in debug mode)
        #self.__printboard2() Uncomment to use (Only if running in debug mode)
        while self.flag:
            # count uncovered tiles; leave once the whole board is resolved
            self.total = 0
            for i in range(self.row):
                for j in range(self.col):
                    if self.board[i][j].val1 != '*':
                        self.total += 1
            if self.total == self.timestoUncover:
                return Action(AI.Action.LEAVE)
            # Uncovers anyone in self.neighbors
            if len(self.neighbors) >= 1:
                self.current = self.neighbors.pop(0)
                x = self.current[0] - 1
                y = self.current[1] - 1
                action = AI.Action.UNCOVER
                return Action(action, x, y)
            # If self.neighbors is empty we must use algorithms to add more to uncover
            # Below clears every 0
            else:
                for i in range(self.row):
                    for j in range(self.col):
                        if self.board[i][j].val1 == 0 and self.board[i][j].val3 > 0:
                            action = AI.Action.UNCOVER
                            self.neighbors = self.__getCoveredNeighbors(i, j)
                            if len(self.neighbors) >= 1:
                                new = self.neighbors.pop(0)
                                self.current = new
                                return Action(action, new[0]-1, new[1]-1)
                # Below clears marks bombs around ones and updates
                self.ones = self.__generateOnesList()
                for one in self.ones:
                    if int(self.board[one[0]-1][one[1]-1].val1) == int(self.board[one[0]-1][one[1]-1].val3):
                        neighbors = self.__getneighbors(one[0],one[1]) # Neighbors of all tiles where num of label is equal to uncovered tiles
                        #print(f'Tile Coordinate is {one}, and neighbors is {neighbors}')
                        #print(f'CHECKING')
                        for neighbor in neighbors: # for each neighbor in those neighbors
                            if self.board[neighbor[0]-1][neighbor[1]-1].val1 == '*' and self.board[neighbor[0]-1][neighbor[1]-1].val2 != 9: # if the neighbor is covered
                                self.board[neighbor[0]-1][neighbor[1]-1].val1 = 'B' # mark it as a bomb
                                self.bombs.append((neighbor[0],neighbor[1])) # add coordinate of bomb to bomb list
                # now all bombs have been appended to bomb list
                for bomb in self.bombs: # now update neighbors of each bomb coordinate
                    self.__updateboardneighbors(bomb[0],bomb[1])
                    self.__updateEffectiveLabel(bomb[0],bomb[1])
                # now that bombs around ones are marked, we want to uncover all tles with effective label 0
                self.neighbors = self.__getCoordsofEffectiveZeroes()
                #print(f'Coords of all effective zeroes are {self.neighbors}')
                if len(self.neighbors) == 0:
                    for i in range(self.row):
                        for j in range(self.col):
                            if self.board[i][j].val2 == self.board[i][j].val3: # if effective label of any tile is equal to uncovered neighbors
                                n = self.__getneighbors(i+1,j+1)
                                for neighbor in n: # for neighbor in neighbors of any tile that has effective label equal to uncovered neighbors
                                    if self.board[neighbor[0]-1][neighbor[1]-1].val1 == '*': # if the neighbor is covered it is a bomb
                                        self.board[neighbor[0]-1][neighbor[1]-1].val1 = 'B'
                                        self.__updateboardneighbors(neighbor[0],neighbor[1]) # so we update the labels of everyone around that bomb
                                        self.__updateEffectiveLabel(neighbor[0],neighbor[1])
                    self.neighbors = self.__getCoordsofEffectiveZeroes() # after updating, we now get more effective zeroes
                    #self.__printboard2()
                # probability check
                self.bombs = []
                if len(self.neighbors) == 0:
                    #print("Checking game board before probability")
                    #self.__printboard2()
                    maxi = self.__getProbability()
                    if maxi == 999: # This means that if there are no covered tiles left, we just leave)
                        return Action(AI.Action.LEAVE)
                    #self.__printboard2()
                    # mark the most-likely-mine tile as a bomb and propagate
                    self.board[maxi[0]][maxi[1]].val1 = 'B'
                    self.__updateboardneighbors(maxi[0]+1,maxi[1]+1)
                    self.__updateEffectiveLabel(maxi[0]+1,maxi[1]+1)
                    #print("Checking game board after probability check")
                    #self.__printboard2()
                    self.neighbors = self.__getCoordsofEffectiveZeroes()
                if len(self.neighbors) == 0:
                    #print("FINAL CHECK")
                    # last resort: uncover an arbitrary covered tile
                    finalcheck = self.__getCoveredTiles()
                    if len(finalcheck) >= 1:
                        self.neighbors.append(finalcheck.pop(0))
                if len(self.neighbors) == 0:
                    return Action(AI.Action.LEAVE)
    #####################################################
    # HELPER FUNCTIONS                                  #
    #####################################################
    def __getneighbors(self, x: int, y: int) -> List:
        """ Return a list of all neighbors of the given co-ordinate"""
        # takes 1-based game coordinates; clips to the board bounds
        neighbors = []
        neighbors.append((x, y + 1))
        neighbors.append((x, y - 1))
        neighbors.append((x + 1, y))
        neighbors.append((x - 1, y))
        neighbors.append((x + 1, y + 1))
        neighbors.append((x - 1, y + 1))
        neighbors.append((x - 1, y - 1))
        neighbors.append((x + 1, y - 1))
        valid_neighbors = [x for x in neighbors if x[0] > 0 and x[0] <= self.row and x[1] > 0 and x[1] <= self.col]
        return valid_neighbors
    def __getCoveredNeighbors(self, x: int, y: int) -> List:
        """ Return a list of all neighbors of the given co-ordinate"""
        # takes 0-based array coordinates; returns 1-based coordinates
        neighbors = self.__getneighbors(x+1, y+1)
        covered_neighbors = [i for i in neighbors if self.board[i[0]-1][i[1]-1].val1 == '*']
        return covered_neighbors
    def __getUncoveredNeighbors(self, x: int, y: int) -> List: # Uncovered neighbor means no * or B (doesnt include bombs)
        """ Return a list of all neighbors of the given co-ordinate"""
        neighbors = self.__getneighbors(x+1, y+1)
        uncovered_neighbors = [i for i in neighbors if self.board[i[0]-1][i[1]-1].val1 != '*' and self.board[i[0]-1][i[1]-1].val1 != 'B']
        return uncovered_neighbors
    # This helper function initializes the board according to the model from Kask's discussion
    def __initializeboard(self) -> None:
        self.board = [[i for i in range(self.row)] for j in range(self.col)]
        for i in range(self.col):
            for j in range(self.row):
                print(i,j)
                tile = Tile()
                self.board[i][j] = tile
        self.board[self.x_coord][self.y_coord].val1 = 0 # You can assume first label always 0
        # edge tiles have 5 neighbors, corner tiles have 3
        for i in range(self.row):
            self.board[0][i].val3 = 5
            self.board[-1][i].val3 = 5
        for i in range(self.col):
            self.board[i][0].val3 = 5
            self.board[i][-1].val3 = 5
        self.board[0][0].val3 = 3
        self.board[self.col-1][self.row-1].val3 = 3
        self.board[0][self.row-1].val3 = 3
        self.board[self.col-1][0].val3 = 3
    # This helper function prints out how the model looks in terms of our board nested array
    # You have to look at it sideways
    # Indices are accurate for this one
    def __printboard(self) -> None:
        counter = 0
        for i in range(self.row):
            print(' ' + str(i) + ' ', end="")
        print()
        for i in range(self.col):
            print(' ' + '-----' + ' ', end="")
        print()
        flag = True
        for l in self.board:
            for tile in l:
                if flag == True:
                    print(str(counter) + '|', end=" ")
                    flag = False
                print(str(tile.val1) + ':' + str(tile.val2) + ':' + str(tile.val3) + ' ', end=" ")
            flag = True
            counter+= 1
            print()
    # This helper function prints out how the model looks on the actual board
    # It basically flips the board from __printboard sideways so you can see how it actually looks
    # Indices are inaccurate in this case so ignore those because the board was flipped sideways
    def __printboard2(self) -> None:
        counter = 0
        subtract = -1
        for i in range(self.row):
            print(' ' + str(i) + ' ', end="")
        print()
        for i in range(self.col):
            print(' ' + '-----' + ' ', end="")
        print()
        flag = True
        for i in range(self.row):
            for j in range(self.col):
                if flag == True:
                    print(str(counter) + '|', end=" ")
                    flag = False
                print(str(self.board[j][subtract].val1) + ':' +
                      str(self.board[j][subtract].val2) + ':' +
                      str(self.board[j][subtract].val3) + ' ', end=" ")
            flag = True
            counter+= 1
            subtract -= 1
            print()
    # Updates our board's labels as we uncover
    # Does not have functionality for changing anything but the label only so far
    # Coordinate of current tile to uncover must be subtracted by 1 before accessing the board
    def __updateboard(self, x: int, y: int, label: int) -> None:
        self.board[x-1][y-1].val1 = int(label)
        num_bombs = 0
        neighbors = self.__getneighbors(x,y)
        for neighbor in neighbors:
            if self.board[neighbor[0]-1][neighbor[1]-1].val1 == 'B': # Possible optimize in the future
                num_bombs += 1
        # effective label = label minus already-marked adjacent bombs
        self.board[x-1][y-1].val2 = int(label) - num_bombs
    def __updateboardneighbors(self, x: int, y: int) -> None:
        # one fewer covered neighbor for everyone adjacent to (x, y)
        neighbors = self.__getneighbors(x,y)
        for neighbor in neighbors:
            self.board[neighbor[0]-1][neighbor[1]-1].val3 -= 1
    def __generateOnesList(self) -> None:
        # 1-based coordinates of every tile whose label is exactly 1
        ones = []
        for i in range(self.row):
            for j in range(self.col):
                if (self.board[i][j].val1) == 1:
                    ones.append((i+1,j+1))
        return ones
    def __updateEffectiveLabel(self, x: int, y: int) -> None:
        # propagate a newly marked bomb at (x, y) to its neighbors
        bombneighbors = self.__getneighbors(x,y)
        for neighbor in bombneighbors:
            if self.board[neighbor[0]-1][neighbor[1]-1].val1 == '*' or self.board[neighbor[0]-1][neighbor[1]-1].val1 == 'B': # if a bomb's neighbor is uncovered, set to 9
                self.board[neighbor[0]-1][neighbor[1]-1].val2 = 9
            else:
                self.board[neighbor[0]-1][neighbor[1]-1].val2 -= 1 # otherwise, decrement effective label of neighbor
    def __getCoordsofEffectiveZeroes(self) -> None:
        # covered neighbors of every uncovered tile with effective label 0 (safe to uncover)
        neighborss = []
        for i in range(self.row):
            for j in range(self.col):
                if (self.board[i][j].val1 != 'B' and self.board[i][j].val1 != '*' and self.board[i][j].val2 == 0):
                    neighbors = self.__getCoveredNeighbors(i,j)
                    #print(f'Tile of coordinate {(i+1,j+1)} has neighbors of {neighbors}')
                    for neighbor in neighbors:
                        if neighbor not in neighborss:
                            neighborss.append(neighbor)
        return neighborss
    def __getProbability(self) -> tuple: # tuple coordinate is return in array coords
        # accumulate, per covered tile, the sum of (effective label / covered
        # neighbors) over its uncovered neighbors, and track the maximum
        neighborss = []
        maxi = []
        maxval = 0
        for i in range(self.row):
            for j in range(self.col):
                if (self.board[i][j].val1 == '*'): # if a tile is covered
                    neighborss = self.__getUncoveredNeighbors(i,j) # get uncovered neighbors of that tile
                    #print(f'Tile of coordinate {(i+1,j+1)} is covered and we\'re checking for probability')
                    #print(f'Uncovered Neighbors: {neighborss}')
                    for neighbor in neighborss: # for every neighbor in those neighbors
                        self.board[i][j].val4 += self.board[neighbor[0]-1][neighbor[1]-1].val2/self.board[neighbor[0]-1][neighbor[1]-1].val3
                    #print(f'Tile of coordinate ({i+1},{j+1}) has probability value = {self.board[i][j].val4}')
                    if self.board[i][j].val4 > maxval:
                        maxval = self.board[i][j].val4
                        maxi.clear()
                        maxi.append(i)
                        maxi.append(j)
        if len(maxi) == 0: # if wall of bombs surround covered tiles,we must do random
            allCovered = self.__getCoveredTiles()
            if len(allCovered) == 0: # No covered tiles left
                return 999
            #print(f'Covered tiles are {allCovered} from probability')
            else:
                randomval = random.randint(0,len(allCovered)-1)
                maxi.append(allCovered[0][0]-1)
                maxi.append(allCovered[0][1]-1)
        return maxi
    def __getCoveredTiles(self): # returns covered tile coordinate in list of tuples which are actual game coordinate
        covered = []
        for i in range(self.row):
            for j in range(self.col):
                if self.board[i][j].val1 == '*': # if a tile is covered
                    covered.append((i+1,j+1)) # May have to fix this if row and column dimensions different
        return covered
    # An Optimization PROBLEM if we ever wanna optimize this in the future
    # self.__geteffectiveZeroes
    # The issue is that this function gets the coordinates of every tile in the board
    # that has an effective label of 0 and is not uncovered or a bomb
    # this means that it picks up on tiles that have been done for a very long time also
    # fo these tiles it calls self.__getCoveredNeighbors but for most of these tiles, since they have been done
    # for a very long time, the obviously dont have any covered neighbors for it goes through pointless iterations
    # which could be making our code take longer
| [
"gomesjoshua@gmail.com"
] | gomesjoshua@gmail.com |
4d7441191bb081ae089a0ca2507ab8be71437200 | 4ddf61397e02ba4638ba1137e3f18e8ca5979ac7 | /examples/respmod_copy.py | a78e7288a0e94c685bb233cddde9bc52a8e3973c | [] | no_license | snobb/pyicap | 209282b433730b682fb47886f5abc04ad2851a08 | 2696697b40d2534ce0c9af0f3078525fd038a902 | refs/heads/master | 2021-05-27T23:49:38.687001 | 2013-07-08T07:30:13 | 2013-07-08T07:32:23 | 18,606,360 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,675 | py | #!/bin/env python
# -*- coding: utf8 -*-
import random
import SocketServer
from pyicap import *
# ICAP server that handles each incoming connection on its own thread
class ThreadingSimpleServer(SocketServer.ThreadingMixIn, ICAPServer):
    pass
class ICAPHandler(BaseICAPRequestHandler):
    """ICAP request handler: advertises RESPMOD and copies response bodies through unmodified."""
    def example_OPTIONS(self):
        # Advertise server capabilities for the 'example' service
        self.set_icap_response(200)
        self.set_icap_header('Methods', 'RESPMOD')
        self.set_icap_header('Service', 'PyICAP Server 1.0')
        self.set_icap_header('Preview', '0')
        self.set_icap_header('Transfer-Preview', '*')
        self.set_icap_header('Transfer-Ignore', 'jpg,jpeg,gif,png,swf,flv')
        self.set_icap_header('Transfer-Complete', '')
        self.set_icap_header('Max-Connections', '100')
        self.set_icap_header('Options-TTL', '3600')
        self.send_headers(False)
    def example_RESPMOD(self):
        """Pass-through RESPMOD: echo the encapsulated status/headers and body."""
        #while True:
        #    chunk = self.read_chunk()
        #    if chunk == '':
        #        break
        #self.send_enc_error(500, body='<html><head><title>Whoops</title></head><body><h1>500 ICAP meditation</h1></body></html>')
        #return
        self.set_icap_response(200)
        # mirror the encapsulated response status line and headers verbatim
        self.set_enc_status(' '.join(self.enc_res_status))
        for h in self.enc_res_headers:
            for v in self.enc_res_headers[h]:
                self.set_enc_header(h, v)
        # The code below is only copying some data.
        # Very convoluted for such a simple task.
        # This thing needs a serious redesign.
        # Well, without preview, it'd be quite simple...
        if not self.has_body:
            self.send_headers(False)
            return
        if self.preview:
            # drain the preview chunks into a buffer first
            prevbuf = ''
            while True:
                chunk = self.read_chunk()
                if chunk == '':
                    break
                prevbuf += chunk
            if self.ieof:
                # preview contained the whole body: replay it and finish
                self.send_headers(True)
                if len(prevbuf) > 0:
                    self.write_chunk(prevbuf)
                self.write_chunk('')
                return
            # ask the client for the rest of the body, then stream it through
            self.cont()
            self.send_headers(True)
            if len(prevbuf) > 0:
                self.write_chunk(prevbuf)
            while True:
                chunk = self.read_chunk()
                self.write_chunk(chunk)
                if chunk == '':
                    break
        else:
            # no preview: stream chunks straight through until the terminator
            self.send_headers(True)
            while True:
                chunk = self.read_chunk()
                self.write_chunk(chunk)
                if chunk == '':
                    break
# Serve ICAP requests on port 13440 until interrupted.
# NOTE(review): Python 2 script (SocketServer + print statement) — kept as-is.
port = 13440
server = ThreadingSimpleServer(('', port), ICAPHandler)
try:
    while 1:
        server.handle_request()
except KeyboardInterrupt:
    print "Finished"
| [
"tamas.fabian@vengit.com"
] | tamas.fabian@vengit.com |
9327c7c353f57edc531a78952f182e4b45b0c405 | a46e3ab5260c819e2b1a20343205b248a76314f3 | /pycharm_dict_str_split_unexpected.py | 9c4c8332fb5b3185d40c302f5e19bc170359ecf9 | [] | no_license | albertz/playground | 97ea882eb077e341c69f9e593918d38f89f8bc64 | f30c6330d855056f1756eeb558aa51fe72040c4e | refs/heads/master | 2023-08-16T16:33:01.780047 | 2023-07-31T11:46:58 | 2023-07-31T11:46:58 | 3,687,829 | 10 | 3 | null | null | null | null | UTF-8 | Python | false | false | 139 | py |
"""
https://youtrack.jetbrains.com/issue/PY-43916
"""
s = "a=b,c=d"
opts = dict([opt.split("=", 1) for opt in s.split(",")])
print(opts)
| [
"albzey@gmail.com"
] | albzey@gmail.com |
b3dc5ea3d89b1e07a55ce83f932073cd9b52c537 | b6df7cda5c23cda304fcc0af1450ac3c27a224c1 | /data/codes/ericmartel_Perforce.py | f0b775db30e972ffd0563d511eca16f05a452ea5 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | vieira-rafael/py-search | 88ee167fa1949414cc4f3c98d33f8ecec1ce756d | b8c6dccc58d72af35e4d4631f21178296f610b8a | refs/heads/master | 2021-01-21T04:59:36.220510 | 2016-06-20T01:45:34 | 2016-06-20T01:45:34 | 54,433,313 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 35,544 | py | # Written by Eric Martel (emartel@gmail.com / www.ericmartel.com)
# Direct port of the Sublime Text 2 version also available on my github, see README.md for more info.
# Reconstructed: newlines were lost in this region; statements were fused
# onto single lines. Logic and ordering preserved from the fused text.
import sublime
import sublime_plugin

import os
import stat
import subprocess
import tempfile
import threading
import json
import sys

try:
    from Queue import Queue, Empty
except ImportError:
    from queue import Queue, Empty  # python 3.x

# Plugin Settings are located in 'perforce.sublime-settings' make a copy in the User folder to keep changes

# global variable used when calling p4 - it stores the path of the file in the
# current view, used to determine with P4CONFIG to use
# whenever a view is selected, the variable gets updated
global_folder = ''
class PerforceP4CONFIGHandler(sublime_plugin.EventListener): def on_activated(self, view): if view.file_name(): global global_folder global_folder, filename = os.path.split(view.file_name())
# Executed at startup to store the path of the plugin... necessary to open files relative to the pluginperforceplugin_dir = os.getcwd()
# Utility functionsdef ConstructCommand(in_command): perforce_settings = sublime.load_settings('Perforce.sublime-settings') p4Env = perforce_settings.get('perforce_p4env') p4Path = perforce_settings.get('perforce_p4path') if ( p4Path == None or p4Path == '' ): p4Path = '' command = '' if(p4Env and p4Env != ''): command = '. {0} && {1}'.format(p4Env, p4Path) elif(sublime.platform() == "osx"): command = '. ~/.bash_profile && {0}'.format(p4Path) # Revert change until threading is fixed # command = getPerforceConfigFromPreferences(command) command += in_command return command
def getPerforceConfigFromPreferences(command):
    """Prepend any P4 variables found in the plugin settings to *command*."""
    perforce_settings = sublime.load_settings('Perforce.sublime-settings')
    # check to see if the sublime preferences include the given p4 config
    # if it does, then add it to the command in the form "var=value command"
    # so that they get inserted into the environment the command runs in
    def addP4Var(command, var):
        p4var = perforce_settings.get(var)
        if p4var:
            if sublime.platform() == "windows":
                return command + "SET {0}={1} && ".format(var, p4var)
            return "{0}{1}={2} ".format(command, var, p4var)
        return command
    command = addP4Var(command, "P4PORT")
    command = addP4Var(command, "P4CLIENT")
    command = addP4Var(command, "P4USER")
    command = addP4Var(command, "P4PASSWD")
    return command
def GetUserFromClientspec():
    """Return the Perforce user name parsed from ``p4 info``, or -1 on failure."""
    command = ConstructCommand('p4 info')
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True)
    result, err = p.communicate()
    result = result.decode("utf-8")
    err = err.decode("utf-8")
    if(err):
        WarnUser("usererr {0}".format(err.strip()))
        return -1
    # locate the line containing "User name: " and extract the following name
    startindex = result.find("User name: ")
    if(startindex == -1):
        WarnUser("Unexpected output from 'p4 info'.")
        return -1
    startindex += 11  # advance after 'User name: '
    endindex = result.find("\n", startindex)
    if(endindex == -1):
        WarnUser("Unexpected output from 'p4 info'.")
        return -1
    return result[startindex:endindex].strip()
def GetClientRoot(in_dir):
    """Return the clientspec root path from ``p4 info`` (os.sep-normalized), or -1."""
    # check if the file is in the depot
    command = ConstructCommand('p4 info')
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True)
    result, err = p.communicate()
    result = result.decode("utf-8")
    err = err.decode("utf-8")
    if(err):
        WarnUser(err.strip())
        return -1
    # locate the line containing "Client root: " and extract the following path
    startindex = result.find("Client root: ")
    if(startindex == -1):
        # sometimes the clientspec is not displayed
        sublime.error_message("Perforce Plugin: p4 info didn't supply a valid clientspec, launching p4 client")
        command = ConstructCommand('p4 client')
        p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True)
        result, err = p.communicate()
        return -1
    startindex += 13  # advance after 'Client root: '
    endindex = result.find("\n", startindex)
    if(endindex == -1):
        WarnUser("Unexpected output from 'p4 info'.")
        return -1
    # convert all paths to "os.sep" slashes
    convertedclientroot = result[startindex:endindex].strip().replace('\\', os.sep).replace('/', os.sep)
    return convertedclientroot
def IsFolderUnderClientRoot(in_folder):
    """Return 1 when *in_folder* lies under the clientspec root, else 0."""
    # check if the file is in the depot
    clientroot = GetClientRoot(in_folder)
    if(clientroot == -1):
        return 0
    clientroot = clientroot.lower()
    # a "null" root means everything is under the client
    if(clientroot == "null"):
        return 1
    # convert all paths to "os.sep" slashes
    convertedfolder = in_folder.lower().replace('\\', os.sep).replace('/', os.sep)
    clientrootindex = convertedfolder.find(clientroot)
    if(clientrootindex == -1):
        return 0
    return 1
def IsFileInDepot(in_folder, in_filename): isUnderClientRoot = IsFolderUnderClientRoot(in_folder); if(os.path.isfile(os.path.join(in_folder, in_filename))): # file exists on disk, not being added if(isUnderClientRoot): return 1 else: return 0 else: if(isUnderClientRoot): return -1 # will be in the depot, it's being added else: return 0
def GetPendingChangelists():
    """Return (1, output) with the user's pending changelists, or (0, error)."""
    # Launch p4 changes to retrieve all the pending changelists
    currentuser = GetUserFromClientspec()
    if(currentuser == -1):
        return 0, "Unexpected output from 'p4 info'."
    command = ConstructCommand('p4 changes -s pending -u {0}'.format(currentuser))
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True)
    result, err = p.communicate()
    result = result.decode("utf-8")
    err = err.decode("utf-8")
    if(not err):
        return 1, result
    return 0, result
def AppendToChangelistDescription(changelist, input): # First, create an empty changelist, we will then get the cl number and set the description command = ConstructCommand('p4 change -o {0}'.format(changelist)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if(err): return 0, err
# Find the description field and modify it lines = result.splitlines()
descriptionindex = -1 for index, line in enumerate(lines): if(line.strip() == "Description:"): descriptionindex = index break; filesindex = -1 for index, line in enumerate(lines): if(line.strip() == "Files:"): filesindex = index break;
if(filesindex == -1): # The changelist is empty endindex = index else: endindex = filesindex - 1
perforce_settings = sublime.load_settings('Perforce.sublime-settings') lines.insert(endindex , "\t{0}".format(input))
temp_changelist_description_file = open(os.path.join(tempfile.gettempdir(), "tempchangelist.txt"), 'w')
try: temp_changelist_description_file.write(perforce_settings.get('perforce_end_line_separator').join(lines)) finally: temp_changelist_description_file.close()
command = ConstructCommand('p4 change -i < {0}'.format(temp_changelist_description_file.name)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
# Clean up os.unlink(temp_changelist_description_file.name)
if(err): return 0, err
return 1, result
def PerforceCommandOnFile(in_command, in_folder, in_filename): command = ConstructCommand('p4 {0} "{1}"'.format(in_command, in_filename)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if(not err): return 1, result.strip() else: return 0, err.strip()
def WarnUser(message): perforce_settings = sublime.load_settings('Perforce.sublime-settings') if(perforce_settings.get('perforce_warnings_enabled')): if(perforce_settings.get('perforce_log_warnings_to_status')): sublime.status_message("Perforce [warning]: {0}".format(message)) else: print("Perforce [warning]: {0}".format(message))
def LogResults(success, message): if(success >= 0): print("Perforce: {0}".format(message)) else: WarnUser(message);
def IsFileWritable(in_filename): if(not in_filename): return 0
# if it doesn't exist, it's "writable" if(not os.path.isfile(in_filename)): return 1
filestats = os.stat(in_filename)[0]; if(filestats & stat.S_IWRITE): return 1 return 0
# Checkout sectiondef Checkout(in_filename): if(IsFileWritable(in_filename)): return -1, "File is already writable."
folder_name, filename = os.path.split(in_filename) isInDepot = IsFileInDepot(folder_name, filename)
if(isInDepot != 1): return -1, "File is not under the client root." # check out the file return PerforceCommandOnFile("edit", folder_name, in_filename); class PerforceAutoCheckout(sublime_plugin.EventListener): def on_modified(self, view): if(not view.file_name()): return
if(IsFileWritable(view.file_name())): return
perforce_settings = sublime.load_settings('Perforce.sublime-settings')
# check if this part of the plugin is enabled if(not perforce_settings.get('perforce_auto_checkout') or not perforce_settings.get('perforce_auto_checkout_on_modified')): return if(view.is_dirty()): success, message = Checkout(view.file_name()) LogResults(success, message);
def on_pre_save(self, view): perforce_settings = sublime.load_settings('Perforce.sublime-settings')
# check if this part of the plugin is enabled if(not perforce_settings.get('perforce_auto_checkout') or not perforce_settings.get('perforce_auto_checkout_on_save')): return if(view.is_dirty()): success, message = Checkout(view.file_name()) LogResults(success, message);
class PerforceCheckoutCommand(sublime_plugin.TextCommand): def run(self, edit): if(self.view.file_name()): success, message = Checkout(self.view.file_name()) LogResults(success, message) else: WarnUser("View does not contain a file")
# Add sectiondef Add(in_folder, in_filename): # add the file return PerforceCommandOnFile("add", in_folder, in_filename);
class PerforceAutoAdd(sublime_plugin.EventListener): preSaveIsFileInDepot = 0 def on_pre_save(self, view): # file already exists, no need to add if view.file_name() and os.path.isfile(view.file_name()): return
global global_folder global_folder, filename = os.path.split(view.file_name())
perforce_settings = sublime.load_settings('Perforce.sublime-settings')
self.preSaveIsFileInDepot = 0
# check if this part of the plugin is enabled if(not perforce_settings.get('perforce_auto_add')): WarnUser("Auto Add disabled") return
folder_name, filename = os.path.split(view.file_name())
if(not IsFolderUnderClientRoot(folder_name)): WarnUser("Adding file outside of clientspec, ignored for auto add") return
self.preSaveIsFileInDepot = IsFileInDepot(folder_name, filename)
def on_post_save(self, view): if(self.preSaveIsFileInDepot == -1): folder_name, filename = os.path.split(view.file_name()) success, message = Add(folder_name, filename) LogResults(success, message)
class PerforceAddCommand(sublime_plugin.TextCommand): def run(self, edit): if(self.view.file_name()): folder_name, filename = os.path.split(self.view.file_name())
if(IsFileInDepot(folder_name, filename)): success, message = Add(folder_name, filename) else: success = 0 message = "File is not under the client root."
LogResults(success, message) else: WarnUser("View does not contain a file")
# Rename sectiondef Rename(in_filename, in_newname): command = ConstructCommand('p4 integrate -d -t -Di -f "{0}" "{1}"'.format(in_filename, in_newname)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if(err): return 0, err.strip() command = ConstructCommand('p4 delete "{0}" "{1}"'.format(in_filename, in_newname)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if(not err): return 1, result.strip() else: return 0, err.strip()
class PerforceRenameCommand(sublime_plugin.WindowCommand): def run(self): # Get the description self.window.show_input_panel('New File Name', self.window.active_view().file_name(), self.on_done, self.on_change, self.on_cancel)
def on_done(self, input): success, message = Rename(self.window.active_view().file_name(), input) if(success): self.window.run_command('close') self.window.open_file(input)
LogResults(success, message)
def on_change(self, input): pass
def on_cancel(self): pass
# Delete sectiondef Delete(in_folder, in_filename): success, message = PerforceCommandOnFile("delete", in_folder, in_filename) if(success): # test if the file is deleted if(os.path.isfile(os.path.join(in_folder, in_filename))): success = 0
return success, message
class PerforceDeleteCommand(sublime_plugin.WindowCommand): def run(self): if(self.window.active_view().file_name()): folder_name, filename = os.path.split(self.window.active_view().file_name())
if(IsFileInDepot(folder_name, filename)): success, message = Delete(folder_name, filename) if(success): # the file was properly deleted on perforce, ask Sublime Text to close the view self.window.run_command('close'); else: success = 0 message = "File is not under the client root."
LogResults(success, message) else: WarnUser("View does not contain a file")
# Revert sectiondef Revert(in_folder, in_filename): # revert the file return PerforceCommandOnFile("revert", in_folder, in_filename);
class PerforceRevertCommand(sublime_plugin.TextCommand): def run_(self, edit_token, args): # revert cannot be called when an Edit object exists, manually handle the run routine if(self.view.file_name()): folder_name, filename = os.path.split(self.view.file_name())
if(IsFileInDepot(folder_name, filename)): success, message = Revert(folder_name, filename) if(success): # the file was properly reverted, ask Sublime Text to refresh the view self.view.run_command('revert'); else: success = 0 message = "File is not under the client root."
LogResults(success, message) else: WarnUser("View does not contain a file")
# Diff sectiondef Diff(in_folder, in_filename): # diff the file return PerforceCommandOnFile("diff", in_folder, in_filename);
class PerforceDiffCommand(sublime_plugin.TextCommand): def run(self, edit): if(self.view.file_name()): folder_name, filename = os.path.split(self.view.file_name())
if(IsFileInDepot(folder_name, filename)): success, message = Diff(folder_name, filename) else: success = 0 message = "File is not under the client root."
LogResults(success, message) else: WarnUser("View does not contain a file") # Graphical Diff With Depot sectionclass GraphicalDiffThread(threading.Thread): def __init__(self, in_folder, in_filename, in_endlineseparator, in_command): self.folder = in_folder self.filename = in_filename self.endlineseparator = in_endlineseparator self.command = in_command threading.Thread.__init__(self)
def run(self): success, content = PerforceCommandOnFile("print", self.folder, self.filename) if(not success): return 0, content
# Create a temporary file to hold the depot version depotFileName = "depot"+self.filename tmp_file = open(os.path.join(tempfile.gettempdir(), depotFileName), 'w')
# Remove the first two lines of content linebyline = content.splitlines(); content=self.endlineseparator.join(linebyline[1:]);
try: tmp_file.write(content) finally: tmp_file.close()
# Launch P4Diff with both files and the same arguments P4Win passes it diffCommand = self.command diffCommand = diffCommand.replace('%depotfile_path', tmp_file.name) diffCommand = diffCommand.replace('%depotfile_name', depotFileName) diffCommand = diffCommand.replace('%file_path', os.path.join(self.folder, self.filename)) diffCommand = diffCommand.replace('%file_name', self.filename)
command = ConstructCommand(diffCommand) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
# Clean up os.unlink(tmp_file.name);
def GraphicalDiffWithDepot(self, in_folder, in_filename): perforce_settings = sublime.load_settings('Perforce.sublime-settings') diffcommand = perforce_settings.get('perforce_selectedgraphicaldiffapp_command') if not diffcommand: diffcommand = perforce_settings.get('perforce_default_graphical_diff_command') GraphicalDiffThread(in_folder, in_filename, perforce_settings.get('perforce_end_line_separator'), diffcommand).start()
return 1, "Launching thread for Graphical Diff"
class PerforceGraphicalDiffWithDepotCommand(sublime_plugin.TextCommand): def run(self, edit): if(self.view.file_name()): folder_name, filename = os.path.split(self.view.file_name())
if(IsFileInDepot(folder_name, filename)): success, message = GraphicalDiffWithDepot(self, folder_name, filename) else: success = 0 message = "File is not under the client root."
LogResults(success, message) else: WarnUser("View does not contain a file")
class PerforceSelectGraphicalDiffApplicationCommand(sublime_plugin.WindowCommand): def run(self): diffapps = [] if os.path.exists(perforceplugin_dir + os.sep + 'graphicaldiffapplications.json'): f = open(perforceplugin_dir + os.sep + 'graphicaldiffapplications.json') applications = json.load(f) f.close()
for entry in applications.get('applications'): formattedentry = [] formattedentry.append(entry.get('name')) formattedentry.append(entry.get('exename')) diffapps.append(formattedentry)
self.window.show_quick_panel(diffapps, self.on_done) def on_done(self, picked): if picked == -1: return f = open(perforceplugin_dir + os.sep + 'graphicaldiffapplications.json') applications = json.load(f) entry = applications.get('applications')[picked] f.close()
sublime.status_message(__name__ + ': Please make sure that {0} is reachable - you might need to restart Sublime Text 2.'.format(entry['exename']))
settings = sublime.load_settings('Perforce.sublime-settings') settings.set('perforce_selectedgraphicaldiffapp', entry['name']) settings.set('perforce_selectedgraphicaldiffapp_command', entry['diffcommand']) sublime.save_settings('Perforce.sublime-settings')
# List Checked Out Files sectionclass ListCheckedOutFilesThread(threading.Thread): def __init__(self, window): self.window = window threading.Thread.__init__(self)
def ConvertFileNameToFileOnDisk(self, in_filename): clientroot = GetClientRoot(os.path.dirname(in_filename)) if(clientroot == -1): return 0
if(clientroot == "null"): return in_filename;
filename = clientroot + os.sep + in_filename.replace('\\', os.sep).replace('/', os.sep)
return filename
def MakeFileListFromChangelist(self, in_changelistline): files_list = [] currentuser = GetUserFromClientspec() # Launch p4 opened to retrieve all files from changelist command = ConstructCommand('p4 opened -c {0} -u {1}'.format(in_changelistline[1], currentuser)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8") if(not err): lines = result.splitlines() for line in lines: # remove the change # poundindex = line.rfind('#') cleanedfile = line[0:poundindex]
# just keep the filename cleanedfile = '/'.join(cleanedfile.split('/')[3:])
file_entry = [cleanedfile[cleanedfile.rfind('/')+1:]] file_entry.append("Changelist: {0}".format(in_changelistline[1])) file_entry.append(' '.join(in_changelistline[7:])); localfile = self.ConvertFileNameToFileOnDisk(cleanedfile) if(localfile != 0): file_entry.append(localfile) files_list.append(file_entry)
return files_list
def MakeCheckedOutFileList(self): files_list = self.MakeFileListFromChangelist(['','default','','','','','','Default Changelist']);
currentuser = GetUserFromClientspec() if(currentuser == -1): return files_list
# Launch p4 changes to retrieve all the pending changelists command = ConstructCommand('p4 changes -s pending -u {0}'.format(currentuser));
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if(not err): changelists = result.splitlines()
# for each line, extract the change, and run p4 opened on it to list all the files for changelistline in changelists: changelistlinesplit = changelistline.split(' ') files_list.extend(self.MakeFileListFromChangelist(changelistlinesplit))
return files_list
def run(self): self.files_list = self.MakeCheckedOutFileList()
def show_quick_panel(): if not self.files_list: sublime.error_message(__name__ + ': There are no checked out files to list.') return self.window.show_quick_panel(self.files_list, self.on_done) sublime.set_timeout(show_quick_panel, 10)
def on_done(self, picked): if picked == -1: return file_name = self.files_list[picked][3]
def open_file(): self.window.open_file(file_name) sublime.set_timeout(open_file, 10)
class PerforceListCheckedOutFilesCommand(sublime_plugin.WindowCommand): def run(self): ListCheckedOutFilesThread(self.window).start()
# Create Changelist sectiondef CreateChangelist(description): # First, create an empty changelist, we will then get the cl number and set the description command = ConstructCommand('p4 change -o') p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if(err): return 0, err
# Find the description field and modify it desclabel = 'Description:{0}'.format(os.linesep) descindex = result.find(desclabel) + len(desclabel) descend = result.find(os.linesep*2, descindex) result = '{0}\t{1}{2}'.format(result[0:descindex], description, result[descend:])
# Remove all files from the query, we want them to stay in Default filesindex = result.rfind("Files:") # The Files: section we want to get rid of is only present if there's files in the default changelist if(filesindex > 640): result = result[0:filesindex];
temp_changelist_description_file = open(os.path.join(tempfile.gettempdir(), "tempchangelist.txt"), 'w')
try: temp_changelist_description_file.write(result) finally: temp_changelist_description_file.close()
command = ConstructCommand('p4 change -i < {0}'.format(temp_changelist_description_file.name)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
# Clean up os.unlink(temp_changelist_description_file.name)
if(err): return 0, err
return 1, result
class PerforceCreateChangelistCommand(sublime_plugin.WindowCommand): def run(self): # Get the description self.window.show_input_panel('Changelist Description', '', self.on_done, self.on_change, self.on_cancel)
def on_done(self, input): success, message = CreateChangelist(input) LogResults(success, message)
def on_change(self, input): pass
def on_cancel(self): pass
# Move Current File to Changelistdef MoveFileToChangelist(in_filename, in_changelist): folder_name, filename = os.path.split(in_filename)
command = ConstructCommand('p4 reopen -c {0} "{1}"'.format(in_changelist, filename)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if(err): return 0, err return 1, result
class ListChangelistsAndMoveFileThread(threading.Thread): def __init__(self, window): self.window = window self.view = window.active_view() threading.Thread.__init__(self)
def MakeChangelistsList(self): success, rawchangelists = GetPendingChangelists();
resultchangelists = ['New', 'Default'];
if(success): changelists = rawchangelists.splitlines()
# for each line, extract the change for changelistline in changelists: changelistlinesplit = changelistline.split(' ') # Insert at two because we receive the changelist in the opposite order and want to keep new and default on top resultchangelists.insert(2, "Changelist {0} - {1}".format(changelistlinesplit[1], ' '.join(changelistlinesplit[7:])))
return resultchangelists
def run(self): self.changelists_list = self.MakeChangelistsList() def show_quick_panel(): if not self.changelists_list: sublime.error_message(__name__ + ': There are no changelists to list.') return self.window.show_quick_panel(self.changelists_list, self.on_done)
sublime.set_timeout(show_quick_panel, 10)
def on_done(self, picked): if picked == -1: return changelistlist = self.changelists_list[picked].split(' ')
def move_file(): changelist = 'Default' if(len(changelistlist) > 1): # Numbered changelist changelist = changelistlist[1] else: changelist = changelistlist[0]
if(changelist == 'New'): # Special Case self.window.show_input_panel('Changelist Description', '', self.on_description_done, self.on_description_change, self.on_description_cancel) else: success, message = MoveFileToChangelist(self.view.file_name(), changelist.lower()) LogResults(success, message);
sublime.set_timeout(move_file, 10)
def on_description_done(self, input): success, message = CreateChangelist(input) if(success == 1): # Extract the changelist name from the message changelist = message.split(' ')[1] # Move the file success, message = MoveFileToChangelist(self.view.file_name(), changelist)
LogResults(success, message) def on_description_change(self, input): pass
def on_description_cancel(self): pass
class PerforceMoveCurrentFileToChangelistCommand(sublime_plugin.WindowCommand): def run(self): # first, test if the file is under the client root folder_name, filename = os.path.split(self.window.active_view().file_name()) isInDepot = IsFileInDepot(folder_name, filename)
if(isInDepot != 1): WarnUser("File is not under the client root.") return 0
ListChangelistsAndMoveFileThread(self.window).start()
# Add Line to Changelist Descriptionclass AddLineToChangelistDescriptionThread(threading.Thread): def __init__(self, window): self.window = window self.view = window.active_view() threading.Thread.__init__(self)
def MakeChangelistsList(self): success, rawchangelists = GetPendingChangelists();
resultchangelists = [];
if(success): changelists = rawchangelists.splitlines()
# for each line, extract the change, and run p4 opened on it to list all the files for changelistline in changelists: changelistlinesplit = changelistline.split(' ') # Insert at zero because we receive the changelist in the opposite order # Might be more efficient to sort... changelist_entry = ["Changelist {0}".format(changelistlinesplit[1])] changelist_entry.append(' '.join(changelistlinesplit[7:])); resultchangelists.insert(0, changelist_entry)
return resultchangelists
def run(self): self.changelists_list = self.MakeChangelistsList() def show_quick_panel(): if not self.changelists_list: sublime.error_message(__name__ + ': There are no changelists to list.') return self.window.show_quick_panel(self.changelists_list, self.on_done)
sublime.set_timeout(show_quick_panel, 10)
def on_done(self, picked): if picked == -1: return changelistlist = self.changelists_list[picked][0].split(' ')
def get_description_line(): self.changelist = changelistlist[1] self.window.show_input_panel('Changelist Description', '', self.on_description_done, self.on_description_change, self.on_description_cancel)
sublime.set_timeout(get_description_line, 10)
def on_description_done(self, input): success, message = AppendToChangelistDescription(self.changelist, input) LogResults(success, message) def on_description_change(self, input): pass
def on_description_cancel(self): pass
class PerforceAddLineToChangelistDescriptionCommand(sublime_plugin.WindowCommand): def run(self): AddLineToChangelistDescriptionThread(self.window).start()
# Submit sectionclass SubmitThread(threading.Thread): def __init__(self, window): self.window = window self.view = window.active_view() threading.Thread.__init__(self)
def MakeChangelistsList(self): success, rawchangelists = GetPendingChangelists();
resultchangelists = ['Default'];
currentuser = GetUserFromClientspec(); command = ConstructCommand('p4 opened -c default -u {0}'.format(currentuser)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if err: resultchangelists.pop()
if success: changelists = rawchangelists.splitlines()
# for each line, extract the change for changelistline in changelists: changelistlinesplit = changelistline.split(' ') # Insert at two because we receive the changelist in the opposite order and want to keep default on top resultchangelists.insert(1, "Changelist {0} - {1}".format(changelistlinesplit[1], ' '.join(changelistlinesplit[7:])))
return resultchangelists
def run(self): self.changelists_list = self.MakeChangelistsList() def show_quick_panel(): if not self.changelists_list: sublime.error_message(__name__ + ': There are no changelists to list.') return self.window.show_quick_panel(self.changelists_list, self.on_done)
sublime.set_timeout(show_quick_panel, 10)
def on_done(self, picked): if picked == -1: return changelist = self.changelists_list[picked] changelistsections = changelist.split(' ')
command = '' # Check in the selected changelist if changelistsections[0] != 'Default': command = ConstructCommand('p4 submit -c {0}'.format(changelistsections[1])) else: command = ConstructCommand('p4 submit') p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8") def on_description_change(self, input): pass
def on_description_cancel(self): pass
class PerforceSubmitCommand(sublime_plugin.WindowCommand): def run(self): SubmitThread(self.window).start()
class PerforceLogoutCommand(sublime_plugin.WindowCommand): def run(self): try: command = ConstructCommand("p4 set P4PASSWD=") p = subprocess.Popen(command, stdin=subprocess.PIPE,stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) p.communicate() except ValueError: pass
class PerforceLoginCommand(sublime_plugin.WindowCommand): def run(self): self.window.show_input_panel("Enter Perforce Password", "", self.on_done, None, None)
def on_done(self, password): try: command = ConstructCommand("p4 logout") p = subprocess.Popen(command, stdin=subprocess.PIPE,stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) p.communicate() #unset var command = ConstructCommand("p4 set P4PASSWD={0}".format(password)) p = subprocess.Popen(command, stdin=subprocess.PIPE,stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) p.communicate() except ValueError: pass
class PerforceUnshelveClCommand(sublime_plugin.WindowCommand): def run(self): try: ShelveClCommand(self.window, False).start() except: WarnUser("Unknown Error, does the included P4 Version support Shelve?") return -1class PerforceShelveClCommand(sublime_plugin.WindowCommand): def run(self): try: ShelveClCommand(self.window, True).start() except: WarnUser("Unknown Error, does the included P4 Version support Shelve?") return -1
class ShelveClCommand(threading.Thread): def __init__(self, window, shelve=True): self.shelve = shelve self.window = window threading.Thread.__init__(self)
def run(self): self.changelists_list = self.MakeChangelistsList() def show_quick_panel(): if not self.changelists_list: sublime.error_message(__name__ + ': There are no changelists to list.') return self.window.show_quick_panel(self.changelists_list, self.on_done)
sublime.set_timeout(show_quick_panel, 10)
def on_done(self, picked): if picked == -1: return changelistlist = self.changelists_list[picked].split(' ')
changelist = 'Default' if(len(changelistlist) > 1): # Numbered changelist changelist = changelistlist[1] else: changelist = changelistlist[0] if self.shelve: cmdString = "shelve -c{0}".format(changelist) else: cmdString = "unshelve -s{0} -f".format(changelist) command = ConstructCommand("p4 {0}".format(cmdString)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if(err): WarnUser("usererr {0}".format(err.strip())) return -1
def MakeChangelistsList(self): success, rawchangelists = GetPendingChangelists();
resultchangelists = []
if(success): changelists = rawchangelists.splitlines()
# for each line, extract the change for changelistline in changelists: changelistlinesplit = changelistline.split(' ') resultchangelists.insert(0, "Changelist {0} - {1}".format(changelistlinesplit[1], ' '.join(changelistlinesplit[7:])))
return resultchangelists | [
"thaisnviana@gmail.com"
] | thaisnviana@gmail.com |
39cd88f0f87221d1954869382294290420c184ff | f8f88c947975c569025f7c7ffd35014ecfc7f639 | /pyThosT/__init__.py | edd16243957c8e5f520fe363c9bdea579a00af5b | [] | no_license | afalaize/exchange | 16dbafbf2bff63c57112474184de4984a7eb7927 | 135f8ccf97ff88f1fe7c7c38fe0ce499d5ed9186 | refs/heads/master | 2020-07-05T05:04:55.806692 | 2017-03-14T09:41:27 | 2017-03-14T09:41:28 | 74,126,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 18 16:00:46 2017
@author: afalaize
"""
# | [
"noreply@github.com"
] | noreply@github.com |
d841badd34ccdcfe5480954322d0517052ffb407 | 23471682c3664940ed3af33908087c700b0e2da2 | /ControlVentas/src/accounts/views.py | 41a8c771d85344d71d6f7ca75e2b257bf199f4c3 | [] | no_license | MarianaYucra/ProyectoFinal | 0fa7a2992208573bb13521d4846d99a738b9494e | 30b1345d1a4aa37ad106532c6d55c2e733d85ff1 | refs/heads/master | 2022-11-30T13:21:27.325188 | 2020-08-13T14:35:21 | 2020-08-13T14:35:21 | 278,252,876 | 0 | 1 | null | 2020-07-12T00:27:58 | 2020-07-09T03:25:57 | Python | UTF-8 | Python | false | false | 1,956 | py | from django.shortcuts import render,redirect
from django.contrib import messages
from django.contrib.auth.models import User,auth
# Create your views here.
def login(request):
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = auth.authenticate(username=username,password=password)
if user is not None:
auth.login(request,user)
if user.is_superuser:
return redirect("/administrar")
else:
return redirect("/")
else:
messages.info(request,"ERROR: revice usuario y constraseña")
return redirect('login')
else:
return render(request,'accounts/login.html')
def register(request):
if request.method == 'POST':
first_name = request.POST['first_name']
last_name = request.POST['last_name']
username = request.POST['username']
email = request.POST['email']
password1 = request.POST['password1']
password2 = request.POST['password2']
if password1 == password2:
if User.objects.filter(username=username).exists():
messages.info(request,'nombre de usuario usado')
return redirect('register')
elif User.objects.filter(email=email).exists():
messages.info(request,'correo ya registrado')
return redirect('register')
else:
user = User.objects.create_user(username=username, password=password1,email=email,first_name=first_name,last_name=last_name)
user.save()
return redirect('login')
else:
messages.info(request,'contraseñas no coinciden ')
return redirect('register')
else:
return render(request,'accounts/register.html')
def logout(request):
auth.logout(request)
return redirect('/')
| [
"whuaracha@unsa.edu.pe"
] | whuaracha@unsa.edu.pe |
75c1ffff51472f9e8835840089ae9d5bf686ca71 | 7a2b129b1ba60efa5a525c1e5e499dca69283ee9 | /Selenium/cppic.py | 0e43b850330c9c488318b0ad04c7781658f87d3d | [] | no_license | shyfeel/myTestCode | 364bc8ff19d9e588892090c165399f726fda2987 | a5a833eeafd72f228840cc8c2596f4613df0bd61 | refs/heads/master | 2021-01-25T07:28:57.140923 | 2018-07-31T03:05:49 | 2018-07-31T03:05:49 | 39,937,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | import cv2
import numpy as np
img = cv2.imread("./cat.jpg")
emptyImage = np.zeros(img.shape, np.uint8)
emptyImage2 = img.copy()
emptyImage3=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
#emptyImage3[...]=0
cv2.imshow("EmptyImage", emptyImage)
cv2.imshow("Image", img)
cv2.imshow("EmptyImage2", emptyImage2)
cv2.imshow("EmptyImage3", emptyImage3)
cv2.imwrite("./cat2.jpg", img, [int(cv2.IMWRITE_JPEG_QUALITY), 5])
cv2.imwrite("./cat3.jpg", img, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
cv2.imwrite("./cat.png", img, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])
cv2.imwrite("./cat2.png", img, [int(cv2.IMWRITE_PNG_COMPRESSION), 9])
cv2.waitKey (0)
cv2.destroyAllWindows() | [
"shyfeel@qq.com"
] | shyfeel@qq.com |
b2ccb7f564008491a78614bd236d79beac6db0d3 | ce57fdf78884d99aacb3936f59146af81a56ccea | /scripts/global_distribution.py | 65dce48ed6346348dffcdd22d5a627db2a34c94c | [] | no_license | MenggeZhao/LinkedSV | 8f9c38cccfd5e8fa6575e755da2fa59c7dbd72dd | cb09730f55266931308c770c9e2d0056050fd5a6 | refs/heads/master | 2020-04-29T04:29:21.923274 | 2019-03-07T21:17:19 | 2019-03-07T21:17:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,093 | py | #!/usr/bin/env python
import os
import sys
from bed import *
from my_utils import *
import numpy as np
from fragment import *
import math
import subprocess
from sys import getrefcount
import cluster_reads
tab = '\t'
endl = '\n'
arg = sys.argv[1:]
def main():
args, dbo_args, endpoint_args = parse_user_arguments()
target_bcd22_file = endpoint_args.tmpbcd22_file
estimate_global_distribution(args, dbo_args, endpoint_args, target_bcd22_file, is_fast_mode = False)
return
def read_global_distribution_file(args, endpoint_args):
    """Load previously computed global distribution parameters.

    Reads args.global_distribution_file (tab-separated ``key<TAB>value``
    lines) and stores each recognized value as an attribute of ``args`` --
    except ``min_frag_length``, which belongs on ``endpoint_args``.
    Unknown keys are ignored; if a key occurs twice, the later line wins
    (same semantics as the original elif chain).
    """
    # Keys whose values are stored as ints on args.
    int_keys = {
        'num_reads_genome', 'num_reads_ontarget', 'num_reads_offtarget',
        'total_num_fragment', 'total_num_bcd', 'genome_length',
        'target_region_length',
    }
    # Keys whose values are stored as floats on args.
    float_keys = {
        'fragment_length_lmda', 'median_fragment_length',
        'mean_fragment_length', 'gap_distance_lmda',
        'gap_distance500', 'gap_distance750', 'gap_distance900',
        'gap_distance950', 'gap_distance990', 'gap_distance999',
        'mean_num_fragment_per_bcd', 'median_num_fragment_per_bcd',
        'read_per_bp_genome', 'read_per_bp_ontarget',
        'read_per_bp_offtarget', 'gap_distance_cutoff',
    }
    # 'with' guarantees the handle is closed even if a line fails to parse
    # (the original open()/close() pair leaked the handle on exceptions).
    with open(args.global_distribution_file, 'r') as global_dist_fp:
        for line in global_dist_fp:
            key, value = line.strip().split('\t')[0:2]
            if key == 'min_frag_length':
                endpoint_args.min_frag_length = int(value)
            elif key in int_keys:
                setattr(args, key, int(value))
            elif key in float_keys:
                setattr(args, key, float(value))
    return
def get_num_reads_from_satistics_file(stat_file):
    """Extract 'number of effective reads' from a statistics file.

    The file contains ``key = value`` lines; lines without '=' are skipped.
    Returns 0 when the key is absent; if the key occurs several times the
    last occurrence wins. Exits the program when the file does not exist.
    (Function name typo is preserved: callers use this exact name.)
    """
    if not os.path.exists(stat_file):
        myprint('ERROR! statistics file (%s) does not exist!' % stat_file)
        sys.exit()
    num_reads = 0
    # Stream line-by-line under 'with' instead of slurping the whole file
    # into a list and closing by hand.
    with open(stat_file, 'r') as stat_fp:
        for line in stat_fp:
            if '=' not in line:
                continue
            parts = line.split('=')
            if parts[0].strip() == 'number of effective reads':
                num_reads = int(parts[1].strip())
    return num_reads
def estimate_global_distribution(args, dbo_args, endpoint_args, target_bcd22_file, is_fast_mode = False):
    """Compute the global distribution statistics and write them to
    args.global_distribution_file as tab-separated key/value lines.

    In non-fast WES mode (is_fast_mode == False and args.is_wgs == False)
    it additionally intersects reads with the target-region BED via
    bedtools to split on-target / off-target read counts.
    In fast mode the output file is deleted again at the end, so only the
    attributes set on args/endpoint_args survive.
    """
    myprint('calculating distribution parameters')
    global_dist_fp = open(args.global_distribution_file, 'w')
    # Total read count comes from a pre-computed statistics file.
    args.num_reads_genome = get_num_reads_from_satistics_file(args.stat_file)
    myprint('total number of reads in the genome is: %d' % args.num_reads_genome)
    global_dist_fp.write('num_reads_genome\t%d\n' % args.num_reads_genome)
    if is_fast_mode == False and args.is_wgs == False:
        ### get reads in target region ###
        target_region_bed_file = os.path.abspath(args.target_region_bed)
        target_region_bed_file_name = os.path.split(target_region_bed_file)[1]
        target_region_tidbed_file = os.path.join(args.out_dir, target_region_bed_file_name + '.tidbed')
        # Convert chromosome names to target ids so bedtools can intersect
        # against the bcd21 coordinates.
        bed2tidbed_file(target_region_bed_file, args.chrname2tid, target_region_tidbed_file)
        cmd = '%s intersect -u -a %s -b %s | gzip --fast - > %s ' % (args.bedtools, endpoint_args.bcd21_file, target_region_tidbed_file, endpoint_args.bcd21_file_of_target_region)
        myprint('running command: %s' % cmd)
        os.system(cmd)
        # An empty result file means the intersection failed.
        if os.path.getsize(endpoint_args.bcd21_file_of_target_region) <= 0:
            myprint('failed to get reads in target region')
            sys.exit()
        args.num_reads_ontarget = cluster_reads.calculate_num_reads_from_bcd21_file(endpoint_args.bcd21_file_of_target_region, args.min_mapq)
        args.num_reads_offtarget = args.num_reads_genome - args.num_reads_ontarget
        global_dist_fp.write('num_reads_ontarget\t%d\n' % args.num_reads_ontarget)
        global_dist_fp.write('num_reads_offtarget\t%d\n' % args.num_reads_offtarget)
    # Fragment-length / gap-distance statistics (writes more key/value lines).
    get_fragment_parameters(args, dbo_args, endpoint_args, global_dist_fp, target_bcd22_file, is_fast_mode)
    cal_gap_distance_cutoff(args, dbo_args, endpoint_args)
    global_dist_fp.write('gap_distance_cutoff\t%.10f\n' % args.gap_distance_cutoff)
    global_dist_fp.close()
    if is_fast_mode == False:
        args.global_distribution_calculated = True
    if is_fast_mode == True:
        # Fast mode is a throwaway estimate: discard the file on disk.
        if os.path.exists(args.global_distribution_file): os.remove(args.global_distribution_file)
    gc.collect()
    return
def cal_gap_distance_cutoff(args, dbo_args, endpoint_args):
    """Set args.gap_distance_cutoff: the 99% quantile of a geometric
    read-gap model, clamped to a floor (5 kb for WGS, 25 kb for WES),
    unless the user supplied an explicit cutoff above 5 kb."""
    quantile = 0.99
    geometric_cut = math.log(1.0 - quantile) / math.log(1.0 - args.read_per_bp_genome)
    floor = 5000 if args.is_wgs else 25000
    cutoff = max(floor, geometric_cut)
    # An explicit user-supplied value (above the 5 kb sanity floor) wins.
    if args.user_defined_gap_distance_cut_off > 5000:
        cutoff = args.user_defined_gap_distance_cut_off
    args.gap_distance_cutoff = cutoff
    return
def fit_geometric_distribution(length_list, readpair=True):
    """Fit the success probability of a geometric distribution to the
    observed lengths by matching several upper quantiles, returning the
    median of the per-quantile estimates. With readpair=True the sample
    quantiles are shifted upward relative to the target CDF values."""
    target_cdfs = [90.0, 92.0, 94.0, 96.0, 98.0]
    sample_cdfs = [95.0, 96.0, 97.0, 98.0, 99.0] if readpair else target_cdfs
    quantiles = np.percentile(length_list, sample_cdfs)
    # Invert the geometric CDF at each quantile: F(k) = 1 - (1 - p)^(k + 1).
    estimates = [
        1.0 - (1.0 - cdf / 100.0) ** (1.0 / (q + 1.0))
        for cdf, q in zip(target_cdfs, quantiles)
    ]
    return np.median(estimates)
def get_fragment_parameters (args, dbo_args, endpoint_args, global_dist_fp, target_bcd22_file, is_fast_mode):
    """Two-pass scan of the bcd22 fragment file.

    Pass 1: collect all fragment lengths to derive N50/N95/N98/N99 and the
    minimum usable fragment length. Pass 2: restricted to fragments at
    least that long, accumulate per-barcode fragment counts and read-gap
    distances, then write all derived statistics (as tab-separated
    key/value lines) to the already-open global_dist_fp and set the same
    values as attributes on args / endpoint_args.
    """
    frm_length_list = list()
    myprint ('calculating fragment parameters from file: %s' % target_bcd22_file)
    bcd22_fp = open(target_bcd22_file, 'r')
    # --- pass 1: every fragment's length (lines starting '#' are headers) ---
    while 1:
        line = bcd22_fp.readline()
        if not line: break
        if line[0] == '#': continue
        line = line.strip().split(tab)
        frm = Fragment(line)
        frm_length_list.append(frm.length)
    N50_length, N95_length, N98_length, N99_length, total_length = calculate_length_statistics(frm_length_list)
    global_dist_fp.write('N50_fragment_length\t%d\n' % N50_length)
    global_dist_fp.write('N95_fragment_length\t%d\n' % N95_length)
    global_dist_fp.write('N98_fragment_length\t%d\n' % N98_length)
    global_dist_fp.write('N99_fragment_length\t%d\n' % N99_length)
    myprint ('N95_fragment_length is: %d' % N95_length)
    # Minimum fragment length: N95 with a data-type-specific floor.
    if args.is_wgs:
        endpoint_args.min_frag_length = max(N95_length, 5000) # at least 5000 for WGS data
    else:
        endpoint_args.min_frag_length = max(N95_length, 2000) # at least 2000 for WES data
    # A user-supplied value overrides the estimate.
    if args.user_defined_min_frag_length > 0:
        endpoint_args.min_frag_length = args.user_defined_min_frag_length
    global_dist_fp.write('min_frag_length\t%d\n' % endpoint_args.min_frag_length)
    # Geometric fit of the fragment-length distribution itself.
    args.fragment_length_lmda = fit_geometric_distribution(frm_length_list, readpair = False)
    global_dist_fp.write ('fragment_length_lmda\t%.20f\n' % args.fragment_length_lmda)
    # Rewind for pass 2.
    bcd22_fp.seek(0, 0)
    bcd_count_dict = dict()
    total_num_reads_in_fragment = 0
    total_num_fragment = 0
    total_gap_distance_list = list()
    frm_length_list = list()
    # --- pass 2: only fragments >= min_frag_length contribute ---
    while 1:
        line = bcd22_fp.readline()
        if not line: break
        if line[0] == '#': continue
        line = line.strip().split(tab)
        frm = Fragment(line)
        if frm.length < endpoint_args.min_frag_length: continue
        frm_length_list.append(frm.length)
        if frm.bcd not in bcd_count_dict:
            bcd_count_dict[frm.bcd] = 1
        else:
            bcd_count_dict[frm.bcd] += 1
        total_num_reads_in_fragment += frm.num_reads
        total_num_fragment += 1
        # Cap the gap-distance sample at 1e7 entries to bound memory use.
        if len(total_gap_distance_list) < int(1e7): total_gap_distance_list += frm.gap_distances()
    bcd22_fp.close()
    args.median_fragment_length = np.median(frm_length_list)
    args.mean_fragment_length = np.mean(frm_length_list)
    global_dist_fp.write('median_fragment_length\t%.10f\n' % args.median_fragment_length)
    global_dist_fp.write('mean_fragment_length\t%.10f\n' % args.mean_fragment_length)
    # Geometric fit of the within-fragment read gap distances.
    args.gap_distance_lmda = fit_geometric_distribution(total_gap_distance_list, readpair=True)
    global_dist_fp.write('gap_distance_lmda\t%.20f\n' % args.gap_distance_lmda)
    # Empirical gap-distance quantiles (50% .. 99.9%).
    q = [50, 75, 90, 95, 99, 99.9]
    quantile_nparray = np.percentile(total_gap_distance_list, q)
    args.gap_distance500 = quantile_nparray[0]
    args.gap_distance750 = quantile_nparray[1]
    args.gap_distance900 = quantile_nparray[2]
    args.gap_distance950 = quantile_nparray[3]
    args.gap_distance990 = quantile_nparray[4]
    args.gap_distance999 = quantile_nparray[5]
    global_dist_fp.write('gap_distance500\t%.10f\n' % args.gap_distance500)
    global_dist_fp.write('gap_distance750\t%.10f\n' % args.gap_distance750)
    global_dist_fp.write('gap_distance900\t%.10f\n' % args.gap_distance900)
    global_dist_fp.write('gap_distance950\t%.10f\n' % args.gap_distance950)
    global_dist_fp.write('gap_distance990\t%.10f\n' % args.gap_distance990)
    global_dist_fp.write('gap_distance999\t%.10f\n' % args.gap_distance999)
    num_barcode = len(bcd_count_dict)
    # Without at least one barcode the per-barcode stats are undefined.
    if num_barcode < 1:
        myprint ('ERROR! no valid barcode is found')
        sys.exit()
    num_fragment_per_bcd_list = list()
    for bcd in bcd_count_dict:
        num_fragment_per_bcd_list.append(bcd_count_dict[bcd])
    args.mean_num_fragment_per_bcd = np.mean(num_fragment_per_bcd_list)
    args.median_num_fragment_per_bcd = np.median(num_fragment_per_bcd_list)
    args.total_num_fragment = total_num_fragment
    args.total_num_bcd = num_barcode
    global_dist_fp.write('mean_num_fragment_per_bcd\t%.10f\n' % args.mean_num_fragment_per_bcd)
    global_dist_fp.write('median_num_fragment_per_bcd\t%.10f\n' % args.median_num_fragment_per_bcd)
    global_dist_fp.write('total_num_fragment\t%d\n' % args.total_num_fragment)
    global_dist_fp.write('total_num_bcd\t%d\n' % args.total_num_bcd)
    # The fitted gap-distance rate doubles as reads-per-bp over the genome.
    args.read_per_bp_genome = args.gap_distance_lmda
    args.genome_length = calculate_genome_length(args.faidx_file)
    global_dist_fp.write('read_per_bp_genome\t%.20f\n' % args.read_per_bp_genome)
    global_dist_fp.write('genome_length\t%d\n' % args.genome_length)
    # WES only: split read density into on-target / off-target rates.
    if is_fast_mode == False and args.is_wgs == False:
        args.target_region_length = calculate_bed_length(args.target_region_bed)
        global_dist_fp.write('target_region_length\t%d\n' % args.target_region_length)
        off_target_length = args.genome_length - args.target_region_length
        # b = fraction of reads that fall on target.
        b = float(args.num_reads_ontarget) / float(args.num_reads_ontarget + args.num_reads_offtarget)
        args.read_per_bp_ontarget = b * float(args.genome_length) / args.target_region_length * args.read_per_bp_genome
        args.read_per_bp_offtarget = (1.0-b) * float(args.genome_length) / off_target_length * args.read_per_bp_genome
        global_dist_fp.write('read_per_bp_ontarget\t%.20f\n' % args.read_per_bp_ontarget)
        global_dist_fp.write('read_per_bp_offtarget\t%.20f\n' % args.read_per_bp_offtarget)
    # Free the large intermediate collections before returning.
    del total_gap_distance_list
    del bcd_count_dict
    del frm_length_list
    myprint('finished getting fragment parameters')
    return
def calculate_length_statistics(length_list):
    """Return (N50, N95, N98, N99, total_length) for the given lengths.

    Nxx is the length at which the cumulative sum (largest-first) first
    exceeds xx% of the total; -1 would only remain if the threshold were
    never crossed. NOTE: sorts length_list in place, descending, exactly
    like the original implementation.
    """
    length_list.sort(reverse=True)
    total = float(sum(length_list))
    thresholds = [0.50, 0.95, 0.98, 0.99]
    results = [-1, -1, -1, -1]
    running = 0
    for length in length_list:
        running += length
        for idx, frac in enumerate(thresholds):
            if results[idx] == -1 and running > frac * total:
                results[idx] = length
    return results[0], results[1], results[2], results[3], total
# Script entry point: compute the global distribution parameters.
if __name__ == '__main__':
    main()
| [
"fangli0813@gmail.com"
] | fangli0813@gmail.com |
b7aca8b75628ed479dd4d1e1a071dc321a3000c5 | 45a2cce962a0fa3231fb206e3f2121e675ad6fcd | /get_commit.py | 5aabdd5f2a164375409b033d2ac2f9ba735457b1 | [] | no_license | relentless-coder/github_to_s3 | 9d461a1ecb2c3ab92eafff552ffe19eb2a97f4e3 | 25dc8b18703ad43a1124ba522d79b930d2644b57 | refs/heads/master | 2022-12-11T08:34:34.514646 | 2021-01-20T10:43:33 | 2021-01-20T10:43:33 | 126,624,681 | 2 | 0 | null | 2022-12-08T02:04:17 | 2018-03-24T17:44:01 | Python | UTF-8 | Python | false | false | 882 | py | """Fetches the commit for a commit_id"""
import os
from request_handler import request_handler
from error import ResponseError
def get_commit(commit_id):
    """Return the GitHub API JSON for a single commit.

    Arguments:
    commit_id - a valid id (SHA) of the commit

    Reads GITHUB_USER, GITHUB_REPO and GITHUB_TOKEN from the environment
    (raising KeyError if any is unset). Raises ResponseError when the API
    responds with a non-200 status.
    """
    # Note: '{:1}', '{:2}', '{:3}' are format SPECS (minimum field widths),
    # not positional indices; .format still substitutes positionally.
    url = 'https://api.github.com/repos/{:1}/{:2}/commits/{:3}'\
        .format(os.environ['GITHUB_USER'], os.environ['GITHUB_REPO'],
                commit_id)
    # GitHub requires a User-Agent header; auth uses a personal token.
    headers = {'Authorization': 'token {:1}'
               .format(os.environ['GITHUB_TOKEN']),
               'User-Agent': 'my_api'
               }
    received_request = request_handler('get', url, headers=headers)
    if received_request.status_code != 200:
        # Surface GitHub's own error message to the caller.
        raise ResponseError(received_request.status_code,
                            received_request.json()['message'])
    else:
        return received_request.json()
| [
"contact@ayushbahuguna.com"
] | contact@ayushbahuguna.com |
b7a60a72aec305247050106d7a6c5b6847ad98b0 | f2a70409df3d3ea1cc74a650038ffe2c8d648497 | /settings/__init__.py | 689240e02006106488d3a37b1c0c0ea91e096d06 | [] | no_license | abhi420bhardwaj/Django-Socialize | 120ca34c68d42b9fc2a73bd4b11eb1d32154d9b4 | b3fddab4150abb2f8721a07637c3c49d647d43b0 | refs/heads/master | 2021-03-03T01:49:31.007578 | 2020-03-09T02:21:43 | 2020-03-09T02:21:43 | 245,923,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | from .base import *
# Optional machine-local settings overrides. Only a missing local.py is
# acceptable; any other failure inside local.py (syntax error, bad import,
# runtime error) should surface instead of being silently swallowed --
# the original bare 'except' hid such bugs.
try:
    from .local import *
except ImportError:
    pass
"abhi420bhardwaj@gmail.com"
] | abhi420bhardwaj@gmail.com |
e58e8361f66d4ba00ac7e23212da5f974c710fad | 67b2be20c931cf53df782dbfa7783542e60fa484 | /dgreed/tools/mkdig/SConscript | 6d14bd23c00ec634d9aaa7e916d44f23f3b7c9a3 | [] | no_license | carlossless/quibble | 78d3c19b552c447b2181c24d3bbd9462f9e20cc0 | f816d7fc2aa8566d3e410ed9ed6579364c9b9913 | refs/heads/master | 2021-01-10T13:27:55.083085 | 2014-10-13T11:29:03 | 2014-10-13T11:29:03 | 45,132,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | Import('env')
# SCons build script for the 'mkdig' tool ('env' is imported on the line above).
NAME='mkdig'
# Compile every C file in this directory into the executable, linking the
# dgreed libraries; DGREED_POSTFIX distinguishes debug/release binaries.
sources = Glob('*.c', strings=True)
app = env.Program(NAME + env['DGREED_POSTFIX'], sources, LIBS=env['DGREED_LIBS'])
# Copy the finished binary into the project-wide bin directory ('#' = project root).
env.Install('#'+env['DGREED_BIN_DIR'], app)
| [
"domas.marko@gmail.com"
] | domas.marko@gmail.com | |
f03e64fbebef264b8c776239227111949d6b19ed | 934d1653e6c48fdd32b82edd0167767ea8e38c32 | /code/resources/store.py | 3d9d82bfa5b34ff8dabbe912e31643c881381eeb | [] | no_license | Nishantk1906/flask-tut-api | 40f2e7df98aaa22d14f4a1edf96558d51f7ff92c | ef6d79bb12a9b32e29fb8f7c0160978eae9b4a45 | refs/heads/main | 2023-08-25T16:23:06.585136 | 2021-10-23T06:07:11 | 2021-10-23T06:07:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | from flask_restful import Resource, reqparse
from models.store import Storemodel
class Store(Resource):
    """REST resource exposing a single store, addressed by name."""

    def get(self, name):
        """Return the store as JSON, or a 404 payload when absent."""
        found = Storemodel.find_by_name(name)
        if found:
            return found.json()
        return {'message': 'Store not found'}, 404

    def post(self, name):
        """Create a store; 400 if the name is taken, 500 on a DB failure."""
        if Storemodel.find_by_name(name):
            return {'message': 'A store with name already exists'}, 400
        new_store = Storemodel(name)
        try:
            new_store.save_to_db()
        except:
            return {'message': 'internal server error'}, 500
        return new_store.json(), 201

    def delete(self, name):
        """Delete the store if it exists; the response is identical either way."""
        existing = Storemodel.find_by_name(name)
        if existing:
            existing.delete_from_db()
        return {'message': 'store deleted'}
class Storelist(Resource):
    """REST resource returning the collection of all stores."""

    def get(self):
        """Serialize every store in the database into a JSON list."""
        serialized = []
        for record in Storemodel.query.all():
            serialized.append(record.json())
        return {'stores': serialized}
| [
"nishant@corum8.com"
] | nishant@corum8.com |
6f0da9774e428291d826ce32f0b2b035b3d95848 | adb6fe118613d60af9abfa73055599d205cf2108 | /视觉/XLwork/XL4/XL4_2.py | 5566d364d670d79d8f81b8ab2cda2c8a9d120eab | [] | no_license | lijianmin01/Third_college_grade | 18845f666a7fc1ece24d2ee45ee287e1efc0ca11 | 5e5b1f64375506de79ed94c8b2fc266fe1af4d6a | refs/heads/master | 2022-12-31T15:26:05.521297 | 2020-10-17T01:34:32 | 2020-10-17T01:34:32 | 295,317,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,738 | py | import cv2
import numpy as np
# Global state
# Which image is active: 0 = first image, 1 = second image
img_flag = 0
# Mouse callback for the first image.
def on_EVENT_LBUTTONDOWN1(event, x, y,flags, param):
    """On left-click: record the (x, y) point into the module-level lists
    a/b, draw a red marker plus the coordinate label on img1, and refresh
    the window. (Click three times to collect the three control points;
    the window is closed afterwards by the caller.)"""
    if event == cv2.EVENT_LBUTTONDOWN:
        xy = "%d,%d" % (x, y)
        a.append(x)
        b.append(y)
        cv2.circle(img1, (x, y), 1, ( 0, 0,255), thickness=4)
        cv2.putText(img1, xy, (x, y), cv2.FONT_HERSHEY_PLAIN,1.0, (0, 0, 0), thickness=1)
        cv2.imshow("image1", img1)
# Mouse callback for the second image.
def on_EVENT_LBUTTONDOWN2(event, x, y,flags, param):
    """On left-click: record the (x, y) point into the module-level lists
    a1/b1, draw a blue marker plus the coordinate label on img2, and
    refresh the window."""
    if event == cv2.EVENT_LBUTTONDOWN:
        xy = "%d,%d" % (x, y)
        a1.append(x)
        b1.append(y)
        cv2.circle(img2, (x, y), 1, (255, 0, 0), thickness=4)
        cv2.putText(img2, xy, (x, y), cv2.FONT_HERSHEY_PLAIN,1.0, (0, 0, 0), thickness=1)
        cv2.imshow("image2", img2)
# Collect matching control-point pairs from both images.
def get_same_point(img_flag):
    """Open each image in turn and let the user click corresponding points
    (captured via the mouse callbacks into a/b and a1/b1), then pack them
    into integer arrays.

    Returns (img_sq_1, img_sq_2): img_sq_1 is (n, 3) with rows [x, y, 1]
    (homogeneous coordinates), img_sq_2 is (n, 2) with rows [x, y].
    NOTE(review): the img_flag parameter is never used inside the function.
    """
    # First image: show, collect clicks until a key is pressed, close.
    cv2.namedWindow("image1")
    cv2.setMouseCallback("image1", on_EVENT_LBUTTONDOWN1)
    cv2.imshow("image1", img1)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # Second image: same procedure.
    cv2.namedWindow("image2")
    cv2.setMouseCallback("image2", on_EVENT_LBUTTONDOWN2)
    cv2.imshow("image2", img2)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # print(a)
    # print(b)
    # print(a1)
    # print(b1)
    len_1 = len(a)
    # np.ones (not zeros) so the third column of img_sq_1 stays 1.
    img_sq_1 = np.ones((len_1,3),dtype='int')
    img_sq_2 = np.ones((len_1,2), dtype='int')
    img_sq_1[:,0] = a[:]
    img_sq_1[:,1] = b[:]
    img_sq_2[:,0] = a1[:]
    img_sq_2[:,1] = b1[:]
    print(img_sq_1)
    print(img_sq_2)
    return img_sq_1,img_sq_2
if __name__ == '__main__':
    # Collect the same-name (matching) point pairs from the two images,
    # estimate an affine mapping from them, then composite both images
    # into one canvas.
    # Click coordinates for the first image.
    a,b = [], []
    # Click coordinates for the second image.
    a1, b1 = [], []
    img1 = cv2.imread(r'T:\imgs\XL4\klcc_a.png')
    img2 = cv2.imread(r"T:\imgs\XL4\klcc_b.png")
    # NOTE(review): slicing a numpy array ([:]) makes a VIEW, not a copy,
    # so these "copies" share memory with the originals -- confirm whether
    # .copy() was intended.
    img1_copy = img1[:]
    img2_copy = img2[:]
    # img_sq_1, img_sq_2 = get_same_point(img_flag)
    # Interactively collect the matching point pairs.
    img_sq_1,img_sq_2 = get_same_point(img_flag)
    """
    [[318 250 1]
    [153 318 1]
    [344 351 1]]
    [[243 270]
    [ 74 342]
    [272 369]]
    # 为了避免重复获取同点对,所以直接获取,后期删了
    X = np.mat([[318,250,1],[153,318,1],[344,351,1]])
    U = np.mat([[243,270],[ 74,342],[272,369]])
    """
    # Solve X * A = U for the affine transform A (X holds homogeneous coords).
    X = np.mat(img_sq_1)
    U = np.mat(img_sq_2)
    A = np.dot(X.I,U)
    print(A)
    # The clicks were drawn onto the images, so restore the "originals".
    img1 = img1_copy[:]
    img2 = img2_copy[:]
    M1,N1 = img1.shape[0],img1.shape[1]
    M2, N2 = img2.shape[0], img2.shape[1]
    img1_cnt = img1[:]
    img2_cnt = img2[:]
    # Build the coordinate lists for the composite canvas.
    # Transformed coordinates for every pixel of image 1 (the "X" image).
    save_img2 = []
    for x in range(M1):
        for y in range(N1):
            cnt_sq = np.array([x, y, 1]).dot(A)
            cnt_sq = [int(cnt_sq.tolist()[0][0]),int(cnt_sq.tolist()[0][1])]
            save_img2.append(cnt_sq)
    # Reference coordinates: every pixel of image 2 (the "U" image).
    save_img1 = []
    for x in range(M2):
        for y in range(N2):
            save_img1.append([x,y])
    save_img1 = np.array(save_img1)
    # Find the minimum coordinates of the transformed image.
    save_img2=np.array(save_img2)
    # NOTE(review): axis=1 takes the per-POINT minimum; the per-coordinate
    # minimum over all points would be axis=0 -- confirm the intent.
    min_h = np.min(save_img2,axis=1)
    # Record the minimum x and y coordinates.
    x_min,y_min = min_h[0],min_h[1]
    img3 = np.zeros([1000,1000, 3], np.uint8)
    save_img1_1 = save_img1[:]
    save_img2_1 = save_img2[:]
    # Shift all coordinates so nothing falls at negative indices.
    if x_min<0:
        cnt = abs(x_min)
        for i in range(len(save_img1)):
            save_img1[i][0]+=cnt
        for i in range(len(save_img2)):
            save_img2[i][0]+=cnt
    if y_min<0:
        cnt = abs(y_min)
        for i in range(len(save_img1)):
            save_img1[i][1]+=cnt
        for i in range(len(save_img2)):
            save_img2[i][1]+=cnt
    # print(save_img1_1)
    # print(save_img2_1)
    # Paint both images onto the canvas; out-of-bounds pixels are skipped
    # via the (very broad) except clauses.
    for i in range(len(save_img1)):
        try:
            img3[save_img1_1[i][0],save_img1_1[i][1]]=img1[save_img1[i][0],save_img1[i][1]]
        except:
            # img3[save_img1_1[i][0], save_img1_1[i][1]] = img1[save_img1[i][0]-1, save_img1[i][1]-1]
            continue
    for i in range(len(save_img2)):
        try:
            img3[save_img2_1[i][0],save_img2_1[i][1]]=img2[save_img2[i][0],save_img2[i][1]]
        except:
            #img3[save_img1_1[i][0], save_img1_1[i][1]] = img1[save_img2[i][0]-1, save_img2[i][1]-1]
            continue
    cv2.imshow("3",img3)
    cv2.waitKey(0)
"lijianmin01@126.com"
] | lijianmin01@126.com |
b741ff399ab76da7346243c4a6b8b998babe038f | b3f33d53507b09bc8193c5fc113fe2f28d95f6da | /empinfo/forms.py | 4e01d73915ed2eae414d2d03cf9e13da6356e549 | [] | no_license | Jayant2185/Django_Employee | f07beb3b3497a84c75ba43a623a7ebb7390b18b4 | ac3650670ddecd634b847bb39c3f0e9372b1bb4f | refs/heads/master | 2020-04-24T07:49:20.986569 | 2019-02-21T06:13:06 | 2019-02-21T06:13:06 | 171,810,493 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from django import forms
from empinfo.models import Employee
class EmployeeForm(forms.ModelForm):
    """ModelForm exposing every field of the Employee model."""
    class Meta:
        model = Employee
        fields = "__all__"  # include all model fields in the generated form
"me@gmail.com"
] | me@gmail.com |
51643692269809ea5f60e8640cb27fcf7291b775 | ccef22b749658409c4a7665196d8264824ebb9f4 | /ctci/arrays/setRowColumnZero.py | cd3fa085c658918b5cffc75923f53896a02e0630 | [] | no_license | anjaligr05/TechInterviews | 12afc9cb69758f129dbba213b88a7075b4484cdd | a6ba26f39afef3f91b2fbf4b0ff759ce096a03c7 | refs/heads/master | 2020-03-19T05:36:40.003074 | 2018-11-28T19:45:33 | 2018-11-28T19:45:33 | 135,947,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | def nullifyColumn(matrix, c):
for i in range(len(matrix)):
matrix[i][c] = 0
def nullifyRow(matrix, r):
    """Zero every entry of row r in place (row width taken from row 0)."""
    for col in range(len(matrix[0])):
        matrix[r][col] = 0
# NOTE: this file is Python 2 (bare 'print' statements below).
def setToZero(matrix):
    # CTCI "zero matrix": if an element is 0, zero its entire row and
    # column, in place. Uses the first row/column as marker storage so the
    # extra space is O(1); the two boolean flags remember whether the first
    # row/column themselves contained a zero. Prints the matrix before and
    # after.
    for r in matrix:
        print r
    print('\n')
    firstRowZero = False
    firstColumnZero = False
    # Does the first row / first column contain a zero of its own?
    for c in range(len(matrix[0])):
        if matrix[0][c]==0:
            firstRowZero = True
    for r in range(len(matrix)):
        if matrix[r][0] == 0:
            firstColumnZero = True
    # Mark zeros of the interior into the first row and column.
    for i in range(1,len(matrix)):
        for j in range(1,len(matrix[0])):
            if matrix[i][j]==0:
                matrix[0][j] = 0
                matrix[i][0] = 0
    # Zero the marked columns, then the marked rows.
    for e in range(len(matrix[0])):
        if matrix[0][e]==0:
            nullifyColumn(matrix, e)
    for e in range(len(matrix)):
        if matrix[e][0] == 0:
            nullifyRow(matrix, e)
    # Finally handle the first row/column themselves.
    if firstRowZero:
        nullifyRow(matrix,0)
    if firstColumnZero:
        nullifyColumn(matrix, 0)
    for rows in matrix:
        print rows
# Demo invocation.
setToZero([[1,2,0,1],[2,1,4,1],[2,1,1,0],[5,0,1,2]])
| [
"anjred@amazon.com"
] | anjred@amazon.com |
4073df6d3ccfdb1360789995dbae5c54b8f6be5a | 778ddc9e596f368bc5fbf4ea78bd7044b94cd99e | /textutils/views.py | 9ba9ea7818e08bca4c93b9fb3cf59fb294984f86 | [] | no_license | sskanishk/django-textutil | e777202a8dac5312db1575ac19cc62c3349861a0 | f3c29de3c4a69af491dacb7bf1f71779bad3651a | refs/heads/master | 2020-12-02T20:38:06.619019 | 2019-12-31T16:06:09 | 2019-12-31T16:06:09 | 231,114,626 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,484 | py | # star
# views it is a python program
# views return HttpResponse
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
    """Render the landing page with the text-analysis form."""
    return render(request, 'index.html')
    # return HttpResponse("hello wotld")
# original
def analyse(request):
    """Apply the text transformations selected on the form.

    Reads the submitted text and five checkbox flags from request.POST
    ('on' when ticked). Transformations are chained in a fixed order --
    remove punctuation, uppercase, strip newlines, collapse runs of
    spaces, count non-space characters -- each feeding its output to the
    next; the final one sets the rendered result. Renders error.html when
    the text is empty or no operation was selected.
    """
    # Get the submitted text ('default' when the field is missing).
    djtext = request.POST.get('text', 'default')
    # Checkbox values: 'on' when ticked, 'off' otherwise.
    removepunc = request.POST.get('removepunc', 'off')
    fullcaps = request.POST.get('fullcaps', 'off')
    newlineremover = request.POST.get('newlineremover', 'off')
    extraspaceremover = request.POST.get('extraspaceremover', 'off')
    charcount = request.POST.get('charcount', 'off')

    # Reject empty input or a submission with no operation selected.
    if djtext == "":
        return render(request, 'error.html')
    options = (removepunc, fullcaps, newlineremover, extraspaceremover, charcount)
    if all(opt != "on" for opt in options):
        return render(request, 'error.html')

    params = {}
    if removepunc == "on":
        punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''
        analyzed = "".join(ch for ch in djtext if ch not in punctuations)
        params = {'purpose': 'Removed Punctuations', 'analyzed_text': analyzed}
        djtext = analyzed
    if fullcaps == "on":
        analyzed = djtext.upper()
        params = {'purpose': 'Changed to UPPERCASE', 'analyzed_text': analyzed}
        djtext = analyzed
    if newlineremover == "on":
        # (Debug prints from the original were removed.)
        analyzed = "".join(ch for ch in djtext if ch != "\n" and ch != "\r")
        params = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}
        djtext = analyzed
    if extraspaceremover == "on":
        # Drop a space when the NEXT character is also a space, keeping
        # only the last space of each run.
        # BUG FIX: the original read djtext[index + 1] without a bounds
        # check, raising IndexError whenever the text ended with a space.
        analyzed = ""
        for index, char in enumerate(djtext):
            if char == " " and index + 1 < len(djtext) and djtext[index + 1] == " ":
                continue
            analyzed = analyzed + char
        params = {'purpose': 'Removed ExtraSpace', 'analyzed_text': analyzed}
        djtext = analyzed
    if charcount == "on":
        # Count every non-space character of the (possibly transformed) text.
        count = sum(1 for ch in djtext if ch != " ")
        params = {'purpose': 'Number of Character', 'analyzed_text': count}
    return render(request, 'analyze.html', params)
def dictx(request):
    """Demo view: render a template with a small context dictionary."""
    params = {'name':'kanish', 'place':'Bali'}
    return render(request, 'dictx.html', params)
def about(request):
    """Return a hard-coded HTML about page with a couple of local links."""
    a = '''<h2>about page</h2>
    <a href="http://127.0.0.1:8000/about">About</a><br>
    <a href="http://127.0.0.1:8000/notes">Notes</a>'''
    return HttpResponse(a)
# pipelines
# def removepunc(request):
# # get the text
# print(request.GET.get('text', 'default'))
# # analyse the text
# return HttpResponse("removepunc")
# def capfirst(request):
# return HttpResponse("capfirst")
# def newlineremove(request):
# return HttpResponse("newlineremove")
# def spaceremove(request):
# return HttpResponse("spaceremove")
# def charcount(request):
# return HttpResponse("charcount")
def link(request):
    """Return a hard-coded HTML navigation bar of external links."""
    s = '''<h2>Navigation Bar<br></h2>
    <a href="https://www.youtube.com/watch?v=5BDgKJFZMl8&list=PLu0W_9lII9ah7DDtYtflgwMwpT3xmjXY9">Django with Harry Bhai</a><br>
    <a href="https://www.facebook.com/">Facebook</a><br>
    <a href="https://www.flipkart.com/">Flipkart</a><br>
    <a href="https://www.hindustantimes.com">News</a><br>
    <a href="https://www.google.com/">Google</a>'''
    return HttpResponse(s)
def notes(request):
    """Serve the raw contents of notes.txt from the working directory."""
    # 'with' guarantees the handle is closed (the original leaked it).
    with open("notes.txt", 'r+') as file:
        return HttpResponse(file.read())
| [
"kanishkmalviya78@gmail.com"
] | kanishkmalviya78@gmail.com |
ac56b27c967404a47fd578fdbff927c73f1753f0 | 5e684563a57d28b4fd353f7c0ec6155d52db3b8c | /exerciciosw3resource/01.py | 327f2de8264ae9ac72ef39c21ee74f79f07a1d86 | [] | no_license | rafaellamgs/prog_comp2018.1 | a42de9c3e0eae8c27c616664187f3917470d3a0e | 89017f8dd8b77f8edee571ecb047eee6534d51f9 | refs/heads/master | 2020-03-28T02:10:54.899557 | 2019-01-02T19:09:26 | 2019-01-02T19:09:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py |
# Print every number in [1500, 2700] divisible by both 7 and 5, i.e. by 35.
for candidate in range(1500, 2701):
    if candidate % 35 == 0:
        print(candidate, 'é multiplo de 5 e divisivel por 7')
| [
"39624306+camumbembe@users.noreply.github.com"
] | 39624306+camumbembe@users.noreply.github.com |
6b174a065fbde2ba1c834f3c080c64112a4ee370 | 8c7e58a1c80dfb0b0f62175173bf56f48ee90880 | /models/store.py | da9628512c368a2b14cd37457ba0aea815bb2b81 | [] | no_license | veerdoshi/stores-rest-api | 90692bd7ca80a6d8e52306782bb68dc354c69878 | 6868a2305f0222e9cb58e12156f5a019ba1a6fbc | refs/heads/master | 2021-09-01T11:37:07.069630 | 2017-12-26T18:27:38 | 2017-12-26T18:27:38 | 114,919,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | from db import db
class StoreModel(db.Model):
    """SQLAlchemy model for a store, with its one-to-many items relation."""
    __tablename__ = 'stores'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))
    # lazy='dynamic': self.items is a query object, resolved on .all().
    items = db.relationship('ItemModel', lazy='dynamic')
    def __init__(self, name):
        self.name = name
    def json(self):
        # Serializes the store together with all of its items.
        return {'name': self.name, 'items': [item.json() for item in self.items.all()]}
    @classmethod
    def find_by_name(cls, name):
        # Returns the first matching row or None.
        return cls.query.filter_by(name=name).first()
    def save_to_db(self):
        # Insert-or-update followed by an immediate commit.
        db.session.add(self)
        db.session.commit()
    def delete_from_db(self):
        db.session.delete(self)
        db.session.commit()
| [
"noreply@github.com"
] | noreply@github.com |
07f5f0febe13636216e15925edf3d44f1db27c2f | 2d80791a21a049243dd2bf7dd95a46c4d4b2510b | /domains/datastructures/linkedlist/CompareLists.py | c0436a2864f8f8fe985aca810532048acb762799 | [] | no_license | jayrav13/jr-hackerrank | 909346d101fdf08a54ff75ec2ee39c90e661b251 | b7e0130fdd1c2eb4436871be3255200eac8ca3d9 | refs/heads/master | 2021-01-15T15:36:16.772814 | 2016-11-21T18:59:15 | 2016-11-21T18:59:15 | 48,657,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | """
Compare two linked list
head could be None as well for empty list
Node is defined as
return back the head of the linked list in the below method.
"""
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
def CompareLists(headA, headB):
count = 0
while headA != None or headB != None:
if (headA == None and headB != None) or (headA != None and headB == None):
return 0
if headA.data != headB.data:
count = count + 1
headA = headA.next
headB = headB.next
if count == 0:
return 1
else:
return 0
| [
"jayrav13@gmail.com"
] | jayrav13@gmail.com |
f875953eeb6bc655bf365406127b7e55f238a6e8 | d91fe0e972f2befab71987a732111b56245c5efc | /example_sm_pkg/nodes/subscriber_publisher_class_example.py | 9c626cf9b3902a827c6dde41ecd95bc0f3438280 | [] | no_license | karla3jo/robocup2014 | 2064e8102d5a3251ae582b7ed37ab80d0398f71c | 3d8563956fd1276b7e034402a9348dd5cb3dc165 | refs/heads/master | 2020-07-26T08:22:13.932741 | 2014-07-14T13:58:48 | 2014-07-14T13:58:48 | 21,850,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 12:00:00 2013
@author: sampfeiffer
"""
import roslib; roslib.load_manifest('example_sm_pkg')
import rospy
import sys
from std_msgs.msg import String
class myNode():
    """Example ROS node: subscribes to one String topic and publishes to
    another (latched), republishing a message on construction and on every
    incoming message."""
    def __init__(self, argument_one):
        # my class variables
        self.my_variable_string = "I'm a string yo!"
        # Incoming messages trigger myCallback.
        self.subs = rospy.Subscriber('my_topic_to_subscribe', String, self.myCallback)
        # latch=True: late subscribers still receive the last message.
        self.pub = rospy.Publisher('my_topic_to_publish', String, latch=True)
        # Publish once right away.
        self.myMethod()
    def myCallback(self, data):
        # Called by rospy for every message on the subscribed topic;
        # 'data' itself is not used, we just republish.
        rospy.loginfo("Received from topic data!")
        self.myMethod()
    def myMethod(self):
        # Publish a fixed String message.
        rospy.loginfo("Using the method!")
        publish_this_thing = String("I'm the content of a string!")
        self.pub.publish(publish_this_thing)
# Entry point: register the node, require one CLI argument, then spin
# (this file is Python 2 -- note the bare 'print' statement).
if __name__ == '__main__':
    rospy.init_node('node_name')
    if len(sys.argv) < 2:
        print "Error, we need an arg!"
        rospy.loginfo("No args given, closing...")
        exit()
    node = myNode("this is an argument")
    rospy.spin()
"sammypfeiffer@gmail.com"
] | sammypfeiffer@gmail.com |
2bacde3214b0e7c787edcea2d2dd0a33a87aee18 | d638e4a000327c7b0163020f9aef188023570ce2 | /initialiser.py | 7b43f4c8f2e93380b5898d829c43fe7dc337f992 | [] | no_license | t-afo/Lymph | 9d9fd205870d0badbf2349107267f58b3c006b28 | e87a9517f8828fe1fda65ad2902c7a2deb202b40 | refs/heads/master | 2023-05-11T02:13:23.431036 | 2021-06-03T11:14:59 | 2021-06-03T11:14:59 | 373,480,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,412 | py | #Code for initialising conditions, along with supporting functions for 1 lymphangion
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from scipy import optimize
#Constants
# pa/pb: upstream (inlet) and downstream (outlet) boundary pressures --
# pa feeds flowrate(pa, p1), pb receives flowrate(p2, pb).
pa = 2190
pb = 2500
# pe: presumably the external pressure term of pm() -- confirm units.
pe = 2140
# Pd, Dd: parameters of the passive wall-tension law fpassive().
Pd = 35
Dd = 0.025
# mu: presumably dynamic viscosity (appears in the 64*mu*L/(pi*D) term).
mu = 1
# L: lymphangion length used in the same resistance term.
L = 0.3
# f: contraction frequency (cos(2*pi*f*(t - t0)) in factive()).
f = 0.5
# t0: contraction onset time (Mcon is zero before t0).
t0 = np.pi / 2
#Valve resistance constants throughout lymphagngions
Rvmin = 600.0 #min. valve resistance
Rvmax = 1.2 * 10**7 #max. valve resistance
sopen = 4.9 * 10**-2 #valve opening slope
sfail = 0.04 #valve failure slope
popen = -70.0 #valve opening pressure
pfail = -1.8 * 10**4 #valve failure pressure
def initcon(D, initialt): #initial conditions
    """Solve for the initial intermediate pressures p1/p2 at diameter D and
    time initialt, derive the corresponding valve resistances and flow
    rates, and return the seeded history arrays:
    (Dall, Qall, pall, Rvall, tall, p1, p2). Qall/pall/Rvall are column
    vectors (shape (2, 1)); progress values are printed along the way."""
    # p1/p2: roots of the inlet/outlet pressure-balance residuals.
    p1 = minimiserval(fp1, D, initialt, pa).x
    print("initial value of p1 is ", p1)
    p2 = minimiserval(fp2, D, initialt, pb).x
    print("initial value of p2 is", p2)
    #Print values of Valve Resistance
    Rv1 = ValveRes(pa,p1)
    Rv2 = ValveRes(p2,pb)
    print("initial value of Rv for p1 is", Rv1)
    print("initial value of Rv for p2 is", Rv2)
    #Print values of flow rate
    Q1 = flowrate(pa, p1)
    Q2 = flowrate(p2, pb)
    print("initial value of Q1 is: ", Q1)
    print("initial value of Q2 is: ", Q2)
    # Seed the simulation history arrays with the initial state.
    Dall = np.array([D])
    Qall = np.array([Q1, Q2], ndmin=2).reshape(-1, 1)
    pall = np.array([p1, p2], ndmin=2).reshape(-1, 1)
    Rvall = np.array([Rv1, Rv2], ndmin=2).reshape(-1, 1)
    tall = np.array([initialt])
    return Dall, Qall, pall, Rvall, tall, p1, p2
def fp1(p1, D, t):
    """Residual of the inlet pressure balance; its root gives the initial
    p1 (uses module globals mu, L, pa and the inlet valve resistance)."""
    return p1 - pm(t, D) - (64 * mu * L) / (np.pi * D) * (pa - p1) / ValveRes(pa,p1)
def fp2(p2, D, t):
    """Residual of the outlet pressure balance; its root gives the initial
    p2 (uses module globals mu, L, pb and the outlet valve resistance)."""
    return p2 - pm(t, D) + (64 * mu * L) / (np.pi * D) * (p2 - pb) / ValveRes(p2,pb)
def pm(t, D):
    """Mid-lymphangion pressure: external pressure plus the passive and
    active wall-tension contributions."""
    return pe + fpassive(D) + factive(t, D)
def fpassive(D):
    """Passive wall-pressure law: exponential stiffening above the
    reference diameter Dd and a cubic collapse term below it."""
    return Pd * (np.exp(D/Dd) - np.power(Dd/D, 3))
def factive(t, D):
    """Active contraction pressure: a raised-cosine pulse at frequency f,
    scaled by Mcon (zero before t0) and inversely by diameter."""
    return Mcon(t, t0) / D * (1 - np.cos(2 * np.pi * f * (t - t0)))
def Mcon(t, t0):
    """Active contraction magnitude: 0 before onset time t0, 3.5 from t0 on."""
    return 0 if t < t0 else 3.5
def minimiserval(func, D, t, p):
    """Solve func(x, D, t) ~= 0 for a pressure x near p via bounded least
    squares, starting 100 below p and searching within p +/- 500.
    Returns the full scipy OptimizeResult (solution in .x)."""
    initial_guess = p - 100
    search_bounds = [p - 500, p + 500]
    return sp.optimize.least_squares(func, initial_guess, jac="2-point",
                                     bounds=search_bounds, args=(D, t))
def ValveRes(pin, pout):
    """Pressure-dependent valve resistance.

    Two sigmoids are combined: one models the normal transition from the
    high resistance Rvmax down to the low resistance Rvmin once the
    pressure drop across the valve exceeds the small positive value popen;
    the other models valve failure (prolapse) at the large adverse
    pressure difference pfail.
    """
    dp = pin - pout
    # Normal opening: high->low resistance transition around popen.
    opening = 1 / (1 + np.exp(sopen * (dp - popen)))
    # Prolapse: resistance collapses again at the large adverse drop pfail.
    failure = 1 / (1 + np.exp(-sfail * (dp - pfail)))
    return Rvmin + Rvmax * (opening + failure - 1)
def flowrate(pin, pout):
    """Flow through a valve given the pressures on either side.

    NOTE(review): this returns (pout - pin) / R, so a favourable drop
    (pin > pout) yields a negative flow -- confirm the intended sign
    convention against the callers in initcon().
    """
    resistance = ValveRes(pin, pout)
    return (pout - pin) / resistance
def savevalues(D, Q, p, t, Dall, Qall, pall, tall):
    """Append the current step's values to the history arrays.

    Returns the extended (Dall, Qall, pall).

    NOTE(review): `tall` is extended below but the result is never
    returned, so the appended time value is silently discarded and the
    caller's time array will not grow.  This looks like a bug; confirm
    whether the caller tracks time separately before changing the return
    signature (adding `tall` to the return would break existing callers
    that unpack three values).
    """
    Dall = np.append(Dall, D, axis = 0)
    # Q and p histories grow by one column per time step.
    Qall = np.append(Qall.reshape(-1, 1), Q.reshape(-1, 1), axis = 1)
    pall = np.append(pall, p.reshape(-1, 1), axis = 1)
    tall = np.append(tall, t, axis = 0)
    return Dall, Qall, pall
def savedata(Dall, Qall, pall, timearray):
    """Dump the recorded histories to CSV files in the working directory."""
    outputs = (('Dall.csv', Dall),
               ('Qall.csv', Qall),
               ('pall.csv', pall),
               ('time.csv', timearray))
    for filename, data in outputs:
        np.savetxt(filename, data, delimiter = ',')
| [
"42203782+t-afo@users.noreply.github.com"
] | 42203782+t-afo@users.noreply.github.com |
995e5340e3e9b0c8c5de25b5387d45937c15f28d | ac7e039a70ba627f6d9a7a02c9a8849ed5e18a89 | /unep.project-database/tags/0.5/reports/ProjectsByStaffReportFactory.py | 098ba274cf4516efa70e202f34be5109ec024408 | [] | no_license | jean/project-database | 65a2559844175350351ba87e820d25c3037b5fb2 | e818d322ec11d950f2770cd5324fbcd1acaa734d | refs/heads/master | 2021-01-01T06:27:24.528764 | 2014-01-31T11:11:45 | 2014-01-31T11:11:45 | 32,125,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | from Report import Report
class ProjectsByStaffReportFactory(object):
    """Factory that assembles the skeleton of the 'Projects by Staff' report.

    Only the report and table headers are populated; rows, totals and
    footers are still unimplemented (see the XXX marker below).
    """

    def __init__(self, context, **kw):
        # Keep the calling context and any extra report parameters.
        self.context = context
        self.params = kw

    def getReport(self):
        """Create and return the report shell (headers only, no rows)."""
        title = "Projects by Staff Report"
        report = Report(title)
        report.setReportHeaders((title,),)
        columns = (
            'Staff Name',
            'Staff position',
            'Focal Area',
            'Project Title',
            'Project Cycle Stage',
            'Last milestone action',
            'Actual date',
            'Project Grant ',
        )
        report.setTableHeaders((columns,))
        # XXX Implement this
        # report.setTableRows()
        # report.setTableTotals([])
        # report.setReportFooters()
        return report
| [
"jurgen.blignaut@61ed036f-b72b-0410-9ea5-b9ec1d72d98d"
] | jurgen.blignaut@61ed036f-b72b-0410-9ea5-b9ec1d72d98d |
2bfb19c4ed33d2b895432f2794e91a2a83869bf2 | a2832e8b13109e9c730995afda6037102f788d74 | /MNIST_CNN.py | c4b9209ea57745713a7f101a3c985dfd5817d366 | [] | no_license | NengnengZhang/my-nlp | abd3177792c4cbf8e901e7b4b06e400e784a2c4c | a6f160c5bd966abcf2133e9b8d3a4633439a94ef | refs/heads/master | 2021-07-03T01:11:55.197620 | 2020-10-15T08:21:49 | 2020-10-15T08:21:49 | 182,517,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,991 | py | # Convolutional Neural Network,CNN
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import com.hello.mnist.input_data as input_data
# Load MNIST (downloaded into MNIST_data/ on first run); labels are one-hot.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
def weight_variable(shape):
    """Weight tensor of the given shape, truncated-normal init (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Bias tensor of the given shape, initialised to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def convolutional2d(x, Weight):
    """2-D convolution with stride 1 and SAME padding.

    Stride layout is [batch, x_movement, y_movement, channel]; the batch
    and channel strides must both be 1.
    """
    return tf.nn.conv2d(x, Weight, strides=[1, 1, 1, 1], padding='SAME')
def max_pooling_2x2(x):
    """2x2 max pooling with stride 2 and SAME padding (halves width/height)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME')
# define placeholder for inputs to network
with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32, [None, 784], name='x-input') # 28x28
    y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# change xs for the CNN: flatten vector back into a 28x28 single-channel image
x_image = tf.reshape(xs, [-1, 28, 28, 1])
# convolutional layer 1
with tf.name_scope('convolutional_layer_1'):
    W_convolutional_1 = weight_variable([5, 5, 1, 32]) # patch 5x5, in size 1, out size 32
    b_convolutional_1 = bias_variable([32])
    h_convolutional_1 = tf.nn.relu(convolutional2d(x_image, W_convolutional_1) + b_convolutional_1) # output size 28x28x32
    h_pool_1 = max_pooling_2x2(h_convolutional_1) # output size 14x14x32
# convolutional layer 2
with tf.name_scope('convolutional_layer_2'):
    W_convolutional_2 = weight_variable([5, 5, 32, 64]) # patch 5x5, in size 32, out size 64
    b_convolutional_2 = bias_variable([64])
    h_convolutional_2 = tf.nn.relu(convolutional2d(h_pool_1, W_convolutional_2) + b_convolutional_2) # output size 14x14x64
    h_pool_2 = max_pooling_2x2(h_convolutional_2) # output size 7x7x64
# full connection layer 1
with tf.name_scope('full_connection_layer_1'):
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    # [n_samples, 7, 7, 64] ->> [n_samples, 7*7*64]
    h_pool2_flat = tf.reshape(h_pool_2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# full connection layer 2 (softmax output over the 10 digit classes)
with tf.name_scope('full_connection_layer_2'):
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
with tf.name_scope('total'):
    # NOTE(review): tf.log(y) with no clipping can produce NaN if any
    # softmax output underflows to 0 -- consider softmax_cross_entropy_with_logits.
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
    tf.summary.scalar('cross_entropy', cross_entropy)
with tf.name_scope('train'):
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', accuracy)
init = tf.global_variables_initializer()
sess = tf.Session()
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("logs/", sess.graph)
sess.run(init)
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={xs: batch_xs, y_: batch_ys, keep_prob: 0.5})
    if i % 50 == 0:
        summary = sess.run(merged, feed_dict={xs: batch_xs, y_: batch_ys, keep_prob: 0.5})
        writer.add_summary(summary, i)
        # NOTE(review): keep_prob is 0.5 here too, so dropout stays active
        # while measuring accuracy; evaluation normally uses 1.0 -- confirm.
        acc = sess.run(accuracy, feed_dict={xs: mnist.test.images, y_: mnist.test.labels, keep_prob: 0.5})
        print('Accuracy at step %s: %s' % (i, acc))
# Quick sanity check: run the trained model on a single training sample
test_xs, test_ys = mnist.train.next_batch(1)
y_pre = sess.run(y, feed_dict={xs: test_xs, y_: mnist.test.labels})
print(y_pre)
print(np.argmax(y_pre))
# Display the sampled image
plt.figure()
im = test_xs[0].reshape(28, 28)
plt.imshow(im, 'gray')
plt.pause(0.0000001)
plt.show()
| [
"nengneng_zhang@outlook.com"
] | nengneng_zhang@outlook.com |
ea9d3ee3230d73f421fb22d2f59af8f113c81b91 | c1bd12405d244c5924a4b069286cd9baf2c63895 | /azure-mgmt-network/azure/mgmt/network/v2015_06_15/models/load_balancing_rule_py3.py | 695a8e63e56403f3519346e6c5ce8aa055f7b53e | [
"MIT"
] | permissive | lmazuel/azure-sdk-for-python | 972708ad5902778004680b142874582a284a8a7c | b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | refs/heads/master | 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 | MIT | 2019-10-25T15:56:00 | 2014-06-27T19:40:56 | Python | UTF-8 | Python | false | false | 5,391 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class LoadBalancingRule(SubResource):
    """A load balancing rule for a load balancer.
    All required parameters must be populated in order to send to Azure.
    :param id: Resource Identifier.
    :type id: str
    :param frontend_ip_configuration: A reference to frontend IP addresses.
    :type frontend_ip_configuration:
     ~azure.mgmt.network.v2015_06_15.models.SubResource
    :param backend_address_pool: A reference to a pool of DIPs. Inbound
     traffic is randomly load balanced across IPs in the backend IPs.
    :type backend_address_pool:
     ~azure.mgmt.network.v2015_06_15.models.SubResource
    :param probe: The reference of the load balancer probe used by the load
     balancing rule.
    :type probe: ~azure.mgmt.network.v2015_06_15.models.SubResource
    :param protocol: Required. The transport protocol for the external
     endpoint. Possible values are 'Udp' or 'Tcp'. Possible values include:
     'Udp', 'Tcp'
    :type protocol: str or
     ~azure.mgmt.network.v2015_06_15.models.TransportProtocol
    :param load_distribution: The load distribution policy for this rule.
     Possible values are 'Default', 'SourceIP', and 'SourceIPProtocol'.
     Possible values include: 'Default', 'SourceIP', 'SourceIPProtocol'
    :type load_distribution: str or
     ~azure.mgmt.network.v2015_06_15.models.LoadDistribution
    :param frontend_port: Required. The port for the external endpoint. Port
     numbers for each rule must be unique within the Load Balancer. Acceptable
     values are between 1 and 65534.
    :type frontend_port: int
    :param backend_port: The port used for internal connections on the
     endpoint. Acceptable values are between 1 and 65535.
    :type backend_port: int
    :param idle_timeout_in_minutes: The timeout for the TCP idle connection.
     The value can be set between 4 and 30 minutes. The default value is 4
     minutes. This element is only used when the protocol is set to TCP.
    :type idle_timeout_in_minutes: int
    :param enable_floating_ip: Configures a virtual machine's endpoint for the
     floating IP capability required to configure a SQL AlwaysOn Availability
     Group. This setting is required when using the SQL AlwaysOn Availability
     Groups in SQL server. This setting can't be changed after you create the
     endpoint.
    :type enable_floating_ip: bool
    :param provisioning_state: Gets the provisioning state of the PublicIP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: The name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """
    # Fields that must be supplied before serializing to the service.
    _validation = {
        'protocol': {'required': True},
        'frontend_port': {'required': True},
    }
    # Maps Python attribute names to the REST API's JSON payload keys.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
        'backend_address_pool': {'key': 'properties.backendAddressPool', 'type': 'SubResource'},
        'probe': {'key': 'properties.probe', 'type': 'SubResource'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'load_distribution': {'key': 'properties.loadDistribution', 'type': 'str'},
        'frontend_port': {'key': 'properties.frontendPort', 'type': 'int'},
        'backend_port': {'key': 'properties.backendPort', 'type': 'int'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'enable_floating_ip': {'key': 'properties.enableFloatingIP', 'type': 'bool'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }
    def __init__(self, *, protocol, frontend_port: int, id: str=None, frontend_ip_configuration=None, backend_address_pool=None, probe=None, load_distribution=None, backend_port: int=None, idle_timeout_in_minutes: int=None, enable_floating_ip: bool=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
        super(LoadBalancingRule, self).__init__(id=id, **kwargs)
        self.frontend_ip_configuration = frontend_ip_configuration
        self.backend_address_pool = backend_address_pool
        self.probe = probe
        self.protocol = protocol
        self.load_distribution = load_distribution
        self.frontend_port = frontend_port
        self.backend_port = backend_port
        self.idle_timeout_in_minutes = idle_timeout_in_minutes
        self.enable_floating_ip = enable_floating_ip
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
| [
"noreply@github.com"
] | noreply@github.com |
298fb512ae1280030836435e20ea3f5487152026 | 1bab425414ff522916c70fbc57562ad7e7e2aa64 | /build_libtcod.py | 94b053869450719211c905bff082869f96a359a8 | [
"BSD-2-Clause-Views",
"Python-2.0"
] | permissive | Rakaneth/python-tcod | c958797193a365c5ccd8b1cb9d1a143938e4ba05 | 70ff1895fd7ae87bf66f16e388211db389d983fd | refs/heads/master | 2020-04-13T20:47:56.853106 | 2018-12-28T18:42:20 | 2018-12-28T18:42:20 | 163,439,619 | 0 | 0 | NOASSERTION | 2018-12-28T18:42:21 | 2018-12-28T18:40:01 | Python | UTF-8 | Python | false | false | 11,464 | py | #!/usr/bin/env python3
import os
import sys
import glob
from cffi import FFI
from pycparser import c_parser, c_ast, parse_file, c_generator
import shutil
import subprocess
import platform
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
import zipfile
# Build-time configuration, overridable from the environment.
SDL2_VERSION = os.environ.get('SDL_VERSION', '2.0.9')  # SDL2 release to fetch
TDL_NO_SDL2_EXPORTS = os.environ.get('TDL_NO_SDL2_EXPORTS', '0') == '1'
CFFI_HEADER = 'tcod/cffi.h'       # header handed to pycparser/cffi below
CFFI_EXTRA_CDEFS = 'tcod/cdef.h'  # extra hand-written cdefs appended to it
BITSIZE, LINKAGE = platform.architecture()  # e.g. ('64bit', 'WindowsPE')
def walk_sources(directory):
    """Yield the path of every .c/.cpp file found recursively under *directory*."""
    for path, _dirs, files in os.walk(directory):
        for filename in files:
            if filename.endswith(('.c', '.cpp')):
                yield os.path.join(path, filename)
def find_sources(directory):
    """Return the .c files directly inside *directory* (non-recursive)."""
    return [
        os.path.join(directory, filename)
        for filename in os.listdir(directory)
        if filename.endswith('.c')
    ]
def get_sdl2_file(version):
    """Return the local path of the SDL2 release archive for *version*,
    downloading it into dependencies/ on first use.

    Windows gets the Visual C development zip; anything else is assumed to
    be macOS and gets the .dmg disk image.
    """
    if sys.platform == 'win32':
        archive_name = 'SDL2-devel-%s-VC.zip' % (version,)
    else:
        assert sys.platform == 'darwin'
        archive_name = 'SDL2-%s.dmg' % (version,)
    local_path = os.path.join('dependencies', archive_name)
    remote_url = 'https://www.libsdl.org/release/%s' % archive_name
    if not os.path.exists(local_path):
        print('Downloading %s' % remote_url)
        urlretrieve(remote_url, local_path)
    return local_path
def unpack_sdl2(version):
    """Extract the SDL2 archive for *version* and return the unpacked path.

    On macOS the .dmg is mounted, SDL2.framework is copied out, and the
    image is unmounted again; the returned path then points at the
    framework itself rather than the containing directory.
    """
    sdl2_path = 'dependencies/SDL2-%s' % (version,)
    if sys.platform == 'darwin':
        sdl2_dir = sdl2_path
        sdl2_path += '/SDL2.framework'
    if os.path.exists(sdl2_path):
        # Already unpacked on a previous run.
        return sdl2_path
    sdl2_arc = get_sdl2_file(version)
    print('Extracting %s' % sdl2_arc)
    if sdl2_arc.endswith('.zip'):
        with zipfile.ZipFile(sdl2_arc) as zf:
            zf.extractall('dependencies/')
    else:
        assert sdl2_arc.endswith('.dmg')
        # Mount the disk image, copy the framework out, then unmount.
        subprocess.check_call(['hdiutil', 'mount', sdl2_arc])
        subprocess.check_call(['mkdir', '-p', sdl2_dir])
        subprocess.check_call(['cp', '-r', '/Volumes/SDL2/SDL2.framework',
                               sdl2_dir])
        subprocess.check_call(['hdiutil', 'unmount', '/Volumes/SDL2'])
    return sdl2_path
# --- cffi extension build configuration ---
module_name = 'tcod._libtcod'
include_dirs = [
    '.',
    'libtcod/src/vendor/',
    'libtcod/src/vendor/zlib/',
]
extra_parse_args = []    # flags for pycparser's preprocessor pass
extra_compile_args = []  # flags for the C compiler
extra_link_args = []     # flags for the linker
sources = []
libraries = []
library_dirs = []
define_macros = []
# Compile the Python-facing glue plus all of vendored libtcod and friends.
sources += walk_sources('tcod/')
sources += walk_sources('tdl/')
sources += walk_sources('libtcod/src/libtcod')
sources += ['libtcod/src/vendor/glad.c']
sources += ['libtcod/src/vendor/lodepng.cpp']
sources += ['libtcod/src/vendor/stb.c']
sources += ['libtcod/src/vendor/utf8proc/utf8proc.c']
sources += glob.glob('libtcod/src/vendor/zlib/*.c')
if TDL_NO_SDL2_EXPORTS:
    extra_parse_args.append('-DTDL_NO_SDL2_EXPORTS')
# Platform-specific libraries and macros.
if sys.platform == 'win32':
    libraries += ['User32', 'OpenGL32']
    define_macros.append(('TCODLIB_API', ''))
    define_macros.append(('_CRT_SECURE_NO_WARNINGS', None))
if 'linux' in sys.platform:
    libraries += ['GL']
if sys.platform == 'darwin':
    # On macOS SDL2 is linked as a framework rather than a plain library.
    extra_link_args += ['-framework', 'OpenGL']
    extra_link_args += ['-framework', 'SDL2']
else:
    libraries += ['SDL2']
# included SDL headers are for whatever OS's don't easily come with them
if sys.platform in ['win32', 'darwin']:
    SDL2_PATH = unpack_sdl2(SDL2_VERSION)
    include_dirs.append('libtcod/src/zlib/')
    if sys.platform == 'win32':
        include_dirs.append(os.path.join(SDL2_PATH, 'include'))
        # Pick the import-library directory matching this interpreter's arch.
        ARCH_MAPPING = {'32bit': 'x86', '64bit': 'x64'}
        SDL2_LIB_DIR = os.path.join(SDL2_PATH, 'lib/', ARCH_MAPPING[BITSIZE])
        library_dirs.append(SDL2_LIB_DIR)
        # Ship SDL2.dll next to the extension so it is found at runtime.
        SDL2_LIB_DEST = os.path.join('tcod', ARCH_MAPPING[BITSIZE])
        if not os.path.exists(SDL2_LIB_DEST):
            os.mkdir(SDL2_LIB_DEST)
        shutil.copy(os.path.join(SDL2_LIB_DIR, 'SDL2.dll'), SDL2_LIB_DEST)
    def fix_header(filepath):
        """Removes leading whitespace from a MacOS header file.
        This whitespace is causing issues with directives on some platforms.
        """
        with open(filepath, 'r+') as f:
            current = f.read()
            fixed = '\n'.join(line.strip() for line in current.split('\n'))
            if current == fixed:
                # Header already clean; avoid rewriting (keeps mtime stable).
                return
            f.seek(0)
            f.truncate()
            f.write(fixed)
    if sys.platform == 'darwin':
        HEADER_DIR = os.path.join(SDL2_PATH, 'Headers')
        fix_header(os.path.join(HEADER_DIR, 'SDL_assert.h'))
        fix_header(os.path.join(HEADER_DIR, 'SDL_config_macosx.h'))
        include_dirs.append(HEADER_DIR)
        # Make the framework findable at link time and at runtime.
        extra_link_args += ['-F%s/..' % SDL2_PATH]
        extra_link_args += ['-rpath', '%s/..' % SDL2_PATH]
        extra_link_args += ['-rpath', '/usr/local/opt/llvm/lib/']
if sys.platform not in ['win32', 'darwin']:
    # Everywhere else, ask the system's sdl2-config for the correct flags.
    extra_parse_args += subprocess.check_output(['sdl2-config', '--cflags'],
                                                universal_newlines=True
                                                ).strip().split()
    extra_compile_args += extra_parse_args
    extra_link_args += subprocess.check_output(['sdl2-config', '--libs'],
                                               universal_newlines=True
                                               ).strip().split()
class CustomPostParser(c_ast.NodeVisitor):
    """AST post-processor that massages the parsed C header into a form
    cffi.cdef() will accept: drops duplicate/fake typedefs, stubs out
    computed enum/array values with '...', and removes declarations that
    cffi cannot handle (va_list functions, function definitions).
    """
    def __init__(self):
        self.ast = None                   # AST currently being processed
        self.typedefs = []                # typedef names seen so far
        self.removeable_typedefs = []     # typedef nodes queued for removal
        self.funcdefs = []                # function-definition nodes to drop
    def parse(self, ast):
        """Visit *ast*, then strip the nodes queued for removal; returns ast."""
        self.ast = ast
        self.visit(ast)
        for node in self.funcdefs:
            ast.ext.remove(node)
        for node in self.removeable_typedefs:
            ast.ext.remove(node)
        return ast
    def visit_Typedef(self, node):
        if node.name in ['wchar_t', 'size_t']:
            # remove fake typedef placeholders
            self.removeable_typedefs.append(node)
        else:
            self.generic_visit(node)
            if node.name in self.typedefs:
                # Duplicate typedef: keep the first, drop this one.
                print('warning: %s redefined' % node.name)
                self.removeable_typedefs.append(node)
            self.typedefs.append(node.name)
    def visit_EnumeratorList(self, node):
        """Replace enumerator expressions with '...' stubs."""
        for type, enum in node.children():
            if enum.value is None:
                pass
            elif isinstance(enum.value, (c_ast.BinaryOp, c_ast.UnaryOp)):
                enum.value = c_ast.Constant('int', '...')
            elif hasattr(enum.value, 'type'):
                enum.value = c_ast.Constant(enum.value.type, '...')
    def visit_ArrayDecl(self, node):
        # Computed array dimensions become '...' so cffi can size them itself.
        if not node.dim:
            return
        if isinstance(node.dim, (c_ast.BinaryOp, c_ast.UnaryOp)):
            node.dim = c_ast.Constant('int', '...')
    def visit_Decl(self, node):
        if node.name is None:
            self.generic_visit(node)
        elif (node.name and 'vsprint' in node.name or
              node.name in ['SDL_vsscanf',
                            'SDL_vsnprintf',
                            'SDL_LogMessageV']):
            # exclude va_list related functions
            self.ast.ext.remove(node)
        elif node.name in ['screen']:
            # exclude outdated 'extern SDL_Surface* screen;' line
            self.ast.ext.remove(node)
        else:
            self.generic_visit(node)
    def visit_FuncDef(self, node):
        """Exclude function definitions. Should be declarations only."""
        self.funcdefs.append(node)
def get_cdef():
    """Return the post-processed API as C source suitable for ffi.cdef()."""
    generator = c_generator.CGenerator()
    ast = get_ast()
    return generator.visit(ast)
def get_ast():
    """Parse the cffi header with pycparser and return the cleaned AST.

    The -D/-U flags neutralise compiler- and platform-specific macros so
    pycparser (which only understands plain C) can get through the SDL and
    libtcod headers.
    """
    global extra_parse_args
    if 'win32' in sys.platform:
        extra_parse_args += [r'-I%s/include' % SDL2_PATH]
    if 'darwin' in sys.platform:
        extra_parse_args += [r'-I%s/Headers' % SDL2_PATH]
    ast = parse_file(filename=CFFI_HEADER, use_cpp=True,
                     cpp_args=[r'-Idependencies/fake_libc_include',
                               r'-DDECLSPEC=',
                               r'-DSDLCALL=',
                               r'-DTCODLIB_API=',
                               r'-DSDL_FORCE_INLINE=',
                               r'-U__GNUC__',
                               r'-D_SDL_thread_h',
                               r'-DDOXYGEN_SHOULD_IGNORE_THIS',
                               r'-DMAC_OS_X_VERSION_MIN_REQUIRED=1060',
                               r'-D__attribute__(x)=',
                               r'-D_PSTDINT_H_INCLUDED',
                               ] + extra_parse_args)
    ast = CustomPostParser().parse(ast)
    return ast
# Can force the use of OpenMP with this variable.
# NOTE(review): eval() of an environment variable executes arbitrary code for
# whoever controls the build environment -- ast.literal_eval would be safer.
try:
    USE_OPENMP = eval(os.environ.get('USE_OPENMP', 'None').title())
except Exception:
    USE_OPENMP = None
tdl_build = os.environ.get('TDL_BUILD', 'RELEASE').upper()
# Per-toolchain compile/link flag tables keyed by build type.
MSVC_CFLAGS = {
    'DEBUG': ['/Od'],
    'RELEASE': ['/GL', '/O2', '/GS-'],
}
MSVC_LDFLAGS = {
    'DEBUG': [],
    'RELEASE': ['/LTCG'],
}
GCC_CFLAGS = {
    'DEBUG': ['-O0'],
    'RELEASE': ['-flto', '-O3', '-fPIC'],
}
if sys.platform == 'win32' and '--compiler=mingw32' not in sys.argv:
    extra_compile_args.extend(MSVC_CFLAGS[tdl_build])
    extra_link_args.extend(MSVC_LDFLAGS[tdl_build])
    if USE_OPENMP is None:
        # Default: enable OpenMP only on Python 3.5+ toolchains.
        USE_OPENMP = sys.version_info[:2] >= (3, 5)
    if USE_OPENMP:
        extra_compile_args.append('/openmp')
else:
    extra_compile_args.extend(GCC_CFLAGS[tdl_build])
    extra_link_args.extend(GCC_CFLAGS[tdl_build])
    if USE_OPENMP is None:
        # Default: off on macOS (Apple clang lacks OpenMP out of the box).
        USE_OPENMP = sys.platform != 'darwin'
    if USE_OPENMP:
        extra_compile_args.append('-fopenmp')
        extra_link_args.append('-fopenmp')
# Assemble the cffi builder: cdefs from the parsed header plus extras.
ffi = FFI()
ffi.cdef(get_cdef())
ffi.cdef(open(CFFI_EXTRA_CDEFS, 'r').read())
ffi.set_source(
    module_name, '#include <tcod/cffi.h>',
    include_dirs=include_dirs,
    library_dirs=library_dirs,
    sources=sources,
    libraries=libraries,
    extra_compile_args=extra_compile_args,
    extra_link_args=extra_link_args,
    define_macros=define_macros,
)
# Preamble written at the top of the generated tcod/constants.py module.
CONSTANT_MODULE_HEADER = '''"""
Constants from the libtcod C API.
This module is auto-generated by `build_libtcod.py`.
"""
from __future__ import absolute_import
from tcod.color import Color
'''
def write_library_constants():
    """Write libtcod constants into the tcod.constants module.

    Generates tcod/constants.py from the freshly compiled extension:
    upper-case TCOD_* names become plain constants, TCODK_* key codes
    become KEY_* names, and TCOD_color_t values become Color instances.
    """
    from tcod._libtcod import lib, ffi
    import tcod.color
    with open('tcod/constants.py', 'w') as f:
        f.write(CONSTANT_MODULE_HEADER)
        for name in dir(lib):
            value = getattr(lib, name)
            if name[:5] == 'TCOD_':
                if name.isupper(): # const names
                    f.write('%s = %r\n' % (name[5:], value))
            elif name.startswith('FOV'): # fov const names
                f.write('%s = %r\n' % (name, value))
            elif name[:6] == 'TCODK_': # key name
                f.write('KEY_%s = %r\n' % (name[6:], value))
        f.write('\n# --- colors ---\n')
        for name in dir(lib):
            # Only TCOD_* cdata values of type TCOD_color_t are colors.
            if name[:5] != 'TCOD_':
                continue
            value = getattr(lib, name)
            if not isinstance(value, ffi.CData):
                continue
            if ffi.typeof(value) != ffi.typeof('TCOD_color_t'):
                continue
            color = tcod.color.Color._new_from_cdata(value)
            f.write('%s = %r\n' % (name[5:], color))
if __name__ == "__main__":
    write_library_constants()
| [
"4b796c65@gmail.com"
] | 4b796c65@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.