Dataset columns (type and value statistics):

| Column | Type | Lengths / range / classes |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 2–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–69 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–118 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | lengths 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | lengths 2–10.3M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 0–212 |
8f7dc4bf523e6f673b501ed09ca49010f9c20a8a
|
b8acea93fc53b9c6e457959dd4b5446c520af272
|
/2017-01-13/workshop_10.py
|
600c94bc93b46a1d81f6ac1764e88c6500886593
|
[] |
no_license
|
MarcoFaretra93/ggpl
|
c35fbd5a07ee70fc6073626325d17eef84faa9e6
|
15066fc26c3af67d52b6670e265f9ec0e7113c42
|
refs/heads/master
| 2021-01-10T22:50:11.618623
| 2017-01-28T10:09:56
| 2017-01-28T10:09:56
| 70,344,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,523
|
py
|
from pyplasm import *
import csv
import src.workshop_08 as build_floor
import src.workshop_07 as windowsDoors
import src.workshop_03 as quarterTurnStairs
import src.workshop_09 as roof_builder
XWindow = [2,3,2,3,2]
YWindow = [2,3,2,3,2]
occurrencyWindow = [[True, True, True, True, True],
[True, False, True, False, True],
[True, True, True, True, True],
[True, False, True, False, True],
[True, True, True, True, True]]
XDoor = [.2, .2, .05, .2, .05, .2, .3, .2, .05, .2 ,.05, .2, .2]
YDoor = [.2, .2, .05, .2, .05, 1, .05, .2, .05, .2, .2]
occurencyDoor = [[True, True, True, True, True, True, True, True, True, True, True, True, True],
[True, False, False, False, False, False, True, False, False, False, False, False, True],
[True, False, True, True, True, True, True, True, True, True, True, False, True],
[True, False, True, False, False, False, True, False, False, False, True, False, True],
[True, False, True, False, True, True, True, True, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, True, True, True, True, False, True, False, True],
[True, False, True, False, False, False, True, False, False, False, True, False, True],
[True, False, True, True, True, True, True, True, True, True, True, False, True],
[True, False, False, False, False, False, True, False, False, False, False, False, True],
[True, True, True, True, True, True, True, True, True, True, True, True, True]]
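# In the occupancy matrices above, True marks a full (solid) cell and False an
# empty one (a window or door opening), matching the docstrings further down.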
listExternalWalls = build_floor.create_walls('first_model_muri_esterni_1.lines')
externalWalls = STRUCT(listExternalWalls)
xfactor = 15/SIZE([1])(externalWalls)[0]
yfactor = 15.1/SIZE([2])(externalWalls)[0]
zfactor = xfactor
listExternalWalls2 = build_floor.create_walls('second_model_muri_esterni_1.lines')
externalWalls2 = STRUCT(listExternalWalls2)
xfactor2 = 15/SIZE([1])(externalWalls2)[0]
yfactor2 = 15.1/SIZE([2])(externalWalls2)[0]
zfactor2 = xfactor2
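# The factors above rescale each model so that its footprint measures roughly
# 15 x 15.1 units; zfactor reuses the x factor so heights stay proportional.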
def multistorey_house(nFloors, baseString, xfactor, yfactor, zfactor):
    """
    multistorey_house returns the curried function chain that builds the HPC model of the house.
    @param nFloors: number of floors.
    @param baseString: prefix of the .lines file names.
    @param xfactor: float scale factor along x, also used to derive heights.
    @param yfactor: float scale factor along y, also used to derive heights.
    @param zfactor: float scale factor along z, also used to derive heights.
    @return renderWindows: function that continues the construction of the HPC model.
    """
    def renderWindows(XWindow, YWindow, occurrencyWindow):
        """
        renderWindows continues the curried construction of the house model.
        @param XWindow: float list of the window cell sizes along the X axis.
        @param YWindow: float list of the window cell sizes along the Y axis.
        @param occurrencyWindow: bool matrix marking the full and empty cells.
        @return renderDoors: function that continues the construction of the HPC model.
        """
        def renderDoors(XDoor, YDoor, occurencyDoor):
            """
            renderDoors continues the curried construction of the house model.
            @param XDoor: float list of the door cell sizes along the X axis.
            @param YDoor: float list of the door cell sizes along the Y axis.
            @param occurencyDoor: bool matrix marking the full and empty cells.
            @return renderFloor: function that builds the final HPC model.
            """
            def renderFloor(verts, angle, height):
                """
                renderFloor returns the HPC model of the house.
                @param verts: path of a CSV file holding the 2D vertices that define the roof-bottom shape.
                @param angle: angle used to rotate the roof planes.
                @param height: height of the roof.
                @return house: HPC model of the house.
                """
                all_floor = []
                # building the roof model
                with open(verts) as file:
                    reader = csv.reader(file, delimiter=",")
                    new_verts = []
                    for row in reader:
                        new_verts.append([float(row[0]), float(row[1])])
                roofModel = roof_builder.buildRoof(new_verts, angle, height)
                roofModel = T([3])([nFloors*3/zfactor])(roofModel)
                roofModel = S([1,2,3])([xfactor*1.09, yfactor*1.09, zfactor])(roofModel)
                roofModel = T([1,2])([-SIZE([1])(roofModel)[0]*0.05, -SIZE([2])(roofModel)[0]*0.05])(roofModel)
                for i in range(nFloors):
                    floor_lines = [baseString + '_muri_esterni_'+str(i+1)+'.lines', baseString + '_muri_interni_'+str(i+1)+'.lines', baseString + '_porte_'+str(i+1)+'.lines', baseString + '_finestre_'+str(i+1)+'.lines', baseString + '_scale_'+str(i)+'.lines', baseString + '_scale_'+str(i+1)+'.lines']
                    floor = build_floor.ggpl_building_house(floor_lines,
                                                            windowsDoors.window_main(XWindow, YWindow, occurrencyWindow),
                                                            windowsDoors.door_main(YDoor, XDoor, occurencyDoor),
                                                            quarterTurnStairs, i, nFloors-1)
                    all_floor.append(floor)
                all_floor = STRUCT(all_floor)
                return STRUCT([all_floor, roofModel])
            return renderFloor
        return renderDoors
    return renderWindows
VIEW(multistorey_house(2, 'first_model', xfactor, yfactor, zfactor)(XWindow, YWindow, occurrencyWindow)(XDoor, YDoor, occurencyDoor)('first_model_muri_esterni_1.lines', PI/5., 3/zfactor))
VIEW(multistorey_house(2, 'second_model', xfactor2, yfactor2, zfactor2)(XWindow, YWindow, occurrencyWindow)(XDoor, YDoor, occurencyDoor)('second_model_muri_esterni_1.lines', PI/5., 3/zfactor2))
|
[
"mar.faretra@stud.uniroma3.it"
] |
mar.faretra@stud.uniroma3.it
|
1f33da26cd31882e2a4cbb24cd7ec3852e4adaa7
|
eadcdcdbe46a5208f163ef22e668e42d6fff94a6
|
/src/sfctl/helps/deployment.py
|
a4108bb340bce66fecd2aa5a036e470330ad3be3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
yu-supersonic/service-fabric-cli
|
beed8c4b4d8a17a5fbcb5d0578a8e6c166dd9695
|
cc2838597e7d236852c6d95e1b5c54980e0fac96
|
refs/heads/master
| 2023-04-11T15:04:25.213876
| 2021-02-25T19:39:23
| 2021-02-25T19:39:23
| 348,077,179
| 0
| 0
|
NOASSERTION
| 2021-04-07T11:33:05
| 2021-03-15T18:16:11
| null |
UTF-8
|
Python
| false
| false
| 1,820
|
py
|
# -----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
"""Help documentation for managing Service Fabric Mesh Resources."""
from knack.help_files import helps
helps['mesh deployment create'] = """
    type: command
    short-summary: Creates a deployment of Service Fabric Mesh Resources
    parameters:
        - name: --input-yaml-files
          type: string
          short-summary: Comma separated relative/absolute file paths of all the yaml files or relative/absolute path of the directory (recursive) which contain yaml files
        - name: --parameters
          type: string
          short-summary: A relative/absolute path to yaml file or a json object which contains the parameters that need to be overridden
    examples:
        - name: Consolidates and deploys all the resources to cluster by overriding the parameters mentioned in the yaml file
          text: sfctl mesh deployment create --input-yaml-files ./app.yaml,./network.yaml --parameters ./param.yaml
        - name: Consolidates and deploys all the resources in a directory to cluster by overriding the parameters mentioned in the yaml file
          text: sfctl mesh deployment create --input-yaml-files ./resources --parameters ./param.yaml
        - name: Consolidates and deploys all the resources in a directory to cluster by overriding the parameters which are passed directly as json object
          text: >
            sfctl mesh deployment create --input-yaml-files ./resources --parameters "{ 'myparam' : {'value' : 'myvalue'} }"
"""
|
[
"Christina-Kang@users.noreply.github.com"
] |
Christina-Kang@users.noreply.github.com
|
57c8c4f7a53557e403719802170a2e4a7bd660c6
|
9ecd7568b6e4f0f55af7fc865451ac40038be3c4
|
/tianlikai/hubei/enshi_zhongbiao.py
|
aa1eb42ebd5cbeb6d019ac1072c18bf552fa29cc
|
[] |
no_license
|
jasonTLK/scrapy
|
f5ac6e575e902c077a07dc0eb9d228506f1a173f
|
2de8245fbc8731cfd868bbd91168e26271045300
|
refs/heads/master
| 2021-01-20T04:22:23.080864
| 2017-04-28T07:46:29
| 2017-04-28T07:46:29
| 89,681,374
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,967
|
py
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request
from scrapy.selector import Selector
try:
    from scrapy.spiders import Spider
except:
    from scrapy.spiders import BaseSpider as Spider
import datetime
from items.biding import biding_gov
from utils.toDB import *
# Hubei Enshi public bidding and tendering website
# Winning-bid (award) announcements
class hz_gov_Spider(scrapy.Spider):
    name = "enshi_zhongbiao.py"
    allowed_domains = ["eszggzy.cn"]
    custom_settings = {
        "DOWNLOADER_MIDDLEWARES": {
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
            'middlewares.useragent_middleware.RandomUserAgent': 400,
            # 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
            # 'middlewares.proxy_middleware.ProxyMiddleware': 250,
            # 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            # 'middlewares.retry_middleware.RetryWithProxyMiddleware': 300,
            # 'middlewares.timestamp_middleware.TimestampMiddleware': 120
        }
    }

    def start_requests(self):
        urls = [
            "http://www.eszggzy.cn/TPFront/jyxx/070001/070001003/?Paging=",
            "http://www.eszggzy.cn/TPFront/jyxx/070002/070002003/?Paging=",
        ]
        pages = [21, 20]
        for i in range(len(urls)):
            num = 1
            while num <= pages[i]:
                url = urls[i] + str(num)
                num += 1
                # print url
                yield Request(url=url, callback=self.parse)

    # start_urls = [
    #     "http://www.eszggzy.cn/TPFront/jyxx/070001/070001003/?Paging=1"
    # ]

    def parse(self, response):
        selector = Selector(response)
        names = selector.xpath("//td[@align='left']//a/@title").extract()
        urls = selector.xpath("//td[@align='left']//a/@href").extract()
        print len(names), len(urls)
        for i in range(len(names)):
            url = "http://www.eszggzy.cn" + "".join(urls[i+4])
            str = "".join(names[i]) + "," + url
            print str
            yield Request(url=url, callback=self.parse2, meta={"info": str})

    def parse2(self, response):
        infos = response.meta["info"]
        items = biding_gov()
        items["url"] = response.url
        items["name"] = "".join(infos).split(",")[0]
        items["info"] = ""
        items["create_time"] = datetime.datetime.now()
        items["update_time"] = datetime.datetime.now()
        page_info = "".join(response.body)
        items["info"] = "".join(page_info).decode('gbk')
        db = MongodbHandle("172.20.3.10 ", 27017, "spiderBiding")
        db.get_insert(
            "bid_hubei_EnShi",
            {
                "url": items["url"],
                "name": items["name"],
                "info": items["info"],
                "create_time": items["create_time"],
                "update_time": items["update_time"]
            }
        )
        print items["url"]
        print items["name"]
|
[
"18723163167@163.com"
] |
18723163167@163.com
|
b847e5e8b658046c0bc3299f6804066cdcf0f731
|
3701627e94b237d3402254ca8c752ceef4a766c5
|
/tools/megavii/get_list_init.py
|
6943564966f4334e6170d0e006e8f553e37fa888
|
[] |
no_license
|
shenmayufei/faceAlignment
|
358f5ddd3f9f4a487b3d6d4d7323a1f8b56bb876
|
5e7a6b01e36969d1112c5920beda8c29d70dc458
|
refs/heads/master
| 2022-09-08T18:12:48.839249
| 2020-06-02T11:43:04
| 2020-06-02T11:43:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
import os
import glob
path_data = '/ssd/cxzhao/data/quality_badcase/hf_sq/imgs'
path_json = '/ssd/cxzhao/data/quality_badcase/hf_sq/json_530'
os.makedirs(path_json, exist_ok=True)
path_txt = '/ssd/cxzhao/data/quality_badcase/hf_sq/test_list_json_pairs.txt'
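# process() below pairs each image with a same-named .txt annotation path under
# path_json and writes one "image annotation" pair per line to path_txt.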
def process():
    IMAGES = glob.glob(os.path.join(path_data, '*/*.jpg'))
    lines = []
    for k, img_path in enumerate(IMAGES):
        img_name = os.path.split(img_path)[-1]
        json_path = os.path.join(path_json, img_name[:-4] + '.txt')
        lines.append(img_path + ' ' + json_path + '\n')
    with open(path_txt, 'w') as fid:
        fid.writelines(lines)

if __name__ == '__main__':
    process()
|
[
"1028377876@qq.com"
] |
1028377876@qq.com
|
a077a0210a9ead65fa1545bd027f14dc8f564d61
|
20eac8f94ef0a59ef65670545e2f1b8fb719425f
|
/eventos/migrations/0002_auto_20210522_0314.py
|
78766c13cfa331ea9b1aefb40689a4801f28a1f3
|
[] |
no_license
|
edgardo28081/gomez
|
fcc1c3fd9d9ce76a8a892d16c78d162556f067dc
|
b51f3af15d413789298488736365011acd77ee43
|
refs/heads/main
| 2023-05-31T17:39:06.234220
| 2021-06-10T19:54:46
| 2021-06-10T19:54:46
| 375,813,783
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
# Generated by Django 3.2 on 2021-05-22 07:14
from django.db import migrations
class Migration(migrations.Migration):

    dependencies = [
        ('eventos', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='recuerdos',
            name='foto4',
        ),
        migrations.RemoveField(
            model_name='recuerdos',
            name='foto5',
        ),
        migrations.RemoveField(
            model_name='recuerdos',
            name='foto6',
        ),
    ]
|
[
"edgardomarcano04@gmail.com"
] |
edgardomarcano04@gmail.com
|
18c073c6bc7f6d571b0b80d928dd78cb157909bf
|
96796658623856b13bd32d0c22ded0a2fc4647f6
|
/src/scheduler.py
|
27f8a0fc090b7f7cbebf4323857dd559c93513d4
|
[] |
no_license
|
mifkoff/tgstarter_template
|
a0624ada0503815155c8af78efaacbac4ba7a015
|
9eba9d9802d7823f814c958a3ba0639bc86a62e1
|
refs/heads/master
| 2023-05-27T03:06:03.787996
| 2020-07-17T08:11:52
| 2020-07-17T08:11:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
import asyncio
import calendar
from datetime import datetime
import logging
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.date import DateTrigger
from apscheduler.triggers.interval import IntervalTrigger
from src.preparation import scheduler, config
timezone = config.bot.timezone
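# The @scheduler.scheduled_job decorators below register each job with the
# scheduler at import time; main() then only starts the scheduler and keeps
# the asyncio event loop running forever.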
def main() -> None:
    scheduler.start()
    loop = asyncio.get_event_loop()
    loop.run_forever()

@scheduler.scheduled_job(DateTrigger(timezone=timezone, run_date=datetime.now()))
async def instant_job() -> None:
    print('INSTANT JOB')

@scheduler.scheduled_job(CronTrigger(timezone=timezone, day_of_week=calendar.MONDAY, hour=8, minute=0))
async def cron_job() -> None:
    print('CRON JOB')

@scheduler.scheduled_job(IntervalTrigger(timezone=timezone, seconds=10))
async def interval_job() -> None:
    print('INTERVAL JOB: 10 SECONDS HAVE PASSED')

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    main()
|
[
"t.lila@list.ru"
] |
t.lila@list.ru
|
a78ef1ec62d22b6a0115f1683a16eea2032e2160
|
bf834e52d25af1f59651ee5c3e0c6e5539ec3c19
|
/BizViz/urls.py
|
c435b7c213cd37d5a7be835ca2a473e13a21007b
|
[] |
no_license
|
bin7665/BizViz
|
fe7781d80fedba027eb5a1bc4a319ca588e108e8
|
99a7deb1bf19ff1f7f6ee30800355e4b56d4a077
|
refs/heads/master
| 2023-08-06T07:06:41.973326
| 2020-08-19T05:40:25
| 2020-08-19T05:40:25
| 281,556,175
| 0
| 0
| null | 2021-09-22T19:28:58
| 2020-07-22T02:40:15
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 318
|
py
|
from . import views
from django.urls import path
urlpatterns = [
    path('', views.IndexView.as_view(), name='index'),
    path('statistics/', views.statistics, name='statistics'),
    path('statistics/<str:pk>/', views.department, name='department'),
    path('support/<int:pk>/', views.content, name='content'),
]
|
[
"bin7665@naver.com"
] |
bin7665@naver.com
|
e61d248ab9d60f7194933ccc8cf31c297f485cc2
|
98f1a0bfa5b20a0b81e9e555d76e706c62d949c9
|
/examples/pytorch/dimenet/modules/envelope.py
|
b9d89620f674a562a255f52694e36235733374cc
|
[
"Apache-2.0"
] |
permissive
|
dmlc/dgl
|
3a8fbca3a7f0e9adf6e69679ad62948df48dfc42
|
bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1
|
refs/heads/master
| 2023-08-31T16:33:21.139163
| 2023-08-31T07:49:22
| 2023-08-31T07:49:22
| 130,375,797
| 12,631
| 3,482
|
Apache-2.0
| 2023-09-14T15:48:24
| 2018-04-20T14:49:09
|
Python
|
UTF-8
|
Python
| false
| false
| 610
|
py
|
import torch.nn as nn
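# A note on the class below: with p = exponent + 1 it computes u(x)/x, where
#   u(x) = 1 - (p+1)(p+2)/2 * x**p + p(p+2) * x**(p+1) - p(p+1)/2 * x**(p+2)
# is the DimeNet-style polynomial envelope, chosen so that u vanishes smoothly
# at the cutoff (x = 1).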
class Envelope(nn.Module):
    """
    Envelope function that ensures a smooth cutoff
    """
    def __init__(self, exponent):
        super(Envelope, self).__init__()
        self.p = exponent + 1
        self.a = -(self.p + 1) * (self.p + 2) / 2
        self.b = self.p * (self.p + 2)
        self.c = -self.p * (self.p + 1) / 2

    def forward(self, x):
        # Envelope function divided by r
        x_p_0 = x.pow(self.p - 1)
        x_p_1 = x_p_0 * x
        x_p_2 = x_p_1 * x
        env_val = 1 / x + self.a * x_p_0 + self.b * x_p_1 + self.c * x_p_2
        return env_val
|
[
"noreply@github.com"
] |
dmlc.noreply@github.com
|
71dca6cc57779157bdded3ae3d5cc0d48ce59ab8
|
9c0cd6dfc174167a416055ccd3d19d536d37f139
|
/gym-task/gym_task/envs/InhibitoryControl.py
|
59f811bf9d544305bf852c5e95ceadcb2be62020
|
[] |
no_license
|
emirakdere/tangled_program_graphs_research
|
5b0a7738cc5dfd2642a8f1e6539d7650efe0c69a
|
fb82247714c79156ee59f97f50171d0de178fec5
|
refs/heads/master
| 2023-02-02T05:25:12.940302
| 2020-12-18T14:26:28
| 2020-12-18T14:26:28
| 299,354,000
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,581
|
py
|
import numpy as np
import cv2
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from gym_task.envs.task_env import TaskEnv
# Task 6: Monkey fixate on center fixation, Monkey see stimulus in target location,
# Monkey see color cue (fixation's color changes),
# Monkey move towards or away from target based on that color.
class InhibitoryControl(TaskEnv, gym.Env):
    def __init__(self):
        super().__init__()
        self.reset()
        self.action_space = spaces.Discrete(9)

    def step(self, action):
        return self._categoryStep(action)

    def reset(self):
        self.TIME = 120
        self.experiment = np.zeros((self.TIME, self.DIM_Y, self.DIM_X))
        halfSec = self.TIME//8
        self.fixationTime = (0, 5*halfSec)
        self.stimulusTime = (2*halfSec, self.TIME)
        colorCue = (5*halfSec, self.TIME)
        antisaccade = np.random.random_sample() < .5
        targetLoc = [self.midY, 5*(self.DIM_X//6)]
        self.experiment[self.fixationTime[0]:self.fixationTime[1], self.midY-1:self.midY+2, self.midX-1:self.midX+2] = 1.
        self.experiment[self.stimulusTime[0]:self.stimulusTime[1], targetLoc[0]-3:targetLoc[0]+4, targetLoc[1]-3:targetLoc[1]+4] = 1.
        self.experiment[colorCue[0]:colorCue[1], self.midY-1:self.midY+2, self.midX-1:self.midX+2] = .25 if antisaccade else .75
        # 8 > aim >= 4 means antisaccade, 4 > aim means saccade (action=8 is waiting)
        self.aim = 7 if antisaccade else 0
        self.currFrame = 0
        return self.experiment[0]
|
[
"emirakdere@gmail.com"
] |
emirakdere@gmail.com
|
e1dbbafc9e69b86598c57f49842afb0bfbde867f
|
629d3eaaaf4c83cb9d4cb863460e17d59a0bcb1c
|
/Skills/train_Magery.py
|
9695975ba533e428db506279886282f7468ab1e0
|
[] |
no_license
|
Maupishon/Razor-Enhanced
|
a8a514ddae5e03f8ad8b795e880750afc157c2f1
|
f686c8bd456728311757db22a13e25551bbfc9e1
|
refs/heads/master
| 2023-03-07T22:27:58.484194
| 2021-02-26T03:09:33
| 2021-02-26T03:09:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,696
|
py
|
# Trains Magery to GM
# Change resistTrain to True if you are training resist and/or
# don't want to take damage
# False will make it cast dmg spells on yourself, which use less regs.
# but require someone healing you, or you to have healing.
# By MatsaMilla
resistTrain = True
# ------------------------------------
self = Player.Serial
pearl = 0x0F7A
root = 0x0F86
shade = 0x0F88
silk = 0x0F8D
moss = 0x0F7B
ginseng = 0x0F85
garlic = 0x0F84
ash = 0x0F8C
def trainMageryNoResist():
    while Player.Hits < 45:
        Misc.Pause(100)
    if Player.GetRealSkillValue('Magery') < 35:
        Misc.SendMessage('Go buy Magery skill!!')
        Stop
    elif Player.GetRealSkillValue('Magery') < 65:
        Spells.CastMagery('Mind Blast')
        Target.WaitForTarget(2500)
        Target.TargetExecute(self)
        Misc.Pause(2500)
    elif Player.GetRealSkillValue('Magery') < 85:
        Spells.CastMagery('Energy Bolt')
        Target.WaitForTarget(2500)
        Target.TargetExecute(self)
        Misc.Pause(2500)
    elif Player.GetRealSkillValue('Magery') < 100:
        Spells.CastMagery("Flamestrike")
        Target.WaitForTarget(2500)
        Target.TargetExecute(self)
        Misc.Pause(2500)
    if Player.Mana < 40:
        Player.UseSkill('Meditation')
        while Player.Mana < Player.ManaMax:
            if (not Player.BuffsExist('Meditation') and not Timer.Check('skillTimer')):
                Player.UseSkill('Meditation')
                Timer.Create('skillTimer', 11000)
            Misc.Pause(100)
def trainMage():
    if Player.GetRealSkillValue('Magery') < 35:
        Misc.SendMessage('Go buy Magery skill!!')
        Stop
    elif Player.GetRealSkillValue('Magery') < 55:
        Spells.CastMagery('Mana Drain')
        Target.WaitForTarget(2500)
        Target.TargetExecute(self)
        Misc.Pause(2500)
    elif Player.GetRealSkillValue('Magery') < 75:
        Spells.CastMagery('Invisibility')
        Target.WaitForTarget(2500)
        Target.TargetExecute(self)
        Misc.Pause(2500)
    elif Player.GetRealSkillValue('Magery') < 100:
        Spells.CastMagery('Mana Vampire')
        Target.WaitForTarget(2500)
        Target.TargetExecute(self)
        Misc.Pause(2500)
    if Player.Mana < 40:
        Player.UseSkill('Meditation')
        while Player.Mana < Player.ManaMax:
            if (not Player.BuffsExist('Meditation') and not Timer.Check('skillTimer')):
                Player.UseSkill('Meditation')
                Timer.Create('skillTimer', 11000)
            Misc.Pause(100)
def checkRegs():
    if Items.BackpackCount(pearl, -1) < 2:
        Misc.SendMessage('Low on Pearl!')
        Stop
    elif Items.BackpackCount(root, -1) < 2:
        Misc.SendMessage('Low on Root!')
        Stop
    elif Items.BackpackCount(shade, -1) < 2:
        Misc.SendMessage('Low on Shade!')
        Stop
    elif Items.BackpackCount(silk, -1) < 2:
        Misc.SendMessage('Low on Silk!')
        Stop
    elif Items.BackpackCount(garlic, -1) < 2:
        Misc.SendMessage('Low on Garlic!')
        Stop
    elif Items.BackpackCount(ash, -1) < 2:
        Misc.SendMessage('Low on Ash!')
        Stop
    elif Items.BackpackCount(moss, -1) < 2:
        Misc.SendMessage('Low on Moss!')
        Stop
    elif Items.BackpackCount(ginseng, -1) < 2:
        Misc.SendMessage('Low on Ginseng!')
        Stop
Journal.Clear()
while Player.GetRealSkillValue('Magery') < 100:
    if resistTrain:
        trainMage()
    else:
        trainMageryNoResist()
    checkRegs()
Player.ChatSay(33, 'GM Magery')
|
[
"noreply@github.com"
] |
Maupishon.noreply@github.com
|
23f09a5d1433dd1e6722e51b9796bd93fe04c67d
|
8b74027c56dfc7870787796bcf07816a868e427a
|
/MAE6263_CFD/HW3/Modular/poisson_solvers/trial/hybrid_poisson_solver_v2.py
|
9da30a260c871142375d48e414d56559da6d7292
|
[] |
no_license
|
surajp92/Random_Codes
|
f5195a428f1aee6b6922d1170266664edc4b29d5
|
e928e322c88bc0b43fc3fee3d31ba3452585fdd2
|
refs/heads/master
| 2021-06-09T07:47:09.201359
| 2021-05-03T10:07:55
| 2021-05-03T10:07:55
| 177,000,685
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,251
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 12 18:23:30 2021
@author: suraj
"""
import numpy as np
from numpy.random import seed
seed(1)
import pyfftw
from scipy import integrate
from scipy import linalg
import matplotlib.pyplot as plt
import time as tm
import matplotlib.ticker as ticker
import os
from numba import jit
from scipy.fftpack import dst, idst
from scipy.ndimage import gaussian_filter
import yaml
font = {'family' : 'Times New Roman',
'size' : 14}
plt.rc('font', **font)
#%%
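# tdma() below is the Thomas algorithm for tridiagonal systems: one forward
# elimination sweep followed by back substitution, vectorized over the columns
# of the right-hand side r. a, b, c hold the sub-, main- and super-diagonals;
# s and e are the first and last row indices of the solve.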
def tdma(a,b,c,r,s,e):
    a_ = np.copy(a)
    b_ = np.copy(b)
    c_ = np.copy(c)
    r_ = np.copy(r)
    un = np.zeros((np.shape(r)[0],np.shape(r)[1]), dtype='complex128')
    for i in range(s+1,e+1):
        b_[i,:] = b_[i,:] - a_[i,:]*(c_[i-1,:]/b_[i-1,:])
        r_[i,:] = r_[i,:] - a_[i,:]*(r_[i-1,:]/b_[i-1,:])
    un[e,:] = r_[e,:]/b_[e,:]
    for i in range(e-1,s-1,-1):
        un[i,:] = (r_[i,:] - c_[i,:]*un[i+1,:])/b_[i,:]
    del a_, b_, c_, r_
    return un
#%%
nx = 16
ny = 16
x_l = 0.0
x_r = 2.0
y_b = 0.0
y_t = 2.0
dx = (x_r-x_l)/nx
dy = (y_t-y_b)/ny
x = np.linspace(x_l, x_r, nx+1)
y = np.linspace(y_b, y_t, ny+1)
xm, ym = np.meshgrid(x,y, indexing='ij')
km = 16.0
c1 = (1.0/km)**2
c2 = -2.0*np.pi**2
ue = np.sin(2.0*np.pi*xm)*np.sin(2.0*np.pi*ym) + \
c1*np.sin(km*np.pi*xm)*np.sin(km*np.pi*ym)
f = 4.0*c2*np.sin(2.0*np.pi*xm)*np.sin(2.0*np.pi*ym) + \
c2*np.sin(km*np.pi*xm)*np.sin(km*np.pi*ym)
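# ue above is the manufactured exact solution and f its Laplacian
# (f = -8*pi^2*sin(2*pi*x)*sin(2*pi*y) - 2*pi^2*sin(km*pi*x)*sin(km*pi*y)),
# so the discrete problem being solved is the Poisson equation del^2(u) = f.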
#%%
epsilon = 1.0e-6
aa = -2.0/(dx*dx) - 2.0/(dy*dy)
bb = 2.0/(dx*dx)
cc = 2.0/(dy*dy)
beta = dx/dy
a4 = -10.0*(1.0 + beta**2)
b4 = 5.0 - beta**2
c4 = 5.0*beta**2 -1.0
d4 = 0.5*(1.0 + beta**2)
e4 = 0.5*(dx**2)
# wave_number_coord = np.arange(-int(nx/2), int(nx/2)) #*(2.0*np.pi)
# wave_number_coord = np.fft.fftfreq(nx, d = 1/nx)
# Lx = nx*dx
# wave_number = wave_number_coord*(2.0*np.pi/Lx)
wave_number = np.arange(0,nx)
kx = np.copy(wave_number)
kx[0] = epsilon
# cos_kx = np.cos(kx)
cos_kx = np.cos(2.0*np.pi*kx/nx)
data = np.empty((nx,ny-1), dtype='complex128')
data1 = np.empty((nx,ny-1), dtype='complex128')
data[:,:] = np.vectorize(complex)(f[0:nx,1:ny],0.0)
a = pyfftw.empty_aligned((nx,ny-1),dtype= 'complex128')
b = pyfftw.empty_aligned((nx,ny-1),dtype= 'complex128')
fft_object = pyfftw.FFTW(a, b, axes = (0,), direction = 'FFTW_FORWARD')
fft_object_inv = pyfftw.FFTW(a, b,axes = (0,), direction = 'FFTW_BACKWARD')
data_f = np.fft.fft(data, axis=0)
# data_f = fft_object(data)
# data_f = np.abs(data_f)
#e = pyfftw.interfaces.scipy_fftpack.fft2(data)
# data_f[0,0] = 0.0
# j = 0
# data1[:,j] = np.zeros(nx, dtype='complex128')
# j = ny
# data1[:,j] = np.zeros(nx, dtype='complex128')
alpha_k = c4 + 2.0*d4*cos_kx
beta_k = a4 + 2.0*b4*cos_kx
alpha_k = np.reshape(alpha_k,[-1,1])
beta_k = np.reshape(beta_k,[-1,1])
A = np.zeros((nx,ny))
for i in range(nx):
    A[i,i] = beta_k[i,0]
    if i > 0:
        A[i,i-1] = alpha_k[i,0]
    if i < nx-1:
        A[i,i+1] = alpha_k[i,0]
AI = np.linalg.inv(A)
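# AI appears to be left over from a dense-solve variant (see the commented-out
# "temp = AI @ rr" below); the tridiagonal solve via tdma supersedes it.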
for j in range(ny-2):
    # print(j)
    if j == 0:
        rr = e4*((8.0 + 2.0*cos_kx)*data_f[:,j] + data_f[:,j+1])
    elif j == ny-2:
        rr = e4*(data_f[:,j-1] + (8.0 + 2.0*cos_kx)*data_f[:,j] + data_f[:,j+1])
    else:
        rr = e4*(data_f[:,j-1] + (8.0 + 2.0*cos_kx)*data_f[:,j])
    rr = np.reshape(rr,[-1,1])
    # temp = AI @ rr
    temp = tdma(alpha_k,beta_k,alpha_k,rr,0,nx-1)
    data1[:,j] = temp.flatten()
# data1[:,:] = data_f[:,:]/(aa + bb*kx[:,:] + cc*ky[:,:])
data2 = np.zeros((nx,ny+1), dtype='complex128')
data2[:,1:ny] = data1
ut = np.real(np.fft.ifft(data2, axis=0))
# ut = np.real(fft_object_inv(data1))
#periodicity
u = np.zeros((nx+1,ny+1))
u[0:nx,0:ny+1] = ut
# u[:,ny] = u[:,0]
u[nx,:] = u[0,:]
u[nx,ny] = u[0,0]
fig, axs = plt.subplots(1,2,figsize=(14,5))
cs = axs[0].contourf(xm, ym, ue, 60,cmap='jet')
#cax = fig.add_axes([1.05, 0.25, 0.05, 0.5])
fig.colorbar(cs, ax=axs[0], orientation='vertical')
cs = axs[1].contourf(xm, ym, u,60,cmap='jet')
#cax = fig.add_axes([1.05, 0.25, 0.05, 0.5])
fig.colorbar(cs, ax=axs[1], orientation='vertical')
plt.show()
fig.tight_layout()
print(np.linalg.norm(ue - u))
|
[
"pawarsuraj92@gmail.com"
] |
pawarsuraj92@gmail.com
|
a4f2c36e4c3b0cede51f060454ace8927faf42d4
|
1fd180ffcaf78a8ef5029a753e8b7ebd6aa7cdc6
|
/todolistapp/wsgi.py
|
a0714e3a7cfcb7f303b675f6ec51b5eec97c91a5
|
[] |
no_license
|
Ayush900/todo-list-app
|
05033615e7c521c16b4f840bd5401eb4c8bb7fd7
|
1f9c30dedab0ef1da9d08361a097bf31eec5c3f8
|
refs/heads/master
| 2022-12-25T19:53:06.353732
| 2020-10-01T07:04:22
| 2020-10-01T07:04:22
| 269,395,956
| 0
| 2
| null | 2020-10-01T07:04:23
| 2020-06-04T15:26:49
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for todolistapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'todolistapp.settings')
application = get_wsgi_application()
|
[
"ayush.mehrotra900@gmail.com"
] |
ayush.mehrotra900@gmail.com
|
a743f62797f79b84b388a7e62f55f385a17d8748
|
583fd6b6c826e2c4d23dfcff9d51fd7105b58289
|
/apps/user/migrations/0002_userprofile_employee_type.py
|
641740c4552f2045f12688a8a3e9449ecd99246b
|
[] |
no_license
|
mjoyshuvo/RestaurantApp
|
eb47344804c8aa6caa2d3417ec174523f09b3496
|
b1fdc1b5c38fcb7f7239871d5e3bc80179f86f38
|
refs/heads/master
| 2023-05-18T17:05:09.048594
| 2021-06-12T06:24:07
| 2021-06-12T06:24:07
| 373,832,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
# Generated by Django 3.0.7 on 2021-06-05 10:46
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('user', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='employee_type',
            field=models.CharField(choices=[('Employee', 'Employee'), ('Restaurant', 'Restaurant')], default='Employee', max_length=10),
        ),
    ]
|
[
"mrityunjoy.das@adndiginet.com"
] |
mrityunjoy.das@adndiginet.com
|
b5cb0f2a23fb3dc6094dfc2ed5c8ae6a36b4cacd
|
c23349b824849b066c6a2d68958206055b021177
|
/PP9.py
|
7900d26506b4796819faecdb3b66013383849e46
|
[] |
no_license
|
veera-sivarajan/LearningPython
|
5f84fe0b98b6649768881fa7ff7d2d07ec75b21d
|
0a08c1e5f0dfe79b48ac1b4bbde7e13a0356472e
|
refs/heads/master
| 2020-05-05T04:27:17.794941
| 2019-07-03T04:33:43
| 2019-07-03T04:33:43
| 179,712,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
import random
random = random.randint(1,9)
guess = int(input("Enter a number:"))
if random == guess:
    print("Your guess is right!")
elif random > guess:
    print("Your guess is too low")
elif random < guess:
    print("Your guess is too high")
|
[
"sveera.2001@gmail.com"
] |
sveera.2001@gmail.com
|
8bf02c256d73472a61e065933f71d8e075957de5
|
a3d1e8a67ed43e1bea59180cc51c49f25a961a49
|
/scripts/dg2dotty
|
1aee7a8c68572dcdabdf99da9567433445ae7d8b
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
WladimirSidorenko/TextNormalization
|
38b076d88a2de40dae72dc8b4096e354b774f2f4
|
ac645fb41260b86491b17fbc50e5ea3300dc28b7
|
refs/heads/master
| 2020-04-14T16:48:42.541883
| 2019-09-29T23:38:28
| 2019-09-29T23:38:28
| 163,962,092
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,209
|
#!/usr/bin/env python2.7
# -*- coding: utf-8; -*-
"""
Utility for representing DG trees in DOTTY format.
Read a DG tree in CONLL-2009 format and output the read tree in GRAPHVIZ
format.
Input format (meaning of columns):
ID FORM LEMMA PLEMMA POS PPOS FEAT PFEAT HEAD PHEAD DEPREL PDEPREL FILLPRED PRED APREDs
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
Output format (meaning of columns):
"""
##################################################################
# Importing Libraries
import os
import re
import sys
from alt_argparse import argparser
from alt_fio import AltFileInput, AltFileOutput
##################################################################
# Variables and Constants
FIELDSEP = re.compile('\t')
fields = []
FEATURESEP = re.compile('\|')
features = []
QUOTE_RE = re.compile('(")')
NODE_STYLE = 'color="gray",fillcolor="palegreen",style="filled"'
FEAT_LABEL = ' [label="FEAT"];'
FEAT_STYLE = 'shape=box,fillcolor="lightblue",style="filled,rounded",'
w_id = 0
form = ''
lemma = ''
pos = ''
p_id = 0
rel = ''
edges = []
f_id = -1
##################################################################
# Methods
def escape_quote(iline):
    """Prepend all double quotes with a backslash."""
    return QUOTE_RE.sub(r"\\\1", iline)
##################################################################
# Processing Arguments
argparser.description="""Utility for representing DG trees in DOTTY format."""
argparser.add_argument("-c", "--esc-char", help = """escape character which should
precede lines with meta-information""", nargs = 1, type = str, \
default = os.environ.get("SOCMEDIA_ESC_CHAR", ""))
args = argparser.parse_args()
##################################################################
# Main Body
foutput = AltFileOutput(encoding = args.encoding, \
flush = args.flush)
finput = AltFileInput(*args.files, \
skip_line = args.skip_line, \
print_func = foutput.fprint, \
errors = "replace")
# print graph header
foutput.fprint("""
graph dg {{
forcelabels=true
size="14";
node [{:s}];
0 [label="Root"];
""".format(NODE_STYLE))
for line in finput:
    if line and line[0] == args.esc_char:
        continue
    # interpret fields
    fields = line.split()
    if not len(fields):
        continue
    w_id, form, lemma = fields[0], fields[1], fields[3]
    pos, p_id, rel = fields[5], fields[9], fields[11]
    features = FEATURESEP.split(fields[7])
    # add node to the graph
    foutput.fprint(w_id, ' [label="' + escape_quote(lemma) + \
                   "\\n(" + escape_quote(form) + ')"];')
    # output features as an additional node which will be connected to the current one
    if features:
        foutput.fprint(f_id, ' [{:s} label="'.format(FEAT_STYLE) + \
                       escape_quote(";\\n".join(features)) + ';"];')
        edges.append(w_id + " -- " + str(f_id) + FEAT_LABEL)
        f_id -= 1
    # remember edge
    edges.append(p_id + " -- " + w_id + ' [label="' + rel + '"];')
# output edges
foutput.fprint('\n'.join(edges), "\n}")
|
[
"wlsidorenko@gmail.com"
] |
wlsidorenko@gmail.com
|
|
de8358d209f0dbfcb2af469c09f0adecc9434180
|
8aa1203e1a1c350da16921787133014831097391
|
/luminardjangopgm/PythonCollection/ListDemi/listworkout2.py
|
3c265d38f3861a8f435cfbcd15806d5de7e7f4df
|
[] |
no_license
|
ashilz/luminarpython
|
98fa4a87c60529d0c819e13bc5145e6f7d4ef01f
|
9eb834448012bd60952cbc539409768cabd66325
|
refs/heads/master
| 2022-12-03T03:36:14.229723
| 2020-08-25T05:07:26
| 2020-08-25T05:07:26
| 290,109,486
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 105
|
py
|
lst=[10,12,13,14,15]
cnt=len(lst)
p=1
for i in range(0,cnt):
    res = lst[i]**p
    p += 1
    print(res)
|
[
"ashilantony333@gmail.com"
] |
ashilantony333@gmail.com
|
6329a5eb0abe1699d0eb3af5a0cf08d72dda2bdb
|
69c2e1b106710d0018cae36fc63f56c61c859411
|
/test-search.py
|
026e1139ff18e186b63bd74e4004a72568ca44a1
|
[] |
no_license
|
willmurnane/store
|
a8636ccad2e220942e0d0699f4176323d326db55
|
065453987c492faeac07e3e2cd7fd574b0776d15
|
refs/heads/master
| 2021-01-01T15:35:14.775696
| 2013-11-10T14:42:03
| 2013-11-10T14:42:03
| 13,876,601
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
#! /usr/bin/python
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
import sys
sys.path.append("..")
import store.models
from whoosh.index import *
from whoosh.query import *
from whoosh.qparser import QueryParser
index_path = "index"
ix = open_dir(index_path)
query = sys.argv[1]
print sys.argv[1:]
with ix.searcher() as s:
    # terms = map(lambda w: Or([Term("content", unicode(w)), Term("title", unicode(w))]), sys.argv[1:])
    my_query = Or([Variations("content", unicode(query)), Variations("title", unicode(query))])
    # qp = QueryParser("title", schema = ix.schema)
    # search = unicode(" ".join(sys.argv[1:]))
    # print search
    # my_query = qp.parse(search)
    print my_query
    results = s.search(my_query)
    print results
    print "%d results found\n" % len(results)
    for item in results:
        print item
|
[
"will.murnane@gmail.com"
] |
will.murnane@gmail.com
|
09f7ff38257927f817ca76e38b02d8f4f94da9fd
|
730707fdefc2934929e1309cfbb0484d62b4bc34
|
/backend/home/migrations/0001_load_initial_data.py
|
bc0ac08ee26bc6af244f1c1862878b762c7d3a2e
|
[] |
no_license
|
crowdbotics-apps/tpl-account-securty-27301
|
885f78b6256e3da6733c534cb85b89f797476e5f
|
44a580b64f14f7598b9e0c7a513976795992b15d
|
refs/heads/master
| 2023-04-26T15:38:35.791087
| 2021-05-23T22:55:42
| 2021-05-23T22:55:42
| 370,173,419
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
from django.db import migrations
def create_site(apps, schema_editor):
    Site = apps.get_model("sites", "Site")
    custom_domain = "tpl-account-securty-27301.botics.co"
    site_params = {
        "name": "tpl account securty page",
    }
    if custom_domain:
        site_params["domain"] = custom_domain
    Site.objects.update_or_create(defaults=site_params, id=1)


class Migration(migrations.Migration):

    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_site),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
1314ef75281011fc68f901e3661700823f076402
|
2d2a513e0bde6a9f814c49f36c480af46edfadd5
|
/tests/__init__.py
|
0354f67715a51889ce4a1d3c12fb29af2070109e
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
stjordanis/rubicon-objc
|
591aa6960e1d8b5ba58b403ab1b8ad8aec8f9ce4
|
abe4f243487ab6305b12b9798ff330337f616dd7
|
refs/heads/master
| 2020-11-27T23:42:38.138686
| 2019-05-25T05:03:07
| 2019-05-25T05:03:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
import faulthandler
from rubicon.objc.runtime import load_library
try:
    import platform
    OSX_VERSION = tuple(int(v) for v in platform.mac_ver()[0].split('.')[:2])
except Exception:
    OSX_VERSION = None

try:
    rubiconharness = load_library('rubiconharness')
except ValueError:
    raise ValueError("Couldn't load Rubicon test harness library. Have you set DYLD_LIBRARY_PATH?")
faulthandler.enable()
|
[
"dgelessus@users.noreply.github.com"
] |
dgelessus@users.noreply.github.com
|
7d55e4f9b235c47896495ee39627711325cc23e6
|
234b14ae9bd4c8bc90b88ae84b9d0a2fd51b9fc3
|
/Sid/Day1/variable.py
|
7d056dd5b0a4e000a05d23512e0eeab2616cf560
|
[] |
no_license
|
Siddhant6078/Python
|
a7e730ef63435b8c114782158ebadc9ec5bfde89
|
1f9ad00b78fe417f3702a5e6421d8fccbb9d1c8f
|
refs/heads/master
| 2021-07-11T20:09:49.369503
| 2017-10-14T13:40:30
| 2017-10-14T13:40:30
| 105,008,641
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
counter = 100 # An integer assignment
miles = 1000.0 # A floating point
name = "John" # A string
print counter
print miles
print name
a = b = c = 1
print a,b,c
a,b,c = 1,2,"john"
print a,b,c
var1 = 1
var2 = 10
print var1,var2
del var1
print var1
|
[
"nishant.c@indictrans.com"
] |
nishant.c@indictrans.com
|
ef232dab5bc20bf3a6e6d2877ede262ab60bd9c8
|
99249dad36df26a712ae8d900041d53acf3901ea
|
/settings/configurations/LCLS_settings.py
|
0f4d71a15a5f657650c92536b3cfb5a54b7d163c
|
[
"MIT"
] |
permissive
|
bopopescu/Lauecollect
|
f1f79c2cc5ff106df0dedbd6939ec92630d2b305
|
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
|
refs/heads/master
| 2022-11-29T00:40:28.384831
| 2019-06-05T01:21:36
| 2019-06-05T01:21:36
| 280,989,300
| 0
| 0
|
MIT
| 2020-07-20T02:03:22
| 2020-07-20T02:03:22
| null |
UTF-8
|
Python
| false
| false
| 1,034
|
py
|
MicroscopeCamera.ImageWindow.Center = (679.0, 512.0)
MicroscopeCamera.Mirror = False
MicroscopeCamera.NominalPixelSize = 0.000517
MicroscopeCamera.Orientation = -90
MicroscopeCamera.camera.IP_addr = '172.21.46.202'
MicroscopeCamera.x_scale = -1.0
MicroscopeCamera.y_scale = 1.0
MicroscopeCamera.z_scale = -1.0
WideFieldCamera.ImageWindow.Center = (738.0, 486.0)
WideFieldCamera.Mirror = False
WideFieldCamera.NominalPixelSize = 0.002445
WideFieldCamera.Orientation = -90
WideFieldCamera.camera.IP_addr = '172.21.46.70'
WideFieldCamera.x_scale = -1.0
WideFieldCamera.y_scale = 1.0
WideFieldCamera.z_scale = -1.0
laser_scope.ip_address = 'femto10.niddk.nih.gov:2000'
rayonix_detector.ip_address = '172.21.46.133:2222'
sample.phi_motor_name = 'SamplePhi'
sample.rotation_center = (-0.7938775, -0.31677586081529113)
sample.x_motor_name = 'SampleX'
sample.xy_rotating = False
sample.y_motor_name = 'SampleY'
sample.z_motor_name = 'SampleZ'
timing_system.ip_address = '172.21.46.207:2000'
xray_scope.ip_address = 'pico21.niddk.nih.gov:2000'
|
[
"friedrich.schotte@gmail.com"
] |
friedrich.schotte@gmail.com
|
5c36ae6fce8ec9601832a3503e9a4f0e716f1f1d
|
a35dadcdca748197bc400cebc180b58fe8f0735a
|
/constants.py
|
06f781bd894b739f694e6c9e5ed6447e66a2aa70
|
[] |
no_license
|
RGologorsky/CS-182-final-project
|
ec232bb40bca4ffab935be536ca8540972be57e6
|
117b3159b879d07c1195204718dadf2e696469f7
|
refs/heads/master
| 2022-09-23T03:33:08.991790
| 2020-06-04T22:13:36
| 2020-06-04T22:13:36
| 112,677,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,337
|
py
|
MULTIVAR = set(["AM21B","MATH21B", "MATH23B","MATH25A", "MATH55A"])
LINALG = set(["AM21A","MATH21A", "MATH23A","MATH25B", "MATH55B"])
STAT110 = "STAT110"
CS050 = "CS050"
CS051 = "CS051"
CS061 = "CS061"
CS020 = "CS020"
CS121 = "CS121"
CS124 = "CS124"
CS181 = "CS181"
CS182 = "CS182"
MATH23A = "MATH23A"
MATH25A = "MATH25A"
MATH25B = "MATH25B"
MATH55A = "MATH55A"
MATH55B = "MATH55B"
courses = {
'AM106': {'CLOCKDAYS': 'MW',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'AM106',
'DAYS': set(['M', 'W']),
'END': 1559,
'ENROLLMENT': 31,
'PREREQS': [set(['AM21B',
'MATH21B',
'MATH23B',
'MATH25A',
'MATH55A']),
set(['AM21A',
'MATH21A',
'MATH23A',
'MATH25B',
'MATH55B'])],
'Q': 3.7,
'SEMESTER': 'F',
'START': 1430,
'WORKLOAD': 6.6},
'AM107': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1129AM',
'CLOCKSTART': '1000AM',
'COURSE': 'AM107',
'DAYS': set(['R', 'T']),
'END': 1129,
'ENROLLMENT': 23,
'PREREQS': [],
'Q': 4.3,
'SEMESTER': 'S',
'START': 1000,
'WORKLOAD': 8.1},
'AM120': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1129AM',
'CLOCKSTART': '1000AM',
'COURSE': 'AM120',
'DAYS': set(['R', 'T']),
'END': 1129,
'ENROLLMENT': 108,
'PREREQS': [set(['AM21B',
'MATH21B',
'MATH23B',
'MATH25A',
'MATH55A']),
set(['AM21A',
'MATH21A',
'MATH23A',
'MATH25B',
'MATH55B']),
'CS050'],
'Q': 4.3,
'SEMESTER': 'S',
'START': 1000,
'WORKLOAD': 6.9},
'AM121': {'CLOCKDAYS': 'MW',
'CLOCKEND': '1129AM',
'CLOCKSTART': '1000AM',
'COURSE': 'AM121',
'DAYS': set(['M', 'W']),
'END': 1129,
'ENROLLMENT': 73,
'PREREQS': [set(['AM21A',
'MATH21A',
'MATH23A',
'MATH25B',
'MATH55B']),
'STAT110'],
'Q': 3.8,
'SEMESTER': 'F',
'START': 1000,
'WORKLOAD': 10.3},
'AM21A': {'CLOCKDAYS': 'MWF',
'CLOCKEND': '1159AM',
'CLOCKSTART': '1100AM',
'COURSE': 'AM21A',
'DAYS': set(['F', 'M', 'W']),
'END': 1159,
'ENROLLMENT': 169,
'PREREQS': [],
'Q': 3.8,
'SEMESTER': 'F',
'START': 1100,
'WORKLOAD': 7.7},
'AM21B': {'CLOCKDAYS': 'MWF',
'CLOCKEND': '1159AM',
'CLOCKSTART': '1100AM',
'COURSE': 'AM21B',
'DAYS': set(['F', 'M', 'W']),
'END': 1159,
'ENROLLMENT': 79,
'PREREQS': [set(['AM21A',
'MATH21B',
'MATH23B',
'MATH25A',
'MATH55A'])],
'Q': 3.3,
'SEMESTER': 'S',
'START': 1100,
'WORKLOAD': 9.2},
'CS001': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1129AM',
'CLOCKSTART': '1000AM',
'COURSE': 'CS001',
'DAYS': set(['R', 'T']),
'END': 1129,
'ENROLLMENT': 76,
'PREREQS': [],
'Q': 3.8,
'SEMESTER': 'S',
'START': 1000,
'WORKLOAD': 7.4},
'CS020': {'CLOCKDAYS': 'MWF',
'CLOCKEND': '1059AM',
'CLOCKSTART': '1000AM',
'COURSE': 'CS020',
'DAYS': set(['F', 'M', 'W']),
'END': 1059,
'ENROLLMENT': 58,
'PREREQS': [],
'Q': 4.4,
'SEMESTER': 'S',
'START': 1000,
'WORKLOAD': 5.0},
'CS050': {'CLOCKDAYS': 'F',
'CLOCKEND': '1159AM',
'CLOCKSTART': '1000AM',
'COURSE': 'CS050',
'DAYS': set(['F']),
'END': 1159,
'ENROLLMENT': 750,
'PREREQS': [],
'Q': 3.5,
'SEMESTER': 'F',
'START': 1000,
'WORKLOAD': 15.2},
'CS051': {'CLOCKDAYS': 'T',
'CLOCKEND': '229PM',
'CLOCKSTART': '100PM',
'COURSE': 'CS051',
'DAYS': set(['T']),
'END': 1429,
'ENROLLMENT': 348,
'PREREQS': ['CS050'],
'Q': 3.4,
'SEMESTER': 'S',
'START': 1300,
'WORKLOAD': 13.9},
'CS061': {'CLOCKDAYS': 'TR',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'CS061',
'DAYS': set(['R', 'T']),
'END': 1559,
'ENROLLMENT': 129,
'PREREQS': ['CS050'],
'Q': 4.2,
'SEMESTER': 'F',
'START': 1430,
'WORKLOAD': 15.8},
'CS091R': {'CLOCKDAYS': '',
'CLOCKEND': '',
'CLOCKSTART': '',
'COURSE': 'CS091R',
'DAYS': set([]),
'END': -1,
'ENROLLMENT': 5,
'PREREQS': [],
'Q': 3.879545455,
'SEMESTER': 'F',
'START': -1,
'WORKLOAD': 11.04772727},
'CS091R': {'CLOCKDAYS': '',
'CLOCKEND': '',
'CLOCKSTART': '',
'COURSE': 'CS091R',
'DAYS': set([]),
'END': -1,
'ENROLLMENT': 5,
'PREREQS': [],
'Q': 3.879545455,
'SEMESTER': 'S',
'START': -1,
'WORKLOAD': 11.04772727},
'CS096': {'CLOCKDAYS': 'MWF',
'CLOCKEND': '559PM',
'CLOCKSTART': '400PM',
'COURSE': 'CS096',
'DAYS': set(['F', 'M', 'W']),
'END': 1759,
'ENROLLMENT': 2,
'PREREQS': [set(['CS051', 'CS061'])],
'Q': 4.5,
'SEMESTER': 'F',
'START': 1600,
'WORKLOAD': 3.0},
'CS105': {'CLOCKDAYS': 'TR',
'CLOCKEND': '229PM',
'CLOCKSTART': '100PM',
'COURSE': 'CS105',
'DAYS': set(['R', 'T']),
'END': 1429,
'ENROLLMENT': 37,
'PREREQS': [],
'Q': 4.7,
'SEMESTER': 'F',
'START': 1300,
'WORKLOAD': 5.2},
'CS108': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1259PM',
'CLOCKSTART': '1130AM',
'COURSE': 'CS108',
'DAYS': set(['R', 'T']),
'END': 1259,
'ENROLLMENT': 36,
'PREREQS': [],
'Q': 4.8,
'SEMESTER': 'F',
'START': 1130,
'WORKLOAD': 4.1},
'CS109A': {'CLOCKDAYS': 'MW',
'CLOCKEND': '229PM',
'CLOCKSTART': '100PM',
'COURSE': 'CS109A',
'DAYS': set(['M', 'W']),
'END': 1429,
'ENROLLMENT': 131,
'PREREQS': ['CS050'],
'Q': 3.1,
'SEMESTER': 'F',
'START': 1300,
'WORKLOAD': 9.1},
'CS109B': {'CLOCKDAYS': 'MW',
'CLOCKEND': '229PM',
'CLOCKSTART': '100PM',
'COURSE': 'CS109B',
'DAYS': set(['M', 'W']),
'END': 1429,
'ENROLLMENT': 79,
'PREREQS': ['CS109A'],
'Q': 3.5,
'SEMESTER': 'S',
'START': 1300,
'WORKLOAD': 11.9},
'CS121': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1129AM',
'CLOCKSTART': '1000AM',
'COURSE': 'CS121',
'DAYS': set(['R', 'T']),
'END': 1129,
'ENROLLMENT': 169,
'PREREQS': [set(['CS020',
'MATH23A',
'MATH25A',
'MATH25B',
'MATH55A',
'MATH55B'])],
'Q': 3.2,
'SEMESTER': 'F',
'START': 1000,
'WORKLOAD': 9.5},
'CS124': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1259PM',
'CLOCKSTART': '1130AM',
'COURSE': 'CS124',
'DAYS': set(['R', 'T']),
'END': 1259,
'ENROLLMENT': 217,
'PREREQS': ['CS121'],
'Q': 3.9,
'SEMESTER': 'S',
'START': 1130,
'WORKLOAD': 15.2},
'CS126': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1129AM',
'CLOCKSTART': '1000AM',
'COURSE': 'CS126',
'DAYS': set(['R', 'T']),
'END': 1129,
'ENROLLMENT': 30,
'PREREQS': ['STAT110', 'CS124'],
'Q': 3.0,
'SEMESTER': 'F',
'START': 1000,
'WORKLOAD': 8.0},
'CS127': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1129AM',
'CLOCKSTART': '1000AM',
'COURSE': 'CS127',
'DAYS': set(['R', 'T']),
'END': 1129,
'ENROLLMENT': 19,
'PREREQS': [set(['CS121', 'CS124'])],
'Q': 4.5,
'SEMESTER': 'S',
'START': 1000,
'WORKLOAD': 12.2},
'CS134': {'CLOCKDAYS': 'MW',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'CS134',
'DAYS': set(['M', 'W']),
'END': 1559,
'ENROLLMENT': 167,
'PREREQS': ['STAT110',
set(['AM21A',
'MATH21A',
'MATH23A',
'MATH25B',
'MATH55B']),
set(['AM21B',
'MATH21B',
'MATH23B',
'MATH25A',
'MATH55A'])],
'Q': 3.5,
'SEMESTER': 'F',
'START': 1430,
'WORKLOAD': 9.3},
'CS136': {'CLOCKDAYS': 'MW',
'CLOCKEND': '1259PM',
'CLOCKSTART': '1130AM',
'COURSE': 'CS136',
'DAYS': set(['M', 'W']),
'END': 1259,
'ENROLLMENT': 57,
'PREREQS': [set(['AM21A',
'MATH21A',
'MATH23A',
'MATH25B',
'MATH55B']),
'CS051',
'STAT110',
set(['CS181', 'CS182'])],
'Q': 4.6,
'SEMESTER': 'F',
'START': 1130,
'WORKLOAD': 9.8},
'CS141': {'CLOCKDAYS': 'MW',
'CLOCKEND': '1129AM',
'CLOCKSTART': '1000AM',
'COURSE': 'CS141',
'DAYS': set(['M', 'W']),
'END': 1129,
'ENROLLMENT': 19,
'PREREQS': ['CS050'],
'Q': 4.0,
'SEMESTER': 'F',
'START': 1000,
'WORKLOAD': 10.5},
'CS143': {'CLOCKDAYS': 'MW',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'CS143',
'DAYS': set(['M', 'W']),
'END': 1559,
'ENROLLMENT': 43,
'PREREQS': ['CS050'],
'Q': 2.8,
'SEMESTER': 'F',
'START': 1430,
'WORKLOAD': 5.5},
'CS144R': {'CLOCKDAYS': 'MW',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'CS144R',
'DAYS': set(['M', 'W']),
'END': 1559,
'ENROLLMENT': 11,
'PREREQS': [],
'Q': 4.2,
'SEMESTER': 'S',
'START': 1430,
'WORKLOAD': 5.2},
'CS144R': {'CLOCKDAYS': 'MW',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'CS144R',
'DAYS': set(['M', 'W']),
'END': 1559,
'ENROLLMENT': 11,
'PREREQS': [],
'Q': 4.2,
'SEMESTER': 'F',
'START': 1430,
'WORKLOAD': 5.2},
'CS148': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1259PM',
'CLOCKSTART': '1130AM',
'COURSE': 'CS148',
'DAYS': set(['R', 'T']),
'END': 1259,
'ENROLLMENT': 4,
'PREREQS': [],
'Q': 5.0,
'SEMESTER': 'S',
'START': 1130,
'WORKLOAD': 5.7},
'CS152': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1129AM',
'CLOCKSTART': '1000AM',
'COURSE': 'CS152',
'DAYS': set(['R', 'T']),
'END': 1129,
'ENROLLMENT': 19,
'PREREQS': ['CS051', 'CS121'],
'Q': 3.4,
'SEMESTER': 'S',
'START': 1000,
'WORKLOAD': 6.6},
'CS165': {'CLOCKDAYS': 'MW',
'CLOCKEND': '529PM',
'CLOCKSTART': '400PM',
'COURSE': 'CS165',
'DAYS': set(['M', 'W']),
'END': 1729,
'ENROLLMENT': 32,
'PREREQS': ['CS051', 'CS061'],
'Q': 4.5,
'SEMESTER': 'F',
'START': 1600,
'WORKLOAD': 10.5},
'CS171': {'CLOCKDAYS': 'TR',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'CS171',
'DAYS': set(['R', 'T']),
'END': 1559,
'ENROLLMENT': 97,
'PREREQS': ['CS050'],
'Q': 3.7,
'SEMESTER': 'F',
'START': 1430,
'WORKLOAD': 9.8},
'CS175': {'CLOCKDAYS': 'MW',
'CLOCKEND': '229PM',
'CLOCKSTART': '100PM',
'COURSE': 'CS175',
'DAYS': set(['M', 'W']),
'END': 1429,
'ENROLLMENT': 13,
'PREREQS': [set(['CS051', 'CS061']),
set(['AM21A',
'MATH21A',
'MATH23A',
'MATH25B',
'MATH55B'])],
'Q': 4.0,
'SEMESTER': 'F',
'START': 1300,
'WORKLOAD': 9.5},
'CS179': {'CLOCKDAYS': 'TR',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'CS179',
'DAYS': set(['R', 'T']),
'END': 1559,
'ENROLLMENT': 59,
'PREREQS': ['CS050'],
'Q': 3.6,
'SEMESTER': 'S',
'START': 1430,
'WORKLOAD': 6.8},
'CS181': {'CLOCKDAYS': 'MW',
'CLOCKEND': '1029AM',
'CLOCKSTART': '900AM',
'COURSE': 'CS181',
'DAYS': set(['M', 'W']),
'END': 1029,
'ENROLLMENT': 215,
'PREREQS': ['CS051',
'STAT110',
set(['AM21B',
'MATH21B',
'MATH23B',
'MATH25A',
'MATH55A']),
set(['AM21A',
'MATH21A',
'MATH23A',
'MATH25B',
'MATH55B'])],
'Q': 3.6,
'SEMESTER': 'S',
'START': 900,
'WORKLOAD': 16.8},
'CS182': {'CLOCKDAYS': 'TR',
'CLOCKEND': '229PM',
'CLOCKSTART': '100PM',
'COURSE': 'CS182',
'DAYS': set(['R', 'T']),
'END': 1429,
'ENROLLMENT': 84,
'PREREQS': ['CS051', 'STAT110'],
'Q': 3.9,
'SEMESTER': 'F',
'START': 1300,
'WORKLOAD': 7.2},
'CS189': {'CLOCKDAYS': 'F',
'CLOCKEND': '359PM',
'CLOCKSTART': '100PM',
'COURSE': 'CS189',
'DAYS': set(['F']),
'END': 1559,
'ENROLLMENT': 20,
'PREREQS': [set(['CS181', 'CS182'])],
'Q': 3.6,
'SEMESTER': 'S',
'START': 1300,
'WORKLOAD': 13.9},
'CS191': {'CLOCKDAYS': 'MW',
'CLOCKEND': '1029AM',
'CLOCKSTART': '900AM',
'COURSE': 'CS191',
'DAYS': set(['M', 'W']),
'END': 1029,
'ENROLLMENT': 20,
'PREREQS': [],
'Q': 3.0,
'SEMESTER': 'S',
'START': 900,
'WORKLOAD': 8.0},
'ES50': {'CLOCKDAYS': 'MW',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'ES50',
'DAYS': set(['M', 'W']),
'END': 1559,
'ENROLLMENT': 85,
'PREREQS': [],
'Q': 3.5,
'SEMESTER': 'F',
'START': 1430,
'WORKLOAD': 6.6},
'ES52': {'CLOCKDAYS': 'MW',
'CLOCKEND': '229PM',
'CLOCKSTART': '100PM',
'COURSE': 'ES52',
'DAYS': set(['M', 'W']),
'END': 1429,
'ENROLLMENT': 53,
'PREREQS': [],
'Q': 3.9,
'SEMESTER': 'F',
'START': 1300,
'WORKLOAD': 9.8},
'MATH154': {'CLOCKDAYS': 'MWF',
'CLOCKEND': '1259PM',
'CLOCKSTART': '1200PM',
'COURSE': 'MATH154',
'DAYS': set(['F', 'M', 'W']),
'END': 1259,
'ENROLLMENT': 30,
'PREREQS': [set(['AM21B',
'MATH21B',
'MATH23B',
'MATH25A',
'MATH55A']),
set(['AM21A',
'MATH21A',
'MATH23A',
'MATH25B',
'MATH55B'])],
'Q': 4.5,
'SEMESTER': 'S',
'START': 1200,
'WORKLOAD': 10.1},
'MATH21A': {'CLOCKDAYS': '',
'CLOCKEND': '',
'CLOCKSTART': '',
'COURSE': 'MATH21A',
'DAYS': set([]),
'END': -1,
'ENROLLMENT': 237,
'PREREQS': [],
'Q': 3.6,
'SEMESTER': 'FS',
'START': -1,
'WORKLOAD': 10.0},
'MATH21B': {'CLOCKDAYS': '',
'CLOCKEND': '',
'CLOCKSTART': '',
'COURSE': 'MATH21B',
'DAYS': set([]),
'END': -1,
'ENROLLMENT': 320,
'PREREQS': [],
'Q': 3.5,
'SEMESTER': 'FS',
'START': -1,
'WORKLOAD': 8.4},
'MATH23A': {'CLOCKDAYS': 'F',
'CLOCKEND': '159PM',
'CLOCKSTART': '100PM',
'COURSE': 'MATH23A',
'DAYS': set(['F']),
'END': 1359,
'ENROLLMENT': 59,
'PREREQS': [],
'Q': 3.4,
'SEMESTER': 'F',
'START': 1300,
'WORKLOAD': 10.5},
'MATH23B': {'CLOCKDAYS': 'TR',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'MATH23B',
'DAYS': set(['R', 'T']),
'END': 1559,
'ENROLLMENT': 59,
'PREREQS': [set(['MATH23A', 'MATH25A'])],
'Q': 3.9,
'SEMESTER': 'S',
'START': 1430,
'WORKLOAD': 8.8},
'MATH25A': {'CLOCKDAYS': 'MWF',
'CLOCKEND': '1059AM',
'CLOCKSTART': '1000AM',
'COURSE': 'MATH25A',
'DAYS': set(['F', 'M', 'W']),
'END': 1059,
'ENROLLMENT': 45,
'PREREQS': [],
'Q': 4.6,
'SEMESTER': 'F',
'START': 1000,
'WORKLOAD': 17.1},
'MATH25B': {'CLOCKDAYS': 'MWF',
'CLOCKEND': '1059AM',
'CLOCKSTART': '1000AM',
'COURSE': 'MATH25B',
'DAYS': set(['F', 'M', 'W']),
'END': 1059,
'ENROLLMENT': 38,
'PREREQS': [set(['MATH25A','MATH55A'])],
'Q': 3.9,
'SEMESTER': 'S',
'START': 1000,
'WORKLOAD': 16.3},
'MATH55A': {'CLOCKDAYS': 'MWF',
'CLOCKEND': '1159AM',
'CLOCKSTART': '1100AM',
'COURSE': 'MATH55A',
'DAYS': set(['F', 'M', 'W']),
'END': 1159,
'ENROLLMENT': 11,
'PREREQS': [],
'Q': 3.7,
'SEMESTER': 'F',
'START': 1100,
'WORKLOAD': 30.2},
'MATH55B': {'CLOCKDAYS': 'MWF',
'CLOCKEND': '1159AM',
'CLOCKSTART': '1100AM',
'COURSE': 'MATH55B',
'DAYS': set(['F', 'M', 'W']),
'END': 1159,
'ENROLLMENT': 12,
'PREREQS': ['MATH55A'],
'Q': 4.0,
'SEMESTER': 'S',
'START': 1100,
'WORKLOAD': 45.2},
'STAT110': {'CLOCKDAYS': 'TR',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'STAT110',
'DAYS': set(['R', 'T']),
'END': 1559,
'ENROLLMENT': 444,
'PREREQS': [set(['AM21B',
'MATH21B',
'MATH23B',
'MATH25A',
'MATH55A'])],
'Q': 4.3,
'SEMESTER': 'F',
'START': 1430,
'WORKLOAD': 10.6},
}
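# Note: 'CS091R' and 'CS144R' each appear twice in the literal above (once per
# semester); in a Python dict a later duplicate key silently overwrites the
# earlier one, so only the second entry of each pair survives at runtime.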
# 'STAT121A': {'CLOCKDAYS': 'MW',
# 'CLOCKEND': '229PM',
# 'CLOCKSTART': '100PM',
# 'COURSE': 'STAT121A',
# 'DAYS': set(['M', 'W']),
# 'END': 1429,
# 'ENROLLMENT': 131,
# 'PREREQS': [],
# 'Q': 3.1,
# 'SEMESTER': 'F',
# 'START': 1300,
# 'WORKLOAD': 9.1},
# 'STAT121B': {'CLOCKDAYS': 'MW',
# 'CLOCKEND': '229PM',
# 'CLOCKSTART': '100PM',
# 'COURSE': 'STAT121B',
# 'DAYS': set(['M', 'W']),
# 'END': 1429,
# 'ENROLLMENT': 25,
# 'PREREQS': [],
# 'Q': 3.5,
# 'SEMESTER': 'S',
# 'START': 1300,
# 'WORKLOAD': 5}
|
[
"rgologorsky@college.harvard.edu"
] |
rgologorsky@college.harvard.edu
|
39b71d964b507c6bddee391d264382ee2a09e569
|
aad38f959313c008af3cff6f2595c05131e0ae60
|
/week4/common/cloudAMQP_client_test.py
|
701364b71aaa2de82e1d3d121c82d7858c9f8899
|
[] |
no_license
|
wansuiye09/News-Scraping-and-Recommendation
|
82c128e3a31df95b6d19107db969e318810695b8
|
a3e9149d6952fc216dd6b5f21e8ad97fafa09168
|
refs/heads/master
| 2021-05-10T13:10:55.362946
| 2017-06-19T15:01:57
| 2017-06-19T15:01:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
from cloudAMQP_client import CloudAMQPClient
CLOUDAMQP_URL = 'amqp://dfrwrfgh:57HQ4sghISj3dAGA42BQbVf9AOqzrj0c@crocodile.rmq.cloudamqp.com/dfrwrfgh'
QUEUE_NAME = 'test'
def test_basic():
    client = CloudAMQPClient(CLOUDAMQP_URL, QUEUE_NAME)
    sentMsg = {'test_key': 'value'}
    client.sendMessage(sentMsg)
    client.sleep(5)
    receivedMsg = client.getMessage()
    assert sentMsg == receivedMsg
    print 'test passed'

if __name__ == "__main__":
    test_basic()
|
[
"ezhangmarvin@gmail.com"
] |
ezhangmarvin@gmail.com
|
66b303e32158b5df66849ee037cff4b3c3ee363c
|
7334b65c9506f69167402fe0d473821853724250
|
/build/shinobot/catkin_generated/pkg.installspace.context.pc.py
|
7e33aafc4d08a69c52f401c038b137e2b9bfc9e1
|
[] |
no_license
|
hphilamore/shinobot_ws
|
f5d044af587959009992f090f832279f52daf94f
|
153ff65f72729a2a1fd84f84305a78aae7373b73
|
refs/heads/master
| 2022-12-26T14:47:46.977468
| 2020-10-08T19:54:04
| 2020-10-08T19:54:04
| 267,820,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "shinobot"
PROJECT_SPACE_DIR = "/home/shinobot/shinobot_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"hemmaphilamore@gmail.com"
] |
hemmaphilamore@gmail.com
|
5886518f9d4354fc2e92ba2b794837444e2ce652
|
24aa54e27ea3aa648f1c2d898f2412a4a89678e4
|
/deep_glide/envs/withMap.py
|
888260c9a7155b378f2aab63228f4eb0059611db
|
[] |
no_license
|
afaehnrich/deep-glide
|
44246dbb9534c75e353b6c71cef1fab141b6746b
|
d80c857ee83e674c1ad2fe8670fa7f621ae8bb7e
|
refs/heads/master
| 2023-08-12T11:02:42.394090
| 2021-09-27T13:41:10
| 2021-09-27T13:41:10
| 314,990,517
| 0
| 0
| null | 2021-09-27T13:14:43
| 2020-11-22T08:19:27
|
Python
|
UTF-8
|
Python
| false
| false
| 6,467
|
py
|
from abc import abstractmethod
from enum import auto
from deep_glide.envs.withoutMap import Scenario_A
import numpy as np
from deep_glide.sim import Sim, SimState, TerrainBlockworld, TerrainClass, TerrainClass90m, TerrainOcean, TerrainSingleBlocks
from deep_glide.envs.abstractEnvironments import AbstractJSBSimEnv, TerminationCondition
import deep_glide.envs.rewardFunctions as rewardFunctions
from deep_glide.deprecated.properties import Properties, PropertylistToBox
from deep_glide.utils import Normalizer, Normalizer2D
from gym.envs.registration import register
import logging
from deep_glide.utils import angle_between
from gym import spaces
from matplotlib import pyplot as plt
import math
import os
class AbstractJSBSimEnv2D(Scenario_A):
metadata = {'render.modes': ['human']}
OBS_WIDTH = 36
OBS_HEIGHT = 36
observation_space: spaces.Box
map_mean: float
map_std: float
def __init__(self, terrain: str, save_trajectory = False, render_before_reset=False):
super().__init__(save_trajectory, render_before_reset)
self._init_terrain(terrain)
self.observation_space = spaces.Box( low = -math.inf, high = math.inf,
shape=(super().observation_space.shape[0]+self.OBS_HEIGHT*self.OBS_WIDTH,), dtype=np.float32)
def _init_terrain(self, terrain):
if terrain == 'ocean': self.terrain = TerrainOcean()
elif terrain == 'oceanblock': self.terrain = TerrainBlockworld(ocean=True)
elif terrain == 'alps': self.terrain = TerrainClass90m()
elif terrain == 'block': self.terrain = TerrainBlockworld()
elif terrain == 'singleblock': self.terrain = TerrainSingleBlocks()
else: raise ValueError('Terraintype unknown: {}'.format(terrain))
print( 'using Terrain:', terrain)
self.calc_map_mean_std()
def calc_map_mean_std(self):
self.map_mean = 5000.
self.map_std = 5000.
# (x1,x2), (y1,y2) = self.config.map_start_range
# map_min5 = np.percentile(self.terrain.data[x1:x2, y1:y2], 5)
# map_max5 = np.percentile(self.terrain.data[x1:x2, y1:y2], 95)
# self.map_mean = map_min5 + (map_max5-map_min5)/2
# self.map_std = abs((map_max5-map_min5)/2) + 0.00002
# logging.debug('Map mean={:.2f} std={:.2f}'.format(self.map_mean, self.map_std))
#print('Map mean={:.2f} std={:.2f}'.format(self.map_mean, self.map_std))
def _get_state(self):
state = super()._get_state()
map = self.terrain.map_around_position(self.pos[0], self.pos[1], self.OBS_WIDTH, self.OBS_HEIGHT).copy()
map = (map-self.map_mean)/self.map_std
#map = self.mapNormalizer.normalize(map.view().reshape(1,self.OBS_WIDTH,self.OBS_HEIGHT))
if not np.isfinite(state).all():
logging.error('Infinite number detected in state. Replacing with zero')
logging.error('State: {}'.format(state))
state = np.nan_to_num(state, neginf=0, posinf=0)
#state = self.stateNormalizer.normalize(state.view().reshape(1,17))
if not np.isfinite(state).all():
logging.error('Infinite number after Normalization!')
raise ValueError()
state = np.concatenate((map.flatten(), state.flatten()))
return state
class Scenario_A_Terrain(AbstractJSBSimEnv2D):
# stateNormalizer = Normalizer('JsbSimEnv2D_v0')
# mapNormalizer = Normalizer2D('JsbSimEnv2D_v0_map')
env_name = 'Scenario_A_Terrain-v0'
'''
    In this env the reward depends on how close the agent got to the goal.
    Altitude and approach angle are not decisive.
'''
def __init__(self, terrain='ocean', save_trajectory = False, render_before_reset=False):
super().__init__(terrain, save_trajectory, render_before_reset)
class Scenario_B_Terrain(Scenario_A_Terrain):
env_name = 'Scenario_B_Terrain-v0'
'''
    Like JSBSim_v5, but with a map.
'''
def __init__(self, terrain='ocean', save_trajectory = False, render_before_reset=False, range_dist = 500, goto_time = 5.):
super().__init__(terrain, save_trajectory, render_before_reset)
        self.RANGE_DIST = range_dist # in m | radius around the goal within which a positive reward is given
self.goto_time = goto_time
        # Enable if more logging is needed:
# self.log_fn = 'Log_JSBSim2D-v2_final_heights'
# i=1
# while os.path.exists('{}_{}.csv'.format(self.log_fn, i)): i+=1
# self.log_fn = '{}_{}.csv'.format(self.log_fn, i)
# with open(self.log_fn,'w') as fd:
# fd.write('height; terrain_height\n')
def step(self, action):
new_state, reward, done, info = super().step(action)
        # Enable if more logging is needed:
# if done:
# with open(self.log_fn,'a') as fd:
# fd.write('{:f}; {:f}\n'.format(self.pos[2],self.terrain.altitude(self.pos[0], self.pos[1])).replace('.',','))
return new_state, reward, done, info
_checkFinalConditions = rewardFunctions._checkFinalConditions_v5
_reward = rewardFunctions._reward_v5
class Scenario_C_Terrain(Scenario_B_Terrain):
env_name = 'Scenario_C_Terrain-v0'
'''
    Like JSBSim_v6, but with a map.
    Result: no learning, even with the ocean map. If the state is reduced to the plain state without the map, everything works fine.
'''
_checkFinalConditions = rewardFunctions._checkFinalConditions_v6
_reward = rewardFunctions._reward_v6
def __init__(self, terrain='ocean', save_trajectory = False, render_before_reset=False, range_dist=500, range_angle = math.pi/5, angle_importance=0.5):
super().__init__(terrain, save_trajectory, render_before_reset, range_dist)
        self.RANGE_ANGLE = range_angle # in rad | tolerance of the approach angle within which a positive reward is given
self.ANGLE_IMPORTANCE = angle_importance * 10
register(
id='Scenario_A_Terrain-v0',
entry_point='deep_glide.envs.withMap:Scenario_A_Terrain',
max_episode_steps=999,
reward_threshold=1000.0,
)
register(
id='Scenario_B_Terrain-v0',
entry_point='deep_glide.envs.withMap:Scenario_B_Terrain',
max_episode_steps=999,
reward_threshold=1000.0,
)
register(
id='Scenario_C_Terrain-v0',
entry_point='deep_glide.envs.withMap:Scenario_C_Terrain',
max_episode_steps=999,
reward_threshold=1000.0,
)
|
[
"a.faehnrich.acc@gmail.com"
] |
a.faehnrich.acc@gmail.com
|
ecc631a48f59fcc28412207e3d56e26f26d614f1
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/128/usersdata/222/33411/submittedfiles/al6.py
|
a4e5c49916c0a47643dc35834d5f8c7cd5aca7c0
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
# -*- coding: utf-8 -*-
a = int(input('Digite a:'))
contador = 0
# count and print the proper divisors of a; if there are none, a is prime
for i in range(2, a, 1):
    if a % i == 0:
        contador = contador + 1
        print(i)
if contador == 0:
    print('Primo')
else:
    print('Não primo')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
c85c091a3229318315dafe45d892f4fe27ad63c5
|
c8efab9c9f5cc7d6a16d319f839e14b6e5d40c34
|
/source/All_Solutions/0480.滑动窗口中位数/0480-滑动窗口中位数.py
|
b6a27a3906d116af6ae8695a4eafea53559a93c4
|
[
"MIT"
] |
permissive
|
zhangwang0537/LeetCode-Notebook
|
73e4a4f2c90738dea4a8b77883b6f2c59e02e9c1
|
1dbd18114ed688ddeaa3ee83181d373dcc1429e5
|
refs/heads/master
| 2022-11-13T21:08:20.343562
| 2020-04-09T03:11:51
| 2020-04-09T03:11:51
| 277,572,643
| 0
| 0
|
MIT
| 2020-07-06T14:59:57
| 2020-07-06T14:59:56
| null |
UTF-8
|
Python
| false
| false
| 940
|
py
|
import bisect
from typing import List  # needed for the List[int] / List[float] annotations below
class Solution:
def medianSlidingWindow(self, nums: List[int], k: int) -> List[float]:
"""
My solution, using sorted list
Time: O(nlog(k))
Space: O(n+k)
"""
res = []
if not nums or not k:
return res
def append_median():
median = sorted_list[k//2] if k%2==1 else (sorted_list[k//2] + sorted_list[k//2-1])/2
res.append(median)
n = len(nums)
p1, p2 = 0, k
sorted_list = sorted(nums[p1:p2])
append_median()
while p2 != n:
bisect.insort(sorted_list, nums[p2])
del_index = bisect.bisect(sorted_list, nums[p1])
# remember that the index of bisect and list are not same!
del sorted_list[del_index - 1]
append_median()
p1 += 1
p2 += 1
return res
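# Worked example (added for illustration, not part of the original file):
# Solution().medianSlidingWindow([1, 3, -1, -3, 5, 3, 6, 7], 3) -> [1, -1, -1, 3, 5, 6]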
|
[
"mzm@mail.dlut.edu.cn"
] |
mzm@mail.dlut.edu.cn
|
1eb38977bcd60dc2b44b88bac65269a4e1e247a7
|
5d441b10415e452113e395681e4b80e2c8f2bf8c
|
/commands/por_ano_melhorado.py
|
e3bb05ab68ee1599a532f6e45868a5c9938b80b4
|
[] |
no_license
|
diegobaron2612/copa_transparente
|
5fe49fe2c1785e8cd3808fe95c305a8a18dbab3f
|
e74663224012249d0c4c7688704d2659771ab350
|
refs/heads/master
| 2021-07-11T03:24:12.861363
| 2021-03-28T19:39:51
| 2021-03-28T19:39:51
| 242,865,154
| 0
| 0
| null | 2020-02-24T23:21:09
| 2020-02-24T23:21:08
| null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
def contar_execucoes(caminho):
totals = {}
with open(caminho, "r") as data:
for line in data:
info = line.strip().split(";")
year = int(info[8][-4:])
totals.setdefault(year, 0)
totals[year] += 1
sorted_totals = sorted(totals)
for year in sorted_totals:
print(f"{totals[year]} execuções assinadas em {year}")
if __name__ == "__main__":
contar_execucoes("data/data/ExecucaoFinanceira.csv")
|
[
"viniciusdesk@icloud.com"
] |
viniciusdesk@icloud.com
|
35b8a0e073fe1e4ace98a7f1bbf543673ee3905f
|
f648b8263f130f3be7fd07e87d1b9c12a3e94ffb
|
/webScraper.py
|
928df5de15ff32eb750e40ef671fc31069bbd81a
|
[] |
no_license
|
StaaleA/FinnScraper
|
43777ca9fbb16ae3b526a9fa2a8a5c2e16abaaf1
|
5c2c7d7cbeda392f92931c026b4ea265b913afb5
|
refs/heads/master
| 2021-01-01T04:40:23.819003
| 2016-05-08T11:21:15
| 2016-05-08T11:21:15
| 56,928,471
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,976
|
py
|
from lxml import html
from datetime import datetime
import requests
import boto3
import sys
# Gets info about two search terms: "datavarehus" and "business intelligence"
datavarehus = requests.get('http://m.finn.no/job/fulltime/search.html?q=datavarehus&industry=65&industry=8&industry=34&sort=1')
businessIntelligence = requests.get('http://m.finn.no/job/fulltime/search.html?q=business+intelligence&industry=65&industry=8&industry=34&sort=1')
# Extracts the number of jobs and ads
tree = html.fromstring(datavarehus.content)
datavarehus_count = tree.xpath('//span[@class="current-hit-count"]/b[@data-count]/text()') #['Stillinger', 'Annonser']
tree = html.fromstring(businessIntelligence.content)
businessIntelligence_count = tree.xpath('//span[@class="current-hit-count"]/b[@data-count]/text()') #['Stillinger', 'Annonser']
# Sets the date
date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# Builds the output text
datavarehus_stillinger = datavarehus_count[0]
datavarehus_annonser = datavarehus_count[1]
datavarehusTekst = "datavarehus,"+datavarehus_stillinger+","+datavarehus_annonser+","+date
businessIntelligence_stillinger = businessIntelligence_count[0]
businessIntelligence_annonser = businessIntelligence_count[1]
businessIntelligenceTekst = "business intelligence,"+businessIntelligence_stillinger+","+businessIntelligence_annonser+","+date
# Gets the file location as an argument
if len(sys.argv) != 2:
    sys.exit('Usage: .../webScraper.py fileLocation') # if a file location hasn't been passed as an argument
else:
fileLocation = sys.argv[1]
# Appends the file with the new data
try:
file = open(fileLocation,"a")
except IOError:
    print('Cannot open file. Check that you have the correct file location:', fileLocation)
else:
file.write(datavarehusTekst +"\r\n")
file.write(businessIntelligenceTekst+"\r\n")
file.close()
# Upload the file to S3
s3_client = boto3.client('s3')
s3_client.upload_file(fileLocation, 'samedia', 'stillinger.csv')
|
[
"staaleas@gmail.com"
] |
staaleas@gmail.com
|
1dbd941eaa595923b6c8f889ba43df856f7e2df6
|
565892be77daffe1250229fbd1a8ed94819bff56
|
/src/basic_syntax_python.py
|
b97527ebe07af6e6ed81ea3f96a1e5fe4b807447
|
[] |
no_license
|
Romzzes/basic_python_selenium_test
|
6a05b9ba578d8254faadc43ea5056cd9c6b3d26a
|
4a21f06fec20b76c14c7fa7768f05dada81ebd8c
|
refs/heads/master
| 2020-08-15T09:42:35.275398
| 2019-10-21T14:36:58
| 2019-10-21T14:36:58
| 215,319,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,418
|
py
|
from selenium import webdriver
name = "Roman"
height = 180
weight = 83.5
married = False
age = 65
height = height + 6
height += 6
a = 4
b = 6.5
c = "2.5"
print (weight + height)
print (name + str(height))
print (name + " is " + str(height) + " cm and " + str(weight) + " kg")
print ("My name is " + name)
print ("name is {} and height is {}". format(name, height))
print (a+b)
print(b + float(c))
print(str(b) +c)
if (age < 10):
print("child")
elif (age <= 19):
if (age < 13):
print ("small")
print("teenager")
elif (age < 65):
print ("old")
else:
print("retiree")
lis = [12, 44.3, 'a', ['h', 'i']]
lis.append('new')
lis.insert(1, 14)
lis.remove(44.3)
print(lis)
print(lis[3])
print(lis[3][0])
set = {'Alex', 12, 12, 'Peter', 'Nick'}
print(set)
set = ('Alex', 12, 12, 'Peter', 'Nick')  # a tuple, unlike the set above
print(set)
for element in lis:
print(element)
d = {'name': name, 'profession': {3, 2, 3}, 'name1': 'bcd', 'name2': 'hi', 'name3': 'hi'}
print(d['name'])
print(d['profession'])
def sum(a, b):
# a = 4
# b = 3
print(a + b)
sum(weight, height)
sum(weight+height, height-weight)
import math
pi = math.pi
c = math.cos(60/pi)
print(c)
print(pi)
#about xpath
#//input[@name='username']
# contains text: //button[contains(text(),"Submit")] //div[contains(text(),"Submit")]
#
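# Illustrative sketch (added; not in the original file) showing one of the XPath
# examples above with Selenium -- the driver setup and URL are assumptions:
# from selenium.webdriver.common.by import By
# driver = webdriver.Chrome()
# driver.get('https://example.com/login')
# field = driver.find_element(By.XPATH, "//input[@name='username']")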
#for element in set:
# print(element)
print(len(set))
print(set)
print(len(lis))
print(lis)
|
[
"Romanpopov120793@gmail.com"
] |
Romanpopov120793@gmail.com
|
a49c16b1780e0f525fcaef9f2316c830deb44dd2
|
4cabdcc3cdf929fa7cf761a42cd3012d01494336
|
/pipeline/mongodb/connector.py
|
02c440be290a00f350da0205b88def477f43851c
|
[] |
no_license
|
pjt3591oo/python-boilerplate
|
660e3234aa45f18ed553f499674c54e3226bfaf4
|
1ea8d84cfc06c84bab934f779ead309e8e4e7c14
|
refs/heads/master
| 2021-01-01T06:53:18.184728
| 2017-07-19T01:06:54
| 2017-07-19T01:06:54
| 97,542,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 63
|
py
|
from config.DB import CONNECTOR_INFO
CONNECTOR_INFO['mongodb']
|
[
"pjt3591oo@gmail.com"
] |
pjt3591oo@gmail.com
|
321bd9b50369d94963a04a588e9292a874dd1c3b
|
d2a88f8decc3c101c3a029d1ea269dab95e3d98a
|
/pages/product_page.py
|
308b844b0a5d8b9684dc48b7983657fb76fb4be6
|
[] |
no_license
|
arinablake/new_python-selenium-automation
|
6841c6ef434671dcb9912d76a55b4c0c2c5cad0b
|
771eb7579918bcbbe4605962f3c1eda03c4993ff
|
refs/heads/master
| 2022-11-26T08:38:31.016555
| 2020-08-02T00:08:23
| 2020-08-02T00:08:23
| 267,146,563
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
from pages.base_page import Page
from selenium.webdriver.common.by import By
from time import sleep
class Product(Page):
NEW_ARRIVALS = (By.CSS_SELECTOR, '#nav-subnav > a:nth-child(7)')
DEALS = (By.CSS_SELECTOR, '.mega-menu')
def hover_new_arvls(self):
new_arvls_btn = self.find_element(*self.NEW_ARRIVALS)
self.actions.move_to_element(new_arvls_btn).perform()
def verify_deals_tooltip(self):
self.wait_for_element_appear(*self.DEALS)
|
[
"arinafilippova@gmail.com"
] |
arinafilippova@gmail.com
|
4476af7a141bf5d8e5169068416e88b445882d90
|
e7759f8c701f7fc983c64280a21d6d0c59398e57
|
/Labs/lab07_08_featherbear/src/booking.py
|
700d5eec96120132d3b0c1f6b518786bf030926d
|
[] |
no_license
|
featherbear/UNSW-COMP1531
|
3fbe33986065f464fa2ce4615588220b57cb55ad
|
3fff3663972034e9f6ce621fb06531b06a8d488f
|
refs/heads/master
| 2021-06-26T17:16:03.321442
| 2021-06-12T09:00:39
| 2021-06-12T09:00:39
| 170,249,325
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
class Booking(object):
def __init__(self, customer, period, car, location):
self._customer = customer
self._period = period
self._car = car
self._location = location
@property
def fee(self):
return self._car.get_fee(self._period)
@property
def location(self):
return self._location
@property
def car_rego(self):
return self._car.rego
def __str__(self):
output = ''
output += f'Made by {self._customer}\n'
output += f'Reserve {self._car} for {self._period} days\n'
output += f'Locations: {self._location}\n'
output += f'Total fee: ${self.fee:.2f}'
return output
|
[
"ian.isu.park@gmail.com"
] |
ian.isu.park@gmail.com
|
2e8c16e2e289f06947cb7b4b8a393ad42740713b
|
d08a0812d783fc72ca0d52b2c0172b846a1c0ffe
|
/helper_functions.py
|
60662f0950000f526dbcc6f032fcdfe433f1c4cf
|
[] |
no_license
|
NathanVenos/Electricity_Price_Forecasting
|
7ca18ec77bbf06e865b77161f3973fa0218468de
|
17495baf62bfe99d76b42dac24f76e93f11f0925
|
refs/heads/master
| 2020-09-11T06:19:19.102654
| 2020-01-21T17:58:44
| 2020-01-21T17:58:44
| 221,967,901
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,244
|
py
|
import numpy as np
import pandas as pd
import json
import requests
from fbprophet import Prophet
from sklearn.metrics import mean_squared_error, mean_absolute_error
from datetime import date
def generate_api_call_times(start_time, interval_length, intervals):
"""
Generates a list of times for which api calls can be requested
based on a given start time, interval length and number of intervals.
"""
api_times = [start_time]
for interval in range(0, intervals):
sample_time = api_times[-1] + interval_length
api_times.append(sample_time)
return api_times
def label_historicalType_and_precipType(api_json_data):
"""
Function loops through the hourly records in the input
json data to label the data as a historical 'type',
and to populate the 'precipType' with 'none' if this
key-value pair is not present, which occurs when there
was no precipitation at that time.
"""
data_records = api_json_data['hourly']['data']
for record in data_records:
record.update({'type': 'historical'})
try:
record.update({'precipType': record['precipType']})
except:
record.update({'precipType': 'none'})
return data_records
def label_forecastType_and_precipType(api_json_data):
"""
Function loops through the hourly records in the input
    json data to label the data as a forecast 'type',
and to populate the 'precipType' with 'none' if this
key-value pair is not present, which occurs when there
was no precipitation at that time.
"""
data_records = api_json_data['hourly']['data']
for record in data_records:
record.update({'type': 'forecast'})
try:
record.update({'precipType': record['precipType']})
except:
record.update({'precipType': 'none'})
return data_records
def api_dataframe_conversion(json_data, hourly_records, column_headers):
"""
Function generates a dataframe from the hourly historical
weather records for the given day and also provides
locational and type (e.g. historical or forecast) designations.
"""
data_frame = pd.DataFrame(hourly_records)
data_frame['time'] = pd.to_datetime(data_frame['time'],unit='s')
data_frame['latitude'] = json_data['latitude']
data_frame['longitude'] = json_data['longitude']
data_frame['timezone'] = json_data['timezone']
data_frame = data_frame[column_headers]
data_frame.set_index('time', inplace=True)
return data_frame
def historical_dataframe_from_api_calls(list_of_times, url_base, api_key, location):
"""
Function loops through the list of times provided and
returns a dataframe with hourly data from the date when
each time occurs.
"""
# initializing the final dataframe
column_headers = ['time', 'latitude', 'longitude', 'timezone', 'type', 'summary', 'icon',
'precipIntensity', 'precipProbability', 'precipType', 'temperature',
'apparentTemperature', 'dewPoint', 'humidity', 'pressure', 'windSpeed',
'windGust', 'windBearing', 'cloudCover', 'uvIndex', 'visibility']
historical_data_frame = pd.DataFrame(columns=column_headers)
historical_data_frame.set_index('time', inplace=True)
# looping through the list of times
for time in list_of_times:
url = url_base+api_key+'/'+location+','+str(time)+'?exclude=currently,minutely,daily,alerts,flags'
response = requests.get(url)
data = response.json()
hourly_data = label_historicalType_and_precipType(data)
time_data_frame = api_dataframe_conversion(data, hourly_data, column_headers)
historical_data_frame = historical_data_frame.append(time_data_frame, sort=False)
return historical_data_frame
def forecast_dataframe_from_api_calls(list_of_times, url_base, api_key, location):
"""
Function loops through the list of times provided and
returns a dataframe with hourly data from the date when
each time occurs.
"""
# initializing the final dataframe
column_headers = ['time', 'latitude', 'longitude', 'timezone', 'type', 'summary', 'icon',
'precipIntensity', 'precipProbability', 'precipType', 'temperature',
'apparentTemperature', 'dewPoint', 'humidity', 'pressure', 'windSpeed',
'windGust', 'windBearing', 'cloudCover', 'uvIndex', 'visibility']
forecast_data_frame = pd.DataFrame(columns=column_headers)
forecast_data_frame.set_index('time', inplace=True)
# looping through the list of times
for time in list_of_times:
url = url_base+api_key+'/'+location+','+str(time)+'?exclude=currently,minutely,daily,alerts,flags'
response = requests.get(url)
data = response.json()
hourly_data = label_forecastType_and_precipType(data)
time_data_frame = api_dataframe_conversion(data, hourly_data, column_headers)
forecast_data_frame = forecast_data_frame.append(time_data_frame, sort=False)
return forecast_data_frame
def is_peak(time_info_row):
"""
Encodes a given hour as Peak or Off-Peak per PJM/NERC published standards.
Per published standards:
weekdays from hour 7 through 22 are Peak and all others are Off-Peak
with specific NERC holidays treated as entirely Off-Peak as well.
Row of data must be from a DataFrame that includes these datetime columns:
'date', 'dayofweek', 'hour'.
"""
nerc_holidays = [date(2017, 1, 2),
date(2017, 5, 29),
date(2017, 7, 4),
date(2017, 9, 4),
date(2017, 11, 23),
date(2017, 12, 25),
date(2018, 1, 1),
date(2018, 5, 28),
date(2018, 7, 4),
date(2018, 9, 3),
date(2018, 11, 22),
date(2018, 12, 25),
date(2019, 1, 1),
date(2019, 5, 27),
date(2019, 7, 4),
date(2019, 9, 2),
date(2019, 11, 28),
date(2019, 12, 25),
date(2020, 1, 1),
date(2020, 5, 25),
date(2020, 7, 4),
date(2020, 9, 7),
date(2020, 11, 26),
date(2020, 12, 25)]
if time_info_row['date'] in nerc_holidays:
return 0
elif time_info_row['dayofweek'] >= 5:
return 0
elif (time_info_row['hour'] == 23) or (time_info_row['hour'] <= 6):
return 0
else:
return 1
def encode_circular_time(data, col):
"""
Creates sin/cos circular time for a given type of time (hour, day of week, etc)
to allow for regression on time metrics
using Linear Regression or Decision Tree models as opposed to Prophet.
"""
max_val = data[col].max()
data[col + '_sin'] = round(np.sin(2 * np.pi * data[col]/max_val),6)
data[col + '_cos'] = round(np.cos(2 * np.pi * data[col]/max_val),6)
return data
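# Illustrative usage (added; not part of the original module): circular encoding
# makes hour 23 and hour 0 numerically close, unlike the raw integer values.
# df = pd.DataFrame({'hour': [0, 6, 12, 18, 23]})
# df = encode_circular_time(df, 'hour')
# print(df[['hour', 'hour_sin', 'hour_cos']])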
def mean_abs_pct_err(y_true, y_pred):
"""
Calculates Mean Absolute Percent Error (MAPE)
given actual target values (y_true) and predicted values (y_pred)
"""
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
def print_metrics(y_true, y_pred):
"""
Prints mean squared error, mean absolute error and MAPE
given actual target values (y_true) and predicted values (y_pred)
"""
print('MSE: ', round(mean_squared_error(y_true, y_pred),2))
print('MAE: ', round(mean_absolute_error(y_true, y_pred),2))
print('MAPE: ', round(mean_abs_pct_err(y_true, y_pred),2),'%')
def init_prophet_model(regressors=[], holidays=False, model=None):
    """
    Initializes a prophet model.
    Adds regressors from a list of column names to be used as regressors.
    Includes holidays if holidays=True
    """
    if model is None:
        model = Prophet()  # avoid sharing one Prophet instance across calls via a mutable default argument
    if len(regressors) > 0:
for reg in regressors:
model.add_regressor(reg)
    if holidays:
model.add_country_holidays(country_name='US')
return model
def prophet_df(df, time, target, regressors=[]):
"""
Prepares dataframe of the time series, target and regressors
in the format required by Prophet.
"""
df_prep = df.rename(columns={time: 'ds', target: 'y'})
df_prep = df_prep[['ds', 'y']+regressors]
return df_prep
def create_poly_feat(data, list_of_cols, poly_names):
"""
Adds a polynomial feature to the data
for each column in the provided list of columns.
Names of resulting polynomial columns must be passed.
"""
for ix, col in enumerate(list_of_cols):
data[poly_names[ix]] = data[col] * data[col]
return data
def create_interact_feat(data, list_of_tuples, interact_names):
"""
Adds an interaction feature to the data
for each tuple of columns in the provided list of tuples.
Names of resulting interaction columns must be passed.
"""
for ix, tup in enumerate(list_of_tuples):
data[interact_names[ix]] = data[tup[0]] * data[tup[1]]
return data
|
[
"nathanvenos@gmail.com"
] |
nathanvenos@gmail.com
|
557dc77ea9e99dbf933860debf7334305d13e6aa
|
eff5f0a2470c7023f16f6962cfea35518ec0b89c
|
/Storage_Xs and Os Champion.py
|
7d81e185c2aae6377e67314d2e8577330d0932e8
|
[] |
no_license
|
olegJF/Checkio
|
94ea70b9ee8547e3b3991d17c4f75aed2c2bab2f
|
fc51a7244e16d8d0a97d3bb01218778db1d946aa
|
refs/heads/master
| 2021-01-11T00:46:42.564688
| 2020-03-02T13:36:02
| 2020-03-02T13:36:02
| 70,490,008
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,829
|
py
|
# -*- coding: utf-8 -*-
def x_and_o(grid, mark):
X_vs_O = {'X':'O', 'O':'X'}
def winner(grid, mark):
WINNER_WAYS = ((0, 1, 2), (3, 4, 5),
(6, 7, 8), (0, 3, 6),
(1, 4, 7), (2, 5, 8),
(0, 4, 8), (2, 4, 6)
)
for row in WINNER_WAYS:
line = grid[row[0]]+grid[row[1]]+grid[row[2]]
if line.count('.') == 1:
if line.count(mark) == 2 or line.count(X_vs_O[mark]) == 2:
return row[line.find('.')]
return False
BEST_MOVES = [4, 0, 2, 6, 8, 1, 3, 5, 7]
FIELD = {0:(0, 0), 1:(0, 1), 2:(0, 2),
3:(1, 0), 4:(1, 1), 5:(1, 2),
6:(2, 0), 7:(2, 1), 8:(2, 2)
}
grid = ''.join(grid)
dot_cnt = grid.count('.')
    is_first_move = dot_cnt == 9
    if is_first_move: return FIELD[4]
    is_second_move = dot_cnt == 8
    is_center_free = grid[4] == '.'
if is_second_move and is_center_free:
return FIELD[4]
elif is_second_move:
for i in BEST_MOVES:
if grid[i] == '.': return FIELD[i]
cnt_my_mark = grid.count(mark)
cnt_enemy_mark = grid.count(X_vs_O[mark])
    was_my_first_move = cnt_my_mark == cnt_enemy_mark
legal_moves = [ i for i in range(9) if grid[i] =='.']
if was_my_first_move:
if dot_cnt == 7:
for i in (0, 2, 8, 6):
if grid[i] == '.': return FIELD[i]
is_winner = winner(grid, mark)
if is_winner is not False: return FIELD[is_winner]
if dot_cnt == 5:
lines = ((0, 1, 2), (6, 7, 8),
(0, 3, 6), (2, 5, 8))
for x, y in ([0, 8], [2, 6]):
if x in legal_moves and y in legal_moves:
for corner in (x,y):
for line in lines:
if corner in line:
row = grid[line[0]]+grid[line[1]]+grid[line[2]]
cnt_mark = row.count(mark)
cnt_dot = row.count('.')
if cnt_mark ==1 and cnt_dot ==2:
return FIELD[corner]
for move in BEST_MOVES:
if move in legal_moves: return FIELD[move]
else:
is_winner = winner(grid, mark)
if is_winner is not False: return FIELD[is_winner]
if dot_cnt == 6 and grid[4] == mark:
for i in (1, 3, 5, 7):
if i in legal_moves: return FIELD[i]
for move in BEST_MOVES:
if move in legal_moves: return FIELD[move]
print(x_and_o(( "XO.", ".X.", "..O"), "X"))
#print(winner("XO..X....", 'X'))
|
[
"jf2@ua.fm"
] |
jf2@ua.fm
|
bd47d3f1d21fd2a4603924825d0945b67780bbf8
|
0daa4596579a40c4148b379bda3949579adc9789
|
/csvimporter.py
|
17ff05943f8cf291435299b2c8c7d3e7fa53c52b
|
[] |
no_license
|
JustinHodge/C950
|
20c1bcbfcbf33303f2a059ba805c08dc4fdc6aee
|
4c0de4aaeb4d14e1a9327771d0758ce06a2d7e42
|
refs/heads/master
| 2023-02-08T22:45:54.880405
| 2021-01-05T02:16:13
| 2021-01-05T02:16:13
| 314,898,881
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
import csv
from hash_table import HashMap
from wgupspackage import WGUPSPackage
distance_table = "WGUPS Distance Table.csv"
package_table = "WGUPS Package File.csv"
# this method will open the csv file passed in and parse it, returning a usable list format
# O(N) -- reads every row of the file
def csv_import(file_name):
csv_as_list = []
with open(file_name, "r") as raw_CSV:
iterable_CSV = csv.reader(raw_CSV)
for i in iterable_CSV:
csv_as_list.append(i)
return csv_as_list
# this method creates an instance of the custom HashMap containing packages from the package_table
# O(N)
def get_packages():
    rows = csv_import(package_table)  # parse the CSV once instead of twice
    hash_table = HashMap(len(rows))
    for i in rows:
        package = WGUPSPackage(i)
        hash_table.insert_item(int(package.package_id), package)
    return hash_table
# O(N)
def get_distances():
# this returns a tuple. [0] is a 2 dimensional list of distances
    # [1] is a dictionary assigning each key (address) to its index for use
# in the 2 dimensional list
lists_of_distances = []
key_dict = {}
raw_data = csv_import(distance_table)
keys_list = []
raw_data.pop(0)
for line in raw_data:
keys_list.append(line.pop(0))
lists_of_distances.append(line)
for key in keys_list:
key_dict[key] = len(key_dict)
return lists_of_distances, key_dict
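# Hypothetical usage sketch (added for illustration; the address keys are
# placeholders that depend on the actual CSV contents):
# distances, keys = get_distances()
# i, j = keys['address A'], keys['address B']
# print(distances[i][j])  # distance between the two addresses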
|
[
"51083905+JustinHodge@users.noreply.github.com"
] |
51083905+JustinHodge@users.noreply.github.com
|
ab81ba9e96858582ad8b65f4288a0df3ba0e34f5
|
f9f4b4ea4c8b51e0b5cba79f72745bce0564185b
|
/56tingshu/pipelines.py
|
e39409e084d31ce4591c80713990ff7b00ff6d03
|
[] |
no_license
|
nightBuger/ting89Catch
|
c01ed02ee5acb2621e10b981b04e98e6ed12c6fe
|
55f2964b5ec774d48f05e3bbf14d41734b70bb0b
|
refs/heads/main
| 2023-07-28T01:38:27.431230
| 2021-08-29T16:48:44
| 2021-08-29T16:48:44
| 387,152,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,086
|
py
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy.pipelines.files import FilesPipeline
from urllib.parse import urlparse
import logging
def ConstractHeader(request):
user_agenta = ["Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
]
    import re
    import random
    refer = re.search(r'(http|https)://(www\.)?(\w+(\.)?)+', request.url).group()
header = { 'User-Agent': random.choice(user_agenta),
# 'Referer' : refer,
'Host' : refer[refer.find('//')+2:],
'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding' : 'gzip, deflate',
'Accept-Language' : 'zh-CN,zh;q=0.9',
'Connection' : 'keep-alive',
'Upgrade-Insecure-Requests' : '1',
}
    return header
class ListenPipeline(FilesPipeline):
def file_path(self, request, response=None, info=None, *, item=None):
header = ConstractHeader(request)
request.headers.update(header)
return '{}/{}/{}.{}'.format(item['web_name'], item['book_name'], item['title'], item['file_urls'][0].split('.')[-1])
def item_completed(self, results, item, info):
for ok, x in results:
if ok:
info.spider.log("下载完成: 文件={}/{}, 文件url={}".format(self.store.basedir, x['path'], item['title']))
else:
info.spider.log("下载失败:文件={}, url={}".format(item['title'], item['file_urls'][0]), level=logging.ERROR)
return super().item_completed(results, item, info)
|
[
"4788665@qq.com"
] |
4788665@qq.com
|
e70fb6cd614e83147c29b14ad1473ff1362210a3
|
9fb6f860bc4050add478c92bb1110fcf5047680e
|
/functions/inference_fcns.py
|
7cd5a19b70006010065048423f9e5ccaa34db95d
|
[
"Apache-2.0"
] |
permissive
|
UCLA-StarAI/HwAwareProb
|
9255202c90d67d6dcfe558e11394e27dbe95fe14
|
972e7924616f96cdbbeeec140c191e0fb5860632
|
refs/heads/master
| 2021-06-28T02:52:24.719997
| 2021-03-18T02:39:00
| 2021-03-18T02:39:00
| 217,924,553
| 0
| 0
|
Apache-2.0
| 2021-03-18T02:39:00
| 2019-10-27T22:23:38
| null |
UTF-8
|
Python
| false
| false
| 3,256
|
py
|
import operator
from functools import reduce  # reduce is not a builtin in Python 3
def prod(factors):
    return reduce(operator.mul, factors, 1)
def init_weight(content_ac,content_lmap_parsed_indeces,content_lmap_parsed_weights):
weight_ac = [None] * len(content_ac)
for i, ac in enumerate(content_ac):
if ac[0] == 'L':
index = int(ac[2:len(ac)])
# index_weight = indices(content_lmap_parsed_indeces, lambda x: x == index)
index_weight=[ii for ii,con in enumerate(content_lmap_parsed_indeces) if con==index]
weight_ac[i] = content_lmap_parsed_weights[index_weight[0]]
else:
weight_ac[i] = 0
return (weight_ac)
def extract_operation_numbers(content):
# spaces = indices(content, lambda x: x.isspace())
spaces=[ii for ii,con in enumerate(content) if con.isspace()]
ex_op=[]
if content[0] == 'A':
for sp in range(len(spaces) - 1):
if sp == len(spaces) - 2:
ex_op.append(content[spaces[sp + 1] + 1:])
else:
ex_op.append(content[spaces[sp + 1] + 1:spaces[sp + 2]])
else:
for sp in range(len(spaces) - 2):
if sp == len(spaces) - 3:
ex_op.append(content[spaces[sp + 2] + 1:])
else:
ex_op.append(content[spaces[sp + 2] + 1:spaces[sp + 3]])
return ex_op,spaces
def generate_operation_set(operations_index,content_ac):
operation = [[] for _ in range(len(operations_index))]
for i, op in enumerate(operations_index):
content=content_ac[operations_index[i]]
(extracted_op,sp)=extract_operation_numbers(content)
operation[i]=extracted_op
return operation
def extract_operations(args):
k=1
for i, ac in enumerate(args):
if ac[0]=='L':
k=k+1
operations_index=[0]*(len(args)-k+1)
operation_wmc=[0]*(len(args))
j=0
for i, ac in enumerate(args):
res=[]
if ac[0] != 'L':
operations_index[j]=i
j=j+1
(op_ex, spaces) = extract_operation_numbers(ac)
if ac[0]=='O':
res.append(ac[0:spaces[2]])
if ac[0]=='A':
res.append(ac[0:spaces[1]])
lis=list(map(int, op_ex))
for l in lis:
res.append(l)
operation_wmc[i]=res
else:
operation_wmc[i]=ac
operation=generate_operation_set(operations_index,args)
return operations_index, operation
def performWMC(operations_index, operation, weight_ac_original, content_ac):
weight_ac=[w for w in weight_ac_original]
for i, op in enumerate(operations_index):
temp = []
for j in range(len(operation[i])):
temp.append(weight_ac[int(operation[i][j])])
if 'A' in content_ac[operations_index[i]][0]:
weight_ac[operations_index[i]] = prod(temp)
elif 'O' in content_ac[operations_index[i]][0]:
weight_ac[operations_index[i]] = sum(temp)
if content_ac[len(content_ac) - 1][0] != 'L':
wc = weight_ac[len(content_ac) - 1]
else:
for w in range(len(content_ac) - 1, -1, -1):
if content_ac[w][0] != 'L':
wc = weight_ac[w]
break
return (weight_ac, wc)
|
[
"laura.galindez@esat.kuleuven.be"
] |
laura.galindez@esat.kuleuven.be
|
4ba8ab1fb6488c448855d281c6cf1a00684f4f3d
|
3572182a76026b2ff1afcb9cb4fe1e8b138b2edc
|
/scripts/lightsensors2.py
|
05976595bed876b6484ba2912f184798006d9486
|
[] |
no_license
|
tak-mahal/pimouse_ros
|
489d8785360d5dbc688346e31d013a81e888ed44
|
234e939b1d739634db7a7efe67111190baf1efcd
|
refs/heads/master
| 2020-04-02T03:34:59.306485
| 2019-01-01T12:44:06
| 2019-01-01T12:44:06
| 153,973,771
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 890
|
py
|
#!/usr/bin/env python
import sys, rospy
from pimouse_ros.msg import LightSensorValues
if __name__ == '__main__':
devfile = '/dev/rtlightsensor0'
rospy.init_node('lightsensors')
pub = rospy.Publisher('lightsensors', LightSensorValues, queue_size=1)
rate = rospy.Rate(10)
while not rospy.is_shutdown():
try:
with open(devfile, 'r') as f:
data = f.readline().split()
data = [ int(e) for e in data ]
d = LightSensorValues()
d.right_forward = data[0]
d.right_side = data[1]
d.left_side = data[2]
d.left_forward = data[3]
d.sum_all = sum(data)
d.sum_forward = data[0] + data[3]
pub.publish(d)
except IOError:
            rospy.logerr("cannot read from " + devfile)
rate.sleep()
|
[
"kawakami.takuma@takenaka.co.jp"
] |
kawakami.takuma@takenaka.co.jp
|
38ec59fe7a2b66fa41df94fca1b20e945f6c612e
|
7c7fab5672f2ca5956474908e50cae448e3b4359
|
/tools/lib/template_parser.py
|
0d38e7cfa4165feb196b1f469af0dec02a28f5cc
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
tobby2002/localzulip
|
b7656fd06e66c0817c3f9803fbafde5dcdf60d1a
|
bfedd3f5686b91a5e332c96b4102b16c4e1b6fa9
|
refs/heads/master
| 2022-12-10T18:20:42.823580
| 2016-09-30T00:28:18
| 2016-09-30T00:28:18
| 69,618,407
| 1
| 0
|
Apache-2.0
| 2022-12-07T23:39:23
| 2016-09-30T00:18:26
|
Python
|
UTF-8
|
Python
| false
| false
| 7,483
|
py
|
from __future__ import absolute_import
from __future__ import print_function
from typing import Callable, List, Optional
from six.moves import range
import re
class TemplateParserException(Exception):
# TODO: Have callers pass in line numbers.
pass
class TokenizerState(object):
def __init__(self):
# type: () -> None
self.i = 0
self.line = 1
self.col = 1
class Token(object):
def __init__(self, kind, s, tag, line, col):
# type: (str, str, str, int, int) -> None
self.kind = kind
self.s = s
self.tag = tag
self.line = line
self.col = col
def tokenize(text):
# type: (str) -> List[Token]
def advance(n):
# type: (int) -> None
for _ in range(n):
state.i += 1
if state.i >= 0 and text[state.i - 1] == '\n':
state.line += 1
state.col = 1
else:
state.col += 1
def looking_at(s):
# type: (str) -> bool
return text[state.i:state.i+len(s)] == s
def looking_at_html_start():
# type: () -> bool
return looking_at("<") and not looking_at("</")
def looking_at_html_end():
# type: () -> bool
return looking_at("</")
def looking_at_handlebars_start():
# type: () -> bool
return looking_at("{{#") or looking_at("{{^")
def looking_at_handlebars_end():
# type: () -> bool
return looking_at("{{/")
def looking_at_django_start():
# type: () -> bool
return looking_at("{% ") and not looking_at("{% end")
def looking_at_django_end():
# type: () -> bool
return looking_at("{% end")
state = TokenizerState()
tokens = []
while state.i < len(text):
if looking_at_html_start():
s = get_html_tag(text, state.i)
tag_parts = s[1:-1].split()
if not tag_parts:
raise TemplateParserException("Tag name missing")
tag = tag_parts[0]
if is_special_html_tag(s, tag):
kind = 'html_special'
elif s.endswith('/>'):
kind = 'html_singleton'
else:
kind = 'html_start'
elif looking_at_html_end():
s = get_html_tag(text, state.i)
tag = s[2:-1]
kind = 'html_end'
elif looking_at_handlebars_start():
s = get_handlebars_tag(text, state.i)
tag = s[3:-2].split()[0]
kind = 'handlebars_start'
elif looking_at_handlebars_end():
s = get_handlebars_tag(text, state.i)
tag = s[3:-2]
kind = 'handlebars_end'
elif looking_at_django_start():
s = get_django_tag(text, state.i)
tag = s[3:-2].split()[0]
kind = 'django_start'
elif looking_at_django_end():
s = get_django_tag(text, state.i)
tag = s[6:-3]
kind = 'django_end'
else:
advance(1)
continue
token = Token(
kind=kind,
s=s,
tag=tag,
line=state.line,
col=state.col,
)
tokens.append(token)
advance(len(s))
return tokens
def validate(fn=None, text=None, check_indent=True):
# type: (Optional[str], Optional[str], bool) -> None
assert fn or text
if fn is None:
fn = '<in memory file>'
if text is None:
text = open(fn).read()
tokens = tokenize(text)
class State(object):
def __init__(self, func):
# type: (Callable[[Token], None]) -> None
self.depth = 0
self.matcher = func
def no_start_tag(token):
# type: (Token) -> None
raise TemplateParserException('''
No start tag
fn: %s
end tag:
%s
line %d, col %d
''' % (fn, token.tag, token.line, token.col))
state = State(no_start_tag)
def start_tag_matcher(start_token):
# type: (Token) -> None
state.depth += 1
start_tag = start_token.tag
start_line = start_token.line
start_col = start_token.col
old_matcher = state.matcher
def f(end_token):
# type: (Token) -> None
end_tag = end_token.tag
end_line = end_token.line
end_col = end_token.col
if start_tag == 'a':
max_lines = 3
else:
max_lines = 1
problem = None
if (start_tag == 'code') and (end_line == start_line + 1):
problem = 'Code tag is split across two lines.'
if start_tag != end_tag:
problem = 'Mismatched tag.'
elif check_indent and (end_line > start_line + max_lines):
if end_col != start_col:
problem = 'Bad indentation.'
if problem:
raise TemplateParserException('''
fn: %s
%s
start:
%s
line %d, col %d
end tag:
%s
line %d, col %d
''' % (fn, problem, start_token.s, start_line, start_col, end_tag, end_line, end_col))
state.matcher = old_matcher
state.depth -= 1
state.matcher = f
for token in tokens:
kind = token.kind
tag = token.tag
if kind == 'html_start':
start_tag_matcher(token)
elif kind == 'html_end':
state.matcher(token)
elif kind == 'handlebars_start':
start_tag_matcher(token)
elif kind == 'handlebars_end':
state.matcher(token)
elif kind == 'django_start':
if is_django_block_tag(tag):
start_tag_matcher(token)
elif kind == 'django_end':
state.matcher(token)
if state.depth != 0:
raise TemplateParserException('Missing end tag')
def is_special_html_tag(s, tag):
# type: (str, str) -> bool
return (s.startswith('<!--') or
tag in ['link', 'meta', '!DOCTYPE'])
def is_django_block_tag(tag):
# type: (str) -> bool
return tag in [
'autoescape',
'block',
'comment',
'for',
'if',
'ifequal',
'verbatim',
'blocktrans',
'trans',
'raw',
]
def get_handlebars_tag(text, i):
# type: (str, int) -> str
end = i + 2
while end < len(text) -1 and text[end] != '}':
end += 1
if text[end] != '}' or text[end+1] != '}':
raise TemplateParserException('Tag missing }}')
s = text[i:end+2]
return s
def get_django_tag(text, i):
# type: (str, int) -> str
end = i + 2
while end < len(text) -1 and text[end] != '%':
end += 1
if text[end] != '%' or text[end+1] != '}':
raise TemplateParserException('Tag missing %}')
s = text[i:end+2]
return s
def get_html_tag(text, i):
# type: (str, int) -> str
quote_count = 0
end = i + 1
while end < len(text) and (text[end] != '>' or quote_count % 2 != 0):
if text[end] == '"':
quote_count += 1
end += 1
if end == len(text) or text[end] != '>':
raise TemplateParserException('Tag missing >')
s = text[i:end+1]
return s
|
[
"tobby2002@gmail.com"
] |
tobby2002@gmail.com
|
bc38069aef7b32c7c351685d0b2122f0d604529e
|
2f5d221d5cd423f07da50ed8be9668d811e550b4
|
/airtest/core/ios/fake_minitouch.py
|
b5eb7986af962fc31648c001e4259ad16c8af15a
|
[
"Apache-2.0"
] |
permissive
|
Pactortester/Airtest
|
d1db25498591992dee525b2ceeb45de9239b319f
|
18e57ae2bbde3f2b95c32f09e214fdf4aec41330
|
refs/heads/master
| 2022-06-03T22:52:54.939200
| 2020-06-29T01:01:30
| 2020-06-29T01:01:30
| 275,080,743
| 1
| 0
|
Apache-2.0
| 2020-06-26T05:28:02
| 2020-06-26T05:28:02
| null |
UTF-8
|
Python
| false
| false
| 1,979
|
py
|
# coding=utf-8
import subprocess
import os
import re
import struct
import logging
from airtest.utils.logger import get_logger
from airtest.utils.nbsp import NonBlockingStreamReader
from airtest.utils.safesocket import SafeSocket
LOGGING = get_logger(__name__)
class fakeMiniTouch(object):
lastDown = {'x': None, 'y': None}
recentPoint = {'x': None, 'y': None}
def __init__(self, dev):
self.dev = dev
self.swipe_threshold = 10
def setup(self):
pass
def operate(self, operate_arg):
        # TODO FIX IPHONE TOUCH
# start down
if operate_arg['type'] == 'down':
self.lastDown['x'] = operate_arg['x']
self.lastDown['y'] = operate_arg['y']
# mouse up
if operate_arg['type'] == 'up':
# in case they may be None
if self.lastDown['x'] is None or self.lastDown['y'] is None:
return
# has recent point
if self.recentPoint['x'] and self.recentPoint['y']:
# swipe need to move longer
                # TODO: threshold separating a swipe from a tap; currently 10
if abs(self.recentPoint['x'] - self.lastDown['x']) > self.swipe_threshold \
or abs(self.recentPoint['y'] - self.lastDown['y']) > self.swipe_threshold:
self.dev.swipe((self.lastDown['x'], self.lastDown['y']),
(self.recentPoint['x'], self.recentPoint['y']))
else:
self.dev.touch((self.lastDown['x'], self.lastDown['y']))
else:
self.dev.touch((self.lastDown['x'], self.lastDown['y']))
# clear infos
self.lastDown = {'x': None, 'y': None}
self.recentPoint = {'x': None, 'y': None}
if operate_arg['type'] == 'move':
self.recentPoint['x'] = operate_arg['x']
self.recentPoint['y'] = operate_arg['y']
if __name__ == '__main__':
pass
|
[
"lxn3032@corp.netease.com"
] |
lxn3032@corp.netease.com
|
7066f6fd5882ec68a145a9b5116e7c5eff2d33f2
|
a854f81f3ca0d6e6d6cf60662d05bc301465e28c
|
/backend/booking/migrations/0001_initial.py
|
4d479654287d6f6f7b495a5050811e171d37cb04
|
[] |
no_license
|
crowdbotics-apps/lavadoras-19637
|
7f99e2046a6a92cdcfaec052eb9eadfd807193fd
|
577d0da2626867a8a1b27d2df386c8598e4adc6d
|
refs/heads/master
| 2022-12-02T21:15:17.103593
| 2020-08-18T08:28:47
| 2020-08-18T08:28:47
| 288,397,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,317
|
py
|
# Generated by Django 2.2.15 on 2020-08-18 08:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('location', '0001_initial'),
('taxi_profile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BookingTransaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('distance', models.FloatField()),
('price', models.FloatField()),
('status', models.CharField(max_length=10)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('timestamp_depart', models.DateTimeField()),
('timestamp_arrive', models.DateTimeField()),
('tip', models.FloatField(blank=True, null=True)),
('driver', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bookingtransaction_driver', to='taxi_profile.DriverProfile')),
('dropoff', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bookingtransaction_dropoff', to='location.MapLocation')),
('pickup', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bookingtransaction_pickup', to='location.MapLocation')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bookingtransaction_user', to='taxi_profile.UserProfile')),
],
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.FloatField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('review', models.TextField(blank=True, null=True)),
('driver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rating_driver', to='taxi_profile.DriverProfile')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='rating_user', to='taxi_profile.UserProfile')),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.TextField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('booking', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='message_booking', to='booking.BookingTransaction')),
('driver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_driver', to='taxi_profile.DriverProfile')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_user', to='taxi_profile.UserProfile')),
],
),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
b868e52bfe4f8289a0b4ee764a4cdd78272d6019
|
c0d30680d859506be19468d4d42df3f930f97bed
|
/django/dabiao_new/dabiao/data/views.py
|
cc50d4e3bb264f7b9ea78846d9c1430245ee894b
|
[] |
no_license
|
glennneiger/deepdraw
|
bf2aca0acdc6ab6a57731e872f7287497428c280
|
52c4a50df3c1890499b0c42a3a02f6d418d31f40
|
refs/heads/master
| 2020-12-03T22:48:05.166524
| 2020-01-03T01:36:05
| 2020-01-03T01:36:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 915
|
py
|
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from rest_framework.views import APIView
import requests
import json
from string import digits
from data.models import Tripletmark
# Create your views here.
class Test_Base(APIView):
def get(self, request):
        print('ok')
        return HttpResponse('Success')
class testdb(APIView):
def get(self, request):
test1 = Tripletmark(uuid1='1', uuid2='2', bbox1='3', bbox2='4', mark='5', fix='6')
test1.save()
        return HttpResponse('Success')
class getAll(APIView):
def get(self, request):
test = Tripletmark.objects.all()
print(test)
for i in test:
print(i.uuid1)
return HttpResponse('success')
class LoginView(APIView):
def get(self, request, *args, **kwargs):
return HttpResponse(content=open("../templates/login.html").read())
|
[
"laipan@deepdraw.cn"
] |
laipan@deepdraw.cn
|
75d146601fcfb74873d0571bc7d1e05b92491d12
|
8f0b0ec0a0a2db00e2134b62a1515f0777d69060
|
/scripts/study_case/ID_32/0504_softmax_regression.py
|
5d1daab24d438285e89be0a81cd2092dde31f122
|
[
"Apache-2.0"
] |
permissive
|
Liang813/GRIST
|
2add5b4620c3d4207e7661eba20a79cfcb0022b5
|
544e843c5430abdd58138cdf1c79dcf240168a5f
|
refs/heads/main
| 2023-06-09T19:07:03.995094
| 2021-06-30T05:12:19
| 2021-06-30T05:12:19
| 429,016,034
| 0
| 0
|
Apache-2.0
| 2021-11-17T11:19:48
| 2021-11-17T11:19:47
| null |
UTF-8
|
Python
| false
| false
| 1,389
|
py
|
import myutil as mu
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import sys
sys.path.append("/data")
torch.manual_seed(1)
x_train = [[1, 2, 1, 1],
[2, 1, 3, 2],
[3, 1, 3, 4],
[4, 1, 5, 5],
[1, 7, 5, 5],
[1, 2, 5, 6],
[1, 6, 6, 6],
[1, 7, 7, 7]]
y_train = [2, 2, 2, 1, 1, 1, 0, 0]
x_train = torch.FloatTensor(x_train)
y_train = torch.LongTensor(y_train)
mu.log("x_train", x_train)
mu.log("y_train", y_train)
y_one_hot = torch.zeros(8, 3)
y_one_hot.scatter_(dim=1, index=y_train.unsqueeze(dim=1), value=1)
mu.log("y_one_hot", y_one_hot)
W = torch.zeros((4, 3), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
optimizer = optim.SGD([W, b], lr=0.1)
nb_epoches = 2000
mu.plt_init()
'''inserted code'''
import sys
sys.path.append("/data")
from scripts.utils.torch_utils import TorchScheduler
scheduler = TorchScheduler(name="PyTorchDeepLearningStart.0504_softmax_regression")
'''inserted code'''
while True:
hypothesis = F.softmax(x_train.matmul(W) + b, dim=1)
    cost = (y_one_hot * -torch.log(hypothesis)).sum(dim=1).mean()  # per-sample cross-entropy, averaged over the batch
optimizer.zero_grad()
cost.backward()
optimizer.step()
'''inserted code'''
scheduler.loss_checker(cost)
scheduler.check_time()
'''inserted code'''
mu.plt_show()
mu.log("W", W)
mu.log("b", b)
|
[
"793679547@qq.com"
] |
793679547@qq.com
|
0cb54dfdd0286d6326cf13ee4b734b2e4564e0f8
|
627ac9ef7baefa817f5824c3e5a306d46304e2eb
|
/Lagrange Polynomial.py
|
79ee5db1cd1a4f68f0b76c9d73d6f4654f0168d3
|
[] |
no_license
|
sieginglion/Numerical-Analysis
|
b23a1abd3498f182880de8e0378d68d8d824ea68
|
54247db2ea1180894bf8320014587c491dac9695
|
refs/heads/master
| 2021-06-13T07:43:49.808886
| 2017-03-20T21:44:21
| 2017-03-20T21:44:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
# Pn(x) = Sigma[f(xk) * Lk(x)] from 0 to n
# Lk(x) = Pi[(x - xi) / (xk - xi)] from 0 to n and i != k
# f(x0) = 1, f(x1) = 2, f(x2) = 3
# P(x) = 1 * (x - 2)(x - 3) / (1 - 2)(1 - 3) + ...
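# A minimal runnable sketch of the formulas above (added for illustration):
def lagrange(points, x):
    """Evaluate the Lagrange interpolating polynomial through `points` at x."""
    total = 0.0
    for k, (xk, yk) in enumerate(points):
        term = yk
        for i, (xi, _) in enumerate(points):
            if i != k:
                term *= (x - xi) / (xk - xi)
        total += term
    return total
# The example from the comments: the points (1, 1), (2, 2), (3, 3) are collinear,
# so the interpolating polynomial is P(x) = x.
# print(lagrange([(1, 1), (2, 2), (3, 3)], 2.5))  # -> 2.5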
|
[
"s103031111@outlook.com"
] |
s103031111@outlook.com
|
33277af45dfcc7f2343a16e2514aff499af5abea
|
5869fa875a0070b49f2a5c14861b3f4a72c75ddc
|
/Coollibrary_tutorial/LibraryApp/models.py
|
a2b63d088d84706ccc8dd456e87b9701d738c15c
|
[] |
no_license
|
IzhykOleh/Coollibrary-tutorial
|
0c02a9cd4c7fc5d4619c431abbc43a67b432009e
|
ab5d588fdd78e2051e2a1aafbeec1202b3f42826
|
refs/heads/master
| 2020-04-29T07:39:51.474086
| 2019-03-16T11:25:37
| 2019-03-16T11:25:37
| 175,961,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,246
|
py
|
from django.db import models
class Team(models.Model):
char_field = models.CharField(max_length=40)
choices = (
('U09', 'Under 09s'),
('U10', 'Under 10s'),
('U11', 'Under 11s'),
)
charfield_choices = models.CharField(max_length=3,
choices=choices,
default='U11')
class Genre(models.Model):
"""
Model representing a book genre (e.g. Science Fiction, Non Fiction).
"""
name = models.CharField(max_length=200, help_text="Enter a book genre \
(e.g. Science Fiction, French Poetry etc.)")
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
from django.urls import reverse #Used to generate URLs by reversing the URL patterns
class Book(models.Model):
"""
Model representing a book (but not a specific copy of a book).
"""
title = models.CharField(max_length=200)
author = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)
# Foreign Key used because book can only have one author, but authors can have multiple books
# Author as a string rather than object because it hasn't been declared yet in the file.
summary = models.TextField(max_length=1000, help_text='Enter a brief description of the book')
isbn = models.CharField('ISBN',max_length=13,
help_text='13 Character \
<a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>')
genre = models.ManyToManyField(Genre, help_text='Select a genre for this book')
# ManyToManyField used because genre can contain many books. Books can cover many genres.
# Genre class has already been defined so we can specify the object above.
def __str__(self):
"""
String for representing the Model object.
"""
return self.title
def get_absolute_url(self):
"""
Returns the url to access a detail record for this book.
"""
return reverse('book-detail', args=[str(self.id)])
def display_genre(self):
"""
Creates a string for the Genre. This is required to display genre in Admin.
"""
return ', '.join([ genre.name for genre in self.genre.all()[:3] ])
display_genre.short_description = 'Genre'
import uuid # Required for unique book instances
class BookInstance(models.Model):
"""
Model representing a specific copy of a book (i.e. that can be borrowed from the library).
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4,
help_text="Unique ID for this particular book across whole library")
book = models.ForeignKey('Book', on_delete=models.SET_NULL, null=True)
imprint = models.CharField(max_length=200)
due_back = models.DateField(null=True, blank=True)
LOAN_STATUS = (
('m', 'Maintenance'),
('o', 'On loan'),
('a', 'Available'),
('r', 'Reserved'),
)
status = models.CharField(max_length=1, choices=LOAN_STATUS, blank=True,
default='m', help_text='Book availability')
class Meta:
ordering = ["due_back"]
def __str__(self):
"""
String for representing the Model object
"""
return '{0} ({1})'.format(self.id,self.book.title)
class Author(models.Model):
"""
Model representing an author.
"""
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100, null=True)
date_of_birth = models.DateField(null=True, blank=True)
date_of_death = models.DateField('Died', null=True, blank=True)
class Meta:
ordering = ["last_name","first_name"]
def get_absolute_url(self):
"""
Returns the url to access a particular author instance.
"""
return reverse('author-detail', args=[str(self.id)])
def __str__(self):
"""
String for representing the Model object.
"""
return '{0}, {1}'.format(self.last_name,self.first_name)
|
[
"izhykoleh18@gmail.com"
] |
izhykoleh18@gmail.com
|
0259840c4fbc274cb2b4428c31cf763cbd64e996
|
b74eb41b660fe49320ba9f1c8aeb0f3f260e17ff
|
/easy_rosetta/session.py
|
42045f3f4864255f6fabb900cbe056a5373072c1
|
[] |
no_license
|
walterwu/easy_rosetta
|
605abebf8940060b53f928179c54fd305e581d41
|
a639c9751cb9c1cf678c0abfc2104578f4b086ce
|
refs/heads/master
| 2020-03-30T08:23:27.579395
| 2019-02-18T07:49:34
| 2019-02-18T07:49:34
| 151,011,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,749
|
py
|
import pickle
import sys
import os
from .utils import *
from .constants import *
from .config import EasyRosettaConfig, ProtocolConfig
class Session():
def __init__(self, session_name=None, working_dir=None, protein_name=None, progress_dict=None, protocol_configs=None):
self.session_name = session_name
self.working_dir = working_dir
self.protein_name = protein_name
self.progress_dict = progress_dict
        if self.progress_dict is None:
self.progress_dict = {
"frags_generated":False,
"decoys_generated":False,
"scored":False,
"clustered":False,
}
self.easyrosetta_config = EasyRosettaConfig.load()
self.protocol_configs = protocol_configs
if self.protocol_configs is None:
self.protocol_configs = []
def save_session(self):
yes = ["yes", "y"]
no = ["no", "n"]
overwrite = False
if self.session_name is None:
return
while Session.session_exists(self.session_name) and not overwrite:
status = input("A session with that name already exists. Do you want to overwrite that session? (Y/n)").lower()
while status not in yes and status not in no:
status = input("Please enter (Y/n) ")
if status in no:
session_name = input("Enter a new session name, or (q) to quit: ")
if session_name == 'q':
sys.exit()
else:
self.set_session_name(session_name)
else:
overwrite = True
with open(os.path.join(SESSIONS_PATH, Session.get_session_file(self.session_name)), 'wb') as fp:
pickle.dump(self, fp, pickle.HIGHEST_PROTOCOL)
def set_session_name(self, session_name):
self.session_name = session_name
def set_working_dir(self, working_dir):
self.working_dir = working_dir
def set_protein_name(self, protein_name):
self.protein_name = protein_name
def set_progress_dict(self, progress_dict):
self.progress_dict = progress_dict
def change_progress_dict(self, key, value):
if key not in self.progress_dict:
return
else:
self.progress_dict[key] = value
def print_session_info(self):
print("Session name: " + self.session_name)
print("Protein name: " + self.protein_name)
print("Working directory: " + self.working_dir)
print("Progress:" + str(self.progress_dict))
@staticmethod
def load_session(session_name):
if not Session.session_exists(session_name):
print("No such session exists. To list all sessions, try easy-rosetta-sessions -l")
sys.exit()
session = None
with open(os.path.join(SESSIONS_PATH, Session.get_session_file(session_name)), 'rb') as fp:
session = pickle.load(fp)
if session == None:
print("Error loading session " + session_name + ". Check logs for more details.")
sys.exit()
return session
@staticmethod
def remove_session(session_name):
if not Session.session_exists(session_name):
print("No such session exists. To list all sessions, try easy-rosetta-sessions -l")
sys.exit()
os.remove(os.path.join(SESSIONS_PATH, Session.get_session_file(session_name)))
@staticmethod
def clear_sessions():
for file in os.listdir(SESSIONS_PATH):
os.remove(os.path.join(SESSIONS_PATH, file))
@staticmethod
def session_exists(session_name):
return Session.get_session_file(session_name) in os.listdir(SESSIONS_PATH)
@staticmethod
def get_session_file(session_name):
return session_name + ".session"
|
[
"walter.wu@berkeley.edu"
] |
walter.wu@berkeley.edu
|
10cffa971d36c5098b5eeb3386cca4463986e19c
|
1dfada4543ffe314ba0077a3446675de2ee6076b
|
/users/signals.py
|
faa82e95328912bfaa2c5249afbf185fb590a53d
|
[] |
no_license
|
michaelkamande/blissfulhomes
|
a1c19ab23c5ba8eb5fb9e229d6867dcbebdf857d
|
d711de53a313ea952e80652b48260ff5d8b94c9a
|
refs/heads/master
| 2022-11-11T15:57:03.247798
| 2020-07-06T08:20:55
| 2020-07-06T08:20:55
| 277,481,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from .models import Profile
@receiver(post_save, sender = User)
def create_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user = instance)
@receiver(post_save, sender = User)
def save_profile(sender, instance, **kwargs):
instance.profile.save()
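# Note (not part of the original file): post_save receivers only fire if this
# module is imported at startup. A common pattern, assuming the app is named
# 'users' as its path suggests, is to import it from the AppConfig:
#
# from django.apps import AppConfig
#
# class UsersConfig(AppConfig):
#     name = 'users'
#
#     def ready(self):
#         import users.signals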
|
[
"mike@hotmail.com"
] |
mike@hotmail.com
|
66ee42bf083364ea3975225cfe14efbc76c1c287
|
8760f182049d4caf554c02b935684f56f6a0b39a
|
/boar/facebook_connect/migrations/0002_profile_onetoone_to_user.py
|
ed79636574a8ae85a20dfee1a85138d28e7f7b15
|
[
"BSD-3-Clause"
] |
permissive
|
boar/boar
|
c674bc65623ee361af31c7569dd16c6eb8da3b03
|
6772ad31ee5bb910e56e650cc201a476adf216bc
|
refs/heads/master
| 2020-06-09T06:59:31.658154
| 2012-02-28T19:28:58
| 2012-02-28T19:28:58
| 1,734,103
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,352
|
py
|
from south.db import db
from django.db import models
from boar.facebook_connect.models import *
class Migration:
def forwards(self, orm):
# Changing field 'FacebookProfile.user'
# (to signature: django.db.models.fields.related.OneToOneField(to=orm['auth.User'], unique=True))
db.alter_column('facebook_connect_facebookprofile', 'user_id', orm['facebook_connect.facebookprofile:user'])
# Creating unique_together for [user] on FacebookProfile.
db.create_unique('facebook_connect_facebookprofile', ['user_id'])
def backwards(self, orm):
# Deleting unique_together for [user] on FacebookProfile.
db.delete_unique('facebook_connect_facebookprofile', ['user_id'])
# Changing field 'FacebookProfile.user'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['auth.User']))
db.alter_column('facebook_connect_facebookprofile', 'user_id', orm['facebook_connect.facebookprofile:user'])
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'facebook_connect.facebookprofile': {
'Meta': {'unique_together': "(('user', 'uid'),)"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['facebook_connect']
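# Illustrative usage (not part of the original migration), for the South era
# this file targets:
#   ./manage.py migrate facebook_connect 0002   # apply forwards()
#   ./manage.py migrate facebook_connect 0001   # roll back via backwards()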
|
[
"ben@firshman.co.uk"
] |
ben@firshman.co.uk
|
82b29211a6b91229fbd09b657dc3d5bd09b371d1
|
15f438d029528a978383f24f85035c911e314b72
|
/scripts/view.py
|
fb6535e75554b37be86f943e960d3f272dce0ff1
|
[
"MIT"
] |
permissive
|
rg314/autoballs
|
91d11315a61d4c088b099744301b3f1b68eecc93
|
21fab5c810f18c0d50c23051928d3bb86fbc6941
|
refs/heads/main
| 2023-05-30T11:48:52.901933
| 2021-06-23T14:48:27
| 2021-06-23T14:48:27
| 341,683,921
| 1
| 0
|
MIT
| 2021-03-18T23:28:23
| 2021-02-23T20:39:55
|
Python
|
UTF-8
|
Python
| false
| false
| 760
|
py
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.stats.multicomp as multi
data = pd.read_csv('results/20210226 Cam Franze_results/res.csv')
sns.set_style("white")
sns.set_style("ticks")
ax = sns.boxplot(y='Median axon', x='Gel type', data=data, palette="Blues")
ax = sns.swarmplot(y='Median axon', x='Gel type', data=data, color=".25", size=10)
ax.set_ylabel('Axon length [um]')
ax.set_xlabel('Gel type [kPa]')
test = multi.MultiComparison(data['Median axon'], data['Gel type'])
res = test.tukeyhsd()
res_table1 = res.summary()
print(res_table1)
test = multi.pairwise_tukeyhsd(data['Median axon'], data['Gel type'], alpha=0.05)
res_table2 = test.summary()
print(res_table2)
plt.show()
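# Note (not in the original script): res_table1 and res_table2 should agree,
# since MultiComparison(...).tukeyhsd() and pairwise_tukeyhsd(...) run the
# same Tukey HSD test at alpha=0.05.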
|
[
"ryan.greenhalgh@hotmail.co.uk"
] |
ryan.greenhalgh@hotmail.co.uk
|
f5519d705ed4169137add38ab789ba5a84d47bf8
|
5684c015980cc9bb86844345c33e0d50283d617f
|
/test/test_replace.py
|
2ab0015dce65a2d9bbc4f4bc3735084df0ba3859
|
[
"MIT"
] |
permissive
|
JIYANG-PLUS/ustjson
|
5da0b3ab7c18ba0a8565ea7e91389e8f00e8cfc9
|
f96fe00d0c23575c22aca8075dd544c1a460287f
|
refs/heads/master
| 2021-07-09T01:35:46.545806
| 2020-11-25T02:59:11
| 2020-11-25T02:59:11
| 216,016,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,680
|
py
|
from ustjson.Capture import Capturer
from ustjson import TreeBuilder,SpecialText,read_txt,replace_id_feature
from ustjson.read_pdf import read_pdf
import re,datetime,os,pprint
os.chdir('/Users/jiyang/Desktop/')
file_name = 'test.pdf'
flag = '0123456789$'
now_id_split_char = '.'
patt = re.compile(r'^第(.{1,5}?)条')
text, tables = read_txt('test.txt'), None
text = replace_id_feature(text,patt,'0','$') # First replacement pass; use $ to distinguish the other heading numbers.
try:
catalog, text, _ = Capturer.capture_catalog_and_body_text(text) # Capture the catalog and body text, and clean them by category.
pdf = TreeBuilder(catalog=catalog) # Initialize the document tree structure.
except:
pdf = TreeBuilder() # Initialize an empty tree.
text_end = '$99999 END'
temp_ids = pdf.allocate_text_for_eachNode(
text,
standard_flag=flag+now_id_split_char,
standard_flag_wosc=flag,
id_split_char = now_id_split_char,
initials = '$',
text_end=text_end
) # Assign each node's TEMP field so parsing is more accurate; temp_ids are the nodes whose TEMP field was assigned.
pdf.build_data_and_sub_tree_node(
temp_ids,
standard_flag=flag+now_id_split_char,
id_split_char = now_id_split_char
) # Expand the child nodes and complete the DATA field.
if bool(tables): pass # Table-handling statements would follow the same pattern as above.
ST = SpecialText(pdf.tree) # Get the special-text object for further processing.
now = datetime.datetime.today() # Get the current time.
ST.perfect_tree(f'{now:%Y-%m-%d %H:%M:%S}', f'{file_name}') # These arguments are decorative only, mainly to convey timeliness.
ST.to_json(path=os.getcwd(),file_name=f'{file_name[:-4]}.json') # Save as a JSON file; see the official json docs for the other parameters.
|
[
"jiyangj@foxmail.com"
] |
jiyangj@foxmail.com
|
47693d0710e9c072cad944e857787701b982ce3d
|
0ea12ae71b3863a8279fd7200e61f5c40dc3dcb6
|
/image_bosch_detect_ssd_mobile.py
|
92fd277b6022c6d929dd37d5dae50ebf4863411d
|
[
"MIT"
] |
permissive
|
scrambleegg7/Traffic-Light-Classification
|
7dafb32f43bf1c73d62c645105cdc414ebb0cf44
|
2a9f6b8272866f289963905b162c35058ce6a234
|
refs/heads/master
| 2020-04-03T02:58:44.729521
| 2018-10-28T14:57:44
| 2018-10-28T14:57:44
| 154,973,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,201
|
py
|
import tensorflow as tf
import numpy as np
import datetime
import time
import os, sys
import cv2
from PIL import Image
import yaml
from glob import glob
try:
import matplotlib
matplotlib.use('TkAgg')
finally:
from matplotlib import pyplot as plt
from object_detection.utils import visualization_utils as vis_util
from object_detection.utils import label_map_util
class TrafficLightClassifier(object):
def __init__(self, frozen_model_file):
PATH_TO_MODEL = frozen_model_file
self.detection_graph = tf.Graph()
with self.detection_graph.as_default():
od_graph_def = tf.GraphDef()
# Works up to here.
with tf.gfile.GFile(PATH_TO_MODEL, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
self.d_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
self.d_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
self.d_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
self.num_d = self.detection_graph.get_tensor_by_name('num_detections:0')
self.sess = tf.Session(graph=self.detection_graph)
def get_classification(self, img):
# Bounding Box Detection.
with self.detection_graph.as_default():
# Expand dimension since the model expects image to have shape [1, None, None, 3].
img_expanded = np.expand_dims(img, axis=0)
(boxes, scores, classes, num) = self.sess.run(
[self.d_boxes, self.d_scores, self.d_classes, self.num_d],
feed_dict={self.image_tensor: img_expanded})
return boxes, scores, classes, num
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
def get_all_labels(input_yaml, riib=False):
""" Gets all labels within label file
Note that RGB images are 1280x720 and RIIB images are 1280x736.
:param input_yaml: Path to yaml file
:param riib: If True, change path to labeled pictures
:return: images: Labels for traffic lights
"""
images = yaml.load(open(input_yaml, 'rb').read())
for i in range(len(images)):
images[i]['path'] = os.path.abspath(os.path.join(os.path.dirname(input_yaml), images[i]['path']))
if riib:
images[i]['path'] = images[i]['path'].replace('.png', '.pgm')
images[i]['path'] = images[i]['path'].replace('rgb/train', 'riib/train')
images[i]['path'] = images[i]['path'].replace('rgb/test', 'riib/test')
for box in images[i]['boxes']:
box['y_max'] = box['y_max'] + 8
box['y_min'] = box['y_min'] + 8
return images
def detect_label_images(input_yaml, output_folder=None):
"""
Shows and draws pictures with labeled traffic lights.
Can save pictures.
:param input_yaml: Path to yaml file
:param output_folder: If None, do not save picture. Else enter path to folder
"""
PATH_TO_LABELS = r'data/bosch_label_map.pbtxt'
NUM_CLASSES = 14
frozen_model_file = "./models/bosch_freeze_tf1.3/frozen_inference_graph.pb"
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
print(category_index)
# loading models
tfc = TrafficLightClassifier(frozen_model_file)
images = get_all_labels(input_yaml)
if output_folder is not None:
if not os.path.exists(output_folder):
os.makedirs(output_folder)
for idx, image_dict in enumerate(images[:10]):
image_path = image_dict['path']
image_np = cv2.imread( image_path )
if idx == 0:
print(image_path)
timestr = time.strftime("%Y%m%d-%H%M%S")
boxes, scores, classes, num = tfc.get_classification(image_np)
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
max_boxes_to_draw=5,
min_score_thresh=0.3,
line_thickness=8)
if idx % 10 == 0 and idx > 0:
print("%d images processed. %s" % ( (idx + 1), image_path ) )
image_file = image_path.split("/")[-1]
cv2.imwrite( os.path.join( output_folder, image_file ) , image_np )
if __name__ == '__main__':
if len(sys.argv) < 2:
print(__doc__)
sys.exit(-1)
label_file = sys.argv[1]
output_folder = None if len(sys.argv) < 3 else sys.argv[2]
detect_label_images(label_file, output_folder)
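# Illustrative invocation, inferred from the argv handling above (paths are
# placeholders, not from the original repository):
#   python image_bosch_detect_ssd_mobile.py data/train.yaml output/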
|
[
"donchan@milano.local"
] |
donchan@milano.local
|
a6ed2f7ef8aa2918d09efb0206500abcbc374bb2
|
c762d74617d816e989ce86780e414bd6fed40157
|
/code/auxiliary/SupervisedModels.py
|
1d77eb575776a9ca381f798378e79ac6d71a7725
|
[] |
no_license
|
sfwatergit/IO-HMM
|
293f5adc7ed14780d7ce0d2d4298b0d65b1c7c24
|
13ae526e6fe289fdfb34679ffd8bd497f62e2b9c
|
refs/heads/master
| 2021-01-11T12:16:00.619550
| 2016-09-28T23:03:00
| 2016-09-28T23:03:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,893
|
py
|
from __future__ import division
import numpy as np
from scipy import linalg, special
from numpy.linalg import norm
import statsmodels.api as st
import family
from scipy.sparse import linalg as sp_linalg
from sklearn import linear_model
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.optimize import newton_cg
from scipy import optimize
from scipy.misc import logsumexp
import statsmodels.regression.linear_model as lim
import sys
import warnings
warnings.simplefilter("ignore")
def _rescale_data(X, Y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sqrtW = np.sqrt(sample_weight)
newX = X * sqrtW.reshape(-1,1)
newY = Y * sqrtW.reshape(-1,1)
return newX, newY
def addIntercept(X):
t = X.shape[0]
X_with_bias = np.hstack((np.ones((t, 1)), X))
return X_with_bias
class BaseModel(object):
"""
A generic supervised model for data with input and output.
BaseModel does nothing, but lays out the methods expected of any subclass.
"""
def __init__(self, fam, solver, fit_intercept = True, est_sd = False, penalty = None, reg = 0, l1_ratio = 0, tol = 1e-4, max_iter = 100):
"""
Constructor
Parameters
----------
fam: family of the GLM, LM or MNL
solver: family specific solver
penalty: penalty to regularize the model
reg: regularization strenth
l1_ratio: if elastic net, the l1 reg ratio
tol: tol in the optimization procedure
max_iter: max_iter in the optimization procedure
-------
"""
self.fit_intercept = fit_intercept
self.penalty = penalty
self.reg = reg
self.l1_ratio = l1_ratio
self.fam = fam
self.solver = solver
self.tol = tol
self.max_iter = max_iter
self.est_sd = est_sd
def fit(self, X, Y, sample_weight = None):
"""
fit the weighted model
Parameters
----------
X : design matrix
Y : response matrix
sample_weight: sample weight vector
"""
raise NotImplementedError
def predict(self, X):
"""
predict the Y value based on the model
----------
X : design matrix
Returns
-------
predicted value
"""
return NotImplementedError
def probability(self, X, Y):
"""
Given a set of X and Y, calculate the probability of
observing Y value
"""
logP = self.log_probability(X, Y)
if logP is not None:
return np.exp(self.log_probability(X, Y))
else:
return None
def log_probability(self, X, Y):
"""
Given a set of X and Y, calculate the log probability of
observing each of Y value given each X value
should return a vector
"""
return NotImplementedError
def estimate_dispersion(self):
raise NotImplementedError
def estimate_sd(self):
raise NotImplementedError
def estimate_loglikelihood(self):
raise NotImplementedError
class GLM(BaseModel):
"""
A Generalized linear model for data with input and output.
"""
def __init__(self, fam, solver = 'pinv', fit_intercept = True, est_sd = False, penalty = None, reg = 0, l1_ratio = 0, tol = 1e-4, max_iter = 100):
super(GLM, self).__init__(fam = fam, solver = solver, fit_intercept = fit_intercept, est_sd = est_sd, penalty = penalty,
reg = reg, l1_ratio = l1_ratio, tol = tol, max_iter = max_iter)
def fit(self, X, Y, sample_weight = None):
"""
fit the weighted model
Parameters
----------
X : design matrix
Y : response matrix
sample_weight: sample weight vector
"""
# family is the glm family with link, the family is the same as in the statsmodel
if sample_weight is None:
sample_weight = np.ones((X.shape[0],))
assert X.shape[0] == sample_weight.shape[0]
assert X.shape[0] == Y.shape[0]
assert Y.ndim == 1 or Y.shape[1] == 1
Y = Y.reshape(-1,)
sum_w = np.sum(sample_weight)
assert sum_w > 0
if X.ndim == 1:
X = X.reshape(-1,1)
if self.fit_intercept:
X = addIntercept(X)
self.n_samples = X.shape[0]
self.n_features = X.shape[1]
self.n_targets = 1
# start fitting using irls
mu = self.fam.starting_mu(Y)
lin_pred = self.fam.predict(mu)
dev = self.fam.deviance_weighted(Y, mu, sample_weight)
if np.isnan(dev):
raise ValueError("The first guess on the deviance function "
"returned a nan. This could be a boundary "
" problem and should be reported.")
# This special case is used to get the likelihood for a specific
# params vector.
for iteration in range(self.max_iter):
weights = sample_weight * self.fam.weights(mu)
wlsendog = lin_pred + self.fam.link.deriv(mu) * (Y-mu)
if self.penalty is None:
wls_results = lim.WLS(wlsendog, X, weights).fit(method = self.solver)
if self.penalty == 'elasticnet':
wls_results = lim.WLS(wlsendog, X, weights).fit_regularized(alpha = self.reg, L1_wt = self.l1_ratio)
lin_pred = np.dot(X, wls_results.params)
mu = self.fam.fitted(lin_pred)
if Y.squeeze().ndim == 1 and np.allclose(mu - Y, 0):
msg = "Perfect separation detected, results not available"
raise ValueError(msg)
dev_new = self.fam.deviance_weighted(Y, mu, sample_weight)
converged = np.fabs(dev - dev_new) <= self.tol
dev = dev_new
if converged:
break
self.converged = converged
self.coef = wls_results.params
self.dispersion = self.estimate_dispersion(X, Y, mu, sample_weight)
if self.est_sd:
self.sd = self.estimate_sd(X, Y, mu, sample_weight, weights)
self.ll = self.estimate_loglikelihood(Y, mu, sample_weight)
def predict(self, X):
"""
predict the Y value based on the model
----------
X : design matrix
Returns
-------
predicted value
"""
if X.ndim == 1:
X = X.reshape(-1,1)
if self.fit_intercept:
X = addIntercept(X)
lin_pred = np.dot(X, self.coef)
mu = self.fam.fitted(lin_pred)
return mu
def log_probability(self, X, Y):
"""
Given a set of X and Y, calculate the probability of
observing Y value
"""
mu = self.predict(X)
return self.fam.log_probability(Y.reshape(-1,), mu, scale=self.dispersion)
def estimate_dispersion(self, X, Y, mu, w):
if isinstance(self.fam, (family.Binomial, family.Poisson)):
return 1
else:
resid = (Y - mu)
return (resid ** 2 * w / self.fam.variance(mu)).sum()/ np.sum(w)
def estimate_sd(self, X, Y, mu, w, weights):
if self.penalty is None and self.dispersion is not None:
newX, newY = _rescale_data(X, Y, weights)
wX, wY = _rescale_data(X, Y, w * weights)
if X.shape[1] == 1:
try:
cov = 1 / np.dot(newX.T, newX)
temp = np.dot(wX.T, wX)
sd = (np.sqrt(cov ** 2 * temp) * np.sqrt(self.dispersion)).reshape(-1,)
except:
sd = None
else:
try:
cov = np.linalg.inv(np.dot(newX.T, newX))
temp = np.dot(cov, wX.T)
sd = np.sqrt(np.diag(np.dot(temp,temp.T))) * np.sqrt(self.dispersion)
except:
sd = None
else:
sd = None
return sd
def estimate_loglikelihood(self, Y, mu, w):
if self.dispersion is None:
return None
else:
return self.fam.loglike_weighted(Y, mu, w, scale=self.dispersion)
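# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of fitting the GLM above; it assumes the local `family`
# module mirrors statsmodels and exposes a Gaussian family with a default link.
#
# glm = GLM(fam=family.Gaussian(), fit_intercept=True)
# glm.fit(X, y, sample_weight=w)      # X: (n, p), y: (n,), w: (n,)
# mu = glm.predict(X)                 # fitted means
# logp = glm.log_probability(X, y)    # per-sample log-likelihoods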
class LM(BaseModel):
"""
A Generalized linear model for data with input and output.
"""
def __init__(self, solver = 'svd', fit_intercept = True, penalty = None, est_sd = False, reg = 0, l1_ratio = 0, tol = 1e-4, max_iter = 100):
super(LM, self).__init__(fam = 'LM', solver = solver, fit_intercept = fit_intercept, est_sd = est_sd, penalty = penalty,
reg = reg, l1_ratio = l1_ratio, tol = tol, max_iter = max_iter)
def fit(self, X, Y, sample_weight = None):
"""
fit the weighted model
Parameters
----------
X : design matrix
Y : response matrix
sample_weight: sample weight vector
"""
if sample_weight is None:
sample_weight = np.ones((X.shape[0],))
assert X.shape[0] == sample_weight.shape[0]
assert X.shape[0] == Y.shape[0]
sum_w = np.sum(sample_weight)
assert sum_w > 0
if X.ndim == 1:
X = X.reshape(-1,1)
if self.fit_intercept:
X = addIntercept(X)
if Y.ndim == 1:
Y = Y.reshape(-1,1)
self.n_samples = X.shape[0]
self.n_features = X.shape[1]
self.n_targets = Y.shape[1]
newX, newY = _rescale_data(X, Y, sample_weight)
if self.penalty is None:
model = linear_model.LinearRegression(fit_intercept=False)
if self.penalty == 'l1':
model = linear_model.Lasso(fit_intercept=False, alpha = self.reg, tol = self.tol, max_iter = self.max_iter)
if self.penalty == 'l2':
model = linear_model.Ridge(fit_intercept=False, alpha = self.reg, tol = self.tol,
max_iter = self.max_iter, solver = self.solver)
if self.penalty == 'elasticnet':
model = linear_model.ElasticNet(fit_intercept=False, alpha = self.reg, l1_ratio = self.l1_ratio,
tol = self.tol, max_iter = self.max_iter)
model.fit(newX, newY)
self.coef = model.coef_.T
if Y.shape[1] == 1:
self.coef = self.coef.reshape(-1,)
if self.penalty is not None:
self.converged = model.n_iter_ < self.max_iter
else:
self.converged = None
self.dispersion = self.estimate_dispersion(X, Y, sample_weight)
if self.est_sd:
self.sd = self.estimate_sd(X, Y, sample_weight)
self.ll = self.estimate_loglikelihood(sample_weight)
def predict(self, X):
"""
predict the Y value based on the model
----------
X : design matrix
Returns
-------
predicted value
"""
if X.ndim == 1:
X = X.reshape(-1,1)
if self.fit_intercept:
X = addIntercept(X)
mu = np.dot(X, self.coef)
return mu
def log_probability(self, X, Y):
"""
Given a set of X and Y, calculate the probability of
observing Y value
"""
if X.ndim == 1:
X = X.reshape(-1,1)
if self.fit_intercept:
X = addIntercept(X)
if Y.ndim == 1:
Y = Y.reshape(-1,1)
pred = np.dot(X, self.coef)
if pred.ndim == 1:
pred = pred.reshape(-1,1)
if Y.shape[1] == 1:
if self.dispersion > 0:
logP = (Y * pred - pred**2/2)/self.dispersion - Y**2/(2 * self.dispersion) - .5*np.log(2 * np.pi * self.dispersion)
logP = logP.reshape(-1,)
else:
logP = np.zeros((Y.shape[0],))
logP[Y.reshape(-1,)!=pred.reshape(-1,)] = -np.Infinity
logP = logP.reshape(-1,)
else:
if np.linalg.det(self.dispersion) > 0:
logP = -1/2*((Y.shape[1] * np.log(2 * np.pi) + np.log(np.linalg.det(self.dispersion))) +
np.diag(np.dot(np.dot(Y-pred, np.linalg.inv(self.dispersion)), (Y-pred).T)))
logP = logP.reshape(-1,)
else:
if (np.diag(self.dispersion) > 0).all():
new_dispersion = np.diag(np.diag(self.dispersion))
logP = -1/2*((Y.shape[1] * np.log(2 * np.pi) + np.log(np.linalg.det(self.dispersion))) +
np.diag(np.dot(np.dot(Y-pred, np.linalg.inv(new_dispersion)), (Y-pred).T)))
logP = logP.reshape(-1,)
else:
logP = np.zeros((Y.shape[0],))
logP[np.linalg.norm(Y-pred, axis = 1)!=0] = -np.Infinity
logP = logP.reshape(-1,)
return logP
def estimate_dispersion(self, X, Y, sample_weight):
newX, newY = _rescale_data(X, Y, sample_weight)
newPred = np.dot(newX, self.coef)
if newPred.ndim == 1:
newPred = newPred.reshape(-1,1)
wresid = newY - newPred
ssr = np.dot(wresid.T, wresid)
sigma2 = ssr / np.sum(sample_weight)
if sigma2.shape == (1,1):
sigma2 = sigma2[0,0]
return sigma2
def estimate_sd(self, X, Y, sample_weight):
newX, newY = _rescale_data(X, Y, sample_weight)
if self.penalty is None:
wX, wY = _rescale_data(X, Y, sample_weight ** 2)
if newX.shape[1] == 1:
try:
cov = 1 / np.dot(newX.T, newX)
temp = np.dot(wX.T, wX)
if newY.shape[1] == 1:
sd = np.sqrt(cov ** 2 * temp * self.dispersion).reshape(-1,)
else:
sd = np.sqrt(cov ** 2 * temp * np.diag(self.dispersion))
except:
sd = None
else:
try:
cov = np.linalg.inv(np.dot(newX.T, newX))
temp = np.dot(cov, wX.T)
if newY.shape[1] == 1:
sd = np.sqrt(np.diag(np.dot(temp,temp.T)) * self.dispersion).reshape(-1,)
else:
sd = np.sqrt(np.outer(np.diag(np.dot(temp,temp.T)), np.diag(self.dispersion)))
except:
sd = None
else:
sd = None
return sd
def estimate_loglikelihood(self, sample_weight):
q = self.n_targets
sum_w = np.sum(sample_weight)
if q == 1:
if self.dispersion > 0:
ll = - q * sum_w / 2 * np.log(2 * np.pi) - sum_w / 2 * np.log(self.dispersion) - q * sum_w / 2
else:
ll = None
else:
if np.linalg.det(self.dispersion) > 0:
ll = - q * sum_w / 2 * np.log(2 * np.pi) - sum_w / 2 * np.log(np.linalg.det(self.dispersion)) - q * sum_w / 2
else:
if (np.diag(self.dispersion) > 0).all():
ll = - q * sum_w / 2 * np.log(2 * np.pi) - np.sum(sum_w / 2 * np.log(np.diag(self.dispersion))) - q * sum_w / 2
else:
ll = None
return ll
class MNL(BaseModel):
"""
A MNL for data with input and output.
"""
def fit(self, X, Y, sample_weight = None):
"""
fit the weighted model
Parameters
----------
X : design matrix
Y : response matrix
sample_weight: sample weight vector
"""
raise NotImplementedError
def predict_probability(self, X):
"""
predict the Y value based on the model
----------
X : design matrix
Returns
-------
predicted value
"""
return np.exp(self.predict_log_probability(X))
def predict_log_probability(self, X):
"""
predict the Y value based on the model
----------
X : design matrix
Returns
-------
predicted value
"""
if X.ndim == 1:
X = X.reshape(-1,1)
if self.fit_intercept:
X = addIntercept(X)
p = np.dot(X, self.coef)
if p.ndim == 1:
p = p.reshape(-1,1)
p -= logsumexp(p, axis = 1)[:, np.newaxis]
return p
def predict(self, X):
"""
predict the Y value based on the model
----------
X : design matrix
Returns
-------
predicted value
"""
return NotImplementedError
def log_probability(self, X, Y):
"""
Given a set of X and Y, calculate the probability of
observing Y value
"""
return NotImplementedError
def estimate_dispersion(self):
return 1
def estimate_sd(self, X, sample_weight):
if self.penalty is None:
o_normalized = np.dot(X, self.coef)
if o_normalized.ndim == 1:
o_normalized = o_normalized.reshape(-1,1)
o_normalized -= logsumexp(o_normalized, axis = 1)[:, np.newaxis]
o_normalized = np.exp(o_normalized)
# calculate hessian
p = self.n_features
q = self.n_targets
h = np.zeros((p*(q-1), p*(q-1)))
for e in range(q-1):
for f in range(q-1):
h[e*p: (e+1)*p, f*p: (f+1)*p] = -np.dot(np.dot(X.T,
np.diag(np.multiply(np.multiply(o_normalized[:, f+1],
(e==f) - o_normalized[:, e+1]),
sample_weight))), X)
if np.sum(sample_weight) > 0:
h = h / np.sum(sample_weight) * X.shape[0]
if np.all(np.linalg.eigvals(-h) > 0) and np.linalg.cond(-h) < 1/sys.float_info.epsilon:
sd = np.sqrt(np.diag(np.linalg.inv(-h))).reshape(p,q-1, order = 'F')
sd = np.hstack((np.zeros((p, 1)), sd))
else:
sd = None
else:
sd = None
return sd
def estimate_loglikelihood(self, X, Y, sample_weight):
return NotImplementedError
class MNLD(MNL):
"""
A MNL for discrete data with input and output.
"""
def __init__(self, solver='newton-cg', fit_intercept = True, est_sd = False, penalty = None, reg = 0, l1_ratio = 0, tol = 1e-4, max_iter = 100):
super(MNLD, self).__init__(fam = 'MNLD', solver = solver, fit_intercept = fit_intercept, est_sd = est_sd, penalty = penalty,
reg = reg, l1_ratio = l1_ratio, tol = tol, max_iter = max_iter)
def fit(self, X, Y, sample_weight = None):
"""
fit the weighted model
Parameters
----------
X : design matrix
Y : response matrix
sample_weight: sample weight vector
"""
if sample_weight is None:
sample_weight = np.ones((X.shape[0],))
assert Y.ndim == 1 or Y.shape[1] == 1
assert X.shape[0] == Y.shape[0]
assert X.shape[0] == sample_weight.shape[0]
if self.reg == 0 or (self.penalty is None):
penalty1 = 'l2'
c = 1e200
else:
penalty1 = self.penalty
c = 1/self.reg
if X.ndim == 1:
X = X.reshape(-1,1)
if self.fit_intercept:
X = addIntercept(X)
self.n_samples = X.shape[0]
self.n_features = X.shape[1]
self.n_targets = len(np.unique(Y))
if self.n_targets < 2:
raise ValueError('n_targets < 2')
self.lb = LabelBinarizer().fit(Y)
model = linear_model.LogisticRegression(fit_intercept = False, penalty = penalty1, C = c,
multi_class = 'multinomial', solver = self.solver,
tol = self.tol, max_iter = self.max_iter)
Y_fit = self.lb.transform(Y)
model.fit(X, Y, sample_weight = sample_weight)
w0 = model.coef_
if self.n_targets == 2:
w0 = np.vstack((np.zeros((1, self.n_features)), w0*2))
w1 = w0.reshape(self.n_targets, -1)
w1 = w1.T - w1.T[:,0].reshape(-1,1)
self.coef = w1
self.converged = model.n_iter_ < self.max_iter
self.dispersion = self.estimate_dispersion()
if self.est_sd:
self.sd = self.estimate_sd(X, sample_weight)
self.ll = self.estimate_loglikelihood(X, Y, sample_weight)
def predict(self, X):
"""
predict the Y value based on the model
----------
X : design matrix
Returns
-------
predicted value
"""
index = np.argmax(self.predict_log_probability(X), axis = 1)
zero = np.zeros((X.shape[0], self.n_targets))
zero[np.arange(X.shape[0]), index] = 1
return self.lb.inverse_transform(zero)
def log_probability(self, X, Y):
"""
Given a set of X and Y, calculate the probability of
observing Y value
"""
if X.ndim == 1:
X = X.reshape(-1,1)
assert Y.ndim == 1 or Y.shape[1] == 1
assert X.shape[0] == Y.shape[0]
p = self.predict_log_probability(X)
Y_transformed = self.lb.transform(Y)
if Y_transformed.shape[1] == 1:
Y_aug = np.zeros((X.shape[0],2))
Y_aug[np.arange(X.shape[0]),Y_transformed.reshape(-1,)] = 1
else:
Y_aug = Y_transformed
logP = np.sum(p*Y_aug, axis = 1)
return logP
def estimate_loglikelihood(self, X, Y, sample_weight):
o_normalized_log = np.dot(X, self.coef)
if o_normalized_log.ndim == 1:
o_normalized_log = o_normalized_log.reshape(-1,1)
o_normalized_log -= logsumexp(o_normalized_log, axis = 1)[:, np.newaxis]
Y_aug = self.lb.transform(Y)
ll = (sample_weight[:, np.newaxis] * Y_aug * o_normalized_log).sum()
return ll
class MNLP(MNL):
"""
A MNL with probability response for data with input and output.
"""
def __init__(self, solver = 'newton-cg', fit_intercept = True, est_sd = False, penalty = None, reg = 0, l1_ratio = 0, tol = 1e-4, max_iter = 100):
super(MNLP, self).__init__(fam = 'MNLP', solver = solver, fit_intercept = fit_intercept, est_sd = est_sd, penalty = penalty,
reg = reg, l1_ratio = l1_ratio, tol = tol, max_iter = max_iter)
def fit(self, X, Y, sample_weight = None):
"""
fit the weighted model
Parameters
----------
X : design matrix
Y : response matrix
sample_weight: sample weight vector
"""
if sample_weight is None:
sample_weight = np.ones((X.shape[0],))
assert X.shape[0] == Y.shape[0]
assert X.shape[0] == sample_weight.shape[0]
if X.ndim == 1:
X = X.reshape(-1,1)
if self.fit_intercept:
X = addIntercept(X)
self.n_samples = X.shape[0]
self.n_features = X.shape[1]
self.n_targets = Y.shape[1]
if self.n_targets < 2:
raise ValueError('n_targets < 2')
w0 = np.zeros((self.n_targets*self.n_features, ))
if self.solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
else:
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
if self.solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, Y, self.reg, sample_weight),
iprint=0, pgtol=self.tol, maxiter=self.max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, Y, self.reg, sample_weight),
iprint=0, pgtol=self.tol)
if info["warnflag"] == 1:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
try:
n_iter_i = info['nit'] - 1
except:
n_iter_i = info['funcalls'] - 1
else:
args = (X, Y, self.reg, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=self.max_iter, tol=self.tol)
w1 = w0.reshape(self.n_targets, -1)
w1 = w1.T - w1.T[:,0].reshape(-1,1)
self.coef = w1
self.converged = n_iter_i < self.max_iter
self.dispersion = self.estimate_dispersion()
if self.est_sd:
self.sd = self.estimate_sd(X, sample_weight)
self.ll = self.estimate_loglikelihood(X, Y, sample_weight)
def predict(self, X):
"""
predict the Y value based on the model
----------
X : design matrix
Returns
-------
predicted value
"""
index = np.argmax(self.predict_log_probability(X), axis = 1)
return index
def log_probability(self, X, Y):
"""
Given a set of X and Y, calculate the probability of
observing Y value
"""
if X.ndim == 1:
X = X.reshape(-1,1)
assert Y.ndim == 2
assert X.shape[0] == Y.shape[0]
p = self.predict_log_probability(X)
logP = np.sum(p*Y, axis = 1)
return logP
def estimate_loglikelihood(self, X, Y, sample_weight):
o_normalized_log = np.dot(X, self.coef)
if o_normalized_log.ndim == 1:
o_normalized_log = o_normalized_log.reshape(-1,1)
o_normalized_log -= logsumexp(o_normalized_log, axis = 1)[:, np.newaxis]
ll = (sample_weight[:, np.newaxis] * Y * o_normalized_log).sum()
return ll
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
p = np.dot(X, w.T)
p -= logsumexp(p, axis = 1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * np.sum(w * w)
p = np.exp(p, p)
return loss, p, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
grad = np.zeros((n_classes, n_features))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = np.dot(diff.T, X)
grad[:, :n_features] += alpha * w
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = np.dot(X, v.T)
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features))
hessProd[:, :n_features] = np.dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
return hessProd.ravel()
return grad, hessp
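# --- Illustrative usage (not part of the original file) ---
# A sketch of how the three helpers above plug into sklearn's newton_cg, with
# X (n_samples x n_features), one-hot Y (n_samples x n_classes), and unit
# sample weights; this mirrors the call made in MNLP.fit.
#
# w0 = np.zeros(Y.shape[1] * X.shape[1])
# sw = np.ones(X.shape[0])
# func = lambda w, *args: _multinomial_loss(w, *args)[0]
# grad = lambda w, *args: _multinomial_loss_grad(w, *args)[1]
# w_opt, n_iter = newton_cg(_multinomial_grad_hess, func, grad, w0,
#                           args=(X, Y, 1.0, sw), maxiter=100, tol=1e-4)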
|
[
"yinmogeng@gmail.com"
] |
yinmogeng@gmail.com
|
84d43864ef2048b246ffb95eeccc37cd8ec22c52
|
97c2b0a57f6bb07a0b71641dd8bb81ae4f333515
|
/191217/main.py
|
6edb608c58f66b011215492e297c753314597abb
|
[] |
no_license
|
miyabi625/kaggle-pfs
|
cedbf29abde9d129d22f919605c01f60d878a305
|
72137cc8587cd00e9a8ab49648c74cb0a26cc728
|
refs/heads/master
| 2020-10-01T06:21:42.388085
| 2020-01-15T23:19:05
| 2020-01-15T23:19:05
| 227,477,436
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,571
|
py
|
####################################################
# Imports
####################################################
import data_load
import model as model
import logger
import logging
import numpy as np
import pandas as pd
####################################################
# Constants
####################################################
# Window size
WINDOW_SIZE = 7 # period that also covers November of the test data
####################################################
# Logger setup
####################################################
log = logging.getLogger(__name__)
logger.setLogger(log)
####################################################
# Data loading
####################################################
log.info('start read data')
# Read the CSV data
dl = data_load.DataLoad(WINDOW_SIZE)
log.info('end read data')
####################################################
# Analysis
####################################################
log.info('start analysis')
### Prepare the training data ###################
# Fetch the training data
train = dl.getTrainValues()
train_ = train[((34-WINDOW_SIZE+1) <= train.date_block_num) & (train.date_block_num <= 33)].reset_index(drop=True)
train_y = train_['item_cnt_month']
train_x = train_.drop(columns=['date_block_num','item_cnt_month'])
#log.info(train_y.head())
log.info(train_y.count())
#log.info(train_x.head())
log.info(train_x.count())
model = model.Model()
model.fit(train_x.values,train_y.values)
log.info('feature_importances')
log.info(model.get_feature_importances(train_x))
pred = model.predict(train_x)
score = model.predictScore(train_y.values,pred)
log.info('predictScore')
log.info(score)
# Apply the model to the test data
test = dl.getTestValues()
test_ = train[(train.date_block_num == 34)].reset_index(drop=True)
test_x = test_.drop(columns=['date_block_num','item_cnt_month'])
#log.info(test_x.head())
pred = model.predict(test_x)
log.info('end analysis')
####################################################
# Output file writing
####################################################
log.info('start output data')
test_x['item_cnt_month'] = pred
test_x['shop_id'] = test_x['unique_no'] % 100
test_x['item_id'] = test_x['unique_no'] // 100
submission = pd.merge(
test,
test_x[['shop_id','item_id','item_cnt_month']],
on=['shop_id','item_id'],
how='left'
)
# Create the submission file
submission[['ID','item_cnt_month']].to_csv('./output/submission.csv', index=False)
log.info('end output data')
|
[
"miyabi625@gmail.com"
] |
miyabi625@gmail.com
|
f3342ae253a6c3ea4cdf0a8b6733c66468df32a0
|
b47a907e824b52a6ee02dfb6387d24fa4d7fe88f
|
/config/settings.py
|
711faa6f8b40f97ba26f9110ae9b2a5e620c989a
|
[] |
no_license
|
hiroshi-higashiyama/DJANGO-KAKEIBO
|
413a883fdef2571cacbd6c8679e63a6aecab7ae9
|
564c6047fcc6f6bb4a45b2eec121df619d158952
|
refs/heads/master
| 2022-12-29T19:53:15.186934
| 2020-09-21T01:04:10
| 2020-09-21T01:04:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,180
|
py
|
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3!7$0+ew+1s-)tt%ex9gwqtf_(oq==%7celkb+i7g01_ehy&im'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'kakeibo',
'bootstrapform',
'django.contrib.humanize',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
NUMBER_GROUPING = 3
|
[
"s20840011@gmail.com"
] |
s20840011@gmail.com
|
a3250628cfe4b7bd81a0cfd021620ea7fa5102e3
|
e5dafd36bb8ceaf8d68fd38188bdf2e80136d9ab
|
/helloworld.py
|
2e36877658e09f2cb17e264defa770af9a61f62d
|
[] |
no_license
|
arsalanahmad4/github-repo
|
e09ceeb1f90f87c61cdf9bb9d15f4794fc3fc012
|
e506f198421d8b9002fa222afdde307eb3edb68e
|
refs/heads/master
| 2023-01-23T06:33:02.737916
| 2020-11-12T16:44:54
| 2020-11-12T16:44:54
| 312,322,775
| 0
| 0
| null | 2020-11-12T16:29:06
| 2020-11-12T15:47:10
|
Python
|
UTF-8
|
Python
| false
| false
| 76
|
py
|
print('hello world')
print('dev branch')
print('new change in dev branch')
|
[
"arsalanahmad0407@gmail.com"
] |
arsalanahmad0407@gmail.com
|
271e0a82482eb25eaca4b7f12e7efeb08508fb7a
|
9206e405e9be5f80a08e78b59d1cb79c519ae515
|
/algorithms/codeforces/the_number_of_even_pairs/main.py
|
7b7aac218751e1de472854d40e92a53218a4c619
|
[] |
no_license
|
mfbx9da4/mfbx9da4.github.io
|
ac4e34f0e269fb285e4fc4e727b8564b5db1ce3b
|
0ea1a0d56a649de3ca7fde2d81b626aee0595b2c
|
refs/heads/master
| 2023-04-13T22:15:19.426967
| 2023-04-12T12:14:40
| 2023-04-12T12:14:40
| 16,823,428
| 2
| 0
| null | 2022-12-12T04:36:08
| 2014-02-14T01:30:20
|
SCSS
|
UTF-8
|
Python
| false
| false
| 738
|
py
|
"""
"""
from math import factorial
def int_as_array(num): return list(map(int, [y for y in str(num)]))
def array_as_int(arr): return int(''.join(map(str, arr)))
def read_int(): return int(input())
def read_array(): return list(map(int, input().split(' ')))
def array_to_string(arr, sep=' '): return sep.join(map(str, arr))
def matrix_to_string(arr, sep=' '): return '[\n' + '\n'.join(
[sep.join(map(str, row)) for row in arr]) + '\n]'
def combine(n, r):
try:
return factorial(n) / (factorial(n - r) * factorial(r))
except:
return 0
def solve(N, M):
choose_evens = combine(N, 2)
choose_odds = combine(M, 2)
return int(choose_evens + choose_odds)
N, M = read_array()
print(solve(N, M))
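# Quick sanity check (illustrative, not in the original submission): with
# N=2 even numbers and M=3 odd numbers the answer is C(2,2) + C(3,2) = 1 + 3 = 4,
# i.e. solve(2, 3) == 4.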
|
[
"dalberto.adler@gmail.com"
] |
dalberto.adler@gmail.com
|
9a357773dc9557d0d326bc7c9bc1a1e5cdb927ce
|
91c5391b6960cad5ca476bce685a73918568fcaf
|
/Assets/XLua/Tutorial/TODOTest/Resources/xls2lua/Lib/email/message.py
|
409721e6cf3e920eee833a62f1f4a1ec018f7874
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause"
] |
permissive
|
ljz/xLua
|
a6fa7cda38e609946633a99310c7d082afc116b6
|
44d484208fce409d87b3353e1441c52576b561bf
|
refs/heads/master
| 2021-04-30T10:50:07.816281
| 2018-06-24T05:19:42
| 2018-06-24T05:19:42
| 121,334,404
| 0
| 0
| null | 2018-02-13T03:31:19
| 2018-02-13T03:31:19
| null |
UTF-8
|
Python
| false
| false
| 46,510
|
py
|
# Copyright (C) 2001-2007 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Basic message object for the email package object model."""
__all__ = ['Message']
import re
import uu
import quopri
from io import BytesIO, StringIO
# Intrapackage imports
from email import utils
from email import errors
from email._policybase import compat32
from email import charset as _charset
from email._encoded_words import decode_b
Charset = _charset.Charset
SEMISPACE = '; '
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _splitparam(param):
# Split header parameters. BAW: this may be too simple. It isn't
# strictly RFC 2045 (section 5.1) compliant, but it catches most headers
# found in the wild. We may eventually need a full fledged parser.
# RDM: we might have a Header here; for now just stringify it.
a, sep, b = str(param).partition(';')
if not sep:
return a.strip(), None
return a.strip(), b.strip()
def _formatparam(param, value=None, quote=True):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true. If value is a
three tuple (charset, language, value), it will be encoded according
to RFC2231 rules. If it contains non-ascii characters it will likewise
be encoded according to RFC2231 rules, using the utf-8 charset and
a null language.
"""
if value is not None and len(value) > 0:
# A tuple is used for RFC 2231 encoded parameter values where items
# are (charset, language, value). charset is a string, not a Charset
# instance. RFC 2231 encoded values are never quoted, per RFC.
if isinstance(value, tuple):
# Encode as per RFC 2231
param += '*'
value = utils.encode_rfc2231(value[2], value[0], value[1])
return '%s=%s' % (param, value)
else:
try:
value.encode('ascii')
except UnicodeEncodeError:
param += '*'
value = utils.encode_rfc2231(value, 'utf-8', '')
return '%s=%s' % (param, value)
# BAW: Please check this. I think that if quote is set it should
# force quoting even if not necessary.
if quote or tspecials.search(value):
return '%s="%s"' % (param, utils.quote(value))
else:
return '%s=%s' % (param, value)
else:
return param
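# Illustrative examples (not in the original source):
#   _formatparam('charset', 'utf-8')                -> 'charset="utf-8"'  (quote=True default)
#   _formatparam('charset', 'utf-8', quote=False)   -> 'charset=utf-8'
#   _formatparam('boundary')                        -> 'boundary'         (no value)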
def _parseparam(s):
# RDM This might be a Header, so for now stringify it.
s = ';' + str(s)
plist = []
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
if '=' in f:
i = f.index('=')
f = f[:i].strip().lower() + '=' + f[i+1:].strip()
plist.append(f.strip())
s = s[end:]
return plist
def _unquotevalue(value):
# This is different than utils.collapse_rfc2231_value() because it doesn't
# try to convert the value to a unicode. Message.get_param() and
# Message.get_params() are both currently defined to return the tuple in
# the face of RFC 2231 parameters.
if isinstance(value, tuple):
return value[0], value[1], utils.unquote(value[2])
else:
return utils.unquote(value)
class Message:
"""Basic message object.
A message object is defined as something that has a bunch of RFC 2822
headers and a payload. It may optionally have an envelope header
(a.k.a. Unix-From or From_ header). If the message is a container (i.e. a
multipart or a message/rfc822), then the payload is a list of Message
objects, otherwise it is a string.
Message objects implement part of the `mapping' interface, which assumes
there is exactly one occurrence of the header per message. Some headers
do in fact appear multiple times (e.g. Received) and for those headers,
you must use the explicit API to set or get all the headers. Not all of
the mapping methods are implemented.
"""
def __init__(self, policy=compat32):
self.policy = policy
self._headers = []
self._unixfrom = None
self._payload = None
self._charset = None
# Defaults for multipart messages
self.preamble = self.epilogue = None
self.defects = []
# Default content type
self._default_type = 'text/plain'
def __str__(self):
"""Return the entire formatted message as a string.
"""
return self.as_string()
def as_string(self, unixfrom=False, maxheaderlen=0, policy=None):
"""Return the entire formatted message as a string.
Optional 'unixfrom', when true, means include the Unix From_ envelope
header. For backward compatibility reasons, if maxheaderlen is
not specified it defaults to 0, so you must override it explicitly
if you want a different maxheaderlen. 'policy' is passed to the
Generator instance used to serialize the message; if it is not
specified the policy associated with the message instance is used.
If the message object contains binary data that is not encoded
according to RFC standards, the non-compliant data will be replaced by
unicode "unknown character" code points.
"""
from email.generator import Generator
policy = self.policy if policy is None else policy
fp = StringIO()
g = Generator(fp,
mangle_from_=False,
maxheaderlen=maxheaderlen,
policy=policy)
g.flatten(self, unixfrom=unixfrom)
return fp.getvalue()
def __bytes__(self):
"""Return the entire formatted message as a bytes object.
"""
return self.as_bytes()
def as_bytes(self, unixfrom=False, policy=None):
"""Return the entire formatted message as a bytes object.
Optional 'unixfrom', when true, means include the Unix From_ envelope
header. 'policy' is passed to the BytesGenerator instance used to
serialize the message; if not specified the policy associated with
the message instance is used.
"""
from email.generator import BytesGenerator
policy = self.policy if policy is None else policy
fp = BytesIO()
g = BytesGenerator(fp, mangle_from_=False, policy=policy)
g.flatten(self, unixfrom=unixfrom)
return fp.getvalue()
def is_multipart(self):
"""Return True if the message consists of multiple parts."""
return isinstance(self._payload, list)
#
# Unix From_ line
#
def set_unixfrom(self, unixfrom):
self._unixfrom = unixfrom
def get_unixfrom(self):
return self._unixfrom
#
# Payload manipulation.
#
def attach(self, payload):
"""Add the given payload to the current payload.
The current payload will always be a list of objects after this method
is called. If you want to set the payload to a scalar object, use
set_payload() instead.
"""
if self._payload is None:
self._payload = [payload]
else:
try:
self._payload.append(payload)
except AttributeError:
raise TypeError("Attach is not valid on a message with a"
" non-multipart payload")
def get_payload(self, i=None, decode=False):
"""Return a reference to the payload.
The payload will either be a list object or a string. If you mutate
the list object, you modify the message's payload in place. Optional
i returns that index into the payload.
Optional decode is a flag indicating whether the payload should be
decoded or not, according to the Content-Transfer-Encoding header
(default is False).
When True and the message is not a multipart, the payload will be
decoded if this header's value is `quoted-printable' or `base64'. If
some other encoding is used, or the header is missing, or if the
payload has bogus data (i.e. bogus base64 or uuencoded data), the
payload is returned as-is.
If the message is a multipart and the decode flag is True, then None
is returned.
"""
# Here is the logic table for this code, based on the email5.0.0 code:
# i decode is_multipart result
# ------ ------ ------------ ------------------------------
# None True True None
# i True True None
# None False True _payload (a list)
# i False True _payload element i (a Message)
# i False False error (not a list)
# i True False error (not a list)
# None False False _payload
# None True False _payload decoded (bytes)
# Note that Barry planned to factor out the 'decode' case, but that
# isn't so easy now that we handle the 8 bit data, which needs to be
# converted in both the decode and non-decode path.
if self.is_multipart():
if decode:
return None
if i is None:
return self._payload
else:
return self._payload[i]
# For backward compatibility, Use isinstance and this error message
# instead of the more logical is_multipart test.
if i is not None and not isinstance(self._payload, list):
raise TypeError('Expected list, got %s' % type(self._payload))
payload = self._payload
# cte might be a Header, so for now stringify it.
cte = str(self.get('content-transfer-encoding', '')).lower()
# payload may be bytes here.
if isinstance(payload, str):
if utils._has_surrogates(payload):
bpayload = payload.encode('ascii', 'surrogateescape')
if not decode:
try:
payload = bpayload.decode(self.get_param('charset', 'ascii'), 'replace')
except LookupError:
payload = bpayload.decode('ascii', 'replace')
elif decode:
try:
bpayload = payload.encode('ascii')
except UnicodeError:
# This won't happen for RFC compliant messages (messages
# containing only ASCII codepoints in the unicode input).
# If it does happen, turn the string into bytes in a way
# guaranteed not to fail.
bpayload = payload.encode('raw-unicode-escape')
if not decode:
return payload
if cte == 'quoted-printable':
return quopri.decodestring(bpayload)
elif cte == 'base64':
# XXX: this is a bit of a hack; decode_b should probably be factored
# out somewhere, but I haven't figured out where yet.
value, defects = decode_b(b''.join(bpayload.splitlines()))
for defect in defects:
self.policy.handle_defect(self, defect)
return value
elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
in_file = BytesIO(bpayload)
out_file = BytesIO()
try:
uu.decode(in_file, out_file, quiet=True)
return out_file.getvalue()
except uu.Error:
# Some decoding problem
return bpayload
if isinstance(payload, str):
return bpayload
return payload
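    # Decoding sketch (illustrative values): for a non-multipart part with
    # Content-Transfer-Encoding: base64 and payload "aGVsbG8=",
    #
    #   part.get_payload()              # -> 'aGVsbG8=' (raw string)
    #   part.get_payload(decode=True)   # -> b'hello' (decoded bytes)
    #
    # On a multipart, get_payload(decode=True) returns None, per the logic
    # table above.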
def set_payload(self, payload, charset=None):
"""Set the payload to the given value.
Optional charset sets the message's default character set. See
set_charset() for details.
"""
if hasattr(payload, 'encode'):
if charset is None:
self._payload = payload
return
if not isinstance(charset, Charset):
charset = Charset(charset)
payload = payload.encode(charset.output_charset)
if hasattr(payload, 'decode'):
self._payload = payload.decode('ascii', 'surrogateescape')
else:
self._payload = payload
if charset is not None:
self.set_charset(charset)
def set_charset(self, charset):
"""Set the charset of the payload to a given character set.
charset can be a Charset instance, a string naming a character set, or
None. If it is a string it will be converted to a Charset instance.
If charset is None, the charset parameter will be removed from the
Content-Type field. Anything else will generate a TypeError.
The message will be assumed to be of type text/* encoded with
charset.input_charset. It will be converted to charset.output_charset
and encoded properly, if needed, when generating the plain text
representation of the message. MIME headers (MIME-Version,
Content-Type, Content-Transfer-Encoding) will be added as needed.
"""
if charset is None:
self.del_param('charset')
self._charset = None
return
if not isinstance(charset, Charset):
charset = Charset(charset)
self._charset = charset
if 'MIME-Version' not in self:
self.add_header('MIME-Version', '1.0')
if 'Content-Type' not in self:
self.add_header('Content-Type', 'text/plain',
charset=charset.get_output_charset())
else:
self.set_param('charset', charset.get_output_charset())
if charset != charset.get_output_charset():
self._payload = charset.body_encode(self._payload)
if 'Content-Transfer-Encoding' not in self:
cte = charset.get_body_encoding()
try:
cte(self)
except TypeError:
# This 'if' is for backward compatibility, it allows unicode
# through even though that won't work correctly if the
# message is serialized.
payload = self._payload
if payload:
try:
payload = payload.encode('ascii', 'surrogateescape')
except UnicodeError:
payload = payload.encode(charset.output_charset)
self._payload = charset.body_encode(payload)
self.add_header('Content-Transfer-Encoding', cte)
def get_charset(self):
"""Return the Charset instance associated with the message's payload.
"""
return self._charset
#
# MAPPING INTERFACE (partial)
#
def __len__(self):
"""Return the total number of headers, including duplicates."""
return len(self._headers)
def __getitem__(self, name):
"""Get a header value.
Return None if the header is missing instead of raising an exception.
Note that if the header appeared multiple times, exactly which
occurrence gets returned is undefined. Use get_all() to get all
the values matching a header field name.
"""
return self.get(name)
def __setitem__(self, name, val):
"""Set the value of a header.
Note: this does not overwrite an existing header with the same field
name. Use __delitem__() first to delete any existing headers.
"""
max_count = self.policy.header_max_count(name)
if max_count:
lname = name.lower()
found = 0
for k, v in self._headers:
if k.lower() == lname:
found += 1
if found >= max_count:
raise ValueError("There may be at most {} {} headers "
"in a message".format(max_count, name))
self._headers.append(self.policy.header_store_parse(name, val))
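    # Note the append-only semantics (illustrative sketch):
    #
    #   msg['X-Tag'] = 'one'
    #   msg['X-Tag'] = 'two'   # does NOT overwrite; both X-Tag headers remain
    #   del msg['X-Tag']       # removes all occurrences
    #   msg['X-Tag'] = 'two'   # the usual replace idiom: delete, then set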
def __delitem__(self, name):
"""Delete all occurrences of a header, if present.
Does not raise an exception if the header is missing.
"""
name = name.lower()
newheaders = []
for k, v in self._headers:
if k.lower() != name:
newheaders.append((k, v))
self._headers = newheaders
def __contains__(self, name):
return name.lower() in [k.lower() for k, v in self._headers]
def __iter__(self):
for field, value in self._headers:
yield field
def keys(self):
"""Return a list of all the message's header field names.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [k for k, v in self._headers]
def values(self):
"""Return a list of all the message's header values.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [self.policy.header_fetch_parse(k, v)
for k, v in self._headers]
def items(self):
"""Get all the message's header fields and values.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [(k, self.policy.header_fetch_parse(k, v))
for k, v in self._headers]
def get(self, name, failobj=None):
"""Get a header value.
Like __getitem__() but return failobj instead of None when the field
is missing.
"""
name = name.lower()
for k, v in self._headers:
if k.lower() == name:
return self.policy.header_fetch_parse(k, v)
return failobj
#
# "Internal" methods (public API, but only intended for use by a parser
    # or generator, not normal application code.)
#
def set_raw(self, name, value):
"""Store name and value in the model without modification.
This is an "internal" API, intended only for use by a parser.
"""
self._headers.append((name, value))
def raw_items(self):
"""Return the (name, value) header pairs without modification.
This is an "internal" API, intended only for use by a generator.
"""
return iter(self._headers.copy())
#
# Additional useful stuff
#
def get_all(self, name, failobj=None):
"""Return a list of all the values for the named field.
These will be sorted in the order they appeared in the original
message, and may contain duplicates. Any fields deleted and
re-inserted are always appended to the header list.
If no such fields exist, failobj is returned (defaults to None).
"""
values = []
name = name.lower()
for k, v in self._headers:
if k.lower() == name:
values.append(self.policy.header_fetch_parse(k, v))
if not values:
return failobj
return values
def add_header(self, _name, _value, **_params):
"""Extended header setting.
name is the header field to add. keyword arguments can be used to set
additional parameters for the header field, with underscores converted
to dashes. Normally the parameter will be added as key="value" unless
value is None, in which case only the key will be added. If a
parameter value contains non-ASCII characters it can be specified as a
three-tuple of (charset, language, value), in which case it will be
encoded according to RFC2231 rules. Otherwise it will be encoded using
the utf-8 charset and a language of ''.
Examples:
msg.add_header('content-disposition', 'attachment', filename='bud.gif')
msg.add_header('content-disposition', 'attachment',
                       filename=('utf-8', '', 'Fußballer.ppt'))
msg.add_header('content-disposition', 'attachment',
                       filename='Fußballer.ppt')
"""
parts = []
for k, v in _params.items():
if v is None:
parts.append(k.replace('_', '-'))
else:
parts.append(_formatparam(k.replace('_', '-'), v))
if _value is not None:
parts.insert(0, _value)
self[_name] = SEMISPACE.join(parts)
def replace_header(self, _name, _value):
"""Replace a header.
Replace the first matching header found in the message, retaining
header order and case. If no matching header was found, a KeyError is
raised.
"""
_name = _name.lower()
for i, (k, v) in zip(range(len(self._headers)), self._headers):
if k.lower() == _name:
self._headers[i] = self.policy.header_store_parse(k, _value)
break
else:
raise KeyError(_name)
#
# Use these three methods instead of the three above.
#
def get_content_type(self):
"""Return the message's content type.
The returned string is coerced to lower case of the form
`maintype/subtype'. If there was no Content-Type header in the
message, the default type as given by get_default_type() will be
returned. Since according to RFC 2045, messages always have a default
type this will always return a value.
RFC 2045 defines a message's default type to be text/plain unless it
appears inside a multipart/digest container, in which case it would be
message/rfc822.
"""
missing = object()
value = self.get('content-type', missing)
if value is missing:
# This should have no parameters
return self.get_default_type()
ctype = _splitparam(value)[0].lower()
        # RFC 2045, section 5.2 says if it's invalid, use text/plain
if ctype.count('/') != 1:
return 'text/plain'
return ctype
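    # Sketch (illustrative header): for
    #   Content-Type: TEXT/PLAIN; charset="utf-8"
    # get_content_type() returns 'text/plain'; without any Content-Type
    # header it falls back to get_default_type().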
def get_content_maintype(self):
"""Return the message's main content type.
This is the `maintype' part of the string returned by
get_content_type().
"""
ctype = self.get_content_type()
return ctype.split('/')[0]
def get_content_subtype(self):
"""Returns the message's sub-content type.
This is the `subtype' part of the string returned by
get_content_type().
"""
ctype = self.get_content_type()
return ctype.split('/')[1]
def get_default_type(self):
"""Return the `default' content type.
Most messages have a default content type of text/plain, except for
messages that are subparts of multipart/digest containers. Such
subparts have a default content type of message/rfc822.
"""
return self._default_type
def set_default_type(self, ctype):
"""Set the `default' content type.
ctype should be either "text/plain" or "message/rfc822", although this
is not enforced. The default content type is not stored in the
Content-Type header.
"""
self._default_type = ctype
def _get_params_preserve(self, failobj, header):
# Like get_params() but preserves the quoting of values. BAW:
# should this be part of the public interface?
missing = object()
value = self.get(header, missing)
if value is missing:
return failobj
params = []
for p in _parseparam(value):
try:
name, val = p.split('=', 1)
name = name.strip()
val = val.strip()
except ValueError:
# Must have been a bare attribute
name = p.strip()
val = ''
params.append((name, val))
params = utils.decode_params(params)
return params
def get_params(self, failobj=None, header='content-type', unquote=True):
"""Return the message's Content-Type parameters, as a list.
The elements of the returned list are 2-tuples of key/value pairs, as
split on the `=' sign. The left hand side of the `=' is the key,
while the right hand side is the value. If there is no `=' sign in
the parameter the value is the empty string. The value is as
described in the get_param() method.
Optional failobj is the object to return if there is no Content-Type
header. Optional header is the header to search instead of
Content-Type. If unquote is True, the value is unquoted.
"""
missing = object()
params = self._get_params_preserve(missing, header)
if params is missing:
return failobj
if unquote:
return [(k, _unquotevalue(v)) for k, v in params]
else:
return params
def get_param(self, param, failobj=None, header='content-type',
unquote=True):
"""Return the parameter value if found in the Content-Type header.
Optional failobj is the object to return if there is no Content-Type
header, or the Content-Type header has no such parameter. Optional
header is the header to search instead of Content-Type.
Parameter keys are always compared case insensitively. The return
value can either be a string, or a 3-tuple if the parameter was RFC
2231 encoded. When it's a 3-tuple, the elements of the value are of
the form (CHARSET, LANGUAGE, VALUE). Note that both CHARSET and
LANGUAGE can be None, in which case you should consider VALUE to be
encoded in the us-ascii charset. You can usually ignore LANGUAGE.
The parameter value (either the returned string, or the VALUE item in
the 3-tuple) is always unquoted, unless unquote is set to False.
If your application doesn't care whether the parameter was RFC 2231
encoded, it can turn the return value into a string as follows:
rawparam = msg.get_param('foo')
param = email.utils.collapse_rfc2231_value(rawparam)
"""
if header not in self:
return failobj
for k, v in self._get_params_preserve(failobj, header):
if k.lower() == param.lower():
if unquote:
return _unquotevalue(v)
else:
return v
return failobj
def set_param(self, param, value, header='Content-Type', requote=True,
charset=None, language='', replace=False):
"""Set a parameter in the Content-Type header.
If the parameter already exists in the header, its value will be
replaced with the new value.
If header is Content-Type and has not yet been defined for this
message, it will be set to "text/plain" and the new parameter and
value will be appended as per RFC 2045.
        An alternate header can be specified in the header argument, and all
parameters will be quoted as necessary unless requote is False.
If charset is specified, the parameter will be encoded according to RFC
2231. Optional language specifies the RFC 2231 language, defaulting
to the empty string. Both charset and language should be strings.
"""
if not isinstance(value, tuple) and charset:
value = (charset, language, value)
if header not in self and header.lower() == 'content-type':
ctype = 'text/plain'
else:
ctype = self.get(header)
if not self.get_param(param, header=header):
if not ctype:
ctype = _formatparam(param, value, requote)
else:
ctype = SEMISPACE.join(
[ctype, _formatparam(param, value, requote)])
else:
ctype = ''
for old_param, old_value in self.get_params(header=header,
unquote=requote):
append_param = ''
if old_param.lower() == param.lower():
append_param = _formatparam(param, value, requote)
else:
append_param = _formatparam(old_param, old_value, requote)
if not ctype:
ctype = append_param
else:
ctype = SEMISPACE.join([ctype, append_param])
if ctype != self.get(header):
if replace:
self.replace_header(header, ctype)
else:
del self[header]
self[header] = ctype
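    # Sketch (illustrative values): on a message without a Content-Type
    # header,
    #
    #   msg.set_param('charset', 'utf-8')
    #
    # leaves the header as: Content-Type: text/plain; charset=utf-8
    # (values are quoted only when they contain special characters).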
def del_param(self, param, header='content-type', requote=True):
"""Remove the given parameter completely from the Content-Type header.
The header will be re-written in place without the parameter or its
value. All values will be quoted as necessary unless requote is
False. Optional header specifies an alternative to the Content-Type
header.
"""
if header not in self:
return
new_ctype = ''
for p, v in self.get_params(header=header, unquote=requote):
if p.lower() != param.lower():
if not new_ctype:
new_ctype = _formatparam(p, v, requote)
else:
new_ctype = SEMISPACE.join([new_ctype,
_formatparam(p, v, requote)])
if new_ctype != self.get(header):
del self[header]
self[header] = new_ctype
def set_type(self, type, header='Content-Type', requote=True):
"""Set the main type and subtype for the Content-Type header.
type must be a string in the form "maintype/subtype", otherwise a
ValueError is raised.
This method replaces the Content-Type header, keeping all the
parameters in place. If requote is False, this leaves the existing
header's quoting as is. Otherwise, the parameters will be quoted (the
default).
An alternative header can be specified in the header argument. When
the Content-Type header is set, we'll always also add a MIME-Version
header.
"""
# BAW: should we be strict?
if not type.count('/') == 1:
raise ValueError
# Set the Content-Type, you get a MIME-Version
if header.lower() == 'content-type':
del self['mime-version']
self['MIME-Version'] = '1.0'
if header not in self:
self[header] = type
return
params = self.get_params(header=header, unquote=requote)
del self[header]
self[header] = type
# Skip the first param; it's the old type.
for p, v in params[1:]:
self.set_param(p, v, header, requote)
def get_filename(self, failobj=None):
"""Return the filename associated with the payload if present.
The filename is extracted from the Content-Disposition header's
`filename' parameter, and it is unquoted. If that header is missing
the `filename' parameter, this method falls back to looking for the
`name' parameter.
"""
missing = object()
filename = self.get_param('filename', missing, 'content-disposition')
if filename is missing:
filename = self.get_param('name', missing, 'content-type')
if filename is missing:
return failobj
return utils.collapse_rfc2231_value(filename).strip()
def get_boundary(self, failobj=None):
"""Return the boundary associated with the payload if present.
The boundary is extracted from the Content-Type header's `boundary'
parameter, and it is unquoted.
"""
missing = object()
boundary = self.get_param('boundary', missing)
if boundary is missing:
return failobj
# RFC 2046 says that boundaries may begin but not end in w/s
return utils.collapse_rfc2231_value(boundary).rstrip()
def set_boundary(self, boundary):
"""Set the boundary parameter in Content-Type to 'boundary'.
This is subtly different than deleting the Content-Type header and
adding a new one with a new boundary parameter via add_header(). The
main difference is that using the set_boundary() method preserves the
order of the Content-Type header in the original message.
HeaderParseError is raised if the message has no Content-Type header.
"""
missing = object()
params = self._get_params_preserve(missing, 'content-type')
if params is missing:
# There was no Content-Type header, and we don't know what type
# to set it to, so raise an exception.
raise errors.HeaderParseError('No Content-Type header found')
newparams = []
foundp = False
for pk, pv in params:
if pk.lower() == 'boundary':
newparams.append(('boundary', '"%s"' % boundary))
foundp = True
else:
newparams.append((pk, pv))
if not foundp:
# The original Content-Type header had no boundary attribute.
# Tack one on the end. BAW: should we raise an exception
# instead???
newparams.append(('boundary', '"%s"' % boundary))
# Replace the existing Content-Type header with the new value
newheaders = []
for h, v in self._headers:
if h.lower() == 'content-type':
parts = []
for k, v in newparams:
if v == '':
parts.append(k)
else:
parts.append('%s=%s' % (k, v))
val = SEMISPACE.join(parts)
newheaders.append(self.policy.header_store_parse(h, val))
else:
newheaders.append((h, v))
self._headers = newheaders
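    # Sketch (illustrative values): given
    #   Content-Type: multipart/mixed; boundary="OLD"
    # msg.set_boundary('NEW') rewrites the header in place as
    #   Content-Type: multipart/mixed; boundary="NEW"
    # preserving its position among the headers, unlike deleting it and
    # re-adding it via add_header().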
def get_content_charset(self, failobj=None):
"""Return the charset parameter of the Content-Type header.
The returned string is always coerced to lower case. If there is no
Content-Type header, or if that header has no charset parameter,
failobj is returned.
"""
missing = object()
charset = self.get_param('charset', missing)
if charset is missing:
return failobj
if isinstance(charset, tuple):
# RFC 2231 encoded, so decode it, and it better end up as ascii.
pcharset = charset[0] or 'us-ascii'
try:
# LookupError will be raised if the charset isn't known to
# Python. UnicodeError will be raised if the encoded text
# contains a character not in the charset.
as_bytes = charset[2].encode('raw-unicode-escape')
charset = str(as_bytes, pcharset)
except (LookupError, UnicodeError):
charset = charset[2]
# charset characters must be in us-ascii range
try:
charset.encode('us-ascii')
except UnicodeError:
return failobj
# RFC 2046, $4.1.2 says charsets are not case sensitive
return charset.lower()
def get_charsets(self, failobj=None):
"""Return a list containing the charset(s) used in this message.
The returned list of items describes the Content-Type headers'
charset parameter for this message and all the subparts in its
payload.
Each item will either be a string (the value of the charset parameter
in the Content-Type header of that part) or the value of the
'failobj' parameter (defaults to None), if the part does not have a
main MIME type of "text", or the charset is not defined.
The list will contain one string for each part of the message, plus
one for the container message (i.e. self), so that a non-multipart
message will still return a list of length 1.
"""
return [part.get_content_charset(failobj) for part in self.walk()]
# I.e. def walk(self): ...
from email.iterators import walk
class MIMEPart(Message):
def __init__(self, policy=None):
if policy is None:
from email.policy import default
policy = default
Message.__init__(self, policy)
@property
def is_attachment(self):
c_d = self.get('content-disposition')
if c_d is None:
return False
return c_d.lower() == 'attachment'
def _find_body(self, part, preferencelist):
if part.is_attachment:
return
maintype, subtype = part.get_content_type().split('/')
if maintype == 'text':
if subtype in preferencelist:
yield (preferencelist.index(subtype), part)
return
if maintype != 'multipart':
return
if subtype != 'related':
for subpart in part.iter_parts():
yield from self._find_body(subpart, preferencelist)
return
if 'related' in preferencelist:
yield (preferencelist.index('related'), part)
candidate = None
start = part.get_param('start')
if start:
for subpart in part.iter_parts():
if subpart['content-id'] == start:
candidate = subpart
break
if candidate is None:
subparts = part.get_payload()
candidate = subparts[0] if subparts else None
if candidate is not None:
yield from self._find_body(candidate, preferencelist)
def get_body(self, preferencelist=('related', 'html', 'plain')):
"""Return best candidate mime part for display as 'body' of message.
Do a depth first search, starting with self, looking for the first part
matching each of the items in preferencelist, and return the part
corresponding to the first item that has a match, or None if no items
have a match. If 'related' is not included in preferencelist, consider
the root part of any multipart/related encountered as a candidate
match. Ignore parts with 'Content-Disposition: attachment'.
"""
best_prio = len(preferencelist)
body = None
for prio, part in self._find_body(self, preferencelist):
if prio < best_prio:
best_prio = prio
body = part
if prio == 0:
break
return body
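    # Sketch (illustrative structure): for a multipart/alternative message
    # containing a text/plain and a text/html part,
    #
    #   msg.get_body()                           # -> the text/html part
    #   msg.get_body(preferencelist=('plain',))  # -> the text/plain part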
_body_types = {('text', 'plain'),
('text', 'html'),
('multipart', 'related'),
('multipart', 'alternative')}
def iter_attachments(self):
"""Return an iterator over the non-main parts of a multipart.
Skip the first of each occurrence of text/plain, text/html,
multipart/related, or multipart/alternative in the multipart (unless
they have a 'Content-Disposition: attachment' header) and include all
remaining subparts in the returned iterator. When applied to a
multipart/related, return all parts except the root part. Return an
empty iterator when applied to a multipart/alternative or a
non-multipart.
"""
maintype, subtype = self.get_content_type().split('/')
if maintype != 'multipart' or subtype == 'alternative':
return
parts = self.get_payload()
if maintype == 'multipart' and subtype == 'related':
# For related, we treat everything but the root as an attachment.
# The root may be indicated by 'start'; if there's no start or we
# can't find the named start, treat the first subpart as the root.
start = self.get_param('start')
if start:
found = False
attachments = []
for part in parts:
if part.get('content-id') == start:
found = True
else:
attachments.append(part)
if found:
yield from attachments
return
parts.pop(0)
yield from parts
return
# Otherwise we more or less invert the remaining logic in get_body.
# This only really works in edge cases (ex: non-text relateds or
# alternatives) if the sending agent sets content-disposition.
seen = [] # Only skip the first example of each candidate type.
for part in parts:
maintype, subtype = part.get_content_type().split('/')
if ((maintype, subtype) in self._body_types and
not part.is_attachment and subtype not in seen):
seen.append(subtype)
continue
yield part
def iter_parts(self):
"""Return an iterator over all immediate subparts of a multipart.
Return an empty iterator for a non-multipart.
"""
if self.get_content_maintype() == 'multipart':
yield from self.get_payload()
def get_content(self, *args, content_manager=None, **kw):
if content_manager is None:
content_manager = self.policy.content_manager
return content_manager.get_content(self, *args, **kw)
def set_content(self, *args, content_manager=None, **kw):
if content_manager is None:
content_manager = self.policy.content_manager
content_manager.set_content(self, *args, **kw)
def _make_multipart(self, subtype, disallowed_subtypes, boundary):
if self.get_content_maintype() == 'multipart':
existing_subtype = self.get_content_subtype()
disallowed_subtypes = disallowed_subtypes + (subtype,)
if existing_subtype in disallowed_subtypes:
raise ValueError("Cannot convert {} to {}".format(
existing_subtype, subtype))
keep_headers = []
part_headers = []
for name, value in self._headers:
if name.lower().startswith('content-'):
part_headers.append((name, value))
else:
keep_headers.append((name, value))
if part_headers:
# There is existing content, move it to the first subpart.
part = type(self)(policy=self.policy)
part._headers = part_headers
part._payload = self._payload
self._payload = [part]
else:
self._payload = []
self._headers = keep_headers
self['Content-Type'] = 'multipart/' + subtype
if boundary is not None:
self.set_param('boundary', boundary)
def make_related(self, boundary=None):
self._make_multipart('related', ('alternative', 'mixed'), boundary)
def make_alternative(self, boundary=None):
self._make_multipart('alternative', ('mixed',), boundary)
def make_mixed(self, boundary=None):
self._make_multipart('mixed', (), boundary)
def _add_multipart(self, _subtype, *args, _disp=None, **kw):
if (self.get_content_maintype() != 'multipart' or
self.get_content_subtype() != _subtype):
getattr(self, 'make_' + _subtype)()
part = type(self)(policy=self.policy)
part.set_content(*args, **kw)
if _disp and 'content-disposition' not in part:
part['Content-Disposition'] = _disp
self.attach(part)
def add_related(self, *args, **kw):
self._add_multipart('related', *args, _disp='inline', **kw)
def add_alternative(self, *args, **kw):
self._add_multipart('alternative', *args, **kw)
def add_attachment(self, *args, **kw):
self._add_multipart('mixed', *args, _disp='attachment', **kw)
def clear(self):
self._headers = []
self._payload = None
def clear_content(self):
self._headers = [(n, v) for n, v in self._headers
if not n.lower().startswith('content-')]
self._payload = None
class EmailMessage(MIMEPart):
def set_content(self, *args, **kw):
super().set_content(*args, **kw)
if 'MIME-Version' not in self:
self['MIME-Version'] = '1.0'
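# Usage sketch for EmailMessage (illustrative values):
#
#   from email.message import EmailMessage
#   msg = EmailMessage()
#   msg['To'] = 'a@example.com'
#   msg.set_content('hello')   # also adds MIME-Version: 1.0
#   msg.add_attachment(b'\x00\x01', maintype='application',
#                      subtype='octet-stream', filename='blob.bin')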
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# Hints:
# There are several modes for displaying data
# 1. Availability table
# 2. Timeline view with chronological events of one object
# There are two types of data sources
# a. Hosts/Services (identified by site, host and service)
# b. BI aggregates (identified by aggr_groups and aggr_name)
# The code flow for these four combinations is different
#
# 1a) availability of hosts/services
# Here the logic of show_view is used for creating the
# filter headers. But these are being reused for the statehist
# table instead of the original hosts/services table! This is
# done in get_availability_data().
#
# - htdocs/views.py:show_view()
# - plugins/views/availability.py:render_availability()
# - plugins/views/availability.py:get_availability_data()
# - plugins/views/availability.py:do_render_availability()
# - plugins/views/availability.py:render_availability_table()
#
# 2a) timeline of hosts/services
# It is much the same as for 1a), just that in get_availability_data()
# an additional filter is being added for selecting just one host/service.
#
# - htdocs/views.py:show_view()
# - plugins/views/availability.py:render_availability()
# - plugins/views/availability.py:get_availability_data()
# - plugins/views/availability.py:do_render_availability()
# - plugins/views/availability.py:render_timeline()
#
# 1b) availability of bi aggregates
# In order to use the filter logic of the aggr datasource, we
# also start in show_view(). But this time we let the actual
# rows being computed - just we make sure that only the two
# columns aggr_name, aggr_group and aggr_tree are being fetched. The
# other columns won't be displayed. We just need the correct
# result set. With that we fork into render_bi_availability().
# This computes the historic states of the aggregate by using
# data from hosts/services from state_hist.
#
# - htdocs/views.py:show_view()
# - plugins/views/availability.py:render_bi_availability()
# - plugins/views/availability.py:get_bi_timeline()
# - plugins/views/availability.py:do_render_availability()
# - plugins/views/availability.py:render_availability_table()
#
# 2b) timeline of bi aggregates
# In this case we do not need any logic from the view, since
# we just display one element - which is identified by aggr_group
# and aggr_name. We immediately fork to page_timeline()
#
# - htdocs/views.py:show_view() (jumps immediately to page_timeline)
# - htdocs/bi.py:page_timeline()
# - plugins/views/availability.py:render_bi_availability()
# - plugins/views/availability.py:do_render_availability()
# - plugins/views/availability.py:render_timeline()
import table
from valuespec import *
# Function building the availability view
def render_availability(view, datasource, filterheaders, display_options,
only_sites, limit):
if handle_edit_annotations():
return
timeline = not not html.var("timeline")
if timeline:
tl_site = html.var("timeline_site")
tl_host = html.var("timeline_host")
tl_service = html.var("timeline_service")
tl_aggr = html.var("timeline_aggr")
if tl_aggr:
title = _("Timeline of") + " " + tl_aggr
timeline = (tl_aggr, None, None)
else:
title = _("Timeline of") + " " + tl_host
if tl_service:
title += ", " + tl_service
timeline = (tl_site, tl_host, tl_service)
else:
title = _("Availability: ") + view_title(view)
html.add_status_icon("download_csv", _("Export as CSV"), html.makeuri([("output_format", "csv_export")]))
if timeline and tl_aggr:
what = "bi"
else:
what = "service" in datasource["infos"] and "service" or "host"
avoptions = get_availability_options_from_url(what)
range, range_title = avoptions["range"]
title += " - " + range_title
if html.output_format == "csv_export":
do_csv = True
av_output_csv_mimetype(title)
else:
do_csv = False
if 'H' in display_options:
html.body_start(title, stylesheets=["pages","views","status"], force=True)
if 'T' in display_options:
html.top_heading(title)
handle_delete_annotations()
# Remove variables for editing annotations, otherwise they will make it into the uris
html.del_all_vars("editanno_")
html.del_all_vars("anno_")
if html.var("filled_in") == "editanno":
html.del_var("filled_in")
if 'B' in display_options:
html.begin_context_buttons()
togglebutton("avoptions", html.has_user_errors(), "painteroptions", _("Configure details of the report"))
html.context_button(_("Status View"), html.makeuri([("mode", "status")]), "status")
if config.reporting_available():
html.context_button(_("Export as PDF"), html.makeuri([], filename="report_instant.py"), "report")
if timeline:
html.context_button(_("Availability"), html.makeuri([("timeline", "")]), "availability")
history_url = history_url_of(tl_site, tl_host, tl_service, range[0], range[1])
if not tl_aggr: # No history for BI aggregate timeline
html.context_button(_("History"), history_url, "history")
html.end_context_buttons()
if not do_csv:
# Render the avoptions again to get the HTML code, because the HTML vars have changed
        # above (anno_ and editanno_ have been removed, which must not be part of the form)
avoptions = render_availability_options(what)
if not html.has_user_errors():
if timeline and tl_aggr:
if not html.has_var("aggr_group"):
raise MKGeneralException("Missing GET variable <tt>aggr_group</tt>")
aggr_group = html.var("aggr_group")
tree = bi.get_bi_tree(aggr_group, tl_aggr)
rows = [{ "aggr_tree" : tree , "aggr_group" : aggr_group}]
else:
rows = get_availability_data(datasource, filterheaders, range, only_sites,
limit, timeline, timeline or avoptions["show_timeline"], avoptions)
do_render_availability(rows, what, avoptions, timeline, "")
if 'Z' in display_options:
html.bottom_footer()
if 'H' in display_options:
html.body_end()
def av_output_csv_mimetype(title):
html.req.content_type = "text/csv; charset=UTF-8"
filename = '%s-%s.csv' % (title, time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time())))
if type(filename) == unicode:
filename = filename.encode("utf-8")
html.req.headers_out['Content-Disposition'] = 'Attachment; filename="%s"' % filename
# Options for availability computation and rendering. These are four-tuples
# with the columns:
# 1. variable name
# 2. show in single or double height box
# 3. use this in reporting
# 4. the valuespec
def get_avoption_entries(what):
if what == "bi":
grouping_choices = [
( None, _("Do not group") ),
( "host", _("By Aggregation Group") ),
]
else:
grouping_choices = [
( None, _("Do not group") ),
( "host", _("By Host") ),
( "host_groups", _("By Host group") ),
( "service_groups", _("By Service group") ),
]
return [
# Time range selection
( "rangespec",
"double",
False,
Timerange(
title = _("Time Range"),
default_value = 'd0',
)
),
# Labelling and Texts
( "labelling",
"double",
True,
ListChoice(
title = _("Labelling Options"),
choices = [
( "omit_headers", _("Do not display column headers")),
( "omit_host", _("Do not display the host name")),
( "use_display_name", _("Use alternative display name for services")),
( "omit_buttons", _("Do not display icons for history and timeline")),
( "display_timeline_legend", _("Display legend for timeline")),
]
)
),
# How to deal with downtimes
( "downtimes",
"double",
True,
Dictionary(
title = _("Scheduled Downtimes"),
columns = 2,
elements = [
( "include",
DropdownChoice(
choices = [
( "honor", _("Honor scheduled downtimes") ),
( "ignore", _("Ignore scheduled downtimes") ),
( "exclude", _("Exclude scheduled downtimes" ) ),
],
default_value = "honor",
)
),
( "exclude_ok",
Checkbox(label = _("Treat phases of UP/OK as non-downtime"))
),
],
optional_keys = False,
)
),
# How to deal with downtimes, etc.
( "consider",
"double",
True,
Dictionary(
title = _("Status Classification"),
columns = 2,
elements = [
( "flapping",
Checkbox(
label = _("Consider periods of flapping states"),
default_value = True),
),
( "host_down",
Checkbox(
label = _("Consider times where the host is down"),
default_value = True),
),
( "unmonitored",
Checkbox(
label = _("Include unmonitored time"),
default_value = True),
),
],
optional_keys = False,
),
),
# Optionally group some states together
( "state_grouping",
"double",
True,
Dictionary(
title = _("Status Grouping"),
columns = 2,
elements = [
( "warn",
DropdownChoice(
label = _("Treat Warning as: "),
choices = [
( "ok", _("OK") ),
( "warn", _("WARN") ),
( "crit", _("CRIT") ),
( "unknown", _("UNKNOWN") ),
],
default_value = "warn",
),
),
( "unknown",
DropdownChoice(
label = _("Treat Unknown as: "),
choices = [
( "ok", _("OK") ),
( "warn", _("WARN") ),
( "crit", _("CRIT") ),
( "unknown", _("UNKNOWN") ),
],
default_value = "unknown",
),
),
( "host_down",
DropdownChoice(
label = _("Treat Host Down as: "),
choices = [
( "ok", _("OK") ),
( "warn", _("WARN") ),
( "crit", _("CRIT") ),
( "unknown", _("UNKNOWN") ),
( "host_down", _("Host Down") ),
],
default_value = "host_down",
),
),
],
optional_keys = False,
),
),
# Visual levels for the availability
( "av_levels",
"double",
False,
Optional(
Tuple(
elements = [
Percentage(title = _("Warning below"), default_value = 99, display_format="%.3f", size=7),
Percentage(title = _("Critical below"), default_value = 95, display_format="%.3f", size=7),
]
),
title = _("Visual levels for the availability (OK percentage)"),
)
),
        # Show columns for min, max, avg duration and count
( "outage_statistics",
"double",
True,
Tuple(
title = _("Outage statistics"),
orientation = "horizontal",
elements = [
ListChoice(
title = _("Aggregations"),
choices = [
( "min", _("minimum duration" )),
( "max", _("maximum duration" )),
( "avg", _("average duration" )),
( "cnt", _("count" )),
]
),
ListChoice(
title = _("For these states:"),
columns = 2,
choices = [
( "ok", _("OK/Up") ),
( "warn", _("Warn") ),
( "crit", _("Crit/Down") ),
( "unknown", _("Unknown/Unreach") ),
( "flapping", _("Flapping") ),
( "host_down", _("Host Down") ),
( "in_downtime", _("Downtime") ),
( "outof_notification_period", _("OO/Notif") ),
]
)
]
)
),
# Omit all non-OK columns
( "av_mode",
"single",
True,
Checkbox(
title = _("Availability"),
label = _("Just show the availability (i.e. OK/UP)"),
),
),
# How to deal with the service periods
( "service_period",
"single",
True,
DropdownChoice(
title = _("Service Time"),
choices = [
( "honor", _("Base report only on service times") ),
( "ignore", _("Include both service and non-service times" ) ),
( "exclude", _("Base report only on non-service times" ) ),
],
default_value = "honor",
)
),
# How to deal with times out of the notification period
( "notification_period",
"single",
True,
DropdownChoice(
title = _("Notification Period"),
choices = [
( "honor", _("Distinguish times in and out of notification period") ),
( "exclude", _("Exclude times out of notification period" ) ),
( "ignore", _("Ignore notification period") ),
],
default_value = "ignore",
)
),
# Group by Host, Hostgroup or Servicegroup?
( "grouping",
"single",
True,
DropdownChoice(
title = _("Grouping"),
choices = grouping_choices,
default_value = None,
)
),
# Format of numbers
( "dateformat",
"single",
True,
DropdownChoice(
title = _("Format time stamps as"),
choices = [
("yyyy-mm-dd hh:mm:ss", _("YYYY-MM-DD HH:MM:SS") ),
("epoch", _("Unix Timestamp (Epoch)") ),
],
default_value = "yyyy-mm-dd hh:mm:ss",
)
),
( "timeformat",
"single",
True,
DropdownChoice(
title = _("Format time ranges as"),
choices = [
("percentage_0", _("Percentage - XX %") ),
("percentage_1", _("Percentage - XX.X %") ),
("percentage_2", _("Percentage - XX.XX %") ),
("percentage_3", _("Percentage - XX.XXX %") ),
("seconds", _("Seconds") ),
("minutes", _("Minutes") ),
("hours", _("Hours") ),
("hhmmss", _("HH:MM:SS") ),
],
default_value = "percentage_2",
)
),
# Short time intervals
( "short_intervals",
"single",
True,
Integer(
title = _("Short Time Intervals"),
label = _("Ignore intervals shorter or equal"),
minvalue = 0,
unit = _("sec"),
default_value = 0,
),
),
# Merging
( "dont_merge",
"single",
True,
Checkbox(
title = _("Phase Merging"),
label = _("Do not merge consecutive phases with equal state")),
),
# Summary line
( "summary",
"single",
True,
DropdownChoice(
title = _("Summary line"),
choices = [
( None, _("Do not show a summary line") ),
( "sum", _("Display total sum (for % the average)") ),
( "average", _("Display average") ),
],
default_value = "sum",
)
),
# Timeline
( "show_timeline",
"single",
True,
Checkbox(
title = _("Timeline"),
label = _("Show timeline of each object directly in table")),
),
# Timelimit
( "timelimit",
"single",
False,
Age(
title = _("Query Time Limit"),
help = _("Limit the execution time of the query, in order to "
"avoid a hanging system."),
unit = _("sec"),
default_value = 30,
),
)
]
# Get availability options without rendering the valuespecs
def get_availability_options_from_url(what):
html.plug()
avoptions = render_availability_options(what)
html.drain()
html.unplug()
return avoptions
def get_default_avoptions():
return {
"range" : (time.time() - 86400, time.time()),
"rangespec" : "d0",
"labelling" : [],
"downtimes" : {
"include" : "honor",
"exclude_ok" : False,
},
"consider" : {
"flapping" : True,
"host_down" : True,
"unmonitored" : True,
},
"state_grouping" : {
"warn" : "warn",
"unknown" : "unknown",
"host_down" : "host_down",
},
"av_levels" : None,
"outage_statistics" : ([],[]),
"av_mode" : False,
"service_period" : "honor",
"notification_period" : "ignore",
"grouping" : None,
"dateformat" : "yyyy-mm-dd hh:mm:ss",
"timeformat" : "percentage_2",
"short_intervals" : 0,
"dont_merge" : False,
"summary" : "sum",
"show_timeline" : False,
"timelimit" : 30,
}
def render_availability_options(what):
if html.var("_reset") and html.check_transaction():
config.save_user_file("avoptions", {})
for varname in html.vars.keys():
if varname.startswith("avo_"):
html.del_var(varname)
html.del_var("avoptions")
avoptions = get_default_avoptions()
# Users of older versions might not have all keys set. The following
# trick will merge their options with our default options.
avoptions.update(config.load_user_file("avoptions", {}))
is_open = False
html.begin_form("avoptions")
html.hidden_field("avoptions", "set")
avoption_entries = get_avoption_entries(what)
if html.var("avoptions") == "set":
for name, height, show_in_reporting, vs in avoption_entries:
try:
avoptions[name] = vs.from_html_vars("avo_" + name)
except MKUserError, e:
html.add_user_error(e.varname, e)
is_open = True
range_vs = None
for name, height, show_in_reporting, vs in avoption_entries:
if name == 'rangespec':
range_vs = vs
try:
range, range_title = range_vs.compute_range(avoptions["rangespec"])
avoptions["range"] = range, range_title
except MKUserError, e:
html.add_user_error(e.varname, e)
if html.has_user_errors():
html.show_user_errors()
html.write('<div class="view_form" id="avoptions" %s>'
% (not is_open and 'style="display: none"' or '') )
html.write("<table border=0 cellspacing=0 cellpadding=0 class=filterform><tr><td>")
for name, height, show_in_reporting, vs in avoption_entries:
html.write('<div class="floatfilter %s %s">' % (height, name))
html.write('<div class=legend>%s</div>' % vs.title())
html.write('<div class=content>')
vs.render_input("avo_" + name, avoptions.get(name))
html.write("</div>")
html.write("</div>")
html.write("</td></tr>")
html.write("<tr><td>")
html.button("apply", _("Apply"), "submit")
html.button("_reset", _("Reset to defaults"), "submit")
html.write("</td></tr></table>")
html.write("</div>")
html.hidden_fields()
html.end_form()
if html.form_submitted():
config.save_user_file("avoptions", avoptions)
# Convert outage-options from service to host
states = avoptions["outage_statistics"][1]
for os, oh in [ ("ok","up"), ("crit","down"), ("unknown", "unreach") ]:
if os in states:
states.append(oh)
return avoptions
def get_availability_data(datasource, filterheaders, range, only_sites, limit, single_object, include_output, avoptions):
has_service = "service" in datasource["infos"]
av_filter = "Filter: time >= %d\nFilter: time < %d\n" % range
if single_object:
tl_site, tl_host, tl_service = single_object
av_filter += "Filter: host_name = %s\nFilter: service_description = %s\n" % (
tl_host, tl_service)
only_sites = [ tl_site ]
elif has_service:
av_filter += "Filter: service_description !=\n"
else:
av_filter += "Filter: service_description =\n"
query = "GET statehist\n" + av_filter
query += "Timelimit: %d\n" % avoptions["timelimit"]
# Add Columns needed for object identification
columns = [ "host_name", "service_description" ]
# Columns for availability
columns += [
"duration", "from", "until", "state", "host_down", "in_downtime",
"in_host_downtime", "in_notification_period", "in_service_period", "is_flapping", ]
if include_output:
columns.append("log_output")
if "use_display_name" in avoptions["labelling"]:
columns.append("service_display_name")
# If we group by host/service group then make sure that that information is available
if avoptions["grouping"] not in [ None, "host" ]:
columns.append(avoptions["grouping"])
add_columns = datasource.get("add_columns", [])
rows = do_query_data(query, columns, add_columns, None, filterheaders, only_sites, limit = None)
return rows
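# Sketch of the Livestatus query built above (illustrative range values):
# for range = (1000, 2000) on a service datasource the query starts with
#
#   GET statehist
#   Filter: time >= 1000
#   Filter: time < 2000
#   Filter: service_description !=
#   Timelimit: 30
#
# followed by the column list and the filter headers handed in from the view.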
host_availability_columns = [
( "up", "state0", _("UP"), None ),
( "down", "state2", _("DOWN"), None ),
( "unreach", "state3", _("UNREACH"), None ),
( "flapping", "flapping", _("Flapping"), None ),
( "in_downtime", "downtime", _("Downtime"), _("The host was in a scheduled downtime") ),
( "outof_notification_period", "", _("OO/Notif"), _("Out of Notification Period") ),
( "outof_service_period", "ooservice", _("OO/Service"), _("Out of Service Period") ),
( "unmonitored", "unmonitored", _("N/A"), _("During this time period no monitoring data is available") ),
]
service_availability_columns = [
( "ok", "state0", _("OK"), None ),
( "warn", "state1", _("WARN"), None ),
( "crit", "state2", _("CRIT"), None ),
( "unknown", "state3", _("UNKNOWN"), None ),
( "flapping", "flapping", _("Flapping"), None ),
( "host_down", "hostdown", _("H.Down"), _("The host was down") ),
( "in_downtime", "downtime", _("Downtime"), _("The host or service was in a scheduled downtime") ),
( "outof_notification_period", "", _("OO/Notif"), _("Out of Notification Period") ),
( "outof_service_period", "ooservice", _("OO/Service"), _("Out of Service Period") ),
( "unmonitored", "unmonitored", _("N/A"), _("During this time period no monitoring data is available") ),
]
bi_availability_columns = [
( "ok", "state0", _("OK"), None ),
( "warn", "state1", _("WARN"), None ),
( "crit", "state2", _("CRIT"), None ),
( "unknown", "state3", _("UNKNOWN"), None ),
( "in_downtime", "downtime", _("Downtime"), _("The aggregate was in a scheduled downtime") ),
( "unmonitored", "unmonitored", _("N/A"), _("During this time period no monitoring data is available") ),
]
# fetch=True: return the availability table as Python data and render nothing
def do_render_availability(rows, what, avoptions, timeline, timewarpcode, fetch=False):
# Sort by site/host and service, while keeping native order
by_host = {}
for row in rows:
site_host = row["site"], row["host_name"]
service = row["service_description"]
by_host.setdefault(site_host, {})
by_host[site_host].setdefault(service, []).append(row)
# Load annotations
annotations = load_annotations()
# Now compute availability table. We have the following possible states:
# 1. "unmonitored"
# 2. "monitored"
# 2.1 "outof_notification_period"
# 2.2 "in_notification_period"
# 2.2.1 "in_downtime" (also in_host_downtime)
# 2.2.2 "not_in_downtime"
# 2.2.2.1 "host_down"
# 2.2.2.2 "host not down"
# 2.2.2.2.1 "ok"
# 2.2.2.2.2 "warn"
# 2.2.2.2.3 "crit"
# 2.2.2.2.4 "unknown"
availability = []
os_aggrs, os_states = avoptions.get("outage_statistics", ([],[]))
need_statistics = os_aggrs and os_states
show_timeline = avoptions["show_timeline"] or timeline
grouping = avoptions["grouping"]
timeline_rows = [] # Need this as a global variable if just one service is affected
total_duration = 0
considered_duration = 0
    # Note: in case of timeline, we have data from exactly one host/service
for site_host, site_host_entry in by_host.iteritems():
for service, service_entry in site_host_entry.iteritems():
if grouping == "host":
group_ids = [site_host]
elif grouping:
group_ids = set([])
else:
group_ids = None
# First compute timeline
timeline_rows = []
total_duration = 0
considered_duration = 0
for span in service_entry:
# Information about host/service groups are in the actual entries
if grouping and grouping != "host" and what != "bi":
group_ids.update(span[grouping]) # List of host/service groups
display_name = span.get("service_display_name", service)
state = span["state"]
consider = True
if state == -1:
s = "unmonitored"
if not avoptions["consider"]["unmonitored"]:
consider = False
elif avoptions["service_period"] != "ignore" and \
(( span["in_service_period"] and avoptions["service_period"] != "honor" )
or \
( not span["in_service_period"] and avoptions["service_period"] == "honor" )):
s = "outof_service_period"
consider = False
elif span["in_notification_period"] == 0 and avoptions["notification_period"] == "exclude":
consider = False
elif span["in_notification_period"] == 0 and avoptions["notification_period"] == "honor":
s = "outof_notification_period"
elif (span["in_downtime"] or span["in_host_downtime"]) and not \
(avoptions["downtimes"]["exclude_ok"] and state == 0) and not \
avoptions["downtimes"]["include"] == "ignore":
if avoptions["downtimes"]["include"] == "exclude":
consider = False
else:
s = "in_downtime"
elif what != "host" and span["host_down"] and avoptions["consider"]["host_down"]:
s = "host_down"
elif span["is_flapping"] and avoptions["consider"]["flapping"]:
s = "flapping"
else:
if what in [ "service", "bi" ]:
s = { 0: "ok", 1:"warn", 2:"crit", 3:"unknown" }.get(state, "unmonitored")
else:
s = { 0: "up", 1:"down", 2:"unreach" }.get(state, "unmonitored")
if s == "warn":
s = avoptions["state_grouping"]["warn"]
elif s == "unknown":
s = avoptions["state_grouping"]["unknown"]
elif s == "host_down":
s = avoptions["state_grouping"]["host_down"]
total_duration += span["duration"]
if consider:
timeline_rows.append((span, s))
considered_duration += span["duration"]
# Now merge consecutive rows with identical state
if not avoptions["dont_merge"]:
merge_timeline(timeline_rows)
# Melt down short intervals
if avoptions["short_intervals"]:
melt_short_intervals(timeline_rows, avoptions["short_intervals"], avoptions["dont_merge"])
# Condense into availability
states = {}
statistics = {}
for span, s in timeline_rows:
states.setdefault(s, 0)
duration = span["duration"]
states[s] += duration
if need_statistics:
entry = statistics.get(s)
if entry:
entry[0] += 1
entry[1] = min(entry[1], duration)
entry[2] = max(entry[2], duration)
else:
statistics[s] = [ 1, duration, duration ] # count, min, max
if not show_timeline:
timeline_rows = None
availability.append([site_host[0], site_host[1], service, display_name, states,
considered_duration, total_duration, statistics, timeline_rows, group_ids])
# Prepare number format function
range, range_title = avoptions["range"]
from_time, until_time = range
duration = until_time - from_time
render_number = render_number_function(avoptions)
fetch_data = {}
if timeline:
if not fetch: # Timeline does not support fetch
render_timeline(timeline_rows, from_time, until_time, total_duration,
timeline, range_title, render_number, what, timewarpcode, avoptions, False, style="standalone")
else:
fetch_data["table"] = render_availability_table(availability, from_time, until_time, range_title,
what, avoptions, render_number, fetch)
if not fetch:
render_annotations(annotations, from_time, until_time, by_host, what, avoptions, omit_service = timeline)
return fetch_data
# Creates a function for rendering time values according to
# the avoptions of the report.
def render_number_function(avoptions):
timeformat = avoptions["timeformat"]
if timeformat.startswith("percentage_"):
def render_number(n, d):
if not d:
return _("n/a")
else:
return ("%." + timeformat[11:] + "f%%") % ( float(n) / float(d) * 100.0)
elif timeformat == "seconds":
def render_number(n, d):
return "%d s" % n
elif timeformat == "minutes":
def render_number(n, d):
return "%d min" % (n / 60)
elif timeformat == "hours":
def render_number(n, d):
return "%d h" % (n / 3600)
else:
def render_number(n, d):
minn, sec = divmod(n, 60)
hours, minn = divmod(minn, 60)
return "%02d:%02d:%02d" % (hours, minn, sec)
return render_number
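# Example of the returned closure (illustrative avoptions): with timeformat
# "percentage_2", render_number(1800, 86400) yields "2.08%"; with "hhmmss"
# the same call yields "00:30:00".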
# style is either inline (just the timeline bar) or "standalone" (the complete page)
def render_timeline(timeline_rows, from_time, until_time, considered_duration,
timeline, range_title, render_number, what, timewarpcode, avoptions, fetch, style):
if not timeline_rows:
if fetch:
return []
else:
html.write('<div class=info>%s</div>' % _("No information available"))
return
# Timeformat: show date only if the displayed time range spans over
# more than one day.
format = "%H:%M:%S"
if time.localtime(from_time)[:3] != time.localtime(until_time-1)[:3]:
format = "%Y-%m-%d " + format
def render_date(ts):
if avoptions["dateformat"] == "epoch":
return str(int(ts))
else:
return time.strftime(format, time.localtime(ts))
if type(timeline) == tuple:
tl_site, tl_host, tl_service = timeline
if tl_service:
availability_columns = service_availability_columns
else:
availability_columns = host_availability_columns
else:
availability_columns = bi_availability_columns
# Render graphical representation
# Make sure that each cell is visible, if possible
min_percentage = min(100.0 / len(timeline_rows), style == "inline" and 0.0 or 0.5)
rest_percentage = 100 - len(timeline_rows) * min_percentage
if not fetch:
html.write('<div class="timelinerange %s">' % style)
if style == "standalone":
html.write('<div class=from>%s</div><div class=until>%s</div></div>' % (
render_date(from_time), render_date(until_time)))
if not fetch:
html.write('<table class="timeline %s">' % style)
html.write('<tr class=timeline>')
chaos_begin = None
chaos_end = None
chaos_count = 0
chaos_width = 0
def output_chaos_period(chaos_begin, chaos_end, chaos_count, chaos_width):
if fetch:
html.write("|chaos:%s" % chaos_width)
else:
title = _("%d chaotic state changes from %s until %s (%s)") % (
chaos_count,
render_date(chaos_begin), render_date(chaos_end),
render_number(chaos_end - chaos_begin, considered_duration))
html.write('<td style="width: %.3f%%" title="%s" class="chaos"></td>' % (
max(0.2, chaos_width), html.attrencode(title)))
for row_nr, (row, state_id) in enumerate(timeline_rows):
for sid, css, sname, help in availability_columns:
if sid == state_id:
title = _("From %s until %s (%s) %s") % (
render_date(row["from"]), render_date(row["until"]),
render_number(row["duration"], considered_duration),
help and help or sname)
if "log_output" in row and row["log_output"]:
title += " - " + row["log_output"]
width = rest_percentage * row["duration"] / considered_duration
# If the width is very small then we group several phases into
# one single "chaos period".
if style == "inline" and width < 0.05:
if not chaos_begin:
chaos_begin = row["from"]
chaos_width += width
chaos_count += 1
chaos_end = row["until"]
continue
# Chaos period has ended? One not-small phase:
elif chaos_begin:
# Only output chaos phases with a certain length
if chaos_count >= 4:
output_chaos_period(chaos_begin, chaos_end, chaos_count, chaos_width)
chaos_begin = None
chaos_count = 0
chaos_width = 0
width += min_percentage
if fetch:
html.write("|%s:%s" % (css, width))
else:
html.write('<td onmouseover="timeline_hover(%d, 1);" onmouseout="timeline_hover(%d, 0);" '
'style="width: %.3f%%" title="%s" class="%s"></td>' % (
row_nr, row_nr, width, html.attrencode(title), css))
if chaos_count > 1:
output_chaos_period(chaos_begin, chaos_end, chaos_count, chaos_width)
if not fetch:
html.write('</tr></table>')
if style == "inline":
if not fetch:
render_timeline_choords(from_time, until_time, width=500)
return
# Render timewarped BI aggregate (might be empty)
html.write(timewarpcode)
# Render Table
table.begin("av_timeline", "", css="timelineevents")
for row_nr, (row, state_id) in enumerate(timeline_rows):
table.row()
table.cell(_("Links"), css="buttons")
if what == "bi":
url = html.makeuri([("timewarp", str(int(row["from"])))])
if html.var("timewarp") and int(html.var("timewarp")) == int(row["from"]):
html.disabled_icon_button("timewarp_off")
else:
html.icon_button(url, _("Time warp - show BI aggregate during this time period"), "timewarp")
else:
url = html.makeuri([("anno_site", tl_site),
("anno_host", tl_host),
("anno_service", tl_service),
("anno_from", row["from"]),
("anno_until", row["until"])])
html.icon_button(url, _("Create an annotation for this period"), "annotation")
table.cell(_("From"), render_date(row["from"]), css="nobr narrow")
table.cell(_("Until"), render_date(row["until"]), css="nobr narrow")
table.cell(_("Duration"), render_number(row["duration"], considered_duration), css="narrow number")
for sid, css, sname, help in availability_columns:
if sid == state_id:
table.cell(_("State"), sname, css=css + " state narrow")
break
else:
table.cell(_("State"), "(%s/%s)" % (sid,sname))
table.cell(_("Last Known Plugin Output"), row["log_output"])
table.end()
# Legend for timeline
if "display_timeline_legend" in avoptions["labelling"]:
render_timeline_legend(what)
def render_timeline_choords(from_time, until_time, width):
duration = until_time - from_time
def render_choord(t, title):
pixel = width * (t - from_time) / float(duration)
html.write('<div title="%s" class="timelinechoord" style="left: %dpx"></div>' % (title, pixel))
# Now comes the difficult part: deciding automatically whether to use
# hours, days, weeks or months. Days and weeks need to take local time
# into account. Months are irregular.
hours = duration / 3600
if hours < 12:
scale = "hours"
elif hours < 24:
scale = "2hours"
elif hours < 48:
scale = "6hours"
elif hours < 24 * 14:
scale = "days"
elif hours < 24 * 60:
scale = "weeks"
else:
scale = "months"
broken = list(time.localtime(from_time))
while True:
next_choord, title = find_next_choord(broken, scale)
if next_choord >= until_time:
break
render_choord(next_choord, title)
# Elements in broken:
# 0: year
# 1: month (1 = January)
# 2: day of month
# 3: hour
# 4: minute
# 5: second
# 6: day of week (0 = monday)
# 7: day of year
# 8: isdst (0 or 1)
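# Example (hypothetical, assuming a UTC timezone): time.localtime(86400)
# -> (1970, 1, 2, 0, 0, 0, 4, 2, 0), i.e. Friday, Jan 2nd 1970, day 2 of
# the year, outside DST.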
def find_next_choord(broken, scale):
broken[4:6] = [0, 0] # always set min/sec to 00:00
old_dst = broken[8]
if scale == "hours":
epoch = time.mktime(broken)
epoch += 3600
broken[:] = list(time.localtime(epoch))
title = time.strftime("%H:%M", broken)
elif scale == "2hours":
broken[3] = broken[3] / 2 * 2
epoch = time.mktime(broken)
epoch += 2 * 3600
broken[:] = list(time.localtime(epoch))
title = valuespec.weekdays[broken[6]] + time.strftime(" %H:%M", broken)
elif scale == "6hours":
broken[3] = broken[3] / 6 * 6
epoch = time.mktime(broken)
epoch += 6 * 3600
broken[:] = list(time.localtime(epoch))
title = valuespec.weekdays[broken[6]] + time.strftime(" %H:%M", broken)
elif scale == "days":
broken[3] = 0
epoch = time.mktime(broken)
epoch += 24 * 3600
broken[:] = list(time.localtime(epoch))
title = valuespec.weekdays[broken[6]] + time.strftime(", %d.%m. 00:00", broken)
elif scale == "weeks":
broken[3] = 0
at_00 = int(time.mktime(broken))
at_monday = at_00 - 86400 * broken[6]
epoch = at_monday + 7 * 86400
broken[:] = list(time.localtime(epoch))
title = valuespec.weekdays[broken[6]] + time.strftime(", %d.%m.", broken)
else: # scale == "months":
broken[3] = 0
broken[2] = 0
broken[1] += 1
if broken[1] > 12:
broken[1] = 1
broken[0] += 1
epoch = time.mktime(broken)
title = "%s %d" % (valuespec.month_names[broken[1]-1], broken[0])
dst = broken[8]
if old_dst == 1 and dst == 0:
epoch += 3600
elif old_dst == 0 and dst == 1:
epoch -= 3600
return epoch, title
# Merge consecutive rows with same state
def merge_timeline(entries):
n = 1
while n < len(entries):
if entries[n][1] == entries[n-1][1]:
entries[n-1][0]["duration"] += entries[n][0]["duration"]
entries[n-1][0]["until"] = entries[n][0]["until"]
del entries[n]
else:
n += 1
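# Illustration (hypothetical rows, trimmed to the relevant keys):
#   entries = [({"until": 110, "duration": 10}, 0),
#              ({"until": 115, "duration": 5},  0)]
#   merge_timeline(entries)
#   -> entries == [({"until": 115, "duration": 15}, 0)]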
def melt_short_intervals(entries, duration, dont_merge):
n = 1
need_merge = False
while n < len(entries) - 1:
if entries[n][0]["duration"] <= duration and \
entries[n-1][1] == entries[n+1][1]:
entries[n] = (entries[n][0], entries[n-1][1])
need_merge = True
n += 1
# Due to melting, we need to merge again
if need_merge and not dont_merge:
merge_timeline(entries)
melt_short_intervals(entries, duration, dont_merge)
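# Illustration (hypothetical, dont_merge=False): with duration=60 and phases
# [ok 1h, warn 30s, ok 1h], the 30-second warn phase is relabelled "ok"
# because both neighbours agree, and the follow-up merge collapses all three
# into one ~2h ok phase.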
def history_url_of(site, host, service, from_time, until_time):
history_url_vars = [
("site", site),
("host", host),
("logtime_from_range", "unix"), # absolute timestamp
("logtime_until_range", "unix"), # absolute timestamp
("logtime_from", str(int(from_time))),
("logtime_until", str(int(until_time)))]
if service:
history_url_vars += [
("service", service),
("view_name", "svcevents"),
]
else:
history_url_vars += [
("view_name", "hostevents"),
]
return "view.py?" + html.urlencode_vars(history_url_vars)
statistics_headers = {
"min" : _("Shortest"),
"max" : _("Longest"),
"avg" : _("Average"),
"cnt" : _("Count"),
}
def render_availability_table(availability, from_time, until_time, range_title, what, avoptions, render_number, fetch):
do_csv = html.output_format == "csv_export"
no_html = do_csv or fetch
if not availability:
if not no_html:
html.message(_("No matching hosts/services."))
return [] # No objects
grouping = avoptions["grouping"]
fetch_data = []
if not grouping:
fetch_data.append((None,
render_availability_group(range_title, range_title, None, availability, from_time,
until_time, what, avoptions, render_number, fetch)))
else:
# Grouping is one of host/hostgroup/servicegroup
# 1. Get complete list of all groups
all_group_ids = get_av_groups(availability, grouping)
# 2. Compute Names for the groups and sort according to these names
if grouping != "host":
group_titles = dict(visuals.all_groups(grouping[:-7]))
titled_groups = []
for group_id in all_group_ids:
if grouping == "host":
titled_groups.append((group_id[1], group_id)) # omit the site name
else:
if group_id == ():
title = _("Not contained in any group")
else:
title = group_titles.get(group_id, group_id)
titled_groups.append((title, group_id)) ## ATTENTION
titled_groups.sort(cmp = lambda a,b: cmp(a[1], b[1]))
# 3. Loop over all groups and render them
for title, group_id in titled_groups:
fetch_data.append((title,
render_availability_group(title, range_title, group_id, availability,
from_time, until_time, what, avoptions, render_number, fetch)
))
# Legend for Availability levels
av_levels = avoptions["av_levels"]
if av_levels and not no_html:
warn, crit = av_levels
html.write('<div class="avlegend levels">')
html.write('<h3>%s</h3>' % _("Availability levels"))
html.write('<div class="state state0">%s</div><div class=level>≥ %.3f%%</div>' % (_("OK"), warn))
html.write('<div class="state state1">%s</div><div class=level>≥ %.3f%%</div>' % (_("WARN"), crit))
html.write('<div class="state state2">%s</div><div class=level>< %.3f%%</div>' % (_("CRIT"), crit))
html.write('</div>')
# Legend for timeline
if "display_timeline_legend" in avoptions["labelling"] and avoptions["show_timeline"] and not no_html:
render_timeline_legend(what)
return fetch_data
def render_timeline_legend(what):
html.write('<div class="avlegend timeline">')
html.write('<h3>%s</h3>' % _('Timeline colors'))
html.write('<div class="state state0">%s</div>' % (what == "host" and _("UP") or _("OK")))
if what != "host":
html.write('<div class="state state1">%s</div>' % _("WARN"))
html.write('<div class="state state2">%s</div>' % (what == "host" and _("DOWN") or _("CRIT")))
html.write('<div class="state state3">%s</div>' % (what == "host" and _("UNREACH") or _("UNKNOWN")))
html.write('<div class="state flapping">%s</div>' % _("Flapping"))
if what != "host":
html.write('<div class="state hostdown">%s</div>' % _("H.Down"))
html.write('<div class="state downtime">%s</div>' % _("Downtime"))
html.write('<div class="state ooservice">%s</div>' % _("OO/Service"))
html.write('<div class="state unmonitored">%s</div>' % _("unmonitored"))
html.write('</div>')
def get_av_groups(availability, grouping):
all_group_ids = set([])
for site, host, service, display_name, states, considered_duration, total_duration, statistics, timeline_rows, group_ids in availability:
all_group_ids.update(group_ids)
if len(group_ids) == 0:
all_group_ids.add(()) # null-tuple denotes ungrouped objects
return all_group_ids
# When grouping is enabled, this function is called once for each group
def render_availability_group(group_title, range_title, group_id, availability,
from_time, until_time, what, avoptions, render_number, fetch):
# Filter out groups that we want to show this time
group_availability = []
for entry in availability:
group_ids = entry[-1]
if group_id == () and group_ids:
continue # This is not an ungrouped object
elif group_id and group_id not in group_ids:
continue # Not this group
group_availability.append(entry)
# Some columns might be unneeded due to state treatment options
sg = avoptions["state_grouping"]
state_groups = [ sg["warn"], sg["unknown"], sg["host_down"] ]
show_timeline = avoptions["show_timeline"]
labelling = avoptions["labelling"]
av_levels = avoptions["av_levels"]
# Helper function, needed in row and in summary line
def cell_active(sid):
if sid not in [ "up", "ok" ] and avoptions["av_mode"]:
return False
if sid == "outof_notification_period" and avoptions["notification_period"] != "honor":
return False
elif sid == "outof_service_period": # Never show this as a column
return False
elif sid == "in_downtime" and avoptions["downtimes"]["include"] != "honor":
return False
elif sid == "unmonitored" and not avoptions["consider"]["unmonitored"]:
return False
elif sid == "flapping" and not avoptions["consider"]["flapping"]:
return False
elif sid == "host_down" and not avoptions["consider"]["host_down"]:
return False
elif sid in [ "warn", "unknown", "host_down" ] and sid not in state_groups:
return False
else:
return True
# Render the stuff
do_csv = html.output_format == "csv_export"
no_html = do_csv or fetch
# Sort by site first, then by host (natural sort),
# then by service
def cmp_av_entry(a, b):
return cmp(a[0], b[0]) or \
cmp(num_split(a[1]) + (a[1],), num_split(b[1]) + (b[1],)) or \
cmp(cmp_service_name_equiv(a[2]), cmp_service_name_equiv(b[2])) or \
cmp(a[2], b[2])
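# (Illustrative: num_split provides a natural ordering, so host "srv2"
# sorts before "srv10", unlike a plain string comparison.)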
group_availability.sort(cmp = cmp_av_entry)
show_summary = avoptions.get("summary")
summary = {}
summary_counts = {}
table.begin("av_items", group_title, css="availability",
searchable = False, limit = None, output_format = do_csv and "csv" or (fetch and "fetch" or "html"),
omit_headers = "omit_headers" in avoptions["labelling"])
for site, host, service, display_name, states, considered_duration, total_duration, statistics, timeline_rows, group_ids in group_availability:
table.row()
if what != "bi":
timeline_url = html.makeuri([
("timeline", "yes"),
("timeline_site", site),
("timeline_host", host),
("timeline_service", service)])
else:
timeline_url = html.makeuri([("timeline", "yes"), ("av_aggr_name", service), ("av_aggr_group", host)])
if not "omit_buttons" in labelling and not no_html:
table.cell("", css="buttons")
if what != "bi":
history_url = history_url_of(site, host, service, from_time, until_time)
html.icon_button(history_url, _("Event History"), "history")
html.icon_button(timeline_url, _("Timeline"), "timeline")
else:
html.icon_button(timeline_url, _("Timeline"), "timeline")
host_url = "view.py?" + html.urlencode_vars([("view_name", "hoststatus"), ("site", site), ("host", host)])
if what == "bi":
table.cell(_("Aggregate"))
if no_html:
html.write(service)
else:
bi_url = "view.py?" + html.urlencode_vars([("view_name", "aggr_single"), ("aggr_group", host), ("aggr_name", service)])
html.write('<a href="%s">%s</a>' % (bi_url, service))
availability_columns = bi_availability_columns
else:
if not "omit_host" in labelling:
table.cell(_("Host"))
if no_html:
html.write(host)
else:
html.write('<a href="%s">%s</a>' % (host_url, host))
if what == "service":
if "use_display_name" in labelling:
service_name = display_name
else:
service_name = service
table.cell(_("Service"))
if no_html:
html.write(service_name)
else:
service_url = "view.py?" + html.urlencode_vars([("view_name", "service"), ("site", site), ("host", host), ("service", service)])
html.write('<a href="%s">%s</a>' % (service_url, service_name))
availability_columns = service_availability_columns
else:
availability_columns = host_availability_columns
if show_timeline:
table.cell(_("Timeline"), css="timeline")
if not no_html:
html.write('<a href="%s">' % timeline_url)
render_timeline(timeline_rows, from_time, until_time, total_duration, (site, host, service), range_title, render_number, what, "", avoptions, fetch, style="inline")
if not no_html:
html.write('</a>')
for sid, css, sname, help in availability_columns:
if not cell_active(sid):
continue
if avoptions["av_mode"]:
sname = _("Avail.")
number = states.get(sid, 0)
if not number:
css = "unused"
elif show_summary:
summary.setdefault(sid, 0.0)
if avoptions["timeformat"].startswith("percentage"):
if considered_duration > 0:
summary[sid] += float(number) / considered_duration
else:
summary[sid] += number
# Apply visual availability levels (render OK in yellow/red, if too low)
if number and av_levels and sid in [ "ok", "up" ]:
css = "state%d" % check_av_levels(number, av_levels, considered_duration)
table.cell(sname, render_number(number, considered_duration), css="narrow number " + css, help=help)
# Statistics?
x_cnt, x_min, x_max = statistics.get(sid, (None, None, None))
os_aggrs, os_states = avoptions.get("outage_statistics", ([],[]))
if sid in os_states:
for aggr in os_aggrs:
title = statistics_headers[aggr]
if x_cnt != None:
if aggr == "avg":
r = render_number(number / x_cnt, considered_duration)
elif aggr == "min":
r = render_number(x_min, considered_duration)
elif aggr == "max":
r = render_number(x_max, considered_duration)
else:
r = str(x_cnt)
summary_counts.setdefault(sid, 0)
summary_counts[sid] += x_cnt
table.cell(title, r, css="number stats " + css)
else:
table.cell(title, "")
if show_summary:
table.row(css="summary")
if not "omit_buttons" in labelling and not no_html:
table.cell("")
if not "omit_host" in labelling or what == "bi":
table.cell("", _("Summary"), css="heading")
if what == "service":
table.cell("", "")
if show_timeline and not do_csv:
table.cell("")
for sid, css, sname, help in availability_columns:
if not cell_active(sid):
continue
number = summary.get(sid, 0)
if show_summary == "average" or avoptions["timeformat"].startswith("percentage"):
number /= len(group_availability)
if avoptions["timeformat"].startswith("percentage"):
number *= considered_duration
if not number:
css = "unused"
if number and av_levels and sid in [ "ok", "up" ]:
css = "state%d" % check_av_levels(number, av_levels, considered_duration)
table.cell(sname, render_number(number, considered_duration), css="heading number " + css, help=help)
os_aggrs, os_states = avoptions.get("outage_statistics", ([],[]))
if sid in os_states:
for aggr in os_aggrs:
title = statistics_headers[aggr]
if aggr == "cnt":
count = summary_counts.get(sid, 0)
if show_summary == "average":
count = float(count) / len(group_availability)
text = "%.2f" % count
else:
text = str(count)
table.cell(sname, text, css="number stats " + css, help=help)
else:
table.cell(title, "")
return table.end() # returns Table data if fetch == True
def check_av_levels(number, av_levels, considered_duration):
if considered_duration == 0:
return 0
perc = 100 * float(number) / float(considered_duration)
warn, crit = av_levels
if perc < crit:
return 2
elif perc < warn:
return 1
else:
return 0
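# Example (hypothetical levels): with av_levels = (99.0, 95.0) and a
# considered duration of 3600s, 3564 OK seconds (99.0%) -> 0 (OK),
# 3492s (97.0%) -> 1 (warn), 3240s (90.0%) -> 2 (crit).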
def compute_bi_availability(avoptions, aggr_rows):
rows = []
for aggr_row in aggr_rows:
these_rows, tree_state = get_bi_timeline(aggr_row["aggr_tree"], aggr_row["aggr_group"], avoptions, False)
rows += these_rows
return do_render_availability(rows, "bi", avoptions, timeline=False, timewarpcode=None, fetch=True)
# Render availability of a BI aggregate. This is currently
# no view and does not support display options
def render_bi_availability(title, aggr_rows):
html.add_status_icon("download_csv", _("Export as CSV"), html.makeuri([("output_format", "csv_export")]))
timeline = html.var("timeline")
if timeline:
title = _("Timeline of ") + title
else:
title = _("Availability of ") + title
if html.output_format != "csv_export":
html.body_start(title, stylesheets=["pages","views","status", "bi"], javascripts=['bi'])
html.top_heading(title)
html.begin_context_buttons()
togglebutton("avoptions", False, "painteroptions", _("Configure details of the report"))
html.context_button(_("Status View"), html.makeuri([("mode", "status")]), "status")
if timeline:
html.context_button(_("Availability"), html.makeuri([("timeline", "")]), "availability")
elif len(aggr_rows) == 1:
aggr_name = aggr_rows[0]["aggr_name"]
aggr_group = aggr_rows[0]["aggr_group"]
timeline_url = html.makeuri([("timeline", "1"), ("av_aggr_name", aggr_name), ("av_aggr_group", aggr_group)])
html.context_button(_("Timeline"), timeline_url, "timeline")
html.end_context_buttons()
html.plug()
avoptions = render_availability_options("bi")
range, range_title = avoptions["range"]
avoptions_html = html.drain()
html.unplug()
if html.output_format == "csv_export":
av_output_csv_mimetype(title)
else:
html.write(avoptions_html)
timewarpcode = ""
if not html.has_user_errors():
rows = []
for aggr_row in aggr_rows:
tree = aggr_row["aggr_tree"]
reqhosts = tree["reqhosts"]
try:
timewarp = int(html.var("timewarp"))
except:
timewarp = None
these_rows, tree_state = get_bi_timeline(tree, aggr_row["aggr_group"], avoptions, timewarp)
rows += these_rows
if timewarp and tree_state:
state, assumed_state, node, subtrees = tree_state
eff_state = state
if assumed_state != None:
eff_state = assumed_state
row = {
"aggr_tree" : tree,
"aggr_treestate" : tree_state,
"aggr_state" : state, # state disregarding assumptions
"aggr_assumed_state" : assumed_state, # is None, if there are no assumptions
"aggr_effective_state" : eff_state, # is assumed_state, if there are assumptions, else real state
"aggr_name" : node["title"],
"aggr_output" : eff_state["output"],
"aggr_hosts" : node["reqhosts"],
"aggr_function" : node["func"],
"aggr_group" : html.var("aggr_group"),
}
tdclass, htmlcode = bi.render_tree_foldable(row, boxes=False, omit_root=False,
expansion_level=bi.load_ex_level(), only_problems=False, lazy=False)
html.plug()
html.write('<h3>')
# render icons for back and forth
if int(these_rows[0]["from"]) == timewarp:
html.disabled_icon_button("back_off")
have_forth = False
previous_row = None
for row in these_rows:
if int(row["from"]) == timewarp and previous_row != None:
html.icon_button(html.makeuri([("timewarp", str(int(previous_row["from"])))]), _("Jump one phase back"), "back")
elif previous_row and int(previous_row["from"]) == timewarp and row != these_rows[-1]:
html.icon_button(html.makeuri([("timewarp", str(int(row["from"])))]), _("Jump one phase forth"), "forth")
have_forth = True
previous_row = row
if not have_forth:
html.disabled_icon_button("forth_off")
html.write(" ")
html.icon_button(html.makeuri([("timewarp", "")]), _("Close Timewarp"), "closetimewarp")
timewarpcode = html.drain()
html.unplug()
timewarpcode += '%s %s</h3>' % (_("Timewarp to "), time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timewarp))) + \
'<table class="data table timewarp"><tr class="data odd0"><td class="%s">' % tdclass + \
htmlcode + \
'</td></tr></table>'
else:
timewarpcode = ""
do_render_availability(rows, "bi", avoptions, timeline, timewarpcode)
if html.output_format != "csv_export":
html.bottom_footer()
html.body_end()
def get_bi_timeline(tree, aggr_group, avoptions, timewarp):
range, range_title = avoptions["range"]
# Get state history of all hosts and services contained in the tree.
# In order to simplify the query, we always fetch the information for
# all hosts of the aggregates.
only_sites = set([])
hosts = []
for site, host in tree["reqhosts"]:
only_sites.add(site)
hosts.append(host)
columns = [ "host_name", "service_description", "from", "log_output", "state", "in_downtime" ]
html.live.set_only_sites(list(only_sites))
html.live.set_prepend_site(True)
html.live.set_limit() # removes limit
query = "GET statehist\n" + \
"Columns: " + " ".join(columns) + "\n" +\
"Filter: time >= %d\nFilter: time < %d\n" % range
# Create a specific filter. We really only want the services and hosts
# of the aggregation in question. That prevents status changes of
# irrelevant services from introducing new phases.
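# Hypothetical example of the generated filter block for one host with
# two leaf services ("CPU load", "Memory"):
#   Filter: host_name = foohost
#   Filter: service_description =
#   Filter: service_description = CPU load
#   Filter: service_description = Memory
#   Or: 3
#   And: 2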
by_host = {}
for site, host, service in bi.find_all_leaves(tree):
by_host.setdefault(host, set([])).add(service)
for host, services in by_host.items():
query += "Filter: host_name = %s\n" % host
query += "Filter: service_description = \n"
for service in services:
query += "Filter: service_description = %s\n" % service
query += "Or: %d\nAnd: 2\n" % (len(services) + 1)
if len(hosts) != 1:
query += "Or: %d\n" % len(hosts)
data = html.live.query(query)
if not data:
return [], None
# raise MKGeneralException(_("No historical data available for this aggregation. Query was: <pre>%s</pre>") % query)
html.live.set_prepend_site(False)
html.live.set_only_sites(None)
columns = ["site"] + columns
rows = [ dict(zip(columns, row)) for row in data ]
# Now comes the tricky part: recompute the state of the aggregate
# for each step in the state history and construct a timeline from
# it. As a first step we need the start state for each of the
# hosts/services. They will always be the first consecutive rows
# in the statehist table.
# First partition the rows into sequences with equal start time
phases = {}
for row in rows:
from_time = row["from"]
phases.setdefault(from_time, []).append(row)
# Convert phases to sorted list
sorted_times = phases.keys()
sorted_times.sort()
phases_list = []
for from_time in sorted_times:
phases_list.append((from_time, phases[from_time]))
states = {}
def update_states(phase_entries):
for row in phase_entries:
service = row["service_description"]
key = row["site"], row["host_name"], service
states[key] = row["state"], row["log_output"], row["in_downtime"]
update_states(phases_list[0][1])
# "states" now reflects the host/service states at the beginning
# of the query range.
tree_state = compute_tree_state(tree, states)
tree_time = range[0]
if timewarp == int(tree_time):
timewarp_state = tree_state
else:
timewarp_state = None
timeline = []
def append_to_timeline(from_time, until_time, tree_state):
timeline.append({
"state" : tree_state[0]['state'],
"log_output" : tree_state[0]['output'],
"from" : from_time,
"until" : until_time,
"site" : "",
"host_name" : aggr_group,
"service_description" : tree['title'],
"in_notification_period" : 1,
"in_service_period" : 1,
"in_downtime" : tree_state[0]['in_downtime'],
"in_host_downtime" : 0,
"host_down" : 0,
"is_flapping" : 0,
"duration" : until_time - from_time,
})
for from_time, phase in phases_list[1:]:
update_states(phase)
next_tree_state = compute_tree_state(tree, states)
duration = from_time - tree_time
append_to_timeline(tree_time, from_time, tree_state)
tree_state = next_tree_state
tree_time = from_time
if timewarp == tree_time:
timewarp_state = tree_state
# Add one last entry - for the state until the end of the interval
append_to_timeline(tree_time, range[1], tree_state)
return timeline, timewarp_state
def compute_tree_state(tree, status):
# Convert our status format into that needed by BI
services_by_host = {}
hosts = {}
for site_host_service, state_output in status.items():
site_host = site_host_service[:2]
service = site_host_service[2]
if service:
services_by_host.setdefault(site_host, []).append((
service, # service description
state_output[0], # state
1, # has_been_checked
state_output[1], # output
state_output[0], # hard state (we use the soft state here)
1, # attempt
1, # max_attempts (not relevant)
state_output[2], # in_downtime
False, # acknowledged
))
else:
hosts[site_host] = state_output
status_info = {}
for site_host, state_output in hosts.items():
status_info[site_host] = [
state_output[0],
state_output[0], # host hard state
state_output[1],
state_output[2], # in_downtime
False, # acknowledged
services_by_host.get(site_host,[])
]
# Finally we can execute the tree
bi.load_assumptions()
tree_state = bi.execute_tree(tree, status_info)
return tree_state
#.
# .--Annotations---------------------------------------------------------.
# | _ _ _ _ |
# | / \ _ __ _ __ ___ | |_ __ _| |_(_) ___ _ __ ___ |
# | / _ \ | '_ \| '_ \ / _ \| __/ _` | __| |/ _ \| '_ \/ __| |
# | / ___ \| | | | | | | (_) | || (_| | |_| | (_) | | | \__ \ |
# | /_/ \_\_| |_|_| |_|\___/ \__\__,_|\__|_|\___/|_| |_|___/ |
# | |
# +----------------------------------------------------------------------+
# | This code deals with retrospective annotations and downtimes. |
# '----------------------------------------------------------------------'
# Example for annotations:
# {
# ( "mysite", "foohost", "myservice" ) : # service might be None
# [
# {
# "from" : 1238288548,
# "until" : 1238292845,
# "text" : u"Das ist ein Text über mehrere Zeilen, oder was weiß ich",
# "downtime" : True, # Treat as scheduled Downtime,
# "date" : 12348854885, # Time of entry
# "author" : "mk",
# },
# # ... further entries
# ]
# }
def save_annotations(annotations):
file(defaults.var_dir + "/web/statehist_annotations.mk", "w").write(repr(annotations) + "\n")
def load_annotations(lock = False):
path = defaults.var_dir + "/web/statehist_annotations.mk"
if os.path.exists(path):
if lock:
aquire_lock(path)
return eval(file(path).read())
else:
return {}
def update_annotations(site_host_svc, annotation):
annotations = load_annotations(lock = True)
entries = annotations.get(site_host_svc, [])
new_entries = []
for entry in entries:
if entry["from"] == annotation["from"] \
and entry["until"] == annotation["until"]:
continue # Skip existing entries with same identity
new_entries.append(entry)
new_entries.append(annotation)
annotations[site_host_svc] = new_entries
save_annotations(annotations)
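# Hypothetical round trip (site/host/times are illustrative only):
#   key = ("mysite", "foohost", None)
#   update_annotations(key, {"from": 1238288548, "until": 1238292845,
#                            "text": u"planned maintenance",
#                            "date": time.time(), "author": "mk"})
#   anno = find_annotation(load_annotations(), key, 1238288548, 1238292845)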
def find_annotation(annotations, site_host_svc, fromtime, untiltime):
entries = annotations.get(site_host_svc)
if not entries:
return None
for annotation in entries:
if annotation["from"] == fromtime \
and annotation["until"] == untiltime:
return annotation
return None
def delete_annotation(annotations, site_host_svc, fromtime, untiltime):
entries = annotations.get(site_host_svc)
if not entries:
return
found = None
for nr, annotation in enumerate(entries):
if annotation["from"] == fromtime \
and annotation["until"] == untiltime:
found = nr
break
if found != None:
del entries[found]
def render_annotations(annotations, from_time, until_time, by_host, what, avoptions, omit_service):
format = "%H:%M:%S"
if time.localtime(from_time)[:3] != time.localtime(until_time-1)[:3]:
format = "%Y-%m-%d " + format
def render_date(ts):
return time.strftime(format, time.localtime(ts))
annos_to_render = []
for site_host, avail_entries in by_host.iteritems():
for service in avail_entries.keys():
site_host_svc = site_host[0], site_host[1], (service or None)
for annotation in annotations.get(site_host_svc, []):
if (annotation["from"] >= from_time and annotation["from"] <= until_time) or \
(annotation["until"] >= from_time and annotation["until"] <= until_time):
annos_to_render.append((site_host_svc, annotation))
annos_to_render.sort(cmp=lambda a,b: cmp(a[1]["from"], b[1]["from"]) or cmp(a[0], b[0]))
labelling = avoptions["labelling"]
table.begin(title = _("Annotations"), omit_if_empty = True)
for (site_id, host, service), annotation in annos_to_render:
table.row()
table.cell("", css="buttons")
anno_vars = [
( "anno_site", site_id ),
( "anno_host", host ),
( "anno_service", service or "" ),
( "anno_from", int(annotation["from"]) ),
( "anno_until", int(annotation["until"]) ),
]
edit_url = html.makeuri(anno_vars)
html.icon_button(edit_url, _("Edit this annotation"), "edit")
delete_url = html.makeactionuri([("_delete_annotation", "1")] + anno_vars)
html.icon_button(delete_url, _("Delete this annotation"), "delete")
if not omit_service:
if not "omit_host" in labelling:
host_url = "view.py?" + html.urlencode_vars([("view_name", "hoststatus"), ("site", site_id), ("host", host)])
table.cell(_("Host"), '<a href="%s">%s</a>' % (host_url, host))
if service:
service_url = "view.py?" + html.urlencode_vars([("view_name", "service"), ("site", site_id), ("host", host), ("service", service)])
# TODO: honor use_display_name. But we have no display names here...
service_name = service
table.cell(_("Service"), '<a href="%s">%s</a>' % (service_url, service_name))
table.cell(_("From"), render_date(annotation["from"]), css="nobr narrow")
table.cell(_("Until"), render_date(annotation["until"]), css="nobr narrow")
table.cell(_("Annotation"), html.attrencode(annotation["text"]))
table.cell(_("Author"), annotation["author"])
table.cell(_("Entry"), render_date(annotation["date"]), css="nobr narrow")
table.end()
def edit_annotation():
site_id = html.var("anno_site") or ""
hostname = html.var("anno_host")
service = html.var("anno_service") or None
fromtime = float(html.var("anno_from"))
untiltime = float(html.var("anno_until"))
site_host_svc = (site_id, hostname, service)
# Find existing annotation with this specification
annotations = load_annotations()
annotation = find_annotation(annotations, site_host_svc, fromtime, untiltime)
if not annotation:
annotation = {
"from" : fromtime,
"until" : untiltime,
"text" : "",
}
annotation["host"] = hostname
annotation["service"] = service
annotation["site"] = site_id
html.plug()
title = _("Edit annotation of ") + hostname
if service:
title += "/" + service
html.body_start(title, stylesheets=["pages","views","status"])
html.top_heading(title)
html.begin_context_buttons()
html.context_button(_("Abort"), html.makeuri([("anno_host", "")]), "abort")
html.end_context_buttons()
value = forms.edit_dictionary([
( "site", TextAscii(title = _("Site")) ),
( "host", TextUnicode(title = _("Hostname")) ),
( "service", Optional(TextUnicode(allow_empty=False), sameline = True, title = _("Service")) ),
( "from", AbsoluteDate(title = _("Start-Time"), include_time = True) ),
( "until", AbsoluteDate(title = _("End-Time"), include_time = True) ),
( "text", TextAreaUnicode(title = _("Annotation"), allow_empty = False) ), ],
annotation,
varprefix = "editanno_",
formname = "editanno",
focus = "text")
if value:
site_host_svc = value["site"], value["host"], value["service"]
del value["site"]
del value["host"]
value["date"] = time.time()
value["author"] = config.user_id
update_annotations(site_host_svc, value)
html.drain() # omit previous HTML code, not needed
html.unplug()
html.del_all_vars(prefix = "editanno_")
html.del_var("filled_in")
return False
html.unplug() # show HTML code
html.bottom_footer()
html.body_end()
return True
# Called at the beginning of every availability page
def handle_delete_annotations():
if html.var("_delete_annotation"):
site_id = html.var("anno_site") or ""
hostname = html.var("anno_host")
service = html.var("anno_service") or None
fromtime = float(html.var("anno_from"))
untiltime = float(html.var("anno_until"))
site_host_svc = (site_id, hostname, service)
annotations = load_annotations()
annotation = find_annotation(annotations, site_host_svc, fromtime, untiltime)
if not annotation:
return
if not html.confirm(_("Are you sure that you want to delete the annotation '%s'?" % annotation["text"])):
return
delete_annotation(annotations, site_host_svc, fromtime, untiltime)
save_annotations(annotations)
def handle_edit_annotations():
if html.var("anno_host") and not html.var("_delete_annotation"):
finished = edit_annotation()
else:
finished = False
return finished
|
[
"parvathi.konda@techwave.net"
] |
parvathi.konda@techwave.net
|
cd50fc8b715db9544fca346be9d2f59be5483792
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/FrogRiver_20200723134656.py
|
b53537eb14ce4472bd411f219e101697e4edb59b
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
def Frog(X, A):
    # Given X, the position the frog wants to reach, find the earliest
    # second at which every position 1..X is covered by a fallen leaf.
    # Returns that second (an index into A), or -1 if the frog can never cross.
    pos = set()
    for sec, leaf in enumerate(A):
        if leaf <= X:
            pos.add(leaf)
        if len(pos) == X:
            return sec
    return -1

print(Frog(5, [1, 3, 1, 4, 2, 3, 5, 4]))  # expected: 6
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
6e5b22d94e41c54bed477e9c213add68291fd728
|
d85cc746428e787254455c80b66a7309aa715e24
|
/demo_odoo_tutorial/models/models.py
|
fc9e8beafbb712f17fd48d60021152bfda775a67
|
[
"MIT"
] |
permissive
|
AllenHuang101/odoo-demo-addons-tutorial
|
2ef7d47432a2530f1e704f86cba78e3e975ca0f3
|
e719594bc42e3a9b273f5b37980ac61773702ab9
|
refs/heads/master
| 2023-03-28T03:37:46.338483
| 2021-03-29T08:44:22
| 2021-03-29T08:44:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,829
|
py
|
from odoo import models, fields, api
from odoo.exceptions import UserError, ValidationError
class DemoOdooTutorial(models.Model):
_name = 'demo.odoo.tutorial'
_description = 'Demo Odoo Tutorial'
_inherit = ['mail.thread', 'mail.activity.mixin'] # track_visibility
name = fields.Char('Description', required=True)
# track_visibility='always' and track_visibility='onchange'
is_done_track_onchange = fields.Boolean(
string='Is Done?', default=False, track_visibility='onchange')
name_track_always = fields.Char(string="track_name", track_visibility='always')
# Pass the callable, not a call: default=fields.Datetime.now() would freeze
# the timestamp at import time instead of evaluating it per record.
start_datetime = fields.Datetime('Start DateTime', default=fields.Datetime.now)
stop_datetime = fields.Datetime('End Datetime', default=fields.Datetime.now)
field_onchange_demo = fields.Char('onchange_demo')
field_onchange_demo_set = fields.Char('onchange_demo_set', readonly=True)
# float digits
# field tutorial
input_number = fields.Float(string='input number', digits=(10,3))
field_compute_demo = fields.Integer(compute="_get_field_compute") # readonly
_sql_constraints = [
('name_uniq', 'unique(name)', 'Description must be unique'),
]
@api.constrains('start_datetime', 'stop_datetime')
def _check_date(self):
for data in self:
if data.start_datetime > data.stop_datetime:
raise ValidationError(
"data.stop_datetime > data.start_datetime"
)
@api.depends('input_number')
def _get_field_compute(self):
for data in self:
data.field_compute_demo = data.input_number * 1000
@api.onchange('field_onchange_demo')
def onchange_demo(self):
if self.field_onchange_demo:
self.field_onchange_demo_set = 'set {}'.format(self.field_onchange_demo)
|
[
"twtrubiks@gmail.com"
] |
twtrubiks@gmail.com
|
897039da602122f80f994a6b5a9d5a718d8d68df
|
d42ea0626b3322be8962857ff16621a40811186a
|
/payment/migrations/0001_initial.py
|
eb5dc095f9af8dd025347c28634ab01bbd11a7b1
|
[] |
no_license
|
manalap/django-ToDo
|
f7fe270939b1aec7b1cbe14e78c9370f18864657
|
9d94df60d87f72797e40a4feb7f86d2e594aeb96
|
refs/heads/master
| 2023-08-17T07:00:03.654741
| 2023-08-13T06:26:57
| 2023-08-13T06:26:57
| 372,252,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
# Generated by Django 3.2.3 on 2021-05-30 13:17
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='List',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item', models.CharField(max_length=200)),
('completed', models.BooleanField(default=False)),
],
),
]
|
[
"manuchian@yahoo.com"
] |
manuchian@yahoo.com
|
6d32ff9bf7835bf603fe781dc5c449adaf154377
|
356892df7a0b3a8afbe19e1a9ef8c1e01bf65ac6
|
/03. Lists.py
|
fc091435203e2450f2320cf5b4441a82ad29d847
|
[
"MIT"
] |
permissive
|
LPIX-11/quick-python-for-developers
|
71d5ca22391d86b82d4b5fec47fe46402e510aa2
|
6c52d54a51cd7a1ba6915b77d9efd919ce98b4bc
|
refs/heads/master
| 2020-06-28T14:02:29.687633
| 2019-08-02T13:31:47
| 2019-08-02T14:34:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,795
|
py
|
# Dealing with lists in python since we use them a lot
x = [1, 2, 3, 4, 5, 6]
# Print list length
print(x, ' contains: ', len(x), ' items')
# len returns the number of items contained in an object
# ###
# If you need to slice list to extract values in a given range you can do
first = x[:3]
last = x[3:]
tLast = x[-2:]
print(first, ': first data extracted from the original list')
print(last, ': last data extracted from the original list')
print(tLast, ': last two elements extracted from the original list')
# ###
# Append a list to another list
x.extend([7, 8])
print(x, ': Appended the [7, 8] list')
# If it's just appending a single object
x.append(9)
print(x, ': Appended 9\n')
# If it's about creating a list of lists
y = [14, 11, 13, 10, 12]
# This will be a list constituted of the x and y lists
listLists = [x, y]
print(listLists, ': list of lists\n')
# ###
# If it's about sorting the lists
# the basic sorting
listLists[1].sort()
# the reverse sorting
listLists[0].sort(reverse=True)
# reversing the list of lists
listLists.sort(reverse=True)
print(listLists, ': sorted list of lists')
print(listLists[0], ': sorted y list')
print(listLists[1], ': x list sorted in reverse')
# ###
# Tuples
# In Python, tuples are a lot like lists but they are immutable: once created, a tuple can't be changed
# They're handy for functional programming or when interfacing with systems like Apache Spark
# Redeclaring x as a tuple
x = (1, 2, 3)
print(len(x))
# And you can do pretty much things that we've done earlier
# If you want to pass a group of variables that you want to keep together you can use a tuple
tupleVariables = (age, income) = "29,1200000".split(',')
print(tupleVariables, ': Tupled values')
print(age)
print(income)
|
[
"mohamedjohnson31@gmail.com"
] |
mohamedjohnson31@gmail.com
|
9950d363a6587e6d6ea4a143ceb4bdb58679db28
|
d690e866697b87a3eacc4d52d37461806f7dbc23
|
/wapi/wapi-0.1/SConstruct
|
84c7ca43ceed7e724215f5bd4e5dfe8d69b3eedb
|
[
"BSD-2-Clause"
] |
permissive
|
bbrancke/tempNL
|
01e39f445dddc80d5b58177094a4a3a701f9b6e2
|
716c4c9e0c9da361e548b17d6847e1e4aa9788ec
|
refs/heads/master
| 2021-05-08T12:51:28.767516
| 2018-02-27T12:09:06
| 2018-02-27T12:09:06
| 119,975,337
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,887
|
from os.path import join as opj
from os import getenv
from sys import stderr, stdout
### Common Variables ###########################################################
INCDIR = 'include'
LIBDIR = 'lib'
SRCDIR = 'src'
EXADIR = 'examples'
### Common Utilities ###########################################################
def to_src_path(file):
return opj(SRCDIR, file)
### Parse Command Line Arguments ###############################################
vars = Variables()
vars.AddVariables(
BoolVariable('debug', 'Enable debug symbols.', True),
BoolVariable('optimize', 'Compile with optimization flags turned on.', True),
BoolVariable('profile', 'Enable profile information.', False),
BoolVariable('check', 'Enable library/header checks.', True),
)
env = Environment(variables = vars)
Help(vars.GenerateHelpText(env))
if env['debug']:
env.Append(CCFLAGS = '-g')
if env['optimize']:
env.Append(CCFLAGS = '-O3 -pipe')
if not env['profile']:
env.Append(CCFLAGS = '-fomit-frame-pointer')
if env['profile']:
env.Append(CCFLAGS = '-pg')
env.Append(LINKFLAGS = '-pg')
### Generic Compiler Flags #####################################################
env.Append(CCFLAGS = '-Wall')
env.Append(CPPPATH = [INCDIR])
env.Append(LIBPATH = LIBDIR)
### Parse Environment Variables ################################################
env.Append(CCFLAGS = getenv('CFLAGS', ''))
env.Append(CCFLAGS = '-fno-strict-aliasing')
env.Append(LINKFLAGS = getenv('LDFLAGS', ''))
### Library/Header Check #######################################################
common_libs = ['m', 'iw']
common_hdrs = [
'ctype.h',
'errno.h',
'iwlib.h',
'linux/nl80211.h',
'math.h',
'netinet/in.h',
'netlink/attr.h',
'netlink/genl/ctrl.h',
'netlink/genl/family.h',
'netlink/genl/genl.h',
'netlink/msg.h',
'net/route.h',
'stdio.h',
'stdlib.h',
'string.h',
'sys/ioctl.h',
'sys/socket.h',
'sys/types.h',
]
def CheckPkgConfig(ctx):
ctx.Message('Checking for pkg-config... ')
ret = ctx.TryAction('pkg-config pkg-config')[0]
ctx.Result(ret)
return ret
def CheckPkg(ctx, pkg, ver):
ctx.Message('Checking for package %s... ' % pkg)
ret = ctx.TryAction('pkg-config --atleast-version=%s %s' % (ver, pkg))[0]
ctx.Result(ret)
return ret
conf = Configure(
env,
custom_tests = {
'CheckPkgConfig': CheckPkgConfig,
'CheckPkg': CheckPkg})
def require_lib(lib):
if not conf.CheckLib(lib):
Exit(1)
def require_hdr(hdr):
if not conf.CheckCHeader(hdr):
Exit(1)
src = env.Clone() # Library sources.
exa = env.Clone() # Examples.
if not env.GetOption('clean') and env['check']:
# Checking common libraries.
map(require_hdr, common_hdrs)
map(require_lib, common_libs)
# Check pkg-config.
if not conf.CheckPkgConfig():
stderr.write("pkg-config is missing!\n")
Exit(1)
# Configuring nl80211.
if conf.CheckPkg('libnl-1', '1'):
src.ParseConfig('pkg-config --libs --cflags libnl-1')
src.Append(CCFLAGS = '-DLIBNL1')
elif conf.CheckPkg('libnl-2.0', '2'):
src.ParseConfig('pkg-config --libs --cflags libnl-2.0')
src.Append(CCFLAGS = '-DLIBNL2')
else:
stderr.write('libnl could not be found!')
Exit(1)
### Compile WAPI ###############################################################
common_srcs = map(to_src_path, ['util.c', 'network.c', 'wireless.c'])
src.Append(LIBS = common_libs)
src.Append(CPPPATH = [SRCDIR])
src.SharedLibrary(
opj(LIBDIR, 'wapi'),
map(src.SharedObject, common_srcs))
### Compile Examples ###########################################################
exa.Append(LIBS = ["wapi"])
exa.Program(opj(EXADIR, 'sample-get.c'))
exa.Program(opj(EXADIR, 'sample-set.c'))
exa.Program(opj(EXADIR, 'ifadd.c'))
exa.Program(opj(EXADIR, 'ifdel.c'))
|
[
"bbrancke@gmail.com"
] |
bbrancke@gmail.com
|
|
12f6cd8c0d13ddb5246553b8acd29a2595a7a282
|
82ca64c6a819f3e2cb41057f2df9f758cedee28a
|
/BlockChain/venv/bin/python-config
|
ae068f06249b1f99eb784109dbf07bbc241050d5
|
[] |
no_license
|
seanxxxx/coinx
|
619a18f9b2d7f83076083055bfccf0c5e404f665
|
eb1a7ed430c546cf02ddcc79f436200b218d5244
|
refs/heads/master
| 2023-01-28T03:09:10.358463
| 2018-09-07T07:49:19
| 2018-09-07T07:49:19
| 146,564,986
| 0
| 1
| null | 2022-12-20T14:20:06
| 2018-08-29T07:52:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,363
|
#!/Users/xuanxu/PycharmProjects/BlockChain/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
|
[
"xuxuan@lanlingdai.net"
] |
xuxuan@lanlingdai.net
|
|
a040a91675720c6d4d854fce17ba48a719e606df
|
3770efe2f7ebadf6d2ec54a3771f1bdcee5c73d3
|
/yandex_parser/items.py
|
796bbe17a760d077b3e7affebcf2fd0b2b72125a
|
[] |
no_license
|
Dantistnfs/yahoo-parser-scrapy
|
84d294e59e0a29787640c4f4766483050dd73f25
|
d747285272080d8969d90ef5f5f44558b62525ee
|
refs/heads/master
| 2021-04-30T17:28:13.682794
| 2017-02-01T13:55:50
| 2017-02-01T13:55:50
| 80,216,042
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 853
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class YandexParserItem(scrapy.Item):
# define the fields for your item here like:
name = scrapy.Field()
current_price = scrapy.Field()
PREV_CLOSE = scrapy.Field()
OPEN = scrapy.Field()
BID = scrapy.Field()
ASK = scrapy.Field()
DAYS_RANGE = scrapy.Field()
FIFTY_TWO_WK_RANGE = scrapy.Field()
TD_VOLUME = scrapy.Field()
AVERAGE_VOLUME_3MONTH = scrapy.Field()
MARKET_CAP = scrapy.Field()
BETA = scrapy.Field()
PE_RATIO = scrapy.Field()
EPS_RATIO = scrapy.Field()
EARNINGS_DATE = scrapy.Field()
DIVIDEND_AND_YIELD = scrapy.Field()
EXDIVIDEND_DATE = scrapy.Field()
ONE_YEAR_TARGET_PRICE = scrapy.Field()
|
[
"dantistnfs@gmail.com"
] |
dantistnfs@gmail.com
|
227e871bbba83b930725e44eafc4874132d87a55
|
a3ea074995fd14fc6a1b3f31286a099ebd312ec1
|
/src/TDDBlog/Blog/blogUrls.py
|
b9e78ad49b988e1266eaca797283507f207965e1
|
[] |
no_license
|
nicholaslemay/TDDBlog
|
e847a59be80dbd7087cc7910c3ae0cd190d98008
|
ca56c9746bc58892070c8787b6aed27eb97f2f63
|
refs/heads/master
| 2016-09-06T10:33:17.844652
| 2010-09-19T20:36:27
| 2010-09-19T20:36:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
from django.conf.urls.defaults import *
from TDDBlog.Blog.controllers.NewBlogController import BlogPostController
urlpatterns = patterns('',
url(r'^new/', BlogPostController(), name="newBlog"),
url(r'^thankyou/', "django.views.generic.simple.direct_to_template",{'template': 'thankYou.html'},name="thankYou")
)
|
[
"nlemay@pyxis-tech.co"
] |
nlemay@pyxis-tech.co
|
d99f3077d12c805081ea18bebf5d1bd924df3682
|
8f02939917edda1e714ffc26f305ac6778986e2d
|
/BOJ/2180/generator/gen.py
|
e0da56e963e00dd0ce89f4b06cd344b746f0aa3a
|
[] |
no_license
|
queuedq/ps
|
fd6ee880d67484d666970e7ef85459683fa5b106
|
d45bd3037a389495d9937afa47cf0f74cd3f09cf
|
refs/heads/master
| 2023-08-18T16:45:18.970261
| 2023-08-17T17:04:19
| 2023-08-17T17:04:19
| 134,966,734
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
from random import *
N = int(input())
s = input() # seed
seed(s)
mx = 40000
# Generate
A = []
for i in range(N//2):
A.append((0, 0))
for i in range(N//2, N*3//4):
A.append((randint(1, mx), randint(1, mx)))
for i in range(N*3//4, N*7//8):
A.append((0, randint(1, mx)))
for i in range(N*7//8, N):
A.append((randint(1, mx), 0))
assert(len(A) == N)
shuffle(A)
# Print
print(N)
for i in range(N):
print(A[i][0], A[i][1])
|
[
"queued37@gmail.com"
] |
queued37@gmail.com
|
b37888fa6385baeb41115a66b55bec5886b14fbc
|
387ad3775fad21d2d8ffa3c84683d9205b6e697d
|
/testsuite/trunk/el/el_test_036.py
|
cfab23e5ff03600c188c22c0c83bb31985905443
|
[] |
no_license
|
kodiyalashetty/test_iot
|
916088ceecffc17d2b6a78d49f7ea0bbd0a6d0b7
|
0ae3c2ea6081778e1005c40a9a3f6d4404a08797
|
refs/heads/master
| 2020-03-22T11:53:21.204497
| 2018-03-09T01:43:41
| 2018-03-09T01:43:41
| 140,002,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
#!/usr/bin/env python
"""
(C) Copyright IBM Corp. 2008
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. This
file and program are licensed under a BSD style license. See
the Copying file included with the OpenHPI distribution for
full licensing terms.
Authors:
Jayashree Padmanabhan <jayshree@in.ibm.com>
"""
import unittest
from openhpi import *
class TestSequence(unittest.TestCase):
"""
runTest : EL test
*
* This test verifies the failure of oh_el_map_from_file when el == None
*
* Return value: 0 on success, 1 on failure
"""
def runTest(self):
el = oh_el()
retc = None
# test failure of oh_el_map_from_file with el==None
el = None
retc = oh_el_map_from_file(el, "./elTest.data")
self.assertEqual (retc == SA_OK,False)
if __name__=='__main__':
unittest.main()
|
[
"suntrupth@a44bbd40-eb13-0410-a9b2-f80f2f72fa26"
] |
suntrupth@a44bbd40-eb13-0410-a9b2-f80f2f72fa26
|
afc6a4f4facc75d71b3e22fc99b9f7be1895f171
|
021fd55be143c1520f2554a5fb5f671561e8a26a
|
/mysite/settings.py
|
044b36b1c33ce34558b366a3f0d10f5eee72b9bd
|
[] |
no_license
|
seb-seb/my-first-blog
|
294ef99df1d5227104cdf0831bef98f01b423043
|
8a11f3e9bd4f179c7a269973da34c71976d13577
|
refs/heads/master
| 2020-03-19T04:32:57.764270
| 2018-06-03T20:42:20
| 2018-06-03T20:42:20
| 135,840,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,242
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Django Girls tutorial
LOGIN_REDIRECT_URL = '/'
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7cuby2(c_$m4hy9$-0uj*g0!!z+xsvc$f(c)__0zse2c41ax=='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'fr-fr'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"sebetclo@bregeon.net"
] |
sebetclo@bregeon.net
|
956b9766831edf51e30c5abb046640ac6e56815b
|
56a821768a62e41ca7486b7ff54fed3c6aa0d827
|
/lecture-artificial-intelligence/decision-tree.py
|
0f621b1c400cbff4daca6dfbf76085e4b0fcc90c
|
[] |
no_license
|
goFrendiAsgard/kuliah-2.0
|
512fd0bacefd66f9564b3ba6ad034b2fd381911f
|
1f03d38979153bbb001e52b460f59118f11de880
|
refs/heads/master
| 2022-12-16T00:10:29.860433
| 2019-07-03T04:01:03
| 2019-07-03T04:01:03
| 125,147,197
| 24
| 23
| null | 2022-12-09T15:58:30
| 2018-03-14T03:02:53
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 270
|
py
|
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
clf = DecisionTreeClassifier()
# train
clf.fit(iris.data, iris.target)
print(clf.predict([[5.9, 3, 5.1, 1.8]])) # 2
print(clf.predict([[5.1, 3.5, 1.4, 0.1]])) # 0
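# Hypothetical extension: map the numeric prediction back to the class name,
# e.g. print(iris.target_names[clf.predict([[5.9, 3, 5.1, 1.8]])])  # ['virginica']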
|
[
"gofrendiasgard@gmail.com"
] |
gofrendiasgard@gmail.com
|
38c80d4c299c6dbe85afac306b3ae78b212ec38c
|
2b81ca6291eee31dc797b31ba15b088191f6a74e
|
/tutorial2/tutorial2/pipelines.py
|
899f6e34ccb176771ca57222bb334bc97a526142
|
[] |
no_license
|
MIKEHHQ/Crawlers
|
f2f3a548bd75182a5e132696d4e3238be5c0a840
|
0321df0a68894c973440e81f6d2b40a07093ad19
|
refs/heads/master
| 2022-11-25T17:01:07.335872
| 2020-08-06T03:32:05
| 2020-08-06T03:32:05
| 284,692,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class Tutorial2Pipeline:
def process_item(self, item, spider):
return item
|
[
"392920729@qq.com"
] |
392920729@qq.com
|
f4989d258e89f5e950e3729031c78f646095c4a1
|
e44de61f99836ee92f8cdfe3a8e53b60a42a7e63
|
/2018/day8/solution1.py
|
9bfd9604ecda288c879c607e28eaa0121fa0cfa8
|
[
"Apache-2.0"
] |
permissive
|
om-henners/advent_of_code
|
471bae7d16fd7ae876f2f10f1399f85f5faa07a5
|
2c11272e05d7d1dcc5a96c9026d0f799f6443fa7
|
refs/heads/master
| 2021-12-15T02:00:17.590726
| 2021-12-04T01:07:17
| 2021-12-04T01:07:17
| 225,377,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,173
|
py
|
from itertools import chain
from uuid import uuid4
import networkx as nx
data = open('input.txt').read().strip()
# data = open('sample_input.txt').read().strip()
starting_numbers = [int(i) for i in data.split()]
tree = nx.DiGraph()
def build_node(numbers):
node_id = uuid4()
num_children = numbers[0]
num_metadata = numbers[1]
remainder = numbers[2:]
for i in range(num_children):
child_node, remainder = build_node(remainder)
tree.add_edge(
node_id,
child_node
)
metadata = remainder[:num_metadata]
if len(metadata) < num_metadata:
raise ValueError("Missing metadata")
tree.add_node(
node_id,
details=(num_children, num_metadata),
metadata=metadata
)
return node_id, remainder[num_metadata:]
top_node, remainder = build_node(starting_numbers)
if remainder:
raise ValueError("Didn't work")
print(sum(chain.from_iterable(
pair[1] for pair in
tree.nodes(data='metadata')
)))
import matplotlib.pyplot as plt
pos = nx.nx_agraph.graphviz_layout(tree, prog='dot')
nx.draw_networkx(tree, pos=pos, with_labels=False)
plt.show()
|
[
"henry.walshaw@gmail.com"
] |
henry.walshaw@gmail.com
|
93d6f00bf21e5a4d6004d45417bd2d5253c50290
|
63913055f86d625786196a880c1d8f82b1b569d5
|
/makeSemiLeptonicTemplates.py
|
25b2bd497d54b53a42a56d256ee2a9e53b71beab
|
[] |
no_license
|
mroguljic/X_YH_4b
|
328791db1449d5ddef8495df3e0ad8a30aeefba3
|
78ba7980058bd7759354182c685baf605a4e8a8d
|
refs/heads/master
| 2022-11-10T15:09:56.836525
| 2021-09-29T14:35:46
| 2021-09-29T14:35:46
| 248,929,562
| 0
| 3
| null | 2020-12-23T08:18:44
| 2020-03-21T07:44:38
|
Python
|
UTF-8
|
Python
| false
| false
| 15,171
|
py
|
#To be used with trees from event selection
import ROOT as r
import time, os
from optparse import OptionParser
from collections import OrderedDict
from TIMBER.Tools.Common import *
from TIMBER.Analyzer import *
TIMBERPATH = os.environ["TIMBERPATH"]
parser = OptionParser()
parser.add_option('-i', '--input', metavar='IFILE', type='string', action='store',
default = '',
dest = 'input',
help = 'A root file or text file with multiple root file locations to analyze')
parser.add_option('-o', '--output', metavar='OFILE', type='string', action='store',
default = 'output.root',
dest = 'output',
help = 'Output file name.')
parser.add_option('-p', '--process', metavar='PROCESS', type='string', action='store',
default = 'ttbarSemi',
dest = 'process',
help = 'Process in the given file')
parser.add_option('-v','--var', metavar='variation', type='string', action='store',
default = "nom",
dest = 'variation',
help = 'jmrUp/Down, jmsUp/Down, jesUp/Down, jerUp/Down, sfUp/sfDown, trigUp/Down, isoUp/Down, IdUp/IdDown')
parser.add_option('-y', '--year', metavar='year', type='string', action='store',
default = '2016',
dest = 'year',
help = 'Dataset year')
parser.add_option('-m', metavar='mode', type='string', action='store',
default = "RECREATE",
dest = 'mode',
help = 'RECREATE or UPDATE outputfile')
(options, args) = parser.parse_args()
#SF and JES/R have their own event trees
iFile = options.input
variation = options.variation
year = options.year
if ("je" in variation):
if not variation in iFile:
iFile = iFile.replace(".root","_{0}.root".format(variation))
print("{0} not in {1}, swapping input to {2}".format(variation,options.input,iFile))
elif ("sf" in variation):
if not variation in iFile:
iFile = iFile.replace(".root","_{0}.root".format(variation))
print("{0} not in {1}, swapping input to {2}".format(variation,options.input,iFile))
else:
if not("nom" in iFile):
iFile = iFile.replace(".root","_nom.root")
a = analyzer(iFile)
if("data" in options.process or "SingleMuon" in options.process):
isData=True
else:
isData=False
histos =[]
if("jm" in variation):
probeJetMassVar = "probeJetMass_{0}".format(variation)
else:
probeJetMassVar = "probeJetMass_nom"
if(year=="2016"):
#SFs are split into B-F and G-H eras with respective lumis 19.961 and 16.227 (lumiBCDEF and lumiGH below)
IdFile1 = TIMBERPATH+"TIMBER/data/OfficialSFs/EfficienciesStudies_UL2016_preVFP_DEN_TrackerMuons_rootfiles_Efficiencies_muon_generalTracks_Z_Run2016_UL_HIPM_ID.root"
IdName1 = "NUM_MediumID_DEN_TrackerMuons_abseta_pt"
IdFile2 = TIMBERPATH+"TIMBER/data/OfficialSFs/EfficienciesStudies_UL2016_postVFP_DEN_TrackerMuons_rootfiles_Efficiencies_muon_generalTracks_Z_Run2016_UL_ID.root"
IdName2 = "NUM_MediumID_DEN_TrackerMuons_abseta_pt"
IsoFile1 = TIMBERPATH+"TIMBER/data/OfficialSFs/EfficienciesStudies_UL2016_postVFP_DEN_TrackerMuons_rootfiles_Efficiencies_muon_generalTracks_Z_Run2016_UL_ISO.root"
IsoName1 = "NUM_TightRelIso_DEN_MediumID_abseta_pt"
IsoFile2 = TIMBERPATH+"TIMBER/data/OfficialSFs/EfficienciesStudies_UL2016_preVFP_DEN_TrackerMuons_rootfiles_Efficiencies_muon_generalTracks_Z_Run2016_UL_HIPM_ISO.root"
IsoName2 = "NUM_TightRelIso_DEN_MediumID_abseta_pt"
TrigFile1 = TIMBERPATH+"TIMBER/data/OfficialSFs/EfficienciesAndSF_RunBtoF.root"
TrigName1 = "IsoMu24_OR_IsoTkMu24_PtEtaBins/efficienciesDATA/abseta_pt_DATA"
TrigFile2 = TIMBERPATH+"TIMBER/data/OfficialSFs/EfficienciesAndSF_Period4.root"
TrigName2 = "IsoMu24_OR_IsoTkMu24_PtEtaBins/efficienciesDATA/abseta_pt_DATA"
lumiBCDEF = 19.961
lumiGH = 16.227
elif(year=="2017"):
IdFile = TIMBERPATH+"TIMBER/data/OfficialSFs/EfficienciesStudies_UL2017_DEN_TrackerMuons_rootfiles_Efficiencies_muon_generalTracks_Z_Run2017_UL_ID.root"
IdName = "NUM_MediumID_DEN_TrackerMuons_abseta_pt"
IsoFile = TIMBERPATH+"TIMBER/data/OfficialSFs/EfficienciesStudies_UL2017_DEN_TrackerMuons_rootfiles_Efficiencies_muon_generalTracks_Z_Run2017_UL_ISO.root"
IsoName = "NUM_TightRelIso_DEN_MediumID_abseta_pt"
TrigFile = TIMBERPATH+"TIMBER/data/OfficialSFs/EfficienciesAndSF_RunBtoF_Nov17Nov2017.root"
TrigName = "IsoMu27_PtEtaBins/efficienciesDATA/abseta_pt_DATA"
elif(year=="2018"):
IdFile = TIMBERPATH+"TIMBER/data/OfficialSFs/EfficienciesStudies_UL2018_DEN_TrackerMuons_rootfiles_Efficiencies_muon_generalTracks_Z_Run2018_UL_ID.root"
IdName = "NUM_MediumID_DEN_TrackerMuons_abseta_pt"
IsoFile = TIMBERPATH+"TIMBER/data/OfficialSFs/EfficienciesStudies_UL2018_DEN_TrackerMuons_rootfiles_Efficiencies_muon_generalTracks_Z_Run2018_UL_ISO.root"
IsoName = "NUM_TightRelIso_DEN_MediumID_abseta_pt"
TrigFile1 = TIMBERPATH+"TIMBER/data/OfficialSFs/EfficienciesStudies_2018_trigger_EfficienciesAndSF_2018Data_BeforeMuonHLTUpdate.root"
TrigName1 = "IsoMu24_PtEtaBins/efficienciesDATA/abseta_pt_DATA"
TrigFile2 = TIMBERPATH+"TIMBER/data/OfficialSFs/EfficienciesStudies_2018_trigger_EfficienciesAndSF_2018Data_AfterMuonHLTUpdate.root"
TrigName2 = "IsoMu24_PtEtaBins/efficienciesDATA/abseta_pt_DATA"
lumiBefore= 8.950
lumiAfter = 50.789
if not isData:
if(year=="2016"):
IdCorr = Correction('IdSF',"TIMBER/Framework/src/TH2_SF.cc",constructor=['"{0}"'.format(IdFile1),'"{0}"'.format(IdName1),'"{0}"'.format(IdFile2),'"{0}"'.format(IdName2),'{0}'.format(lumiBCDEF/(lumiBCDEF+lumiGH)),'{0}'.format(lumiGH/(lumiBCDEF+lumiGH))],corrtype='weight',mainFunc="evalComb")
IsoCorr = Correction('IsoSF',"TIMBER/Framework/src/TH2_SF.cc",constructor=['"{0}"'.format(IsoFile1),'"{0}"'.format(IsoName1),'"{0}"'.format(IsoFile2),'"{0}"'.format(IsoName2),'{0}'.format(lumiBCDEF/(lumiBCDEF+lumiGH)),'{0}'.format(lumiGH/(lumiBCDEF+lumiGH))],corrtype='weight',mainFunc="evalComb")
TriggerCorr = Correction('TriggerEff',"TIMBER/Framework/src/TH2_SF.cc",constructor=['"{0}"'.format(TrigFile1),'"{0}"'.format(TrigName1),'"{0}"'.format(TrigFile2),'"{0}"'.format(TrigName2),'{0}'.format(lumiBCDEF/(lumiBCDEF+lumiGH)),'{0}'.format(lumiGH/(lumiBCDEF+lumiGH))],corrtype='weight',mainFunc="evalComb")
elif(year=="2017"):
IdCorr = Correction('IdSF',"TIMBER/Framework/src/TH2_SF.cc",constructor=['"{0}"'.format(IdFile),'"{0}"'.format(IdName)],corrtype='weight')
IsoCorr = Correction('IsoSF',"TIMBER/Framework/src/TH2_SF.cc",constructor=['"{0}"'.format(IsoFile),'"{0}"'.format(IsoName)],corrtype='weight')
TriggerCorr = Correction('TriggerEff',"TIMBER/Framework/src/TH2_SF.cc",constructor=['"{0}"'.format(TrigFile),'"{0}"'.format(TrigName)],corrtype='weight')
elif(year=="2018"):
IdCorr = Correction('IdSF',"TIMBER/Framework/src/TH2_SF.cc",constructor=['"{0}"'.format(IdFile),'"{0}"'.format(IdName)],corrtype='weight')
IsoCorr = Correction('IsoSF',"TIMBER/Framework/src/TH2_SF.cc",constructor=['"{0}"'.format(IsoFile),'"{0}"'.format(IsoName)],corrtype='weight')
TriggerCorr = Correction('TriggerEff',"TIMBER/Framework/src/TH2_SF.cc",constructor=['"{0}"'.format(TrigFile1),'"{0}"'.format(TrigName1),'"{0}"'.format(TrigFile2),'"{0}"'.format(TrigName2),'{0}'.format(lumiBefore/(lumiBefore+lumiAfter)),'{0}'.format(lumiAfter/(lumiBefore+lumiAfter))],corrtype='weight',mainFunc="evalComb")
genWeight = Correction('genWeightCorr',"TIMBER/Framework/src/generatorWeight.cc",constructor=[],corrtype='corr')
#STcorr = Correction('STcorr',"TIMBER/Framework/src/ST_weight.cc",constructor=[ '0.0','1.0'],corrtype='weight')#UPDATE WHEN REDERIVED!
#STcorr = Correction('STcorr',"TIMBER/Framework/src/TF1_weight.cc",constructor=[ '"STcorr_{0}.root"'.format(year[2:]),'"ST"','"STdown"','"STup"'],corrtype='weight')
#a.AddCorrection(STcorr,evalArgs={'var':'ST'})
a.AddCorrection(IdCorr,evalArgs={'pt':'lPt','eta':'lEta'})
a.AddCorrection(IsoCorr,evalArgs={'pt':'lPt','eta':'lEta'})
a.AddCorrection(TriggerCorr,evalArgs={'pt':'lPt','eta':'lEta'})
a.AddCorrection(genWeight,evalArgs={'genWeight':'genWeight'})
#a.MakeWeightCols('noSTCorr',dropList=["STcorr"])
a.MakeWeightCols()
weightString = "weight__nominal"
if(variation=="isoUp"):
weightString = "weight__IsoSF_up"
elif(variation=="isoDown"):
weightString = "weight__IsoSF_down"
elif(variation=="IdUp"):
weightString = "weight__IdSF_up"
elif(variation=="IdDown"):
weightString = "weight__IdSF_down"
elif(variation=="trigUp"):
weightString = "weight__TriggerEff_up"
elif(variation=="trigDown"):
weightString = "weight__TriggerEff_down"
pnetHi = 0.95
pnetLo = 0.80
pnetCuts = ["probeJetPNet>{0}".format(pnetHi),"probeJetPNet>{0} && probeJetPNet<{1}".format(pnetLo,pnetHi),"probeJetPNet>-0.001","probeJetPNet>-0.001 && probeJetPNet<{0}".format(pnetLo)]
pnetTags = ["T","L","I","AT"]
beforePnet = a.GetActiveNode()
for i in range(len(pnetCuts)):
a.SetActiveNode(beforePnet)
a.Cut("{0}_cut".format(pnetTags[i]),pnetCuts[i])
hMET = a.DataFrame.Histo1D(('{0}_MET_{1}'.format(options.process,pnetTags[i]),';MET [GeV];Events/100 GeV;',20,0,2000),"MET_pt",weightString)
hHT = a.DataFrame.Histo1D(('{0}_HT_{1}'.format(options.process,pnetTags[i]),';HT [GeV];Events/100;',20,0,2000),"HT",weightString)
hST = a.DataFrame.Histo1D(('{0}_ST_{1}'.format(options.process,pnetTags[i]),';ST [GeV];Events/100;',30,0,3000),"ST",weightString)
hPt = a.DataFrame.Histo1D(('{0}_lepton_pT_{1}'.format(options.process,pnetTags[i]),';pT [GeV];Events/100;',20,0,2000),"lPt",weightString)
histos.append(hMET)
histos.append(hHT)
histos.append(hST)
histos.append(hPt)
if not isData:
checkpoint = a.GetActiveNode()
hMassInclusive = a.DataFrame.Histo1D(('{0}_mSD_{1}'.format(options.process,pnetTags[i]),';mSD [GeV];Jets/10 GeV;',14,60,200),probeJetMassVar,weightString)
histos.append(hMassInclusive)
a.Cut("bqq_{0}".format(pnetTags[i]),"partonCategory==3")
hMassInclusive = a.DataFrame.Histo1D(('{0}_bqq_mSD_{1}'.format(options.process,pnetTags[i]),';mSD [GeV];Jets/10 GeV;',14,60,200),probeJetMassVar,weightString)
beforePTcut = a.GetActiveNode()
hMassLoPT = a.Cut("ptLoCutbqq_{0}".format(pnetTags[i]),"probeJetPt<500 && probeJetPt>300").DataFrame.Histo1D(('{0}_bqq_mSD_pTLo_{1}'.format(options.process,pnetTags[i]),';mSD [GeV];Jets/10 GeV;',14,60,200),probeJetMassVar,weightString)
a.SetActiveNode(beforePTcut)
hMassHiPT = a.Cut("ptHiCutbqq_{0}".format(pnetTags[i]),"probeJetPt>500").DataFrame.Histo1D(('{0}_bqq_mSD_pTHi_{1}'.format(options.process,pnetTags[i]),';mSD [GeV];Jets/10 GeV;',14,60,200),probeJetMassVar,weightString)
histos.append(hMassLoPT)
histos.append(hMassHiPT)
histos.append(hMassInclusive)
a.SetActiveNode(checkpoint)
a.Cut("bq_{0}".format(pnetTags[i]),"partonCategory==2")
hMassInclusive = a.DataFrame.Histo1D(('{0}_bq_mSD_{1}'.format(options.process,pnetTags[i]),';mSD [GeV];Jets/10 GeV;',14,60,200),probeJetMassVar,weightString)
beforePTcut = a.GetActiveNode()
hMassLoPT = a.Cut("ptLoCutbq_{0}".format(pnetTags[i]),"probeJetPt<500 && probeJetPt>300").DataFrame.Histo1D(('{0}_bq_mSD_pTLo_{1}'.format(options.process,pnetTags[i]),';mSD [GeV];Jets/10 GeV;',14,60,200),probeJetMassVar,weightString)
a.SetActiveNode(beforePTcut)
hMassHiPT = a.Cut("ptHiCutbq_{0}".format(pnetTags[i]),"probeJetPt>500").DataFrame.Histo1D(('{0}_bq_mSD_pTHi_{1}'.format(options.process,pnetTags[i]),';mSD [GeV];Jets/10 GeV;',14,60,200),probeJetMassVar,weightString)
histos.append(hMassLoPT)
histos.append(hMassHiPT)
histos.append(hMassInclusive)
a.SetActiveNode(checkpoint)
a.Cut("qq_{0}".format(pnetTags[i]),"partonCategory==1")
hMassInclusive = a.DataFrame.Histo1D(('{0}_qq_mSD_{1}'.format(options.process,pnetTags[i]),';mSD [GeV];Jets/10 GeV;',14,60,200),probeJetMassVar,weightString)
beforePTcut = a.GetActiveNode()
hMassLoPT = a.Cut("ptLoCutqq_{0}".format(pnetTags[i]),"probeJetPt<500 && probeJetPt>300").DataFrame.Histo1D(('{0}_qq_mSD_pTLo_{1}'.format(options.process,pnetTags[i]),';mSD [GeV];Jets/10 GeV;',14,60,200),probeJetMassVar,weightString)
a.SetActiveNode(beforePTcut)
hMassHiPT = a.Cut("ptHiCutqq_{0}".format(pnetTags[i]),"probeJetPt>500").DataFrame.Histo1D(('{0}_qq_mSD_pTHi_{1}'.format(options.process,pnetTags[i]),';mSD [GeV];Jets/10 GeV;',14,60,200),probeJetMassVar,weightString)
histos.append(hMassLoPT)
histos.append(hMassHiPT)
histos.append(hMassInclusive)
a.SetActiveNode(checkpoint)
a.Cut("unmatched_{0}".format(pnetTags[i]),"partonCategory==0")
hMassInclusive = a.DataFrame.Histo1D(('{0}_unmatched_mSD_{1}'.format(options.process,pnetTags[i]),';mSD [GeV];Jets/10 GeV;',14,60,200),probeJetMassVar,weightString)
beforePTcut = a.GetActiveNode()
hMassLoPT = a.Cut("ptLoCutunm_{0}".format(pnetTags[i]),"probeJetPt<500 && probeJetPt>300").DataFrame.Histo1D(('{0}_unmatched_mSD_pTLo_{1}'.format(options.process,pnetTags[i]),';mSD [GeV];Jets/10 GeV;',14,60,200),probeJetMassVar,weightString)
a.SetActiveNode(beforePTcut)
hMassHiPT = a.Cut("ptHiCutunm_{0}".format(pnetTags[i]),"probeJetPt>500").DataFrame.Histo1D(('{0}_unmatched_mSD_pTHi_{1}'.format(options.process,pnetTags[i]),';mSD [GeV];Jets/10 GeV;',14,60,200),probeJetMassVar,weightString)
histos.append(hMassLoPT)
histos.append(hMassHiPT)
histos.append(hMassInclusive)
else:
hMassInclusive = a.DataFrame.Histo1D(('{0}_mSD_{1}'.format(options.process,pnetTags[i]),';mSD [GeV];Jets/10 GeV;',14,60,200),probeJetMassVar,weightString)
beforePTcut = a.GetActiveNode()
hMassLoPT = a.Cut("ptLoCut_{0}".format(pnetTags[i]),"probeJetPt<500 && probeJetPt>300").DataFrame.Histo1D(('{0}_mSD_pTLo_{1}'.format(options.process,pnetTags[i]),';mSD [GeV];Jets/10 GeV;',14,60,200),probeJetMassVar,weightString)
a.SetActiveNode(beforePTcut)
hMassHiPT = a.Cut("ptHiCut_{0}".format(pnetTags[i]),"probeJetPt>500").DataFrame.Histo1D(('{0}_mSD_pTHi_{1}'.format(options.process,pnetTags[i]),';mSD [GeV];Jets/10 GeV;',14,60,200),probeJetMassVar,weightString)
histos.append(hMassLoPT)
histos.append(hMassHiPT)
histos.append(hMassInclusive)
in_f = ROOT.TFile(iFile)
#Grab cutflow histogram
for key in in_f.GetListOfKeys():
h = key.ReadObj()
hName = h.GetName()
if(hName=="Events"):
continue
h.SetDirectory(0)
histos.append(h)
out_f = ROOT.TFile(options.output,options.mode)
out_f.cd()
for h in histos:
if not isData:
h.SetName(h.GetName()+"_"+options.variation)
h.Write()
out_f.Close()
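# --- Added usage note (not in the original): based on the optparse options defined
# above, a typical invocation might look like (file names here are hypothetical):
#   python makeSemiLeptonicTemplates.py -i selection_ttbarSemi.root -o templates.root \
#       -p ttbarSemi -v nom -y 2016 -m RECREATE
# For "je*"/"sf*" variations the script swaps the input to the matching *_<var>.root tree.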
|
[
"matej.roguljic@cern.ch"
] |
matej.roguljic@cern.ch
|
8dc0940ac14fa2137b05281882c37b63abde2bd4
|
8d24418e352793aa9c4e20338cb07375e879a2a5
|
/STT.py
|
aa2f656b09f11be666e821b8c2c1604ef1659e4d
|
[] |
no_license
|
mezeru/Internet-Speedtest
|
78972db073b1c7fd9d8c526f4fcc7ec64f86200c
|
4daa5b1369d9d7a18f8bf71b7e7e121329837cb2
|
refs/heads/main
| 2023-01-06T12:38:06.665252
| 2020-11-07T10:09:10
| 2020-11-07T10:09:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
import speedtest
if __name__ == "__main__":
speed = speedtest.Speedtest()
print("Choose the units to be Displayed \n1)Mbps 2)MBps\n")
choice = int(input())
print("\n\nPlease Wait......\n\n")
if(choice == 1 or choice == 2):
print("The source is : ",)
Ds = speed.download()
Us = speed.upload()
servers = []
speed.get_servers(servers)
png = (speed.results.ping)
if choice == 1:
print("\nThe Download Speed is ",Ds/1000000," Mbps")
print("\nThe Upload Speed is ",Us/1000000," Mbps")
elif choice == 2:
print("\nThe Download Speed is ",Ds*0.000000125," MBps")
print("\nThe Upload Speed is ",Us*0.000000125," MBps")
print("\nThe Ping is : ",png," ms\n")
|
[
"noreply@github.com"
] |
mezeru.noreply@github.com
|
6ccffde61a2ac1b34e249255ef314491f213a582
|
06685b319ecbabaf87a77ba06fb9ff7072581e1d
|
/timelapse/Photo.py
|
739be2cc5a20fe9727072a9aa046aa414e4ad242
|
[
"MIT"
] |
permissive
|
tomhotch/timelapse
|
64b4ea35ad003ff834204ba1b717598b5a9cacf3
|
c4baf52b2ccce0978ab6281bc1b67731f8815b9a
|
refs/heads/master
| 2021-01-22T09:50:54.004645
| 2017-05-07T13:27:08
| 2017-05-07T13:27:08
| 55,226,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
import time
import picamera
def take_and_save_photo(camera_settings, file_path_name):
# Take a photo and save it in the given file path name
# file_path_name includes a relative or absolute path to the file
with picamera.PiCamera() as camera:
camera.resolution = (camera_settings.horizontal_resolution,
camera_settings.vertical_resolution)
camera.rotation = camera_settings.rotation
# TODO Do we want to add anything to exif data?
time.sleep(camera_settings.camera_start_up_time)
camera.capture(file_path_name)
return
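# --- Added usage sketch (not in the original): `camera_settings` is only duck-typed
# above, so here is a hypothetical settings object with the attributes the function reads.
from collections import namedtuple
CameraSettings = namedtuple(
    "CameraSettings",
    ["horizontal_resolution", "vertical_resolution", "rotation", "camera_start_up_time"])
# take_and_save_photo(CameraSettings(1920, 1080, 180, 2), "/tmp/timelapse_0001.jpg")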
|
[
"tomhotch@yahoo.com"
] |
tomhotch@yahoo.com
|
fb1d2f0d7ff511dafd8ee3da0267549af618152f
|
1d544794930ae2da3d4eb87e969ce04215ab87d9
|
/kpibrainstorm0.py
|
9365cb9057134dac6a815b83b13ab644b6842682
|
[] |
no_license
|
samhung19/kpi-brainstorm
|
7bba8dfc242fdd7b0e03f8dff5f7c0922a763433
|
41e2de50c7b032f3eb454e9a1a35192674e10357
|
refs/heads/master
| 2021-03-27T20:37:57.090205
| 2017-07-20T00:26:38
| 2017-07-20T00:26:38
| 95,706,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 758
|
py
|
import cv2
import numpy as np
cap = cv2.VideoCapture('lalalala.mp4')
framecount = 0
while True:
framecount += 1
    ret, frame = cap.read()
    if not ret:  # stop at end of video instead of indexing a None frame
        break
roi = frame[65:75, 985:995]
cv2.rectangle(frame, (982,62), (998,78), (0, 255, 0), 2) #highlight region of interest
cv2.namedWindow('frame', cv2.WINDOW_NORMAL) #this reframes the window so it fits screen
cv2.imshow('frame', frame)
b, g, r = frame[70][990] #[row][column]
print("framecount: ", framecount, "r: ",r ,"g: ",g,"b: ", b)
if r>250 and g > 250 and b > 250:
print(framecount)
cv2.imwrite('frame%i.jpg' %framecount, frame)
break
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
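# --- Added note (not in the original): to convert the frame count found above into
# a timestamp, divide by the video's frame rate (a standard OpenCV property):
#   fps = cap.get(cv2.CAP_PROP_FPS)
#   seconds = framecount / fps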
|
[
"noreply@github.com"
] |
samhung19.noreply@github.com
|
28253f6b96351295cc00cef6d1448e446c31f212
|
672b57ee6ad36cab8eff4802c423d5f836ebcab0
|
/scraper/management/commands/scrape.py
|
e865b940702992b8b4392daebe620187f6ad9c2f
|
[] |
no_license
|
stanislavn/thrustfeed
|
a6b76dd485c80c1a16156930d078eb67267ec30d
|
b6a79d11b777048ff4f93629eea70c161f612d33
|
refs/heads/master
| 2023-02-18T19:22:25.228888
| 2021-01-24T13:08:26
| 2021-01-24T13:08:26
| 332,446,445
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,185
|
py
|
# importing the required modules
import numpy as np
import requests
import xml.etree.ElementTree as ET
import time
import urllib.request
import extruct
from w3lib.html import get_base_url
from scraper.models import Product
from django.db import IntegrityError
from fake_useragent import UserAgent
import sys
from bs4 import BeautifulSoup
start_time = time.time()
ua = UserAgent()
#UCWEB/2.0 (compatible; Googlebot/2.1; +google.com/bot.html)
headers = {
"User-Agent": ua.random,
'Referer': 'www.google.com'
}
# url of rss feed
url = 'https://www.tinte24.de/sitemap/devices.xml'
req = urllib.request.Request(
url=url,
data=None,
headers=headers
)
#response = urllib.request.urlopen(req).read()
response = requests.get(url, headers=headers, timeout=5)
root = ET.fromstring(response.text)
urls_to_scrape = []
i = 0
for url in root.findall('{http://www.sitemaps.org/schemas/sitemap/0.9}url'):
#i = i + 1
loc = url.find('{http://www.sitemaps.org/schemas/sitemap/0.9}loc').text
urls_to_scrape.append(loc)
#if '/Tinte/' in loc:
# urls_to_scrape.append(loc)
#if '/Toner/' in loc:
# urls_to_scrape.append(loc)
urls_to_scrape = list(set(urls_to_scrape))
#print(f'Will scrape {len(urls_to_scrape)} products out of a total of {i}')
print(f'Will scrape {len(urls_to_scrape)} products')
j = 0
for url_to_scrape in urls_to_scrape:
data_list = []
#cached_url = 'http://webcache.googleusercontent.com/search?q=cache:' + url_to_scrape
j = j + 1
print(f'Starting scraping {j}/{len(urls_to_scrape)} url: {url_to_scrape}')
try:
#print(ua.random)
r = requests.get(url_to_scrape, headers=headers, timeout=5)
#print(r.text)
soup = BeautifulSoup(r.content, 'html.parser')
except:
sys.exit("Cant load website. Check connection.")
base_url = get_base_url(r.text, r.url)
data = extruct.extract(r.text, base_url=base_url, uniform=True, syntaxes=['rdfa', 'json-ld'])
data_list = data['json-ld']
print(data_list)
if data_list == []:
sys.exit("Website did not respond correctly, quitting")
for data_dict in data_list:
compatible = soup.find("div", class_='compatible')
        print('compatible with', compatible)
brand=color=depth=gtin12=logo=manufacturer=mpn=sku=alternateName=description=image=name = ''
price = data_dict['offers']['price']
priceCurrency = data_dict['offers']['priceCurrency']
try:
name = data_dict['name']
image = data_dict['image']
url = data_dict['offers']['url']
brand = data_dict['brand']
color = data_dict['color']
depth = data_dict['depth']
gtin12 = data_dict['gtin12']
logo = data_dict['logo']
manufacturer = data_dict['manufacturer']
mpn = data_dict['mpn']
sku = data_dict['sku']
alternateName = data_dict['alternateName']
description = data_dict['description']
except:
            print("can't get all parameters")
availability = data_dict['offers']['availability']
if 'InStock'.lower() in availability.lower():
availability = "In stock"
elif 'OutOfStock'.lower() in availability.lower():
availability = "Out Of Stock"
        elif 'PreOrder'.lower() in availability.lower():
            availability = "Preorder"
        else:
            availability = ""
            print("can't get availability")
        print('availability is', availability)
itemCondition = data_dict['offers']['itemCondition']
if 'NewCondition'.lower() in itemCondition.lower():
itemCondition = "New"
elif 'UsedCondition'.lower() in itemCondition.lower():
itemCondition = "Used Condition"
elif 'RefurbishedCondition'.lower() in itemCondition.lower():
itemCondition = "Refurbished Condition"
elif 'DamagedCondition'.lower() in itemCondition.lower():
itemCondition = "Damaged Condition"
else:
itemCondition = ""
if name != '':
try:
p = Product(availability=availability, itemCondition=itemCondition, price=price, priceCurrency=priceCurrency,
url=url, brand=brand, color=color, depth=depth, gtin12=gtin12, logo=logo,
manufacturer=manufacturer, mpn=mpn, sku=sku, alternateName=alternateName, description=description,
image=image, name=name, compatible=compatible)
p.save()
            print('saved')
except IntegrityError:
print("Cant scrape already existing url ", url_to_scrape)
pass
else:
pass
    print(f'Scraped {j} products out of a total of {len(urls_to_scrape)}')
delays = [7, 24, 22, 12, 30, 19]
delay = np.random.choice(delays)
print('waiting ',delay)
time.sleep(delay)
#except:
# print("Cant scrape url ", url_to_scrape)
finish_time = time.time()
elapsed_time = finish_time - start_time
print(f'The script ran for {elapsed_time} seconds.')
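# --- Added usage note (not in the original): given this file's path
# (scraper/management/commands/scrape.py) it is meant to run as a Django management
# command, i.e. `python manage.py scrape`; note the module currently executes its
# scraping loop at import time rather than inside a Command.handle() method.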
|
[
"29331439+stanislavn@users.noreply.github.com"
] |
29331439+stanislavn@users.noreply.github.com
|
8b3f32f97c1f1d0f6fb4c91406e974436d1e30ea
|
44b9c654ba58adeb7213d80dfcf22dd4794f08dc
|
/util/RiskParityPortfolio.py
|
6f423b7c42564c8fc98efcb9dff1447304f7f2de
|
[] |
no_license
|
handrew/all-weather-for-noobs
|
437be02f7677e96dbceeb16f815a4a8cd19f72bb
|
5f2b57acea381ed110e6ec157cd50af1b8872d94
|
refs/heads/master
| 2023-07-15T04:58:42.111046
| 2023-06-21T20:52:35
| 2023-06-21T20:52:35
| 65,438,423
| 5
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,291
|
py
|
"""RiskParityPortfolio object. Optimizes weights to be inversely proportional
to the volatility of each asset. This approach does not not assume any
correlations – for a risk parity approach that accounts for correlation, use
EqualRiskContributionPortfolio.
"""
import pdb
from typing import List
import numpy as np
import pandas as pd
from .asset import Asset
from .portfolio import Portfolio
class RiskParityPortfolio(Portfolio):
"""Optimizes weights to be inversely proportional to the volatility of each
asset. This approach does not not assume any correlations – for a risk
parity approach that accounts for correlation, use
EqualRiskContributionPortfolio.
"""
def __init__(self, assets, window=60, periodicity=1, volatility_target=0.1):
has_portfolio_objs = any([isinstance(a, Portfolio) for a in assets])
if has_portfolio_objs:
raise ValueError(
"RiskParityPortfolio can not " "accept Portfolio in param `assets`."
)
super(RiskParityPortfolio, self).__init__(
assets,
window=window,
periodicity=periodicity,
volatility_target=volatility_target,
)
def optimize(self, as_of_date=None):
"""Solves for inverse-volatility weights.
@param as_of_date: datetime object
@return: {asset_name: {"asset": Asset, "weight": float}}
"""
most_recent_vols: List[float] = [
asset.last_volatility(
window=self.window, periodicity=self.periodicity, as_of_date=as_of_date
)
for asset in self.assets
]
asset_df = pd.DataFrame([self.assets, most_recent_vols]).T
asset_df.columns = ["asset", "vol"]
asset_df = asset_df.dropna().reset_index() # get rid of None vols
weights_i: List[float] = []
std_inv = 1.0 / np.sqrt(asset_df["vol"].astype(float))
weights_i = list(std_inv / std_inv.sum())
asset_df["weights"] = pd.Series(weights_i)
# Make sure that volatility contributions are all the same.
try:
vol_contributions = (asset_df["weights"] ** 2) * asset_df["vol"]
for i in range(1, len(vol_contributions)):
diff = abs(vol_contributions[0] - vol_contributions[i])
assert diff <= 1e-4
except AssertionError:
pdb.set_trace()
# Put it in return format.
allocations: dict = {}
for i in range(len(asset_df)):
curr_asset: Asset = asset_df["asset"].iloc[i]
weight = asset_df["weights"].iloc[i]
vol_cont = (weight ** 2) * asset_df["vol"].iloc[i]
allocations[curr_asset.name] = {
"asset": curr_asset,
"weight": weight,
"vol_contribution": vol_cont,
}
if self.volatility_target:
# Scale to vol target.
vol_contributions = [
allocations[name]["vol_contribution"] for name in allocations
]
portfolio_vol = np.sum(vol_contributions)
vol_scale = np.sqrt(self.volatility_target / portfolio_vol)
for name in allocations:
allocations[name]["weight"] = allocations[name]["weight"] * vol_scale
# Update vol contributions.
asset_df["name"] = asset_df["asset"].apply(lambda x: x.name)
for name in allocations:
weight = allocations[name]["weight"]
vol_cont = (weight ** 2) * asset_df[asset_df["name"] == name][
"vol"
].iloc[0]
allocations[name]["vol_contribution"] = vol_cont
# Check that everything is right.
vol_contributions = [
allocations[name]["vol_contribution"] for name in allocations
]
try:
diff = abs(np.sum(vol_contributions) - self.volatility_target)
assert diff <= 1e-4
for i in range(1, len(vol_contributions)):
diff = abs(vol_contributions[0] - vol_contributions[i])
assert diff <= 1e-4
except AssertionError:
pdb.set_trace()
return allocations
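# --- Added worked example (not in the original), as plain numbers: with "vol"
# holding variances [0.04, 0.01], std_inv = [1/0.2, 1/0.1] = [5, 10], so the
# weights are [1/3, 2/3]; the per-asset contributions w_i^2 * vol_i are then
# (1/9)*0.04 = (4/9)*0.01 ~= 0.00444, equal, as the assertion above requires.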
|
[
"handrew11@gmail.com"
] |
handrew11@gmail.com
|
2ce81be387ddd1049a70ef86640fe60c543417eb
|
cf149e692b5abcb1c8ec9e86aaf1c52e71127da5
|
/particionador_de_audio.py
|
91e14517beda3256d4d310b954a9666fb60589ee
|
[] |
no_license
|
rafael03/Conversores
|
2f0d1a4a3b33d3c93024b921cfdb22282af10419
|
093096a658e6e9db26090920203febd70d5b2cd1
|
refs/heads/master
| 2021-01-13T13:15:33.485048
| 2016-11-03T00:42:44
| 2016-11-03T00:42:44
| 72,692,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 743
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from os import walk
import sys
# print sys.argv[1]
lista_de_arquivos = []
for (dirpath, dirnames, filenames) in walk(os.getcwd()):
lista_de_arquivos.extend(filenames)
break
for arquivo in lista_de_arquivos:
if arquivo[-4:] == ".mp3":
os.system("mp3splt %s -S 7" % arquivo)
os.system("rm %s" % arquivo)
lista_particionada = []
for (dirpath, dirnames, filenames) in walk(os.getcwd()):
lista_particionada.extend(filenames)
break
lista_particionada.sort()
print "lista normal", lista_particionada
for arquivo in range(0, len(lista_particionada)):
if lista_particionada[arquivo][-4:] == ".mp3":
os.system("mv %s %s" % (lista_particionada[arquivo], str(arquivo)))
|
[
"noreply@github.com"
] |
rafael03.noreply@github.com
|
3f9f14f0bbc5f8fa531618edc817cd13a2a7ea16
|
140bc1bb4b2a68f71d7fa7e4bbcf22da824a645c
|
/first_occurrence.py
|
f3ed1722827dbfda78cf82c3d6b6c86e1b4c3a99
|
[] |
no_license
|
rmorency40/python-projects
|
3371ac7b6cbd70b722bf5c0733326d007fbf6f8e
|
00ec7bade93d0f7afd71203731f18a2e99140955
|
refs/heads/master
| 2022-12-14T11:07:15.144179
| 2020-09-18T16:21:59
| 2020-09-18T16:21:59
| 288,572,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
#!/usr/bin/python
string = input("enter your own string : ")
char = input("Enter your own character: ")
flag = 0
#if not string:
# print("this is not a string")
for i in range(len(string)):
if (string[i] == char):
flag = 1
break
if (flag == 0):
print("Sorry, we haven't found the search character in this string")
else:
print("The first occurence of", char, "is found at position", i + 1)
|
[
"30129346+rmorency40@users.noreply.github.com"
] |
30129346+rmorency40@users.noreply.github.com
|
fb483adff09210c3a8dea90d203b5b070f3768fb
|
84379e15e54ba79b7e63c1fceecf712b46f22977
|
/apps/decks/migrations/0016_auto_20181011_1715.py
|
2ac96bd86b326bc8447c68610a43fbba4554b4f0
|
[] |
no_license
|
CoderEnko007/HearthStoneStationBackend
|
a1d74c324233ebd617ad01df13bc609d1f1aa2f6
|
6cc92cb806f19f2a2a0596645028cfe2fa5895d6
|
refs/heads/master
| 2022-12-11T23:20:24.335737
| 2022-09-18T07:04:08
| 2022-09-18T07:04:08
| 144,392,864
| 0
| 0
| null | 2022-12-08T02:22:42
| 2018-08-11T14:40:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 831
|
py
|
# Generated by Django 2.0.4 on 2018-10-11 17:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('decks', '0015_auto_20180928_1019'),
]
operations = [
migrations.AddField(
model_name='decks',
name='real_game_count',
field=models.IntegerField(blank=True, null=True, verbose_name='实际对局数'),
),
migrations.AddField(
model_name='trending',
name='real_game_count',
field=models.IntegerField(blank=True, null=True, verbose_name='实际对局数'),
),
migrations.AlterField(
model_name='decks',
name='game_count',
field=models.IntegerField(blank=True, null=True, verbose_name='对局数'),
),
]
|
[
"yf381966217@163.com"
] |
yf381966217@163.com
|
a6e6e3dc88952cfbd159649b851d165f6ccff89b
|
138cf8c6710e844541e23b8a88f42b0bf695b655
|
/547. Number of Provinces.py
|
7ad052955bcaccc2aaafd04922835080b85c4eb8
|
[] |
no_license
|
cherryzoe/Leetcode
|
58fb1c353c57ce02a8cf3c345caeeacb1e1bdfaf
|
f8fd6bb130a4d55d83d9bc07caac53c7e0a26afd
|
refs/heads/master
| 2023-01-11T10:48:53.214138
| 2022-12-29T20:20:41
| 2022-12-29T20:20:41
| 40,199,293
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,732
|
py
|
# BFS / DFS time complexity is O(n^2), where n is the number of cities: we must scan the full n^2 adjacency matrix
class Solution(object):
def findCircleNum(self, isConnected):
"""
:type isConnected: List[List[int]]
:rtype: int
"""
# dfs solution:
# loop through all cites, from 1 - n.
# find a city,if it is not visited, -> increase province count and explore its connected city
self.n = len(isConnected)
visited = set()
cnt = 0
for i in range(self.n):
if i not in visited:
cnt += 1
self.explore(i, isConnected, visited)
return cnt
def explore(self, i, isConnected, visited):
visited.add(i)
for j in range(self.n):
if isConnected[i][j] == 1 and j not in visited:
self.explore(j, isConnected, visited)
# BFS Version
import collections  # needed for collections.deque below
class Solution(object):
def findCircleNum(self, isConnected):
"""
:type isConnected: List[List[int]]
:rtype: int
"""
# bfs version
q = collections.deque()
n = len(isConnected)
cnt = 0
visited = [False for _ in range(n)]
for i in range(n):
if visited[i] == False:
q.append(i)
visited[i] = True
cnt += 1
while q:
cur = q.popleft()
# visited[cur] = True
for j in range(n):
nx = isConnected[cur][j]
if not visited[j] and nx == 1:
q.append(j)
visited[j] = True
return cnt
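# --- Added usage sketch (not in the original), using the classic example matrix:
# cities 0 and 1 are connected, city 2 stands alone, so there are 2 provinces.
#   print(Solution().findCircleNum([[1, 1, 0], [1, 1, 0], [0, 0, 1]]))  # 2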
|
[
"noreply@github.com"
] |
cherryzoe.noreply@github.com
|
d7aa8e7ee0133ca62e4816336b143153993ef60f
|
107a49df30fd8b300cca888cf3a41e2cac4ba9c3
|
/crawler/items.py
|
cec0d679422fb725c76be4b9f4042ec615922def
|
[
"MIT"
] |
permissive
|
oliverdelacruz/crawler
|
6c8c0dd1e4894064b1f882ea1ddc5b2125697327
|
ef8688807af7722ecc882b2b62a7ba5a78e49513
|
refs/heads/master
| 2020-07-22T05:43:22.927185
| 2020-01-18T21:26:34
| 2020-01-18T21:26:34
| 207,090,871
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class CrawlerItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
class AmazonItem(scrapy.Item):
# define the fields for your item here like:
product_name = scrapy.Field()
product_sale_price = scrapy.Field()
product_category = scrapy.Field()
product_original_price = scrapy.Field()
product_availability = scrapy.Field()
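# --- Added usage sketch (not in the original): scrapy.Item behaves like a dict,
# so a spider would populate AmazonItem field by field, e.g.:
#   item = AmazonItem(product_name="example", product_sale_price="9.99")
#   item["product_category"] = "books"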
|
[
"deoliver@student.ethz.ch"
] |
deoliver@student.ethz.ch
|
1b9f51303dca395e8adaa69dbf9242d4fbb2ad07
|
20da145915700a20487fb1000adbe2bfc4042e9e
|
/sandersfeatures/tweet_pca.py
|
0659fe0f2bd6ed8fd61a6f08920256ae9153f0e6
|
[] |
no_license
|
yogeshg/Twitter-Sentiment
|
d362b02c895681c61f8f4ed137da5b481076eb73
|
2ade1641d7bddaa0ef3423f9d4a160edb9ae593f
|
refs/heads/master
| 2023-03-08T17:42:53.473532
| 2022-12-01T02:21:20
| 2022-12-01T02:21:20
| 15,335,630
| 74
| 40
| null | 2023-03-01T08:48:36
| 2013-12-20T10:22:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,379
|
py
|
"""
@package tweet_pca
PCA for dimensionality reduction.
"""
import mdp, numpy
import tweet_features
import pdb
def tweet_pca_reduce( tweets_train, tweets_test, output_dim ):
# convert dictionary feature vecs to numpy array
print '--> Converting dictionaries to NumPy arrays'
train_arr = numpy.array( [tweet_features.tweet_dict_to_nparr(t) for \
(t,s) in tweets_train])
test_arr = numpy.array( [tweet_features.tweet_dict_to_nparr(t) for \
(t,s) in tweets_test])
    # compute principal components over training set
    print '--> Computing PCA'
pca_array = mdp.pca( train_arr.transpose(), \
svd=True, output_dim=output_dim )
    # project both train and test sets to PC space
print '--> Projecting feature vectors to PC space'
train_arr = numpy.dot( train_arr, pca_array )
test_arr = numpy.dot( test_arr, pca_array )
# convert projected vecs back to reduced dictionaries
print '--> Converting NumPy arrays to dictionaries'
reduced_train = \
zip( [tweet_features.tweet_nparr_to_dict(v) for v in train_arr], \
[s for (t,s) in tweets_train] )
reduced_test = \
zip( [tweet_features.tweet_nparr_to_dict(v) for v in test_arr], \
[s for (t,s) in tweets_test])
return (reduced_train, reduced_test)
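# --- Added note (not in the original): the projection above is just a matrix
# product. If X is (n_samples, n_features) and W = pca_array is
# (n_features, output_dim), then numpy.dot(X, W) yields the
# (n_samples, output_dim) coordinates of each sample in principal-component space.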
|
[
"yogeshg91@gmail.com"
] |
yogeshg91@gmail.com
|
2e1932ed858673f53ed800edcdc60355be87bf3d
|
7739128a2e4112317f2dfec6bb4a04033b2a0d5a
|
/venv/bin/pip3
|
c14562829e6a306b1be02ec70927e1caf314f232
|
[] |
no_license
|
basharE/sixthLesson
|
e5a3d19f1abe888c4c6ccaebafe9f879f8604038
|
aa50cb1d1b38733e92a27dfa7c66644eb4f0023f
|
refs/heads/master
| 2023-03-05T10:54:10.806231
| 2021-02-14T18:48:15
| 2021-02-14T18:48:15
| 338,868,837
| 0
| 0
| null | 2021-02-14T18:42:28
| 2021-02-14T18:08:40
|
Python
|
UTF-8
|
Python
| false
| false
| 270
|
#!/Users/basharegbariya/PycharmProjects/sixthLesson/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"e.bashar.t@gmail.com"
] |
e.bashar.t@gmail.com
|
|
f8e9c00352aa73396ea5b86fe09beb3dca7d00de
|
582c42e5aa6611433e7f8cad225f5c9fd8c2c3ba
|
/client/__init__.py
|
692f6087bc05e1b6806fb5b7228444fe8572f9b3
|
[
"MIT"
] |
permissive
|
Soulou/msc-thesis-container-balancer-client
|
7b44fafb473caec787adb1c65b25d863738d8534
|
05ea1fc0005733ec2be25494bc2d0b216fd19fa5
|
refs/heads/master
| 2021-01-02T10:25:36.265062
| 2014-08-10T23:00:48
| 2014-08-10T23:00:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
from .client import *
from .status import *
from .start import *
from .stop import *
from .migrate import *
from .node_status import *
from .nodes_status import *
from .container_status import *
from .balance import *
Client.start = start
Client.stop = stop
Client.stop_service = stop_service
Client.stop_all = stop_all
Client.status = status
Client.migrate = migrate
Client.node_status = node_status
Client.nodes_status = nodes_status
Client.container_status = container_status
Client.balance = balance
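# --- Added note (not in the original): the assignments above monkey-patch the
# free functions imported from the sibling modules onto Client, presumably so
# callers get a single object API, e.g. client.node_status(...) rather than
# node_status(client, ...).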
|
[
"leo@unbekandt.eu"
] |
leo@unbekandt.eu
|
288c78dc9899d83b795e90be1572c02ebf22ea94
|
c7517291eeb44cfb829373c6c233e408237b4af2
|
/chapter_4/queue.py
|
662d8cab35ba148d6e51850481436cfe8568b81e
|
[
"MIT"
] |
permissive
|
elishaking/CTCi
|
fbec515ad26a4d11ddbb6e479ad11bd0e8a083e0
|
6a91fd67e8765e5abef72c5b247f4d5444945438
|
refs/heads/master
| 2022-11-17T13:18:22.589740
| 2020-07-08T16:53:42
| 2020-07-08T16:53:42
| 272,730,371
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,525
|
py
|
class Node:
def __init__(self, value=0, next=None):
self.value = value
self.next = next
def __str__(self):
return str(self.value)
class LinkedList:
def __init__(self, head: Node = None):
self.head = head
self.tail = self.head
def push(self, value: int, node: Node = None):
if node:
new_node = node
else:
new_node = Node(value=value)
        if self.head is None:
self.head = new_node
self.tail = self.head
return self
self.tail.next = new_node
self.tail = new_node
return self
def unshift(self, value: int):
new_node = Node(value=value, next=self.head)
self.head = new_node
return self
def shift(self):
node = self.head
self.head = self.head.next
return node
def insert(self, value: int, index: int):
current_node = self.head
current_index = 0
if index == 0:
return self.unshift(value)
while current_node:
if current_index == index - 1:
new_node = Node(value=value, next=current_node.next)
current_node.next = new_node
return self
current_index += 1
current_node = current_node.next
raise Exception('index out of range')
    def delete(self, node: Node):
        if node == self.head:
            temp_node = self.head
            self.head = self.head.next
            del temp_node
            return self  # stop here; otherwise the loop below dereferences a possibly empty list
prev_node = self.head
current_node = self.head.next
while current_node:
if node == current_node:
prev_node.next = current_node.next
del current_node
return self
prev_node = prev_node.next
current_node = current_node.next
return self
def __str__(self):
values = []
current_node = self.head
while current_node:
values.append(current_node.value)
current_node = current_node.next
return str(values)
class Queue:
def __init__(self):
self.values = LinkedList()
def add(self, value):
self.values.push(value)
return self
def remove(self):
return self.values.shift()
def peek(self):
return self.values.head.value
def is_empty(self):
        return self.values.head is None
def __str__(self):
return str(self.values)
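# --- Added usage sketch (not in the original), exercising the FIFO behaviour:
#   q = Queue()
#   q.add(1).add(2).add(3)   # add() returns self, so calls chain
#   print(q.peek())          # 1
#   print(q.remove())        # prints 1 (remove() returns the shifted Node)
#   print(q)                 # [2, 3]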
|
[
"ek.chibueze@gmail.com"
] |
ek.chibueze@gmail.com
|
bb7dd72cb79f5b8c3c70bc802cfd22bd72c2a811
|
bd149e195fbe5fa2aa0ca2dd88989e4f0b6bac5c
|
/pytest_1/test_fixture.py
|
76500e993d5fbb935868f3b2f74a563d6e67d356
|
[] |
no_license
|
sanjidaoli/pytest1
|
b0f9c59663146cdce72049598c00d68f07c05de8
|
39ef6ae2c2935c800672cbe4bf4664c2595c6c51
|
refs/heads/master
| 2023-07-26T19:47:01.563951
| 2021-09-12T15:35:22
| 2021-09-12T15:35:22
| 348,737,475
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
#!/usr/bin/env python
#!-*- coding:utf-8 -*-
import pytest
@pytest.fixture()
def login():
print("这是个登录方法")
return ('tom;','123')
@pytest.fixture()
def operate():
print("登录后的操作")
def test_case1(login,operate):
print(login)
print("test_case1,需要登录")
def test_case2():
print("test_case2,不需要登录")
def test_case3(login):
print(login)
print("test_case3,需要登录")
|
[
"123903159@qq.com"
] |
123903159@qq.com
|
2978555e84a362cb5ed925eb7a8317d2a53cace9
|
d85d078dd2eea23083c3b5e8fb7bed5b7aa96b00
|
/generic/management/commands/create_proxy_permissions.py
|
ee4e40f74deb80c7dc97eaaf8a508c54e2fb2370
|
[] |
no_license
|
ixc/django-generic
|
1ef3c27ac89d582d3b9fc3afd743d97ccda4a68a
|
fc17e7907162829faaf80cd2af605357b204a315
|
refs/heads/master
| 2022-12-04T19:52:26.700398
| 2019-02-20T13:16:07
| 2019-02-20T13:16:07
| 287,650,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,036
|
py
|
from django.core.management.base import AppCommand
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
from django.contrib.auth.management import _get_all_permissions
class Command(AppCommand):
help = 'Creates permissions for proxy models; see Django #11154.'
def handle_app(self, app, **options):
app_name = app.__name__.split('.')[-2] # app is the models module
for ctype in ContentType.objects.filter(
app_label=app_name, permission__isnull=True
):
for codename, name in _get_all_permissions(
ctype.model_class()._meta
):
p, created = Permission.objects.get_or_create(
codename=codename,
content_type__pk=ctype.id,
defaults={'name': name, 'content_type': ctype})
if created:
if options.get('verbosity', 1) >= 1:
self.stdout.write("Created: %s\n" % (p,))
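# --- Added usage note (not in the original): as an AppCommand this is invoked per
# app label, e.g. `python manage.py create_proxy_permissions myapp`; it back-fills
# Permission rows for proxy-model content types that have none (Django ticket #11154).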
|
[
"simon@simonmeers.com"
] |
simon@simonmeers.com
|
6f49e68819abe8b1d485500c72678faf77327817
|
146012dda21ab72badad6daa8f98e6b26fedb128
|
/08day/04-练习求和.py
|
c539322bdecb93e196c838806f2fc360f0cb12e3
|
[] |
no_license
|
fengshuai1/1805
|
41786c3561beca580ba82d9e9d4347571e38e198
|
8dc3e6605cc1d6f91685ae45bfebfc062f0aa489
|
refs/heads/master
| 2020-03-19T07:41:40.608389
| 2018-06-28T01:45:43
| 2018-06-28T01:45:43
| 136,140,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
c = 0
m = 0
while c <= 100:
    print("current number: %d" % c)
    m = c + m  # accumulate before incrementing, so the sum covers 0..100
    c += 1
print("the sum is %d" % m)
|
[
"1329008013@qq.com"
] |
1329008013@qq.com
|
4acb6cd3c096137fa35422140bd2fb23a7a5a1ca
|
b16abb62b283064bd2fa4819a711578658759c7b
|
/zhihu.py
|
901c9bd5f90425194b7b793230c9c22f6df0872f
|
[] |
no_license
|
pfDou/insects-of-zhihu-hot-topics
|
5bfa66f9775de98b4c6dc58abbe1620f0bbd5a95
|
0f338ec287d1832a1792ad613fb65858329982e7
|
refs/heads/master
| 2021-01-23T22:10:46.731370
| 2015-05-09T14:36:01
| 2015-05-09T14:36:01
| 32,391,701
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,082
|
py
|
# -*- coding: utf-8 -*-
import urllib.request
from bs4 import BeautifulSoup
import re
import io
import sys
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding="utf8")
main_page = "http://www.zhihu.com/explore#daily-hot"
urllines = urllib.request.urlopen(main_page) #<class 'http.client.HTTPResponse'>
page_data = urllines.read() #<class 'bytes'>
urllines.close()
soup = BeautifulSoup(page_data, "html.parser") #<class 'bs4.BeautifulSoup'>; explicit parser avoids bs4's guessed-parser warning
#f = open("zhihu.txt","w")
hot_topics = soup.find('div', attrs = {"data-type":"daily"}).children
output = []
for item in list(hot_topics):
if item.string:
pass # navigableString type, maybe space line in the source page
else:
output.append({})
q_index = int(item["data-offset"])-1
print(item["data-offset"])
href = item.h2.a["href"]
question = item.h2.a.string
print("Question:", question)
#answer page's url
url = "http://www.zhihu.com" + href
print("answer address:",url)
#open answer page get the answer
sub_urllines = urllib.request.urlopen(url) #<class 'http.client.HTTPResponse'>
sub_page_data = sub_urllines.read() #<class 'bytes'>
sub_urllines.close()
        sub_soup = BeautifulSoup(sub_page_data, "html.parser")
# print(sub_soup.title)
favorer_num = sub_soup.find("span", class_="count").text
print("favorer_num:",favorer_num)
brief_Q = sub_soup.find("div", class_="zm-editable-content").text
print("Question's brief:",brief_Q)
# test = sub_soup.find_all("div", attrs={"class":"zm-editable-content"})
# for i in test:
# print(i["class"])
answer_head = sub_soup.find("div", class_="answer-head")
author = sub_soup.find("a", class_="zm-item-link-avatar").next_sibling.next_sibling.string
print("author:", author)
author_qg = sub_soup.find("a", class_="zm-item-link-avatar").next_sibling.next_sibling.next_sibling.next_sibling.string
print("author's qg:", author_qg)
#answer = sub_soup.find_all("div", attrs={"class":"zm-editable-content"})[2].text#get_text()
answer = sub_soup.find("div", class_=" zm-editable-content clearfix").text
print("Answer:", answer)
|
[
"372167676@qq.com"
] |
372167676@qq.com
|
d7dc7236ef3c00feb2d661cbb1f257e5ad610ec1
|
7a93b7817b343b9da49662948e922684b94d1d40
|
/jstorm-client/src/main/py/storm/Nimbus-remote
|
2aae8a4fa0862207f6473a1653963e9e2abfbd00
|
[
"Apache-2.0"
] |
permissive
|
caiyifeng/learn_jstorm
|
c2aeeb9dd6d29ddf4c7593347b537e049fa57b4a
|
61d27beea51a5c37e00193ad4c464b58ded156c1
|
refs/heads/master
| 2020-12-26T21:49:50.380442
| 2015-05-03T14:43:19
| 2015-05-03T14:43:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,717
|
#!/usr/bin/env python
#
# Autogenerated by Thrift Compiler (0.7.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
import sys
import pprint
from urlparse import urlparse
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.transport import THttpClient
from thrift.protocol import TBinaryProtocol
import Nimbus
from ttypes import *
if len(sys.argv) <= 1 or sys.argv[1] == '--help':
print ''
print 'Usage: ' + sys.argv[0] + ' [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]'
print ''
print 'Functions:'
print ' void submitTopology(string name, string uploadedJarLocation, string jsonConf, StormTopology topology)'
print ' void submitTopologyWithOpts(string name, string uploadedJarLocation, string jsonConf, StormTopology topology, SubmitOptions options)'
print ' void killTopology(string name)'
print ' void killTopologyWithOpts(string name, KillOptions options)'
print ' void activate(string name)'
print ' void deactivate(string name)'
print ' void rebalance(string name, RebalanceOptions options)'
print ' void metricMonitor(string name, MonitorOptions options)'
print ' void beginLibUpload(string libName)'
print ' string beginFileUpload()'
print ' void uploadChunk(string location, string chunk)'
print ' void finishFileUpload(string location)'
print ' string beginFileDownload(string file)'
print ' string downloadChunk(string id)'
print ' string getNimbusConf()'
print ' ClusterSummary getClusterInfo()'
print ' TopologyInfo getTopologyInfo(string id)'
print ' SupervisorWorkers getSupervisorWorkers(string host)'
print ' string getTopologyConf(string id)'
print ' StormTopology getTopology(string id)'
print ' StormTopology getUserTopology(string id)'
print ' TopologyMetricInfo getTopologyMetric(string id)'
print ''
sys.exit(0)
pp = pprint.PrettyPrinter(indent = 2)
host = 'localhost'
port = 9090
uri = ''
framed = False
http = False
argi = 1
if sys.argv[argi] == '-h':
parts = sys.argv[argi+1].split(':')
host = parts[0]
port = int(parts[1])
argi += 2
if sys.argv[argi] == '-u':
url = urlparse(sys.argv[argi+1])
parts = url[1].split(':')
host = parts[0]
if len(parts) > 1:
port = int(parts[1])
else:
port = 80
uri = url[2]
if url[4]:
uri += '?%s' % url[4]
http = True
argi += 2
if sys.argv[argi] == '-f' or sys.argv[argi] == '-framed':
framed = True
argi += 1
cmd = sys.argv[argi]
args = sys.argv[argi+1:]
if http:
transport = THttpClient.THttpClient(host, port, uri)
else:
socket = TSocket.TSocket(host, port)
if framed:
transport = TTransport.TFramedTransport(socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = Nimbus.Client(protocol)
transport.open()
if cmd == 'submitTopology':
if len(args) != 4:
print 'submitTopology requires 4 args'
sys.exit(1)
pp.pprint(client.submitTopology(args[0],args[1],args[2],eval(args[3]),))
elif cmd == 'submitTopologyWithOpts':
if len(args) != 5:
print 'submitTopologyWithOpts requires 5 args'
sys.exit(1)
pp.pprint(client.submitTopologyWithOpts(args[0],args[1],args[2],eval(args[3]),eval(args[4]),))
elif cmd == 'killTopology':
if len(args) != 1:
print 'killTopology requires 1 args'
sys.exit(1)
pp.pprint(client.killTopology(args[0],))
elif cmd == 'killTopologyWithOpts':
if len(args) != 2:
print 'killTopologyWithOpts requires 2 args'
sys.exit(1)
pp.pprint(client.killTopologyWithOpts(args[0],eval(args[1]),))
elif cmd == 'activate':
if len(args) != 1:
print 'activate requires 1 args'
sys.exit(1)
pp.pprint(client.activate(args[0],))
elif cmd == 'deactivate':
if len(args) != 1:
print 'deactivate requires 1 args'
sys.exit(1)
pp.pprint(client.deactivate(args[0],))
elif cmd == 'rebalance':
if len(args) != 2:
print 'rebalance requires 2 args'
sys.exit(1)
pp.pprint(client.rebalance(args[0],eval(args[1]),))
elif cmd == 'metricMonitor':
if len(args) != 2:
print 'metricMonitor requires 2 args'
sys.exit(1)
pp.pprint(client.metricMonitor(args[0],eval(args[1]),))
elif cmd == 'beginLibUpload':
if len(args) != 1:
print 'beginLibUpload requires 1 args'
sys.exit(1)
pp.pprint(client.beginLibUpload(args[0],))
elif cmd == 'beginFileUpload':
if len(args) != 0:
print 'beginFileUpload requires 0 args'
sys.exit(1)
pp.pprint(client.beginFileUpload())
elif cmd == 'uploadChunk':
if len(args) != 2:
print 'uploadChunk requires 2 args'
sys.exit(1)
pp.pprint(client.uploadChunk(args[0],args[1],))
elif cmd == 'finishFileUpload':
if len(args) != 1:
print 'finishFileUpload requires 1 args'
sys.exit(1)
pp.pprint(client.finishFileUpload(args[0],))
elif cmd == 'beginFileDownload':
if len(args) != 1:
print 'beginFileDownload requires 1 args'
sys.exit(1)
pp.pprint(client.beginFileDownload(args[0],))
elif cmd == 'downloadChunk':
if len(args) != 1:
print 'downloadChunk requires 1 args'
sys.exit(1)
pp.pprint(client.downloadChunk(args[0],))
elif cmd == 'getNimbusConf':
if len(args) != 0:
print 'getNimbusConf requires 0 args'
sys.exit(1)
pp.pprint(client.getNimbusConf())
elif cmd == 'getClusterInfo':
if len(args) != 0:
print 'getClusterInfo requires 0 args'
sys.exit(1)
pp.pprint(client.getClusterInfo())
elif cmd == 'getTopologyInfo':
if len(args) != 1:
print 'getTopologyInfo requires 1 args'
sys.exit(1)
pp.pprint(client.getTopologyInfo(args[0],))
elif cmd == 'getSupervisorWorkers':
if len(args) != 1:
print 'getSupervisorWorkers requires 1 args'
sys.exit(1)
pp.pprint(client.getSupervisorWorkers(args[0],))
elif cmd == 'getTopologyConf':
if len(args) != 1:
print 'getTopologyConf requires 1 args'
sys.exit(1)
pp.pprint(client.getTopologyConf(args[0],))
elif cmd == 'getTopology':
if len(args) != 1:
print 'getTopology requires 1 args'
sys.exit(1)
pp.pprint(client.getTopology(args[0],))
elif cmd == 'getUserTopology':
if len(args) != 1:
print 'getUserTopology requires 1 args'
sys.exit(1)
pp.pprint(client.getUserTopology(args[0],))
elif cmd == 'getTopologyMetric':
if len(args) != 1:
print 'getTopologyMetric requires 1 args'
sys.exit(1)
pp.pprint(client.getTopologyMetric(args[0],))
else:
print 'Unrecognized method %s' % cmd
sys.exit(1)
transport.close()
|
[
"songtk@msn.com"
] |
songtk@msn.com
|
|
cec42bc5df865c7e99d23024fa4c71a6f7db99d8
|
32fb6fd06b496b4c9ceabe578dceed265950cc37
|
/homework/core/models/meta/base.py
|
b45e9d7de1fe1813e6d37480dcef6702e9545bf9
|
[] |
no_license
|
rach/homework
|
8167d3930d4313818e306fb0965ffbd6402bf12b
|
aca450753445caa188675d637300ead443d15525
|
refs/heads/master
| 2021-01-10T04:50:53.857108
| 2016-01-11T21:13:38
| 2016-01-11T21:13:38
| 49,445,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 841
|
py
|
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy import event
from sqlalchemy import (
Column,
Integer
)
import re
_underscorer1 = re.compile(r'(.)([A-Z][a-z]+)')
_underscorer2 = re.compile('([a-z0-9])([A-Z])')
def _camel_to_snake(s):
subbed = _underscorer1.sub(r'\1_\2', s)
return _underscorer2.sub(r'\1_\2', subbed).lower()
class Base(object):
@declared_attr
def __tablename__(cls):
return _camel_to_snake(cls.__name__)
id = Column(Integer, primary_key=True)
Base = declarative_base(cls=Base)
def create_dbsession(engine):
dbsession = scoped_session(sessionmaker())
dbsession.configure(bind=engine)
Base.metadata.bind = engine
return dbsession
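# --- Added examples (not in the original) of the regex pipeline above:
#   _camel_to_snake("MenuItem")     -> "menu_item"
#   _camel_to_snake("HTTPResponse") -> "http_response"
# so each model's default __tablename__ is its class name in snake_case.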
|
[
"rachid.belaid@gmail.com"
] |
rachid.belaid@gmail.com
|
4b5be1fb84187f4d83d1e07885657d02b7a120f5
|
30d1b89b67d48efdacce5bceeee2c734bee2b509
|
/manual_translation/devel/lib/python2.7/dist-packages/mavros_msgs/msg/_Mavlink.py
|
2d4e562e868c5dec2e71bd13bbbde54c744bcc04
|
[] |
no_license
|
ParthGaneriwala/uppaal2ros
|
db4a6b20c78e423511e565477a2461942c2adceb
|
f88b2b860b0b970b61110a323d0397352785c9e2
|
refs/heads/main
| 2023-02-20T19:36:22.406515
| 2021-01-28T18:58:44
| 2021-01-28T18:58:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 100
|
py
|
/home/adi/ardu_ws/devel/.private/mavros_msgs/lib/python2.7/dist-packages/mavros_msgs/msg/_Mavlink.py
|
[
"adithyatp@yahoo.com"
] |
adithyatp@yahoo.com
|
268f77595526ce94d83bcd97375dc506662f676b
|
309da681f1ce8d119f2e44580ba196094d03bd92
|
/project.py
|
1dbaa8cec2329e4e1555049d01b2d79a0b6f0710
|
[] |
no_license
|
aditya6116/catalog
|
bd9da4c8f8ec2c95728b66a8914d04e759c7ddb0
|
e8247118cde31d92327a8df82f766bb0f218999f
|
refs/heads/master
| 2021-01-21T21:32:36.111228
| 2017-06-20T07:01:09
| 2017-06-20T07:01:09
| 94,858,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,369
|
py
|
from flask import Flask, render_template
from flask import request, redirect, jsonify, url_for, flash
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Restaurant, MenuItem, User
from flask import session as login_session
import random
import string
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
from flask import make_response
import requests
app = Flask(__name__)
# Connect to Database and create database session
engine = create_engine('sqlite:///restaurantmenuwithusers.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
CLIENT_ID = json.loads(
open('client_secrets.json', 'r').read())['web']['client_id']
APPLICATION_NAME = "Restaurant Menu Application"
def createUser(login_session):
newUser = User(name=login_session['username'], email=login_session[
'email'], picture=login_session['picture'])
session.add(newUser)
session.commit()
user = session.query(User).filter_by(email=login_session['email']).one()
return user.id
def getUserInfo(user_id):
user = session.query(User).filter_by(id=user_id).one()
return user
def getUserID(email):
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except BaseException:
return None
@app.route('/login')
def showLogin():
state = ''.join(random.choice(string.ascii_uppercase + string.digits)
for x in xrange(32))
login_session['state'] = state
return render_template("login.html", STATE=state)
# gconnect
@app.route('/gconnect', methods=['POST'])
def gconnect():
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != CLIENT_ID:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
print "Token's client ID does not match app's."
response.headers['Content-Type'] = 'application/json'
return response
stored_access_token = login_session.get('access_token')
stored_gplus_id = login_session.get('gplus_id')
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = make_response(
json.dumps('Current user is already connected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
login_session['access_token'] = credentials.access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
print login_session['username']
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += """ " style = "width: 300px; height: 300px;border-radius: 150px;
-webkit-border-radius: 150px;-moz-border-radius: 150px;"> """
flash("you are now logged in as %s" % login_session['username'])
print "done!"
return output
# DISCONNECT - Revoke a current user's token and reset their login_session
@app.route('/gdisconnect')
def gdisconnect():
access_token = login_session['access_token']
    print 'In gdisconnect access token is %s' % access_token
print 'User name is: '
print login_session['username']
if access_token is None:
print 'Access Token is None'
response = make_response(
json.dumps('Current user not connected.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
url = """https://accounts.google.com/o/oauth2/revoke?
token=%s""" %login_session['access_token']
h = httplib2.Http()
result = h.request(url, 'GET')[0]
print 'result is '
print result
    # Google's revoke endpoint returns HTTP 200 on success, not 404.
    if result['status'] == '200':
del login_session['access_token']
del login_session['gplus_id']
del login_session['username']
del login_session['email']
del login_session['picture']
response = make_response(json.dumps('Successfully disconnected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
else:
        # The status code belongs to make_response, not json.dumps.
        response = make_response(
            json.dumps('Failed to revoke token for given user.'), 400)
response.headers['Content-Type'] = 'application/json'
return response
# JSON APIs to view Restaurant Information
@app.route('/restaurant/<int:restaurant_id>/menu/JSON')
def restaurantMenuJSON(restaurant_id):
restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
items = session.query(MenuItem).filter_by(
restaurant_id=restaurant_id).all()
return jsonify(MenuItems=[i.serialize for i in items])
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/JSON')
def menuItemJSON(restaurant_id, menu_id):
Menu_Item = session.query(MenuItem).filter_by(id=menu_id).one()
return jsonify(Menu_Item=Menu_Item.serialize)
@app.route('/restaurant/JSON')
def restaurantsJSON():
restaurants = session.query(Restaurant).all()
return jsonify(restaurants=[r.serialize for r in restaurants])
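

# Hedged usage sketch (not part of the original app): the JSON endpoints
# above can be exercised with `requests`; localhost:5000 and restaurant id 1
# are placeholder assumptions for illustration.
#
#   import requests
#   menu = requests.get('http://localhost:5000/restaurant/1/menu/JSON').json()
#   print menu['MenuItems']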
# Show all restaurants
@app.route('/')
@app.route('/restaurant/')
def showRestaurants():
restaurants = session.query(Restaurant).order_by(asc(Restaurant.name))
if 'username' not in login_session:
return render_template(
'publicrestaurant.html',
restaurants=restaurants)
else:
return render_template('restaurants.html', restaurants=restaurants)
# Create a new restaurant
@app.route('/restaurant/new/', methods=['GET', 'POST'])
def newRestaurant():
if 'username' not in login_session:
return redirect("/login")
if request.method == 'POST':
newRestaurant = Restaurant(
name=request.form['name'],
user_id=login_session['user_id'])
session.add(newRestaurant)
flash('New Restaurant %s Successfully Created' % newRestaurant.name)
session.commit()
return redirect(url_for('showRestaurants'))
else:
return render_template('newRestaurant.html')
# Edit a restaurant
@app.route('/restaurant/<int:restaurant_id>/edit/', methods=['GET', 'POST'])
def editRestaurant(restaurant_id):
if 'username' not in login_session:
return redirect("/login")
editedRestaurant = session.query(
Restaurant).filter_by(id=restaurant_id).one()
if login_session['user_id'] != editedRestaurant.user_id:
        flash('You are not authorized to edit this restaurant.')
return redirect(url_for('showRestaurants'))
else:
if request.method == 'POST':
if request.form['name']:
editedRestaurant.name = request.form['name']
                flash(
                    'Restaurant %s Successfully Edited' %
                    editedRestaurant.name)
return redirect(url_for('showRestaurants'))
else:
return render_template(
'editRestaurant.html',
restaurant=editedRestaurant)
# Delete a restaurant
@app.route('/restaurant/<int:restaurant_id>/delete/', methods=['GET', 'POST'])
def deleteRestaurant(restaurant_id):
if 'username' not in login_session:
return redirect("/login")
else:
restaurantToDelete = session.query(
Restaurant).filter_by(id=restaurant_id).one()
if login_session['user_id'] != restaurantToDelete.user_id:
            flash('You can only delete your own restaurant.')
return redirect(url_for('showRestaurants'))
if request.method == 'POST':
session.delete(restaurantToDelete)
flash('%s Successfully Deleted' % restaurantToDelete.name)
session.commit()
return redirect(
url_for(
'showRestaurants',
restaurant_id=restaurant_id))
else:
return render_template(
'deleteRestaurant.html',
restaurant=restaurantToDelete)
# Show a restaurant menu
@app.route('/restaurant/<int:restaurant_id>/')
@app.route('/restaurant/<int:restaurant_id>/menu/')
def showMenu(restaurant_id):
restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
items = session.query(MenuItem).filter_by(
restaurant_id=restaurant_id).all()
creator = getUserInfo(restaurant.user_id)
if "username" not in \
login_session or login_session['user_id'] != creator.id:
return render_template(
'publicmenu.html',
items=items,
restaurant=restaurant,
creator=creator)
else:
return render_template(
'menu.html',
items=items,
restaurant=restaurant,
creator=creator)
# Create a new menu item
@app.route(
'/restaurant/<int:restaurant_id>/menu/new/',
methods=[
'GET',
'POST'])
def newMenuItem(restaurant_id):
if 'username' not in login_session:
return redirect("/login")
restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
if request.method == 'POST':
newItem = MenuItem(
name=request.form['name'],
description=request.form['description'],
price=request.form['price'],
course=request.form['course'],
restaurant_id=restaurant_id,
user_id=restaurant.user_id)
session.add(newItem)
session.commit()
        flash('New Menu Item %s Successfully Created' % (newItem.name))
return redirect(url_for('showMenu', restaurant_id=restaurant_id))
else:
return render_template('newmenuitem.html', restaurant_id=restaurant_id)
# Edit a menu item
@app.route(
'/restaurant/<int:restaurant_id>/menu/<int:menu_id>/edit',
methods=[
'GET',
'POST'])
def editMenuItem(restaurant_id, menu_id):
if 'username' not in login_session:
return redirect("/login")
editedItem = session.query(MenuItem).filter_by(id=menu_id).one()
restaurant = session.query(
Restaurant).filter_by(id=restaurant_id).one()
if login_session['user_id'] != restaurant.user_id:
        flash("You are not authorized to edit this restaurant's menu.")
return redirect(url_for('showRestaurants'))
if request.method == 'POST':
if request.form['name']:
editedItem.name = request.form['name']
if request.form['description']:
editedItem.description = request.form['description']
if request.form['price']:
editedItem.price = request.form['price']
if request.form['course']:
editedItem.course = request.form['course']
session.add(editedItem)
session.commit()
flash('Menu Item Successfully Edited')
return redirect(url_for('showMenu', restaurant_id=restaurant_id))
else:
return render_template(
'editmenuitem.html',
restaurant_id=restaurant_id,
menu_id=menu_id,
item=editedItem)
# Delete a menu item
@app.route(
'/restaurant/<int:restaurant_id>/menu/<int:menu_id>/delete',
methods=[
'GET',
'POST'])
def deleteMenuItem(restaurant_id, menu_id):
if 'username' not in login_session:
return redirect("/login")
else:
restaurant = session.query(
Restaurant).filter_by(id=restaurant_id).one()
itemToDelete = session.query(MenuItem).filter_by(id=menu_id).one()
if login_session['user_id'] != restaurant.user_id:
            flash('You can only delete items from your own restaurant.')
return redirect(url_for('showRestaurants'))
if request.method == 'POST':
session.delete(itemToDelete)
session.commit()
flash('Menu Item Successfully Deleted')
return redirect(url_for('showMenu', restaurant_id=restaurant_id))
else:
return render_template('deleteMenuItem.html', item=itemToDelete)
if __name__ == '__main__':
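    # Development-only settings: a hard-coded secret key and debug mode are
    # fine locally but should not ship to production.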
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host='0.0.0.0', port=5000)


# ==== /app/app.py (repo: 201504481/Tarea8, no license) ====

from typing import List, Dict
from flask import Flask
import mysql.connector
import json
app = Flask(__name__)
def Materia() -> List[Dict]:
config = {
'user': 'root',
'password': 'root',
'host': 'db',
'port': '3306',
'database': 'knights'
}
connection = mysql.connector.connect(**config)
cursor = connection.cursor()
cursor.execute('SELECT * FROM Materia')
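    # Each row becomes a single-key dict {nombre: codigo}, so index() below
    # serializes to {"Materia": [{"<nombre>": "<codigo>"}, ...]}.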
results = [{nombre: codigo} for (nombre, codigo) in cursor]
cursor.close()
connection.close()
return results
@app.route('/')
def index() -> str:
return json.dumps({'Materia': Materia()})
if __name__ == '__main__':
app.run(host='0.0.0.0')
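
# Example response from '/' (shape only; the names and codes below are
# placeholders, the actual rows depend on the `knights` database):
#   {"Materia": [{"Materia A": "MM-110"}, {"Materia B": "FS-100"}]}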


# ==== /Python_codes/p02686/s642611525.py (repo: Aasthaengg/IBMdataset, no license) ====

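
# Strategy (the classic bracket-concatenation greedy): each piece is reduced
# to (lowest prefix depth, net height). Rising pieces (net height >= 0) are
# placed left-to-right in decreasing order of their lowest point; falling
# pieces are handled symmetrically from the right. The answer is "Yes" iff
# no prefix ever dips below zero and the total height balances to zero.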
def main():
N = int(input())
up_lines = []
down_lines = []
for i in range(N):
s = input()
height = 0
bottom = 0
for c in s:
if c == "(":
height += 1
else:
height -= 1
bottom = min(bottom, height)
if height > 0:
up_lines.append((bottom, height))
else:
down_lines.append((bottom-height, -height))
up_lines.sort(reverse=True, key=lambda line: line[0])
down_lines.sort(reverse=True, key=lambda line: line[0])
left = 0
for bottom, height in up_lines:
if left + bottom < 0:
print("No")
return
left += height
right = 0
for bottom, height in down_lines:
if right + bottom < 0:
print("No")
return
right += height
if left == right:
print("Yes")
else:
print("No")
if __name__ == "__main__":
main()


# ==== /LogProducer/main.py (repo: nguyenvanhuybk99/ForexSpark, no license) ====

from log_manager import LogManager
from connector import KafkaConnector
from config import LoggerConfig, ConnectorConfig
import sys
class Main:
__log_manager: LogManager
@classmethod
def setup(cls, msg_num=1, rate=1):
task_usdeur_logger_config = LoggerConfig("TASK_USDEUR", 2)
task_usdeur_connector_config = ConnectorConfig("KAFKA_CONNECTOR", "USDEUR")
task_gbpusd_logger_config = LoggerConfig("TASK_GBPUSD", 2)
task_gbpusd_connector_config = ConnectorConfig("KAFKA_CONNECTOR", "GBPUSD")
cls.__log_manager = LogManager([(task_usdeur_logger_config, task_usdeur_connector_config),
(task_gbpusd_logger_config, task_gbpusd_connector_config)],
msg_num, rate)
return cls
@classmethod
def run(cls):
cls.__log_manager.dispatch_logs()
if __name__ == '__main__':
args = sys.argv
try:
app = Main.setup(int(args[1]), int(args[2]))
except Exception as e:
print(e)
print("Wrong arguments, use default config")
app = Main.setup(3, 4)
app.run()
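
# Usage sketch (inferred from the argv handling above):
#   python main.py <msg_num> <rate>     e.g. python main.py 10 2
# Missing or malformed arguments fall back to the default Main.setup(3, 4).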


# ==== /gen_trips.py (repo: divergent63/simple_shortest_routing, Apache-2.0) ====

#!/usr/bin/env python
# coding=utf-8
"""
input: OD information(vehicle information)
output: SUMO Trips
"""
from lxml import etree
from pathlib import Path
import os
import pandas as pd
def gen_trips(od):
start_time = od['start_time'].values
root = etree.Element("routes")
for i in range(len(od.values)):
veh_id = str(i)
route = od['route'].values[i]
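        # The route column stores a stringified Python list of edge IDs;
        # split on quotes and keep only fragments long enough to be real IDs.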
route = route.split("'")
route_list = []
for j in range(len(route)):
if len(route[j]) > 3:
route_list.append(route[j])
        # Join however many edges were recovered; the original handled only
        # routes of exactly 2, 3, or 4 edges and left others as a raw list.
        route = ' '.join(route_list)
root_1 = etree.SubElement(root, "vehicle", id=str(veh_id), depart=str(start_time[i] * 10))
child_11 = etree.SubElement(
root_1, "route", edges=route
)
with open(Path(Path(os.getcwd()) / 'conf' / Path('test0_trips.trips.xml')), 'w') as e_data:
print(etree.tostring(root, pretty_print=True, encoding='unicode'), file=e_data)
if __name__ == '__main__':
path = Path(os.getcwd()) / 'conf' / 'veh_info.csv'
od = pd.read_csv(path).dropna()
gen_trips(od)
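
# Expected output shape (one <vehicle> per OD row; edge names below are
# placeholders for illustration):
#   <routes>
#     <vehicle id="0" depart="0">
#       <route edges="edge_a edge_b edge_c"/>
#     </vehicle>
#   </routes>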