| blob_id (string, 40) | directory_id (string, 40) | path (string, 2–616) | content_id (string, 40) | detected_licenses (list, 0–69) | license_type (2 classes) | repo_name (string, 5–118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2–10.3M) | extension (246 classes) | content (string, 2–10.3M) | authors (list, 1) | author_id (string, 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ed7bdce889e461553bb10bad06747aae9bc3bee8
|
22ee2c0b35393e31c0bf9d715ad0cd4c884c0615
|
/day2/buggy/dicegame/die.py
|
148544efaa4d61e92a6f89f8c8738d7598ed69ee
|
[] |
no_license
|
wwa-vanekeren/aspp2021_solutions
|
de051bdf3571affdcb17cf8d876218f870d3db13
|
eac2ff0e3f0d2c60aca5b9115dd3039608aaacf9
|
refs/heads/main
| 2023-03-17T17:51:00.019986
| 2021-02-26T10:50:23
| 2021-02-26T10:50:23
| 340,689,036
| 0
| 0
| null | 2021-02-21T08:08:48
| 2021-02-20T15:38:14
| null |
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
# Just count the stupid dice
import random
def roll(dice):
    # Roll every die in the collection (the original body was a no-op stub).
    for die in dice:
        die.roll()
class Die:
"""
This is always correct. Seriously, look away.
"""
def __init__(self):
self.roll()
def roll(self):
self.value = int(random.random() * 6 + 1)
def show(self):
if self.value == 1:
return("---------\n| |\n| * |\n| |\n---------")
elif self.value == 2:
return("---------\n|* |\n| |\n| *|\n---------")
elif self.value == 3:
return("---------\n|* |\n| * |\n| *|\n---------")
elif self.value == 4:
return("---------\n|* *|\n| |\n|* *|\n---------")
elif self.value == 5:
return("---------\n|* *|\n| * |\n|* *|\n---------")
else:
return("---------\n|* *|\n|* *|\n|* *|\n---------")
@classmethod
def create_dice(cls, n):
return [cls() for _ in range(n)]
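# Usage sketch: create five dice, roll them, and print their faces.
#   dice = Die.create_dice(5)
#   roll(dice)
#   for die in dice:
#       print(die.show())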
|
[
"noreply@github.com"
] |
wwa-vanekeren.noreply@github.com
|
e0974cb3b18e1e63399d4a31048a7e7a76a9a09e
|
1f7bc6ee69209f91b96b258def5e75e0e5fde3c2
|
/listbox.py
|
9835cddc447b5f919f097cbf07b725884dbf8a46
|
[] |
no_license
|
kaurjassi/GUI
|
d82a93be9ec9be0fea95f22059d374fbe72ccfcb
|
d30a24bd5ece7641cda976c4eec594f442bc6a82
|
refs/heads/master
| 2022-11-09T06:43:04.145684
| 2020-06-25T14:39:01
| 2020-06-25T14:39:01
| 274,936,680
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
from tkinter import *
def add():
global i
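    # Inserting at ACTIVE places the new item just before the active line.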
lbs.insert(ACTIVE,f"{i}")
i+=1
i = 0  # initial value
root = Tk()
root.title("Listbox")
root.geometry("500x400")
# A Listbox is a widget that displays a list of items.
# Start with a blank Listbox and insert items into it.
lbs = Listbox(root)
lbs.pack()
lbs.insert(END,"First item in list box")
Button(root,text="Add item",command=add).pack()
root.mainloop()
|
[
"noreply@github.com"
] |
kaurjassi.noreply@github.com
|
5242129ffbb2e23f80babe8513a9e8272fd25dcf
|
75f5eab9a026c3076972690519fb90489d60de16
|
/discord/helpers/utils.py
|
d3be7004d3052ffbd757620634dfbac138befd28
|
[
"Apache-2.0"
] |
permissive
|
yogurtcpu/RPANBot
|
72f507bd346d9c6370d4210fccb580da0988c061
|
0515e3dbac31735a0f936365919e46851a91dc36
|
refs/heads/master
| 2023-02-27T06:36:54.156607
| 2021-02-02T18:50:45
| 2021-02-02T18:50:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,642
|
py
|
"""
Copyright 2020 RPANBot
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from re import sub
from datetime import datetime, timezone
def is_rpan_broadcast(link: str) -> bool:
    return "reddit.com/rpan/" in link
def parse_link(link: str) -> str:
"""
Parse the stream id from a provided link.
:return: The parsed stream id.
"""
id = (
sub("http(s)?://", "", link)
.replace("www.reddit.com/", "")
.replace("old.reddit.com/", "")
.replace("reddit.com/", "")
.replace("redd.it/", "")
)
return sub("(rpan/r|r)/(.*?)/(comments/)?", "", id).split("/")[0].split("?")[0]
def escape_username(name: str) -> str:
"""
Escapes markdown in a username.
:return: The username with prevented italics.
"""
return name.replace("_", "\\_")
def format_timestamp(timestamp: float) -> str:
"""
Formats a timestamp.
:param timestamp: The timestamp to format.
:return: Returns a time in a set format.
"""
time = datetime.fromtimestamp(int(timestamp), tz=timezone.utc)
return time.strftime("%d/%m/%Y at %H:%M UTC")
|
[
"56532639+OneUpPotato@users.noreply.github.com"
] |
56532639+OneUpPotato@users.noreply.github.com
|
432af564f54f219a36399d60ff01262383c310b7
|
bd359061d94167f624db31e1b583a0b19a0bcff6
|
/src/Test_DiGraph.py
|
bbb4aa93d4caee386a3a8680301cd1ba41a41f7c
|
[] |
no_license
|
rinatGrinberg/Ex3-python
|
b69bfb4571b75b17e1be999ffa20e976ecda5f56
|
4945e7ba872543410efd452bbb45f156f89162ab
|
refs/heads/main
| 2023-02-14T19:15:31.617860
| 2021-01-13T18:35:30
| 2021-01-13T18:35:30
| 329,116,249
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,697
|
py
|
import unittest
from DiGraph import DiGraph
from src import GraphInterface
import calc
class MyTestCase(unittest.TestCase):
def test_nodesize_and_addnnode_and_removenode(self):
g = DiGraph()
for i in range(3):
g.add_node(i)
g.remove_node(0)
g.remove_node(1)
g.remove_node(1)
self.assertEqual(g.node_size,1)
def test_edgesize_and_connect_and_remove_edge(self):
g = DiGraph()
for i in range(3):
g.add_node(i)
g.remove_node(0)
g.remove_node(1)
g.remove_node(1)
g.add_node(25)
g.add_node(27)
g.add_edge(25,2,1)
g.add_edge(27,2,78)
g.add_edge(2,25,3)
g.add_node(7)
g.add_node(8)
g.add_node(9)
g.add_edge(7,25,6)
g.add_edge(8,25,16)
g.add_edge(8,7,3)
g.add_edge(9,2,11)
g.add_edge(9,8,111)
g.remove_edge(9,8)
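        # Eight edges were added and one removed, so seven should remain.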
print(g.edge_size)
self.assertEqual(g.edge_size,7)
def test_get_v(self):
g = DiGraph()
for i in range(3):
g.add_node(i)
g.remove_node(0)
g.remove_node(1)
g.remove_node(1)
g.add_node(25)
g.add_node(27)
g.add_edge(25, 2, 1)
g.add_edge(27, 2, 78)
g.add_edge(2, 25, 3)
g.add_node(7)
g.add_node(8)
g.add_node(9)
g.add_edge(7, 25, 6)
g.add_edge(8, 25, 16)
g.add_edge(8, 7, 3)
g.add_edge(9, 2, 11)
g.add_edge(9, 8, 111)
g.remove_edge(9, 8)
nodes=g.get_all_v()
self.assertIn(25,nodes)
self.assertIn(7,nodes)
self.assertIn(8,nodes)
self.assertIn(9,nodes)
self.assertIn(27,nodes)
self.assertNotIn(1,nodes)
def test_get_nodes_in_and_outs(self):
g = DiGraph()
for i in range(3):
g.add_node(i)
g.remove_node(0)
g.remove_node(1)
g.remove_node(1)
g.add_node(25)
g.add_node(27)
g.add_edge(25, 2, 1)
g.add_edge(27, 2, 78)
g.add_edge(2, 25, 3)
g.add_node(7)
g.add_node(8)
g.add_node(9)
g.add_edge(7, 25, 6)
g.add_edge(8, 25, 16)
g.add_edge(8, 7, 3)
g.add_edge(9, 2, 11)
g.add_edge(9, 8, 111)
g.remove_edge(9, 8)
out = g.all_out_edges_of_node(2)
inn = g.all_in_edges_of_node(2)
self.assertIn(25, out)
self.assertIn(27, inn)
self.assertIn(25, inn)
self.assertIn(9, inn)
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
rinatGrinberg.noreply@github.com
|
b636530dff51028fbe0a7fbb20a468126863860f
|
3b9d763180410bf0abf5b9c37391a64319efe839
|
/toontown/building/DistributedHQInteriorAI.py
|
d7c12fec12fec9b0432c0e998ae1a9b352eb808e
|
[] |
no_license
|
qphoton/Reverse_Engineering_Project_ToonTown
|
442f15d484324be749f6f0e5e4e74fc6436e4e30
|
11468ab449060169191366bc14ff8113ee3beffb
|
refs/heads/master
| 2021-05-08T00:07:09.720166
| 2017-10-21T02:37:22
| 2017-10-21T02:37:22
| 107,617,661
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
import cPickle
class DistributedHQInteriorAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory("DistributedHQInteriorAI")
def __init__(self, air):
DistributedObjectAI.__init__(self, air)
self.isTutorial = False
self.zoneId = 0
self.block = 0
self.leaderData = cPickle.dumps(([], [], []))
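        # The leader board starts out as a pickled triple of empty lists.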
def setZoneIdAndBlock(self, zoneId, block):
self.zoneId = zoneId
self.block = block
def setLeaderBoard(self, leaderData):
self.leaderData = leaderData
    def setTutorial(self, isTutorial):
        self.isTutorial = isTutorial  # was hard-coded to False, ignoring the argument
def getZoneIdAndBlock(self):
return (self.zoneId, self.block)
def getLeaderBoard(self):
return self.leaderData
def getTutorial(self):
return self.isTutorial
|
[
"Infinitywilee@rocketmail.com"
] |
Infinitywilee@rocketmail.com
|
2466113f0da79b2244862448de9eb3746c0d33d1
|
8fd255fc3498ec970d7202d3f70a671b7aa4c64b
|
/pmsal/blog/views.py
|
d5b704545317b790379154f070dad0ca73a0eb84
|
[
"MIT"
] |
permissive
|
klebercode/pmsal
|
afda05fe29bb67db70fc7dcb8dfc577f4a3f0c9c
|
d78477f7cd1a5d1ed9973e13be5758c71a2ce2db
|
refs/heads/master
| 2016-09-06T05:42:34.468341
| 2015-04-04T17:24:07
| 2015-04-04T17:24:07
| 33,409,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,212
|
py
|
# coding: utf-8
from django.db.models import Q
from django.views import generic
from django.views.generic.dates import (YearArchiveView, MonthArchiveView,
DayArchiveView)
from pmsal.context_processors import EnterpriseExtraContext
from pmsal.blog.models import Entry
from pmsal.core.models import Category
class EntryYearArchiveView(YearArchiveView):
queryset = Entry.published.all()
date_field = 'created'
make_object_list = True
allow_future = True
    # TODO: change the pagination
paginate_by = 10
class EntryMonthArchiveView(MonthArchiveView):
queryset = Entry.published.all()
date_field = 'created'
make_object_list = True
allow_future = True
class EntryDayArchiveView(DayArchiveView):
queryset = Entry.published.all()
date_field = 'created'
make_object_list = True
allow_future = True
class EntryListView(EnterpriseExtraContext, generic.ListView):
# model = Entry
queryset = Entry.published.all()
template_name = 'blog/entry_home.html'
    # TODO: change the pagination
paginate_by = 6
def get_queryset(self, **kwargs):
search = self.request.GET.get('search', '')
if search:
obj_lst = Entry.published.filter(Q(title__icontains=search) |
Q(created__icontains=search) |
Q(body__icontains=search))
else:
obj_lst = Entry.published.all()
return obj_lst
def get_context_data(self, **kwargs):
context = super(EntryListView, self).get_context_data(**kwargs)
search = self.request.GET.get('search', '')
context['search'] = search
context['tag_list'] = Entry.tags.most_common()
        # TODO: change how the categories are loaded
context['category_list'] = Category.objects.filter(area=3
).order_by('?')[:10]
return context
class EntryDateDetailView(EnterpriseExtraContext, generic.DateDetailView):
# model = Entry
queryset = Entry.published.all()
date_field = 'created'
make_object_list = True
allow_future = True
def get_context_data(self, **kwargs):
context = super(EntryDateDetailView, self).get_context_data(**kwargs)
context['tag_list'] = Entry.tags.most_common()
        # TODO: change how the categories are loaded
context['category_list'] = Category.objects.all().order_by('?')[:10]
return context
class EntryTagListView(EntryListView):
"""
Herda de EntryListView mudando o filtro para tag selecionada
"""
def get_queryset(self):
"""
Incluir apenas as Entries marcadas com a tag selecionada
"""
return Entry.published.filter(tags__slug=self.kwargs['tag_slug'])
class EntryCategoryListView(EntryListView):
"""
Herda de EntryListView mudando o filtro para categoria selecionada
"""
def get_queryset(self, **kwargs):
"""
Inclui apenas as Entries marcadas com a categoria selecionada
"""
return Entry.published.filter(categories__slug=self.kwargs['cat_slug'])
|
[
"kleberr@msn.com"
] |
kleberr@msn.com
|
980959abb37db81339bd74bddf75e0f1e7839b61
|
f4b77a4bac8acdf550a13e04661765f0eab4509d
|
/contenta/manager.py
|
ec84b4863d2e655bc5dc1fb26e6a8a446f14342a
|
[] |
no_license
|
k1000/contenta
|
b6d083c2c78e8fc8bd27481c4e94e8c0ef841909
|
a3d11ac71a1bba82320daa2be6a7b322aa76b6b9
|
refs/heads/master
| 2021-01-18T22:59:14.022598
| 2016-06-22T08:25:22
| 2016-06-22T08:25:22
| 7,433,369
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
from django.db import models
class PageManager(models.Manager):
def active(self, **filters):
return super(PageManager, self).get_query_set().filter(state=2, **filters)
def siblings(self, page): # TODO
if page.parent:
return self.active().filter(parent__pk=page.parent.pk)
def same_template(self, template_name):
return self.active().filter(template_name__exact=template_name)
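# Usage sketch (hypothetical Page model; get_query_set() is the pre-Django-1.6
# spelling, so this manager targets old Django versions):
#   Page.objects.active(parent__isnull=True)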
|
[
"selwak@gmail.com"
] |
selwak@gmail.com
|
7b46bd89e96c7e6548ef6f816becf00ba0d630ee
|
a56a74b362b9263289aad96098bd0f7d798570a2
|
/venv/lib/python3.8/site-packages/matplotlib/backends/backend_agg.py
|
67e80611ef10e78892a747eb9f010880cb4577dc
|
[
"MIT"
] |
permissive
|
yoonkt200/ml-theory-python
|
5812d06841d30e1068f6592b5730a40e87801313
|
7643136230fd4f291b6e3dbf9fa562c3737901a2
|
refs/heads/master
| 2022-12-21T14:53:21.624453
| 2021-02-02T09:33:07
| 2021-02-02T09:33:07
| 132,319,537
| 13
| 14
|
MIT
| 2022-12-19T17:23:57
| 2018-05-06T08:17:45
|
Python
|
UTF-8
|
Python
| false
| false
| 22,347
|
py
|
"""
An agg http://antigrain.com/ backend
Features that are implemented
* capstyles and join styles
* dashes
* linewidth
* lines, rectangles, ellipses
* clipping to a rectangle
* output to RGBA and PNG, optionally JPEG and TIFF
* alpha blending
* DPI scaling properly - everything scales properly (dashes, linewidths, etc)
* draw polygon
* freetype2 w/ ft2font
TODO:
* integrate screen dpi w/ ppi and text
"""
try:
import threading
except ImportError:
import dummy_threading as threading
try:
from contextlib import nullcontext
except ImportError:
from contextlib import ExitStack as nullcontext # Py 3.6.
from math import radians, cos, sin
import numpy as np
from matplotlib import cbook, rcParams, __version__
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase)
from matplotlib.font_manager import findfont, get_font
from matplotlib.ft2font import (LOAD_FORCE_AUTOHINT, LOAD_NO_HINTING,
LOAD_DEFAULT, LOAD_NO_AUTOHINT)
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox, BboxBase
from matplotlib import colors as mcolors
from matplotlib.backends._backend_agg import RendererAgg as _RendererAgg
from matplotlib.backend_bases import _has_pil
if _has_pil:
from PIL import Image
backend_version = 'v2.2'
def get_hinting_flag():
mapping = {
True: LOAD_FORCE_AUTOHINT,
False: LOAD_NO_HINTING,
'either': LOAD_DEFAULT,
'native': LOAD_NO_AUTOHINT,
'auto': LOAD_FORCE_AUTOHINT,
'none': LOAD_NO_HINTING
}
return mapping[rcParams['text.hinting']]
class RendererAgg(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles
"""
# we want to cache the fonts at the class level so that when
# multiple figures are created we can reuse them. This helps with
# a bug on windows where the creation of too many figures leads to
# too many open file handles. However, storing them at the class
# level is not thread safe. The solution here is to let the
# FigureCanvas acquire a lock on the fontd at the start of the
# draw, and release it when it is done. This allows multiple
# renderers to share the cached fonts, but only one figure can
# draw at time and so the font cache is used by only one
# renderer at a time.
lock = threading.RLock()
def __init__(self, width, height, dpi):
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
self._renderer = _RendererAgg(int(width), int(height), dpi)
self._filter_renderers = []
self._update_methods()
self.mathtext_parser = MathTextParser('Agg')
self.bbox = Bbox.from_bounds(0, 0, self.width, self.height)
def __getstate__(self):
# We only want to preserve the init keywords of the Renderer.
# Anything else can be re-created.
return {'width': self.width, 'height': self.height, 'dpi': self.dpi}
def __setstate__(self, state):
self.__init__(state['width'], state['height'], state['dpi'])
def _update_methods(self):
self.draw_gouraud_triangle = self._renderer.draw_gouraud_triangle
self.draw_gouraud_triangles = self._renderer.draw_gouraud_triangles
self.draw_image = self._renderer.draw_image
self.draw_markers = self._renderer.draw_markers
self.draw_path_collection = self._renderer.draw_path_collection
self.draw_quad_mesh = self._renderer.draw_quad_mesh
self.copy_from_bbox = self._renderer.copy_from_bbox
self.get_content_extents = self._renderer.get_content_extents
def tostring_rgba_minimized(self):
extents = self.get_content_extents()
bbox = [[extents[0], self.height - (extents[1] + extents[3])],
[extents[0] + extents[2], self.height - extents[1]]]
region = self.copy_from_bbox(bbox)
return np.array(region), extents
def draw_path(self, gc, path, transform, rgbFace=None):
# docstring inherited
nmax = rcParams['agg.path.chunksize'] # here at least for testing
npts = path.vertices.shape[0]
if (nmax > 100 and npts > nmax and path.should_simplify and
rgbFace is None and gc.get_hatch() is None):
nch = np.ceil(npts / nmax)
chsize = int(np.ceil(npts / nch))
i0 = np.arange(0, npts, chsize)
i1 = np.zeros_like(i0)
i1[:-1] = i0[1:] - 1
i1[-1] = npts
for ii0, ii1 in zip(i0, i1):
v = path.vertices[ii0:ii1, :]
c = path.codes
if c is not None:
c = c[ii0:ii1]
c[0] = Path.MOVETO # move to end of last chunk
p = Path(v, c)
try:
self._renderer.draw_path(gc, p, transform, rgbFace)
except OverflowError:
raise OverflowError("Exceeded cell block limit (set "
"'agg.path.chunksize' rcparam)")
else:
try:
self._renderer.draw_path(gc, path, transform, rgbFace)
except OverflowError:
raise OverflowError("Exceeded cell block limit (set "
"'agg.path.chunksize' rcparam)")
def draw_mathtext(self, gc, x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
xd = descent * sin(radians(angle))
yd = descent * cos(radians(angle))
x = round(x + ox + xd)
y = round(y - oy + yd)
self._renderer.draw_text_image(font_image, x, y + 1, angle, gc)
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# docstring inherited
if ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
flags = get_hinting_flag()
font = self._get_agg_font(prop)
if font is None:
return None
# We pass '0' for angle here, since it will be rotated (in raster
# space) in the following call to draw_text_image).
font.set_text(s, 0, flags=flags)
font.draw_glyphs_to_bitmap(antialiased=rcParams['text.antialiased'])
d = font.get_descent() / 64.0
# The descent needs to be adjusted for the angle.
xo, yo = font.get_bitmap_offset()
xo /= 64.0
yo /= 64.0
xd = d * sin(radians(angle))
yd = d * cos(radians(angle))
x = round(x + xo + xd)
y = round(y + yo + yd)
self._renderer.draw_text_image(font, x, y + 1, angle, gc)
def get_text_width_height_descent(self, s, prop, ismath):
# docstring inherited
if ismath in ["TeX", "TeX!"]:
# todo: handle props
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(
s, fontsize, renderer=self)
return w, h, d
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
flags = get_hinting_flag()
font = self._get_agg_font(prop)
font.set_text(s, 0.0, flags=flags)
w, h = font.get_width_height() # width and height of unrotated string
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
# docstring inherited
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
Z = np.array(Z * 255.0, np.uint8)
w, h, d = self.get_text_width_height_descent(s, prop, ismath)
xd = d * sin(radians(angle))
yd = d * cos(radians(angle))
x = round(x + xd)
y = round(y + yd)
self._renderer.draw_text_image(Z, x, y, angle, gc)
def get_canvas_width_height(self):
# docstring inherited
return self.width, self.height
def _get_agg_font(self, prop):
"""
Get the font for text instance t, caching for efficiency
"""
fname = findfont(prop)
font = get_font(fname)
font.clear()
size = prop.get_size_in_points()
font.set_size(size, self.dpi)
return font
def points_to_pixels(self, points):
# docstring inherited
return points * self.dpi / 72
def buffer_rgba(self):
return memoryview(self._renderer)
def tostring_argb(self):
return np.asarray(self._renderer).take([3, 0, 1, 2], axis=2).tobytes()
def tostring_rgb(self):
return np.asarray(self._renderer).take([0, 1, 2], axis=2).tobytes()
def clear(self):
self._renderer.clear()
def option_image_nocomposite(self):
# docstring inherited
# It is generally faster to composite each image directly to
# the Figure, and there's no file size benefit to compositing
# with the Agg backend
return True
def option_scale_image(self):
# docstring inherited
return False
def restore_region(self, region, bbox=None, xy=None):
"""
Restore the saved region. If bbox (instance of BboxBase, or
its extents) is given, only the region specified by the bbox
will be restored. *xy* (a pair of floats) optionally
specifies the new position (the LLC of the original region,
not the LLC of the bbox) where the region will be restored.
>>> region = renderer.copy_from_bbox()
>>> x1, y1, x2, y2 = region.get_extents()
>>> renderer.restore_region(region, bbox=(x1+dx, y1, x2, y2),
... xy=(x1-dx, y1))
"""
if bbox is not None or xy is not None:
if bbox is None:
x1, y1, x2, y2 = region.get_extents()
elif isinstance(bbox, BboxBase):
x1, y1, x2, y2 = bbox.extents
else:
x1, y1, x2, y2 = bbox
if xy is None:
ox, oy = x1, y1
else:
ox, oy = xy
# The incoming data is float, but the _renderer type-checking wants
# to see integers.
self._renderer.restore_region(region, int(x1), int(y1),
int(x2), int(y2), int(ox), int(oy))
else:
self._renderer.restore_region(region)
def start_filter(self):
"""
        Start filtering. It simply creates a new canvas (the old one is saved).
"""
self._filter_renderers.append(self._renderer)
self._renderer = _RendererAgg(int(self.width), int(self.height),
self.dpi)
self._update_methods()
def stop_filter(self, post_processing):
"""
        Save the plot in the current canvas as an image and apply
the *post_processing* function.
def post_processing(image, dpi):
# ny, nx, depth = image.shape
# image (numpy array) has RGBA channels and has a depth of 4.
...
# create a new_image (numpy array of 4 channels, size can be
# different). The resulting image may have offsets from
# lower-left corner of the original image
return new_image, offset_x, offset_y
The saved renderer is restored and the returned image from
post_processing is plotted (using draw_image) on it.
"""
width, height = int(self.width), int(self.height)
buffer, (l, b, w, h) = self.tostring_rgba_minimized()
self._renderer = self._filter_renderers.pop()
self._update_methods()
if w > 0 and h > 0:
img = np.frombuffer(buffer, np.uint8)
img, ox, oy = post_processing(img.reshape((h, w, 4)) / 255.,
self.dpi)
gc = self.new_gc()
if img.dtype.kind == 'f':
img = np.asarray(img * 255., np.uint8)
img = img[::-1]
self._renderer.draw_image(gc, l + ox, height - b - h + oy, img)
class FigureCanvasAgg(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Attributes
----------
figure : `matplotlib.figure.Figure`
A high-level Figure instance
"""
def copy_from_bbox(self, bbox):
renderer = self.get_renderer()
return renderer.copy_from_bbox(bbox)
def restore_region(self, region, bbox=None, xy=None):
renderer = self.get_renderer()
return renderer.restore_region(region, bbox, xy)
def draw(self):
"""
Draw the figure using the renderer.
"""
self.renderer = self.get_renderer(cleared=True)
# Acquire a lock on the shared font cache.
with RendererAgg.lock, \
(self.toolbar._wait_cursor_for_draw_cm() if self.toolbar
else nullcontext()):
self.figure.draw(self.renderer)
        # A GUI class may need to update a window using this draw, so
# don't forget to call the superclass.
super().draw()
def get_renderer(self, cleared=False):
l, b, w, h = self.figure.bbox.bounds
key = w, h, self.figure.dpi
reuse_renderer = (hasattr(self, "renderer")
and getattr(self, "_lastKey", None) == key)
if not reuse_renderer:
self.renderer = RendererAgg(w, h, self.figure.dpi)
self._lastKey = key
elif cleared:
self.renderer.clear()
return self.renderer
def tostring_rgb(self):
"""Get the image as an RGB byte string.
`draw` must be called at least once before this function will work and
to update the renderer for any subsequent changes to the Figure.
Returns
-------
bytes
"""
return self.renderer.tostring_rgb()
def tostring_argb(self):
"""Get the image as an ARGB byte string.
`draw` must be called at least once before this function will work and
to update the renderer for any subsequent changes to the Figure.
Returns
-------
bytes
"""
return self.renderer.tostring_argb()
def buffer_rgba(self):
"""Get the image as a memoryview to the renderer's buffer.
`draw` must be called at least once before this function will work and
to update the renderer for any subsequent changes to the Figure.
Returns
-------
memoryview
"""
return self.renderer.buffer_rgba()
def print_raw(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
with cbook.open_file_cm(filename_or_obj, "wb") as fh:
fh.write(renderer.buffer_rgba())
print_rgba = print_raw
def print_png(self, filename_or_obj, *args,
metadata=None, pil_kwargs=None,
**kwargs):
"""
Write the figure to a PNG file.
Parameters
----------
filename_or_obj : str or PathLike or file-like object
The file to write to.
metadata : dict, optional
Metadata in the PNG file as key-value pairs of bytes or latin-1
encodable strings.
According to the PNG specification, keys must be shorter than 79
chars.
The `PNG specification`_ defines some common keywords that may be
used as appropriate:
- Title: Short (one line) title or caption for image.
- Author: Name of image's creator.
- Description: Description of image (possibly long).
- Copyright: Copyright notice.
- Creation Time: Time of original image creation
(usually RFC 1123 format).
- Software: Software used to create the image.
- Disclaimer: Legal disclaimer.
- Warning: Warning of nature of content.
- Source: Device used to create the image.
- Comment: Miscellaneous comment;
conversion from other image format.
Other keywords may be invented for other purposes.
If 'Software' is not given, an autogenerated value for matplotlib
will be used.
For more details see the `PNG specification`_.
.. _PNG specification: \
https://www.w3.org/TR/2003/REC-PNG-20031110/#11keywords
pil_kwargs : dict, optional
If set to a non-None value, use Pillow to save the figure instead
of Matplotlib's builtin PNG support, and pass these keyword
arguments to `PIL.Image.save`.
If the 'pnginfo' key is present, it completely overrides
*metadata*, including the default 'Software' key.
"""
from matplotlib import _png
if metadata is None:
metadata = {}
default_metadata = {
"Software":
f"matplotlib version{__version__}, http://matplotlib.org/",
}
FigureCanvasAgg.draw(self)
if pil_kwargs is not None:
from PIL import Image
from PIL.PngImagePlugin import PngInfo
# Only use the metadata kwarg if pnginfo is not set, because the
# semantics of duplicate keys in pnginfo is unclear.
if "pnginfo" in pil_kwargs:
if metadata:
cbook._warn_external("'metadata' is overridden by the "
"'pnginfo' entry in 'pil_kwargs'.")
else:
pnginfo = PngInfo()
for k, v in {**default_metadata, **metadata}.items():
pnginfo.add_text(k, v)
pil_kwargs["pnginfo"] = pnginfo
pil_kwargs.setdefault("dpi", (self.figure.dpi, self.figure.dpi))
(Image.fromarray(np.asarray(self.buffer_rgba()))
.save(filename_or_obj, format="png", **pil_kwargs))
else:
renderer = self.get_renderer()
with cbook.open_file_cm(filename_or_obj, "wb") as fh:
_png.write_png(renderer._renderer, fh, self.figure.dpi,
metadata={**default_metadata, **metadata})
def print_to_buffer(self):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
return (bytes(renderer.buffer_rgba()),
(int(renderer.width), int(renderer.height)))
if _has_pil:
# Note that these methods should typically be called via savefig() and
# print_figure(), and the latter ensures that `self.figure.dpi` already
# matches the dpi kwarg (if any).
@cbook._delete_parameter("3.2", "dryrun")
def print_jpg(self, filename_or_obj, *args, dryrun=False,
pil_kwargs=None, **kwargs):
"""
Write the figure to a JPEG file.
Parameters
----------
filename_or_obj : str or PathLike or file-like object
The file to write to.
Other Parameters
----------------
quality : int
The image quality, on a scale from 1 (worst) to 100 (best).
The default is :rc:`savefig.jpeg_quality`. Values above
95 should be avoided; 100 completely disables the JPEG
quantization stage.
optimize : bool
If present, indicates that the encoder should
make an extra pass over the image in order to select
optimal encoder settings.
progressive : bool
If present, indicates that this image
should be stored as a progressive JPEG file.
pil_kwargs : dict, optional
Additional keyword arguments that are passed to
`PIL.Image.save` when saving the figure. These take precedence
over *quality*, *optimize* and *progressive*.
"""
FigureCanvasAgg.draw(self)
if dryrun:
return
# The image is pasted onto a white background image to handle
# transparency.
image = Image.fromarray(np.asarray(self.buffer_rgba()))
background = Image.new('RGB', image.size, "white")
background.paste(image, image)
if pil_kwargs is None:
pil_kwargs = {}
for k in ["quality", "optimize", "progressive"]:
if k in kwargs:
pil_kwargs.setdefault(k, kwargs[k])
pil_kwargs.setdefault("quality", rcParams["savefig.jpeg_quality"])
pil_kwargs.setdefault("dpi", (self.figure.dpi, self.figure.dpi))
return background.save(
filename_or_obj, format='jpeg', **pil_kwargs)
print_jpeg = print_jpg
@cbook._delete_parameter("3.2", "dryrun")
def print_tif(self, filename_or_obj, *args, dryrun=False,
pil_kwargs=None, **kwargs):
FigureCanvasAgg.draw(self)
if dryrun:
return
if pil_kwargs is None:
pil_kwargs = {}
pil_kwargs.setdefault("dpi", (self.figure.dpi, self.figure.dpi))
return (Image.fromarray(np.asarray(self.buffer_rgba()))
.save(filename_or_obj, format='tiff', **pil_kwargs))
print_tiff = print_tif
@_Backend.export
class _BackendAgg(_Backend):
FigureCanvas = FigureCanvasAgg
FigureManager = FigureManagerBase
|
[
"kitae.yoon@deliveryhero.co.kr"
] |
kitae.yoon@deliveryhero.co.kr
|
583466431748d71c10e4768b2295e9e980422200
|
10d77a1bca1358738179185081906956faf3963a
|
/venv/Lib/site-packages/django/core/mail/backends/filebased.py
|
f01e1497dbcc6dc15a7cf45416368b8606f613a2
|
[] |
no_license
|
ekansh18/WE_Care_NGO_WEBSITE
|
3eb6b12ae798da26aec75d409b0b92f7accd6c55
|
7c1eaa78d966d13893c38e7157744fbf8f377e71
|
refs/heads/master
| 2023-07-16T07:22:48.920429
| 2021-08-31T04:11:19
| 2021-08-31T04:11:19
| 401,563,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,727
|
py
|
"""Email backend that writes messages to a file."""
import datetime
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail.backends.console import (
EmailBackend """Email backend that writes messages to a file."""
import datetime
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail.backends.console import (
EmailBackend as ConsoleEmailBackend,
)
class EmailBackend(ConsoleEmailBackend):
def __init__(self, *args, file_path=None, **kwargs):
self._fname = None
if file_path is not None:
self.file_path = file_path
else:
self.file_path = getattr(settings, 'EMAIL_FILE_PATH', None)
# Make sure self.file_path is a string.
if not isinstance(self.file_path, str):
raise ImproperlyConfigured('Path for saving emails is invalid: %r' % self.file_path)
self.file_path = os.path.abspath(self.file_path)
try:
os.makedirs(self.file_path, exist_ok=True)
except FileExistsError:
raise ImproperlyConfigured(
'Path for saving email messages exists, but is not a directory: %s' % self.file_path
)
except OSError as err:
raise ImproperlyConfigured(
'Could not create directory for saving email messages: %s (%s)' % (self.file_path, err)
)
# Make sure that self.file_path is writable.
if not os.access(self.file_path, os.W_OK):
raise ImproperlyConfigured('Could not write to directory: %s' % self.file_path)
# Finally, call super().
# Since we're using the console-based backend as a base,
# force the stream to be None, so we don't default to stdout
kwargs['stream'] = None
super().__init__(*args, **kwargs)
def write_message(self, message):
self.stream.write(message.message().as_bytes() + b'\n')
self.stream.write(b'-' * 79)
self.stream.write(b'\n')
def _get_filename(self):
"""Return a unique file name."""
if self._fname is None:
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
fname = "%s-%s.log" % (timestamp, abs(id(self)))
self._fname = os.path.join(self.file_path, fname)
return self._fname
def open(self):
if self.stream is None:
self.stream = open(self._get_filename(), 'ab')
return True
return False
def close(self):
try:
if self.stream is not None:
self.stream.close()
finally:
self.stream = None
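# Typical configuration in settings.py for this backend:
#   EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
#   EMAIL_FILE_PATH = '/tmp/app-messages'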
|
[
"ekansh00@gmail.com"
] |
ekansh00@gmail.com
|
b77167d258ce02e04bdda1ea6a83707259bbe0f7
|
80e701c5b9c03ef288848d8b368360e0940d9b67
|
/sleyeball/files.py
|
51db5f0a7dda0c4166dceea14a6d3bc400d4b819
|
[] |
no_license
|
esheldon/sleyeball
|
a4917300b041747e0600186f0e596c6d83a95ff4
|
9eee500119d2bc07c942350a67c8777257e92a3d
|
refs/heads/master
| 2020-06-20T05:22:59.921610
| 2019-07-17T18:38:29
| 2019-07-17T18:38:29
| 197,008,586
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,756
|
py
|
import os
def get_base_dir():
"""
base directory
"""
return os.environ['SLDIR']
def get_cand_dir():
"""
we keep lists here
"""
return os.path.join(get_base_dir(), 'candidates')
def get_cand_file_orig():
"""
holds paths to coadds
"""
d = get_cand_dir()
return os.path.join(d, 'z4ErinSheldon.fits')
def get_cand_file():
"""
holds paths to coadds
"""
d = get_cand_dir()
return os.path.join(d, 'z4ErinSheldon-clean.fits')
def get_badreg_dir():
"""
we keep lists here
"""
return os.path.join(get_base_dir(), 'badregions')
def get_badreg_file():
"""
holds paths to coadds
"""
d = get_badreg_dir()
return os.path.join(d, 'y3a2_foreground_mask_v2.1.fits.gz')
def get_stamp_dir(tilename):
"""
location for the image and temp files
"""
return os.path.join(
get_base_dir(),
'stamps',
tilename,
)
def get_temp_dir():
"""
location for the image and temp files
"""
return os.environ['TMPDIR']
def get_stamp_file(tilename, number):
"""
    location of an output stamp file
"""
odir = get_stamp_dir(tilename)
fname = '%s-%06d.jpg' % (tilename, number)
return os.path.join(odir, fname)
#
# batch processing
#
def get_script_dir():
"""
location for scripts
"""
return os.path.join(get_base_dir(), 'scripts')
def get_script_file(tilename):
"""
location for scripts
"""
d = get_script_dir()
fname = '%s.sh' % tilename
return os.path.join(d, fname)
def get_wq_file(tilename, missing=False):
"""
    location of the wq submit file for a tile
"""
fname = '%s.yaml' % tilename
d = get_script_dir()
return os.path.join(d, fname)
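# All paths are rooted at the SLDIR environment variable, e.g. (hypothetical tile):
#   get_stamp_file('DES0001-0001', 12)
#   -> $SLDIR/stamps/DES0001-0001/DES0001-0001-000012.jpg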
|
[
"erin.sheldon@gmail.com"
] |
erin.sheldon@gmail.com
|
3d0f5f9fa5889a3b0c4e7e33c56710be4c790b92
|
03f82e74d54399eff2282e853ffacf4a29722ca4
|
/2.py
|
323c7ed69b8ae0730cb39c2e08f0ee9a39167d3a
|
[] |
no_license
|
SerdarKuliev/proj1
|
d7eff875ca7788e7e391b417bcab5ba7ed9975b1
|
07c3a3f9069b512378e78db5caedd0c73c39e2df
|
refs/heads/dev
| 2022-12-19T06:50:26.810610
| 2020-09-29T12:34:45
| 2020-09-29T12:34:45
| 265,944,603
| 0
| 0
| null | 2020-06-13T00:53:17
| 2020-05-21T20:25:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,644
|
py
|
m1 = int(input("Введите выручку за январь 20 года."))
m2 = int(input("Введите выручку за февраль 20 года."))
m3 = int(input("Введите выручку за март 20 года."))
n1 = int(input("Введите расходы за январь 20 года."))
n2 = int(input("Введите расходы за февраль 20 года."))
n3 = int(input("Введите расходы за март 20 года."))
C1 = int(input("Введите среднее колчество сотрудников, с января по март"))
R1 = int(m1-n1)
R2 = int(m2-n2)
R3 = int(m3-n3)
S1 = int(m1/n1*100)
S2 = int(m2/n2*100)
S3 = int(m3/n3*100)
X1 = int(R1/C1)
X2 = int(R2/C1)
X3 = int(R3/C1)
R1 = str(R1)
R2 = str(R2)
R3 = str(R3)
X1 = str(X1)
X2 = str(X2)
X3 = str(X3)
print("Теперь пойдем по порядку.")
if (m1>n1):
print("Заработали в январе " + (R1) + " руб." + ("%.2f" % S1) + "%")
else: print("Янваь принес убытки, " +(R1)+ " руб." )
if (m2>n2):
print("Заработали в феврале " +(R2)+ " руб." +("%.2f" %S2)+ "%")
else: print("Февраль принес убытки, " +(R2)+ " руб." )
if (m3>n3):
print("Заработали в марте " +(R3)+ " руб." +("%.2f" %S3)+ "%")
else: print("Март принес убытки, " +(R3)+ " руб." )
print("В среднем, доход на одного оструднка составил: "
+(X1)+ "руб. в январе."
+(X2)+ "руб. в феврале."
+(X3)+ "руб. в марте.")
print("Ок?")
|
[
"“Serdarkuliev@gmail.com”"
] |
“Serdarkuliev@gmail.com”
|
b3effc169e3db184f1082b87601f29458d399353
|
38952ab2dd82309b7ef2b510a69084f9e0945921
|
/shuake/venv/bin/easy_install
|
eb458e8af0336cd3e6c931240e2437b961f90561
|
[] |
no_license
|
zicarb/YorkCoursebro
|
6c88fdf66c852defbd7e26e204c7715f8384e16d
|
ac5d4aba8320d7c73c6bd2bdc9d8dbbb4e1c8dea
|
refs/heads/master
| 2020-03-19T19:47:56.649018
| 2019-02-13T04:53:50
| 2019-02-13T04:53:50
| 136,874,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
#!/Users/zombie/PycharmProjects/shuake/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
|
[
"zicarb09@gmail.com"
] |
zicarb09@gmail.com
|
|
a67af589f2fcef2b0ce235cad9572f2f6497b238
|
e96434fd49285d66cf926730f3b5c8ae2e51ee77
|
/org/cyy/fw/piedis/Client.py
|
b07f56a2fd4a05c6239d0a59031c2d9fb1a55889
|
[] |
no_license
|
chenyihan/piedis
|
2d213f2b06ef151803b7b8a9d9c253429fbf41c4
|
2649d71c5e6d599fa33864e5ca82bdce23fede6f
|
refs/heads/master
| 2021-01-20T12:37:28.735144
| 2015-12-17T15:42:16
| 2015-12-17T15:42:16
| 39,572,341
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
Python
| false
| false
| 23,872
|
py
|
# -*- coding: GBK -*-
'''
Created on 2015-07-08
@author: yunyun
'''
import socket
from org.cyy.fw.piedis import RedisProtocol, Command, RedisKeyword, Response
from org.cyy.fw.piedis.Command import BinaryCommand
from org.cyy.fw.piedis.Server import ServerNode
from _sha1 import sha1
class PiedisClient:
def __init__(self):
self.isConnect = False
self.sock = None
self.serverNode = ServerNode()
self.connectTimeout = 0
def setHost(self, host):
self.serverNode.setHost(host)
return self
def setPort(self, port):
self.serverNode.setPort(port)
return self
def setConnectTimeout(self, timeout):
self.connectTimeout = timeout
def dump(self, key):
resp = self.sendCommand(Command.DUMP, key)
return resp
def expire(self, key, seconds):
resp = self.sendCommand(Command.EXPIRE, key, str(seconds))
return Response.toInt(resp) == 1
def expireAt(self, key, ts):
resp = self.sendCommand(Command.EXPIREAT, key, str(ts))
return Response.toInt(resp) == 1
def migrate(self, key, destHost, destPort, destDb, timeout):
resp = self.sendCommand(Command.MIGRATE, key, str(destHost), str(destPort), str(destDb), str(timeout))
return Response.toStr(resp)
def move(self, key, destDb):
resp = self.sendCommand(Command.MOVE, key, str(destDb))
return Response.toInt(resp) == 1
def objectRefcount(self, key):
resp = self.sendCommand(Command.OBJECT, RedisKeyword.REFCOUNT, key)
return Response.toInt(resp)
def objectIdletime(self, key):
resp = self.sendCommand(Command.OBJECT, RedisKeyword.IDLETIME, key)
return Response.toInt(resp)
def objectEncoding(self, key):
resp = self.sendCommand(Command.OBJECT, RedisKeyword.ENCODING, key)
return Response.toStr(resp)
def persist(self, key):
resp = self.sendCommand(Command.PERSIST, key)
return Response.toInt(resp) == 1
def pExpire(self, key, millSeconds):
resp = self.sendCommand(Command.PEXPIRE, key, str(millSeconds))
return Response.toInt(resp) == 1
def pExpireAt(self, key, millTs):
resp = self.sendCommand(Command.PEXPIREAT, key, str(millTs))
return Response.toInt(resp) == 1
def pTTL(self, key):
resp = self.sendCommand(Command.PTTL, key)
return Response.toInt(resp)
def randomKey(self):
resp = self.sendCommand(Command.RANDOMKEY)
return Response.toStr(resp)
def rename(self, key, newKey):
resp = self.sendCommand(Command.RENAME, key, newKey)
return Response.toStr(resp)
def renameNX(self, key, newKey):
resp = self.sendCommand(Command.RENAMENX, key, newKey)
return Response.toInt(resp) == 1
def restore(self, key, serialValue, millTTL, replace):
if replace :
resp = self.sendCommand(Command.RESTORE, key, str(millTTL), serialValue, RedisKeyword.REPLACE)
return Response.toStr(resp)
else:
resp = self.sendCommand(Command.RESTORE, key, str(millTTL), serialValue)
return Response.toStr(resp)
def sort(self, key, params, destKey):
args = []
args.append(key)
if params != None:
args += params.getParams()
if destKey != None:
args.append(RedisKeyword.STORE)
args.append(destKey)
resp = self.sendCommand(Command.SORT, *tuple(args))
return Response.toTuple(resp)
def ttl(self, key):
resp = self.sendCommand(Command.TTL, key)
return Response.toInt(resp)
def type(self, key):
resp = self.sendCommand(Command.TYPE, key)
return Response.toStr(resp)
def scan(self, cursor, params):
args = (str(cursor),)
if params != None:
args = args + params.getParams()
resp = self.sendCommand(Command.SCAN, *args)
return Response.toScanResult(resp)
def set(self, key, value):
resp = self.sendCommand(Command.SET, key, str(value))
return Response.toStr(resp)
def get(self, key):
resp = self.sendCommand(Command.GET, key)
return Response.toStr(resp)
def Del(self, key, *moreKeys):
resp = self.sendCommand(Command.DEL, key, *moreKeys)
return Response.toInt(resp)
def keys(self, pattern):
resp = self.sendCommand(Command.KEYS, pattern)
return Response.toTuple(resp)
def exists(self, key):
resp = self.sendCommand(Command.EXISTS, key)
return Response.toInt(resp) == 1
def append(self, key, value):
resp = self.sendCommand(Command.APPEND, key, value)
return Response.toInt(resp)
def setBit(self, key, offset, value):
resp = self.sendCommand(Command.SETBIT, key, str(offset), value)
return Response.toInt(resp)
def getBit(self, key, offset):
resp = self.sendCommand(Command.GETBIT, key, str(offset))
return Response.toInt(resp)
def bitCount(self, key, start, end):
args = [key]
if start != None:
args.append(str(start))
if end != None:
args.append(str(end))
resp = self.sendCommand(Command.BITCOUNT, *tuple(args))
return Response.toInt(resp)
def bitop(self, bitOP, destKey, srcKey, *moreSrcKeys):
resp = self.sendCommand(Command.BITOP, bitOP, destKey, srcKey, *moreSrcKeys)
return Response.toInt(resp)
def incr(self, key):
resp = self.sendCommand(Command.INCR, key)
return Response.toInt(resp)
def incrBy(self, key, increment):
resp = self.sendCommand(Command.INCRBY, key, str(increment))
return Response.toInt(resp)
def incrByFloat(self, key, increment):
resp = self.sendCommand(Command.INCRBYFLOAT, key, str(increment))
return Response.toFloat(resp)
def decr(self, key):
resp = self.sendCommand(Command.DECR, key)
return Response.toInt(resp)
def decrBy(self, key, increment):
resp = self.sendCommand(Command.DECRBY, key, str(increment))
return Response.toInt(resp)
def getRange(self, key, start, end):
resp = self.sendCommand(Command.GETRANGE, key, str(start), str(end))
return Response.toStr(resp)
def getSet(self, key, value):
resp = self.sendCommand(Command.GETSET, key, value)
return Response.toStr(resp)
def mSet(self, keyValuePair, *moreKeyValuePair):
args = Command.combineArgs(keyValuePair, *moreKeyValuePair)
resp = self.sendCommand(Command.MSET, *args)
return Response.toStr(resp)
def mGet(self, key, *moreKeys):
resp = self.sendCommand(Command.MGET, key, *moreKeys)
return Response.toTuple(resp)
def mSetNX(self, keyValuePair, *moreKeyValuePair):
args = Command.combineArgs(keyValuePair, *moreKeyValuePair)
resp = self.sendCommand(Command.MSETNX, *args)
return Response.toInt(resp) == 1
def setNX(self, key, value):
resp = self.sendCommand(Command.SETNX, key, value)
return Response.toInt(resp) == 1
def setEX(self, key, seconds, value):
resp = self.sendCommand(Command.SETEX, key, str(seconds), value)
return Response.toStr(resp)
def pSetEX(self, key, millSeconds, value):
resp = self.sendCommand(Command.PSETEX, key, str(millSeconds), value)
return Response.toStr(resp)
def setRange(self, key, offset, value):
resp = self.sendCommand(Command.SETRANGE, key, str(offset), value)
return Response.toInt(resp)
def strLen(self, key):
resp = self.sendCommand(Command.STRLEN, key)
return Response.toInt(resp)
def hSet(self, key, field, value):
resp = self.sendCommand(Command.HSET, key, field, value)
return Response.toInt(resp) == 1
def hSetNX(self, key, field, value):
resp = self.sendCommand(Command.HSETNX, key, field, value)
return Response.toInt(resp) == 1
def hGet(self, key, field):
resp = self.sendCommand(Command.HGET, key, field)
return Response.toStr(resp)
def hGetAll(self, key):
resp = self.sendCommand(Command.HGETALL, key)
return Response.toDict(resp)
def hDel(self, key, field):
resp = self.sendCommand(Command.HDEL, key, field)
return Response.toInt(resp)
def hExist(self, key, field):
resp = self.sendCommand(Command.HEXISTS, key, field)
return Response.toInt(resp) == 1
def hIncrBy(self, key, field, increment):
resp = self.sendCommand(Command.HINCRBY, key, field, str(increment))
return Response.toInt(resp)
def hIncrByFloat(self, key, field, increment):
resp = self.sendCommand(Command.HINCRBYFLOAT, key, field, str(increment))
return Response.toFloat(resp)
def hKeys(self, key):
resp = self.sendCommand(Command.HKEYS, key)
return Response.toTuple(resp)
def hVals(self, key):
resp = self.sendCommand(Command.HVALS, key)
return Response.toTuple(resp)
def hLen(self, key):
resp = self.sendCommand(Command.HLEN, key)
return Response.toInt(resp)
def hMSet(self, key, keyValuePair, *moreKeyValuePairs):
args = Command.combineArgs(keyValuePair, *moreKeyValuePairs)
resp = self.sendCommand(Command.HMSET, key, *args)
return Response.toStr(resp)
def hMGet(self, key, field, *moreFields):
resp = self.sendCommand(Command.HMGET, key, field, *moreFields)
return Response.toTuple(resp)
'''
    The hscan command seems to be buggy
'''
def hScan(self, key, cursor, scanParams):
args = (key, cursor)
if scanParams != None:
args += scanParams.getParams()
resp = self.sendCommand(Command.HSCAN, *args)
return Response.toHashScanResult(resp)
def lPush(self, key, value, *moreValues):
resp = self.sendCommand(Command.LPUSH, key, value, *moreValues)
return Response.toInt(resp)
def lPushX(self, key, value):
resp = self.sendCommand(Command.LPUSHX, key, value)
return Response.toInt(resp)
def lPop(self, key):
resp = self.sendCommand(Command.LPOP, key)
return Response.toStr(resp)
def rPush(self, key, value, *moreValues):
resp = self.sendCommand(Command.RPUSH, key, value, *moreValues)
return Response.toInt(resp)
def rPushX(self, key, value):
resp = self.sendCommand(Command.RPUSHX, key, value)
return Response.toInt(resp)
def rPop(self, key):
resp = self.sendCommand(Command.RPOP, key)
return Response.toStr(resp)
def blPop(self, timeout, key, *morekeys):
args = (key,)
args += morekeys
args += (timeout,)
resp = self.sendCommand(Command.BLPOP, *args)
return Response.toTuple(resp)
def brPop(self, timeout, key, *morekeys):
args = (key,)
args += morekeys
args += (timeout,)
resp = self.sendCommand(Command.BRPOP, *args)
return Response.toTuple(resp)
def lRange(self, key, start, end):
resp = self.sendCommand(Command.LRANGE, key, str(start), str(end))
return Response.toTuple(resp)
def rPopLPush(self, sourceKey, destKey):
resp = self.sendCommand(Command.RPOPLPUSH, sourceKey, destKey)
return Response.toStr(resp)
def bRPopLPush(self, sourceKey, destKey, timeout):
resp = self.sendCommand(Command.BRPOPLPUSH, sourceKey, destKey, str(timeout))
return Response.toStr(resp)
def lIndex(self, key, index):
resp = self.sendCommand(Command.LINDEX, key, str(index))
return Response.toStr(resp)
def lInsert(self, key, value, pivot, isBefore):
extraParam = RedisKeyword.BEFORE
if not isBefore:
extraParam = RedisKeyword.AFTER
resp = self.sendCommand(Command.LINSERT, key, extraParam, pivot, value)
return Response.toInt(resp)
def lLen(self, key):
resp = self.sendCommand(Command.LLEN, key)
return Response.toInt(resp)
def lREM(self, key, value, count):
resp = self.sendCommand(Command.LREM, key, count, value)
return Response.toInt(resp)
def lSet(self, key, index, value):
resp = self.sendCommand(Command.LSET, key, index, value)
return Response.toStr(resp)
def lTrim(self, key, start, end):
resp = self.sendCommand(Command.LTRIM, key, str(start), str(end))
return Response.toStr(resp)
def sAdd(self, key, member, *moreMember):
resp = self.sendCommand(Command.SADD, key, member, *moreMember)
return Response.toInt(resp)
def sCard(self, key):
resp = self.sendCommand(Command.SCARD, key)
return Response.toInt(resp)
def sPop(self, key):
resp = self.sendCommand(Command.SPOP, key)
return Response.toStr(resp)
def sRandMember(self, key, count):
resp = self.sendCommand(Command.SRANDMEMBER, key, str(count))
return Response.toTuple(resp)
def sMembers(self, key):
resp = self.sendCommand(Command.SMEMBERS, key)
return Response.toTuple(resp)
def sDiff(self, key, *moreKeys):
resp = self.sendCommand(Command.SDIFF, key, *moreKeys)
return Response.toTuple(resp)
def sDiffStore(self, destKey, key, *moreKeys):
resp = self.sendCommand(Command.SDIFFSTORE, destKey, key, *moreKeys)
return Response.toInt(resp)
def sInter(self, key, *moreKeys):
resp = self.sendCommand(Command.SINTER, key, *moreKeys)
return Response.toTuple(resp)
def sInterStore(self, destKey, key, *moreKeys):
resp = self.sendCommand(Command.SINTERSTORE, destKey, key, *moreKeys)
return Response.toInt(resp)
def sisMember(self, key, member):
resp = self.sendCommand(Command.SISMEMBER, key, member)
return Response.toInt(resp) == 1
def sRem(self, key, member, *moreMembers):
resp = self.sendCommand(Command.SREM, key, member, *moreMembers)
return Response.toInt(resp)
def sMove(self, source, destination, member):
resp = self.sendCommand(Command.SMOVE, source, destination, member)
return Response.toInt(resp) == 1
def sUnion(self, key, *moreKeys):
resp = self.sendCommand(Command.SUNION, key, *moreKeys)
return Response.toTuple(resp)
def sUnionStore(self, destKey, key, *moreKeys):
resp = self.sendCommand(Command.SUNIONSTORE, destKey, key, *moreKeys)
return Response.toInt(resp)
def sScan(self, key, cursor, scanParams):
args = (key, cursor)
if scanParams != None:
args = args + scanParams.getParams()
resp = self.sendCommand(Command.SSCAN, *args)
return Response.toStringScanResult(resp)
def zAdd(self, key, scoreMember, *moreScoreMember):
args = Command.combineArgsByScoreMember(scoreMember, *moreScoreMember)
resp = self.sendCommand(Command.ZADD, key, *args)
return Response.toInt(resp)
def zCard(self, key):
resp = self.sendCommand(Command.ZCARD, key)
return Response.toInt(resp)
def zCount(self, key, Min, Max):
resp = self.sendCommand(Command.ZCOUNT, key, str(Min), str(Max))
return Response.toInt(resp)
def zIncrBy(self, key, member, increment):
resp = self.sendCommand(Command.ZINCRBY, key, increment, member)
return Response.toFloat(resp)
def zRange(self, key, start, end):
resp = self.sendCommand(Command.ZRANGE, key, str(start), str(end))
return Response.toTuple(resp)
def zRangeWithScores(self, key, start, end):
resp = self.sendCommand(Command.ZRANGE, key, str(start), str(end), RedisKeyword.WITHSCORES)
return Response.toScoreMember(resp)
def zRangeByScore(self, key, Min, Max):
resp = self.sendCommand(Command.ZRANGEBYSCORE, key, str(Min), str(Max))
return Response.toTuple(resp)
def zRangeByScoreWithOffset(self, key, Min, Max, offset, count):
resp = self.sendCommand(Command.ZRANGEBYSCORE, key, str(Min), str(Max), RedisKeyword.LIMIT, str(offset), str(count))
return Response.toTuple(resp)
def zRangeByScoreWithScores(self, key, Min, Max):
resp = self.sendCommand(Command.ZRANGEBYSCORE, key, str(Min), str(Max), RedisKeyword.WITHSCORES)
return Response.toScoreMember(resp)
def zRangeByScoreWithScoresByOffset(self, key, Min, Max, offset, count):
resp = self.sendCommand(Command.ZRANGEBYSCORE, key, str(Min), str(Max), RedisKeyword.WITHSCORES, RedisKeyword.LIMIT, str(offset), str(count))
return Response.toScoreMember(resp)
def zRank(self, key, member):
resp = self.sendCommand(Command.ZRANK, key, member)
return Response.toInt(resp)
def zRem(self, key, member, *moreMembers):
resp = self.sendCommand(Command.ZREM, key, member, *moreMembers)
return Response.toInt(resp)
def zRemRangeByRank(self, key, start, end):
resp = self.sendCommand(Command.ZREMRANGEBYRANK, key, str(start), str(end))
return Response.toInt(resp)
def zRemRangeByScore(self, key, Min, Max):
resp = self.sendCommand(Command.ZREMRANGEBYSCORE, key, str(Min), str(Max))
return Response.toInt(resp)
def zRevRange(self, key, start, end):
resp = self.sendCommand(Command.ZREVRANGE, key, str(start), str(end))
return Response.toTuple(resp)
def zRevRangeWithScores(self, key, start, end):
resp = self.sendCommand(Command.ZREVRANGE, key, str(start), str(end), RedisKeyword.WITHSCORES)
return Response.toScoreMember(resp)
def zRevRangeByScore(self, key, Max, Min):
resp = self.sendCommand(Command.ZREVRANGEBYSCORE, key, str(Max), str(Min))
return Response.toTuple(resp)
def zRevRangeByScoreByOffset(self, key, Max, Min, offset, count):
resp = self.sendCommand(Command.ZREVRANGEBYSCORE, key, str(Max), str(Min), RedisKeyword.LIMIT, str(offset), str(count))
return Response.toTuple(resp)
def zRevRangeByScoreWithScores(self, key, Max, Min):
resp = self.sendCommand(Command.ZREVRANGEBYSCORE, key, str(Max), str(Min), RedisKeyword.WITHSCORES)
return Response.toScoreMember(resp)
def zRevRangeByScoreWithScoresByOffset(self, key, Max, Min, offset, count):
resp = self.sendCommand(Command.ZREVRANGEBYSCORE, key, str(Max), str(Min), RedisKeyword.WITHSCORES, RedisKeyword.LIMIT, str(offset), str(count))
return Response.toScoreMember(resp)
def zRevRank(self, key, member):
resp = self.sendCommand(Command.ZREVRANK, key, member)
return Response.toInt(resp)
def zScore(self, key, member):
resp = self.sendCommand(Command.ZSCORE, key, member)
return Response.toFloat(resp)
def zUnionStore(self, destKey, params, key, *moreKeys):
args = (destKey,)
numberKeys = 1 + len(moreKeys)
args += (numberKeys, key)
args += moreKeys
if params != None:
args += params.getParams()
resp = self.sendCommand(Command.ZUNIONSTORE, *args)
return Response.toInt(resp)
def zInterStore(self, destKey, params, key, *moreKeys):
args = (destKey,)
numberKeys = 1 + len(moreKeys)
args += (numberKeys, key)
args += moreKeys
if params != None:
args += params.getParams()
resp = self.sendCommand(Command.ZINTERSTORE, *args)
return Response.toInt(resp)
'''
    The zscan command seems to be buggy
'''
def zScan(self, key, cursor, scanParams):
args = (key, cursor)
if scanParams != None:
args += scanParams.getParams()
resp = self.sendCommand(Command.ZSCAN, *args)
return Response.toScoreMemberScanResult(resp)
def select(self, index):
resp = self.sendCommand(Command.SELECT, str(index))
return Response.toStr(resp)
def auth(self, password):
resp = self.sendCommand(Command.AUTH, password)
return Response.toStr(resp)
def echo(self, message):
resp = self.sendCommand(Command.ECHO, message)
return Response.toStr(resp)
def ping(self):
resp = self.sendCommand(Command.PING)
return Response.toStr(resp)
def quit(self):
resp = self.sendCommand(Command.QUIT)
return Response.toStr(resp)
def flushAll(self):
resp = self.sendCommand(Command.FLUSHALL)
return Response.toStr(resp)
def flushDB(self):
resp = self.sendCommand(Command.FLUSHDB)
return Response.toStr(resp)
def eval(self, script, keyCount, *params):
resp = self.sendCommand(Command.EVAL, script, keyCount, *params)
return Response.toTuple(resp)
def evalSha(self, sha, keyCount, *params):
resp = self.sendCommand(Command.EVALSHA, sha, keyCount, *params)
return Response.toTuple(resp)
def scriptLoad(self, script):
resp = self.sendCommand(Command.SCRIPT, RedisKeyword.LOAD, script)
return Response.toStr(resp)
def scriptExist(self, script, *moreScripts):
resp = self.sendCommand(Command.SCRIPT, RedisKeyword.EXISTS, script, *moreScripts)
return Response.toTuple(resp)
def scriptFlush(self):
resp = self.sendCommand(Command.SCRIPT, RedisKeyword.FLUSH)
return Response.toStr(resp)
def scriptKill(self):
resp = self.sendCommand(Command.SCRIPT, RedisKeyword.KILL)
return Response.toStr(resp)
def sendCommand(self, command, *args):
message = BinaryCommand(command, *args)
data = RedisProtocol.generateRequestData(message)
self.connect()
# print('send command:')
# print(Decoder.decodeData(data))
        try:
            self.sock.send(data)
        except socket.error:
            # reconnect and resend; return the retried result rather than
            # falling through to read from the dead socket below
            self.isConnect = False
            return self.sendCommand(command, *args)
resp = RedisProtocol.parseResponse(self.sock)
print('response:', resp)
return resp
def connect(self):
if self.isConnect:
            return
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.connectTimeout > 0:
self.sock.settimeout(self.connectTimeout)
self.sock.connect((self.serverNode.getHost(), self.serverNode.getPort()))
self.isConnect = True
def close(self):
if not self.isConnect:
            return
self.isConnect = False
self.sock.close()
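# Usage sketch (illustrative only; `Client` and `ServerNode` are placeholders
# for the actual client and server-node class names defined earlier in this module):
#   c = Client(ServerNode('127.0.0.1', 6379))
#   c.zRem('board', 'alice')                       # remove a member from a sorted set
#   print(c.zRevRangeWithScores('board', 0, 9))    # top-10 members with scores
#   print(c.ping())                                # 'PONG' on a healthy connection
#   c.close()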
|
[
"348800349@qq.com"
] |
348800349@qq.com
|
58f90438673edd09c98d1e6d02b198892e98435b
|
62633b4b273cdd264d9669b69d54a06064d656e1
|
/backend_app/migrations/0001_initial.py
|
01365a9f5775a5af33e052007b855e6299833dfc
|
[] |
no_license
|
mrsamharuna/django_project
|
53784755e503c746315ccb7c12b98869de1e8953
|
bcd64474cc48689dd8330ac0f7754130092a7dda
|
refs/heads/master
| 2020-11-23T23:29:32.396272
| 2019-12-13T15:10:45
| 2019-12-13T15:10:45
| 227,864,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,743
|
py
|
# Generated by Django 2.1 on 2019-11-18 17:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
('slug', models.SlugField(max_length=250, unique=True)),
],
options={
'verbose_name': 'category',
'verbose_name_plural': 'categories',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post_title', models.CharField(max_length=50, verbose_name='Post Title')),
('content', models.TextField(blank=True, null=True, verbose_name='Content')),
('post_img', models.ImageField(blank=True, null=True, upload_to='uploads/post_img', verbose_name='Post Image')),
('published_date', models.DateTimeField(blank=True, default=django.utils.timezone.now, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Author')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='backend_app.Category')),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(default='', max_length=250)),
('first_name', models.CharField(default='', max_length=100)),
('last_name', models.CharField(default='', max_length=100)),
('image', models.ImageField(blank=True, upload_to='uploads/profile_image')),
('sex', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1)),
('role', models.CharField(choices=[('S', 'Super'), ('R', 'Regular')], max_length=1)),
('active', models.BooleanField(default=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"samsonharuna@yahoo.com"
] |
samsonharuna@yahoo.com
|
83884b6bc0bdd63b534e896ac4c6c081dac4c117
|
307a9a94091981325c6fcd0cfce5f1c1f12d5530
|
/basics.py
|
5aecef9d960db1559ff950b472048ee2fce2b4c9
|
[] |
no_license
|
88aleksandra88/python
|
a6b14eebef0d2848ce4ae32bd101cac6f816130e
|
1060f02e59d151e6bb1cff7fbc199df80c598e75
|
refs/heads/master
| 2022-12-29T07:17:45.106336
| 2020-10-09T14:14:57
| 2020-10-09T14:14:57
| 302,300,830
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 823
|
py
|
glass_1="water"
glass_2="soda"
temp=glass_1
glass_1=glass_2
glass_2=temp
print('glass_1='+str(glass_1))
#glass_1=soda
print('glass_2='+str(glass_2))
#glass_2=water
number_of_steps=70
print("You're on step:")
print("number_of_steps +1")
#kilometer convert
miles=500
kilometers=miles * 1.609344
print(kilometers)
#804.672
#switch light
is_day = False
lights_on = not is_day
print('Daytime?')
print(is_day)
print('Lights on ?')
print(lights_on)
#checking numbers equality
print(10==10)
#numbers inequality
print(1 != 10)
result = 1 != 2
print(result)
#track sales data
stock = 600
kitten_sold = 500
target =500
target_hit = kitten_sold == target
print('Hit kitten sale target: ')
print(target_hit)
current_stock = stock - kitten_sold
in_stock = current_stock != 0
print('Kittens in stock:')
print(in_stock)
|
[
"aleksandra.slowik88@gmail.com"
] |
aleksandra.slowik88@gmail.com
|
fedce837ffffa9807fa1d941d6e9277cdd7c1908
|
7484e6e9bdf9fc8181d986fe7ff301937352c962
|
/preparation.py
|
33bc9fe244e340fff4948b524966340a87b75efc
|
[] |
no_license
|
Avagr/JBReactGraph
|
1323b93424b3c4521598d88560767f533267c7e8
|
90873b47f094e6104423caa677578bd62ed43114
|
refs/heads/master
| 2023-04-15T07:14:26.869449
| 2021-04-11T16:51:19
| 2021-04-11T16:51:19
| 356,913,252
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,140
|
py
|
import requests
import json
from requests.models import HTTPError
url = "https://api.github.com/repos/facebook/react/stats/contributors"
# Function for encoding sets into json as lists
def set_default(obj):
if isinstance(obj, set):
return list(obj)
raise TypeError
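# For example (illustrative): json.dumps({'files': {'a.py', 'b.py'}}, default=set_default)
# serializes the set as a JSON list such as '{"files": ["a.py", "b.py"]}'
# (set iteration order is not guaranteed).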
# Prepares data and writes it to the disk
def prepare_data(token=None):
header = {}
if token:
header = {'Authorization': 'token %s' % token}
contributors = requests.get(url, headers=header)
if contributors.status_code != 200:
raise HTTPError(contributors.json())
authors = {} # A dictionary for storing all the relevant information
for cont in sorted(contributors.json(), key=lambda x: -x['total'])[:50]:
name_req = requests.get(cont['author']['url'], headers=header)
if name_req.status_code != 200:
raise HTTPError(name_req.json())
name = name_req.json()['name']
if not name: # If there is no name in github, use their login
name = name_req.json()['login']
if name == 'marocchino': # Cross-referenced this person using commit hashes
name = 'Shim Won'
authors[name] = {'total': cont['total'],
'files': set(),
'maxdate': -1,
# this unix time is in the far far future
'mindate': 99999999999}
with open('git_log.txt', 'r') as log:
for line in log:
if line.strip() == '':
continue
if line[0] == '$':
_, date, name = line.strip().split(maxsplit=2)
if name in authors:
# Searching for the earliest and the latest commit date
authors[name]['mindate'] = min(authors[name]['mindate'], int(date))
authors[name]['maxdate'] = max(authors[name]['maxdate'], int(date))
else:
if name in authors:
authors[name]['files'].add(line.strip())
with open('authors.json', 'w') as output:
json.dump(authors, output, default=set_default)
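# Usage sketch (illustrative; the log format is an assumption inferred from the
# parsing above): prepare_data('<personal access token>') expects a git_log.txt
# whose commit header lines look like "$ <unix timestamp> <author name>",
# followed by the paths changed in that commit, e.g. generated with
#   git log --pretty='$ %at %an' --name-only > git_log.txt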
|
[
"avagroskin@edu.hse.ru"
] |
avagroskin@edu.hse.ru
|
c73127e8739fa2e754892d7131be696a9376fc51
|
90803c3d5db398d357e63c782231bbf57d135a77
|
/arrview/tools/paintbrush.py
|
27a7b9629c4fc4bd069ab8296df31c5a455350a6
|
[
"MIT"
] |
permissive
|
jthacker/arrview
|
08b70c948689c6998f53bd3ebc32dffac32226f4
|
93d5d0a17a33dbbc7098eb4e5eac92d84d9ba04c
|
refs/heads/master
| 2021-04-15T15:23:47.073725
| 2016-08-02T22:11:58
| 2016-08-02T22:16:11
| 63,819,285
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,097
|
py
|
from PySide.QtCore import Qt, QRectF, QPoint, QPointF
from PySide.QtGui import (QColor, QGraphicsItem, QGraphicsPixmapItem, QPixmap, QPainter)
import logging
import math
import skimage as ski
import skimage.draw
log = logging.getLogger(__name__)
class PaintBrushItem(QGraphicsItem):
"""PaintBrushItem is a QGraphicsItem that anchors to pixel locations."""
def __init__(self, radius=0, color=QColor(Qt.transparent)):
super(PaintBrushItem, self).__init__()
self._color = color
self._points = []
self._connect_points = True
self.set_radius(radius)
def set_color(self, color):
self._color = color
self._update_cursor()
@property
def diameter(self):
return 2 * self._radius + 1
def set_radius(self, r):
self.prepareGeometryChange()
self._radius = r
if r == 0:
pts = [(0, 0)]
else:
pts = zip(*ski.draw.circle_perimeter(r, r, r))
pts += zip(*ski.draw.circle(r, r, r))
self._points = [QPointF(x, y) for x, y in pts]
self._update_cursor()
def _update_cursor(self):
x = self.diameter
w, h = x, x
pixmap = QPixmap(w, h)
pixmap.fill(Qt.transparent)
p = QPainter(pixmap)
p.drawPoints(self._points)
p.end()
self._cursor_pixmap = pixmap.createMaskFromColor(Qt.transparent)
def _paint_cursor(self, p):
p.save()
p.setPen(self._color)
p.translate(-self._radius, -self._radius)
w, h = self._cursor_pixmap.width(), self._cursor_pixmap.height()
rect = QRectF(0, 0, w, h)
p.drawPixmap(rect, self._cursor_pixmap, rect)
p.restore()
self.update()
def snap_pos(self, pos):
f = lambda a: int(math.floor(a))
return QPoint(f(pos.x()), f(pos.y()))
def setPos(self, pos):
super(PaintBrushItem, self).setPos(self.snap_pos(pos))
def paint(self, p, option, widget):
r = self._radius
self._paint_cursor(p)
p.drawEllipse(-r, -r, self.diameter, self.diameter)
def boundingRect(self):
r = self._radius
return QRectF(0, 0, self.diameter, self.diameter)
def fill_pixmap(self, pixmap, origin, position):
origin = self.snap_pos(origin)
pos = self.snap_pos(position)
ox, oy = origin.x(), origin.y()
cx, cy = pos.x(), pos.y()
p = QPainter(pixmap)
p.setCompositionMode(QPainter.CompositionMode_Source)
if self._connect_points:
## This method of extending a line between points works quite well
## but it is very slow when the radius of the circle is large, which
## essential results in a lot of duplicate drawing.
p.translate(ox, oy)
px, py = 0, 0
for x, y in zip(*ski.draw.line(0, 0, cx-ox, cy-oy)):
p.translate(x-px, y-py)
px, py = x, y
self._paint_cursor(p)
else:
p.translate(cx, cy)
self._paint_cursor(p)
p.end()
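# Usage sketch (illustrative): add the item to an existing QGraphicsScene and
# drive it from mouse events, e.g.
#   brush = PaintBrushItem(radius=4, color=QColor(Qt.red))
#   scene.addItem(brush)                        # `scene` is a QGraphicsScene
#   brush.setPos(event.scenePos())              # snaps to integer pixel coordinates
#   brush.fill_pixmap(layer, last_pos, event.scenePos())  # paint onto a QPixmap layer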
|
[
"thacker.jon@gmail.com"
] |
thacker.jon@gmail.com
|
fb2f50056d7b8095e873a122bd1b1f9cedf06047
|
355e280100fd321584a153e9ace6426df7181deb
|
/examples/get_all_jobs.py
|
6306f38b7cd534dac2bc765c1f5b177ec8235ecb
|
[
"Apache-2.0"
] |
permissive
|
kimiwangjm/drmaa2-python
|
11d3563955d8468a3fb60afc7234b10e7972a09b
|
1df04191ae22aea3bd86f9d110e347a76bc6039d
|
refs/heads/master
| 2020-08-29T21:55:08.656600
| 2019-10-03T15:11:22
| 2019-10-03T15:22:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,486
|
py
|
#!/usr/bin/env python
# ___INFO__MARK_BEGIN__
##########################################################################
# Copyright 2016-2019 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###########################################################################
# ___INFO__MARK_END__
import random
from drmaa2 import JobSession
from drmaa2 import JobInfo
from drmaa2 import MonitoringSession
if __name__ == '__main__':
js = JobSession('js-01')
print('Created job session: %s' % js.name)
j_name = 'job-%s' % int(random.uniform(0, 1000))
j = js.run_job({'remote_command': '/bin/sleep', 'args': ['10'], 'job_name': j_name})
print('Submitted job: %s' % j)
# ji = j.get_info()
ji = JobInfo({'job_name': j_name})
ms = MonitoringSession('ms-01')
print('Opened monitoring session: %s' % ms.name)
print('Retrieving jobs matching job info %s' % ji)
j_list = ms.get_all_jobs(ji)
print('Got all jobs: %s' % j_list)
|
[
"aalefeld@univa.com"
] |
aalefeld@univa.com
|
9c1e1b031fb6ac5960c2e6a08b7e5fc4683cd159
|
b775ef7ae427d3199eb2015c08a6468f3b5cc8ad
|
/ant.py
|
6bd9855bc9bd129dbc29c68d89e14f44931c5d83
|
[] |
no_license
|
j3camero/Ant-Combat
|
ae671ae5c9ffe02d9429be524156898da34ccb34
|
3a3a86303cf316a56a19582369b7503962ece60a
|
refs/heads/master
| 2020-12-30T10:50:12.860917
| 2011-01-12T00:47:58
| 2011-01-12T00:47:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 101
|
py
|
class Ant:
def __init__(self, x, y, owner):
self.x = x
self.y = y
self.owner = owner
|
[
"cameron.jp@gmail.com"
] |
cameron.jp@gmail.com
|
941f4dbc2fbf6e26a024c61793156ca108517cfe
|
8d5acda7549d3625340d785698b521ed1583c6d6
|
/review.py
|
537c6c78f8ab35deabc9b6d399c2e9e94270ed17
|
[] |
no_license
|
jerry-sjtu/flask_demo
|
db7f5ac1dd332dd47f491b45f68a0e4838447ff7
|
c0bc7f80f89c5cff52b3c7ea284e3aae7f1d5cf0
|
refs/heads/master
| 2021-01-23T12:17:30.437414
| 2015-04-13T03:22:43
| 2015-04-13T03:22:43
| 29,714,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,448
|
py
|
# -*- coding: utf-8 -*-
from flask import Flask
import urllib2
from flask import render_template
import json
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
app = Flask(__name__)
BASE_URL = 'http://www.dianping.com/shop/%s/review_all'
BIZER_HOST = "192.168.220.235"
BIZER_ABSTRACT_URL = "http://%s:4153/search/shopreview?query=term(shopid,%s)&sort=desc(abstract)&stat=count(reviewtagsentiment)&limit=0,0"
BIZER_REVIEW_URL = "http://%s:4153/search/shopreview?query=term(shopid,%s)&sort=desc(addtime)&fl=*&limit=%s,%s"
BIZER_TAGRANK_URL = "http://%s:4153/search/shopreview?query=term(shopid,%s),term(reviewtagsentiment,%s)&sort=desc(tagrank)&fl=*&limit=%s,%s"
PAGE_SIZE = 20
@app.route("/")
def default():
return view_by_shop('1945402')
@app.route('/<shopid>')
def view_by_shop(shopid):
return view_by_shop_page(shopid, 1)
@app.route('/<shopid>/<pageno>')
def view_by_shop_page(shopid, pageno):
abstract = request_abstract(shopid)
page_dict = review_list_page(shopid, pageno)
return render_template('review.html', val=page_dict, abstract=abstract,
shopname=page_dict['shopname'], shopid=page_dict['shopid'], tag='')
@app.route('/<shopid>/tag/<tag>')
def view_by_shop_tag(shopid, tag):
return view_by_shop_tag_page(shopid, tag, 1)
@app.route('/<shopid>/<pageno>/tag/<tag>')
def view_by_shop_tag_page(shopid, tag, pageno):
abstract = request_abstract(shopid)
page_dict = review_search_page(shopid, tag, pageno)
return render_template('review.html', val=page_dict, abstract=abstract,
shopname=page_dict['shopname'], shopid=page_dict['shopid'], tag=tag)
def request_abstract(shopid):
url = BIZER_ABSTRACT_URL % (BIZER_HOST, shopid)
i_headers = dict()
request = urllib2.Request(url, headers=i_headers)
response = urllib2.urlopen(request)
decodejson = json.loads(response.read())
for x in decodejson['records']:
if x['tag'].endswith('-1'):
x['sent'] = '-1'
else:
x['sent'] = '1'
return decodejson
def request_review(shopid, start, pagesize):
url = BIZER_REVIEW_URL % (BIZER_HOST, shopid, start, pagesize)
i_headers = dict()
request = urllib2.Request(url, headers=i_headers)
response = urllib2.urlopen(request)
decodejson = json.loads(response.read())
return decodejson
def request_tgrank(shopid, tag, start, pagesize):
url = BIZER_TAGRANK_URL % (BIZER_HOST, shopid, tag, start, pagesize)
i_headers = dict()
request = urllib2.Request(url, headers=i_headers)
response = urllib2.urlopen(request)
decodejson = json.loads(response.read())
return decodejson
def review_list_page(shopid, pageno):
start = (int(pageno) - 1) * PAGE_SIZE
review_json = request_review(shopid, start, PAGE_SIZE)
r_dict = dict()
r_dict['review'] = list()
r_dict['shopid'] = shopid
r_dict['curr_page'] = pageno
if len(review_json['records']) == 0:
return r_dict
for r in review_json['records']:
r_dict['shopname'] = r['shopname']
time = r['addtime']
score1 = r['score1']
score2 = r['score2']
score3 = r['score3']
review_id = r['reviewid']
match_list = r['reviewmatch'].split()
tag_list = r['reviewtagsentiment'].split()
review_body = r['reviewbody']
review_body = highlight(review_body, match_list)
r_dict['review'].append((review_id, time, review_body, tag_list, score1, score2, score3))
total_hit = int(review_json['totalhits'])
r_dict['pno_list'] = pageno_list(int(pageno), total_hit)
return r_dict
def review_search_page(shopid, tag, pageno):
start = (int(pageno) - 1) * PAGE_SIZE
review_json = request_tgrank(shopid, tag, start, PAGE_SIZE)
r_dict = dict()
r_dict['review'] = list()
r_dict['shopid'] = shopid
r_dict['curr_page'] = pageno
if len(review_json['records']) == 0:
return r_dict
for r in review_json['records']:
r_dict['shopname'] = r['shopname']
time = r['addtime']
score1 = r['score1']
score2 = r['score2']
score3 = r['score3']
review_id = r['reviewid']
match_list = list()
match_list.append(r['highlight'])
tag_list = r['reviewtagsentiment'].split()
review_body = r['reviewbody']
review_body = highlight(review_body, match_list)
r_dict['review'].append((review_id, time, review_body, tag_list, score1, score2, score3))
total_hit = int(review_json['totalhits'])
#r_dict['pno_list'] = range(1, total_hit / PAGE_SIZE + 2)
r_dict['pno_list'] = pageno_list(int(pageno), total_hit)
return r_dict
def pageno_list(curr_page, total_hit):
max_page = total_hit / PAGE_SIZE + 1
no_list = list()
no_list.append('1')
if curr_page > 2:
no_list.append('...')
for i in range(curr_page, curr_page + 20):
if i < max_page and i > 1:
no_list.append(str(i))
if curr_page + 20 < max_page:
no_list.append('...')
no_list.append(str(max_page))
return no_list
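# For example, with PAGE_SIZE = 20, pageno_list(1, 100) returns
# ['1', '2', '3', '4', '5', '6'].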
def highlight(review_content, match_list):
for m in match_list:
review_content = review_content.replace(m, '<font color="red">%s</font>' % (m))
return review_content
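# For example, highlight('good food', ['food']) returns
# 'good <font color="red">food</font>'.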
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
#app.run(host='0.0.0.0', port=80, debug=False)
#demo host: 10.1.107.103/
#bizer host: search-bizer-shopreview01.nh
|
[
"qiang.wang@dianping.com"
] |
qiang.wang@dianping.com
|
07f7480b8204fdcc16a56564512124c02be477e2
|
f3050b7f84e584dcde54ca1690944bfccc6f5d9c
|
/demo/other/demo_fomat.py
|
767ea789d7dee62f3161cff0034d57438ab22024
|
[] |
no_license
|
azhenglianxi/api_Project
|
0c8444c2bad7464fd57911be4fdcd131a63c46b2
|
2ae87b87e41f522d4ef20f63bad6adcaec1f9874
|
refs/heads/master
| 2020-09-14T12:08:07.080748
| 2019-12-12T09:08:22
| 2019-12-12T09:08:22
| 223,124,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
name="tom"
age=20
#1. Output: Hello Mr. tom, chicken dinner tonight!
print(f"Hello Mr. {name}, chicken dinner tonight!")
print("Hello Mr. {}, chicken dinner tonight!".format(name))
#2. Output: Hello Mr. tom, {chicken dinner} tonight!
print(f"Hello Mr. {name}, {{chicken dinner}} tonight!")
print("Hello Mr. {}, {{chicken dinner}} tonight!".format(name))
#3. Output: Hello Mr. {tom}, chicken dinner tonight!
print(f"Hello Mr. {{{name}}}, chicken dinner tonight!")
print("Hello Mr. {{{}}}, chicken dinner tonight!".format(name))
print("Name and age: {}, {}".format(name, age))  # no index: placeholders fill in argument order
print("Name and age: {1}, {0}".format(age, name))  # numeric indexes can reorder the arguments
print("Name and age: {x}, {y}".format(x='Xiao Ming', y=age))  # keyword placeholders
|
[
"azhenglianxi@163.com"
] |
azhenglianxi@163.com
|
b7227548c832af5339342dcb56efb5c69c872ea2
|
1e4ddb07b9ab386d49036bae631508ca99d1375d
|
/django_jquery_bootstrap/mysite_jquery/basic_site/migrations/0001_initial.py
|
43737f97b2e118aa7ecae3adebbf11236a8c8861
|
[] |
no_license
|
air2637/django-ajax
|
ce55c22c11dd703db04faf65b71d6dc67c27d899
|
f791dedbf568a99dcaba614329f761071978023a
|
refs/heads/master
| 2021-05-14T01:52:15.487867
| 2018-01-07T16:15:27
| 2018-01-07T16:15:27
| 116,578,710
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 821
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-07 02:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Module',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
('num_of_student', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
],
),
]
|
[
"weizheng@WeitekiMacBook-Pro.local"
] |
weizheng@WeitekiMacBook-Pro.local
|
0821f14666c075ca5ef4644670d667a41ce5450f
|
ce6c8e0e3a986af3fe3c347a4af16f1ca337f82c
|
/630.course-schedule-iii.py
|
c6f16e25c08edfd0eebd9959c9ace96be3683d8b
|
[] |
no_license
|
chenjienan/python-leetcode
|
dc098373ae7f73dd502d7747888a37a3bd0820cb
|
90c000c3be70727cde4f7494fbbb1c425bfd3da4
|
refs/heads/master
| 2020-04-28T20:46:50.395260
| 2020-02-12T18:48:01
| 2020-02-12T18:48:01
| 175,556,339
| 16
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 656
|
py
|
#
# @lc app=leetcode id=630 lang=python3
#
# [630] Course Schedule III
#
import heapq
from typing import List  # the List annotation below requires this outside LeetCode
class Solution:
def scheduleCourse(self, courses: List[List[int]]):
courses.sort(key=lambda x: x[1])
day = 0
heap = []
for i in range(len(courses)):
if day + courses[i][0] <= courses[i][1]:
day += courses[i][0]
heapq.heappush(heap, -courses[i][0])
else:
# has overlap
heapq.heappush(heap, -courses[i][0])
day += courses[i][0] + heap[0]
heapq.heappop(heap)
return len(heap)
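# For example, scheduleCourse([[100, 200], [200, 1300], [1000, 1250], [2000, 3200]])
# returns 3: the first three courses fit, the last one cannot.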
|
[
"chenjienan2009@gmail.com"
] |
chenjienan2009@gmail.com
|
83f1f40c0a3f881f189f6a378cd661fcde67e432
|
962d42197c56346d348d334dd7435224d4aed99d
|
/Inventationery/apps/Customer/forms.py
|
5897817c4d2509271a3fce09f250f0156eed676d
|
[
"BSD-3-Clause"
] |
permissive
|
huarmenta/Inventationery
|
e5750274c0b281208fa1f33590f0402349f7e8da
|
1bf9ee2c56492ab66947886590b7ec17fa3a6195
|
refs/heads/master
| 2021-05-30T08:31:15.368712
| 2016-01-06T03:23:45
| 2016-01-06T03:23:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,106
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Alex
# @Date: 2015-11-16 19:22:39
# @Last Modified by: Alex
# @Last Modified time: 2015-12-28 19:32:30
from django import forms
from .models import CustomerModel
from Inventationery.apps.DirParty.models import DirPartyModel
from Inventationery.apps.LogisticsPostalAddress.models import (
LogisticsPostalAddressModel)
from Inventationery.apps.LogisticsElectronicAddress.models import (
LogisticsElectronicAddressModel)
# Class: Form for CustomerModel
# ----------------------------------------------------------------------------
class CustomerForm(forms.ModelForm):
class Meta:
model = CustomerModel
fields = '__all__'
def __init__(self, *args, **kwargs):
super(CustomerForm, self).__init__(*args, **kwargs)
self.fields['AccountNum'].widget.attrs['readonly'] = True
instance = getattr(self, 'instance', None)
if instance and instance.pk:
self.fields['AccountType'].widget.attrs['readonly'] = True
    # Django invokes per-field cleaners by name (clean_<fieldname>), so this
    # cleaner must be named after the AccountNum field to actually run.
    def clean_AccountNum(self):
        instance = getattr(self, 'instance', None)
        if instance and instance.pk:
            return instance.AccountNum
        else:
            return self.cleaned_data['AccountNum']
# Class: Form for DirPartyModel
# ----------------------------------------------------------------------------
class PartyForm(forms.ModelForm):
class Meta:
model = DirPartyModel
fields = '__all__'
def __init__(self, *args, **kwargs):
super(PartyForm, self).__init__(*args, **kwargs)
# Class: Form for LogisticsPostalAddressModel
# ----------------------------------------------------------------------------
class LogisticsPostalForm(forms.ModelForm):
class Meta:
model = LogisticsPostalAddressModel
fields = '__all__'
# Class: Form for LogisticsElectronicAddressModel
# ----------------------------------------------------------------------------
class LogisticsElectronicForm(forms.ModelForm):
class Meta:
model = LogisticsElectronicAddressModel
fields = '__all__'
|
[
"h.alexarmenta@gmail.com"
] |
h.alexarmenta@gmail.com
|
d618c52095145ad11dea0dcaef997f4e40d3ab6d
|
505456805c026f2e6e2421ebaf745a3bc72c3dfb
|
/run.py
|
923c7644672cf855177e2d3a8c004c7d6cd78512
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
www10177/fbcrawl
|
1029f9fedea68d82e3c894fe0f60a841aa86a3d7
|
bcd79d8292c4016f31767b2c50ab6ff1e69efa73
|
refs/heads/master
| 2020-04-26T08:49:45.766583
| 2019-03-22T17:02:18
| 2019-03-22T17:02:18
| 173,435,725
| 1
| 0
| null | 2019-03-02T10:44:49
| 2019-03-02T10:44:49
| null |
UTF-8
|
Python
| false
| false
| 2,212
|
py
|
from subprocess import call
from tqdm import tqdm
import pandas as pd
import sqlite3
import os
# Config
email= 'www10177@gmail.com'
password= 'u9Vd53SCX5'
year= 2008
lang= 'it'
def crawl_pages():
l=pd.read_csv('./update.csv')
for name,url in tqdm(l.values):
print('crawling %s'%name)
print(url)
        call(['python3', '-m', 'scrapy', 'crawl', 'fb', '-a','email=%s'%email,'-a','password=%s'%password,'-a', 'page=%s'%url, '-a', 'year=%d'%year, '-a', 'lang=%s'%lang, '-o', './result/%s.csv'%name])
def crawl_comments():
db = pd.read_sql('select * from posts ', sqlite3.connect('./fb.db'))
print(db.columns)
for index,name,url_temp in tqdm(db[['index','crawl_from','url']].values):
print('crawling %d,%s'%(index,name))
url= 'https://mbasic.facebook.com' + url_temp
print(url)
call(['python3', '-m', 'scrapy', 'crawl', 'comments', '-a', 'email=%s'%email, '-a' ,'password=%s'%password, '-a', 'page=%s'%url,'-a','lang=%s'%lang,'-o', './comments/%s.csv'%index])
def crawl_batch_pages():
url ='./pagelist'
name ='test'
    call(['python3', '-m', 'scrapy', 'crawl', 'fb', '-a','email=%s'%email,'-a','password=%s'%password,'-a', 'page=%s'%url, '-a', 'year=%d' %year, '-a', 'lang=%s'%lang, '-o', './result/%s.csv'%name])
def crawl_batch_comments():
post_list_dir= './comment_urls/'
for url_filename in tqdm([i for i in os.listdir(post_list_dir) if i.endswith('.txt')]):
url = post_list_dir +url_filename
name =url_filename
call(['python3', '-m', 'scrapy', 'crawl', 'comments', '-a', 'email=%s'%email, '-a' ,'password=%s'%password, '-a', 'pagelist=%s'%url,'-a','lang=%s'%lang,'-o', './comments/%s.csv'%name])
def crawl_batch_reactions():
post_list_dir= './reactions_urls/'
for url_filename in tqdm([i for i in os.listdir(post_list_dir) if i.endswith('.txt')]):
url = post_list_dir +url_filename
name =url_filename
call(['python3', '-m', 'scrapy', 'crawl', 'reactions', '-a', 'email=%s'%email, '-a' ,'password=%s'%password, '-a', 'pagelist=%s'%url,'-a','lang=%s'%lang,'-o', './reactions/%s.csv'%name])
if __name__ == '__main__':
crawl_batch_reactions()
|
[
"www10177@gmail.com"
] |
www10177@gmail.com
|
924703d92cf2b60786d5198e1fdf455dfd379162
|
3a86fef1551d97141d3a192abe7cf5c826106377
|
/python_problems/union.py
|
a0d9f768de9c103c4e01b6e34838b7267ac658af
|
[] |
no_license
|
andrayantelo/hackerrank
|
bbaee4f3eb7fac1b60fce8a5dfb3f4368d7c48db
|
11589bc2858028ddbcdc7ab87285f3d16e3773b9
|
refs/heads/master
| 2020-03-29T20:55:15.543255
| 2018-11-06T21:46:03
| 2018-11-06T21:46:03
| 150,336,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
# Task
# The students of District College have subscriptions to English and French
# newspapers. Some students have subscribed only to English, some have
# subscribed to only French and some have subscribed to both newspapers.
# You are given two sets of student roll numbers. One set has subscribed
# to the English newspaper, and the other set is subscribed to the French
# newspaper. The same student could be in both sets. Your task is to find
# the total number of students who have subscribed to at least one newspaper.
# Input Format
# The first line contains an integer, n, the number of students who have subscribed to the English newspaper.
# The second line contains n space separated roll numbers of those students.
# The third line contains b, the number of students who have subscribed to the French newspaper.
# The fourth line contains b space separated roll numbers of those students.
# Constraints
# 0 < Total number of students in college < 1000
# Output Format
# Output the total number of students who have at least one subscription
if __name__ == "__main__":
# number of students subscribed to English newspaper
n = int(input())
eng_students = set(map(int, input().rsplit()))
# number of students subscribed to French newspaper
b = int(input())
fr_students = set(map(int, input().rsplit()))
print(len(eng_students.union(fr_students)))
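# For example, English = {1..9} and French = {10, 1, 2, 3, 11, 21, 55, 6, 8}
# give a union of 13 distinct subscribers, so the program prints 13.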
|
[
"andrayantelo@gmail.com"
] |
andrayantelo@gmail.com
|
4930f53fa7f6aae3b12d5558dc8e8d6aa7468c13
|
2161bf5488654aa2c2919f7839820c46bd9db0c4
|
/keyboards/inline/__init__.py
|
709d059e906a63e8a5362e98523b884c15eb7743
|
[] |
no_license
|
Asqarov-Fayzullo/antirobot_aiogram
|
c50f9ae451a575e434d8c664f8ee8a1ab30c4e92
|
02c85bcad6975473b5d264ed90d10515ad4b3625
|
refs/heads/master
| 2023-07-10T07:45:50.056757
| 2020-12-01T16:42:10
| 2020-12-01T16:42:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 76
|
py
|
from .guardian_keyboard import generate_confirm_markup, confirming_callback
|
[
"kostyajog@mail.ru"
] |
kostyajog@mail.ru
|
89a31706b3313829d16e3ef3246f1c8408518820
|
4c8df11e3d2c5065afb67e6d6a975a8afc678526
|
/Text Mining/Workshops/week2/generateDTM.py
|
c035812ebd4ca123459592c781ee69b88181543e
|
[] |
no_license
|
shenshutao/machine_learning
|
da8d866f95e8021044823f4b5b3d48596477db67
|
185532ffda1e3994ceca9c0ebf0f9cbe232222a1
|
refs/heads/master
| 2021-09-14T09:33:24.597332
| 2018-05-11T09:20:59
| 2018-05-11T09:20:59
| 89,137,160
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,001
|
py
|
# -*- coding: utf-8 -*-
"""
Workshop: Text Preparation (TDM)
Created on Wed Sep 6 16:19:52 2017
@author: Fan Zhenzhen
"""
from nltk.corpus import reuters
from nltk import FreqDist
import string
from nltk.corpus import stopwords
import nltk  # needed below for nltk.SnowballStemmer
# We'll use the reuters corpus in NLTK.
# The same steps of preprocessing can be done on documents read in from external files.
# How many files are there in the corpus?
# What are their categories? Single or multiple categories for one file?
len(reuters.fileids())
cats = [ reuters.categories(f) for f in reuters.fileids() ]
#cats[0]
#cats[:10]
cat_num = [ len(c) for c in cats ]
fd_num = FreqDist(cat_num)
fd_num.plot()
# How many documents are there in each category?
# FreqDist() can be used to find the answer, but we need to flatten the list of categories first.
cats_flat = [ c for l in cats for c in l ]
fd_cat = FreqDist(cats_flat)
fd_cat
fd_cat.most_common(20)
# Let's pick two categories and visualize the articles in each category using word cloud
grain = reuters.fileids('grain')
trade = reuters.fileids('trade')
grain_tok = [ reuters.words(f) for f in grain ] # retrieve all the tokens
trade_tok = [ reuters.words(f) for f in trade ]
#grain_tok[:3]
#trade_tok[:3]
#Let's define a function preprocess() to perform the preprocessing steps given a file (token list):
# punctuation removal, case lowering, stopword removal,
# stemming/lemmatization, further cleaning
stop = stopwords.words('english')
snowball = nltk.SnowballStemmer('english')
#wnl = nltk.WordNetLemmatizer()
def preprocess(toks):
toks = [ t.lower() for t in toks if t not in string.punctuation ]
toks = [t for t in toks if t not in stop ]
toks = [ snowball.stem(t) for t in toks ]
# toks = [ wnl.lemmatize(t) for t in toks ]
toks_clean = [ t for t in toks if len(t) >= 3 ]
return toks_clean
# Preprocess each file in each category
grain_clean = [ preprocess(f) for f in grain_tok ]
trade_clean = [ preprocess(f) for f in trade_tok ]
# Flatten the list of lists for FreqDist
grain_flat = [ c for l in grain_clean for c in l ]
trade_flat = [ c for l in trade_clean for c in l ]
fd_grain = FreqDist(grain_flat)
fd_trade = FreqDist(trade_flat)
# Generate word clouds for the two categories.
from wordcloud import WordCloud
import matplotlib.pyplot as plt
wc_grain = WordCloud(background_color="white").generate_from_frequencies(fd_grain)
plt.imshow(wc_grain, interpolation='bilinear')
plt.axis("off")
plt.show()
wc_trade = WordCloud(background_color="white").generate_from_frequencies(fd_trade)
plt.imshow(wc_trade, interpolation='bilinear')
plt.axis("off")
plt.show()
# Finally, how to generate TDM
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# sklearn requires the input to be text string
grain_text = [ ' '.join(f) for f in grain_clean ]
# Create a matrix using term frequency first using CountVectorizer
# The result is in sparse matrix format
vec_tf = CountVectorizer()
grain_tf = vec_tf.fit_transform(grain_text)
grain_tf
# Where are the columns and rows then?
vec_tf.get_feature_names()
grain_tf_m = grain_tf.toarray()
vec_tf_2 = CountVectorizer(min_df = 2)
grain_tf_2 = vec_tf_2.fit_transform(grain_text)
grain_tf_2
# To have binary indexing, set "binary=True"
vec_bin = CountVectorizer(binary=True)
grain_bin = vec_bin.fit_transform(grain_text)
grain_bin.toarray()[:10]
# And tfidf indexing
vec_tfidf = TfidfVectorizer(min_df = 2)
grain_tfidf = vec_tfidf.fit_transform(grain_text)
grain_tfidf
grain_tfidf.toarray()[:10]
# To save the vectorized results for future use (save time)
import pickle
pickle.dump(grain_tfidf, open("tfidf.pkl", "wb"))
pickle.dump(vec_tfidf.vocabulary_, open("feature.pkl","wb"))
#load the content
loaded_vec = TfidfVectorizer(decode_error="replace",vocabulary=pickle.load(open("feature.pkl", "rb")))
tfidf = pickle.load(open("tfidf.pkl", "rb" ) )
tfidf
|
[
"sstwood@gmail.com"
] |
sstwood@gmail.com
|
cfffb928dd17b54c411a485b5ae009481e2a32e6
|
b24c500dfdbbe56229031d7bdc02ba22dd055a41
|
/image_stitch.py
|
0921be28ef8d9265185f6d637c295ea16797da6a
|
[] |
no_license
|
Trimax69/Python_random
|
5c147c9fbb04082d3b204f31a50bd9d2dab6469f
|
a969c19ca7198909e128bf4496811657f6498ed4
|
refs/heads/master
| 2022-12-27T14:15:51.289238
| 2020-10-04T15:44:50
| 2020-10-04T15:44:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,054
|
py
|
import time
import string
from os import listdir
from os.path import abspath, dirname, isfile, join
from PIL import Image
VALID_FORMATS = ('jpg', 'jpeg', 'png', 'gif', 'bmp', 'tiff', 'webp')
VALID_CHARS = set("-_.() %s%s" % (string.ascii_letters, string.digits))
DIRECTORY = dirname(abspath(__file__))
OFFSET = 5
def names_to_image(file_names):
return [Image.open(name) for name in file_names]
def hstack(images):
widths, heights = zip(*(i.size for i in images))
total_width = sum(widths) + OFFSET * len(images)
max_height = max(heights)
stitched = Image.new('RGBA', (total_width, max_height))
x_offset = 0
for im in images:
stitched.paste(im, (x_offset, 0))
x_offset += im.size[0] + OFFSET
return stitched
def vstack(images):
widths, heights = zip(*(i.size for i in images))
total_height = sum(heights) + OFFSET * len(images)
max_width = max(widths)
stitched = Image.new('RGBA', (max_width, total_height))
y_offset = 0
for im in images:
x_offset = (max_width - im.size[0]) // 2
stitched.paste(im, (x_offset, y_offset))
y_offset += im.size[1] + OFFSET
return stitched
def stitch(file_names, x):
total_files = len(file_names)
sections = [names_to_image(file_names[n:n + x])
if n + x <= total_files
else names_to_image(file_names[n: total_files])
for n in range(0, total_files, x)]
stitched = [hstack(images) for images in sections]
name = input("\nEnter the Image name to be Saved As, Existing File Name will be overwritten > ")
    # fall back to a default if the name is empty or contains invalid characters
    if not name or not set(name) <= VALID_CHARS:
        name = 'result'
vstack(stitched).save(join(DIRECTORY, f'{name}.png'))
return name
def main():
print("""
    Enter the max horizontal stitch limit, e.g. 2 or 5; images will be stacked vertically if there are more.
> Example | [ LIMIT = 3 ] and [ No of Images = 8 ] <
| X X X |
| X X X |
| X X |
""")
x = -1
while True:
try:
x = int(input('Stitch limit > '))
assert x >= 1
        except (ValueError, AssertionError):  # int() raises ValueError on bad input
            print("\nERROR: Invalid Input, Limit must be greater than or equal to 1.\n")
else:
break
file_names = [join(DIRECTORY, f) for f in listdir(DIRECTORY)
if isfile(join(DIRECTORY, f)) and f.endswith(VALID_FORMATS)]
if not file_names:
print(f"\nERROR: No Images Found in the Current Directory: {DIRECTORY}\n")
else:
start = time.perf_counter()
name = stitch(file_names, x)
duration = time.perf_counter() - start
print(f"\nResult Image Saved as '{name}.png' | Stitch Time : {duration:.2f}s")
input()
return
main()
|
[
"noreply@github.com"
] |
Trimax69.noreply@github.com
|
cfe183779f01a5fbe1379f11d1cc62902be02994
|
9079a555d1fd22ad9701227c58151ae1ca3595d3
|
/CSES/1097.py
|
85e2d0f2b48353028d34faa5b5a999c8a74da857
|
[] |
no_license
|
DebRC/My-Competitve-Programming-Solutions
|
c2a03b18f15cebd3793ce1c288dbb51fc0a33ef4
|
fe956eed619a21bd24a5fd647791d4c56cd1b021
|
refs/heads/main
| 2023-02-04T08:28:13.915967
| 2020-12-28T09:11:29
| 2020-12-28T09:11:29
| 324,591,343
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
def removal_game_tab(a, n):
dp=[[0 for i in range(n)] for i in range(n)]
for i in range(n-1,-1,-1):
for j in range(i,n):
if i==j:
dp[i][i]=a[i]
else:
dp[i][j] = max(a[i]-dp[i+1][j],a[j]-dp[i][j-1])
return dp[0][n-1]
n = int(input())
a = list(map(int, input().split()))
print((sum(a)+removal_game_tab(a,n))//2)
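# For example, for n = 4 and a = [4, 5, 1, 3] the optimal first player
# scores (13 + 3) // 2 = 8, so the program prints 8.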
|
[
"noreply@github.com"
] |
DebRC.noreply@github.com
|
ca1de3b0d14a8422dee3935ae2801d6ac3503037
|
f725df1a1a494f13d9f179e5981a30d3a843f8b7
|
/piEstimate.py
|
76659ac84ae4f282602f1cd6ac28e3eb65c8804b
|
[] |
no_license
|
aaronscode/piEstimate
|
f8fadeefdb0e3a7f49b94f1f490c96608ff91dc6
|
cb7d18e726399c5c482796e16e66b2ecd1b80579
|
refs/heads/master
| 2021-01-22T13:23:25.780468
| 2018-07-04T03:19:44
| 2018-07-04T03:19:44
| 100,665,792
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
import math as m
import random as r
def main():
numSamples = int(input("Number of Samples: "))
numWithinRadius = 0
for i in range(0, numSamples):
x = r.uniform(0,1)
y = r.uniform(0,1)
if m.sqrt(x * x + y * y) < 1:
numWithinRadius += 1
ratio = numWithinRadius / numSamples
piEstimate = 4 * ratio
print("Number of samples within radius: {0}".format(numWithinRadius))
print("Estimate for pi: {0}".format(piEstimate))
if __name__ == "__main__":
main()
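# Note: the Monte Carlo error shrinks like 1/sqrt(N); with 10**6 samples the
# estimate is usually within about 0.005 of math.pi.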
|
[
"agross@temple.edu"
] |
agross@temple.edu
|
b3f1c0052489cfc59ab9c29a478a91935c971633
|
27c1053165bad4d93b375c92a458504a27ed2628
|
/app/forms/create_client_form.py
|
7557f29cb94ed4d59dbcc078b9649e92fb4814ae
|
[] |
no_license
|
ransonk/trainer-hub
|
d13068e1b8d2f37604cd1e92b37fd043c50228a9
|
463b78147f7d6f2330346025cb3c14e8254febf4
|
refs/heads/master
| 2023-04-09T15:29:11.539943
| 2021-04-19T01:16:10
| 2021-04-19T01:16:10
| 318,020,072
| 0
| 0
| null | 2021-04-09T19:05:18
| 2020-12-02T23:34:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,328
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, BooleanField
from wtforms.validators import DataRequired, Email, ValidationError, Optional
from app.models import Trainer, Client
# from app.models import User
def client_exists(form, field):
print("Checking if client exists", field.data)
email = field.data
client = Client.query.filter(Client.email == email).first()
if client:
raise ValidationError("User is already registered.")
class CreateClientForm(FlaskForm):
firstName = StringField('first name', validators=[DataRequired()])
lastName = StringField('last name', validators=[DataRequired()])
email = StringField('email', validators=[client_exists])
phone = StringField('phone', validators=[Optional()])
weight = StringField('weight', validators=[Optional()])
age = IntegerField('age', validators=[Optional()])
duedate = StringField('duedate', validators=[Optional()])
amount = StringField('amount', validators=[Optional()])
paid = BooleanField('paid', validators=[Optional()])
noshows = IntegerField('No Shows', validators=[Optional()])
cancellations = IntegerField('Cancellations', validators=[Optional()])
password = StringField('password', validators=[DataRequired()])
trainer_id = IntegerField('trainer_id')
|
[
"ransonknorr@gmail.com"
] |
ransonknorr@gmail.com
|
6e46d41d4d26e0cd32bd2ca55fb3fe92590599e8
|
080c78a077796d06e80aa6b02c220099c6fa41dc
|
/helpers.py
|
08ec00a63ed9617041444e5a504ce61d400c03c9
|
[] |
no_license
|
nikita-mishunyayev/Kaggle-Aptos-Blindness-Detection-2019-solution
|
a44081259db095ed97c2cac4bbd866fed649a6eb
|
e3c6479c99570339775f28e8c42462193a4cf9a5
|
refs/heads/master
| 2022-03-09T11:13:36.886285
| 2019-11-19T02:17:32
| 2019-11-19T02:17:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,901
|
py
|
import re
import math
import collections
from functools import partial
from collections import Counter
import json
import imageio
import os
from tqdm import tqdm
import numpy as np
import pandas as pd
import cv2
import torch
import torch.nn as nn
from torch.utils import model_zoo  # used by load_pretrained_weights below
from torch.nn import functional as F
from torch.utils.data import Dataset
from albumentations import (
Compose, Resize, Normalize, Flip
)
from albumentations.torch import ToTensor
# Parameters for the entire model (stem, all blocks, and head)
GlobalParams = collections.namedtuple('GlobalParams', [
'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate',
'num_classes', 'width_coefficient', 'depth_coefficient',
'depth_divisor', 'min_depth', 'drop_connect_rate', 'image_size'])
# Parameters for an individual model block
BlockArgs = collections.namedtuple('BlockArgs', [
'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
'expand_ratio', 'id_skip', 'stride', 'se_ratio'])
# Change namedtuple defaults
GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
sigmoid = torch.nn.Sigmoid()
class Swish(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i * sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
i = ctx.saved_variables[0]
sigmoid_i = sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
swish = Swish.apply
class Swish_module(nn.Module):
def forward(self, x):
return swish(x)
swish_layer = Swish_module()
def relu_fn(x):
""" Swish activation function """
# return x * torch.sigmoid(x)
return swish_layer(x)
def round_filters(filters, global_params):
""" Calculate and round number of filters based on depth multiplier. """
multiplier = global_params.width_coefficient
if not multiplier:
return filters
divisor = global_params.depth_divisor
min_depth = global_params.min_depth
filters *= multiplier
min_depth = min_depth or divisor
new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
if new_filters < 0.9 * filters: # prevent rounding by more than 10%
new_filters += divisor
return int(new_filters)
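# For example, with width_coefficient=1.4 and depth_divisor=8 (efficientnet-b4),
# round_filters(32, global_params) == 48.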
def round_repeats(repeats, global_params):
""" Round number of filters based on depth multiplier. """
multiplier = global_params.depth_coefficient
if not multiplier:
return repeats
return int(math.ceil(multiplier * repeats))
def drop_connect(inputs, p, training):
""" Drop connect. """
if not training: return inputs
batch_size = inputs.shape[0]
keep_prob = 1 - p
random_tensor = keep_prob
random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
binary_tensor = torch.floor(random_tensor)
output = inputs / keep_prob * binary_tensor
return output
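# In effect, drop_connect zeroes each sample in the batch with probability p and
# rescales the survivors by 1/(1 - p) (stochastic depth with inverted scaling);
# at eval time it is the identity.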
def get_same_padding_conv2d(image_size=None):
""" Chooses static padding if you have specified an image size, and dynamic padding otherwise.
Static padding is necessary for ONNX exporting of models. """
if image_size is None:
return Conv2dDynamicSamePadding
else:
return partial(Conv2dStaticSamePadding, image_size=image_size)
class Conv2dDynamicSamePadding(nn.Conv2d):
""" 2D Convolutions like TensorFlow, for a dynamic image size """
def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]]*2
def forward(self, x):
ih, iw = x.size()[-2:]
kh, kw = self.weight.size()[-2:]
sh, sw = self.stride
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w//2, pad_w - pad_w//2, pad_h//2, pad_h - pad_h//2])
return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
class Conv2dStaticSamePadding(nn.Conv2d):
""" 2D Convolutions like TensorFlow, for a fixed image size"""
def __init__(self, in_channels, out_channels, kernel_size, image_size=None, **kwargs):
super().__init__(in_channels, out_channels, kernel_size, **kwargs)
self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2
# Calculate padding based on image size and save it
assert image_size is not None
ih, iw = image_size if type(image_size) == list else [image_size, image_size]
kh, kw = self.weight.size()[-2:]
sh, sw = self.stride
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
if pad_h > 0 or pad_w > 0:
self.static_padding = nn.ZeroPad2d((pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))
else:
self.static_padding = Identity()
def forward(self, x):
x = self.static_padding(x)
x = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return x
class Identity(nn.Module):
def __init__(self,):
super(Identity, self).__init__()
def forward(self, input):
return input
########################################################################
############## HELPERS FUNCTIONS FOR LOADING MODEL PARAMS ##############
########################################################################
def efficientnet_params(model_name):
""" Map EfficientNet model name to parameter coefficients. """
params_dict = {
# Coefficients: width,depth,res,dropout
'efficientnet-b0': (1.0, 1.0, 224, 0.2),
'efficientnet-b1': (1.0, 1.1, 240, 0.2),
'efficientnet-b2': (1.1, 1.2, 260, 0.3),
'efficientnet-b3': (1.2, 1.4, 300, 0.3),
'efficientnet-b4': (1.4, 1.8, 380, 0.4),
'efficientnet-b5': (1.6, 2.2, 456, 0.4),
'efficientnet-b6': (1.8, 2.6, 528, 0.5),
'efficientnet-b7': (2.0, 3.1, 600, 0.5),
}
return params_dict[model_name]
class BlockDecoder(object):
""" Block Decoder for readability, straight from the official TensorFlow repository """
@staticmethod
def _decode_block_string(block_string):
""" Gets a block through a string notation of arguments. """
assert isinstance(block_string, str)
ops = block_string.split('_')
options = {}
for op in ops:
splits = re.split(r'(\d.*)', op)
if len(splits) >= 2:
key, value = splits[:2]
options[key] = value
# Check stride
assert (('s' in options and len(options['s']) == 1) or
(len(options['s']) == 2 and options['s'][0] == options['s'][1]))
return BlockArgs(
kernel_size=int(options['k']),
num_repeat=int(options['r']),
input_filters=int(options['i']),
output_filters=int(options['o']),
expand_ratio=int(options['e']),
id_skip=('noskip' not in block_string),
se_ratio=float(options['se']) if 'se' in options else None,
stride=[int(options['s'][0])])
@staticmethod
def _encode_block_string(block):
"""Encodes a block to a string."""
args = [
'r%d' % block.num_repeat,
'k%d' % block.kernel_size,
            's%d%d' % (block.stride[0], block.stride[-1]),  # BlockArgs stores `stride`, possibly as a 1-element list
'e%s' % block.expand_ratio,
'i%d' % block.input_filters,
'o%d' % block.output_filters
]
if 0 < block.se_ratio <= 1:
args.append('se%s' % block.se_ratio)
if block.id_skip is False:
args.append('noskip')
return '_'.join(args)
@staticmethod
def decode(string_list):
"""
Decodes a list of string notations to specify blocks inside the network.
:param string_list: a list of strings, each string is a notation of block
:return: a list of BlockArgs namedtuples of block args
"""
assert isinstance(string_list, list)
blocks_args = []
for block_string in string_list:
blocks_args.append(BlockDecoder._decode_block_string(block_string))
return blocks_args
@staticmethod
def encode(blocks_args):
"""
Encodes a list of BlockArgs to a list of strings.
:param blocks_args: a list of BlockArgs namedtuples of block args
:return: a list of strings, each string is a notation of block
"""
block_strings = []
for block in blocks_args:
block_strings.append(BlockDecoder._encode_block_string(block))
return block_strings
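# For example, BlockDecoder.decode(['r1_k3_s11_e1_i32_o16_se0.25'])[0] yields
# BlockArgs(kernel_size=3, num_repeat=1, input_filters=32, output_filters=16,
#           expand_ratio=1, id_skip=True, stride=[1], se_ratio=0.25).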
def efficientnet(width_coefficient=None, depth_coefficient=None, dropout_rate=0.2,
drop_connect_rate=0.2, image_size=None, num_classes=1000):
""" Creates a efficientnet model. """
blocks_args = [
'r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25',
'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25',
'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25',
'r1_k3_s11_e6_i192_o320_se0.25',
]
blocks_args = BlockDecoder.decode(blocks_args)
global_params = GlobalParams(
batch_norm_momentum=0.99,
batch_norm_epsilon=1e-3,
dropout_rate=dropout_rate,
drop_connect_rate=drop_connect_rate,
# data_format='channels_last', # removed, this is always true in PyTorch
num_classes=num_classes,
width_coefficient=width_coefficient,
depth_coefficient=depth_coefficient,
depth_divisor=8,
min_depth=None,
image_size=image_size,
)
return blocks_args, global_params
def get_model_params(model_name, override_params):
""" Get the block args and global params for a given model """
if model_name.startswith('efficientnet'):
w, d, s, p = efficientnet_params(model_name)
# note: all models have drop connect rate = 0.2
blocks_args, global_params = efficientnet(
width_coefficient=w, depth_coefficient=d, dropout_rate=p, image_size=s)
else:
raise NotImplementedError('model name is not pre-defined: %s' % model_name)
if override_params:
# ValueError will be raised here if override_params has fields not included in global_params.
global_params = global_params._replace(**override_params)
return blocks_args, global_params
url_map = {
'efficientnet-b0': 'http://storage.googleapis.com/public-models/efficientnet-b0-08094119.pth',
'efficientnet-b1': 'http://storage.googleapis.com/public-models/efficientnet-b1-dbc7070a.pth',
'efficientnet-b2': 'http://storage.googleapis.com/public-models/efficientnet-b2-27687264.pth',
'efficientnet-b3': 'http://storage.googleapis.com/public-models/efficientnet-b3-c8376fa2.pth',
'efficientnet-b4': 'http://storage.googleapis.com/public-models/efficientnet-b4-e116e8b3.pth',
'efficientnet-b5': 'http://storage.googleapis.com/public-models/efficientnet-b5-586e6cc6.pth',
}
def load_pretrained_weights(model, model_name, load_fc=True):
""" Loads pretrained weights, and downloads if loading for the first time. """
state_dict = model_zoo.load_url(url_map[model_name])
if load_fc:
model.load_state_dict(state_dict)
else:
state_dict.pop('_fc.weight')
state_dict.pop('_fc.bias')
res = model.load_state_dict(state_dict, strict=False)
assert str(res.missing_keys) == str(['_fc.weight', '_fc.bias']), 'issue loading pretrained weights'
print('Loaded pretrained weights for {}'.format(model_name))
class MBConvBlock(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""
def __init__(self, block_args, global_params):
super().__init__()
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum
self._bn_eps = global_params.batch_norm_epsilon
self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip # skip connection and drop connect
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Expansion phase
inp = self._block_args.input_filters # number of input channels
oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels
if self._block_args.expand_ratio != 1:
self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Depthwise convolution phase
k = self._block_args.kernel_size
s = self._block_args.stride
self._depthwise_conv = Conv2d(
in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
kernel_size=k, stride=s, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Squeeze and Excitation layer, if desired
if self.has_se:
num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
# Output phase
final_oup = self._block_args.output_filters
self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
def forward(self, inputs, drop_connect_rate=None):
"""
:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""
# Expansion and Depthwise Convolution
x = inputs
if self._block_args.expand_ratio != 1:
x = relu_fn(self._bn0(self._expand_conv(inputs)))
x = relu_fn(self._bn1(self._depthwise_conv(x)))
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
x = torch.sigmoid(x_squeezed) * x
x = self._bn2(self._project_conv(x))
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
class EfficientNet(nn.Module):
"""
An EfficientNet model. Most easily loaded with the .from_name or .from_pretrained methods
Args:
blocks_args (list): A list of BlockArgs to construct blocks
global_params (namedtuple): A set of GlobalParams shared between blocks
Example:
model = EfficientNet.from_pretrained('efficientnet-b0')
"""
def __init__(self, blocks_args=None, global_params=None):
super().__init__()
assert isinstance(blocks_args, list), 'blocks_args should be a list'
assert len(blocks_args) > 0, 'block args must be greater than 0'
self._global_params = global_params
self._blocks_args = blocks_args
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Batch norm parameters
bn_mom = 1 - self._global_params.batch_norm_momentum
bn_eps = self._global_params.batch_norm_epsilon
# Stem
in_channels = 3 # rgb
out_channels = round_filters(32, self._global_params) # number of output channels
self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
# Build blocks
self._blocks = nn.ModuleList([])
for block_args in self._blocks_args:
# Update block input and output filters based on depth multiplier.
block_args = block_args._replace(
input_filters=round_filters(block_args.input_filters, self._global_params),
output_filters=round_filters(block_args.output_filters, self._global_params),
num_repeat=round_repeats(block_args.num_repeat, self._global_params)
)
# The first block needs to take care of stride and filter size increase.
self._blocks.append(MBConvBlock(block_args, self._global_params))
if block_args.num_repeat > 1:
block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
for _ in range(block_args.num_repeat - 1):
self._blocks.append(MBConvBlock(block_args, self._global_params))
# Head
in_channels = block_args.output_filters # output of final block
out_channels = round_filters(1280, self._global_params)
self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
# Final linear layer
self._dropout = self._global_params.dropout_rate
self._fc = nn.Linear(out_channels, self._global_params.num_classes)
def extract_features(self, inputs):
""" Returns output of the final convolution layer """
# Stem
x = relu_fn(self._bn0(self._conv_stem(inputs)))
# Blocks
for idx, block in enumerate(self._blocks):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / len(self._blocks)
x = block(x, drop_connect_rate=drop_connect_rate)
# Head
x = relu_fn(self._bn1(self._conv_head(x)))
return x
def forward(self, inputs):
""" Calls extract_features to extract features, applies final linear layer, and returns logits. """
# Convolution layers
x = self.extract_features(inputs)
# Pooling and final linear layer
x = F.adaptive_avg_pool2d(x, 1).squeeze(-1).squeeze(-1)
if self._dropout:
x = F.dropout(x, p=self._dropout, training=self.training)
x = self._fc(x)
return x
@classmethod
def from_name(cls, model_name, override_params=None):
cls._check_model_name_is_valid(model_name)
blocks_args, global_params = get_model_params(model_name, override_params)
return EfficientNet(blocks_args, global_params)
    @classmethod
    def from_pretrained(cls, model_name, num_classes=1000):
        # NOTE: unlike the upstream package, this variant does not fetch weights here;
        # pretrained weights are expected to be loaded separately (e.g. via load_state_dict).
        model = EfficientNet.from_name(model_name, override_params={'num_classes': num_classes})
        return model
@classmethod
def get_image_size(cls, model_name):
cls._check_model_name_is_valid(model_name)
_, _, res, _ = efficientnet_params(model_name)
return res
@classmethod
def _check_model_name_is_valid(cls, model_name, also_need_pretrained_weights=False):
""" Validates model name. None that pretrained weights are only available for
the first four models (efficientnet-b{i} for i in 0,1,2,3) at the moment. """
num_models = 4 if also_need_pretrained_weights else 8
valid_models = ['efficientnet_b'+str(i) for i in range(num_models)]
if model_name.replace('-','_') not in valid_models:
raise ValueError('model_name should be one of: ' + ', '.join(valid_models))
class Crop_From_Gray:
def __init__(self, tol=7):
self.tol = tol
def _crop_image_from_gray(self, img):
if img.ndim == 2:
mask = img > self.tol
return img[np.ix_(mask.any(1),mask.any(0))]
elif img.ndim == 3:
gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
mask = gray_img>self.tol
check_shape = img[:,:,0][np.ix_(mask.any(1),mask.any(0))].shape[0]
if (check_shape == 0): # image is too dark so that we crop out everything,
return img # return original image
else:
img1 = img[:,:,0][np.ix_(mask.any(1),mask.any(0))]
img2 = img[:,:,1][np.ix_(mask.any(1),mask.any(0))]
img3 = img[:,:,2][np.ix_(mask.any(1),mask.any(0))]
img = np.stack([img1,img2,img3],axis=-1)
return img
def __call__(self, image, force_apply=None):
img = self._crop_image_from_gray(image)
return {"image": img}
class Circle_Crop:
def _circle_crop(self, img):
height, width, depth = img.shape
x = int(width/2)
y = int(height/2)
r = np.amin((x,y))
circle_img = np.zeros((height, width), np.uint8)
cv2.circle(circle_img, (x,y), int(r), 1, thickness=-1)
img = cv2.bitwise_and(img, img, mask=circle_img)
return img
def __call__(self, image, force_apply=None):
img = self._circle_crop(image)
return {"image": img}
class Ben_preprocess:
def __init__(self, sigma=10):
self.sigma = sigma
def __call__(self, image, force_apply=None):
image = cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0,0) , self.sigma), -4, 128)
return {"image": image}
class RetinopathyDatasetTest(Dataset):
def __init__(self, csv_file, path_to_data, transform):
self.data = pd.read_csv(csv_file)
self.path_to_data = path_to_data
self.transform = transform
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
img_name = os.path.join(self.path_to_data, self.data.loc[idx, 'id_code'] + '.png')
image = cv2.imread(img_name)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = self.transform(image=image)
return {'image': image['image']}
def predict(test_dl, model, batch_size, len_test_dataset, device):
with torch.no_grad():
test_preds = np.zeros((len_test_dataset, 1))
t = tqdm(test_dl)
for i, x_batch in enumerate(t):
pred = model(x_batch['image'].to(device))
test_preds[i * batch_size:(i + 1) * batch_size] = pred.cpu().squeeze().numpy().ravel().reshape(-1, 1)
return test_preds
def round_preds(preds):
coef = [0.5, 1.5, 2.5, 3.5]
for i, pred in enumerate(preds):
if pred < coef[0]:
preds[i] = 0
elif pred >= coef[0] and pred < coef[1]:
preds[i] = 1
elif pred >= coef[1] and pred < coef[2]:
preds[i] = 2
elif pred >= coef[2] and pred < coef[3]:
preds[i] = 3
else:
preds[i] = 4
return preds
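# Quick sanity check for round_preds (hedged example with made-up values):
# round_preds(np.array([0.2, 1.7, 3.9])) would give array([0., 2., 4.]).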
def pre_transforms(image_size=224, crop_from_gray=False, circle_crop=False, ben_preprocess=0):
transforms = [Resize(image_size, image_size)]
if crop_from_gray is True:
transforms = [Crop_From_Gray()] + transforms
if ben_preprocess > 0:
transforms.append(Ben_preprocess(sigma=ben_preprocess))
if circle_crop is True:
transforms.append(Circle_Crop())
return Compose(transforms)
def post_transforms(normalize=True):
transforms = [ToTensor()]
if normalize is True:
transforms = [Normalize()] + transforms
return Compose(transforms)
class efficientnet_pretrained(nn.Module):
def __init__(self, k, num_classes, pretrained):
super(efficientnet_pretrained, self).__init__()
self.name = 'efficientnet-b' + str(k)
self.model = EfficientNet.from_pretrained(self.name, num_classes)
def forward(self, x):
return self.model(x)
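# --- Hedged end-to-end usage sketch (my addition, not from the original kernel) ---
# Shows how the pieces above compose: transforms -> dataset -> model -> predict
# -> round_preds. The csv path, image folder, batch size and model variant are
# illustrative assumptions, not values taken from this project.
if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    transform = Compose([
        pre_transforms(image_size=224, crop_from_gray=True, ben_preprocess=10),
        post_transforms(normalize=True),
    ])
    test_ds = RetinopathyDatasetTest('test.csv', 'test_images', transform)  # hypothetical paths
    test_dl = torch.utils.data.DataLoader(test_ds, batch_size=32, shuffle=False)
    model = efficientnet_pretrained(k=0, num_classes=1, pretrained=False).to(device).eval()
    preds = predict(test_dl, model, batch_size=32, len_test_dataset=len(test_ds), device=device)
    preds = round_preds(preds)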
|
[
"noreply@github.com"
] |
nikita-mishunyayev.noreply@github.com
|
e70cb2bf382b44dc97be7bff59e7923a6a9b57ec
|
94675efda520e7c7927e0ff98302ba8d11d0f7e6
|
/myvenv/bin/pip-3.4
|
ec0a4a7114df884b35b77e8c995e7b33f10d95ba
|
[] |
no_license
|
estefib/my-first-blog-
|
23358cfda50aab1f85b8d333ba7b8ad5300b8ccb
|
b4128b1035af300e0e3db9863716183dae0c7639
|
refs/heads/master
| 2021-01-18T22:13:38.488749
| 2016-10-29T17:38:32
| 2016-10-29T17:38:32
| 72,300,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
4
|
#!/Users/Estefi/Documents/django_girls/dg-project/myvenv/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==1.2.1','console_scripts','pip-3.4'
__requires__ = 'pip==1.2.1'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('pip==1.2.1', 'console_scripts', 'pip-3.4')()
)
|
[
"estefaniaballesterg@gmail.com"
] |
estefaniaballesterg@gmail.com
|
9460c243621b5445e7a03324b7c44b33627e7ba6
|
91406d4983e24a4ac8563bc68b50ce1060259300
|
/digital_handwriting/urls.py
|
cf1c8e1bd054ee2548f2f8b3814eaaa9fda25c40
|
[
"MIT"
] |
permissive
|
therajmaurya/Digital-Handwriting
|
c71b208c0c8268a026eafd3ca7d09c53bae40a51
|
8ec89a0ad11b1d071536f77d6cac85ba02b83d43
|
refs/heads/master
| 2022-03-30T01:54:36.743159
| 2019-12-25T21:44:39
| 2019-12-25T21:44:39
| 113,925,666
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 916
|
py
|
"""digital_handwriting URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from handwriting import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$',views.text),
url(r'^created/',views.create,name='create')
]
|
[
"therajmaurya@gmail.com"
] |
therajmaurya@gmail.com
|
f59d1cdb320110df45c4594d131216bb073e5546
|
33f697fd4fd2398500cf5f8d555617a863de1eb4
|
/problem1_3sand5s.py
|
d7dc311b254a287122e893cbb1e9ee460d322951
|
[] |
no_license
|
AlexHahnPublic/EulerProblems
|
c70e5744dc74b293a2d325b0dface457f517ad5f
|
53efea4f941c93d8a5e114e80f836a53dc5d9cf9
|
refs/heads/master
| 2021-06-07T02:05:16.801556
| 2016-10-22T04:25:00
| 2016-10-22T04:25:00
| 33,910,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 750
|
py
|
# Euler Problem 1:
# If we list all the natural numbers below 10 that are multiples of 3 or 5, we
# get 3, 5, 6, and 9. The sum of these multiples is 23
# Find the sum of all the multiples of 3 and 5 below 1000.
# Solution: trial division, check each number up to n, if divisible by 3 or 5
# add it to the running total
import time
def sum3and5(n):
start_time = time.time()
total = 0
for nat in range(n):
if (nat % 3 == 0) or (nat % 5 == 0):
total = total + nat
total_time = time.time() - start_time
print "The sum of all the multiples of 3 and 5 below", n, "is", total
print "This program took:", total_time, "seconds to run"
if __name__ == "__main__":
import sys
sum3and5(int(sys.argv[1]))
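# Hedged aside (my addition, not part of the original solution): the same sum
# has a closed form via inclusion-exclusion, avoiding the O(n) loop entirely.
def sum3and5_closed_form(n):
    def sum_multiples(k):
        m = (n - 1) // k  # how many multiples of k lie below n
        return k * m * (m + 1) // 2
    return sum_multiples(3) + sum_multiples(5) - sum_multiples(15)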
|
[
"afh53@physics.cornell.edu"
] |
afh53@physics.cornell.edu
|
95f77f9ccf028b1c765b0de7d6a75f8a0979455b
|
d4e5a2095e15e5e8fc5e3ddb232f41cc3c5aa608
|
/Python/1001.py
|
c7703ba4060b8fbfdd25f5ed8fec879df3dcd8ec
|
[] |
no_license
|
xaadu/uri-solves
|
bea5c3e78e3c6a6f3aa8c83ef9407e08d714a94a
|
7f0d03376c4295e9e658c5b198d1d304c93699b3
|
refs/heads/master
| 2020-05-14T17:03:44.545029
| 2019-04-17T12:33:49
| 2019-04-17T12:33:49
| 181,885,158
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 71
|
py
|
# URI --> 1001
x = int(input())
y = int(input())
print("X =", (x+y))
|
[
"xayed42@gmail.com"
] |
xayed42@gmail.com
|
41e0c35d8f6f140f28776895e5818242163e49f8
|
7a239875dc9147377c68403e33ce234448b3b443
|
/libsubmit/version.py
|
e35bbbade12937ef58ce92916b9c34a995a2520d
|
[
"Apache-2.0"
] |
permissive
|
benhg/libsubmit
|
a51f14ffe7220a5c523d5b9b5c079b2a4f030749
|
3ff05719d01e9a7c78c81d002b22f9a927cb6a4f
|
refs/heads/master
| 2021-05-14T02:25:27.566641
| 2018-02-09T21:35:44
| 2018-02-09T21:35:44
| 116,594,575
| 0
| 0
| null | 2018-02-09T21:12:08
| 2018-01-07T19:55:57
|
Python
|
UTF-8
|
Python
| false
| false
| 91
|
py
|
''' Set module version
<Major>.<Minor>.<maintenance>[-alpha/beta/..]
'''
VERSION = '0.3.0'
|
[
"yadudoc1729@gmail.com"
] |
yadudoc1729@gmail.com
|
e08959efc568fd56daefcf8ab0405bd7db16d4b2
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayOpenMiniTipsDeliveryCreateResponse.py
|
0f0bc4813082df45cd6c1d04d87d1319c5c3daad
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 754
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenMiniTipsDeliveryCreateResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenMiniTipsDeliveryCreateResponse, self).__init__()
self._delivery_id = None
@property
def delivery_id(self):
return self._delivery_id
@delivery_id.setter
def delivery_id(self, value):
self._delivery_id = value
def parse_response_content(self, response_content):
response = super(AlipayOpenMiniTipsDeliveryCreateResponse, self).parse_response_content(response_content)
if 'delivery_id' in response:
self.delivery_id = response['delivery_id']
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
c1415377ed6f1240038e86f44212940480f53494
|
eb8d1aaf5992b5734bc5be5320144552decacba0
|
/reccurent.py
|
6a1603a28ec394be4cd5a9b368aa25c7d09a13c4
|
[] |
no_license
|
olivernina/htr-ctc
|
3fa719a9ffb041dc7cd7499c0731cfd4f0a03e15
|
a147801741b94bac9f1658e83790d06a2a043b99
|
refs/heads/master
| 2021-01-10T05:54:09.562636
| 2018-07-18T10:27:06
| 2018-07-18T10:27:06
| 50,888,562
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,602
|
py
|
import theano
import theano.tensor as tt
from weights import init_wts, share
class RecurrentLayer():
def __init__(self, inpt, nin, nunits, conv_sz=1,
learn_init_state=True):
# inpt is transposed a priori
tablet_wd, _ = inpt.shape
if conv_sz > 1:
inpt_clipped = inpt[:conv_sz * (tablet_wd // conv_sz), :]
inpt_conv = inpt_clipped.reshape(
(tablet_wd // conv_sz, nin * conv_sz))
else:
inpt_conv = inpt
wio = share(init_wts(nin * conv_sz, nunits)) # input to output
woo = share(init_wts(nunits, nunits)) # output to output
bo = share(init_wts(nunits))
h0 = share(init_wts(nunits))
def step(in_t, out_tm1):
return tt.tanh(tt.dot(out_tm1, woo) + tt.dot(in_t, wio) + bo)
self.output, _ = theano.scan(
step,
sequences=[inpt_conv],
outputs_info=[h0]
)
self.params = [wio, woo, bo]
if learn_init_state:
self.params += [h0]
self.nout = nunits
class BiRecurrentLayer():
def __init__(self, inpt, nin, nunits, conv_sz=1,
learn_init_state=True):
fwd = RecurrentLayer(inpt, nin, nunits, conv_sz, learn_init_state)
bwd = RecurrentLayer(inpt[::-1], nin, nunits, conv_sz, learn_init_state)
self.params = fwd.params + bwd.params
self.nout = fwd.nout + bwd.nout
self.output = tt.concatenate([fwd.output,
bwd.output[::-1]],
axis=1)
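# Hedged usage sketch (my addition; sizes are illustrative): build a
# bidirectional layer over a (width, nin) input and compile a function that
# returns the concatenated forward/backward activations.
if __name__ == '__main__':
    inpt = tt.matrix('inpt')  # input, transposed a priori as the class expects
    layer = BiRecurrentLayer(inpt, nin=48, nunits=64)
    fn = theano.function([inpt], layer.output)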
|
[
"pk_blaze@knights.ucf.edu"
] |
pk_blaze@knights.ucf.edu
|
2f5faa2b2d0339df0be2c970e5ca671545dfad00
|
f11c0cb4e9fafde02c2d4a59ce5627a21a0f2e93
|
/turtlebot/build/turtlebot/turbot_map/catkin_generated/pkg.develspace.context.pc.py
|
00c3586ca66dbfaabc797269a59875e3e3884c69
|
[] |
no_license
|
ilazy991/turtlebot
|
99e3b914758c257e92a87b34beef75c353941034
|
09740786a9cda8abd4c1defe90cdceb7c6e1a18f
|
refs/heads/master
| 2020-04-30T00:09:51.391880
| 2019-03-19T10:25:25
| 2019-03-19T10:25:25
| 176,495,982
| 0
| 2
| null | 2019-03-19T11:21:12
| 2019-03-19T11:21:11
| null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot_map"
PROJECT_SPACE_DIR = "/home/nvidia/Desktop/Code/turtlebot/devel"
PROJECT_VERSION = "0.0.0"
|
[
"1142144213@qq.com"
] |
1142144213@qq.com
|
cce5f7d532abfffad86be6ec42bce232e7d6d494
|
fe97b7ba5dd24e52e0ef2f4a9cf3b2461e10ceea
|
/To-Do List/task/todolist/datastore.py
|
c2666d56a5fb7998aa6c6439f767e326bd6fff2d
|
[] |
no_license
|
nghianja/To-Do-List
|
4acc8a5c82110d3959ddd5c1f6bc23a53cd4397b
|
fab7313da51a5bc4ce398550c6ba2f48e3108b0c
|
refs/heads/master
| 2022-12-18T02:44:12.213690
| 2020-09-26T14:26:40
| 2020-09-26T14:26:40
| 298,739,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 719
|
py
|
# A model class that describes the table in the database.
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Date
from datetime import datetime
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class Table(Base):
__tablename__ = 'task'
id = Column(Integer, primary_key=True)
task = Column(String)
    deadline = Column(Date, default=datetime.today)  # pass the callable so the default is evaluated per insert, not once at import
def __repr__(self):
return self.task
def create_datastore():
engine = create_engine('sqlite:///todo.db?check_same_thread=False')
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
return Session()
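# Hedged usage sketch (my addition; the task text is illustrative):
if __name__ == '__main__':
    session = create_datastore()
    session.add(Table(task='Write report'))
    session.commit()
    print(session.query(Table).all())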
|
[
"james.ng@projapps.com"
] |
james.ng@projapps.com
|
d6f643dec24cf89d8b97066a05f2dc90e6dfbe46
|
1adf7d71d32ac15a94251cbf8c296857e7e0ab11
|
/Advanced Python/Design Patterns/anti-patterns_correctness.py
|
0d9b6613645fcd5bed2d4de35eb7266d506756ef
|
[] |
no_license
|
pvvanilkumar/Learning
|
691c07688dc6395b2e9e92bd44e33ee8f19c7cd0
|
83e5bbd296031812526b7df0ae4cf7dac8aae568
|
refs/heads/master
| 2020-12-22T03:40:07.319396
| 2020-01-28T04:56:14
| 2020-01-28T04:56:14
| 236,659,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
class Rectangle(object):
def __init__(self, width, height):
self._width = width
self._height = height
r = Rectangle(5, 6)
# direct access of protected member
print("Width: {:d}".format(r._width))
|
[
"pveerav@apac.corpdir.net"
] |
pveerav@apac.corpdir.net
|
7baaeaed32956cf32db8273ce882ac55fbcf7d77
|
5695d365852a5b9bc4f8092c8aba139530def229
|
/hs_collection_resource/migrations/0002_collectiondeletedresource_resource_owners.py
|
d6a80fde974db5778255ad0469fa26b6f89dd634
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
heliumdatacommons/commonsshare
|
6863705e71be2fb9ef4a822e391d60cfcbc82a44
|
4336dc337ca2b36c2d0a0a7ea793af624c1356c7
|
refs/heads/develop
| 2021-05-11T14:12:53.511860
| 2020-04-15T20:48:38
| 2020-04-15T20:48:38
| 117,697,775
| 2
| 4
|
BSD-3-Clause
| 2020-03-31T14:08:15
| 2018-01-16T14:58:04
|
Python
|
UTF-8
|
Python
| false
| false
| 586
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('hs_collection_resource', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='collectiondeletedresource',
name='resource_owners',
field=models.ManyToManyField(related_name='collectionDeleted', to=settings.AUTH_USER_MODEL),
),
]
|
[
"zyli2004@gmail.com"
] |
zyli2004@gmail.com
|
c60c7909ee17189186d37d45b7eda97c4c7d3bf0
|
bc441bb06b8948288f110af63feda4e798f30225
|
/resource_manage_sdk/api/cmdb_approve/get_history_approver_list_pb2.pyi
|
abd98e65926c1c34c66d48ba053cdace0455c688
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,398
|
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedScalarFieldContainer as google___protobuf___internal___containers___RepeatedScalarFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from google.protobuf.struct_pb2 import (
Struct as google___protobuf___struct_pb2___Struct,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class GetHistoryApproverListRequest(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
@property
def query(self) -> google___protobuf___struct_pb2___Struct: ...
def __init__(self,
*,
query : typing___Optional[google___protobuf___struct_pb2___Struct] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> GetHistoryApproverListRequest: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> GetHistoryApproverListRequest: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"query",b"query"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"query",b"query"]) -> None: ...
class GetHistoryApproverListResponse(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
userList = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
def __init__(self,
*,
userList : typing___Optional[typing___Iterable[typing___Text]] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> GetHistoryApproverListResponse: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> GetHistoryApproverListResponse: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"userList",b"userList"]) -> None: ...
class GetHistoryApproverListResponseWrapper(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
code = ... # type: builtin___int
codeExplain = ... # type: typing___Text
error = ... # type: typing___Text
@property
def data(self) -> GetHistoryApproverListResponse: ...
def __init__(self,
*,
code : typing___Optional[builtin___int] = None,
codeExplain : typing___Optional[typing___Text] = None,
error : typing___Optional[typing___Text] = None,
data : typing___Optional[GetHistoryApproverListResponse] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> GetHistoryApproverListResponseWrapper: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> GetHistoryApproverListResponseWrapper: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"data",b"data"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"code",b"code",u"codeExplain",b"codeExplain",u"data",b"data",u"error",b"error"]) -> None: ...
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
f56554b1fcba93eccdd0b91c791dba09a26ef386
|
699b75c65e1bc5a793aef69dad6350fdc6e99fd6
|
/number_1.py
|
259ac43ae6b7476b3d09f30d0a94aa13a80d6a18
|
[] |
no_license
|
najihabrilianti/flip.id-test-qa
|
4bc56487b65d88713ea606de42c74d4167717fe1
|
750c0ee2aa3eef1a93f4ebd1acb418e7822e3417
|
refs/heads/main
| 2023-04-18T23:46:11.497349
| 2021-05-08T21:56:03
| 2021-05-08T21:56:03
| 365,614,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 569
|
py
|
db_transaction = [['011','Smith','BCA','BRI',1000000],['100','John','BRI','BCA',1000000],['101','Fulan','Mandiri','BCA',4333011],['110','Sri','BNI','BSI',3000000],['111','Bambang','BCA','BSI',1500000]]
dash_admin = [['001','Smith','BCA','BRI',1000000],['100','John','BRI','BCA',None],['101','Fulan','Mandiri','BCA',4000000],['110','Sri','BNI','BSI',3000000],['111','Bambang','BCA','BSI',1500000]]
for dash_index, data in enumerate(db_transaction):
    # enumerate avoids list.index(), which returns the first match and would
    # mis-pair rows if two transactions were identical
    if data == dash_admin[dash_index]:
        print('pass')
    else:
        print('failed')
|
[
"73904901+najihabrilianti@users.noreply.github.com"
] |
73904901+najihabrilianti@users.noreply.github.com
|
8819265c82f77a7ee9ccf1be48314333ca28c1c5
|
704fda0d0e05f66f0c4f3c17cc4b39e2b0bc6220
|
/homework1/task2.py
|
29779bec2ac406bc0aae1e8f0b6b2cd82a98eb24
|
[] |
no_license
|
humantom88/geekbrains-python-basics
|
71eae1be4f8b1d757db17200d64d2b14ea51d83f
|
ebde8e0a9b92386f2a5b994a880184adde8c7454
|
refs/heads/master
| 2021-05-17T10:48:56.788147
| 2020-04-23T09:51:10
| 2020-04-23T09:51:10
| 250,743,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
# 2. The user enters a time in seconds. Convert it to hours,
# minutes and seconds and print it in hh:mm:ss format. Use string formatting.
print('Enter the number of seconds: ')
seconds = int(input())
minutes = seconds // 60
hours = minutes // 60
seconds = seconds % 60
minutes = minutes % 60
hours = hours % 24
if seconds < 10:
    seconds = f'0{seconds}'
if minutes < 10:
    minutes = f'0{minutes}'
if hours < 10:
    hours = f'0{hours}'
print(f'You entered the time: {hours}:{minutes}:{seconds}')
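# Hedged aside (my addition): divmod plus format specifiers give the same
# zero-padded output without the manual '0' prefixing above:
def format_hms(total_seconds):
    m, s = divmod(total_seconds, 60)
    h, m = divmod(m, 60)
    return f'{h % 24:02d}:{m:02d}:{s:02d}'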
|
[
"humantom88@gmail.com"
] |
humantom88@gmail.com
|
0d2af27c7b63e8f21fc7c713d6004cfdb8063ea9
|
820a8e7ec541299f315ac43ddb3b41236e11cd33
|
/demo/streaming/message_based_client.py
|
8bba3e3493dd7f6aadd1d443706b2ee614e2f6f3
|
[
"Apache-2.0"
] |
permissive
|
hpsaturn/Autobahn
|
5caba163ee976e8ddedadfb1a79139ba6014861b
|
f7bd44433f227130901440e768073e2afbf410bf
|
refs/heads/master
| 2021-01-17T22:09:02.484645
| 2011-11-01T18:27:57
| 2011-11-01T18:27:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,904
|
py
|
###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from ranstring import randomByteString
from twisted.internet import reactor
from autobahn.websocket import WebSocketClientFactory, WebSocketClientProtocol
MESSAGE_SIZE = 1 * 2**20
class MessageBasedHashClientProtocol(WebSocketClientProtocol):
"""
Message-based WebSockets client that generates stream of random octets
sent to WebSockets server as a sequence of messages. The server will
respond to us with the SHA-256 computed over each message. When
we receive response, we repeat by sending a new message.
"""
def sendOneMessage(self):
data = randomByteString(MESSAGE_SIZE)
self.sendMessage(data, binary = True)
def onOpen(self):
self.count = 0
self.sendOneMessage()
def onMessage(self, message, binary):
print "Digest for message %d computed by server: %s" % (self.count, message)
self.count += 1
self.sendOneMessage()
if __name__ == '__main__':
factory = WebSocketClientFactory()
factory.protocol = MessageBasedHashClientProtocol
reactor.connectTCP("localhost", 9000, factory)
reactor.run()
|
[
"tobias.oberstein@tavendo.de"
] |
tobias.oberstein@tavendo.de
|
870c4b191c4b5666fd9ab5962c5bad455a42e9b3
|
4b0b0031c812b457925002236ec0ad5c98df2c63
|
/app.py
|
12b289d731d9f18327a4580bb2ba06d407f898be
|
[
"MIT"
] |
permissive
|
antoreep-jana/webapp-main
|
1e6e418ec032492f7ef22c6be0e51bd867c2c7b1
|
d81cfc2f90ae5fe571e287307d194517cb202070
|
refs/heads/main
| 2023-01-05T09:31:15.452956
| 2020-11-05T13:59:22
| 2020-11-05T13:59:22
| 310,307,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,695
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
# coding=utf-8
import sys
import os
import glob
import re
import numpy as np
import tensorflow as tf
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
# Keras
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
# Flask utils
from flask import Flask, redirect, url_for, request, render_template
from tensorflow.keras.applications.imagenet_utils import decode_predictions
from werkzeug.utils import secure_filename
#from gevent.pywsgi import WSGIServer
# Define a flask app
app = Flask(__name__)
# Model saved with Keras model.save()
MODEL_PATH ='model_mobilenetv2.h5'
# Load your trained model
model = MobileNetV2(weights = MODEL_PATH)
import base64
from PIL import Image
from io import BytesIO
def base64_to_pil(img_base64):
"""
Convert base64 image data to PIL image
"""
image_data = re.sub('^data:image/.+;base64,', '', img_base64)
pil_image = Image.open(BytesIO(base64.b64decode(image_data)))
return pil_image
def np_to_base64(img_np):
"""
Convert numpy image (RGB) to base64 string
"""
img = Image.fromarray(img_np.astype('uint8'), 'RGB')
buffered = BytesIO()
img.save(buffered, format="PNG")
return u"data:image/png;base64," + base64.b64encode(buffered.getvalue()).decode("ascii")
#def model_predict(img_path, model):
def model_predict(img, model):
#print(img_path)
#img = image.load_img(img_path, target_size=(224, 224))
#img = img.resize((224,224))
img = np.resize(img, (224,224,3))
print(img.shape)
print('-'*20)
print(img)
# Preprocessing the image
x = image.img_to_array(img)
x = x.astype('float32')
print(x.shape)
# x = np.true_divide(x, 255)
## Scaling
x=x/255.
x = np.expand_dims(x, axis=0)
#print(x.shape)
# Be careful how your trained model deals with the input
# otherwise, it won't make correct prediction!
# x = preprocess_input(x)
preds = model.predict(x)
#print(preds)
#preds=np.argmax(preds, axis=1)
"""if preds==0:
preds="The leaf is a diseased cotton leaf"
elif preds==1:
preds="The leaf is a diseased cotton plant"
elif preds==2:
preds="The leaf is a fresh cotton leaf"
else:
preds="The leaf is a fresh cotton plant"
"""
pred_proba = "{:.3f}".format(np.amax(preds))
pred_class = decode_predictions(preds, top = 1)
result = str(pred_class[0][0][1])
result = result.replace('_', ' ').capitalize()
return f"The result is {result}"
@app.route('/', methods=['GET'])
def index():
# Main page
return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def upload():
if request.method == 'POST':
# Get the file from post request
#f = request.files['file']
#print(request.files)
#print('-' * 20)
filestr = request.files['file'].read()
print(filestr)
import cv2
        npimg = np.frombuffer(filestr, np.uint8)  # np.fromstring is deprecated for raw bytes
print(npimg)
print(npimg.shape)
#img = cv2.imdecode(npimg, cv2.CV_LOAD_IMAGE_UNCHANGED)
img = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
#img = base64_to_pil(request.files['file'].read())
print(img)
print(img.shape)
# Save the file to ./uploads
#basepath = os.path.dirname(__file__)
#file_path = os.path.join(
# basepath, 'uploads', secure_filename(f.filename))
#f.save(file_path)
# Make prediction
#preds = model_predict(file_path, model)
preds= model_predict(img, model)
result=preds
return result
return None
if __name__ == '__main__':
app.run(port=5001,debug=True)
|
[
"antoreepjana@yahoo.in"
] |
antoreepjana@yahoo.in
|
6d82dbb77c8190ea8ac5e5035aba0ecb0d2223df
|
8d7bd15a2d303510e483940a96200e4811d1f4df
|
/14_this_s.py
|
2e8d235b2e0b8b87497801ff7cf20b0fb0ff5184
|
[
"Apache-2.0"
] |
permissive
|
zhuhuifeng/PyOJ
|
cecd5a97418cc282a335cb2841bb5783020a75b0
|
94f2bcd6f0efab099eca5ffa7e776d149b69d8d8
|
refs/heads/master
| 2021-01-13T04:46:31.843020
| 2017-01-31T12:58:59
| 2017-01-31T12:58:59
| 78,987,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 61
|
py
|
# coding=utf-8
# Print the Zen of Python
import this
print(this.s)
|
[
"zhuhuifeng_4981@126.com"
] |
zhuhuifeng_4981@126.com
|
be9e9f2b85890f23ffa63690cb52d7a7b92e2eb5
|
35bf55a3e18dfe5086d7ba39f3b5e7cdc068a9f9
|
/segment/statistic.py
|
20f0cee553f4654482b69bb385e756ad26a6603b
|
[] |
no_license
|
chensian/TextProcess
|
c566eec375bb37c4c30f7e2c8caaaedbd4b49e92
|
fb50d15cc9fd2440e77c96f0b5f2e2b764043e37
|
refs/heads/master
| 2021-01-15T18:41:15.499983
| 2018-03-04T10:50:19
| 2018-03-04T10:50:19
| 99,799,189
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,562
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/10/9 12:35
# @Author : chesian
# @Site :
# @File : statistic.py
# @Software: PyCharm
import pandas as pd
# Count the total number of words
from load.import_util import file_to_dict
def compute_word_num(name):
path = "D:/python/workspace/TextProcess/segment/data/lda/"
filename = name + ".lda-c"
annual_word_pure_num = {}
annual_word_num = {}
data = open(path + filename).readlines()
for key, annual in enumerate(data):
annual = annual.split(" ")
annual_word_pure_num[key] = annual[0]
annual_word = annual[1:]
            # accumulate the (non-duplicate) word counts
            word_num = 0
            for word2id_freq in annual_word:
                try:
                    word_num += int(word2id_freq.split(":")[1])
                except:
                    print(key, word2id_freq)
annual_word_num[key] = word_num
return annual_word_pure_num, annual_word_num
# Count the total number of characters
def compute_char_num(name):
    path = "D:/python/workspace/TextProcess/dataset/output/seg/"
    filename = "seg_result_" + name + ".txt"
    data = open(path + filename).readlines()
    annual_char_num = {}
    for key, annual in enumerate(data):
        # print(len(annual))
        annual = annual.replace(" ", "")
        annual_char_num[key] = len(annual) / 3
    return annual_char_num
# Merge character counts and word counts
def turn_merge_char_word(name):
annual_char_num = compute_char_num(name)
annual_word_pure_num, annual_word_num = compute_word_num(name)
id_path = "D:/python/workspace/TextProcess/dataset/output/csv/%s_file2id.csv"
type = name.split("_")[1]
file2id = pd.read_csv(id_path % type)
# print file2id.columns
file2id["word_pure_num"] = file2id["id"].apply(lambda x: annual_word_pure_num[x])
file2id["word_num"] = file2id["id"].apply(lambda x: annual_word_num[x])
file2id["char_num"] = file2id["id"].apply(lambda x: annual_char_num[int(x)])
file2id.to_csv("D:/python/workspace/TextProcess/dataset/output/statistic/" + name +"_annual_statistic.csv", index=False)
# Match words
def match_word_freq(name, wordlist):
# 1、 wordlist to idlist need word2id
path = "D:/python/workspace/TextProcess/segment/data/lda/"
filename = name + ".tokens2id"
word2id = file_to_dict(path, filename)
# print word2id
idlist = []
for word in wordlist:
# if word in word2id:
for token in word2id:
if token.find(word) != -1:
                print(token, word2id[token])
idlist.append(word2id[token])
    # Load the lda word-frequency data
path = "D:/python/workspace/TextProcess/segment/data/lda/"
filename = name + ".lda-c"
data = open(path + filename).readlines()
docs_freq_num = {}
for key, annual in enumerate(data):
annual = annual.split(" ")
# id freq
annual_word = annual[1:]
id_freq_dict = {}
for word2id_freq in annual_word:
item = word2id_freq.split(":")
try:
id_freq_dict[item[0]] = int(item[1])
except:
                print(key, word2id_freq)
freq_num = 0
for id in idlist:
if id in id_freq_dict:
freq_num += id_freq_dict[id]
docs_freq_num[key] = freq_num
return docs_freq_num
# Merge the occurrence counts of each wordlist within `name`
def merge_char_word(name):
wordlist1 = ["丝绸之路", "一带一路"]
wordlist2 = ["结构性改革", "供给侧改革"]
wordlist3 = ["新常态"]
id_path = "D:/python/workspace/TextProcess/dataset/output/csv/%s_file2id.csv"
type = name.split("_")[1]
file2id = pd.read_csv(id_path % type)
# print file2id.columns
docs_freq_num1 = match_word_freq(name, wordlist1)
docs_freq_num2 = match_word_freq(name, wordlist2)
docs_freq_num3 = match_word_freq(name, wordlist3)
file2id["wordlist1"] = file2id["id"].apply(lambda x: docs_freq_num1[x])
file2id["wordlist2"] = file2id["id"].apply(lambda x: docs_freq_num2[x])
file2id["wordlist3"] = file2id["id"].apply(lambda x: docs_freq_num3[x])
file2id.to_csv("D:/python/workspace/TextProcess/dataset/output/statistic/" + name +"_match_word_freq.csv", index=False)
if __name__ == "__main__":
# name = "mda_mda"
# name = "all_mda"
name = "all_all"
# turn_merge_char_word(name)
merge_char_word(name)
|
[
"1217873870@qq.com"
] |
1217873870@qq.com
|
68c0899de663ac126455d7e02416c71399934052
|
720aea0fd454b029cfa09331f2d1bbd4775e0b89
|
/crowdfunding/users/migrations/0002_auto_20200915_1006.py
|
afc27b8e39d26775181bd47d0f06ea61dd95d478
|
[] |
no_license
|
RebeccaMillwood/DRF_project
|
e159d5f276ab51e37ac95ccd49f2f288c7ee5497
|
b93a1cac5ff8c4ed7736966869d93c1fe8dbd5b4
|
refs/heads/master
| 2022-12-18T16:15:14.937724
| 2020-09-26T02:07:39
| 2020-09-26T02:07:39
| 293,739,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
# Generated by Django 3.0.8 on 2020-09-15 10:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='customuser',
name='about_me',
field=models.TextField(blank=True, max_length=500),
),
migrations.AddField(
model_name='customuser',
name='image',
field=models.TextField(default='https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/golden-retriever-royalty-free-image-506756303-1560962726.jpg'),
),
migrations.AddField(
model_name='customuser',
name='location',
field=models.TextField(blank=True, max_length=30),
),
migrations.AlterField(
model_name='customuser',
name='first_name',
field=models.TextField(blank=True, max_length=30),
),
migrations.AlterField(
model_name='customuser',
name='last_name',
field=models.TextField(blank=True, max_length=30),
),
]
|
[
"bec-millwood@live.com.au"
] |
bec-millwood@live.com.au
|
61f6f08c005103942b34e89882f9fddfa2baaf82
|
e61f16faf742ce29a12706880acd96cf4b267b91
|
/main.py
|
a2b5e84de363dbbdb962c5e752c6df6f8efb323a
|
[] |
no_license
|
sabbaskaragiannidis/studyGame
|
cbae14cb9ecf31786ca477a92fa9fc5bfe0caf4b
|
08199b8e34a219065f086390af4c431c29f6564b
|
refs/heads/master
| 2022-12-16T22:14:09.651773
| 2020-09-20T12:10:59
| 2020-09-20T12:10:59
| 297,060,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
from enemy import Enemy, Troll, Vampire
ugly_troll = Troll("Pug")
print("Ugly troll - {}".format(ugly_troll))
another_troll = Troll("Ug")
print("Another troll - {}".format(another_troll))
another_troll.take_damage(18)
print(another_troll)
brother = Troll("Urg")
print(brother)
ugly_troll.grunt()
another_troll.grunt()
brother.grunt()
vamp = Vampire("Vlad")
print(vamp)
vamp.take_damage(5)
print(vamp)
print("-" * 40)
another_troll.take_damage(30)
print(another_troll)
vamp._lives = 0
vamp._hit_points = 1
print(vamp)
|
[
"sabbaskaragiannidis@yahoo.com"
] |
sabbaskaragiannidis@yahoo.com
|
f346a398fe5784e66ff4835f354e82f233b674e1
|
aea77af99b0667930c5adaa4d4e73670f6533568
|
/app/lookup.py
|
eeda258772d1c96ab902ce6a51b127f7202adb93
|
[] |
no_license
|
dionseow/entity-extraction-api
|
ed57028a24c7f9d7be76d82d671f478aed10109f
|
56613dcbff5955d70d2419f72921fdfdf46ad96e
|
refs/heads/master
| 2023-06-21T06:27:55.617051
| 2021-08-04T07:48:10
| 2021-08-04T07:48:10
| 392,598,883
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 804
|
py
|
import spacy
import os
import glob
import pandas as pd
class LookUpService:
def __init__(self):
# Use chinese since it can handle whitespace and
# non whitespace tokens
self.nlp = spacy.blank("zh")
ruler = self.nlp.add_pipe("entity_ruler")
lookup_files = glob.glob("./rules/*")
patterns = []
for file in lookup_files:
ent_type = os.path.basename(file).split(".")[0].upper()
ents = list(pd.read_csv(file, header=None)[0])
patterns += [{"label": ent_type, "pattern": ent} for ent in ents]
with self.nlp.select_pipes(enable="tagger"):
ruler.add_patterns(patterns)
def lookup(self, sentence):
doc = self.nlp(sentence)
return [(ent.text, ent.label_) for ent in doc.ents]
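# Hedged usage sketch (my addition; the sentence is illustrative and assumes
# the ./rules/ lookup files exist):
if __name__ == '__main__':
    service = LookUpService()
    print(service.lookup("some sentence mentioning a known entity"))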
|
[
"dseow93@gmail.com"
] |
dseow93@gmail.com
|
2413c49ba7094d1184240f864f659a5d3c483dd9
|
038e9f4f0d2d0907ca7c93fd703fee7b5484bfc3
|
/travel_api/asgi.py
|
fa8fdd6ce36eafcbf209688c3ef6c6364703ec0e
|
[] |
no_license
|
Begayim1/final-project
|
d62c5cfa60ae03670b17179f5028c7b45d37ecd1
|
9377a608b7d02f6c4eff0f45246bc080ea57733f
|
refs/heads/master
| 2023-07-13T15:40:10.206086
| 2021-08-20T06:24:20
| 2021-08-20T06:24:20
| 397,110,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
ASGI config for travel_api project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'travel_api.settings')
application = get_asgi_application()
|
[
"kyrgyzalieva12@gmail.com"
] |
kyrgyzalieva12@gmail.com
|
cb1d4ea82e737e5bf9c2ec42560c94336b5e4563
|
5182897b2f107f4fd919af59c6762d66c9be5f1d
|
/.history/src/Simulador_20200711163425.py
|
aca21747f8e28878eb2b0759badeae8ea1d21215
|
[
"MIT"
] |
permissive
|
eduardodut/Trabalho_final_estatistica_cd
|
422b7e702f96291f522bcc68d2e961d80d328c14
|
fbedbbea6bdd7a79e1d62030cde0fab4e93fc338
|
refs/heads/master
| 2022-11-23T03:14:05.493054
| 2020-07-16T23:49:26
| 2020-07-16T23:49:26
| 277,867,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,607
|
py
|
import pandas as pd
import numpy as np
from Matriz_esferica import Matriz_esferica
from Individuo import Individuo, Fabrica_individuo
import random
from itertools import product  # product, not permutations: every (x, y) cell is needed, including the diagonal
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from scipy.sparse import csr_matrix, lil_matrix
class Simulador():
SADIO = 0
    INFECTADO_TIPO_1 = 1  # asymptomatic individuals and the initial infected one
    INFECTADO_TIPO_2 = 2  # symptomatic
CURADO = 3
MORTO = 4
def __init__(
self,
        tamanho_matriz,            # number of rows and columns of the spherical matrix
        percentual_inicial_tipo1,  # initial share of the population infected as type 1
        percentual_inicial_tipo2,  # initial share of the population infected as type 2
        chance_infeccao,           # chance a type-2 infected has of infecting a healthy individual
        chance_infeccao_tipo2,     # chance an infected individual has of becoming contagious
        chance_morte,              # chance a type-2 individual has of dying at the end of an update
        atualizacoes_cura):        # number of updates needed to cure a type-1 or type-2 individual
self.num_atualizacoes = 0
self.lista_infectados_tipo_2 = []
self.lista_infectados_tipo_1 = []
self.num_curados = 0
self.num_mortos = 0
self.chance_infeccao = chance_infeccao
self.chance_infeccao_tipo2 = chance_infeccao_tipo2
self.chance_morte = chance_morte
self.atualizacoes_cura = atualizacoes_cura
self.populacao_inicial = int(tamanho_matriz**2)
self.num_inicial_tipo2 = int(self.populacao_inicial * percentual_inicial_tipo2)
self.num_inicial_tipo1 = 1 + int(self.populacao_inicial * percentual_inicial_tipo1)
self.num_inicial_sadios = self.populacao_inicial - (self.num_inicial_tipo2 + self.num_inicial_tipo1)
self.matriz_status = lil_matrix((tamanho_matriz, tamanho_matriz),dtype= np.uint8)
self.matriz_atualizacoes_cura = lil_matrix((tamanho_matriz, tamanho_matriz),dtype= np.uint8)
#self.matriz_status = self.df_individuos.to_numpy()
self.popular(tamanho_matriz)
self.lista_matrizes_status = []
        # object responsible for validating movement on the n x n grid
self.matriz_esferica = Matriz_esferica(tamanho_matriz)
dict = {
'num_sadios':self.num_inicial_sadios,
'num_infect_t1':self.num_inicial_tipo1,
'num_infect_t2':self.num_inicial_tipo2,
'num_curados':0,
'num_mortos':0}
        # dataframe that will store the results of each update
self.dataframe = pd.DataFrame(dict,index = [0])
self.salvar_posicionamento()
def criar_individuo(self, status, posicao):
self.matriz_status[posicao[0], posicao[1]] = status
if status == self.INFECTADO_TIPO_1 or status == self.INFECTADO_TIPO_2:
self.matriz_atualizacoes_cura[posicao[0], posicao[1]] = self.atualizacoes_cura
def salvar_posicionamento(self):
self.lista_matrizes_status.append(self.matriz_status)
    def verificar_infeccao(self, lista_infectantes):
        lista_novos_infectados_tipo1 = []
        lista_novos_infectados_tipo2 = []
        # iterate over the list of infecting individuals; each one performs the infection step
        for indice_infectante in lista_infectantes:
            # fetch the neighbours of the current infecting individual
            lista_vizinhos = self.matriz_esferica.get_vizinhos(indice_infectante)
            # for each neighbour, if it is healthy, draw a random number to decide whether it gets infected
            for indice_vizinho in lista_vizinhos:
                # check for SADIO (healthy)
                if self.verifica_status(indice_vizinho) == self.SADIO:
                    # determine the new status (fixed: the original read the module-level
                    # globals here instead of the instance attributes)
                    novo_status = self.infectar(self.chance_infeccao, self.chance_infeccao_tipo2)
                    # if it became a type-1 infected
                    if novo_status == Individuo.INFECTADO_TIPO_1:
                        # add to the list of new type-1 infected
                        lista_novos_infectados_tipo1.append(indice_vizinho)
                        self.criar_individuo(Individuo.INFECTADO_TIPO_1, indice_vizinho)
                    if novo_status == Individuo.INFECTADO_TIPO_2:
                        # add to the list of new type-2 infected
                        lista_novos_infectados_tipo2.append(indice_vizinho)
                        self.criar_individuo(Individuo.INFECTADO_TIPO_2, indice_vizinho)
        return lista_novos_infectados_tipo1, lista_novos_infectados_tipo2
def checagem_morte_individual(self, chance_morte, indice):
rng_morte = random.random()
if rng_morte <= chance_morte:
self.matriz_status[indice[0], indice[1]] = self.MORTO
return self.MORTO
else:
return self.checar_cura_individual(indice)
def checar_cura_individual(self, indice):
#print("passei na cura")
#num_atualizacoes_restantes = self.matriz_atualizacoes_cura[indice[0], indice[1]]
self.matriz_atualizacoes_cura[indice[0], indice[1]] -= 1
if self.matriz_atualizacoes_cura[indice[0], indice[1]] == 0:
self.matriz_status[indice[0], indice[1]] = self.CURADO
return self.CURADO
else:
return self.matriz_status[indice[0], indice[1]]
def checagem_morte_cura_lista(self, lista_infectantes_tipo2):
lista_curados = []
lista_mortos = []
for indice_infectante in lista_infectantes_tipo2:
novo_status = self.checagem_morte_individual(self.chance_morte, indice_infectante)
if novo_status == Individuo.MORTO:
lista_mortos.append(indice_infectante)
if novo_status == Individuo.CURADO:
lista_curados.append(indice_infectante)
return lista_mortos, lista_curados
def checagem_cura_lista(self, lista_infectantes):
lista_curados = []
for indice_infectante in lista_infectantes:
novo_status = self.checar_cura_individual(indice_infectante)
if novo_status == Individuo.CURADO:
lista_curados.append(indice_infectante)
return lista_curados
    def iterar(self):
        # find the individuals newly infected by type-1 and type-2 infectors
        print(self.lista_infectados_tipo_1 + self.lista_infectados_tipo_2)
        lista_novos_infectados_tipo1, lista_novos_infectados_tipo2 = self.verificar_infeccao(self.lista_infectados_tipo_1 + self.lista_infectados_tipo_2)
        # check death/cure of the type-2 infected
        lista_mortos, lista_curados_t2 = self.checagem_morte_cura_lista(self.lista_infectados_tipo_2)
        # check cure of the type-1 infected
        lista_curados_t1 = self.checagem_cura_lista(self.lista_infectados_tipo_1)
        # remove the dead and cured from the type-1 and type-2 infector lists
nova_lista_infectados_t2 = []
for indice in self.lista_infectados_tipo_2:
if indice not in lista_mortos and indice not in lista_curados_t2:
nova_lista_infectados_t2.append(indice)
self.lista_infectados_tipo_2 = nova_lista_infectados_t2
nova_lista_infectados_t1 = []
for indice in self.lista_infectados_tipo_1:
if indice not in lista_curados_t1:
nova_lista_infectados_t1.append(indice)
self.lista_infectados_tipo_1 = nova_lista_infectados_t1
        # update the number of dead
        self.num_mortos = self.num_mortos + len(lista_mortos)
        # update the number of cured
        self.num_curados = self.num_curados + len(lista_curados_t1) + len(lista_curados_t2)
        # move the infecting individuals:
nova_lista_infectados_t1 = []
for indice in self.lista_infectados_tipo_1:
nova_lista_infectados_t1.append(self.mover_infectante(indice))
self.lista_infectados_tipo_1 = nova_lista_infectados_t1
nova_lista_infectados_t2 = []
for indice in self.lista_infectados_tipo_2:
nova_lista_infectados_t2.append(self.mover_infectante(indice))
self.lista_infectados_tipo_2 = nova_lista_infectados_t2
print(self.lista_infectados_tipo_1+self.lista_infectados_tipo_2)
        # append the new type-1 and type-2 infected to their respective lists
        self.lista_infectados_tipo_2 = self.lista_infectados_tipo_2 + lista_novos_infectados_tipo2
        self.lista_infectados_tipo_1 = self.lista_infectados_tipo_1 + lista_novos_infectados_tipo1
dict = {
'num_sadios':self.populacao_inicial - self.num_mortos - self.num_curados - len(self.lista_infectados_tipo_1) - len(self.lista_infectados_tipo_2) ,
'num_infect_t1':len(self.lista_infectados_tipo_1),
'num_infect_t2':len(self.lista_infectados_tipo_2),
'num_curados':self.num_curados,
'num_mortos':self.num_mortos}
self.dataframe = self.dataframe.append(dict, ignore_index=True)
# print("num t1: ", len(self.lista_infectados_tipo_1))
# print("num t2: ", len(self.lista_infectados_tipo_2))
# print("num curados: ", self.num_curados)
# print("num mortos: ", self.num_mortos)
# print("---------")
        # save the new status matrix
        self.salvar_posicionamento()
        # add 1 to the number of updates applied to the matrix
        self.num_atualizacoes += 1
    def infectar(self, chance_infeccao, chance_infeccao_tipo2):
        saida = Individuo.SADIO
        # random number for the chance of infecting the neighbour
        rng_infeccao = random.random()
        if rng_infeccao <= chance_infeccao:
            # random number for the chance of a type-1 vs type-2 infection
            rng_infeccao_tipo2 = random.random()
            if rng_infeccao_tipo2 <= chance_infeccao_tipo2:
                saida = Individuo.INFECTADO_TIPO_2
            else:
                saida = Individuo.INFECTADO_TIPO_1
return saida
    def popular(self, tamanho_matriz):
        # all possible index combinations of the data matrix; product (not
        # permutations) so diagonal cells (i, i) are also available, matching
        # populacao_inicial = tamanho_matriz**2
        lista_indices = list(product(range(tamanho_matriz), repeat=2))
        # shuffle the indices
        random.shuffle(lista_indices)
        # create the first type-1 individual:
        indice = lista_indices.pop()
        self.criar_individuo(Individuo.INFECTADO_TIPO_1, indice)
        self.lista_infectados_tipo_1.append(indice)
        # create the remaining type-1 individuals
        for i in range(1, self.num_inicial_tipo1):
            indice = lista_indices.pop()
            self.criar_individuo(Individuo.INFECTADO_TIPO_1, indice)
            self.lista_infectados_tipo_1.append(indice)
        # create the type-2 individuals:
        for _ in range(self.num_inicial_tipo2):
            indice = lista_indices.pop()
            self.criar_individuo(Individuo.INFECTADO_TIPO_2, indice)
            self.lista_infectados_tipo_2.append(indice)
def trocar(self,matriz,ponto_ini,ponto_final):
x_ini = ponto_ini[0]
y_ini = ponto_ini[1]
x_fin = ponto_final[0]
y_fin = ponto_final[1]
aux = matriz[x_fin,y_fin]
matriz[x_fin,y_fin] = matriz[x_ini,y_ini]
matriz[x_ini,y_ini] = aux
def verifica_status(self, indice):
return self.matriz_status[indice[0], indice[1]]
    def mover_infectante(self, posicao_inicial):
        pos_x, pos_y = posicao_inicial[0], posicao_inicial[1]
        rng_posicao = random.random()
        if rng_posicao <= 0.25:
            # move up
            pos_x -= 1
        elif rng_posicao <= 0.5:
            # move down
            pos_x += 1
        elif rng_posicao <= 0.75:
            # move left
            pos_y -= 1
        else:
            # move right
            pos_y += 1
posicao_final= self.matriz_esferica.valida_ponto_matriz(pos_x, pos_y)
self.trocar(self.matriz_status, posicao_inicial, posicao_final)
self.trocar(self.matriz_atualizacoes_cura, posicao_inicial, posicao_final)
return posicao_final
chance_infeccao = 0.3
chance_infeccao_tipo2 = 1
chance_morte = 1
atualizacoes_cura = 10
percentual_inicial_tipo1 = 0.0
percentual_inicial_tipo2 = 0.0
sim = Simulador(
5,
percentual_inicial_tipo1,
percentual_inicial_tipo2,
chance_infeccao,
chance_infeccao_tipo2,
chance_morte,atualizacoes_cura)
#print(sim.lista_matrizes_posicionamento[0])
#print(sim.lista_infectados_tipo_2)
#print(sim.lista_infectados_tipo_1)
cmap = ListedColormap(['w', 'y', 'r', 'blue', 'black'])
while sim.dataframe.iloc[-1]['num_infect_t1']+sim.dataframe.iloc[-1]['num_infect_t2'] > 0:
plt.matshow(sim.matriz_status.toarray(), cmap = cmap, vmin= 0, vmax = 4)
#
sim.iterar()
#print(sim.dataframe.iloc[-1])
#print("xxxxxxxxxxxxxxxxxTipo: ",type(sim.lista_matrizes_posicionamento[len(sim.lista_matrizes_posicionamento)-1].toarray()))
print(sim.dataframe)
plt.show()
# for i in range(12):
# #plt.matshow(sim.lista_matrizes_status[i].toarray(), cmap = cmap, vmin= 0, vmax = 4)
# print(i)
# print("Status")
# print(sim.matriz_status.toarray())
# print("Cura")
# print(sim.matriz_atualizacoes_cura.toarray())
# sim.iterar()
# m = sim.matriz_atualizacoes_cura[sim.matriz_status == 1 or sim.matriz_status == 2].toarray()
# print(m)
#plt.show()
#print(sim.dataframe)
# print(sim.lista_infectados_tipo_1)
# print(sim.lista_infectados_tipo_2)
# sim.iterar()
# print(sim.lista_infectados_tipo_1)
# print(sim.lista_infectados_tipo_2)
# print(sim.dataframe)
# print("status inicial: ", sim.df_individuos[1][0].status)
# print("Novos infectados: ", sim.verificar_infeccao(sim.lista_infectados_tipo_1))
# plt.show()
|
[
"eduardo_dut@edu.unifor.br"
] |
eduardo_dut@edu.unifor.br
|
34025f1e64a14c25f1d5699d3cd87e25e6ea9587
|
969e5c8152ef5001ba7dbaa187c75b057b4ef2ef
|
/pm/migrations/0030_auto_20170826_1351.py
|
579de1666f756f8c53406aad794a822ef001c4a3
|
[] |
no_license
|
nickwilkinson/projectmatica
|
94ab930843d6512efd0c55093d4da5ffc6d293bb
|
8e9874ac713d995a6a92814030d51aa607cb0369
|
refs/heads/master
| 2021-01-12T04:18:32.379449
| 2018-02-02T19:54:01
| 2018-02-02T19:54:01
| 77,577,536
| 5
| 1
| null | 2021-01-25T10:16:07
| 2016-12-29T02:54:48
|
Python
|
UTF-8
|
Python
| false
| false
| 460
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-08-26 20:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pm', '0029_auto_20170818_2230'),
]
operations = [
migrations.AlterField(
model_name='projectlogentry',
name='entry_type',
field=models.BooleanField(default=False),
),
]
|
[
"nwilkinson@artefactual.com"
] |
nwilkinson@artefactual.com
|
86c6fa174383a66c46552769888fee6328dd982e
|
aa431a5cd979fe5b02467758bf818a1c2f503724
|
/make/photon/prepare/migrations/version_2_2_0/__init__.py
|
6a3ef56e44e83fc3f47c9fd6802c266bbcc664cd
|
[
"Apache-2.0"
] |
permissive
|
stonezdj/harbor
|
3f12fd935ce1a5c4b020a93347ce050d0d4fdeb8
|
bc6a7f65a6fa854735e39381846e2a68951905c4
|
refs/heads/master
| 2023-08-31T17:31:43.685007
| 2021-10-21T01:25:18
| 2021-10-21T01:25:18
| 102,460,256
| 1
| 12
|
Apache-2.0
| 2019-03-12T08:28:26
| 2017-09-05T09:12:25
|
Go
|
UTF-8
|
Python
| false
| false
| 563
|
py
|
import os
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from utils.migration import read_conf
revision = '2.2.0'
down_revisions = ['2.1.0']
def migrate(input_cfg, output_cfg):
current_dir = os.path.dirname(__file__)
tpl = Environment(
loader=FileSystemLoader(current_dir),
undefined=StrictUndefined,
trim_blocks=True,
lstrip_blocks=True
).get_template('harbor.yml.jinja')
config_dict = read_conf(input_cfg)
with open(output_cfg, 'w') as f:
f.write(tpl.render(**config_dict))
|
[
"dengq@vmware.com"
] |
dengq@vmware.com
|
76586634f0e167954ccb36328502d4e8c4ce378d
|
a6aadaa997afb9d93f3968d7b35856d2df30eda4
|
/examples/Adafruit/Adafruit_Python_BNO055-master/setup.py
|
ae4a2b55fcb770d922f0ed93ba54a2e4e8f95809
|
[
"MIT"
] |
permissive
|
Stranjyr/FireflyControls
|
40e1be7236e37bac2d25b86e43cff2217544d999
|
662b366717e60d875a13db7338fe532978785b20
|
refs/heads/master
| 2021-01-20T04:17:17.989595
| 2017-04-16T10:14:06
| 2017-04-16T10:14:06
| 83,838,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,197
|
py
|
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
classifiers = ['Development Status :: 4 - Beta',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: System :: Hardware']
setup(name = 'Adafruit_BNO055',
version = '1.0.1',
author = 'Tony DiCola',
author_email = 'tdicola@adafruit.com',
description = 'Library for accessing the Bosch BNO055 absolute orientation sensor on a Raspberry Pi or Beaglebone Black.',
license = 'MIT',
classifiers = classifiers,
url = 'https://github.com/adafruit/Adafruit_Python_BNO055/',
dependency_links = ['https://github.com/adafruit/Adafruit_Python_GPIO/tarball/master#egg=Adafruit-GPIO-0.9.3'],
install_requires = ['Adafruit-GPIO>=0.9.3', 'pyserial'],
packages = find_packages())
|
[
"whampton99@gmail.com"
] |
whampton99@gmail.com
|
a2822f528b7ce6c08129ddb728eb5ec76c5718e3
|
5374df75c4c1edf044040d838b8bca5eee7428a3
|
/apps/quiz/migrations/0002_auto_20180225_1348.py
|
6590f8e86d0a435ae2b8a1ef0fb37bf8c0023790
|
[] |
no_license
|
jonathan-murmu/trivia
|
0d6a81fca1b00fa08fde4ba999066b2772c9dc1d
|
ba9d2b5e0700c17f65e851d42cb53d2673a02aa3
|
refs/heads/master
| 2022-12-09T22:36:27.079105
| 2018-07-11T18:17:11
| 2018-07-11T18:17:11
| 122,649,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
# Generated by Django 2.0.2 on 2018-02-25 13:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('quiz', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='objective',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='objectives', to='quiz.Question'),
),
]
|
[
"jonathanmurmu@gmail.com"
] |
jonathanmurmu@gmail.com
|
ab754465e32782642ed3f665783f5a0990399811
|
c30c7cb65835e483d8329354892756390ad170db
|
/.config/python/pythonrc
|
0ff561f40fca611ec79da79e1c7da1efc9f889a9
|
[] |
no_license
|
resuscv/dotfiles-python
|
0d94ab9741ff005b1ab21d13699d37c4af5f4921
|
f373a064ab25333afb4d4125b847c1e8c0881c3b
|
refs/heads/master
| 2020-06-01T00:22:35.938978
| 2013-12-24T05:02:28
| 2013-12-24T05:02:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
# Hey Emacs! This is a -*- python -*- file.
# Get tab completion
# http://docs.python.org/2/library/rlcompleter.html
try:
import readline
except ImportError:
print "Module readline not available."
else:
import rlcompleter
readline.parse_and_bind("tab: complete")
# Import pandas and numpy
try:
import pandas as pd
except ImportError:
print "Module pandas not available."
try:
import numpy as np
except ImportError:
print "Module numpy not available."
|
[
"still.another.person@gmail.com"
] |
still.another.person@gmail.com
|
|
8c1cdeb504e120cfab9d249122eaae7a5174815b
|
36091288a431917f8966dce0316ae2a8091a182b
|
/scripts/shell.py
|
eda07f45115e9f359a1b3db5a6a5b16f2ac0a0ad
|
[
"MIT"
] |
permissive
|
qustodio/libimobiledevice-node
|
f31860963c9d3dba65732f954a98fbcde6169f6b
|
d97b452c3e5166b3f4f9e8ba180c8de6dec2d08d
|
refs/heads/qustodio-main
| 2023-09-05T23:31:40.254804
| 2023-06-29T08:15:51
| 2023-06-29T08:15:51
| 142,663,144
| 1
| 0
|
MIT
| 2023-06-29T08:15:53
| 2018-07-28T09:06:49
|
C
|
UTF-8
|
Python
| false
| false
| 778
|
py
|
import subprocess
import os
def uname(option: str):
return subprocess.run(["uname", option], capture_output=True, text=True).stdout.strip()
def shell(command: str, cwd: str = None, check=True, env = None, executable=None):
subprocess.run(command, cwd=cwd, shell=True, check=check, env=env, executable=executable)
def make(arg: str = None, cwd: str = None, env = None):
if arg:
shell(f'make {arg}', cwd=cwd, env=env)
else:
shell('make', cwd=cwd, env=env)
def get_relative_path(path: str) -> str:
dirname = os.path.dirname(__file__)
return os.path.join(dirname, path)
def otool(option: str, binary: str):
return subprocess.run(["otool", option, binary], capture_output=True, text=True).stdout.strip()
|
[
"didix21@users.noreply.github.com"
] |
didix21@users.noreply.github.com
|
cfbf423ff62caf2ee97e36223dd20e5712eaaf0c
|
d1ae812c9338da065197bb936cdd96d173f868c3
|
/8/adventofcode-8.py
|
e9dcced161f163b6749a89f51a7080e7e1a39cbb
|
[] |
no_license
|
NickUK/advent-of-code-2018
|
3d98e728eacc2f64efe63305d4dd75562ba63eff
|
91f5b694cedce69cfeb8b02499691090391ed67d
|
refs/heads/master
| 2020-04-09T08:20:18.156802
| 2018-12-25T08:10:46
| 2018-12-25T08:10:46
| 160,190,618
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,056
|
py
|
# Day 8 part 1+2
from functools import reduce
class ParsedFile:
def __init__(self, children, meta, value):
self.children = children
self.meta = meta
self.value = value
all_meta = []
def parse(input, pos):
files = []
meta = []
value = 0
num_files = int(input[pos])
num_meta = int(input[pos + 1])
start = pos + 2
for _ in range(num_files):
new_file, start = parse(input, start)
files.append(new_file)
meta = input[start:start+num_meta]
# For part 1
for m in meta:
all_meta.append(m)
if num_files == 0:
value = reduce(lambda a,b: int(a) + int(b), meta)
else:
for ref in map(lambda x: int(x), meta):
if ref - 1 < len(files):
value = value + files[ref - 1].value
return ParsedFile(files, meta, value), start+num_meta
with open("input") as f:
testinput = f.readlines()[0]
#testinput = "2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2"
result, _ = parse(testinput.split(" "), 0)
print(reduce(lambda a,b: int(a) + int(b), all_meta)) # Part 1
print(result.value) # Part 2
|
[
"nick_horne_90@hotmail.com"
] |
nick_horne_90@hotmail.com
|
097321d7ebb305770869f9fd631a4837eeeec702
|
b87f66b13293782321e20c39aebc05defd8d4b48
|
/maps/build/mayavi/enthought/mayavi/tools/sources.py
|
2dcd4601cae2528189b5d19a6fb7ba72ba533b83
|
[] |
no_license
|
m-elhussieny/code
|
5eae020932d935e4d724c2f3d16126a0d42ebf04
|
5466f5858dbd2f1f082fa0d7417b57c8fb068fad
|
refs/heads/master
| 2021-06-13T18:47:08.700053
| 2016-11-01T05:51:06
| 2016-11-01T05:51:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49,570
|
py
|
"""
Data sources classes and their associated functions for mlab.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Prabhu Ramachandran
# Copyright (c) 2007-2010, Enthought, Inc.
# License: BSD Style.
import operator
import numpy as np
from enthought.traits.api import (HasTraits, Instance, CArray, Either,
Bool, on_trait_change, NO_COMPARE)
from enthought.tvtk.api import tvtk
from enthought.tvtk.common import camel2enthought
from enthought.mayavi.sources.array_source import ArraySource
from enthought.mayavi.core.registry import registry
import tools
from engine_manager import engine_manager
__all__ = [ 'vector_scatter', 'vector_field', 'scalar_scatter',
'scalar_field', 'line_source', 'array2d_source', 'grid_source',
'open', 'triangular_mesh_source', 'vertical_vectors_source',
]
################################################################################
# A subclass of CArray that will accept floats and do a np.atleast_1d
################################################################################
class CArrayOrNumber(CArray):
def validate( self, object, name, value):
if operator.isNumberType(value):
value = np.atleast_1d(value)
return CArray.validate(self, object, name, value)
################################################################################
# `MlabSource` class.
################################################################################
class MlabSource(HasTraits):
"""
This class represents the base class for all mlab sources. These
classes allow a user to easily update the data without having to
recreate the whole pipeline.
"""
# The TVTK dataset we manage.
dataset = Instance(tvtk.DataSet)
# The Mayavi data source we manage.
m_data = Instance(HasTraits)
########################################
# Private traits.
# Disable the update when data is changed.
_disable_update = Bool(False)
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Function to create the data from input arrays etc.
This is to be used when the size of the arrays change or the
first time when the data is created. This regenerates the data
structures and will be slower in general.
"""
raise NotImplementedError()
def update(self):
"""Update the visualization.
This is to be called after the data of the visualization has
changed.
"""
if not self._disable_update:
self.dataset.modified()
md = self.m_data
if md is not None:
if hasattr(md, '_assign_attribute'):
md._assign_attribute.update()
md.data_changed = True
def set(self, trait_change_notify=True, **traits):
"""Shortcut for setting object trait attributes.
This is an overridden method that will make changing multiple
traits easier. This method is to be called when the arrays have
changed content but not in shape/size. In that case one must
call the `reset` method.
Parameters
----------
trait_change_notify : Boolean
If **True** (the default), then each value assigned may generate a
trait change notification. If **False**, then no trait change
notifications will be generated. (see also: trait_setq)
traits : list of key/value pairs
Trait attributes and their values to be set
Returns
-------
self
The method returns this object, after setting attributes.
"""
try:
self._disable_update = True
super(MlabSource, self).set(trait_change_notify, **traits)
finally:
self._disable_update = False
if trait_change_notify:
self.update()
return self
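    # Usage sketch (hypothetical arrays; `src` is any object returned by the
    # factory functions below): use `set` when the arrays keep their shape,
    # and `reset` when the size or shape changes, e.g.
    #
    #   src.mlab_source.set(scalars=new_scalars)              # same shape
    #   src.mlab_source.reset(x=x2, y=y2, z=z2, scalars=s2)   # shape changed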
######################################################################
# Non-public interface.
######################################################################
def _m_data_changed(self, ds):
if not hasattr(ds, 'mlab_source'):
ds.add_trait('mlab_source', Instance(MlabSource))
ds.mlab_source = self
ArrayOrNone = Either(None, CArray, comparison_mode=NO_COMPARE)
ArrayNumberOrNone = Either(None, CArrayOrNumber, comparison_mode=NO_COMPARE)
################################################################################
# `MGlyphSource` class.
################################################################################
class MGlyphSource(MlabSource):
"""
This class represents a glyph data source for Mlab objects and
allows the user to set the x, y, z, scalar/vector attributes.
"""
# The x, y, z and points of the glyphs.
x = ArrayNumberOrNone
y = ArrayNumberOrNone
z = ArrayNumberOrNone
points = ArrayOrNone
# The scalars shown on the glyphs.
scalars = ArrayNumberOrNone
# The u, v, w components of the vector and the vectors.
u = ArrayNumberOrNone
v = ArrayNumberOrNone
w = ArrayNumberOrNone
vectors = ArrayOrNone
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
# First convert numbers to arrays.
for name in ('x', 'y', 'z', 'u', 'v', 'w', 'scalars'):
if name in traits and traits[name] is not None:
traits[name] = np.atleast_1d(traits[name])
# First set the attributes without really doing anything since
# the notification handlers are not called.
self.set(trait_change_notify=False, **traits)
vectors = self.vectors
scalars = self.scalars
points = self.points
x, y, z = self.x, self.y, self.z
x = np.atleast_1d(x)
y = np.atleast_1d(y)
z = np.atleast_1d(z)
if 'points' in traits:
x=points[:,0].ravel()
y=points[:,1].ravel()
z=points[:,2].ravel()
self.set(x=x,y=y,z=z,trait_change_notify=False)
else:
points = np.c_[x.ravel(), y.ravel(), z.ravel()].ravel()
            points.shape = (points.size // 3, 3)
self.set(points=points, trait_change_notify=False)
u, v, w = self.u, self.v, self.w
if u is not None:
u = np.atleast_1d(u)
v = np.atleast_1d(v)
w = np.atleast_1d(w)
if len(u) > 0:
vectors = np.c_[u.ravel(), v.ravel(),
w.ravel()].ravel()
                vectors.shape = (vectors.size // 3, 3)
self.set(vectors=vectors, trait_change_notify=False)
if 'vectors' in traits:
u=vectors[:,0].ravel()
v=vectors[:,1].ravel()
w=vectors[:,2].ravel()
self.set(u=u,v=v,w=w,trait_change_notify=False)
else:
if u is not None and len(u) > 0:
vectors = np.c_[u.ravel(), v.ravel(),
w.ravel()].ravel()
                vectors.shape = (vectors.size // 3, 3)
self.set(vectors=vectors, trait_change_notify=False)
if vectors is not None and len(vectors) > 0:
assert len(points) == len(vectors)
if scalars is not None:
scalars = np.atleast_1d(scalars)
if len(scalars) > 0:
assert len(points) == len(scalars)
# Create the dataset.
polys = np.arange(0, len(points), 1, 'l')
polys = np.reshape(polys, (len(points), 1))
if self.dataset is None:
# Create new dataset if none exists
pd = tvtk.PolyData()
else:
# Modify existing one.
pd = self.dataset
pd.set(points=points, polys=polys)
if self.vectors is not None:
pd.point_data.vectors = self.vectors
pd.point_data.vectors.name = 'vectors'
if self.scalars is not None:
pd.point_data.scalars = self.scalars
pd.point_data.scalars.name = 'scalars'
self.dataset = pd
######################################################################
# Non-public interface.
######################################################################
def _x_changed(self, x):
x = np.atleast_1d(x)
self.points[:,0] = x
self.update()
def _y_changed(self, y):
y = np.atleast_1d(y)
self.points[:,1] = y
self.update()
def _z_changed(self, z):
z = np.atleast_1d(z)
self.points[:,2] = z
self.update()
def _u_changed(self, u):
u = np.atleast_1d(u)
self.vectors[:,0] = u
self.update()
def _v_changed(self, v):
v = np.atleast_1d(v)
self.vectors[:,1] = v
self.update()
def _w_changed(self, w):
w = np.atleast_1d(w)
self.vectors[:,2] = w
self.update()
def _points_changed(self, p):
p = np.atleast_2d(p)
self.dataset.points = p
self.update()
def _scalars_changed(self, s):
if s is None:
self.dataset.point_data.scalars = None
self.dataset.point_data.remove_array('scalars')
else:
s = np.atleast_1d(s)
self.dataset.point_data.scalars = s
self.dataset.point_data.scalars.name = 'scalars'
self.update()
def _vectors_changed(self, v):
self.dataset.point_data.vectors = v
self.dataset.point_data.vectors.name = 'vectors'
self.update()
################################################################################
# `MVerticalGlyphSource` class.
################################################################################
class MVerticalGlyphSource(MGlyphSource):
"""
This class represents a vertical glyph data source for Mlab objects
and allows the user to set the x, y, z, scalar attributes. The
vectors are created from the scalars to represent them in the
vertical direction.
"""
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
if 'scalars' in traits:
s = traits['scalars']
if s is not None:
                traits['u'] = traits['v'] = np.ones_like(s)
traits['w'] = s
super(MVerticalGlyphSource, self).reset(**traits)
def _scalars_changed(self, s):
self.dataset.point_data.scalars = s
self.dataset.point_data.scalars.name = 'scalars'
self.set(vectors=np.c_[np.ones_like(s),
np.ones_like(s),
s])
self.update()
################################################################################
# `MArraySource` class.
################################################################################
class MArraySource(MlabSource):
"""
This class represents an array data source for Mlab objects and
allows the user to set the x, y, z, scalar/vector attributes.
"""
# The x, y, z arrays for the volume.
x = ArrayOrNone
y = ArrayOrNone
z = ArrayOrNone
# The scalars shown on the glyphs.
scalars = ArrayOrNone
# The u, v, w components of the vector and the vectors.
u = ArrayOrNone
v = ArrayOrNone
w = ArrayOrNone
vectors = ArrayOrNone
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
# First set the attributes without really doing anything since
# the notification handlers are not called.
self.set(trait_change_notify=False, **traits)
vectors = self.vectors
scalars = self.scalars
        x, y, z = [np.atleast_3d(a) for a in (self.x, self.y, self.z)]
u, v, w = self.u, self.v, self.w
if 'vectors' in traits:
u=vectors[:,0].ravel()
v=vectors[:,1].ravel()
w=vectors[:,2].ravel()
self.set(u=u,v=v,w=w,trait_change_notify=False)
else:
if u is not None and len(u) > 0:
#vectors = np.concatenate([u[..., np.newaxis],
# v[..., np.newaxis],
# w[..., np.newaxis] ],
# axis=3)
vectors = np.c_[u.ravel(), v.ravel(),
w.ravel()].ravel()
vectors.shape = (u.shape[0] , u.shape[1], w.shape[2], 3)
self.set(vectors=vectors, trait_change_notify=False)
if vectors is not None and len(vectors) > 0 and scalars is not None:
assert len(scalars) == len(vectors)
if x.shape[0] <= 1:
dx = 1
else:
dx = x[1, 0, 0] - x[0, 0, 0]
if y.shape[1] <= 1:
dy = 1
else:
dy = y[0, 1, 0] - y[0, 0, 0]
if z.shape[2] <= 1:
dz = 1
else:
dz = z[0, 0, 1] - z[0, 0, 0]
if self.m_data is None:
ds = ArraySource(transpose_input_array=True)
else:
ds = self.m_data
old_scalar = ds.scalar_data
ds.set(vector_data=vectors,
origin=[x.min(), y.min(), z.min()],
spacing=[dx, dy, dz],
scalar_data=scalars)
if scalars is old_scalar:
ds._scalar_data_changed(scalars)
self.dataset = ds.image_data
self.m_data = ds
######################################################################
# Non-public interface.
######################################################################
@on_trait_change('[x, y, z]')
def _xyz_changed(self):
x, y, z = self.x, self.y, self.z
dx = x[1, 0, 0] - x[0, 0, 0]
dy = y[0, 1, 0] - y[0, 0, 0]
dz = z[0, 0, 1] - z[0, 0, 0]
ds = self.dataset
ds.origin = [x.min(), y.min(), z.min()]
ds.spacing = [dx, dy, dz]
if self.m_data is not None:
self.m_data.set(origin=ds.origin, spacing=ds.spacing)
self.update()
def _u_changed(self, u):
self.vectors[...,0] = u
self.m_data._vector_data_changed(self.vectors)
def _v_changed(self, v):
self.vectors[...,1] = v
self.m_data._vector_data_changed(self.vectors)
def _w_changed(self, w):
self.vectors[...,2] = w
self.m_data._vector_data_changed(self.vectors)
def _scalars_changed(self, s):
old = self.m_data.scalar_data
self.m_data.scalar_data = s
if old is s:
self.m_data._scalar_data_changed(s)
def _vectors_changed(self, v):
self.m_data.vector_data = v
################################################################################
# `MLineSource` class.
################################################################################
class MLineSource(MlabSource):
"""
This class represents a line data source for Mlab objects and
allows the user to set the x, y, z, scalar attributes.
"""
# The x, y, z and points of the glyphs.
x = ArrayOrNone
y = ArrayOrNone
z = ArrayOrNone
points = ArrayOrNone
# The scalars shown on the glyphs.
scalars = ArrayOrNone
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
# First set the attributes without really doing anything since
# the notification handlers are not called.
self.set(trait_change_notify=False, **traits)
points = self.points
scalars = self.scalars
x, y, z = self.x, self.y, self.z
if 'points' in traits:
x=points[:,0].ravel()
y=points[:,1].ravel()
z=points[:,2].ravel()
self.set(x=x,y=y,z=z,trait_change_notify=False)
else:
points = np.c_[x.ravel(), y.ravel(), z.ravel()].ravel()
points.shape = (len(x), 3)
self.set(points=points, trait_change_notify=False)
# Create the dataset.
n_pts = len(points) - 1
lines = np.zeros((n_pts, 2), 'l')
lines[:,0] = np.arange(0, n_pts-0.5, 1, 'l')
lines[:,1] = np.arange(1, n_pts+0.5, 1, 'l')
if self.dataset is None:
pd = tvtk.PolyData()
else:
pd = self.dataset
        # Avoid lines referring to non-existing points: first set the
        # lines to None, then set the points, then set the lines
        # referring to the new points.
pd.set(lines=None)
pd.set(points=points)
pd.set(lines=lines)
if scalars is not None and len(scalars) > 0:
assert len(x) == len(scalars)
pd.point_data.scalars = np.ravel(scalars)
pd.point_data.scalars.name = 'scalars'
self.dataset = pd
######################################################################
# Non-public interface.
######################################################################
def _x_changed(self, x):
self.points[:,0] = x
self.update()
def _y_changed(self, y):
self.points[:,1] = y
self.update()
def _z_changed(self, z):
self.points[:,2] = z
self.update()
def _points_changed(self, p):
self.dataset.points = p
self.update()
def _scalars_changed(self, s):
self.dataset.point_data.scalars = s.ravel()
self.dataset.point_data.scalars.name = 'scalars'
self.update()
################################################################################
# `MArray2DSource` class.
################################################################################
class MArray2DSource(MlabSource):
"""
This class represents a 2D array data source for Mlab objects and
allows the user to set the x, y and scalar attributes.
"""
# The x, y values.
# Values of X and Y as None are accepted, in that case we would build
# values of X and Y automatically from the shape of scalars
x = ArrayOrNone
y = ArrayOrNone
# The scalars shown on the glyphs.
scalars = ArrayOrNone
# The masking array.
mask = ArrayOrNone
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
# First set the attributes without really doing anything since
# the notification handlers are not called.
self.set(trait_change_notify=False, **traits)
x, y, mask = self.x, self.y, self.mask
scalars = self.scalars
# We may have used this without specifying x and y at all in
# which case we set them from the shape of scalars.
nx, ny = scalars.shape
        # Build x and y from the shape of scalars if they are None.
if x is None and y is None:
x, y = np.mgrid[-nx/2.:nx/2, -ny/2.:ny/2]
if mask is not None and len(mask) > 0:
scalars[mask.astype('bool')] = np.nan
# The NaN trick only works with floats.
scalars = scalars.astype('float')
self.set(scalars=scalars, trait_change_notify=False)
z = np.array([0])
self.set(x=x, y=y, z=z, trait_change_notify=False)
# Do some magic to extract the first row/column, independently of
# the shape of x and y
x = np.atleast_2d(x.squeeze().T)[0, :].squeeze()
y = np.atleast_2d(y.squeeze())[0, :].squeeze()
if x.ndim == 0:
dx = 1
else:
dx = x[1] - x[0]
if y.ndim == 0:
dy = 1
else:
dy = y[1] - y[0]
if self.m_data is None:
ds = ArraySource(transpose_input_array=True)
else:
ds = self.m_data
old_scalar = ds.scalar_data
ds.set(origin=[x.min(), y.min(), 0],
spacing=[dx, dy, 1],
scalar_data=scalars)
if old_scalar is scalars:
ds._scalar_data_changed(scalars)
self.dataset = ds.image_data
self.m_data = ds
######################################################################
# Non-public interface.
######################################################################
@on_trait_change('[x, y]')
def _xy_changed(self):
        x, y, scalars = self.x, self.y, self.scalars
nx, ny = scalars.shape
if x is None or y is None:
x, y = np.mgrid[-nx/2.:nx/2, -ny/2.:ny/2]
self.trait_setq(x=x,y=y)
x = np.atleast_2d(x.squeeze().T)[0, :].squeeze()
y = np.atleast_2d(y.squeeze())[0, :].squeeze()
dx = x[1] - x[0]
dy = y[1] - y[0]
ds = self.dataset
ds.origin = [x.min(), y.min(), 0]
ds.spacing = [dx, dy, 1]
if self.m_data is not None:
self.m_data.set(origin=ds.origin, spacing=ds.spacing)
self.update()
def _scalars_changed(self, s):
mask = self.mask
if mask is not None and len(mask) > 0:
s[mask.astype('bool')] = np.nan
            # The NaN trick only works with floats.
s = s.astype('float')
self.set(scalars=s, trait_change_notify=False)
old = self.m_data.scalar_data
self.m_data.scalar_data = s
if s is old:
self.m_data._scalar_data_changed(s)
################################################################################
# `MGridSource` class.
################################################################################
class MGridSource(MlabSource):
"""
This class represents a grid source for Mlab objects and
allows the user to set the x, y, scalar attributes.
"""
# The x, y, z and points of the grid.
x = ArrayOrNone
y = ArrayOrNone
z = ArrayOrNone
points = ArrayOrNone
# The scalars shown on the glyphs.
scalars = ArrayOrNone
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
# First set the attributes without really doing anything since
# the notification handlers are not called.
self.set(trait_change_notify=False, **traits)
points = self.points
scalars = self.scalars
x, y, z = self.x, self.y, self.z
assert len(x.shape) == 2, "Array x must be 2 dimensional."
assert len(y.shape) == 2, "Array y must be 2 dimensional."
assert len(z.shape) == 2, "Array z must be 2 dimensional."
assert x.shape == y.shape, "Arrays x and y must have same shape."
assert y.shape == z.shape, "Arrays y and z must have same shape."
        # Points in the grid source are always created from x, y, z.
        # Setting points directly is not allowed, because it cannot be
        # used to modify the values of x, y, z.
nx, ny = x.shape
points = np.c_[x.ravel(), y.ravel(), z.ravel()].ravel()
points.shape = (nx*ny, 3)
self.set(points=points, trait_change_notify=False)
i, j = np.mgrid[0:nx-1,0:ny-1]
i, j = np.ravel(i), np.ravel(j)
t1 = i*ny+j, (i+1)*ny+j, (i+1)*ny+(j+1)
t2 = (i+1)*ny+(j+1), i*ny+(j+1), i*ny+j
nt = len(t1[0])
triangles = np.zeros((nt*2, 3), 'l')
triangles[0:nt,0], triangles[0:nt,1], triangles[0:nt,2] = t1
triangles[nt:,0], triangles[nt:,1], triangles[nt:,2] = t2
if self.dataset is None:
pd = tvtk.PolyData()
else:
pd = self.dataset
pd.set(points=points, polys=triangles)
if scalars is not None and len(scalars) > 0:
if not scalars.flags.contiguous:
scalars = scalars.copy()
self.set(scalars=scalars, trait_change_notify=False)
assert x.shape == scalars.shape
pd.point_data.scalars = scalars.ravel()
pd.point_data.scalars.name = 'scalars'
self.dataset = pd
######################################################################
# Non-public interface.
######################################################################
def _x_changed(self, x):
        self.trait_setq(x=x)
self.points[:,0] = x.ravel()
self.update()
def _y_changed(self, y):
self.trait_setq(y=y)
self.points[:,1] = y.ravel()
self.update()
def _z_changed(self, z):
self.trait_setq(z=z)
self.points[:,2] = z.ravel()
self.update()
def _points_changed(self, p):
self.dataset.points = p
self.update()
def _scalars_changed(self, s):
self.dataset.point_data.scalars = s.ravel()
self.dataset.point_data.scalars.name = 'scalars'
self.update()
################################################################################
# `MTriangularMeshSource` class.
################################################################################
class MTriangularMeshSource(MlabSource):
"""
This class represents a triangular mesh source for Mlab objects and
allows the user to set the x, y, scalar attributes.
"""
# The x, y, z and points of the grid.
x = ArrayOrNone
y = ArrayOrNone
z = ArrayOrNone
points = ArrayOrNone
triangles = ArrayOrNone
# The scalars shown on the glyphs.
scalars = ArrayOrNone
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
# First set the attributes without really doing anything since
# the notification handlers are not called.
self.set(trait_change_notify=False, **traits)
points = self.points
scalars = self.scalars
x, y, z = self.x, self.y, self.z
points = np.c_[x.ravel(), y.ravel(), z.ravel()].ravel()
        points.shape = (points.size // 3, 3)
self.set(points=points, trait_change_notify=False)
triangles = self.triangles
assert triangles.shape[1] == 3, \
"The shape of the triangles array must be (X, 3)"
assert triangles.max() < len(points), \
"The triangles indices must be smaller that the number of points"
assert triangles.min() >= 0, \
"The triangles indices must be positive or null"
if self.dataset is None:
pd = tvtk.PolyData()
else:
pd = self.dataset
        # Set the points first, and the triangles after: so that the
        # polygons can refer to the right points in the polydata.
pd.set(points=points)
pd.set(polys=triangles)
if (not 'scalars' in traits
and scalars is not None
and scalars.shape != x.shape):
            # The scalars were probably set automatically to z by the
            # factory. We need to reset them, as the size has changed.
scalars = z
if scalars is not None and len(scalars) > 0:
if not scalars.flags.contiguous:
scalars = scalars.copy()
self.set(scalars=scalars, trait_change_notify=False)
assert x.shape == scalars.shape
pd.point_data.scalars = scalars.ravel()
pd.point_data.scalars.name = 'scalars'
self.dataset = pd
######################################################################
# Non-public interface.
######################################################################
def _x_changed(self, x):
        self.trait_setq(x=x)
self.points[:,0] = x.ravel()
self.update()
def _y_changed(self, y):
self.trait_setq(y=y)
self.points[:,1] = y.ravel()
self.update()
def _z_changed(self, z):
self.trait_setq(z=z)
self.points[:,2] = z.ravel()
self.update()
def _points_changed(self, p):
self.dataset.points = p
self.update()
def _scalars_changed(self, s):
self.dataset.point_data.scalars = s.ravel()
self.dataset.point_data.scalars.name = 'scalars'
self.update()
def _triangles_changed(self, triangles):
if triangles.min() < 0:
            raise ValueError('The triangles array has negative values')
        if triangles.max() >= self.x.size:
            raise ValueError('The triangles array has values larger than '
                             'the number of points')
self.dataset.polys = triangles
self.update()
############################################################################
# Argument processing
############################################################################
def convert_to_arrays(args):
""" Converts a list of iterables to a list of arrays or callables,
if needed.
"""
args = list(args)
for index, arg in enumerate(args):
if not callable(arg):
if not hasattr(arg, 'shape'):
arg = np.atleast_1d(np.array(arg))
if np.any(np.isinf(arg)):
raise ValueError("""Input array contains infinite values
You can remove them using: a[np.isinf(a)] = np.nan
""")
args[index] = arg
return args
def process_regular_vectors(*args):
""" Converts different signatures to (x, y, z, u, v, w). """
args = convert_to_arrays(args)
if len(args)==3:
u, v, w = [np.atleast_3d(a) for a in args]
assert len(u.shape)==3, "3D array required"
x, y, z = np.indices(u.shape)
elif len(args)==6:
x, y, z, u, v, w = args
elif len(args)==4:
x, y, z, f = args
if not callable(f):
raise ValueError, "When 4 arguments are provided, the fourth must be a callable"
u, v, w = f(x, y, z)
else:
raise ValueError, "wrong number of arguments"
assert ( x.shape == y.shape and
y.shape == z.shape and
u.shape == z.shape and
v.shape == u.shape and
             w.shape == v.shape ), "argument shapes are not equal"
return x, y, z, u, v, w
def process_regular_scalars(*args):
""" Converts different signatures to (x, y, z, s). """
args = convert_to_arrays(args)
if len(args)==1:
s = np.atleast_3d(args[0])
assert len(s.shape)==3, "3D array required"
x, y, z = np.indices(s.shape)
elif len(args)==3:
x, y, z = args
s = None
elif len(args)==4:
x, y, z, s = args
if callable(s):
s = s(x, y, z)
else:
raise ValueError, "wrong number of arguments"
assert ( x.shape == y.shape and
y.shape == z.shape and
( s is None
               or s.shape == z.shape ) ), "argument shapes are not equal"
return x, y, z, s
def process_regular_2d_scalars(*args, **kwargs):
""" Converts different signatures to (x, y, s). """
args = convert_to_arrays(args)
for index, arg in enumerate(args):
if not callable(arg):
args[index] = np.atleast_2d(arg)
if len(args)==1:
s = args[0]
assert len(s.shape)==2, "2D array required"
x, y = np.indices(s.shape)
elif len(args)==3:
x, y, s = args
if callable(s):
s = s(x, y)
else:
raise ValueError, "wrong number of arguments"
assert len(s.shape)==2, "2D array required"
if 'mask' in kwargs:
mask = kwargs['mask']
s[mask.astype('bool')] = np.nan
        # The NaN trick only works with floats.
s = s.astype('float')
return x, y, s
############################################################################
# Sources
############################################################################
def vector_scatter(*args, **kwargs):
""" Creates scattered vector data.
**Function signatures**::
vector_scatter(u, v, w, ...)
vector_scatter(x, y, z, u, v, w, ...)
vector_scatter(x, y, z, f, ...)
If only 3 arrays u, v, w are passed the x, y and z arrays are assumed to be
made from the indices of vectors.
If 4 positional arguments are passed the last one must be a callable, f,
that returns vectors.
**Keyword arguments**:
:name: the name of the vtk object created.
:scalars: optional scalar data.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization."""
x, y, z, u, v, w = process_regular_vectors(*args)
scalars = kwargs.pop('scalars', None)
if scalars is not None:
scalars = np.ravel(scalars)
name = kwargs.pop('name', 'VectorScatter')
data_source = MGlyphSource()
data_source.reset(x=x, y=y, z=z, u=u, v=v, w=w, scalars=scalars)
ds = tools.add_dataset(data_source.dataset, name, **kwargs)
data_source.m_data = ds
return ds
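# Usage sketch (hypothetical random data, not part of the original module):
#
#   x, y, z = np.random.random((3, 100))
#   u, v, w = np.random.random((3, 100))
#   src = vector_scatter(x, y, z, u, v, w, name='RandomVectors')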
def vector_field(*args, **kwargs):
""" Creates vector field data.
**Function signatures**::
vector_field(u, v, w, ...)
vector_field(x, y, z, u, v, w, ...)
vector_field(x, y, z, f, ...)
If only 3 arrays u, v, w are passed the x, y and z arrays are assumed to be
made from the indices of vectors.
    If the x, y and z arrays are passed, they should have been generated
    by `numpy.mgrid` or `numpy.ogrid`. The function builds a vector field
    assuming the points are regularly spaced on an orthogonal grid.
If 4 positional arguments are passed the last one must be a callable, f,
that returns vectors.
**Keyword arguments**:
:name: the name of the vtk object created.
:scalars: optional scalar data.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization."""
if len(args) == 3:
x = y = z = np.atleast_3d(1)
u, v, w = [np.atleast_3d(a) for a in args]
else:
x, y, z, u, v, w = [np.atleast_3d(a)
for a in process_regular_vectors(*args)]
scalars = kwargs.pop('scalars', None)
if scalars is not None:
scalars = np.atleast_3d(scalars)
data_source = MArraySource()
data_source.reset(x=x, y=y, z=z, u=u, v=v, w=w, scalars=scalars)
name = kwargs.pop('name', 'VectorField')
return tools.add_dataset(data_source.m_data, name, **kwargs)
def scalar_scatter(*args, **kwargs):
"""
Creates scattered scalar data.
**Function signatures**::
scalar_scatter(s, ...)
scalar_scatter(x, y, z, s, ...)
scalar_scatter(x, y, z, f, ...)
If only 1 array s is passed the x, y and z arrays are assumed to be
    made from the indices of the array.
If 4 positional arguments are passed the last one must be an array s, or
a callable, f, that returns an array.
**Keyword arguments**:
:name: the name of the vtk object created.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization."""
x, y, z, s = process_regular_scalars(*args)
if s is not None:
s = np.ravel(s)
data_source = MGlyphSource()
data_source.reset(x=x, y=y, z=z, scalars=s)
name = kwargs.pop('name', 'ScalarScatter')
ds = tools.add_dataset(data_source.dataset, name, **kwargs)
data_source.m_data = ds
return ds
def scalar_field(*args, **kwargs):
"""
Creates a scalar field data.
**Function signatures**::
scalar_field(s, ...)
scalar_field(x, y, z, s, ...)
scalar_field(x, y, z, f, ...)
If only 1 array s is passed the x, y and z arrays are assumed to be
made from the indices of arrays.
If the x, y and z arrays are passed they are supposed to have been
    generated by `numpy.mgrid`. The function builds a scalar field assuming
    the points are regularly spaced.
If 4 positional arguments are passed the last one must be an array s, or
a callable, f, that returns an array.
**Keyword arguments**:
:name: the name of the vtk object created.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization."""
if len(args) == 1:
        # Be lazy, don't create three big arrays for 1 input array. The
        # MArraySource is clever enough to handle flat arrays.
x = y = z = np.atleast_1d(1)
s = args[0]
else:
x, y, z, s = process_regular_scalars(*args)
data_source = MArraySource()
data_source.reset(x=x, y=y, z=z, scalars=s)
name = kwargs.pop('name', 'ScalarField')
return tools.add_dataset(data_source.m_data, name, **kwargs)
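# Usage sketch (hypothetical field sampled on an mgrid volume; this particular
# grid does not contain 0, so the division below is safe):
#
#   x, y, z = np.mgrid[-1:1:20j, -1:1:20j, -1:1:20j]
#   src = scalar_field(x, y, z, np.sin(x*y*z) / (x*y*z))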
def line_source(*args, **kwargs):
"""
Creates line data.
**Function signatures**::
line_source(x, y, z, ...)
line_source(x, y, z, s, ...)
line_source(x, y, z, f, ...)
If 4 positional arguments are passed the last one must be an array s, or
a callable, f, that returns an array.
**Keyword arguments**:
:name: the name of the vtk object created.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization."""
if len(args)==1:
raise ValueError, "wrong number of arguments"
x, y, z, s = process_regular_scalars(*args)
data_source = MLineSource()
data_source.reset(x=x, y=y, z=z, scalars=s)
name = kwargs.pop('name', 'LineSource')
ds = tools.add_dataset(data_source.dataset, name, **kwargs)
data_source.m_data = ds
return ds
def array2d_source(*args, **kwargs):
"""
Creates structured 2D data from a 2D array.
**Function signatures**::
array2d_source(s, ...)
array2d_source(x, y, s, ...)
array2d_source(x, y, f, ...)
If 3 positional arguments are passed the last one must be an array s,
or a callable, f, that returns an array. x and y give the
    coordinates of positions corresponding to the s values.
x and y can be 1D or 2D arrays (such as returned by numpy.ogrid or
numpy.mgrid), but the points should be located on an orthogonal grid
(possibly non-uniform). In other words, all the points sharing a same
index in the s array need to have the same x or y value.
If only 1 array s is passed the x and y arrays are assumed to be
    made from the indices of arrays, and a uniformly-spaced data set is
created.
**Keyword arguments**:
:name: the name of the vtk object created.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization.
:mask: Mask points specified in a boolean masking array.
"""
data_source = MArray2DSource()
mask = kwargs.pop('mask', None)
if len(args) == 1 :
args = convert_to_arrays(args)
s = np.atleast_2d(args[0])
data_source.reset(scalars=s, mask=mask)
else:
x, y, s = process_regular_2d_scalars(*args, **kwargs)
data_source.reset(x=x, y=y, scalars=s, mask=mask)
name = kwargs.pop('name', 'Array2DSource')
return tools.add_dataset(data_source.m_data, name, **kwargs)
def grid_source(x, y, z, **kwargs):
"""
Creates 2D grid data.
x, y, z are 2D arrays giving the positions of the vertices of the surface.
The connectivity between these points is implied by the connectivity on
the arrays.
    For simple structures (such as orthogonal grids) prefer the array2d_source
function, as it will create more efficient data structures.
**Keyword arguments**:
:name: the name of the vtk object created.
:scalars: optional scalar data.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization.
"""
scalars = kwargs.pop('scalars', None)
if scalars is None:
scalars = z
x, y, z, scalars = convert_to_arrays((x, y, z, scalars))
data_source = MGridSource()
data_source.reset(x=x, y=y, z=z, scalars=scalars)
name = kwargs.pop('name', 'GridSource')
ds = tools.add_dataset(data_source.dataset, name, **kwargs)
data_source.m_data = ds
return ds
def vertical_vectors_source(*args, **kwargs):
"""
Creates a set of vectors pointing upward, useful eg for bar graphs.
**Function signatures**::
vertical_vectors_source(s, ...)
vertical_vectors_source(x, y, s, ...)
vertical_vectors_source(x, y, f, ...)
vertical_vectors_source(x, y, z, s, ...)
vertical_vectors_source(x, y, z, f, ...)
If only one positional argument is passed, it can be a 1D, 2D, or 3D
    array giving the length of the vectors. The positions of the data
    points are deduced from the indices of the array, and a
    uniformly-spaced data set is created.
If 3 positional arguments (x, y, s) are passed the last one must be
an array s, or a callable, f, that returns an array. x and y give the
2D coordinates of positions corresponding to the s values. The
vertical position is assumed to be 0.
If 4 positional arguments (x, y, z, s) are passed, the 3 first are
arrays giving the 3D coordinates of the data points, and the last one
is an array s, or a callable, f, that returns an array giving the
data value.
**Keyword arguments**:
:name: the name of the vtk object created.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization.
"""
if len(args) == 3:
x, y, data = args
if np.isscalar(x):
z = 0
else:
z = np.zeros_like(x)
args = (x, y, z, data)
x, y, z, s = process_regular_scalars(*args)
if s is not None:
s = np.ravel(s)
data_source = MVerticalGlyphSource()
data_source.reset(x=x, y=y, z=z, scalars=s)
name = kwargs.pop('name', 'VerticalVectorsSource')
ds = tools.add_dataset(data_source.dataset, name, **kwargs)
data_source.m_data = ds
return ds
def triangular_mesh_source(x, y, z, triangles, **kwargs):
"""
    Creates a 2D mesh by specifying points and triangle connectivity.
x, y, z are 2D arrays giving the positions of the vertices of the surface.
    The connectivity between these points is given by listing triplets of
    inter-connected vertices. These vertices are designated by their
    position index.
**Keyword arguments**:
:name: the name of the vtk object created.
:scalars: optional scalar data.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization.
"""
x, y, z, triangles = convert_to_arrays((x, y, z, triangles))
    if triangles.min() < 0:
        raise ValueError('The triangles array has negative values')
    if triangles.max() >= x.size:
        raise ValueError('The triangles array has values larger than '
                         'the number of points')
scalars = kwargs.pop('scalars', None)
if scalars is None:
scalars = z
data_source = MTriangularMeshSource()
data_source.reset(x=x, y=y, z=z, triangles=triangles, scalars=scalars)
name = kwargs.pop('name', 'TriangularMeshSource')
ds = tools.add_dataset(data_source.dataset, name, **kwargs)
data_source.m_data = ds
return ds
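# Usage sketch (hypothetical data): a single triangle connecting three points,
# with scalars defaulting to z.
#
#   x = np.array([0., 1., 0.])
#   y = np.array([0., 0., 1.])
#   z = np.array([0., 0., 0.])
#   src = triangular_mesh_source(x, y, z, np.array([[0, 1, 2]]))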
def open(filename, figure=None):
"""Open a supported data file given a filename. Returns the source
object if a suitable reader was found for the file.
"""
if figure is None:
engine = tools.get_engine()
else:
engine = engine_manager.find_figure_engine(figure)
engine.current_scene = figure
src = engine.open(filename)
return src
############################################################################
# Automatically generated sources from registry.
############################################################################
def _create_data_source(metadata):
"""Creates a data source and adds it to the mayavi engine given
metadata of the source. Returns the created source.
"""
factory = metadata.get_callable()
src = factory()
engine = tools.get_engine()
engine.add_source(src)
return src
def _make_functions(namespace):
"""Make the automatic functions and add them to the namespace."""
for src in registry.sources:
if len(src.extensions) == 0:
func_name = camel2enthought(src.id)
if func_name.endswith('_source'):
func_name = func_name[:-7]
func = lambda metadata=src: _create_data_source(metadata)
func.__doc__ = src.help
func.__name__ = func_name
# Inject function into the namespace and __all__.
namespace[func_name] = func
__all__.append(func_name)
_make_functions(locals())
|
[
"fspaolo@gmail.com"
] |
fspaolo@gmail.com
|
fbf7172250b9692f46a56d7db4810ee51a517b74
|
e1ef870bcb76966092da2e7d8c80764e88564bd1
|
/lad/lad/spiders/jiujianglaolingwang2.py
|
3b566db65f79512e84b229585c7db11779ea23e0
|
[] |
no_license
|
nicholaskh/lad-crawler
|
77b3586210d916758d214c1eda665d1aedf38c0a
|
8bee1d3c8e7dbf8ad1dfb98b9f325b9100a72df3
|
refs/heads/master
| 2020-03-22T19:20:16.661953
| 2018-06-06T12:59:38
| 2018-06-06T12:59:38
| 140,521,672
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,680
|
py
|
#coding=utf-8
import scrapy
import re
from ..items import YanglaoItem
from ..spiders.beautifulSoup import processText, processImgSep
from datetime import datetime
from .basespider import BaseTimeCheckSpider
class newsSpider(BaseTimeCheckSpider):
name = 'jiujianglaolingwang2'
start_urls = ['http://www.jjllw.gov.cn/jjhlg2017/vip_doc/15762803_0_0_1.html']
def parse(self, response):
should_deep = True
        times = response.xpath('//div[@id="text_listmodule_2"]//li/div/span[2]/text()').extract()
if len(times) == 0:
            should_deep = False
        # These URLs are relative links.
urls_ori = response.xpath('//div[@id="text_listmodule_2"]//li//a/@href').extract()
urls = []
for each in urls_ori:
url = 'http://www.jjllw.gov.cn' + each
urls.append(url)
#titles_ori = response.xpath('//td[@valign="top"]/table[1]').extract()
#titles = re.findall('title="(.*?)">', str(titles_ori))
valid_child_urls = list()
for time, url in zip(times, urls):
try:
time_now = datetime.strptime(time.strip(), '%Y-%m-%d')
self.update_last_time(time_now)
except:
print("Something Wrong")
break
if self.last_time is not None and self.last_time >= time_now:
should_deep = False
break
            # Convert to an absolute URL
#url = 'http://www.ahllb.cn' + url
valid_child_urls.append(url)
next_requests = list()
# if should_deep:
# #maxPageNum = int(response.xpath('//div[@class="pagenum_label"]//dd/p/text()').extract()[0].strip())
# currentPageNum = int(response.url.split('_')[-1].split('.')[0])
#
# nextPageNum = currentPageNum + 1
# if nextPageNum > 5:
# return
#
# next_url = 'http://www.jjllw.gov.cn/jjhlg2017/vip_doc/15702508_0_0_' + str(nextPageNum) + '.html'
# req = scrapy.Request(url=next_url, callback=self.parse)
# yield req
for index, temp_url in enumerate(valid_child_urls):
req = scrapy.Request(url=temp_url, callback=self.parse_info)
m_item = YanglaoItem()
#m_item['title'] = titles[index]
m_item['time'] = times[index]
m_item['className'] = '老龄新闻'
req.meta['item'] = m_item
yield req
def parse_info(self, response):
item = response.meta['item']
item["source"] = "九江老龄网"
# title_ori = response.xpath('//div[@class="w96"]/h1/text() | //div[@class="w96"]/h1/span/text()').extract()
# title =''
# for each in title_ori:
# title = title + each
title = response.xpath('//h1[@class="h1-title"]/text()').extract()[0].strip()
if len(title) == 0:
return
item["title"] = title
item["sourceUrl"] = response.url
        # Changed the text_list extraction
#text_list = response.xpath('//*[@id="ivs_content"]/p/text() | //*[@id="ivs_content"]/p//font/text()')
text_list = response.xpath('//div[@class="wap-add-img"]/*')
text = processText(text_list)
item["text"] = text
text_list = response.xpath('//div[@class="wap-add-img"]/*')
img_list = processImgSep(text_list)
final_img_list = []
for img in img_list:
if 'http' not in img:
img = '' + img
final_img_list.append(img)
item['imageUrls'] = final_img_list
if text.strip() == "" and len(img_list) == 0:
return
yield item
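# Usage note (standard Scrapy invocation, assuming the surrounding project
# layout): run this spider by its name from the project root:
#
#   scrapy crawl jiujianglaolingwang2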
|
[
"onebrolei@gmail.com"
] |
onebrolei@gmail.com
|
bd4ec6f3e5db07b6e41f8752228054e9727b6fde
|
eca39bee61b46256bbaa417beb9f340c60bcfe2f
|
/settings.py
|
f8c20689d6aa7e8edfb67f7411ce36fc22b18d81
|
[] |
no_license
|
tjsavage/runahouse
|
968fd8683d10f7f4ec04bc569a52990cdab1c61b
|
8e0f094c6120a0fd78248346a2b3320cb32e588c
|
refs/heads/master
| 2021-01-01T19:29:33.185862
| 2011-09-27T00:41:16
| 2011-09-27T00:41:16
| 2,464,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,266
|
py
|
# Django settings for runahouse project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
AUTH_PROFILE_MODULE = "stanford_registration.UserProfile"
AUTH_URL = "http://glados.stanford.edu:8080/accounts/authenticate"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '9*ruiw%l74uv&tq^pmcb6=$rt%3n8z5nm)i+&&9i@7r&hb(v_9'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'runahouse.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'stanford_registration',
'south',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
[
"tjsavage@glados.stanford.edu"
] |
tjsavage@glados.stanford.edu
|
a8a95539dac6b0b456a25ccbafca9321dd5c8b20
|
5e8832e7a49e121c4db1f57d036fe39b4250246a
|
/347_top_k_frequent_elements.py
|
f3f54cdd069c2acf438ed7c5694b526627821a0d
|
[] |
no_license
|
shaniavina/Leetcode_Python
|
9e80477794cd80e00a399d65b76088eea41d80d1
|
185bf1542265f5f4feca2e937d1d36a7bb4a5d2b
|
refs/heads/master
| 2022-10-12T10:56:23.476219
| 2022-09-21T01:53:40
| 2022-09-21T01:53:40
| 52,979,850
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
import collections
class Solution(object):
def topKFrequent(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
frq = collections.defaultdict(list)
for key, cnt in collections.Counter(nums).items():
frq[cnt].append(key)
res = []
for times in reversed(range(len(nums) + 1)):
res.extend(frq[times])
if len(res) >= k:
return res[:k]
return res[:k]
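# Usage sketch (hypothetical input): bucket-sort by frequency, O(n) overall.
#
#   print(Solution().topKFrequent([1, 1, 1, 2, 2, 3], 2))  # -> [1, 2]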
|
[
"noreply@github.com"
] |
shaniavina.noreply@github.com
|
b9b347334b0c918f282370fb07ea6d660c7155fa
|
378a987ebe3c4418a5c3362fad5c243f8ae9ccf7
|
/book/writer/models.py
|
16a874b13c5c1a102ceeda8e691f48eb92789b93
|
[] |
no_license
|
wccou/lab44
|
8669cb3804a346fb45c2af38d4ef6f9a3f9f9ce2
|
59b40b930af43ead468de9558b89a53d9d121c0d
|
refs/heads/master
| 2021-01-10T01:36:38.632269
| 2015-11-10T10:13:20
| 2015-11-10T10:13:20
| 45,905,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
from django.db import models
from django.contrib import admin
# Create your models here.
class Author(models.Model):
AuthorID = models.CharField(max_length = 150)
test = models.CharField(max_length = 150)
Name = models.CharField(max_length = 150)
Age = models.CharField(max_length = 150)
Country = models.CharField(max_length = 150)
class Authors(admin.ModelAdmin):
list_display = ('AuthorID','Name','Age','Country')
admin.site.register(Author,Authors)
|
[
"1281585844@qq.com"
] |
1281585844@qq.com
|
89fc1e0b297896bce1e1f940d5d733a873e63077
|
71f7c70bcf044eceb3f31103eade40a2f6f342a1
|
/p9/hello/apps/hello/views.py
|
e0dcb332ca45cfea8324aa12b44fc42e1c165dc5
|
[
"MIT"
] |
permissive
|
honestcomrade/diveintodocker
|
9212eab83e22b02ce87411d927618930f0fbd6dc
|
7dcde4272704fdbbab9b37c6ade4484792b3be2d
|
refs/heads/master
| 2021-08-10T13:31:36.272431
| 2017-11-12T16:22:43
| 2017-11-12T16:22:43
| 109,224,270
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
from django.http import HttpResponse
from django.template import loader
def index(request):
template = loader.get_template('index.html')
return HttpResponse(template.render(request))
|
[
"honestcomrade@gmail.com"
] |
honestcomrade@gmail.com
|
d0b3b7556a2fc76ae67d6b26584cda560e0e48a1
|
4d0f0fe04b7d9d96ad8f202550f2d04f7b873621
|
/agSciencesCollege/agsci.UniversalExtender/trunk/agsci/UniversalExtender/interfaces.py
|
604b397d384721cb0b90907ed3b9a9c92da21225
|
[] |
no_license
|
tsimkins/svn-import-agSciencesCollege
|
20509d679fdd3f297c5eda4f324ae382e49e9826
|
42b4d5cd0a8bb7bda2b413424cb3aa342ba42924
|
refs/heads/master
| 2021-01-01T15:31:24.614632
| 2015-04-10T14:25:10
| 2015-04-10T14:25:10
| 26,240,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,560
|
py
|
from zope.interface import Interface, Attribute
from plone.theme.interfaces import IDefaultPloneLayer
from plone.app.portlets.portlets.navigation import INavigationPortlet
class IUniversalExtenderLayer(IDefaultPloneLayer):
"""A Layer Specific to UniversalExtender"""
class IFSDPersonExtender(Interface):
""" marker interface """
class IDefaultExcludeFromNav(Interface):
""" marker interface """
class IFolderTopicExtender(Interface):
""" marker interface """
class ITopicExtender(Interface):
""" marker interface """
class IFolderExtender(Interface, INavigationPortlet):
""" marker interface """
class IMarkdownDescriptionExtender(Interface):
""" marker interface """
class IFullWidthTableOfContentsExtender(Interface):
""" marker interface """
class ITableOfContentsExtender(IFullWidthTableOfContentsExtender):
""" marker interface """
class INoComments(Interface):
""" marker interface """
class ITagExtender(Interface):
""" marker interface """
class IEventModifiedEvent(Interface):
context = Attribute("The content object that was saved.")
class ICustomNavigation(INavigationPortlet):
""" marker interfaces """
class IUniversalPublicationExtender(Interface):
"""
Marker interface to denote something as a "publication", which will add
the necessary fields to it.
"""
class IFilePublicationExtender(Interface):
"""
Marker interface to denote something as a "publication", which will add
the necessary fields to it.
"""
|
[
"trs22@ea9a3244-bef9-0310-a2a5-f2a3d2dd1ec8"
] |
trs22@ea9a3244-bef9-0310-a2a5-f2a3d2dd1ec8
|
6ddd919ec3149aaced404211f7ae154ec52fce11
|
7ee986a1957c621813cae28d5ab5eb688a1e4e65
|
/rsalgos/ressys_nearest_neighbour.py
|
27e3d8a5d602712e2b39c064935072dfab32fbfb
|
[] |
no_license
|
datagrad/MS_Reference_recsys-rl-base
|
7810247351435314709a7ec55e06cd0edb61a28a
|
6e93aed18cf9c1b857ff13a3642abe134c8a6a9f
|
refs/heads/master
| 2023-07-08T09:08:53.843168
| 2020-09-30T05:30:09
| 2020-09-30T05:30:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 999
|
py
|
from sklearn.neighbors import NearestNeighbors
from joblib import dump, load
class RecSysNearestActions(object):
NN_MODEL_SAVE_PATH = 'models/nearest_neighbour.model'
def __init__(self):
self.model_nn = None
def get_nearest_neighbours(self, query_array, k_neighbours):
distances, indexs = self.model_nn.kneighbors([query_array], k_neighbours)
indexs = list(indexs.flatten())
distances = list(distances.flatten())
return indexs, distances
def train_nearest_neighbour(self, rest_context_pc, n_neighbors=10):
self.model_nn = NearestNeighbors(n_neighbors=n_neighbors, algorithm='kd_tree')
self.model_nn.fit(rest_context_pc)
dump(self.model_nn, self.NN_MODEL_SAVE_PATH)
print('model saved to -->', self.NN_MODEL_SAVE_PATH)
def load_nearest_neighbour(self):
print('model loaded from -->', self.NN_MODEL_SAVE_PATH)
self.model_nn = load(self.NN_MODEL_SAVE_PATH)
return self.model_nn
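if __name__ == '__main__':
    # Illustrative usage sketch, not part of the original module: random
    # features stand in for real context vectors, and the 'models/' directory
    # is assumed to exist so joblib's dump() can write the model file.
    import numpy as np
    feature_matrix = np.random.rand(100, 8)
    recsys = RecSysNearestActions()
    recsys.train_nearest_neighbour(feature_matrix, n_neighbors=10)
    idxs, dists = recsys.get_nearest_neighbours(feature_matrix[0], k_neighbours=5)
    print(idxs, dists)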
|
[
"sjana@yodlee.com"
] |
sjana@yodlee.com
|
89ced595161f3d6bd446f66218771ab53cf7a9ae
|
3a9454ad6d7bb5a3d313c4266f0ddd8dc87190e1
|
/Geeksforgeeks/count_even_odd_num_list.py
|
dbf009c76e2bbdc5f9b3b37ad9eb024410d81b69
|
[] |
no_license
|
indrakumar0812/develop
|
bc60f49fbed29e148966b880a606b1f18464ac76
|
f0268b5a70dc2590cc236864428e1192cb30ba5e
|
refs/heads/master
| 2022-12-20T14:49:56.396032
| 2020-10-19T03:37:47
| 2020-10-19T03:37:47
| 305,252,754
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
nums = [2, 7, 5, 64, 14]
count_odd = 0
count_even = 0
for i in nums:
    if i % 2 == 0:
        count_even += 1
    else:
        count_odd += 1
print("no. of odd numbers are", count_odd)
print("no. of even numbers are", count_even)
|
[
"indrasen.170190@gmail.com"
] |
indrasen.170190@gmail.com
|
e48b835e1d5120077a7225a0065dbe3c3ce26277
|
21c805ceb4a1f2f9142d81d4e3bf4d910e5aa19d
|
/mail/views.py
|
5bef491af9166436db497e899b2837c6adc3371e
|
[] |
no_license
|
dbonach/cs50web-project-3-mail
|
7d9895898ebfd6defbf4e9461bdcb656d5b7a24e
|
0e338831d1599e14930f48d34886639bd94653d0
|
refs/heads/main
| 2023-07-02T14:01:05.953569
| 2021-08-08T21:05:07
| 2021-08-08T21:05:07
| 369,681,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,467
|
py
|
import json
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.db import IntegrityError
from django.http import JsonResponse
from django.shortcuts import HttpResponse, HttpResponseRedirect, render
from django.urls import reverse
from django.views.decorators.csrf import csrf_exempt
from .models import User, Email
def index(request):
# Authenticated users view their inbox
if request.user.is_authenticated:
return render(request, "mail/inbox.html")
# Everyone else is prompted to sign in
else:
return HttpResponseRedirect(reverse("login"))
@csrf_exempt
@login_required
def compose(request):
# print(request.body)
# print(json.loads(request.body)['recipients'])
# Composing a new email must be via POST
if request.method != "POST":
return JsonResponse({"error": "POST request required."}, status=400)
# Check recipient emails
data = json.loads(request.body)
emails = [email.strip() for email in data.get("recipients").split(",")]
if emails == [""]:
return JsonResponse({
"error": "At least one recipient required."
}, status=400)
# Convert email addresses to users
recipients = []
for email in emails:
try:
user = User.objects.get(email=email)
recipients.append(user)
except User.DoesNotExist:
return JsonResponse({
"error": f"User with email {email} does not exist."
}, status=400)
# Get contents of email
subject = data.get("subject", "")
body = data.get("body", "")
# Create one email for each recipient, plus sender
users = set()
users.add(request.user)
users.update(recipients)
for user in users:
email = Email(
user=user,
sender=request.user,
subject=subject,
body=body,
read=user == request.user
)
email.save()
for recipient in recipients:
email.recipients.add(recipient)
email.save()
return JsonResponse({"message": "Email sent successfully."}, status=201)
@login_required
def mailbox(request, mailbox):
# Filter emails returned based on mailbox
if mailbox == "inbox":
emails = Email.objects.filter(
user=request.user, recipients=request.user, archived=False
)
elif mailbox == "sent":
emails = Email.objects.filter(
user=request.user, sender=request.user
)
elif mailbox == "archive":
emails = Email.objects.filter(
user=request.user, recipients=request.user, archived=True
)
else:
return JsonResponse({"error": "Invalid mailbox."}, status=400)
    # Return emails in reverse chronological order
emails = emails.order_by("-timestamp").all()
return JsonResponse([email.serialize() for email in emails], safe=False)
@csrf_exempt
@login_required
def email(request, email_id):
# Query for requested email
try:
email = Email.objects.get(user=request.user, pk=email_id)
except Email.DoesNotExist:
return JsonResponse({"error": "Email not found."}, status=404)
# Return email contents
if request.method == "GET":
return JsonResponse(email.serialize())
# Update whether email is read or should be archived
elif request.method == "PUT":
data = json.loads(request.body)
if data.get("read") is not None:
email.read = data["read"]
if data.get("archived") is not None:
email.archived = data["archived"]
email.save()
return HttpResponse(status=204)
# Email must be via GET or PUT
else:
return JsonResponse({
"error": "GET or PUT request required."
}, status=400)
def login_view(request):
if request.method == "POST":
# Attempt to sign user in
email = request.POST["email"]
password = request.POST["password"]
user = authenticate(request, username=email, password=password)
# Check if authentication successful
if user is not None:
login(request, user)
return HttpResponseRedirect(reverse("index"))
else:
return render(request, "mail/login.html", {
"message": "Invalid email and/or password."
})
else:
return render(request, "mail/login.html")
def logout_view(request):
logout(request)
return HttpResponseRedirect(reverse("index"))
def register(request):
if request.method == "POST":
email = request.POST["email"]
# Ensure password matches confirmation
password = request.POST["password"]
confirmation = request.POST["confirmation"]
if password != confirmation:
return render(request, "mail/register.html", {
"message": "Passwords must match."
})
# Attempt to create new user
try:
user = User.objects.create_user(email, email, password)
user.save()
except IntegrityError as e:
print(e)
return render(request, "mail/register.html", {
"message": "Email address already taken."
})
login(request, user)
return HttpResponseRedirect(reverse("index"))
else:
return render(request, "mail/register.html")
|
[
"deivitlopes@gmail.com"
] |
deivitlopes@gmail.com
|
fed1bc7f40bb6feaf49880cd138754f9076b0128
|
fd7fe855490f80bef9cbb1b44230c31c1133635b
|
/HardwareObjects/SOLEIL/PX2MultiCollect.py
|
5773693f8f2a2eb09c7891a5ed0f11043faf7541
|
[] |
no_license
|
SOLEILPX/mxcube
|
75dd528bbcd8743896217a7e490bf530cb5be4b1
|
062ccc01ecf6e9ad1ee40f6a0d4d5b42f7a49be0
|
refs/heads/master
| 2021-01-22T18:32:36.584828
| 2013-11-01T17:44:09
| 2013-11-01T17:44:09
| 13,930,813
| 0
| 0
| null | 2013-11-01T17:44:10
| 2013-10-28T16:04:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,383
|
py
|
from SOLEILMultiCollect import *
import shutil
import logging
from PyTango import DeviceProxy
import numpy
import re
class PX2MultiCollect(SOLEILMultiCollect):
def __init__(self, name):
SOLEILMultiCollect.__init__(self, name, LimaAdscDetector(), TunableEnergy())
#SOLEILMultiCollect.__init__(self, name, DummyDetector(), TunableEnergy())
def init(self):
logging.info("headername is %s" % self.headername )
self.headerdev = DeviceProxy( self.headername )
self.mono1dev = DeviceProxy( self.mono1name )
self.det_mt_ts_dev = DeviceProxy( self.detmttsname )
self.det_mt_tx_dev = DeviceProxy( self.detmttxname )
self.det_mt_tz_dev = DeviceProxy( self.detmttzname )
self._detector.prepareHeader = self.prepareHeader
SOLEILMultiCollect.init(self)
def prepareHeader(self):
'''Will set up header given the actual values of beamline energy, mono and detector distance'''
X, Y = self.beamCenter()
BeamCenterX = str(round(X, 3))
BeamCenterY = str(round(Y, 3))
head = self.headerdev.read_attribute('header').value
head = re.sub('BEAM_CENTER_X=\d\d\d\.\d', 'BEAM_CENTER_X=' + BeamCenterX, head)
head = re.sub('BEAM_CENTER_Y=\d\d\d\.\d', 'BEAM_CENTER_Y=' + BeamCenterY, head)
return head
def beamCenter(self):
'''Will calculate beam center coordinates'''
# Useful values
tz_ref = -6.5 # reference tz position for linear regression
tx_ref = -17.0 # reference tx position for linear regression
q = 0.102592 # pixel size in milimeters
wavelength = self.mono1dev.read_attribute('lambda').value
distance = self.det_mt_ts_dev.read_attribute('position').value
tx = self.det_mt_tx_dev.read_attribute('position').value
tz = self.det_mt_tz_dev.read_attribute('position').value
zcor = tz - tz_ref
xcor = tx - tx_ref
Theta = numpy.matrix([[1.55557116e+03, 1.43720063e+03],
[-8.51067454e-02, -1.84118001e-03],
[-1.99919592e-01, 3.57937064e+00]]) # values from 16.05.2013
X = numpy.matrix([1., distance, wavelength])
Origin = Theta.T * X.T
Origin = Origin * q
return Origin[1] + zcor, Origin[0] + xcor
|
[
"blissadm@proxima2a-5.(none)"
] |
blissadm@proxima2a-5.(none)
|
ddf31aa0247b5bd2963cdb3c8159a26bb33c77e0
|
fe039f62337b210061bfd7291000c5fa406fd0ff
|
/list/webapp/models.py
|
4a9bf3ad037982d3daaeb33bc2a410482cb276bf
|
[] |
no_license
|
Erlan1998/python_group_7_homework_45_Erlan_Kurbanaliev
|
a5f5956490d778341e4958fe6740ab6e1a395f45
|
4f860b561f046413bbc9ab8f587b8f7c40b8c23a
|
refs/heads/main
| 2023-05-07T00:16:28.530637
| 2021-03-04T12:32:36
| 2021-03-04T12:32:36
| 342,240,837
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 692
|
py
|
from django.db import models
status_choices = [('new', 'Новая'), ('in_progress', 'В процессе'), ('done', 'Сделано')]
class List(models.Model):
description = models.TextField(max_length=200, null=False, blank=False)
detailed_description = models.TextField(max_length=3000, null=True, blank=True)
status = models.CharField(max_length=120, null=False, blank=False, choices=status_choices)
updated_at = models.DateField(null=True, blank=True)
class Meta:
db_table = 'Lists'
verbose_name = 'Задача'
verbose_name_plural = 'Задачи'
def __str__(self):
return f'{self.id}. {self.status}: {self.description}'
|
[
"kurbanalieverlan@gmail.com"
] |
kurbanalieverlan@gmail.com
|
271f0e1a53a69327d69451cb15835b1dbd9ab828
|
13b8b25240f2fc4ed2a2c5e2a5384888c7920760
|
/company_logo.py
|
841c6a0fc2ae94d659fb1be5da20893d035079fc
|
[] |
no_license
|
Harshkakani/hackerrank_python
|
9227f408552e10e645863c6e994ea5b6bb976bd4
|
fbd0391111316439c1dc8a206f68d76c3b5e132c
|
refs/heads/master
| 2022-12-21T23:57:48.569952
| 2020-10-01T04:00:08
| 2020-10-01T04:00:08
| 280,214,248
| 0
| 0
| null | 2020-10-01T04:00:09
| 2020-07-16T17:19:25
|
Python
|
UTF-8
|
Python
| false
| false
| 310
|
py
|
s = input()
# Tally occurrences of each lowercase letter.
letters = [0]*26
for letter in s:
    letters[ord(letter) - ord('a')] += 1
# Report the three most common letters; scanning a-z breaks ties alphabetically.
for i in range(3):
    max_letter = max(letters)
    for index in range(26):
        if max_letter == letters[index]:
            print(chr(ord('a') + index), max_letter)
            letters[index] = -1
            break
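# Added sketch using the standard library; sorting on (-count, letter)
# reproduces the alphabetical tie-break of the loop above.
from collections import Counter
for letter, count in sorted(Counter(s).items(), key=lambda kv: (-kv[1], kv[0]))[:3]:
    print(letter, count)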
|
[
"harshkakani.hk@gmail.com"
] |
harshkakani.hk@gmail.com
|
f53ac3f6c538688800be418ff966c4e0919f43ec
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_248/ch81_2020_04_12_22_18_46_334181.py
|
8e1cf83d9390b88d1079f2c7a2e6970a6b74812b
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
def interseccao_valores(dic1, dic2):
    # Return the values that appear in both dictionaries, without duplicates.
    v1 = list(dic1.values())
    v2 = list(dic2.values())
    lista = []
    for valor in v1:
        if valor in v2 and valor not in lista:
            lista.append(valor)
    return lista
|
[
"you@example.com"
] |
you@example.com
|
5c6ca9b47c769d947c3323009808774f0d662ef7
|
fd8286efe303afa40354aff60282b3ec81fbb479
|
/Python Data Structure/Ch6-Solutions-Student.py
|
ba0b7fbc4bd7ecd7c6d02a99ad03125bb2e6f142
|
[] |
no_license
|
Busymeng/MyPython
|
76419db7e2a4ec1ba91ca94a3340c889e4545165
|
2ca149de972d5834c912485883811a58e0ca715d
|
refs/heads/master
| 2023-07-06T18:14:29.708542
| 2023-06-30T19:29:03
| 2023-06-30T19:29:03
| 138,058,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
#####################################################################
## Ch6 Quiz
##
"""
1. Hellothere
2. 42
3. print(x[8])
4. print(x[14:17])
5. letter
6. 42
7. print(greet.upper())
8. shout()
9. .ma
10. strip()
"""
#####################################################################
## Assignment 6.5
##
"""
* Write code using find() and string slicing (see section 6.10) to
extract the number at the end of the line below.
Convert the extracted value to a floating point number and print it out.
"""
text = "X-DSPAM-Confidence: 0.8475";
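# A minimal sketch of the requested extraction (added; variable names are
# illustrative): find() locates the colon, slicing takes the remainder,
# float() converts it, and the result is printed.
pos = text.find(':')
value = float(text[pos + 1:])
print(value)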
|
[
"noreply@github.com"
] |
Busymeng.noreply@github.com
|
9ac90402d2cf35ca49656daf947ac350ce5fdcb0
|
5291587d464fa1845e4689b07252d1c9c3a91994
|
/DNS_homework/Unit4_python_homework/test.py
|
b4e1fb11ee13f3716ad7e5edcadf19755ab8eca0
|
[] |
no_license
|
CamboRambo83/My_projects
|
03a01c1c3141085fdc7ceffdd203b6f75c5d539d
|
0f976b5511141419ab26a28d0a4726053f1767dd
|
refs/heads/master
| 2020-07-04T11:19:29.018424
| 2019-08-17T21:02:49
| 2019-08-17T21:02:49
| 202,271,684
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17
|
py
|
print('wtfffff')
|
[
"Khmerprince83@gmail.com"
] |
Khmerprince83@gmail.com
|
de3d4233cf25be9b3f24af1908a33f3ae45e1f5e
|
b3a5f2a9a1d777978a27d39325872ca8faf2f681
|
/RockPaperScissors.py
|
e31020392f25c2f2dace4042a6130b418613c6b9
|
[] |
no_license
|
nickiapalucci/RockPaperScissors
|
28ee78e518e5801038f962d46020ea0fb0f0afbe
|
492bec6d3386400f6147bf747cde5a91005a46cf
|
refs/heads/master
| 2021-01-02T09:27:53.368446
| 2020-10-02T04:28:48
| 2020-10-02T04:28:48
| 39,541,269
| 1
| 1
| null | 2020-10-02T04:28:49
| 2015-07-23T02:29:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,349
|
py
|
# include random module to generate opponent hand in this game of chance
import random
# Welcome, title, explanation
print("\n\nWelcome to Rock, Paper, Scissors!\n\n\n"
"This is a game of luck, you versus the computer.\n"
"You both will secretly pick a rock, paper, or scissors.\n\n"
"The winner is determined by classic rock, paper, scissor standards:\n"
"Rock smashes Scissors, Scissors cut Paper, and "
"Paper covers a Rock\n\n")
# Prompt user to enter a number
print("When you are ready, choose your weapon, then press [ENTER] \n")
print("Press 0 for scissors, 1 for rock, or 2 for paper\n")
# Assign input to Player1
Player1 = input("=> ")
# Test input to see if user typed an integer.
# If input string cannot be converted integer, replace with an integer
# that will represent a loss for player 1
try :
Player1 = int(Player1)
except :
Player1 = -1
# Assign random choice to Player2
Player2 = random.randint(0, 2)
# Determine winner or tie
if Player1 == Player2 :
Winner = 0
elif Player1 == 0 and Player2 == 1 :
Winner = 2
elif Player1 == 0 and Player2 == 2 :
Winner = 1
elif Player1 == 1 and Player2 == 0 :
Winner = 1
elif Player1 == 1 and Player2 == 2 :
Winner = 2
elif Player1 == 2 and Player2 == 0 :
Winner = 2
elif Player1 == 2 and Player2 == 1 :
Winner = 1
elif Player1 <= -1 or Player1 >= 3 :
Winner = 2
else :
Winner = -1
# Create result strings
Player1ch = 'you'
if Player1 == 0 :
Player1ch = Player1ch + ' picked scissors'
elif Player1 == 1 :
Player1ch = Player1ch + ' picked rock'
elif Player1 == 2 :
Player1ch = Player1ch + ' picked paper'
else :
Player1ch = Player1ch + ' did not enter 0, 1, or 2'
Player2ch = 'The computer'
if Player2 == 0 :
Player2ch = Player2ch + ' picked scissors'
elif Player2 == 1 :
Player2ch = Player2ch + ' picked rock'
elif Player2 == 2 :
Player2ch = Player2ch + ' picked paper'
else :
Player2ch = Player2ch + ' messed up (which is weird)'
# Translate results to character string
if Winner == 0 :
Winnerch = 'It is a draw.'
elif Winner == 1 :
Winnerch = 'You won!'
elif Winner == 2 :
Winnerch = 'The computer won.'
else :
Winnerch = 'Nobody won. Something went wrong.'
# Display results
print("\n", Player2ch, ", and ", Player1ch, ". ",
Winnerch, "\n", sep = '')
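# Added note: with 0 = scissors, 1 = rock, 2 = paper, the if/elif ladder above
# collapses to modular arithmetic. A sketch, not part of the original game:
def winner_mod3(p1, p2):
    """Return 0 for a draw, 1 if p1 wins, 2 if p2 wins (valid picks only)."""
    return (p1 - p2) % 3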
|
[
"cukid9876@outlook.com"
] |
cukid9876@outlook.com
|
1da0fa467b0dabe86ba10b2cefbb02583f434cc5
|
4831ef3cf429899d657f29a6f84f7c396a8d9872
|
/avito/extract_features.py
|
360bc84b48bf0fe8f5dd8847541898d6d57ba65f
|
[] |
no_license
|
senkin13/kaggle
|
f0aebef1fa8ecc5e4f234df91be63279d900cd0d
|
5f1a078eb864623376813dea620138653ada69a8
|
refs/heads/master
| 2022-12-08T09:58:40.363726
| 2022-12-07T08:17:48
| 2022-12-07T08:17:48
| 99,398,475
| 110
| 23
| null | 2022-12-07T08:15:14
| 2017-08-05T04:15:43
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 26,007
|
py
|
import pandas as pd
import numpy as np
import pickle
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
train = pd.read_csv('../input/train.csv', parse_dates = ['activation_date'])
test = pd.read_csv('../input/test.csv', parse_dates = ['activation_date'])
train_active = pd.read_csv('../input/train_active.csv', parse_dates = ['activation_date'])
test_active = pd.read_csv('../input/test_active.csv', parse_dates = ['activation_date'])
train_periods = pd.read_csv('../input/periods_train.csv', parse_dates=['activation_date','date_from', 'date_to'])
test_periods = pd.read_csv('../input/periods_test.csv', parse_dates=['activation_date','date_from', 'date_to'])
df_all = pd.concat([
train,
train_active,
test,
test_active
]).reset_index(drop=True)
df_all.drop_duplicates(['item_id'], inplace=True)
df_all['wday'] = df_all['activation_date'].dt.weekday
df_all['price'].fillna(0, inplace=True)
df_all['price'] = np.log1p(df_all['price'])
df_all['city'] = df_all['city'] + "_" + df_all['region']
df_all['param_123'] = (df_all['param_1'].fillna('') + ' ' + df_all['param_2'].fillna('') + ' ' + df_all['param_3'].fillna('')).astype(str)
text_vars = ['user_id','region', 'city', 'parent_category_name', 'category_name', 'user_type','param_1','param_2','param_3','param_123']
for col in tqdm(text_vars):
lbl = LabelEncoder()
lbl.fit(df_all[col].values.astype('str'))
df_all[col] = lbl.transform(df_all[col].values.astype('str'))
all_periods = pd.concat([
train_periods,
test_periods
])
all_periods['days_up'] = all_periods['date_to'].dt.dayofyear - all_periods['date_from'].dt.dayofyear
def agg(df,agg_cols):
for c in tqdm(agg_cols):
new_feature = '{}_{}_{}'.format('_'.join(c['groupby']), c['agg'], c['target'])
gp = df.groupby(c['groupby'])[c['target']].agg(c['agg']).reset_index().rename(index=str, columns={c['target']:new_feature})
df = df.merge(gp,on=c['groupby'],how='left')
return df
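# Illustrative note (added): with c = {'groupby': ['user_id'], 'target': 'price',
# 'agg': 'mean'}, agg() merges back a user_id_mean_price column, i.e. every row
# gains the mean log-price over that user's ads.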
agg_cols = [
{'groupby': ['item_id'], 'target':'days_up', 'agg':'count'},
{'groupby': ['item_id'], 'target':'days_up', 'agg':'sum'},
]
all_periods = agg(all_periods,agg_cols)
all_periods.drop_duplicates(['item_id'], inplace=True)
#all_periods.drop(['activation_date','date_from','date_to','days_up','days_total'],axis=1, inplace=True)
all_periods.drop(['activation_date','date_from','date_to'],axis=1, inplace=True)
all_periods.reset_index(drop=True,inplace=True)
df_all = df_all.merge(all_periods, on='item_id', how='left')
#Impute Days up
df_all['item_id_count_days_up_impute'] = df_all['item_id_count_days_up']
df_all['item_id_sum_days_up_impute'] = df_all['item_id_sum_days_up']
enc = df_all.groupby('category_name')['item_id_count_days_up'].agg('median').astype(np.float32).reset_index()
enc.columns = ['category_name' ,'count_days_up_impute']
df_all = pd.merge(df_all, enc, how='left', on='category_name')
df_all['item_id_count_days_up_impute'].fillna(df_all['count_days_up_impute'], inplace=True)
enc = df_all.groupby('category_name')['item_id_sum_days_up'].agg('median').astype(np.float32).reset_index()
enc.columns = ['category_name' ,'sum_days_up_impute']
df_all = pd.merge(df_all, enc, how='left', on='category_name')
df_all['item_id_sum_days_up_impute'].fillna(df_all['sum_days_up_impute'], inplace=True)
df_numerical_active = df_all[['category_name','city','deal_probability','image_top_1',
       'item_id','item_seq_number','param_1','param_2','param_3','param_123','parent_category_name','price',
       'region','user_id','user_type','wday','item_id_count_days_up','item_id_sum_days_up',
       'item_id_count_days_up_impute','item_id_sum_days_up_impute']]
# create numerical features with active
#df_numerical_active.to_pickle('/tmp/basic_numerical_active.pkl')
## with active
df_all_tmp = df_numerical_active.copy()
raw_columns = df_all_tmp.columns.values
print ('1')
## aggregate features
def agg(df,agg_cols):
for c in tqdm(agg_cols):
new_feature = '{}_{}_{}'.format('_'.join(c['groupby']), c['agg'], c['target'])
gp = df.groupby(c['groupby'])[c['target']].agg(c['agg']).reset_index().rename(index=str, columns={c['target']:new_feature})
df = df.merge(gp,on=c['groupby'],how='left')
return df
agg_cols = [
############################unique aggregation##################################
{'groupby': ['user_id'], 'target':'price', 'agg':'nunique'},
{'groupby': ['parent_category_name'], 'target':'price', 'agg':'nunique'},
{'groupby': ['category_name'], 'target':'price', 'agg':'nunique'},
{'groupby': ['region'], 'target':'price', 'agg':'nunique'},
{'groupby': ['city'], 'target':'price', 'agg':'nunique'},
{'groupby': ['image_top_1'], 'target':'price', 'agg':'nunique'},
{'groupby': ['wday'], 'target':'price', 'agg':'nunique'},
{'groupby': ['param_1'], 'target':'price', 'agg':'nunique'},
{'groupby': ['category_name'], 'target':'image_top_1', 'agg':'nunique'},
{'groupby': ['parent_category_name'], 'target':'image_top_1', 'agg':'nunique'},
{'groupby': ['user_id'], 'target':'image_top_1', 'agg':'nunique'},
{'groupby': ['user_id'], 'target':'parent_category_name', 'agg':'nunique'},
{'groupby': ['user_id'], 'target':'category_name', 'agg':'nunique'},
{'groupby': ['user_id'], 'target':'wday', 'agg':'nunique'},
{'groupby': ['user_id'], 'target':'param_1', 'agg':'nunique'},
############################count aggregation##################################
{'groupby': ['user_id'], 'target':'item_id', 'agg':'count'},
{'groupby': ['user_id','param_1'], 'target':'item_id', 'agg':'count'},
{'groupby': ['user_id','region'], 'target':'item_id', 'agg':'count'},
{'groupby': ['user_id','city'], 'target':'item_id', 'agg':'count'},
{'groupby': ['user_id','parent_category_name'], 'target':'item_id', 'agg':'count'},
{'groupby': ['user_id','category_name'], 'target':'item_id', 'agg':'count'},
{'groupby': ['user_id','wday'], 'target':'item_id', 'agg':'count'},
{'groupby': ['user_id','image_top_1'], 'target':'item_id', 'agg':'count'},
{'groupby': ['user_id','wday','category_name'], 'target':'item_id', 'agg':'count'},
{'groupby': ['user_id','wday','image_top_1'], 'target':'item_id', 'agg':'count'},
{'groupby': ['user_id','wday','parent_category_name'], 'target':'item_id', 'agg':'count'},
{'groupby': ['user_id','wday','city'], 'target':'item_id', 'agg':'count'},
{'groupby': ['user_id','wday','region'], 'target':'item_id', 'agg':'count'},
{'groupby': ['user_id','category_name','city'], 'target':'item_id', 'agg':'count'},
{'groupby': ['user_id','wday','category_name','city'], 'target':'item_id', 'agg':'count'},
{'groupby': ['price'], 'target':'item_id', 'agg':'count'},
{'groupby': ['price','user_id'], 'target':'item_id', 'agg':'count'},
{'groupby': ['price','category_name'], 'target':'item_id', 'agg':'count'},
############################mean/median/sum/min/max aggregation##################################
{'groupby': ['image_top_1','user_id'], 'target':'price', 'agg':'mean'},
{'groupby': ['image_top_1','user_id'], 'target':'price', 'agg':'median'},
{'groupby': ['image_top_1','user_id'], 'target':'price', 'agg':'sum'},
{'groupby': ['image_top_1','user_id'], 'target':'price', 'agg':'max'},
{'groupby': ['param_2'], 'target':'price', 'agg':'mean'},
{'groupby': ['param_2'], 'target':'price', 'agg':'max'},
{'groupby': ['param_3'], 'target':'price', 'agg':'mean'},
{'groupby': ['param_3'], 'target':'price', 'agg':'max'},
{'groupby': ['user_id'], 'target':'price', 'agg':'mean'},
{'groupby': ['user_id'], 'target':'price', 'agg':'median'},
{'groupby': ['user_id'], 'target':'price', 'agg':'sum'},
{'groupby': ['user_id'], 'target':'price', 'agg':'min'},
{'groupby': ['user_id'], 'target':'price', 'agg':'max'},
{'groupby': ['item_seq_number'], 'target':'price', 'agg':'mean'},
{'groupby': ['item_seq_number'], 'target':'price', 'agg':'median'},
{'groupby': ['item_seq_number'], 'target':'price', 'agg':'sum'},
{'groupby': ['item_seq_number'], 'target':'price', 'agg':'min'},
{'groupby': ['item_seq_number'], 'target':'price', 'agg':'max'},
{'groupby': ['image_top_1'], 'target':'price', 'agg':'mean'},
{'groupby': ['image_top_1'], 'target':'price', 'agg':'median'},
{'groupby': ['image_top_1'], 'target':'price', 'agg':'sum'},
{'groupby': ['image_top_1'], 'target':'price', 'agg':'max'},
{'groupby': ['param_1'], 'target':'price', 'agg':'mean'},
{'groupby': ['param_1'], 'target':'price', 'agg':'max'},
{'groupby': ['region'], 'target':'price', 'agg':'mean'},
{'groupby': ['region'], 'target':'price', 'agg':'max'},
{'groupby': ['city'], 'target':'price', 'agg':'mean'},
{'groupby': ['city'], 'target':'price', 'agg':'max'},
{'groupby': ['parent_category_name'], 'target':'price', 'agg':'mean'},
{'groupby': ['parent_category_name'], 'target':'price', 'agg':'sum'},
{'groupby': ['parent_category_name'], 'target':'price', 'agg':'max'},
{'groupby': ['category_name'], 'target':'price', 'agg':'mean'},
{'groupby': ['category_name'], 'target':'price', 'agg':'sum'},
{'groupby': ['category_name'], 'target':'price', 'agg':'max'},
{'groupby': ['wday','category_name','city'], 'target':'price', 'agg':'mean'},
{'groupby': ['wday','category_name','city'], 'target':'price', 'agg':'median'},
{'groupby': ['wday','category_name','city'], 'target':'price', 'agg':'sum'},
{'groupby': ['wday','category_name','city'], 'target':'price', 'agg':'max'},
{'groupby': ['wday','region'], 'target':'price', 'agg':'mean'},
{'groupby': ['wday','region'], 'target':'price', 'agg':'median'},
{'groupby': ['wday','region'], 'target':'price', 'agg':'sum'},
{'groupby': ['wday','region'], 'target':'price', 'agg':'max'},
{'groupby': ['wday','city'], 'target':'price', 'agg':'mean'},
{'groupby': ['wday','city'], 'target':'price', 'agg':'median'},
{'groupby': ['wday','city'], 'target':'price', 'agg':'sum'},
{'groupby': ['wday','city'], 'target':'price', 'agg':'max'},
{'groupby': ['wday','user_id'], 'target':'price', 'agg':'mean'},
{'groupby': ['wday','user_id'], 'target':'price', 'agg':'median'},
{'groupby': ['wday','user_id'], 'target':'price', 'agg':'sum'},
{'groupby': ['wday','user_id'], 'target':'price', 'agg':'min'},
{'groupby': ['wday','user_id'], 'target':'price', 'agg':'max'},
{'groupby': ['user_id'], 'target':'item_id_sum_days_up', 'agg':'mean'},
{'groupby': ['user_id'], 'target':'item_id_count_days_up', 'agg':'mean'},
]
print ('2')
df_all_tmp = agg(df_all_tmp,agg_cols)
tmp_columns = df_all_tmp.columns.values
df_train = df_all_tmp[df_all_tmp['deal_probability'].notnull()]
test_id = pd.read_csv('../input/test.csv',usecols=['item_id'])
test_id = test_id.merge(df_all_tmp,on='item_id',how='left')
del df_all_tmp
for i in tmp_columns:
if i not in raw_columns:
print (i)
df_train[i].to_pickle('/tmp/features/number_agg/clean_train_active/' + str(i))
test_id[i].to_pickle('/tmp/features/number_agg/clean_test_active/' + str(i))
train = pd.read_csv('../input/train.csv', parse_dates = ['activation_date'])
test = pd.read_csv('../input/test.csv', parse_dates = ['activation_date'])
df_all = pd.concat([train,test],axis=0).reset_index(drop=True)
df_all['wday'] = df_all['activation_date'].dt.weekday
df_all['price'].fillna(0, inplace=True)
df_all['price'] = np.log1p(df_all['price'])
df_all['city'] = df_all['city'] + "_" + df_all['region']
df_all['param_123'] = (df_all['param_1'].fillna('') + ' ' + df_all['param_2'].fillna('') + ' ' + df_all['param_3'].fillna('')).astype(str)
df_all['title'] = df_all['title'].fillna('').astype(str)
df_all['text'] = df_all['description'].fillna('').astype(str) + ' ' + df_all['title'].fillna('').astype(str) + ' ' + df_all['param_123'].fillna('').astype(str)
# from https://www.kaggle.com/christofhenkel/text2image-top-1
# tr.csv is train_image_top_1_features.csv + test_image_top_1_features.csv
image_top_2 = pd.read_csv('/tmp/features/category/tr.csv')
df_all['image_top_2'] = image_top_2['image_top_1']
text_vars = ['user_id','region', 'city', 'parent_category_name', 'category_name', 'user_type','param_1','param_2','param_3','param_123']
for col in tqdm(text_vars):
lbl = LabelEncoder()
lbl.fit(df_all[col].values.astype('str'))
df_all[col] = lbl.transform(df_all[col].values.astype('str'))
# create image_top_1,2 category feature
df_train = df_all[df_all['deal_probability'].notnull()]
df_test = df_all[df_all['deal_probability'].isnull()]
df_train['image_top_1'].to_pickle('/tmp/features/number_agg/clean_train_image_top_1/image_top_1')
df_test['image_top_1'].to_pickle('/tmp/features/number_agg/clean_test_image_top_1/image_top_1')
df_train['image_top_2'].to_pickle('/tmp/features/number_agg/clean_train_image_top_1/image_top_2')
df_test['image_top_2'].to_pickle('/tmp/features/number_agg/clean_test_image_top_1/image_top_2')
# create image_top_1,2 aggregation feature
df_all_tmp = df_all.copy()
raw_columns = df_all_tmp.columns.values
agg_cols = [
############################unique aggregation##################################
{'groupby': ['image_top_1'], 'target':'price', 'agg':'nunique'},
{'groupby': ['image_top_2'], 'target':'price', 'agg':'nunique'},
{'groupby': ['user_id'], 'target':'image_top_1', 'agg':'nunique'},
{'groupby': ['user_id'], 'target':'image_top_2', 'agg':'nunique'},
############################count aggregation##################################
{'groupby': ['user_id','image_top_1'], 'target':'item_id', 'agg':'count'},
{'groupby': ['user_id','image_top_2'], 'target':'item_id', 'agg':'count'},
{'groupby': ['user_id','wday','image_top_1'], 'target':'item_id', 'agg':'count'},
{'groupby': ['user_id','wday','image_top_2'], 'target':'item_id', 'agg':'count'},
############################mean/median/sum/min/max aggregation##################################
{'groupby': ['image_top_1','user_id'], 'target':'price', 'agg':'mean'},
{'groupby': ['image_top_1','user_id'], 'target':'price', 'agg':'median'},
{'groupby': ['image_top_1','user_id'], 'target':'price', 'agg':'sum'},
{'groupby': ['image_top_1','user_id'], 'target':'price', 'agg':'max'},
{'groupby': ['image_top_2','user_id'], 'target':'price', 'agg':'mean'},
{'groupby': ['image_top_2','user_id'], 'target':'price', 'agg':'median'},
{'groupby': ['image_top_2','user_id'], 'target':'price', 'agg':'sum'},
{'groupby': ['image_top_2','user_id'], 'target':'price', 'agg':'max'},
{'groupby': ['image_top_1'], 'target':'price', 'agg':'mean'},
{'groupby': ['image_top_1'], 'target':'price', 'agg':'median'},
{'groupby': ['image_top_1'], 'target':'price', 'agg':'sum'},
{'groupby': ['image_top_1'], 'target':'price', 'agg':'max'},
{'groupby': ['image_top_2'], 'target':'price', 'agg':'mean'},
{'groupby': ['image_top_2'], 'target':'price', 'agg':'median'},
{'groupby': ['image_top_2'], 'target':'price', 'agg':'sum'},
{'groupby': ['image_top_2'], 'target':'price', 'agg':'max'},
]
df_all_tmp = agg(df_all_tmp,agg_cols)
tmp_columns = df_all_tmp.columns.values
df_train = df_all_tmp[df_all_tmp['deal_probability'].notnull()]
df_test = df_all_tmp[df_all_tmp['deal_probability'].isnull()]
for i in tmp_columns:
if i not in raw_columns:
print (i)
df_train[i].to_pickle('/tmp/features/number_agg/clean_train_image_top_1/' + str(i))
df_test[i].to_pickle('/tmp/features/number_agg/clean_test_image_top_1/' + str(i))
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn import preprocessing, model_selection, metrics
from sklearn.decomposition import TruncatedSVD
train = pd.read_csv('../input/train.csv', parse_dates = ['activation_date'])
test = pd.read_csv('../input/test.csv', parse_dates = ['activation_date'])
df_all = pd.concat([train,test],axis=0).reset_index(drop=True)
df_all['param_123'] = (df_all['param_1'].fillna('') + ' ' + df_all['param_2'].fillna('') + ' ' + df_all['param_3'].fillna('')).astype(str)
df_all['title'] = df_all['title'].fillna('').astype(str)
df_all['text'] = df_all['description'].fillna('').astype(str) + ' ' + df_all['title'].fillna('').astype(str) + ' ' + df_all['param_123'].fillna('').astype(str)
df_text = df_all[['deal_probability','title','param_123','text']]
df_train_text = df_text[df_text['deal_probability'].notnull()]
df_test_text = df_text[df_text['deal_probability'].isnull()]
### TFIDF Vectorizer ###
tfidf_vec = TfidfVectorizer(ngram_range=(1,1))
full_title_tfidf = tfidf_vec.fit_transform(df_text['title'].values.tolist() )
train_title_tfidf = tfidf_vec.transform(df_train_text['title'].values.tolist())
test_title_tfidf = tfidf_vec.transform(df_test_text['title'].values.tolist())
### SVD Components ###
n_comp = 40
svd_title_obj = TruncatedSVD(n_components=n_comp, algorithm='arpack')
svd_title_obj.fit(full_title_tfidf)
train_title_svd = pd.DataFrame(svd_title_obj.transform(train_title_tfidf))
test_title_svd = pd.DataFrame(svd_title_obj.transform(test_title_tfidf))
train_title_svd.columns = ['svd_title_'+str(i+1) for i in range(n_comp)]
test_title_svd.columns = ['svd_title_'+str(i+1) for i in range(n_comp)]
for i in train_title_svd.columns:
print (i)
    train_title_svd[i].to_pickle('/tmp/features/tsvd/train/' + str(i))
test_title_svd[i].to_pickle('/tmp/features/tsvd/test/' + str(i))
# %%time -- Jupyter cell magic; invalid outside a notebook cell
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import FeatureUnion
from scipy.sparse import hstack, csr_matrix
from nltk.corpus import stopwords
import re
import string
def count_regexp_occ(regexp="", text=None):
""" Simple way to get the number of occurence of a regex"""
return len(re.findall(regexp, text))
stopwords = {x: 1 for x in stopwords.words('russian')}
punct = set(string.punctuation)
emoji = set()
for s in df_all['text'].fillna('').astype(str):
for c in s:
if c.isdigit() or c.isalpha() or c.isalnum() or c.isspace() or c in punct:
continue
emoji.add(c)
all = df_text.copy()
# Meta Text Features
textfeats = ['param_123']
for cols in textfeats:
all[cols] = all[cols].astype(str)
all[cols + '_num_cap'] = all[cols].apply(lambda x: count_regexp_occ('[А-ЯA-Z]', x))
all[cols + '_num_low'] = all[cols].apply(lambda x: count_regexp_occ('[а-яa-z]', x))
all[cols + '_num_rus_cap'] = all[cols].apply(lambda x: count_regexp_occ('[А-Я]', x))
all[cols + '_num_eng_cap'] = all[cols].apply(lambda x: count_regexp_occ('[A-Z]', x))
all[cols + '_num_rus_low'] = all[cols].apply(lambda x: count_regexp_occ('[а-я]', x))
all[cols + '_num_eng_low'] = all[cols].apply(lambda x: count_regexp_occ('[a-z]', x))
all[cols + '_num_dig'] = all[cols].apply(lambda x: count_regexp_occ('[0-9]', x))
all[cols + '_num_pun'] = all[cols].apply(lambda x: sum(c in punct for c in x))
all[cols + '_num_space'] = all[cols].apply(lambda x: sum(c.isspace() for c in x))
all[cols + '_num_chars'] = all[cols].apply(len) # Count number of Characters
all[cols + '_num_words'] = all[cols].apply(lambda comment: len(comment.split())) # Count number of Words
all[cols + '_num_unique_words'] = all[cols].apply(lambda comment: len(set(w for w in comment.split())))
all[cols + '_ratio_unique_words'] = all[cols+'_num_unique_words'] / (all[cols+'_num_words']+1)
textfeats = ['text']
for cols in textfeats:
all[cols] = all[cols].astype(str)
all[cols + '_num_cap'] = all[cols].apply(lambda x: count_regexp_occ('[А-ЯA-Z]', x))
all[cols + '_num_low'] = all[cols].apply(lambda x: count_regexp_occ('[а-яa-z]', x))
all[cols + '_num_rus_cap'] = all[cols].apply(lambda x: count_regexp_occ('[А-Я]', x))
all[cols + '_num_eng_cap'] = all[cols].apply(lambda x: count_regexp_occ('[A-Z]', x))
all[cols + '_num_rus_low'] = all[cols].apply(lambda x: count_regexp_occ('[а-я]', x))
all[cols + '_num_eng_low'] = all[cols].apply(lambda x: count_regexp_occ('[a-z]', x))
all[cols + '_num_dig'] = all[cols].apply(lambda x: count_regexp_occ('[0-9]', x))
all[cols + '_num_pun'] = all[cols].apply(lambda x: sum(c in punct for c in x))
all[cols + '_num_space'] = all[cols].apply(lambda x: sum(c.isspace() for c in x))
all[cols + '_num_emo'] = all[cols].apply(lambda x: sum(c in emoji for c in x))
all[cols + '_num_row'] = all[cols].apply(lambda x: x.count('/\n'))
all[cols + '_num_chars'] = all[cols].apply(len) # Count number of Characters
all[cols + '_num_words'] = all[cols].apply(lambda comment: len(comment.split())) # Count number of Words
all[cols + '_num_unique_words'] = all[cols].apply(lambda comment: len(set(w for w in comment.split())))
all[cols + '_ratio_unique_words'] = all[cols+'_num_unique_words'] / (all[cols+'_num_words']+1) # Count Unique Words
all[cols +'_num_stopwords'] = all[cols].apply(lambda x: len([w for w in x.split() if w in stopwords]))
all[cols +'_num_words_upper'] = all[cols].apply(lambda x: len([w for w in str(x).split() if w.isupper()]))
all[cols +'_num_words_lower'] = all[cols].apply(lambda x: len([w for w in str(x).split() if w.islower()]))
all[cols +'_num_words_title'] = all[cols].apply(lambda x: len([w for w in str(x).split() if w.istitle()]))
textfeats = ['title']
for cols in textfeats:
all[cols] = all[cols].astype(str)
all[cols + '_num_cap'] = all[cols].apply(lambda x: count_regexp_occ('[А-ЯA-Z]', x))
all[cols + '_num_low'] = all[cols].apply(lambda x: count_regexp_occ('[а-яa-z]', x))
all[cols + '_num_rus_cap'] = all[cols].apply(lambda x: count_regexp_occ('[А-Я]', x))
all[cols + '_num_eng_cap'] = all[cols].apply(lambda x: count_regexp_occ('[A-Z]', x))
all[cols + '_num_rus_low'] = all[cols].apply(lambda x: count_regexp_occ('[а-я]', x))
all[cols + '_num_eng_low'] = all[cols].apply(lambda x: count_regexp_occ('[a-z]', x))
all[cols + '_num_dig'] = all[cols].apply(lambda x: count_regexp_occ('[0-9]', x))
all[cols + '_num_pun'] = all[cols].apply(lambda x: sum(c in punct for c in x))
all[cols + '_num_space'] = all[cols].apply(lambda x: sum(c.isspace() for c in x))
all[cols + '_num_chars'] = all[cols].apply(len) # Count number of Characters
all[cols + '_num_words'] = all[cols].apply(lambda comment: len(comment.split())) # Count number of Words
all[cols + '_num_unique_words'] = all[cols].apply(lambda comment: len(set(w for w in comment.split())))
all[cols + '_ratio_unique_words'] = all[cols+'_num_unique_words'] / (all[cols+'_num_words']+1)
df_train = all[all['deal_probability'].notnull()]
df_test = all[all['deal_probability'].isnull()]
df_all_tmp = all.drop(['deal_probability','param_123','title','text'],axis=1)
tmp_columns = df_all_tmp.columns.values
for i in tmp_columns:
print (i)
df_train[i].to_pickle('/tmp/features/text_agg/train/' + str(i))
df_test[i].to_pickle('/tmp/features/text_agg/test/' + str(i))
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import FeatureUnion
from scipy.sparse import hstack, csr_matrix
from nltk.corpus import stopwords
import pickle
from scipy import sparse
from nltk.tokenize.toktok import ToktokTokenizer # tokenizer tested on russian
from nltk.stem.snowball import RussianStemmer
from nltk import sent_tokenize # should be multilingual
from string import punctuation
from nltk import sent_tokenize
from nltk.corpus import stopwords
from gensim.models import FastText
import re
from string import punctuation
punct = set(punctuation)
# Tf-Idf
def clean_text(s):
s = re.sub('м²|\d+\\/\d|\d+-к|\d+к', ' ', s.lower())
s = re.sub('\\s+', ' ', s)
s = s.strip()
return s
print("\n[TF-IDF] Term Frequency Inverse Document Frequency Stage")
russian_stop = set(stopwords.words('russian'))
df_text['param_123'] = df_text['param_123'].apply(lambda x: clean_text(x))
df_text['title'] = df_text['title'].apply(lambda x: clean_text(x))
df_text["text"] = df_text["text"].apply(lambda x: clean_text(x))
df_train_text = df_text[df_text['deal_probability'].notnull()]
df_test_text = df_text[df_text['deal_probability'].isnull()]
tfidf_para = {
"stop_words": russian_stop,
"analyzer": 'word',
"token_pattern": r'\w{1,}',
"lowercase": True,
"sublinear_tf": True,
"dtype": np.float32,
"norm": 'l2',
#"min_df":5,
#"max_df":.9,
"smooth_idf":False
}
def get_col(col_name): return lambda x: x[col_name]
vectorizer = FeatureUnion([
('text',TfidfVectorizer(
ngram_range=(1, 2),
max_features=200000,
**tfidf_para,
preprocessor=get_col('text'))),
('title',TfidfVectorizer(
ngram_range=(1, 2),
stop_words = russian_stop,
#lowercase=True,
#max_features=7000,
preprocessor=get_col('title'))),
('param_123',TfidfVectorizer(
ngram_range=(1, 2),
stop_words = russian_stop,
#lowercase=True,
#max_features=7000,
preprocessor=get_col('param_123')))
])
vectorizer.fit(df_text.to_dict('records'))
ready_df_train = vectorizer.transform(df_train_text.to_dict('records'))
ready_df_test = vectorizer.transform(df_test_text.to_dict('records'))
tfvocab = vectorizer.get_feature_names()
sparse.save_npz('/tmp/features/nlp/ready_df_train_200000_new.npz', ready_df_train)
sparse.save_npz('/tmp/features/nlp/ready_df_test_200000_new.npz', ready_df_test)
with open('/tmp/features/nlp/tfvocab_200000_new.pkl', 'wb') as tfvocabfile:
pickle.dump(tfvocab, tfvocabfile)
|
[
"noreply@github.com"
] |
senkin13.noreply@github.com
|
e29535f0d26e87b86587500aa5e2cb512dab6bd2
|
1118aec39a839da2ebc508f1d2a6b377aa70274d
|
/src/lib/test_weakset.py
|
9f3e8aba2ca12ecf934a509ad65cbf622dd40597
|
[] |
no_license
|
serg0987/python
|
b3a9a2b22b4ef5a39e612a0a170ba9629933c802
|
074449ad6b3a90352939c55a9db37bd248cab428
|
refs/heads/master
| 2020-05-15T09:30:17.500158
| 2015-10-18T21:28:08
| 2015-10-18T21:28:08
| 2,454,952
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
# -*- coding: utf-8 -*-
'''
weakref — Weak references
link: http://docs.python.org/2/library/weakref.html
'''
import unittest
from weakref import WeakSet, WeakValueDictionary
class Object:
pass
class TestWeakSet(unittest.TestCase):
def test_len(self):
obj = Object()
obj2 = Object()
ws = WeakSet([obj])
self.assertIn(obj, ws)
self.assertEqual(len(ws), 1)
ws.add(obj2)
self.assertEqual(len(ws), 2)
self.assertIn(obj2, ws)
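        # Dropping the only strong reference lets the WeakSet forget obj
        # (collection is immediate under CPython's reference counting).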
del obj
self.assertEqual(len(ws), 1)
self.assertIn(obj2, ws)
if __name__ == '__main__':
unittest.main()
|
[
"serg0987@gmail.com"
] |
serg0987@gmail.com
|
836368a464341d665f660be2e150e73f7438b986
|
e2ea841245e6e28604d529a34e4b2e21f41e97cd
|
/NeuralNetworkUI/NetworkXNetworkInterpreter.py
|
95a0022392281310632171b99fc226b594d9be89
|
[] |
no_license
|
chezanthony/NaiveNEAT
|
81e20db90c454521b5efa4c066659f25c04e6f4d
|
98881c0cbcc6b6fd1c9bcd347896a5537943aba8
|
refs/heads/master
| 2023-03-23T16:48:09.661784
| 2020-08-31T17:11:37
| 2020-08-31T17:11:37
| 289,199,711
| 0
| 0
| null | 2020-08-24T18:27:07
| 2020-08-21T06:51:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,420
|
py
|
class CNetworkXNetworkInterpreter:
@staticmethod
def interpret_network(network, nx):
node_container =\
network.get_node_container()
connection_container =\
network.get_connection_container()
CNetworkXNetworkInterpreter.\
_add_nodes(node_container, nx)
CNetworkXNetworkInterpreter.\
_add_connections(connection_container,
node_container,
nx)
@staticmethod
def _add_nodes(nodes, nx):
for node in nodes.values():
nx.add_node(node)
@staticmethod
def _add_connections(connections,
nodes,
nx):
for connection in connections.values():
node_tuple =\
CNetworkXNetworkInterpreter.\
_get_nodes_from_connection(nodes,
connection)
n_input_node = node_tuple[0]
n_output_node = node_tuple[1]
nx.add_connection(n_input_node,
n_output_node)
@staticmethod
def _get_nodes_from_connection(nodes,
connection):
input_node =\
nodes[connection.get_input_node_key()]
output_node =\
nodes[connection.get_output_node_key()]
return input_node, output_node
|
[
"chezanthonyintud@gmail.com"
] |
chezanthonyintud@gmail.com
|
1579047ff059b9711269bf038bcafe72d691e46b
|
7a305f7f5bfb0789f12fe3f846a5aa10fcb79d4b
|
/python_module/stomp/test/local_test.py
|
fc96b984880423f96252e893cf27c1b543cee504
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
GeneralizedLearningUtilities/SuperGLU
|
873dbd8c9c19117721e01e8ce7da1edea962b803
|
1c373d1358431fb96dd70b324b26a14fc8ed1fcb
|
refs/heads/master
| 2023-01-03T00:47:18.183214
| 2021-08-24T16:49:24
| 2021-08-24T16:49:24
| 38,255,548
| 8
| 6
|
MIT
| 2022-12-29T09:47:41
| 2015-06-29T15:36:46
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,190
|
py
|
import time
import unittest
import stomp
from stomp.listener import TestListener
from stomp.test.testutils import *
class TestIPV6Send(unittest.TestCase):
def setUp(self):
conn = stomp.Connection11(get_ipv6_host())
listener = TestListener('123')
conn.set_listener('', listener)
conn.start()
conn.connect('admin', 'password', wait=True)
self.conn = conn
self.listener = listener
self.timestamp = time.strftime('%Y%m%d%H%M%S')
def tearDown(self):
if self.conn:
self.conn.disconnect(receipt=None)
def test_ipv6(self):
queuename = '/queue/testipv6-%s' % self.timestamp
self.conn.subscribe(destination=queuename, id=1, ack='auto')
self.conn.send(body='this is a test', destination=queuename, receipt='123')
self.listener.wait_on_receipt()
self.assertTrue(self.listener.connections == 1, 'should have received 1 connection acknowledgement')
self.assertTrue(self.listener.messages == 1, 'should have received 1 message')
self.assertTrue(self.listener.errors == 0, 'should not have received any errors')
|
[
"daniel.auerbach@gmail.com"
] |
daniel.auerbach@gmail.com
|
505e5e0ce0cb191a5ec404f1e81be10da0578bf5
|
268d9c21243e12609462ebbd6bf6859d981d2356
|
/Python/python_stack/Django/Dojo_ninjas/main/apps/dojo_ninjas/migrations/0002_dojo_desc.py
|
58a3322cbefd8d01f3ac70e8cbe91f35e5cc03d2
|
[] |
no_license
|
dkang417/cdj
|
f840962c3fa8e14146588eeb49ce7dbd08b8ff4c
|
9966b04af1ac8a799421d97a9231bf0a0a0d8745
|
refs/heads/master
| 2020-03-10T03:29:05.053821
| 2018-05-23T02:02:07
| 2018-05-23T02:02:07
| 129,166,089
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-05-08 14:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dojo_ninjas', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='dojo',
name='desc',
field=models.TextField(null=True),
),
]
|
[
"dkang417@gmail.com"
] |
dkang417@gmail.com
|
cd802f3c8a2cb2614620310b207adc8d3b08794c
|
e709ba8bea070d1178b14609b771f7d751a881ef
|
/Ada.py
|
83a1dca833ce65a769f39737b19586867d4d897b
|
[] |
no_license
|
youko70s/2018-Humana-TAMU-HealthCare-Competition
|
6b25e0e16cd412243a78ed861a88dd5dd79bc92a
|
19ea30673c59a7d0abd9279da3517855282a382f
|
refs/heads/master
| 2020-03-31T09:58:22.148712
| 2019-02-22T23:11:53
| 2019-02-22T23:11:53
| 152,117,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,158
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 7 18:01:07 2018
@author: youko
"""
##import packages needed
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pprint import pprint
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
from sklearn.metrics import precision_recall_curve
from sklearn.utils.fixes import signature
##setting n_estimators and learning rate
myData=pd.read_csv('deleted_numerical_data_full.csv')
del myData['Unnamed: 0']
##split the whole data set in to Active and InActive
Active=myData[myData['AMI_FLAG']==1]
InActive=myData[myData['AMI_FLAG']==0]
##split Active and InActive into test dataset and training set
trA, tsA = train_test_split(Active, test_size=0.382)
trIn, tsIn = train_test_split(InActive, test_size=(2726*1.62)/97274)
##combine the test dataframe
frames = [tsA, tsIn]
test_set = pd.concat(frames)
attributes=list(myData.columns.values)
attributes.remove('AMI_FLAG')
attributes.remove('ID')
target=['AMI_FLAG']
X_test=pd.DataFrame(test_set, columns=attributes)
Y_test=pd.DataFrame(test_set, columns=target)
##====================================================================
##try it on one subsample
remain_trIn,chosen_trIn=train_test_split(trIn, test_size=len(trA)/len(trIn))
frames = [trA, chosen_trIn]
subsample = pd.concat(frames)
X_train = pd.DataFrame(subsample, columns=attributes)
Y_train=pd.DataFrame(subsample, columns=target)
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2, min_samples_split=20, min_samples_leaf=5),
algorithm="SAMME",
n_estimators=50, learning_rate=0.5)
bdt.fit(X_train, Y_train)
pred=bdt.predict(X_test)
matrix = metrics.confusion_matrix(Y_test, pred)
print(matrix)
metrics.roc_auc_score(Y_test,pred)
##=====================================================================
#n_estimators=400
#learning_rate=1.0
'''
Adapredict_value=[]
remain_trIn=trIn
bdtscore=[]
for i in range(0,5,1):
trIn=remain_trIn
remain_trIn,chosen_trIn=train_test_split(trIn, test_size=len(trA)/len(remain_trIn))
##bind the chosen_trIn and trA to create a subsample
frames = [trA, chosen_trIn]
subsample = pd.concat(frames)
##get X_train and Y_train
X_train = pd.DataFrame(subsample, columns=attributes)
Y_train=pd.DataFrame(subsample, columns=target)
##build the random forest tree model
'''#here the parameters?
'''
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2, min_samples_split=20, min_samples_leaf=5),
algorithm="SAMME",
n_estimators=200, learning_rate=0.8)
bdt.fit(X_train, Y_train)
Adapredict_value.append(bdt.predict(X_test))
bdtscore.append(bdt.score(X_test,Y_test))
##apply the majority voting scheme for the prediction results generated by subsamples
df = pd.DataFrame(Adapredict_value)
Adap=df.sum(axis=0)
Adap=Adap.to_frame()
Adap.index=test_set.index
Adap.columns = ['votes']
def label (row):
if row['votes'] >24:
return 1
else:
return 0
return 'Other'
Adap['Label'] = Adap.apply (lambda row: label (row),axis=1)
print("Accuracy:",metrics.accuracy_score(Y_test, Adap['Label']))
#trIn=remain_trIn
#remain_trIn,chosen_trIn=train_test_split(trIn, test_size=len(trA)/len(trIn))
##bind the chosen_trIn and trA to create a subsample
#frames = [trA, chosen_trIn]
#ubsample = pd.concat(frames)
#subsample = pd.concat(frames)
##get X_train and Y_train
#X_train = pd.DataFrame(subsample, columns=attributes)
#Y_train=pd.DataFrame(subsample, columns=target)
##build the Adaboost model
#bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2, min_samples_split=20, min_samples_leaf=5),
# algorithm="SAMME",
# n_estimators=200, learning_rate=0.8)
#bdt.fit(X_train, Y_train)
#pred=bdt.predict(X_test)
#bdt.score(X_test,Y_test)
'''
|
[
"noreply@github.com"
] |
youko70s.noreply@github.com
|
a5049b077e0a7a7ba99e60920e4dd3e0ba882227
|
c2bd34dfcfb45f6b0da327badc928779773696a8
|
/math_paradox.py
|
9190ba1593c547758962be10c41a487ae069f9b0
|
[] |
no_license
|
lisiynos/sis-cpy
|
2f8555b24c9e301649569b1617ad1742fc763143
|
d43c62697bb37d763003d1e1e4bf22e92ec5f8dc
|
refs/heads/master
| 2021-01-02T09:28:22.476953
| 2018-03-29T11:08:09
| 2018-03-29T11:08:39
| 99,215,997
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 950
|
py
|
# We start from the equality:
assert 16 + 45 == 25 + 36
# which we rewrite as:
assert 16 + 9 * 5 == 25 + 9 * 4
# Move some of the terms to the other side of the equality,
# naturally flipping their signs:
assert 16 - 9 * 4 == 25 - 9 * 5
# Now add 81/4 to both sides.
assert 16 - 9 * 4 + 81 / 4 == 25 - 9 * 5 + 81 / 4
# Note that both sides contain perfect squares:
assert 16 - 9 * 4 + 81 / 4 == 16 - 2 * 4 * 9 / 2 + 81 / 4 == (4 - 9 / 2) ** 2
assert 25 - 9 * 5 + 81 / 4 == 25 - 2 * 5 * 9 / 2 + 81 / 4 == (5 - 9 / 2) ** 2
# Taking the square root of both sides of the equality, we get that
assert 4 - 9 / 2 == - (5 - 9 / 2)
assert abs(4 - 9 / 2) == abs(5 - 9 / 2)
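# Added note: the flaw is the square-root step. sqrt(x**2) equals abs(x), not x;
# here 4 - 9/2 is negative while 5 - 9/2 is positive, so only the abs()
# comparison on the last line is legitimate.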
|
[
"denis@taxify.eu"
] |
denis@taxify.eu
|
fa2e03c8a8e5896e8ea1a1cf821802b666c2ccf0
|
7fa5dd810aa8f2e20c59590aa6be132de749542c
|
/GestorCaptura.py
|
59ab586fbc557fb008841d7c6ce4bce079d51033
|
[] |
no_license
|
such-stupid6/-Miner_Detector
|
802fa5cde37bb508dbcc7f7c927eb95bdc5ee818
|
7e2b265781d36d0bd877238a3aa691a2afaae3de
|
refs/heads/master
| 2023-03-01T16:25:19.005802
| 2021-01-24T04:20:13
| 2021-01-24T04:20:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,096
|
py
|
import time
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# Reads the input file of URLs
def readArchive():
archive = open("./Entrenamiento/data0.txt" , "r")
print(archive)
j = 434
for i in archive.readlines():
try:
driver = createSession()
managmentBrowser(driver, i, j)
j+=1
except:
archive = open("./Entrenamiento/noEscaneada.txt" , "a")
archive.write(i)
pass
# Launches the browser
def createSession():
driver = webdriver.Chrome("./chromedriver")
driver.maximize_window()
return driver
# Controls parameters such as the execution time
def managmentBrowser(driver, url, i):
print(str(url))
driver.implicitly_wait(100)
driver.get('https://'+str(url))
trafficStartCapture(i)
time.sleep(30)
driver.implicitly_wait(100)
driver.quit()
def execution():
    readArchive()
# Runs tshark to capture traffic
def trafficStartCapture(i):
print(i)
cmd = "sudo tshark -i enp1s0 -a duration:300 -w - > /home/steven/Documents/GestorCapture/s/Entrenamiento/{}.pcap".format(i)
os.system(cmd)
if __name__ == '__main__':
execution()
|
[
"steven.bernal@correo.icesi.edu.co"
] |
steven.bernal@correo.icesi.edu.co
|
ef52298f256957366a62065c6bbda48bbbfa0568
|
8efd8bcd3945d88370f6203e92b0376ca6b41c87
|
/problems100_200/131_Palindrome_Partitioning.py
|
4fd4acc10c6135bdd9be20744a848feda4634b56
|
[] |
no_license
|
Provinm/leetcode_archive
|
732ad1ef5dcdfdde6dd5a33522e86f7e24ae2db5
|
3e72dcaa579f4ae6f587898dd316fce8189b3d6a
|
refs/heads/master
| 2021-09-21T08:03:31.427465
| 2018-08-22T15:58:30
| 2018-08-22T15:58:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 834
|
py
|
#coding=utf-8
'''
131. Palindrome Partitioning
Given a string s, partition s such that every substring of the partition is a palindrome.
Return all possible palindrome partitioning of s.
For example, given s = "aab",
Return
[
["aa","b"],
["a","a","b"]
]
'''
class Solution:
def partition(self, s):
"""
:type s: str
:rtype: List[List[str]]
"""
if not s:
return [[]]
res = []
for idx, item in enumerate(s):
cur_s = s[:idx+1]
if self.is_p(cur_s):
r = self.partition(s[idx+1:])
for sub_item in r:
res.append([cur_s] + sub_item)
return res
def is_p(self, s):
return s == s[::-1]
s = Solution()
r = s.partition("aab")
print(r)
## Depth-first search (DFS) approach
|
[
"zhouxin@gmail.com"
] |
zhouxin@gmail.com
|
0b047206028e48fd160273fdfe964e854fa7c65d
|
82418725b9f000282171dc8a455402078858d0f9
|
/headache/main.py
|
00a28263b96e3b17acff81c9bec4c3bea08d2491
|
[] |
no_license
|
sabinzero/various-py
|
ee2c9c16334ab3e985da2e36a3cebbceca08c6ea
|
2fb859a1fb64b764ef8c75a7a66db45b6e46a01c
|
refs/heads/master
| 2020-11-28T08:42:19.359185
| 2019-12-23T13:37:39
| 2019-12-23T13:37:39
| 229,759,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
fact = 1
total = 0
n = int(input("enter value of n: "))
for i in range(1, n + 1):
    fact = 1
    for j in range(1, i + 1):
        fact *= j
    total += fact
    print("%d! = %d" % (i, fact))
print("sum of factorial of every individual number is : %d" % total)
|
[
"sabinzero@gmail.com"
] |
sabinzero@gmail.com
|
38a7f9d190da0e26eb059cb2731c59a8ff39c6b9
|
288ab32e56cf34ec8a49cb57cd9674a8792b004e
|
/roles/migrations/0066_auto_20190813_1118.py
|
6f6e404b79bfcdeb44045d1dd00b4d8d0fb24437
|
[] |
no_license
|
Artvell/apex
|
a231747a81e09bb06bab84c61aaac7d5069cba90
|
46dbfe5f0cd9480cdf77197116d8ccef099e25a4
|
refs/heads/master
| 2022-04-10T18:20:18.602564
| 2020-03-09T12:21:52
| 2020-03-09T12:21:52
| 219,150,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,660
|
py
|
# Generated by Django 2.2 on 2019-08-13 06:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('roles', '0065_auto_20190813_1113'),
]
operations = [
migrations.AlterField(
model_name='purchase',
name='fact_kol',
field=models.FloatField(blank=True, null=True, verbose_name='Фактическое кол-во'),
),
migrations.AlterField(
model_name='purchase',
name='is_accepted_zakup',
field=models.BooleanField(default=False, verbose_name='Принято закупщиком?'),
),
migrations.AlterField(
model_name='purchase',
name='is_delivered',
field=models.BooleanField(default=False, verbose_name='Куплен?'),
),
migrations.AlterField(
model_name='purchase',
name='is_returned',
field=models.BooleanField(default=False, verbose_name='Возвращен?'),
),
migrations.AlterField(
model_name='purchase',
name='new_cost',
field=models.FloatField(blank=True, null=True, verbose_name='Новая цена'),
),
migrations.AlterField(
model_name='purchase',
name='purchased_kol',
field=models.FloatField(default=0.0, verbose_name='Купленное кол-во'),
),
migrations.AlterField(
model_name='purchase',
name='summ',
field=models.FloatField(blank=True, null=True, verbose_name='Сумма'),
),
]
|
[
"hurilla2003@mail.ru"
] |
hurilla2003@mail.ru
|
eb7b37d089415c0132787510a0ff8cd33942e87c
|
b06e5e9bbc2e801135822ffee40ac52475cf5a84
|
/Revision/Recursion/productSum.py
|
97d681ba90e3ce385769cb965fc2cf04bcf56cd3
|
[] |
no_license
|
vikashvishnu1508/algo
|
7755d4d5da3c2866d6dd623793a7777623778640
|
a376565362123c44288233b49e20293ebf870233
|
refs/heads/main
| 2023-06-09T08:20:46.140158
| 2021-06-26T19:05:31
| 2021-06-26T19:05:31
| 355,154,261
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
# Tip: You can use the type(element) function to check whether an item
# is a list or an integer.
def productSum(array):
    return getSum(array, 1)

def getSum(array, depth):
    # Depth-weighted sum: the whole sum of a nested list is multiplied by its
    # depth, so an element nested k levels deep picks up a factor of 1*2*...*k.
    # (The original multiplied each scalar only by its own depth, which gives
    # the wrong answer for the standard version of this problem.)
    total = 0
    for curItem in array:
        if isinstance(curItem, list):
            total += getSum(curItem, depth + 1)
        else:
            total += curItem
    return total * depth
array = [5, 2, [7, -1], 3, [6, [-13, 8], 4]]
print(productSum(array))
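# With the corrected depth weighting above, the sample array prints 12:
# 1*(5 + 2 + 2*(7 - 1) + 3 + 2*(6 + 3*(-13 + 8) + 4)) = 12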
|
[
"vikashvishnu1508@gmail.com"
] |
vikashvishnu1508@gmail.com
|
d22e6d2825462adbd210700d367af82d43fbe5a7
|
ffced6fdbfd50ccd8db25e95bbd8dfa799a2e515
|
/school-project/pro_blog/pro_blog/wsgi.py
|
df55660045bbd2b1008881944cca1ca38dd029ee
|
[] |
no_license
|
danilF4/school-project
|
6751ad8996ab50090e487503b709f37f4f41841d
|
3dad6d8c72b89e84e2225d0d8b54e0d131798779
|
refs/heads/master
| 2022-12-11T05:49:07.556358
| 2019-01-24T21:53:49
| 2019-01-24T21:53:49
| 166,719,375
| 0
| 1
| null | 2022-11-27T03:42:50
| 2019-01-20T23:09:14
|
Python
|
UTF-8
|
Python
| false
| false
| 394
|
py
|
"""
WSGI config for pro_blog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pro_blog.settings")
application = get_wsgi_application()
|
[
"danilfatkin23082000@gmail.com"
] |
danilfatkin23082000@gmail.com
|
6b3dc31037a691d81abf299b989c4221b7da7ad9
|
20aa042f9c37d52c86aee8fef58652e321a0a61a
|
/titanic/analyze.py
|
bc1d69470896cda3f937e04b6bddeab63a5e6b3b
|
[] |
no_license
|
EagleEYErus/coursera_ml
|
6705de81d488608169911c3b6abd7d5ce268d9d5
|
e6941a4542e0fe53a945567335299d0f3c18b106
|
refs/heads/master
| 2021-04-29T14:08:10.431471
| 2018-02-21T12:40:23
| 2018-02-21T12:40:23
| 121,766,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,971
|
py
|
import re
from collections import Counter
import pandas as pd
from titanic.file import write_file
# Read data from CSV
data = pd.read_csv("titanic.csv", index_col="PassengerId")
# Counting gender
count_sex = data["Sex"].value_counts()
write_file(sub_dirname="analyze", filename="count_sex.txt", data_to_file=f"{count_sex['male']} {count_sex['female']}")
# Counting survived
count_survived = data["Survived"].value_counts()
per_survived = round(count_survived[1] / sum(count_survived) * 100, 2)
write_file(sub_dirname="analyze", filename="count_survived.txt", data_to_file=f"{per_survived}")
# Counting people of first class
count_first_class = data["Pclass"].value_counts()
per_first_class = round(count_first_class[1] / sum(count_first_class) * 100, 2)
write_file(sub_dirname="analyze", filename="count_first_class.txt", data_to_file=f"{per_first_class}")
# Counting mean and median of age
age_mean = data["Age"].mean()
age_median = data["Age"].median()
write_file(sub_dirname="analyze", filename="age_mean.txt", data_to_file=f"{round(age_mean, 2)} {age_median}")
# Counting correlation between SibSp and Parch
corr = data["SibSp"].corr(data["Parch"], method="pearson")
write_file(sub_dirname="analyze", filename="corr.txt", data_to_file=f"{round(corr, 2)}")
# Finding the most popular female name on the ship
female_fullnames = data.loc[(data["Sex"] == "female"), ["Name"]]["Name"]
female_names = []
for name in female_fullnames:
if "Mrs." in name:
try:
female_names.append(re.findall(r"\((.*)\)", name)[0].split()[0])
except IndexError:
pass
elif "Miss." in name:
try:
female_names.append(re.findall("(Miss.)\s(.+)", name)[0][1].split()[0])
except IndexError:
pass
count_names = Counter(female_names)
most_popular_name = max(count_names, key=count_names.get)
write_file(sub_dirname="analyze", filename="most_popular_name.txt", data_to_file=f"{most_popular_name}")
|
[
"alex_afsoc@mail.ru"
] |
alex_afsoc@mail.ru
|
22082fac0984c7728a7ac71f5666b9a60a1c7171
|
15cace5f904c5c2389ca3cc02b5ff1fc029c7651
|
/parsing/management/commands/scraper/test.py
|
cc3b4a7431673dd5f8c4b261fd80953be86ccffa
|
[] |
no_license
|
ghostnoop/django-youtube-parser-asyncio
|
fb7146e788dfe5986ad31a45a5d5b1da918583c6
|
631bc4ddc0eed0407f09a810c334a0e9d8d0ed7a
|
refs/heads/main
| 2023-03-26T12:57:32.248097
| 2021-03-25T11:02:54
| 2021-03-25T11:02:54
| 341,303,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
main_list = [i for i in range(100)]
size = len(main_list) // 4

a = main_list[:size]
b = main_list[size:size * 2]
c = main_list[size * 2:size * 3]
d = main_list[size * 3:]
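
# Hedged generalization (not part of the original file): the four hard-coded
# slices above extend to n roughly equal chunks; `chunks` is a made-up helper.
def chunks(seq, n):
    # The last chunk absorbs the remainder, matching d = main_list[size * 3:]
    size = len(seq) // n
    return [seq[k * size:(k + 1) * size] if k < n - 1 else seq[k * size:]
            for k in range(n)]

assert chunks(main_list, 4) == [a, b, c, d]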
|
[
"giliyazovmarat@gmail.com"
] |
giliyazovmarat@gmail.com
|
334391aae07943250837c1baee5637ca0f10d64d
|
14946cd014b3bbd20a8cb687ef10748901a1762b
|
/Task/Task/settings.py
|
0e5dc408b452a5f6777caf3219cfc1b915d7f11c
|
[] |
no_license
|
bhatiboss/Task-Manager
|
ade247e0c856e2f79ee720461f18200ee23f3036
|
445f33323d7f5f3b64140a38edbce22c4e420028
|
refs/heads/master
| 2023-05-23T01:41:01.330288
| 2021-06-13T20:23:05
| 2021-06-13T20:23:05
| 376,008,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,338
|
py
|
"""
Django settings for Task project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8jyg%wp&4vz%%vu!t%fo=nm5d^91%8tfouklb)*_qxu$y%6t6d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Task.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [ os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Task.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=[
os.path.join(BASE_DIR,'templates/static')
]
CORS_ALLOWED_ORIGINS = [
"http://localhost:3000",
]
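
# Hedged aside (not part of the original settings): the hard-coded SECRET_KEY
# above should not ship to production; a common pattern is to read it from the
# environment instead. The variable name DJANGO_SECRET_KEY is an assumption.
# SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", SECRET_KEY)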
|
[
"harshitbhati40@gmail.com"
] |
harshitbhati40@gmail.com
|
5c816bd4cd0e394c580f6a6778a47207256c2d70
|
4f696b0712f530f0d8e7d968ee52ed4dda97a2c6
|
/admix/dataset/_index.py
|
2ffb7196e475cb142a769a03dd81c359aa616806
|
[] |
no_license
|
KangchengHou/admix-kit
|
5d0e1f4225f6339f10bece6fded7c156794bccbe
|
136e8999d94440d604a2dcfb7b7d1a340a5f6e67
|
refs/heads/main
| 2023-09-01T08:58:05.219692
| 2023-08-24T17:33:58
| 2023-08-24T17:33:58
| 335,100,490
| 7
| 1
| null | 2022-08-20T23:01:26
| 2021-02-01T22:23:02
|
Python
|
UTF-8
|
Python
| false
| false
| 4,468
|
py
|
import pandas as pd
import numpy as np
from typing import (
List,
Union,
Tuple,
Sequence,
)
def normalize_indices(
index, snp_names: pd.Index, indiv_names: pd.Index
) -> Tuple[Union[slice, int, np.ndarray], Union[slice, int, np.ndarray]]:
"""Normalize the indices to return the snp slices, and individual slices
Parameters
----------
index : [type]
[description]
snp_names : pd.index
[description]
indiv_names : pd.index
[description]
Returns
-------
[type]
[description]
Raises
------
ValueError
[description]
"""
# deal with tuples of length 1
if isinstance(index, tuple) and len(index) == 1:
index = index[0]
if isinstance(index, tuple):
if len(index) > 2:
raise ValueError(
"data can only be sliced in SNPs (first dim) and individuals (second dim)"
)
snp_ax, indiv_ax = unpack_index(index)
snp_ax = _normalize_index(snp_ax, snp_names)
indiv_ax = _normalize_index(indiv_ax, indiv_names)
return snp_ax, indiv_ax
# convert the indexer (integer, slice, string, array) to the actual positions
# reference: https://github.com/theislab/anndata/blob/566f8fe56f0dce52b7b3d0c96b51d22ea7498156/anndata/_core/index.py#L16
def _normalize_index(
indexer: Union[
slice,
int,
str,
np.ndarray,
],
index: pd.Index,
) -> Union[slice, int, np.ndarray]: # ndarray of int
"""Convert the indexed (integer, slice, string, array) to the actual positions
Parameters
----------
indexer : Union[ slice, int, str, np.ndarray, ]
[description]
index : pd.Index
[description]
Returns
-------
Union[slice, int, np.ndarray]
[description]
"""
if not isinstance(index, pd.RangeIndex):
assert (
index.dtype != float and index.dtype != int
), "Don't call _normalize_index with non-categorical/string names"
# the following is insanely slow for sequences,
# we replaced it using pandas below
def name_idx(i):
if isinstance(i, str):
i = index.get_loc(i)
return i
if isinstance(indexer, slice):
start = name_idx(indexer.start)
stop = name_idx(indexer.stop)
# string slices can only be inclusive, so +1 in that case
if isinstance(indexer.stop, str):
stop = None if stop is None else stop + 1
step = indexer.step
return slice(start, stop, step)
elif isinstance(indexer, (np.integer, int)):
return indexer
elif isinstance(indexer, str):
return index.get_loc(indexer) # int
elif isinstance(indexer, (Sequence, np.ndarray, pd.Index, np.matrix)):
if hasattr(indexer, "shape") and (
(indexer.shape == (index.shape[0], 1))
or (indexer.shape == (1, index.shape[0]))
):
indexer = np.ravel(indexer)
if not isinstance(indexer, (np.ndarray, pd.Index)):
indexer = np.array(indexer)
if issubclass(indexer.dtype.type, (np.integer, np.floating)):
return indexer # Might not work for range indexes
elif issubclass(indexer.dtype.type, np.bool_):
if indexer.shape != index.shape:
raise IndexError(
f"Boolean index does not match Dataset's shape along this "
f"dimension. Boolean index has shape {indexer.shape} while "
f"Dataset index has shape {index.shape}."
)
positions = np.where(indexer)[0]
return positions # np.ndarray[int]
else: # indexer should be string array
positions = index.get_indexer(indexer)
if np.any(positions < 0):
not_found = indexer[positions < 0]
raise KeyError(
f"Values {list(not_found)}, from {list(indexer)}, "
"are not valid obs/ var names or indices."
)
return positions # np.ndarray[int]
else:
raise IndexError(f"Unknown indexer {indexer!r} of type {type(indexer)}")
def unpack_index(index):
if not isinstance(index, tuple):
return index, slice(None)
elif len(index) == 2:
return index
elif len(index) == 1:
return index[0], slice(None)
else:
raise IndexError("invalid number of indices")
|
[
"kangchenghou@gmail.com"
] |
kangchenghou@gmail.com
|
4cb6895734e7784bb846ef54f0aa69215cadbcf7
|
524d9d26fa05d554261ad8ce97566bf7aa517b6a
|
/Logistic Regression/Logistic_Regression.py
|
2328431fab851f487ea625e082030e29e20839ab
|
[] |
no_license
|
karolsitarz/MLalgorithms-collection
|
9bfccc5af5b01a7e57b34f7e506e82c8c13c7d15
|
86f6ac95625dcf1290d1e353a7de01f7bc14f4ff
|
refs/heads/main
| 2023-08-14T04:08:32.388054
| 2021-10-04T15:11:47
| 2021-10-04T15:11:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,833
|
py
|
import numpy as np
import matplotlib.pyplot as plt  # needed by LogisticRegression.plot
class LogisticRegression():
def __init__(self,iterations=100,alpha=0.0115):
self.iterations = iterations
self.alpha = alpha
self.J = []
self.J_val = []
self.output = 0
self.o_val = 0
self.regularization = False
def sigmoid(self,a):
return 1/(1+np.exp(-a))
def split(self,dataset,value):
part1 = dataset[:int(len(dataset)*(1-value))]
part2 = dataset[len(part1):]
return part1, part2
def fit(self,X,y,regularization = False,validation = 0.1):
self.validation = validation
self.regularization = regularization
self.X , self.x_val = self.split(X,self.validation)
self.y , self.y_val = self.split(y.reshape(y.shape[0],1),self.validation)
self.m = self.X.shape[1]
self.W = np.random.randn(self.m,1)
self.m_val = self.x_val.shape[1]
self.W_val = np.random.randn(self.m_val,1)
assert self.X.shape[0] == self.y.shape[0] and self.x_val.shape[0] == self.y_val.shape[0]
for i in range(self.iterations):
a = self.X.dot(self.W)
a_val = self.x_val.dot(self.W_val)
self.output = self.sigmoid(a)
self.o_val = self.sigmoid(a_val)
assert self.output.shape == self.y.shape and self.o_val.shape == self.y_val.shape
self.cost()
self.update()
def cost(self,Lambda=0):
if self.regularization is False:
J = sum(self.y * np.log(self.output) + (1-self.y)*np.log(1-self.output))
self.J.append(*(1/-self.m)*J)
# Validation
J_val = sum(self.y_val * np.log(self.o_val) + (1-self.y_val)*np.log(1-self.o_val))
self.J_val.append(*(1/-self.m_val)*J_val)
        else:
            # Regularized cost: penalize the squared weights (was `W`, undefined)
            J = sum(self.y * np.log(self.output) + (1-self.y)*np.log(1-self.output)) + Lambda * sum(np.power(self.W,2))
            self.J.append(*(1/-self.m)*J)
            # Validation (was appending the training cost J by mistake)
            J_val = sum(self.y_val * np.log(self.o_val) + (1-self.y_val)*np.log(1-self.o_val)) + Lambda * sum(np.power(self.W_val,2))
            self.J_val.append(*(1/-self.m_val)*J_val)
def update(self):
dw = np.dot(self.X.T,self.output-self.y)
self.W = self.W - (self.alpha * dw)
# Validation
dw_val = np.dot(self.x_val.T, self.o_val - self.y_val)
self.W_val = self.W_val - (self.alpha * dw_val)
def predict(self,x,threshold=0.5):
prediction = self.sigmoid(np.dot(x,self.W))
prediction[prediction>threshold] = 1
prediction[prediction<=threshold] = 0
return prediction
def confusion_matrix(self,predictions,y):
assert predictions.shape[0] == y.shape[0]
self.TP, self.TN, self.FN, self.FP = 0,0,0,0
for i in range(len(predictions)):
if predictions[i]==1 and y[i]==1:self.TP+=1
elif predictions[i]==1 and y[i]==0:self.FP+=1
elif predictions[i]==0 and y[i]==1:self.FN+=1
elif predictions[i]==0 and y[i]==0:self.TN+=1
def precision(self):
return self.TP/(self.TP+self.FP)
def recall(self):
return self.TP/(self.TP+self.FN)
def accuracy(self):
return (self.TP+self.TN)/(self.TP+self.TN+self.FP+self.FN)
def F1_score(self):
return (2*self.precision()*self.recall())/(self.precision() + self.recall())
def plot(self):
x = np.linspace(0,self.iterations,self.iterations)
y = np.asarray(self.J)
y_val = np.asarray(self.J_val)
assert x.shape == y.shape and x.shape == y_val.shape
plt.plot(x,y)
plt.plot(x,y_val)
plt.xlabel('Iterations')
plt.ylabel('Loss')
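
# Hedged usage sketch (not part of the original class); the toy data below is
# made up, and alpha is kept small so the sigmoid does not saturate to 0/1.
if __name__ == "__main__":
    np.random.seed(0)
    X = np.random.randn(200, 3)
    y = (X[:, 0] + X[:, 1] > 0).astype(float)
    model = LogisticRegression(iterations=50, alpha=1e-4)
    model.fit(X, y)
    preds = model.predict(X[:20])
    model.confusion_matrix(preds, y[:20].reshape(-1, 1))
    print("accuracy:", model.accuracy())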
|
[
"noreply@github.com"
] |
karolsitarz.noreply@github.com
|
b1474276c846fd67467f0624f4fe65200dc1f5cd
|
cd117c1bcde2d7d7d7a389ef6889630b99a53059
|
/Lesson4/L4_T2/L4_T2/L4_T2.py
|
aea2e29ff75371393f396fd5bc0ecf1a83e577ab
|
[] |
no_license
|
mironnay/Python-Classes
|
e120cf5ea5403a907a9c567f9e634ab89cd04bea
|
6f44618519983b28e2b743f9f7f982d556fee177
|
refs/heads/master
| 2022-08-31T17:28:59.064379
| 2020-05-27T12:57:46
| 2020-05-27T12:57:46
| 259,941,900
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
user_line = input("Enter some number. I`ll calculate sum of all digits: ").strip()
while not user_line.isnumeric():
    user_line = input("Wrong format. Try again ").strip()

total = 0
for i in user_line:
    total += int(i)
print(total)
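# Equivalent one-liner: print(sum(int(d) for d in user_line))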
|
[
"noreply@github.com"
] |
mironnay.noreply@github.com
|
459e5bfca5c7c425942e7aed02526142213dbb81
|
f9d3f08a3eaa857d36e9348ab1d72273aefb30db
|
/peachbox/task/task_chain.py
|
0cb37ad15e3f745656e31850f82daf0e0f07fec0
|
[
"Apache-2.0"
] |
permissive
|
orakle/peachbox
|
466f0c4ce8d25be715928f34b7ba21e339dce990
|
568f10f14cac5cc2794dbb9b652aa3e86a881aa0
|
refs/heads/master
| 2021-01-15T08:37:39.714575
| 2015-06-24T11:01:44
| 2015-06-24T11:01:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,324
|
py
|
"""
Copyright 2015 D. Britzger, P. Pahl, S. Schubert

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from peachbox.task import Task

class TaskChain(Task):
    """Peachbox chain: a collection of tasks executed in sequence."""
    def __init__(self, tasks):
        super(TaskChain, self).__init__()
        self.set_tasks(tasks)

    def add_task(self, task):
        # Append a single task; `+=` would try to iterate over `task`
        self.tasks.append(task)

    def set_tasks(self, tasks):
        self.tasks = tasks

    def get_tasks(self):
        return self.tasks
def execute(self, rdd):
return self.execute_chain(rdd, self.tasks)
def execute_chain(self, rdd, tasks):
if len(tasks)==0:
raise ValueError('No tasks defined.')
ex = tasks[0].execute(rdd)
if len(tasks)==1:
return ex
else:
return self.execute_chain(ex, tasks[1:])
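
# Hedged usage sketch (not part of the original module): MyFilter and MyMapper
# below are hypothetical Task subclasses whose execute() maps RDD -> RDD.
#
#   chain = TaskChain([MyFilter(), MyMapper()])
#   result = chain.execute(input_rdd)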
|
[
"ph.pahl@gmail.com"
] |
ph.pahl@gmail.com
|