Dataset columns (one row per source file):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 2 to 616
- content_id: string, length 40
- detected_licenses: list, 0 to 69 entries
- license_type: string, 2 classes
- repo_name: string, length 5 to 118
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, length 4 to 63
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64, 2.91k to 686M, nullable
- star_events_count: int64, 0 to 209k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 23 classes
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string, 213 classes
- src_encoding: string, 30 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 2 to 10.3M
- extension: string, 246 classes
- content: string, length 2 to 10.3M
- authors: list, 1 entry
- author_id: string, length 0 to 212
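Rows with this schema can be iterated programmatically. Below is a minimal sketch assuming the Hugging Face `datasets` library and a placeholder dataset identifier (the actual dataset name is not given in this preview):

from datasets import load_dataset

# "some-org/some-python-corpus" is a placeholder identifier, not the real dataset name.
ds = load_dataset("some-org/some-python-corpus", split="train", streaming=True)

for row in ds:
    # Each row carries the columns listed above: repository metadata plus file contents.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the stored source file
    break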
80789ef559492ca6a64e0a8b1b121187f0272687
|
517795023b06c572c5fcbb2b80f0b280e5f49b08
|
/DriverCam/01_face_dataset.py
|
a05696f65fb82965686c2fb8f3ebba29a27f07c6
|
[] |
no_license
|
Nagrawal1510/Driver-Cam
|
89972d6bdb4ed8cc26a7441f523145fe2cbdf833
|
f815d54a4ed0601050ddb6b7e90548ed02645769
|
refs/heads/main
| 2023-01-27T13:27:38.278032
| 2020-12-16T06:57:36
| 2020-12-16T06:57:36
| 321,889,525
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
import cv2
import os
cam = cv2.VideoCapture(0)
#cam.set(3, 640) # set video width
#cam.set(4, 480) # set video height
face_detector = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
# For each person, enter one numeric face id
face_id = input('\n enter user id and press <return> ==> ')
print("\n [INFO] Initializing face capture. Look at the camera and wait ...")
# Initialize individual sampling face count
count = 0
while(True):
ret, img = cam.read()
# img = cv2.flip(img, -1) # flip video image vertically
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_detector.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2)
count += 1
# Save the captured image into the datasets folder
cv2.imwrite("dataset/User." + str(face_id) + '.' + str(count) + ".jpg", gray[y:y+h,x:x+w])
# cv2.imshow('image', img)
k = cv2.waitKey(100) & 0xff # Press 'ESC' for exiting video
if k == 27:
break
    elif count >= 15:  # Take 15 face samples and stop the video
break
# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
Nagrawal1510.noreply@github.com
|
39a113e0707f370a8c761a042f5ba7296120bc2d
|
5b532729586700e5c554c86474a1800d41e539f7
|
/data_acquisition/figure_A7/2016_11_18_modulated_imaging_darkfield_nanodiamond_7_extra_green_filter/modulated_imaging_reps_separate.py
|
73a9e6dd50f9ab85315199bb84494fa477253e1e
|
[
"CC-BY-4.0"
] |
permissive
|
AndrewGYork/stimulated_emission_imaging
|
8b258c21525c7bdec33eaab7e1a083919c9e2dcc
|
d7d98da523e1e19f978e84dea9c38999caa7005a
|
refs/heads/master
| 2022-12-19T12:04:28.417615
| 2020-09-18T01:26:53
| 2020-09-18T01:26:53
| 76,388,853
| 2
| 3
|
NOASSERTION
| 2020-08-12T00:47:33
| 2016-12-13T18:55:04
|
Python
|
UTF-8
|
Python
| false
| false
| 10,882
|
py
|
import time
import numpy as np
import image_data_pipeline
import ni
import np_tif
#import thorlabs
from pco import pco_edge_camera_child_process
import pickle
def main():
# This incantation is forced on us so the IDP won't print everything twice:
import logging
import multiprocessing as mp
logger = mp.log_to_stderr()
logger.setLevel(logging.INFO)
# Set parameters for IDP (Image Data Pipeline)
num_buffers_needed = 200
image_height_pixels = 128
image_width_pixels = 380
# Set parameters for DAQ (analog out card)
num_daq_channels = 3
daq_rate = 8e5
##############################################################
# Set exposure parameters for camera and laser illumination: #
##############################################################
green_AOM_mV = [
0,
60,
69,
82,
92,
103,
114,
127,
151,
174,
212,
247,
300,
] #calibrated
green_powers = [
'0mW',
'25mW',
'50mW',
'100mW',
'150mW',
'225mW',
'300mW',
'400mW',
'600mW',
'800mW',
'1100mW',
'1300mW',
'1500mW',
]
red_AOM_mV = [
0,
100,
131,
158,
186,
219,
269,
] #calibrated
red_powers = [
'0mW',
'50mW',
'100mW',
'150mW',
'200mW',
'250mW',
'300mW',
]
angle_string = '117_5'
# Set laser pulse duration VERY SHORT
green_pulse_duration_pixels = 1
red_pulse_duration_pixels = 1
# Set green pulse train repetition time short enough to
# thermally stabilize the sample
green_rep_time_us = 600
green_rep_time_pixels = int(np.ceil(
green_rep_time_us * 1e-6 * daq_rate))
# how many red laser shots in an exposure?
pulses_per_exposure = 25
# you don't want red light leaking into next exposure so set this to
# 1 if you're imaging 720 nm.
# set to zero if you're looking for depletion, because you need
# every green pulse matched with a red for that measurement
less_red_pulses = 1
desired_effective_exposure_time_pixels = (green_rep_time_pixels *
pulses_per_exposure)
assert desired_effective_exposure_time_pixels > 0
#define red/green pulse delays
red_start_pixel_array = np.array([-2, -1, 0, 1, 2])
num_delays = red_start_pixel_array.shape[0]
print('Red/green delay (us) =', red_start_pixel_array / daq_rate * 1e6)
# number of exposures should be the first dimension of the idp buffer
num_reps = num_buffers_needed
num_exposures = (
num_delays *
len(green_powers) *
len(red_powers)
)
    # actual roll time is 640 us, which should be a multiple of
    # green_rep_time_us, but may not always be; this only works for the
    # current field of view (height 128 pixels, 10 us per line); rolling is
    # symmetrical around the middle of the chip
rolling_time_us = 640 #experimentally determined for this field of view
rolling_time_pixels = int(np.ceil(
rolling_time_us * 1e-6 * daq_rate))
extra_time_after_roll_pixels = (green_rep_time_pixels -
rolling_time_pixels %
green_rep_time_pixels)
effective_exposure_time_pixels = (extra_time_after_roll_pixels +
desired_effective_exposure_time_pixels)
# reminder: negative delay values (red before green) are only valid if the
# camera roll finishes before the red pulse gets there
assert extra_time_after_roll_pixels > -min(red_start_pixel_array)
set_exposure_time_pixels = (rolling_time_pixels +
effective_exposure_time_pixels)
# set exposure time must be an integer multiple of green rep time
assert (set_exposure_time_pixels % green_rep_time_pixels) == 0
set_exposure_time_us = int(np.ceil(
set_exposure_time_pixels / daq_rate * 1e6))
# Initialize the IDP:
idp = image_data_pipeline.Image_Data_Pipeline(
num_buffers=num_buffers_needed,
buffer_shape=(num_exposures, image_height_pixels, image_width_pixels),
camera_child_process=pco_edge_camera_child_process)
assert idp.buffer_shape[0] == num_exposures
# Initialize the DAQ:
daq = ni.PCI_6733(
num_channels=num_daq_channels,
rate=daq_rate,
verbose=True)
assert daq.rate == daq_rate
try:
# Apply camera settings:
idp.display.set_intensity_scaling('median_filter_autoscale')
idp.apply_camera_settings(
trigger='external_trigger',
exposure_time_microseconds = set_exposure_time_us,
region_of_interest ={'bottom': 1088,
'top': 961,
'left': 841,
'right': 1220},
preframes=0)
# UNCOMMON COMMAND: the daq voltage string can get very long, so
# Andy wrote a new part of pco.py that adjusts the set timeout
# for waiting for the FIRST camera trigger (Oct 4, 2016)
idp.camera.commands.send(('set_first_trigger_timeout_seconds',
{'first_trigger_timeout_seconds': 3}))
assert idp.camera.commands.recv() == 3 # clear command queue
# Figure out some basic timing information: This is what the
# camera thinks it's doing. Is it what we want it to do?
exposure_time_us = idp.camera.get_setting('exposure_time_microseconds')
print('I want exposure time to be (us)',set_exposure_time_us)
print('Exposure time actually is (us)',exposure_time_us)
assert exposure_time_us == set_exposure_time_us
rolling_time_us = idp.camera.get_setting('rolling_time_microseconds')
rolling_time_jitter_us = 15 #experimentally measured and also in spec
rolling_time_us += rolling_time_jitter_us
pulse_tail_us = 25 #experimentally measured response of buffer amp and AOM
print("\nCamera exposure time:", exposure_time_us, "(us)\n")
print("\nCamera rolling time:", rolling_time_us, "(us)\n")
effective_exposure_us = exposure_time_us - rolling_time_us
print("\nCamera effective exposure:", effective_exposure_us, "(us)\n")
# Calculate DAQ voltages
for rep_num in range(num_reps):
exposure_number = 0
for [red_voltage_num, my_red_voltage_mV] in enumerate(red_AOM_mV):
for [green_voltage_num, my_green_voltage_mV] in enumerate(green_AOM_mV):
# Set voltages to play on analog out card
green_voltage = my_green_voltage_mV/1000
red_voltage = my_red_voltage_mV/1000
trig_voltage = 3
# time between exposures must be greater than camera trigger
# jitter and a multiple of the green rep time
# trigger jitter is about 10 us
time_between_exposures_pixels = 2 * green_rep_time_pixels
camera_rep_time_pixels = (set_exposure_time_pixels +
time_between_exposures_pixels)
camera_rep_time_us = (
camera_rep_time_pixels / daq_rate * 1e6)
voltages_delay_scan = np.zeros(
(camera_rep_time_pixels * num_delays, num_daq_channels))
# green laser pulses on for the duration of the daq play
green_chunk = np.zeros(green_rep_time_pixels)
green_chunk[0:green_pulse_duration_pixels] = green_voltage
voltages_delay_scan[:,1] = np.tile(
green_chunk, int(
voltages_delay_scan.shape[0]/green_rep_time_pixels))
# camera trigger duration should be 3us or greater
trigger_duration_us = 3
trigger_duration_pixels = int(np.ceil(
trigger_duration_us / 1e6 * daq_rate))
# loop used to define camera trigger and red laser pulse
# voltages
for which_exposure in range(num_delays):
cursor = which_exposure * camera_rep_time_pixels
# Camera triggers:
voltages_delay_scan[
cursor:cursor + trigger_duration_pixels, 0] = (
trig_voltage)
# Red laser pulses
red_start_pixel = (
red_start_pixel_array[which_exposure % num_delays])
red_series_start = (cursor +
rolling_time_pixels +
extra_time_after_roll_pixels +
red_start_pixel)
red_chunk = np.zeros(green_rep_time_pixels)
red_chunk[0:red_pulse_duration_pixels] = red_voltage
red_exposure_array = np.tile(red_chunk, (
pulses_per_exposure - less_red_pulses))
voltages_delay_scan[
red_series_start:(
red_series_start + red_exposure_array.shape[0]), 2
] = red_exposure_array
if exposure_number == 0:
all_voltages = voltages_delay_scan
else:
all_voltages = np.append(
all_voltages,voltages_delay_scan, axis=0)
exposure_number += 1
# Put it all together
idp.load_permission_slips(
num_slips=1,
file_saving_info=[
{'filename': (
'STE_darkfield_power_delay_scan_' +
str(rep_num) +
'.tif'),
'channels': num_delays,
'slices': num_exposures/num_delays,
}])
daq.play_voltages(all_voltages, block=True)
finally:
# Shut everything down. This can be important!
daq.close()
idp.close()
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
AndrewGYork.noreply@github.com
|
963d0c4ff77d9e8a978d13bdb14ec7dd4d5878c1
|
632fc698a1c1a8aa24ec488273e6c26301dfeb37
|
/main/urls.py
|
97234d339761b63e7bdd198048b5f4d61d521b66
|
[
"MIT"
] |
permissive
|
sirodoht/martianpins
|
8ffcb9bbb41062438ae7e6114c1b68ead7411c6a
|
533c8ba38919f5121aa3de87d915d2e126b1ecd6
|
refs/heads/master
| 2022-01-25T16:48:07.422556
| 2022-01-18T00:08:38
| 2022-01-18T00:08:59
| 226,012,519
| 32
| 10
| null | 2020-05-08T17:05:43
| 2019-12-05T03:59:52
|
Python
|
UTF-8
|
Python
| false
| false
| 446
|
py
|
from django.contrib import admin
from django.urls import path
from . import views
admin.site.site_header = "Martian Pins administration"
app_name = "main"
urlpatterns = [
path("", views.index, name="index"),
path("terms", views.terms, name="terms"),
path("pins/hash/", views.hash_pin, name="hash_pin"),
path("pins/upload/", views.upload_pin, name="upload_pin"),
path("pins/rm/<int:pin_id>", views.rm_pin, name="rm_pin"),
]
|
[
"theodorekeloglou@gmail.com"
] |
theodorekeloglou@gmail.com
|
3dc5d661a041cf4ffae943409ab8044dbda71c09
|
7f0548b7191b7589712af19baebafddae1d0505f
|
/dojoassignments/python/django/time_display_assignment/time_display_assignment/urls.py
|
c30dd694aefa8ba50d01aec0c0da45bc72da21cc
|
[] |
no_license
|
mtjhartley/codingdojo
|
dd8eab1bd61fb847e44766e89fe3db2340468102
|
65dc558d19adbe62f85ad61c32cb1c392b56567c
|
refs/heads/master
| 2022-12-14T23:06:11.927445
| 2017-08-16T21:08:35
| 2017-08-16T21:08:35
| 92,218,728
| 1
| 5
| null | 2022-12-07T23:59:48
| 2017-05-23T20:46:03
|
Python
|
UTF-8
|
Python
| false
| false
| 837
|
py
|
"""time_display_assignment URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('apps.timedisplay.urls'))
]
|
[
"mtjhartley@gmail.com"
] |
mtjhartley@gmail.com
|
9719dffb829de023bcccac9eb5d0356996f0e002
|
e7255a5262deb1d8ee82f8a3248d44276f867428
|
/Project/bin/lbx.py
|
4bf23dc601e6d42b9b2b57c9df3eee6293b2c68d
|
[] |
no_license
|
anhquantr/Fill-a-Pix
|
f59b3d6922c23ce4ad67add3264f4c63bbffbbb4
|
ad2537b0c3ae7e7bb41ace4ec209c990a07f358e
|
refs/heads/main
| 2023-07-05T21:44:13.092277
| 2021-08-19T09:22:55
| 2021-08-19T09:22:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,200
|
py
|
#!/Users/trananhquan/Desktop/ARTIFICIAL INTELLIGENCE/Proj2/Project/bin/python
#-*- coding:utf-8 -*-
##
## lbx.py
##
## Created on: Jan 9, 2017
## Author: Alexey S. Ignatiev
## E-mail: aignatiev@ciencias.ulisboa.pt
##
"""
===============
List of classes
===============
.. autosummary::
:nosignatures:
LBX
==================
Module description
==================
This module implements a prototype of the LBX algorithm for the computation
of a *minimal correction subset* (MCS) and/or MCS enumeration. The LBX
abbreviation stands for *literal-based MCS extraction* algorithm, which was
proposed in [1]_. Note that this prototype does not follow the original
low-level implementation of the corresponding MCS extractor available
`online <https://reason.di.fc.ul.pt/wiki/doku.php?id=lbx>`_ (compared to
our prototype, the low-level implementation has a number of additional
heuristics used). However, it implements the LBX algorithm for partial
MaxSAT formulas, as described in [1]_.
.. [1] Carlos Mencia, Alessandro Previti, Joao Marques-Silva.
*Literal-Based MCS Extraction*. IJCAI 2015. pp. 1973-1979
The implementation can be used as an executable (the list of available
command-line options can be shown using ``lbx.py -h``) in the following
way:
::
$ xzcat formula.wcnf.xz
p wcnf 3 6 4
1 1 0
1 2 0
1 3 0
4 -1 -2 0
4 -1 -3 0
4 -2 -3 0
$ lbx.py -d -e all -s glucose3 -vv formula.wcnf.xz
c MCS: 1 3 0
c cost: 2
c MCS: 2 3 0
c cost: 2
c MCS: 1 2 0
c cost: 2
c oracle time: 0.0002
Alternatively, the algorithm can be accessed and invoked through the
standard ``import`` interface of Python, e.g.
.. code-block:: python
>>> from pysat.examples.lbx import LBX
>>> from pysat.formula import WCNF
>>>
>>> wcnf = WCNF(from_file='formula.wcnf.xz')
>>>
>>> lbx = LBX(wcnf, use_cld=True, solver_name='g3')
>>> for mcs in lbx.enumerate():
... lbx.block(mcs)
... print(mcs)
[1, 3]
[2, 3]
[1, 2]
==============
Module details
==============
"""
#
#==============================================================================
from __future__ import print_function
import collections
import getopt
from math import copysign
import os
from pysat.formula import CNFPlus, WCNFPlus
from pysat.solvers import Solver, SolverNames
import re
from six.moves import range
import sys
#
#==============================================================================
class LBX(object):
"""
LBX-like algorithm for computing MCSes. Given an unsatisfiable partial
CNF formula, i.e. formula in the :class:`.WCNF` format, this class can
be used to compute a given number of MCSes of the formula. The
implementation follows the LBX algorithm description in [1]_. It can
use any SAT solver available in PySAT. Additionally, the "clause
:math:`D`" heuristic can be used when enumerating MCSes.
The default SAT solver to use is ``m22`` (see :class:`.SolverNames`).
The "clause :math:`D`" heuristic is disabled by default, i.e.
``use_cld`` is set to ``False``. Internal SAT solver's timer is also
disabled by default, i.e. ``use_timer`` is ``False``.
:param formula: unsatisfiable partial CNF formula
:param use_cld: whether or not to use "clause :math:`D`"
:param solver_name: SAT oracle name
:param use_timer: whether or not to use SAT solver's timer
:type formula: :class:`.WCNF`
:type use_cld: bool
:type solver_name: str
:type use_timer: bool
"""
def __init__(self, formula, use_cld=False, solver_name='m22', use_timer=False):
"""
Constructor.
"""
# bootstrapping the solver with hard clauses
self.oracle = Solver(name=solver_name, bootstrap_with=formula.hard,
use_timer=use_timer)
self.solver = solver_name
# adding native cardinality constraints (if any) as hard clauses
# this can be done only if the Minicard solver is in use
if isinstance(formula, WCNFPlus) and formula.atms:
assert self.oracle.supports_atmost(), \
'{0} does not support native cardinality constraints. Make sure you use the right type of formula.'.format(solver_name)
for atm in formula.atms:
self.oracle.add_atmost(*atm)
self.topv = formula.nv # top variable id
self.soft = formula.soft
self.sels = []
self.ucld = use_cld
# mappings between internal and external variables
VariableMap = collections.namedtuple('VariableMap', ['e2i', 'i2e'])
self.vmap = VariableMap(e2i={}, i2e={})
# at this point internal and external variables are the same
for v in range(1, formula.nv + 1):
self.vmap.e2i[v] = v
self.vmap.i2e[v] = v
for cl in self.soft:
sel = cl[0]
if len(cl) > 1 or cl[0] < 0:
self.topv += 1
sel = self.topv
self.oracle.add_clause(cl + [-sel])
self.sels.append(sel)
def __del__(self):
"""
Destructor.
"""
self.delete()
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
def delete(self):
"""
Explicit destructor of the internal SAT oracle.
"""
if self.oracle:
self.oracle.delete()
self.oracle = None
def add_clause(self, clause, soft=False):
"""
        The method for adding a new hard or soft clause to the problem
formula. Although the input formula is to be specified as an
argument of the constructor of :class:`LBX`, adding clauses may be
helpful when *enumerating* MCSes of the formula. This way, the
clauses are added incrementally, i.e. *on the fly*.
The clause to add can be any iterable over integer literals. The
additional Boolean parameter ``soft`` can be set to ``True``
        meaning that the clause being added is soft (note that parameter
``soft`` is set to ``False`` by default).
:param clause: a clause to add
:param soft: whether or not the clause is soft
:type clause: iterable(int)
:type soft: bool
"""
# first, map external literals to internal literals
# introduce new variables if necessary
cl = list(map(lambda l: self._map_extlit(l), clause if not len(clause) == 2 or not type(clause[0]) == list else clause[0]))
if not soft:
if not len(clause) == 2 or not type(clause[0]) == list:
# the clause is hard, and so we simply add it to the SAT oracle
self.oracle.add_clause(cl)
else:
# this should be a native cardinality constraint,
# which can be used only together with Minicard
                assert self.solver in SolverNames.minicard or \
                    self.solver in SolverNames.gluecard3 or \
                    self.solver in SolverNames.gluecard4, \
                    '{0} does not support native cardinality constraints'.format(self.solver)
self.oracle.add_atmost(cl, clause[1])
else:
self.soft.append(cl)
# soft clauses should be augmented with a selector
sel = cl[0]
if len(cl) > 1 or cl[0] < 0:
self.topv += 1
sel = self.topv
self.oracle.add_clause(cl + [-sel])
self.sels.append(sel)
def compute(self, enable=[]):
"""
Compute and return one solution. This method checks whether the
hard part of the formula is satisfiable, i.e. an MCS can be
extracted. If the formula is satisfiable, the model computed by the
SAT call is used as an *over-approximation* of the MCS in the
method :func:`_compute` invoked here, which implements the LBX
algorithm.
An MCS is reported as a list of integers, each representing a soft
clause index (the smallest index is ``1``).
An optional input parameter is ``enable``, which represents a
sequence (normally a list) of soft clause indices that a user
would prefer to enable/satisfy. Note that this may result in an
unsatisfiable oracle call, in which case ``None`` will be reported
as solution. Also, the smallest clause index is assumed to be
``1``.
:param enable: a sequence of clause ids to enable
:type enable: iterable(int)
:rtype: list(int)
"""
self.setd = []
self.satc = [False for cl in self.soft] # satisfied clauses
self.solution = None
self.bb_assumps = [] # backbone assumptions
self.ss_assumps = [] # satisfied soft clause assumptions
if self.oracle.solve(assumptions=[self.sels[cl_id - 1] for cl_id in enable]):
# hard part is satisfiable => there is a solution
self._filter_satisfied(update_setd=True)
self._compute()
self.solution = list(map(lambda i: i + 1, filter(lambda i: not self.satc[i], range(len(self.soft)))))
return self.solution
def enumerate(self):
"""
This method iterates through MCSes enumerating them until the
formula has no more MCSes. The method iteratively invokes
:func:`compute`. Note that the method does not block the MCSes
computed - this should be explicitly done by a user.
"""
done = False
while not done:
mcs = self.compute()
if mcs != None:
yield mcs
else:
done = True
def block(self, mcs):
"""
Block a (previously computed) MCS. The MCS should be given as an
iterable of integers. Note that this method is not automatically
invoked from :func:`enumerate` because a user may want to block
some of the MCSes conditionally depending on the needs. For
example, one may want to compute disjoint MCSes only in which case
this standard blocking is not appropriate.
:param mcs: an MCS to block
:type mcs: iterable(int)
"""
self.oracle.add_clause([self.sels[cl_id - 1] for cl_id in mcs])
def _satisfied(self, cl, model):
"""
Given a clause (as an iterable of integers) and an assignment (as a
list of integers), this method checks whether or not the assignment
satisfies the clause. This is done by a simple clause traversal.
The method is invoked from :func:`_filter_satisfied`.
:param cl: a clause to check
:param model: an assignment
:type cl: iterable(int)
:type model: list(int)
:rtype: bool
"""
for l in cl:
if len(model) < abs(l) or model[abs(l) - 1] == l:
# either literal is unassigned or satisfied by the model
return True
return False
def _filter_satisfied(self, update_setd=False):
"""
This method extracts a model provided by the previous call to a SAT
        oracle and iterates over all soft clauses checking if each of them is
satisfied by the model. Satisfied clauses are marked accordingly
while the literals of the unsatisfied clauses are kept in a list
called ``setd``, which is then used to refine the correction set
(see :func:`_compute`, and :func:`do_cld_check`).
Optional Boolean parameter ``update_setd`` enforces the method to
update variable ``self.setd``. If this parameter is set to
``False``, the method only updates the list of satisfied clauses,
which is an under-approximation of a *maximal satisfiable subset*
(MSS).
:param update_setd: whether or not to update setd
:type update_setd: bool
"""
model = self.oracle.get_model()
setd = set()
for i, cl in enumerate(self.soft):
if not self.satc[i]:
if self._satisfied(cl, model):
self.satc[i] = True
self.ss_assumps.append(self.sels[i])
else:
setd = setd.union(set(cl))
if update_setd:
self.setd = sorted(setd)
def _compute(self):
"""
The main method of the class, which computes an MCS given its
over-approximation. The over-approximation is defined by a model
for the hard part of the formula obtained in :func:`compute`.
The method is essentially a simple loop going over all literals
unsatisfied by the previous model, i.e. the literals of
``self.setd`` and checking which literals can be satisfied. This
        process can be seen as a refinement of the over-approximation of the
MCS. The algorithm follows the pseudo-code of the LBX algorithm
presented in [1]_.
Additionally, if :class:`LBX` was constructed with the requirement
to make "clause :math:`D`" calls, the method calls
:func:`do_cld_check` at every iteration of the loop using the
literals of ``self.setd`` not yet checked, as the contents of
"clause :math:`D`".
"""
# unless clause D checks are used, test one literal at a time
        # and add it either to satisfied or backbone assumptions
i = 0
while i < len(self.setd):
if self.ucld:
self.do_cld_check(self.setd[i:])
i = 0
            if self.setd:  # it may be empty after the clause D check
if self.oracle.solve(assumptions=self.ss_assumps + self.bb_assumps + [self.setd[i]]):
# filtering satisfied clauses
self._filter_satisfied()
else:
# current literal is backbone
self.bb_assumps.append(-self.setd[i])
i += 1
def do_cld_check(self, cld):
"""
Do the "clause :math:`D`" check. This method receives a list of
        literals, which serves as a "clause :math:`D`" [2]_, and checks
whether the formula conjoined with :math:`D` is satisfiable.
.. [2] Joao Marques-Silva, Federico Heras, Mikolas Janota,
Alessandro Previti, Anton Belov. *On Computing Minimal
Correction Subsets*. IJCAI 2013. pp. 615-622
If clause :math:`D` cannot be satisfied together with the formula,
then negations of all of its literals are backbones of the formula
and the LBX algorithm can stop. Otherwise, the literals satisfied
by the new model refine the MCS further.
Every time the method is called, a new fresh selector variable
:math:`s` is introduced, which augments the current clause
:math:`D`. The SAT oracle then checks if clause :math:`(D \\vee
\\neg{s})` can be satisfied together with the internal formula.
The :math:`D` clause is then disabled by adding a hard clause
:math:`(\\neg{s})`.
:param cld: clause :math:`D` to check
:type cld: list(int)
"""
# adding a selector literal to clause D
# selector literals for clauses D currently
# cannot be reused, but this may change later
self.topv += 1
sel = self.topv
cld.append(-sel)
# adding clause D
self.oracle.add_clause(cld)
if self.oracle.solve(assumptions=self.ss_assumps + self.bb_assumps + [sel]):
# filtering satisfied
self._filter_satisfied(update_setd=True)
else:
# clause D is unsatisfiable => all literals are backbones
self.bb_assumps.extend([-l for l in cld[:-1]])
self.setd = []
# deactivating clause D
self.oracle.add_clause([-sel])
def _map_extlit(self, l):
"""
Map an external variable to an internal one if necessary.
This method is used when new clauses are added to the formula
incrementally, which may result in introducing new variables
clashing with the previously used *clause selectors*. The method
makes sure no clash occurs, i.e. it maps the original variables
used in the new problem clauses to the newly introduced auxiliary
variables (see :func:`add_clause`).
Given an integer literal, a fresh literal is returned. The returned
integer has the same sign as the input literal.
:param l: literal to map
:type l: int
:rtype: int
"""
v = abs(l)
if v in self.vmap.e2i:
return int(copysign(self.vmap.e2i[v], l))
else:
self.topv += 1
self.vmap.e2i[v] = self.topv
self.vmap.i2e[self.topv] = v
return int(copysign(self.topv, l))
def oracle_time(self):
"""
Report the total SAT solving time.
"""
return self.oracle.time_accum()
#
#==============================================================================
def parse_options():
"""
Parses command-line options.
"""
try:
opts, args = getopt.getopt(sys.argv[1:],
'de:hs:v',
['dcalls',
'enum=',
'help',
'solver=',
'verbose'])
except getopt.GetoptError as err:
sys.stderr.write(str(err).capitalize() + '\n')
usage()
sys.exit(1)
dcalls = False
to_enum = 1
solver = 'm22'
verbose = 0
for opt, arg in opts:
if opt in ('-d', '--dcalls'):
dcalls = True
elif opt in ('-e', '--enum'):
to_enum = str(arg)
if to_enum != 'all':
to_enum = int(to_enum)
elif opt in ('-h', '--help'):
usage()
sys.exit(0)
elif opt in ('-s', '--solver'):
solver = str(arg)
elif opt in ('-v', '--verbose'):
verbose += 1
else:
assert False, 'Unhandled option: {0} {1}'.format(opt, arg)
return dcalls, to_enum, solver, verbose, args
#
#==============================================================================
def usage():
"""
Prints help message.
"""
print('Usage:', os.path.basename(sys.argv[0]), '[options] file')
print('Options:')
print(' -d, --dcalls Try to bootstrap algorithm')
print(' -e, --enum=<string> How many solutions to compute')
print(' Available values: [1 .. all] (default: 1)')
print(' -h, --help')
print(' -s, --solver SAT solver to use')
print(' Available values: g3, g4, lgl, mcb, mcm, mpl, m22, mc, mgh (default = m22)')
print(' -v, --verbose Be verbose')
#
#==============================================================================
if __name__ == '__main__':
dcalls, to_enum, solver, verbose, files = parse_options()
if type(to_enum) == str:
to_enum = 0
if files:
# reading standard CNF, WCNF, or (W)CNF+
if re.search('cnf[p|+]?(\.(gz|bz2|lzma|xz))?$', files[0]):
if re.search('\.wcnf[p|+]?(\.(gz|bz2|lzma|xz))?$', files[0]):
formula = WCNFPlus(from_file=files[0])
else: # expecting '*.cnf[,p,+].*'
formula = CNFPlus(from_file=files[0]).weighted()
with LBX(formula, use_cld=dcalls, solver_name=solver, use_timer=True) as mcsls:
for i, mcs in enumerate(mcsls.enumerate()):
if verbose:
print('c MCS:', ' '.join([str(cl_id) for cl_id in mcs]), '0')
if verbose > 1:
cost = sum([formula.wght[cl_id - 1] for cl_id in mcs])
print('c cost:', cost)
if to_enum and i + 1 == to_enum:
break
mcsls.block(mcs)
print('c oracle time: {0:.4f}'.format(mcsls.oracle_time()))
|
[
"trananhquanttbpvn@gmail.com"
] |
trananhquanttbpvn@gmail.com
|
27b6bcf7074e83cbd289142a3e9781b9bb8031bf
|
0a22dca130804854d892d7f5d33510c4f1aed160
|
/queue.py
|
b2acadae2f00d54c3eb5e00b5cb6a68d6dd6034b
|
[] |
no_license
|
abossard/interview_code
|
7c9b80c94de649589f4d4910a9f5c5064fd79095
|
3fd4ba450a2423f7c345e57c6d765512ec3ba7c2
|
refs/heads/master
| 2021-01-21T22:58:30.914500
| 2014-03-19T22:51:06
| 2014-03-19T22:51:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
class Queue(object):
    """FIFO queue built from two LIFO stacks (plain Python lists)."""

    def __init__(self):
        # Instance-level stacks; class-level lists would be shared between instances.
        self.a = []  # staging stack used while enqueuing
        self.b = []  # holds the queued items with the oldest one on top

    def enqueue(self, data):
        # Move everything back onto the staging stack, push the new item,
        # then rebuild b so the oldest item ends up on top again.
        while self.b:
            self.a.append(self.b.pop())
        self.a.append(data)
        while self.a:
            self.b.append(self.a.pop())

    def dequeue(self):
        return self.b.pop() if self.b else None

    def size(self):
        return len(self.b)

    def __str__(self):
        return str(self.b)


q = Queue()
q.enqueue('first')
q.enqueue('second1')
q.enqueue('second2')
q.enqueue('second3')
q.enqueue('last')
print(q.dequeue())
print(q.dequeue())
print(q.dequeue())
print(q.dequeue())
print(q.dequeue())
|
[
"abossard@gmail.com"
] |
abossard@gmail.com
|
ef21c363df2cf73f2d3e12e9a5bf03fdd55c5ad3
|
eb2367cd85d8616751cc588d3531bf0fae420871
|
/hellowebapp/urls.py
|
924e902cb229eb1d06dde5f9e7dca1882b3f5f4f
|
[] |
no_license
|
Shai1436/Collection-app
|
169c2c0d394c6b151b9a488ff7ba2b4d0ce171b3
|
1f20d558ffacd07700760bf18f85a5d7e008023e
|
refs/heads/master
| 2021-01-21T10:30:27.833898
| 2017-05-21T02:58:18
| 2017-05-21T02:58:18
| 91,694,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,323
|
py
|
from django.contrib.auth.views import (
password_reset,
password_reset_done,
password_reset_confirm,
password_reset_complete
)
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import (
TemplateView,
RedirectView
)
from collection import views
from collection.backends import MyRegistrationView
urlpatterns = [
url(r'^$', views.index, name='home'),
# The new URL entries we're adding:
url(r'^about/$',
TemplateView.as_view(template_name='about.html'),
name='about'),
url(r'^contact/$',
TemplateView.as_view(template_name='contact.html'),
name='contact'),
url(r'^profiles/(?P<slug>[-\w]+)/$', views.profile_detail,
name='profile_detail'),
url(r'^things/(?P<slug>[-\w]+)/edit/$',
views.edit_profile,
name='edit_profile'),
url(r'^accounts/register/$',
MyRegistrationView.as_view(),
name='registration_register'),
url(r'^accounts/create_profile/$', views.create_profile,
name='registration_create_profile'),
# our new browse flow
url(r'^things/$', RedirectView.as_view(pattern_name='browse', permanent=True)),
url(r'^browse/$', RedirectView.as_view(pattern_name='browse', permanent=True)),
url(r'^browse/name/$',
views.browse_by_name, name='browse'),
url(r'^browse/name/(?P<initial>[-\w]+)/$',
views.browse_by_name, name='browse_by_name'),
url(r'^accounts/password/reset/$',
password_reset,
{'template_name':
'registration/password_reset_form.html'},
name="password_reset"),
url(r'^accounts/password/reset/done/$',
password_reset_done,
{'template_name':
'registration/password_reset_done.html'},
name="password_reset_done"),
url(r'^accounts/password/reset/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$',
password_reset_confirm,
{'template_name':
'registration/password_reset_confirm.html'},
name="password_reset_confirm"),
url(r'^accounts/password/done/$',
password_reset_complete,
{'template_name':
'registration/password_reset_complete.html'},
name="password_reset_complete"),
url(r'^accounts/',
include('registration.backends.simple.urls')),
url(r'^admin/', admin.site.urls),
]
|
[
"shahrukh.haider35@gmail.com"
] |
shahrukh.haider35@gmail.com
|
1d15e2a81dc7faddc9fd019cf75d10f62e17e236
|
28c3137a00ca91c14b08211a6621404e52d1a9e3
|
/scripts/State Space Simulator/apparent_wind
|
95a991e5442c823c42baac6b89ba09ba1b72481e
|
[] |
no_license
|
brunopinto900/sailboat_auto_navigation
|
0241df00f12132b3265d6baaf68b4643de5631f4
|
02bece9fc18a5bc87751b928724d44f350679034
|
refs/heads/main
| 2023-03-25T17:11:26.699931
| 2021-03-20T22:11:37
| 2021-03-20T22:11:37
| 349,806,874
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,818
|
#!/usr/bin/python
# Simulate the heading state
import rospy
from std_msgs.msg import Float32
from sailboat.msg import Vector3, Wind, Vector2
import math, time, mpmath
import numpy as np
from sailboat.cfg import windConfig
from dynamic_reconfigure.server import Server
# Do not forget the dynamic config
class apparent_wind_simulation():
def __init__(self):
# Initializes the node
rospy.init_node("apparent_wind_simulation", anonymous=True)
def wind_callback(config, level):
self.wind_speed = config.TWS
self.wind_direction_north = config.TWA
return config
srv = Server(windConfig, wind_callback)
self.sample_time = rospy.get_param("simulation/sample_time") # rate at which this node runs
self.node = rospy.Rate(self.sample_time) # wait function to use inside threads
# Publishers (heading)
self.set_apparent_wind = rospy.Publisher('apparent_wind', Wind, queue_size = 10)
self.apparent_wind = Wind()
# Subscribers (angular_velocity)
rospy.Subscriber('heading', Float32, self.get_heading)
self.heading = rospy.get_param("scenario/initial_state/heading")
rospy.Subscriber('linear_velocity', Vector2, self.get_linear_velocity)
self.linear_velocity = Vector2()
# True Wind speed and direction
self.true_wind_speed = rospy.get_param("scenario/true_wind/speed")
self.true_wind_angle = rospy.get_param("scenario/true_wind/angle") # from the north
#Noise
self.wind_angle_noise_range = rospy.get_param('model/noise/wind_angle_noise_range')
self.wind_speed_noise_range = rospy.get_param('model/noise/wind_speed_noise_range')
rospy.loginfo("Wind direction simulated")
self.calculate_apparent_wind() # function responsible to publish, created here after all initializations
def get_heading(self, data):
self.heading = data.data
def get_linear_velocity(self, data):
# velocity in the boat reference system
self.linear_velocity.x = data.x
self.linear_velocity.y = data.y
speed = mpmath.hypot(self.linear_velocity.x,self.linear_velocity.y)
heading = math.degrees(math.atan2(self.linear_velocity.y, self.linear_velocity.x) )
self.linear_velocity.x = speed * math.cos(math.radians(heading - self.heading))
self.linear_velocity.y = speed * math.sin(math.radians(heading - self.heading))
def calculate_apparent_wind(self):
# every sample_time seconds, it executes this loop
while not rospy.is_shutdown():
if self.wind_angle_noise_range:
noise_angle = np.random.normal(scale= self.wind_angle_noise_range)
else:
noise_angle = 0
if self.wind_speed_noise_range:
noise_speed = np.random.normal(scale= self.wind_speed_noise_range)
else:
noise_speed = 0
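            # Apparent wind is the true wind minus the boat's own velocity, both
            # expressed in the boat-relative (heading-aligned) reference frame.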
angle = (self.true_wind_angle + noise_angle - self.heading) % 360
            true_wind_vector = ((self.true_wind_speed + noise_speed) * math.cos(math.radians(angle)),
                                (self.true_wind_speed + noise_speed) * math.sin(math.radians(angle)),)  # change the minus sign
            apparent_wind_vector = (-self.linear_velocity.x + true_wind_vector[0],  # change the minus sign on the velocities
                                    -self.linear_velocity.y + true_wind_vector[1],)
self.apparent_wind.speed = mpmath.hypot(apparent_wind_vector[0],apparent_wind_vector[1])
self.apparent_wind.angle = ( math.degrees(math.atan2(apparent_wind_vector[1], apparent_wind_vector[0]))) % 360
self.set_apparent_wind.publish(self.apparent_wind)
self.node.sleep()
if __name__ == '__main__':
try:
apparent_wind_simulation()
except rospy.ROSInterruptException:
pass
|
[
"bruno@DESKTOP-TSTEU1E.localdomain"
] |
bruno@DESKTOP-TSTEU1E.localdomain
|
|
f7bf373ffaca6f71f7cd4fc4ed4c868ca26266be
|
05ee3f8d8f66657ee8e62f85edecb36b2bb98786
|
/udemy-design-patterns-in-python/geomatric_shapes.py
|
e580d93df99538ce82199ee022b1251375172d03
|
[] |
no_license
|
zahedul/python-practice
|
b490fa6f16db048a8ab107671faf33508008eb15
|
3b101e44b69fd065044e3e4e88062bdb7b53fa93
|
refs/heads/master
| 2023-04-14T19:58:13.758234
| 2021-05-12T09:31:32
| 2021-05-12T09:31:32
| 260,423,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
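# Composite pattern sketch: GraphicObject doubles as a group node (its `children`
# list) and as the base class for leaf shapes; Circle and Square only override
# `name`, and __str__/_print render the whole tree, repeating '*' per depth level.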
class GraphicObject:
def __init__(self, color=None):
self.color = color
self.children = []
self._name = 'Group'
@property
def name(self):
return self._name
def _print(self, items, depth):
items.append('*' * depth)
if self.color:
items.append(self.color)
items.append(f"{self.name}\n")
for child in self.children:
child._print(items, depth + 1)
def __str__(self):
items = []
self._print(items, 0)
return "".join(items)
class Circle(GraphicObject):
@property
def name(self):
return "Circle"
class Square(GraphicObject):
@property
def name(self):
return "Square"
if __name__ == "__main__":
drawing = GraphicObject()
drawing._name = "My Drawing"
drawing.children.append(Square('Red'))
drawing.children.append(Circle('Yellow'))
group = GraphicObject()
group.children.append(Circle('Blue'))
group.children.append(Square('Blue'))
drawing.children.append(group)
print(drawing)
|
[
"z.alam015@gmail.com"
] |
z.alam015@gmail.com
|
94cece20c7eab62186905a05f4b938605bcee510
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-4/c03373921d734b99f5b462090aa7a70158844266-<__init__>-bug.py
|
ef3c2834ce746add31f043dba3be28fe76c02351
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 902
|
py
|
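# Constructor fragment of what appears to be an Ansible AzureRMSecurityGroup module
# (taken from a bug-fixing corpus): it declares the module argument spec (rules,
# default_rules, purge flags, location, resource_group, state) and initialises the
# matching attributes to None before delegating to the base class __init__.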
def __init__(self):
self.module_arg_spec = dict(default_rules=dict(type='list', elements='dict', options=rule_spec), location=dict(type='str'), name=dict(type='str', required=True), purge_default_rules=dict(type='bool', default=False), purge_rules=dict(type='bool', default=False), resource_group=dict(required=True, type='str'), rules=dict(type='list', elements='dict', options=rule_spec), state=dict(type='str', default='present', choices=['present', 'absent']))
self.default_rules = None
self.location = None
self.name = None
self.purge_default_rules = None
self.purge_rules = None
self.resource_group = None
self.rules = None
self.state = None
self.tags = None
self.client = None
self.nsg_models = None
self.results = dict(changed=False, state=dict())
super(AzureRMSecurityGroup, self).__init__(self.module_arg_spec, supports_check_mode=True)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
d2cb00feb1adee6e4816e40c8e4f588943c24ee9
|
121d2ab76d137d51c8c085eb8b15420d75708269
|
/text11/text/写日记.py
|
a4ce9478847a4ab77b16770650ab669b108d9e51
|
[] |
no_license
|
yogaxmj/xmj
|
94381fe36f65f5f0ed2291dac793665e15296f4d
|
75aa6fd5895ed9cfa94ae60effbe04f4d178408b
|
refs/heads/master
| 2022-09-20T01:12:21.530090
| 2020-06-02T14:01:12
| 2020-06-02T14:01:12
| 268,051,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
import time

now = time.strftime("%y-%m-%d %H:%M:%S")
text = input("输入")
with open("D:\日记.text", "a", encoding="utf8") as f:
    f.write(now + "\n")
    f.write(text + "\n")
    f.write("-------------\n")
# write the diary entry to the file
|
[
"2941671445@qq.com"
] |
2941671445@qq.com
|
616d4da0e2ce0322fb87506d85ca26e033c11674
|
fd36faed2a313bd506f4c0ce8fec9331b03cfcc6
|
/1. Array/13. merge two sorted arrays without extra space.py
|
cd49382be9e2c465a01a781892812bda13888f46
|
[] |
no_license
|
Vivekyadv/450-DSA
|
6f760604ec462d38058a5ea0f2faa5f17680a0aa
|
f03cd763f11ed801d07de93cfe4b6b2e03235e1b
|
refs/heads/master
| 2023-07-13T12:59:04.587801
| 2021-08-13T17:15:23
| 2021-08-13T17:15:23
| 393,289,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,512
|
py
|
# Given two sorted arrays, merge them and return sorted array without using extra space
def solve(a, b):
for i in range(len(a)):
if a[i] > b[0]:
a[i], b[0] = b[0], a[i]
b.sort()
return a, b
a = [1,4,7,8,10]
b = [2,3,9]
print(solve(a,b))
# extend 1st array with 2nd array and compare from the end
def merge(arr1, arr2, m, n):
i = m-1
j = n-1
arr1.extend(arr2)
k = len(arr1)-1
while i >= 0 and j >= 0:
if arr1[i] > arr2[j]:
arr1[k] = arr1[i]
i -= 1
k -= 1
        else:  # arr1[i] <= arr2[j]; taking from arr2 here also covers equal elements
arr1[k] = arr2[j]
j -= 1
k -= 1
while i >= 0:
arr1[k] = arr1[i]
i -= 1
k -= 1
while j >= 0:
arr1[k] = arr2[j]
j -= 1
k -= 1
return arr1
# Method: All elements in Arr 1 must be smaller than all elements in Arr2
# Algorithm
# 1. Take two pointers -> i = m-1 and j = 0
# 2. if a[i] > b[j] -> swap(a[i], b[j]) and i -= 1, j += 1
# 3. if not, that means all elements in a is smaller than all elements in b.
# So break the loop
# 4. Now arrays might be unsorted so sort them
def merge(a, b, m, n):
i = m-1
j = 0
while i >= 0 and j < n:
if a[i] >= b[j]:
a[i], b[j] = b[j], a[i]
i -= 1
j += 1
else:
break
a.sort()
b.sort()
return a, b
a = [5, 8, 10, 12, 13, 15]
b = [2,6,7,9,11]
print(merge(a, b, len(a), len(b)))
|
[
"vivek.yadav2611@gmail.com"
] |
vivek.yadav2611@gmail.com
|
d79e7c1a7553c893d22772c74e536301c7ba6ca3
|
bdf11484e889086d7cef919658a6ab04e01d204b
|
/codevent/2019/03/Coordinates.py
|
e915ac7b52c163488db8ef2e8d97b8f269407d4b
|
[
"Unlicense"
] |
permissive
|
paralleldynamic/challenges
|
e0fd5180a0b1050336eabe9e9706ba9692dc8c97
|
9fa92c39da909ba95cf3815d8083e963d490e774
|
refs/heads/master
| 2023-02-22T20:47:52.394764
| 2023-02-08T15:58:45
| 2023-02-08T15:58:45
| 226,190,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,650
|
py
|
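# Crossed-wires puzzle solver (the path suggests Advent of Code 2019, day 3):
# `walk` expands a list of turns like ["R8", "U5", ...] into every coordinate
# visited, `find_intersections` intersects the resulting paths as sets, and
# `closest_intersection` picks the crossing nearest the origin by Manhattan
# distance; the __main__ block also reports the minimum combined step count.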
from functools import reduce
directions = {
'U': (0, 1),
'D': (0, -1),
'R': (1, 0),
'L': (-1, 0)
}
def walk(path:list, origin:tuple = (0, 0)):
coordinates = [origin]
for turn in path:
d = turn[0]
steps = int(turn[1:])
i = 0
while i < steps:
coordinates.append( ((directions[d][0] * 1) + coordinates[-1][0] , (directions[d][1] * 1) + coordinates[-1][1]))
i += 1
return coordinates
def walk_until(path:list, destination:tuple = (0,0)):
step = 0
for step in range(len(path)):
if path[step] == destination:
return step
return False
def find_intersections(coordinates):
return reduce(set.intersection, map(set,coordinates))
def closest_intersection(paths):
coordinates = []
for path in paths:
coordinates.append(walk(path))
manhattan_distance = lambda x: abs(x[0]) + abs(x[1])
intersections = find_intersections(coordinates)
intersections.discard((0,0))
#min([manhattan_distance(i) for i in intersections]))
return min(intersections, key=manhattan_distance), intersections, coordinates
if __name__ == '__main__':
from sys import argv
wires = []
with open(argv[1], 'r') as f:
for r in f:
wires.append(r.strip().split(','))
mapped = closest_intersection(wires)
closest, intersections, paths = mapped[0], mapped[1], mapped[2]
distances = []
for intersect in intersections:
steps = []
for path in paths:
steps.append(walk_until(path, intersect))
distances.append(steps)
print(min(map(sum, distances)))
|
[
"spearspa@gmail.com"
] |
spearspa@gmail.com
|
c57922c3990c40a764ae5fd90a742f2d922e539d
|
68b526f6958e8bf0b99f6d08c02bf1bbdf651bd2
|
/pages/migrations/0001_initial.py
|
8263cacbc70bcf857b740e6729335d33ed850fbf
|
[] |
no_license
|
vvojvoda/conference-web
|
5f053dca4c1e0e39efa01cd8413e6d707abf1d05
|
362e558b6c980823cc57c53a5d16253401318d4d
|
refs/heads/master
| 2020-04-01T18:18:31.914116
| 2016-05-20T08:51:43
| 2016-05-20T08:51:43
| 31,506,926
| 0
| 0
| null | 2015-03-01T18:59:14
| 2015-03-01T18:59:14
| null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('flatpages', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Page',
fields=[
('flatpage_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='flatpages.FlatPage')),
('meta_description', models.TextField(help_text=b'Used for og:description')),
('hero_type', models.CharField(default=b'main', max_length=20, choices=[(b'main', b'Main'), (b'blog', b'Blog'), (b'cfp', b'CFP')])),
],
options={
},
bases=('flatpages.flatpage',),
),
]
|
[
"ivan.habunek@gmail.com"
] |
ivan.habunek@gmail.com
|
c12756b4fcc336d69f3d57e4f574233a4d87dc96
|
1924f93ead8754a5fc881d1f0d5743b9f1c0165a
|
/first_project/first_app/migrations/0001_initial.py
|
db8233e569381c32474594f432a14e72cf85a879
|
[] |
no_license
|
mbclark1995/first_django_app
|
682751a43b9f45e98ae54d0ed051e4022b745dc6
|
78689d4abd613ab05d5d7bd41ece6c5080ea7752
|
refs/heads/master
| 2022-06-14T06:16:48.963138
| 2020-05-07T21:27:58
| 2020-05-07T21:27:58
| 261,954,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,385
|
py
|
# Generated by Django 3.0.5 on 2020-04-30 04:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('top_name', models.CharField(max_length=264, unique=True)),
],
),
migrations.CreateModel(
name='Webpage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=264, unique=True)),
('url', models.URLField(unique=True)),
('topic', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='first_app.Topic')),
],
),
migrations.CreateModel(
name='AccessRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='first_app.Webpage')),
],
),
]
|
[
"mbclark1995@gmail.com"
] |
mbclark1995@gmail.com
|
9a911cbd9bf119080928a19dadbcecfa2c0514ba
|
cd03d96e01e2aaca9056f73032a98d4c4988f1a8
|
/7_input_and_while()/exercises.py
|
bb3dcdc5bf2f04cb5d403fe3a11ed51dde2f1ed3
|
[] |
no_license
|
Balowen/python_crashcourse
|
a5d45276e6bd166e03cb2515881d5745cd1cb569
|
66a70304d18aa44381ace8d066b21ec5917eba76
|
refs/heads/master
| 2022-03-14T15:40:18.131629
| 2019-11-15T19:48:52
| 2019-11-15T19:48:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,832
|
py
|
#7_1 rental car
# car = input("What car would you like to rent? ")
#
# print("Let me see if I have a " + car + ".")
#7-2 restaurant seating
# group_size = input("How many people are in your dinner group? ")
# group_size = int(group_size)
#
# if group_size > 8:
# print("You have to wait for a table.")
# else:
# print("Table is ready.")
#7_3 multiples of ten
# number = input("Give me a number: ")
# number = int(number)
#
# if number % 10 == 0:
# print("The number " + str(number) + " is a multiple of 10.")
# else:
# print("The number " + str(number) + " is not a multiple of 10.")
#7_4 pizza toppings
# prompt = "Enter a pizza topping: "
# prompt += "\n(Type 'quit' to close the program.) "
#
# while True:
# topping = input(prompt)
#
# if topping == 'quit':
# break
# else:
# print("I'll add " + topping + " to your pizza.")
#7-5 movie tickets
# prompt = "Enter your age: "
# prompt += "\n(Type 'quit' to close the program.) "
#
# active = True
# while active:
# age = input(prompt)
#
# if age == 'quit':
# active = False
# break
#
# age = int(age)
#
# if age <= 3:
# print("Your ticket is free.")
# elif age <= 12:
# print("Your ticket costs $10.")
# else:
# print("Your ticket costs $15.")
#
#7-8 Deli
sandwich_orders = ['kurczakowa', 'pastrami', 'z szynkom', 'pastrami', 'gyros', 'vege', 'pastrami']
finished_sandwiches = []
print("We currently don't have any pastrami left.")
while 'pastrami' in sandwich_orders:
sandwich_orders.remove('pastrami')
while sandwich_orders:
current_sandwich = sandwich_orders.pop()
print("I made your " + current_sandwich)
finished_sandwiches.append(current_sandwich)
print("\nList of made sandwiches:")
for sandwich in finished_sandwiches:
print(sandwich)
|
[
"thebalowen@gmail.com"
] |
thebalowen@gmail.com
|
320f35c9b5407f00a3b9e0a5413fbeccd759903f
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/117/usersdata/209/27086/submittedfiles/al2.py
|
71e64f89f8642d574d44f6bf81c1c6e074c3b926
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
# -*- coding: utf-8 -*-
I1=float(input('digite um valor I: '))
I2=int(I1)
I3=I1-I2
print('I2 é %d '%I2)
print('I3 é %f '%I3)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
af90a3e3339dd9d89082d8337f64cb4110d37dbc
|
67c86d1514ae573d424716283a582c7d9238ff40
|
/Chapter4/4-22.py
|
12198731bf2b2a80540603a74c80a2eb3204d8c3
|
[] |
no_license
|
TheXJT/python_study
|
e55f53a020c0219e0c39a9b00fd93c4d1af17df9
|
be5308f3bc2dadef283671de8aec5681c2dde8d6
|
refs/heads/master
| 2021-05-04T10:27:51.760751
| 2019-01-14T09:36:34
| 2019-01-14T09:36:34
| 45,985,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
import re
re_numbers_str=re.compile(r'\d+')
re_words_str=re.compile(r'\w+')
re_numbers_bytes=re.compile(rb'\d+')
re_words_bytes=re.compile(rb'\w+')
text_str = ("Ramanujan saw \u0be7\u0bed\u0be8\u0bef"
            " as 1729 = 1³ + 12³ = 9³ + 10³.")
text_bytes=text_str.encode('utf-8')
print('Text',repr(text_str),sep='\n ')
print('Numbers')
print(' str :',re_numbers_str.findall(text_str))
print(' bytes:',re_numbers_bytes.findall(text_bytes))
print('Words')
print(' str :',re_words_str.findall(text_str))
print(' bytes:',re_words_bytes.findall(text_bytes))
|
[
"1251284167@qq.com"
] |
1251284167@qq.com
|
500a278b3b7ad4eec72f966d268dd97f323318c0
|
17e3234ab01fd93233cc453f1495d50424c3bd8f
|
/latte/monkey_patches/frappe/utils/nestedset.py
|
9591e8132e74d38895ced01c31b24efbff611f16
|
[
"MIT"
] |
permissive
|
sunnyakaxd/latte
|
8943dbf70ce934e04e51b147a54e6dd02dfe43db
|
de74065122a1f858bd75f8e1a36fca3b23981f4c
|
refs/heads/master
| 2023-06-11T10:25:31.217047
| 2021-07-06T06:40:19
| 2021-07-06T06:40:19
| 383,363,137
| 0
| 0
|
NOASSERTION
| 2021-07-06T06:26:49
| 2021-07-06T06:26:49
| null |
UTF-8
|
Python
| false
| false
| 4,149
|
py
|
import frappe
from frappe import local
from frappe.utils import nestedset
import random
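# These monkey patches replace frappe's integer lft/rgt renumbering with
# floating-point bounds: a node is assigned random lft/rgt values strictly inside
# its parent's (lft, rgt) interval (roots live inside roughly +/-10**12), so adding
# or moving a node only rewrites that node and its descendants instead of shifting
# the lft/rgt of every node to its right.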
def patched_update_add_node(doc, parent, parent_field):
print('Parent=', parent, not not parent)
if not parent:
parent_lft = frappe.db.sql(f'''
select
rgt
from
`tab{doc.doctype}`
where
name != %(name)s
order by
rgt desc
limit 1
''', {
'name': doc.name,
})
if parent_lft:
parent_lft = parent_lft[0][0]
else:
parent_lft = -10.0**12
parent_rgt = 10.0**12
else:
parent_lft, parent_rgt = frappe.db.get_value(doc.doctype, parent, ['lft', 'rgt'])
add_child(doc.doctype, doc.name, parent_lft, parent_rgt)
def patched_update_move_node(doc, parent_field):
'''
is called when op!=p
so just add_child under p with doc's details
'''
parent_rgt = frappe.db.get_value(doc.doctype, doc.get(parent_field), 'rgt')
tree_lft = local.db.sql(f'''
select
rgt
from
`tab{doc.doctype}`
where
rgt < %(parent_rgt)s
order by
rgt desc
limit
1
''', {
'parent_rgt': parent_rgt,
})[0][0]
add_child(doc.doctype, doc.name, tree_lft, parent_rgt)
# called in the on_update method
def update_nsm(doc):
# get fields, data from the DocType
opf = 'old_parent'
pf = "parent_" + frappe.scrub(doc.doctype)
if hasattr(doc,'nsm_parent_field'):
pf = doc.nsm_parent_field
if hasattr(doc,'nsm_oldparent_field'):
opf = doc.nsm_oldparent_field
p, op = doc.get(pf) or None, doc.get(opf) or None
# has parent changed (?) or parent is None (root)
if not doc.lft and not doc.rgt:
patched_update_add_node(doc, p or '', pf)
elif op != p:
patched_update_move_node(doc, pf)
# set old parent
doc.set(opf, p)
frappe.db.set_value(doc.doctype, doc.name, opf, p or '', update_modified=False)
doc.reload()
def add_child(doctype, docname, left, right):
print('add_child running for', doctype, docname, left, right)
PRECISION = -9
safe_left = left + 10**PRECISION
safe_right = right - 10**PRECISION
lft = _getsafeval(safe_left, safe_right, retry=True)
rgt = _getsafeval(lft, safe_right, retry=True)
frappe.log_error(f'''{doctype=} {docname=} {left=} {right=} {lft=} {rgt=}''', title=f'Add child for {doctype}')
if lft == rgt:
frappe.throw(f'''INVALID VALUES FOR {lft=} {rgt=}''')
frappe.db.sql(f"""
update
`tab{doctype}`
set
lft = %(lft)s
, rgt = %(rgt)s
where
name = %(docname)s
""", {
'lft': lft,
'rgt': rgt,
'docname': docname,
}, debug=1)
for descendent in get_immediate_descendents(doctype, docname):
add_child(doctype, descendent.name, lft, rgt)
def get_immediate_descendents(doctype, docname):
return frappe.get_all(doctype, filters={
f'parent_{frappe.scrub(doctype)}': docname
})
def _getsafeval(left, right, retry=False):
'''
Returns a mid value in the given range
param retry:
-----------
default: False. if set to true, ensures that mid value does not equal left/right
'''
PRECISION = -9
inf_loop_safe_counter = 4
if retry:
while inf_loop_safe_counter > 0:
inf_loop_safe_counter -= 1
mid = random.uniform(left, right)
if mid == left or mid == right:
safe_left = left + 10**PRECISION
safe_right = right - 10**PRECISION
return _getsafeval(safe_left, safe_right)
else:
return mid
return random.uniform(left, right)
def rebuild_node(doctype, parent, left, parent_field):
"""
reset lft, rgt and recursive call for all children
"""
# the right value of this node is the left value + 1
right = left+1
# get all children of this node
result = local.db.sql(f"SELECT name FROM `tab{doctype}` WHERE `{parent_field}`=%s", (parent))
for r in result:
right = rebuild_node(doctype, r[0], right, parent_field)
# we've got the left value, and now that we've processed
# the children of this node we also know the right value
local.db.sql(f"""
UPDATE `tab{doctype}`
SET
lft=%s,
rgt=%s
WHERE
name=%s
""", (left, right, parent))
#return the right value of this node + 1
return right+1
# nestedset.update_move_node = patched_update_move_node
# nestedset.update_nsm = update_nsm
# nestedset.update_add_node = patched_update_add_node
# nestedset.rebuild_node = rebuild_node
|
[
"himanshu.mishra@elastic.run"
] |
himanshu.mishra@elastic.run
|
a77276dcfe2ce85fc3ab7ed8035cd1fa03b503a8
|
1d91044544ea0a4772b379cfbb3c23f66adaeeff
|
/6_7.py
|
c44606d3be7e36a32cc3b54feb06386c8ce254d6
|
[] |
no_license
|
rursvd/pynumerical
|
f87d6e72b9178caf926a15503d9a2310c4642677
|
863e9722d273fd1f2dce5d917e3d4d9ad71aba4d
|
refs/heads/master
| 2021-01-22T20:49:09.154726
| 2017-08-19T07:50:34
| 2017-08-19T07:50:34
| 100,776,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
from numpy import array
A = array([[1,2,3],[4,5,6],[7,8,9]])
mult = A * 10.0
divi = A/2.0
print('A*10 = ',mult)
print('A/2 = ',divi)
|
[
"noreply@github.com"
] |
rursvd.noreply@github.com
|
f45d92ca761ba0fa9220db55616cf8ccb0ab5b82
|
33acbd324a87d704691831ab3c57b972f52de552
|
/web_flask/5-number_template.py
|
3a5a85b24d643ee91090b9e7c618b5bd5dba308f
|
[] |
no_license
|
bean710/AirBnB_clone_v2
|
83cb57be4a135a08ef0c9a1f8f782323157c1420
|
97369b0b8a52f0622bf065e1ae48f2f4b2e33183
|
refs/heads/master
| 2021-05-17T01:11:26.670973
| 2020-04-23T00:14:53
| 2020-04-23T00:14:53
| 250,550,752
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,158
|
py
|
#!/usr/bin/python3
"""This module starts a simple flask server"""
from flask import Flask
from flask import render_template
app = Flask(__name__)
@app.route("/", strict_slashes=False)
def hello_world():
"""Sends simple text"""
return ("Hello HBNB!")
@app.route("/hbnb", strict_slashes=False)
def hello_hbnb():
"""Sends simple text"""
return ("HBNB")
@app.route("/c/<text>", strict_slashes=False)
def dyn_text(text):
"""Returns dynamic text"""
return ("C {}".format(text.replace("_", " ")))
@app.route("/python/<text>", strict_slashes=False)
@app.route("/python/", strict_slashes=False, defaults={"text": "is_cool"})
def opt_text(text="is_cool"):
"""Returns optional dynamic text"""
return("Python {}".format(text.replace("_", " ")))
@app.route("/number/<int:n>", strict_slashes=False)
def is_num(n):
"""Returns if n is a number"""
return ("{} is a number".format(n))
@app.route("/number_template/<int:n>", strict_slashes=False)
def html_num(n):
"""Returns an HTML template"""
return (render_template("5-number.html", name=n))
if __name__ == "__main__":
app.run(host="0.0.0.0", port="5000")
|
[
"keener4christ@gmail.com"
] |
keener4christ@gmail.com
|
fffd473437f434f2c87334c2be9ee615692c4749
|
cec8eb058a730185b1a3845aaf1fa743a3b41074
|
/sql/user_detail.py
|
991f6435f57b3516a96bb9e9d5f78c2244bcab9d
|
[] |
no_license
|
chinatian/python-orm
|
c9842e826829be2e3bcea7b2e25fbdb51d39b654
|
705d205b7ee9d41d37dc5ec44f7c5434869f7002
|
refs/heads/master
| 2020-12-24T14:36:06.505669
| 2019-03-29T03:15:21
| 2019-03-29T03:15:21
| 41,459,016
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 912
|
py
|
#-*- coding: utf-8 -*-
import MySQLdb
for d in range(16):
db=MySQLdb.connect(
host="localhost",
user="root",
passwd="sa",
db="platform_test_%d" % d,
use_unicode=True,
charset="utf8")
for t in range(16):
c = db.cursor()
c.execute("""
CREATE TABLE `user_detail_%d` (
`id` BIGINT NOT NULL,
`user_id` BIGINT NOT NULL,
`field_name` VARCHAR(32) NOT NULL,
`field_value` VARCHAR(127) NULL,
`updated_at` DATETIME NOT NULL,
`created_at` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
INDEX `user_id_index` (`user_id` ASC) )
""" % t)
c.execute("INSERT INTO seq (id, tb) VALUES (%s, %s)", (16*t+d, 'user_detail_%d' % t))
c.close()
db.commit()
db.close()
|
[
"tian_2200@163.com"
] |
tian_2200@163.com
|
3ecb8352186ee5ef15d5b9cc7ca20990ac371643
|
07060cca582c9c4af00846d102e75daff5771ad9
|
/quiz_brain.py
|
e50f38091617e023e757cbd0ce9f995af7598508
|
[] |
no_license
|
khadada/Quiz_game
|
0d0700f0a7272aa270ecc75ad6a935d36f268546
|
53e0e8801c2da367fc67cdbe7718f9aaa802d62f
|
refs/heads/master
| 2023-09-05T05:58:25.146725
| 2021-11-09T14:30:50
| 2021-11-09T14:30:50
| 426,261,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,445
|
py
|
class QuizBrain:
def __init__(self,question_list):
self.question_list = question_list
self.question_number = 0
self.score = 0
self.name = input("What's your name: ").title()
def next_question(self):
current_question = self.question_list[self.question_number]
self.question_number += 1
question = current_question.question
print('-'*40)
print(f'The difficulty of this quiz is: {current_question.difficulty}')
print('-'*40)
number_list = 1
print(f"Q.{self.question_number}: {question} ")
print(f'Choose the correct answer: ')
for choice in current_question.generete_list:
print(f'[{number_list}]: {choice}. ')
number_list += 1
print(f'pssssssssss correct answer is: {current_question.correct_answer}') # for testing my code remove it later on
user_answer = input('Type your answer here: -> ').title()
self.check_correct_answer(user_answer,current_question.correct_answer)
def has_question_left(self):
return self.question_number < len(self.question_list)
def check_correct_answer(self,user_answer,correct_answer):
if user_answer == correct_answer.title():
print(f'You get it right!')
self.score += 1
else:
print(f'Your answer is wrong!')
print(f'Your score: {self.score} / {len(self.question_list)}')
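
# Driver sketch (added for illustration; the real Question model lives in a
# separate module of this repo). The stand-in class below only mimics the
# attributes QuizBrain reads: question, difficulty, generete_list, correct_answer.
if __name__ == "__main__":
    class Question:  # hypothetical stand-in, not the repo's actual model
        def __init__(self, question, correct_answer, choices, difficulty="easy"):
            self.question = question
            self.correct_answer = correct_answer
            self.generete_list = choices  # name kept to match QuizBrain above
            self.difficulty = difficulty

    bank = [
        Question("2 + 2 = ?", "4", ["3", "4", "5"]),
        Question("Capital of France?", "Paris", ["Paris", "Rome", "Madrid"]),
    ]
    quiz = QuizBrain(bank)
    while quiz.has_question_left():
        quiz.next_question()
    print(f"Final score for {quiz.name}: {quiz.score}/{len(bank)}")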
|
[
"lkhadada@gmail.com"
] |
lkhadada@gmail.com
|
268d409fe9f3dd81a0b2a795d6b0bf4f5dd4f08c
|
b43e0ce4be9091e0f442de1a8e8db785a562195a
|
/intro_to_data_science/practice_1/playing_with_pandas.py
|
291d64b20dbb5d8bbaf60fb60cff907757400459
|
[] |
no_license
|
wmorris75/Python_Data_Science
|
e7d08d150b536a01ad5c8f957c6c23f10be00b6d
|
99e9a1eb418af35df21e8e00b8653e8223ce4a61
|
refs/heads/master
| 2021-01-23T06:01:08.988390
| 2017-06-02T15:06:24
| 2017-06-02T15:06:24
| 93,005,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,921
|
py
|
import pandas as pd
'''
The following code is to help you play with the concept of Series in Pandas.
You can think of Series as an one-dimensional object that is similar to
an array, list, or column in a database. By default, it will assign an
index label to each item in the Series ranging from 0 to N, where N is
the number of items in the Series minus one.
Please feel free to play around with the concept of Series and see what it does
*This playground is inspired by Greg Reda's post on Intro to Pandas Data Structures:
http://www.gregreda.com/2013/10/26/intro-to-pandas-data-structures/
'''
# Change False to True to create a Series object
if False:
series = pd.Series(['Dave', 'Cheng-Han', 'Udacity', 42, -1789710578])
print series
'''
You can also manually assign indices to the items in the Series when
creating the series
'''
# Change False to True to see custom index in action
if True:
series = pd.Series(['Dave', 'Cheng-Han', 359, 9001],
index=['Instructor', 'Curriculum Manager',
'Course Number', 'Power Level'])
print series
'''
You can use index to select specific items from the Series
'''
# Change False to True to see Series indexing in action
if True:
series = pd.Series(['Dave', 'Cheng-Han', 359, 9001],
index=['Instructor', 'Curriculum Manager',
'Course Number', 'Power Level'])
print series['Instructor']
print ""
print series[['Instructor', 'Curriculum Manager', 'Course Number']]
'''
You can also use boolean operators to select specific items from the Series
'''
# Change False to True to see boolean indexing in action
if True:
cuteness = pd.Series([1, 2, 3, 4, 5], index=['Cockroach', 'Fish', 'Mini Pig',
'Puppy', 'Kitten'])
print cuteness > 3
print ""
print cuteness[cuteness > 3]
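
'''
You can also construct a Series directly from a Python dictionary;
the keys become the index. (Extra playground block added for illustration,
following the same pattern as the examples above.)
'''
# Change False to True to see dict-based construction in action
if False:
    d = {'Instructor': 'Dave', 'Curriculum Manager': 'Cheng-Han',
         'Course Number': 359, 'Power Level': 9001}
    series = pd.Series(d)
    print(series)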
|
[
"wmorris@charterschoolsusa.com"
] |
wmorris@charterschoolsusa.com
|
2c55c872587964789335b2f7feba506100a1ff33
|
11d3985fdb0a3a2bee032de68616174c9c2d0884
|
/PYQT/pyqt_3_message_box.py
|
17e4647daed37919b62996f9a2ea32f246238c53
|
[
"MIT"
] |
permissive
|
dogancantorun8/python-application
|
780850aa79a855b87aa064a817caeb2608b43de5
|
3ef972e52bb6950108cde36974ceaf5c3cde3667
|
refs/heads/main
| 2023-04-15T11:20:56.187416
| 2021-04-27T11:50:04
| 2021-04-27T11:50:04
| 306,041,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,893
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 14 18:26:00 2021
@author: Doğancan
"""
# If I want a message box to appear when the button is pressed:
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
class MainWindow(QWidget):
def __init__(self):
super().__init__()
self.pushButtonOk = QPushButton('Ok', self)
self.pushButtonOk.move(100, 100)
self.pushButtonOk.resize(QSize(100, 100))
self.pushButtonOk.clicked.connect(self.buttonOkHandler)
def buttonOkHandler(self):
        mb = QMessageBox(self) # with QMessageBox(None) it appears independently of the parent window
mb.setWindowTitle('Test')
mb.setText('Are you sure?')
        mb.exec() # exec() must be called for every message box
app = QApplication(sys.argv)
mainWindow = MainWindow()
mainWindow.show()
app.exec()
## If I want some standard buttons to appear in the dialog window:
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
class MainWindow(QWidget):
def __init__(self):
super().__init__()
self.pushButtonOk = QPushButton('Ok', self)
self.pushButtonOk.move(100, 100)
self.pushButtonOk.resize(QSize(100, 100))
self.pushButtonOk.clicked.connect(self.buttonOkHandler)
def buttonOkHandler(self):
mb = QMessageBox(None)
        mb.setWindowTitle('Test') # title of the dialog window
        mb.setText('Are you sure?') # the text inside the message box
        mb.setStandardButtons(QMessageBox.Yes|QMessageBox.No|QMessageBox.Cancel)
        mb.setDefaultButton(QMessageBox.No) # sets which button is selected by default
result = mb.exec()
        # this if block determines which button closed the dialog
if result == QMessageBox.Yes:
print('Yes')
elif result == QMessageBox.No:
print('No')
else:
print('Cancel')
app = QApplication(sys.argv)
mainWindow = MainWindow()
mainWindow.show()
app.exec()
# A simpler way to build the message box above is shown below
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
class MainWindow(QWidget):
def __init__(self):
super().__init__()
self.pushButtonOk = QPushButton('Ok', self)
self.pushButtonOk.move(100, 100)
self.pushButtonOk.resize(QSize(100, 100))
self.pushButtonOk.clicked.connect(self.buttonOkHandler)
def buttonOkHandler(self):
        result = QMessageBox.information(None, 'Test', 'Are you sure?', QMessageBox.Yes|QMessageBox.No|QMessageBox.Cancel, QMessageBox.Cancel)
if result == QMessageBox.Yes:
print('Yes')
elif result == QMessageBox.No:
print('No')
else:
print('Cancel')
app = QApplication(sys.argv)
mainWindow = MainWindow()
mainWindow.show()
app.exec()
|
[
"48547417+dogancantorun8@users.noreply.github.com"
] |
48547417+dogancantorun8@users.noreply.github.com
|
41d2eb65eb826173f2fae38d0186f32ccc4580ed
|
34156142ba1e4cf4ece3a1d71f66b2784324eb97
|
/3_retrainfixedrlrlam/noRegu/log5.py
|
103f9fda1b58d4cd3cb7f15baab9e8552d21884e
|
[] |
no_license
|
Richardych/HW_recommendationsys
|
c0a4732d499f817c143c3e4efab2da13d9c47f4c
|
0fc2bf6ea0573ef56a3ce0a96f24eb4f4c5fabb3
|
refs/heads/master
| 2021-09-07T14:57:53.427276
| 2018-02-24T12:21:51
| 2018-02-24T12:21:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
import logging
logger = logging.getLogger('mylogger5')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('monitor5.log')
fh.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
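
# Usage sketch (added for illustration): with the handlers configured above,
# INFO and higher goes to both monitor5.log and the console, while DEBUG
# records are dropped by both handlers even though the logger level is DEBUG.
if __name__ == '__main__':
    logger.debug('not written anywhere (both handlers are set to INFO)')
    logger.info('written to monitor5.log and echoed to the console')
    logger.error('errors are logged too, with timestamp, name and level')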
|
[
"yuchaohui@megvii.com"
] |
yuchaohui@megvii.com
|
cef46687f6b1acb654ce8b4b43221b200bd04db3
|
c328fb0b0794bbc113d1f560ad8cc4e3d118e90a
|
/doc/source/conf.py
|
2e63e9a070655ba354b80587cb3faf7e95d79973
|
[
"Apache-2.0"
] |
permissive
|
mtreinish/pysubunit
|
d4c3d3fb966e6aa9da0f2647010a8864f86cf08e
|
5588ae7308dd51abb748212373eec00652179504
|
refs/heads/master
| 2021-08-23T12:59:10.423987
| 2017-12-04T23:30:11
| 2017-12-04T23:30:11
| 112,677,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,840
|
py
|
# -*- coding: utf-8 -*-
#
# pysubunit documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# Enable todos in the output
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pysubunit'
copyright = u'2017, Matthew Treinish'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'trunk'
# The full version, including alpha/beta/rc tags.
release = 'trunk'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysubunitdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pysubunit.tex', u'pysubunit Documentation',
u'Matthew Treinish', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pysubunit', u'pysubunit Documentation',
[u'Matthew Treinish'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pysubunit', u'pysubunit Documentation',
u'pysubunit Contributors', 'pysubunit', '',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
[
"mtreinish@kortar.org"
] |
mtreinish@kortar.org
|
45b40923152a8285e97189d52f1fbb9fb92ec503
|
fdf3b7185d3aae13106491329a00b34171fb4def
|
/post_bot.py
|
c45f5f1f55dc48875c7379488935d56a9492fa11
|
[
"MIT"
] |
permissive
|
lacymorrow/rbot
|
7911df075419e995999d14fcd91af79a2cc3a0af
|
4b401f93b6de62008102fd96b75f8e18b57eef43
|
refs/heads/master
| 2021-05-08T02:22:15.898002
| 2019-10-20T02:24:04
| 2019-10-20T02:24:04
| 108,056,243
| 1
| 0
|
MIT
| 2019-10-20T02:24:05
| 2017-10-24T00:25:47
|
Python
|
UTF-8
|
Python
| false
| false
| 7,747
|
py
|
######
# Todo:
# Language processing
# Don't self repost on same sub
# Post Image
# Video/gif(v)
import os
import time
import datetime
import random
import requests
# Required in requirements.txt
import gensim
import nltk
import praw
nltk.download('punkt')
# import configuration files
import config
import subs
##
## 1) REPOST OLD, POPULAR POSTS FROM SUBS EVERY <delay>
## 2) REPOST RELEVANT POPULAR COMMENT TO A REPOSTED QUESTION
# Check new posts in popular subs for something similar to a popular repost. Find a high comment from the original, paraphrase, and repost
# Find a hot post not on all and xrosspost to similar sub
########### (first sub on sidebar?)
# check hottest posts of all-time, repost as new
########### Paraphrase
########### Change ownership
# random subreddits
# rply to comments
# If True, will CREATE LIVE REDDIT POSTS
SEND_FLAG = True
# Config
SORT_DATE = 'month' # 'week', 'month', etc. via reddit
POST_DELAY = 10 # minutes between posts; sleep() below treats this value as minutes
SEARCH_LIMIT = 50 # Posts within subs that are searched Max 1000
AGE_THRESHOLD = 30 # Default 25 days (considered a month); 20-25 is active
EXECUTION_TOTAL = 500 # TOTAL APPLICATION RUN COUNT
BANNED_WORDS = ['[OC]', 'OC', 'test']
# PERSONAL_SUBS = ['test', 'me_irl', 'lgbqt',] # 'askreddit', 'askwomen', 'askmen']
# ACTIVE_SUBS = ['test']
ACTIVE_SUBS = subs.most_subscribed_subs
# NEWS SUBS
NEWS_SUBS = ['usanews', 'worldnews', 'politics', 'neutralpolitics', 'uncensorednews']
NO_REPOST_SUBS = ['nottheonion', 'personalfinance']
# 'trees' = 1 karman
# 'Futurology' = 5 day old
# 'technology' = 5 days old
# 'youtubehaiku' = 5 daays old
WAIT_SUBS = ['trees']
BANNED_SUBS = ['pics', 'gifs', 'nosleep', 'bodyweightfitness', 'CrappyDesign', 'Overwatch',
	'personalfinance', # trolling/plagiarism
	'hearthstone', 'tattoos', 'pokemon', 'futurology', 'technology', 'worldnews', 'youtubehaiku'] + WAIT_SUBS
preface = ''
# Skip banned subs
was = [x.lower() for x in ACTIVE_SUBS]
wns = [x.lower() for x in NEWS_SUBS]
wbs = [x.lower() for x in BANNED_SUBS]
working_active_subs = [x for x in was if x not in wbs]
working_news_subs = [x for x in wns if x not in wbs]
# Other Subs to consider to widen the net
## print "subscribed to " str(subscribed)
## print "most subscribers" + str(subs.most_subscribed_subs)
## print "most activity" + str(subs.most_active_subs)
# Monitors incoming submissions
# Get the newest submissions from a subreddit ('all')
# for submission in r.subreddit('askreddit').stream.submissions():
# print(submission)
def bot_login():
print "Logging in..."
r = praw.Reddit(username = config.username,
password = config.password,
client_id = config.client_id,
client_secret = config.client_secret,
user_agent = config.user_agent)
return r
def run_bot(r):
# random.choice(ACTIVE_SUBS) # number is how far from top post to use (0 is default/top)
## Todo, active sub may be repeated/ not removed
active_sub = random.choice(working_active_subs)
news_sub = random.choice(working_news_subs)
print '\n ** RUNNING **\n'
print 'TARGETING ACTIVE SUB: ' + active_sub
print 'TARGETING NEWS SUB: ' + news_sub
# get top OLD posts from working subs (default ALL TIME) (POSTS that stood test of time)
posts = [] # posts old enough to consider for reposting
top_posts = r.subreddit(active_sub).top(SORT_DATE, limit=SEARCH_LIMIT)
for post in top_posts:
if post.shortlink:
# get post age delta
post.age = datetime.date.today() - datetime.date.fromtimestamp(post.created)
# print max(post.age.days, key=lambda item: item[1])
# check if post is old enough to warrant a repost (don't repost brand new posts, better for xposts)
if(int(post.age.days) > AGE_THRESHOLD):
post = restyle_post(post)
print post.shortlink
posts.append(post)
# # # repost top stories immediately
# news_posts = r.subreddit(news_sub).new()
# for post in news_posts:
# if post.shortlink:
# post = restyle_post(post)
# posts.append(post)
# optionally, randomize;
create_posts(r, posts[:-1])
# POST
def create_posts(r, posts):
for post in posts:
# Truncate post title in console
pretty_post_title = pretty_text(post.title)
try:
# check if link or text post
if post.selftext:
# text post
print "\n\n********Text Post:\n" + post.subreddit.display_name + "\nPost Title: \n" + post.title
if(SEND_FLAG):
submission = r.subreddit(post.subreddit.display_name).submit(preface + post.title, post.selftext)
print "URL: " + submission.url
sleep(POST_DELAY)
else:
debug('No data will be sent.')
elif post.url:
# link post
print "\n\n********Link Post: \n" + post.subreddit.display_name + "\nPost Title: " + pretty_post_title + "\nURL: " + post.url
if(SEND_FLAG):
submission = r.subreddit(post.subreddit.display_name).submit(preface + post.title, url=post.url )
print "URL: " + submission.url
sleep(POST_DELAY)
else:
debug('No data will be sent.')
else:
debug("NNAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA!!!! \nyou broke me...")
print "******\nReposting: " + pretty_post_title + "\nAge: " + str(post.age.days) + " days \nSubreddit: " + post.subreddit.display_name
# credits
print "#####" + str(post.author)
        # We've been banned or rate-limited: parse the wait time (in minutes)
        # from the error message and sleep before trying the next post.
        except praw.exceptions.APIException as e:
            s = str(e)
            print "APIException PRAW: " + s
            if "SUBREDDIT_NOTALLOWED" in s:
                print "Private Sub"
            digits = filter(str.isdigit, s)
            if not digits:
                exit(1)
            sleep(int(digits))
except AttributeError as e:
print "AttributeError: " + str(e)
exit(1)
except AssertionError as e:
print "AssertionError: " + str(e)
exit(1)
except Exception as e:
            print "Error: " + str(e)
exit(1)
except:
print "Uncaught Error"
exit(1)
def restyle_post(post):
for word in BANNED_WORDS:
post.title = post.title.replace(word, '')
return post
def pretty_text(text):
return (text[:75] + '...') if len(text) > 75 else text
def sleep(mins):
if mins > 11:
mins = 2 #prevent seconds parsed as minutes
if mins == 1:
mins = 2
print "Sleep " + str(mins) + " minutes..."
time.sleep(mins * 60)
def debug(msg):
print "DEBUGGING....\n" + msg
print "NOT SENDING LIVE DATA"
execution_count = 0
r = bot_login()
while execution_count < EXECUTION_TOTAL:
print "### Iteration ", str(execution_count) + " ###\n"
run_bot(r)
execution_count += 1
# get subscribed subreddits
# subscribed = list(r.user.subreddits(limit=None))
|
[
"me@lacymorrow.com"
] |
me@lacymorrow.com
|
ac8a5bedf7559d486b7c1d9412b4342e3d4234d6
|
a72e17b0f45297d403160ab1ae7569c1fbe2dd52
|
/inventarios/SaveInventario.py
|
7720823cc32601849a49e6a6e4e6a0e327971488
|
[] |
no_license
|
mmejia/systra
|
649f2ffde5d62ecbec0e1e4d865be7f0fbbeb7f9
|
fcee7e7421422631a38c3a023a00939e6d7f88dc
|
refs/heads/master
| 2020-12-24T18:03:37.347805
| 2014-01-23T02:50:20
| 2014-01-23T02:50:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,470
|
py
|
from django.db import IntegrityError, transaction
from json import dumps, loads, JSONEncoder
from inventarios.models import *
import logging
from datetime import date
log = logging.getLogger(__name__)
class SaveInventario:
def __init__(self,request):
inventario_str=request.POST.get('data')
self.inv= loads(inventario_str)
def do(self):
inve= Inventario()
inventario= self.inv
fi=inventario.get('folio_del_inventario',0)
if fi== 0 or fi =='':
fi=0
inve.folio_inventario=fi
f_inf=inventario.get('folio_infraccion',0)
if f_inf == 0 or f_inf == '':
f_inf=0
inve.folio_infraccion=f_inf
fa= inventario.get('folio_accidente',0)
if fa == 0 or fa== '':
fa=0
inve.folio_accidente=fa
inve.delegacion=inventario.get('delegacion',"")
inve.comandancia=inventario.get('comandancia',"")
inve.turno=inventario.get('turno',"")
inve.motivo_de_recojimiento=inventario.get('motivo_de_recojimiento',"")
inve.tipo_servicio=inventario.get('tipo_servicio',"")
inve.folio_agente=inventario.get('folio_agente',"")
inve.nombre_agente=inventario.get('nombre_agente',"")
inve.fecha_incid=inventario.get('fecha_incidente')
inve.fecha_captura=date.today()
inve.pension_hora=inventario.get('hora_entrada')
inve.pension_fecha=inventario.get('fecha_entrada')
inve.calle1=inventario.get('calle1',"")
inve.calle2=inventario.get('calle2',"")
inve.colonia=inventario.get('colonia',"")
inve.interior=inventario.get('num_interior',"")
inve.exterior=inventario.get('exterior',"")
inve.depositado_en=inventario.get('depositado_en',"")
inve.autoridad_competente=inventario.get('autoridad_competente',"")
inve.clas_vehiculo=inventario.get('clas_vehiculo',"")
inve.tipo=inventario.get('tipoveh',"")
inve.marca=inventario.get('marca',"")
inve.submarca=inventario.get('submarca',"")
inve.veh_modelo=inventario.get('modelo',"")
inve.veh_color=inventario.get('color',"")
inve.veh_color_detalle=inventario.get('detalle',"")
inve.veh_placas=inventario.get('placas',"")
inve.veh_serie=inventario.get('serie',"")
inve.numero_economico=inventario.get('numero_economico',"")
inve.infractor_nombre=inventario.get('conductor_nombre',"")
inve.infractor_apepaterno=inventario.get('apellido_paterno',"")
inve.infractor_apematerno=inventario.get('apellido_materno',"")
inve.pension_lugar=inventario.get('lugar',"")
inve.pension_recibido_por=inventario.get('recibido_por',"")
inve.pension_transportado_por=inventario.get('vehiculo_transportado_por',"")
inve.pension_transportado_pension=inventario.get('vehiculo_transportado_pension',"")
inve.pension_transportado_unidad_transito=inventario.get('vehiculo_transportado_unidad_transito',"")
inve.usuario=inventario.get('usuario',"")
inve.agencia_mp=inventario.get('mp',0)
inve.activo=1
inve.corporacion=inventario.get('corporacion',"")
l =Inventario.objects.filter(folio_inventario__exact=inve.folio_inventario)
lista= list(l)
if len(lista)>0:
res='{"ERROR":' + '"SE INTENTA AGREGAR UN INVENTARIO QUE SU FOLIO YA EXISTE"}'
return res
frente= inventario.get('frente',None)
if frente !=None:
id= frente.get('id',0)
nombre=frente.get('nombre','')
inve.frente=id
else:
inve.frente=0
posterior=inventario.get('posterior',None)
if posterior !=None:
id= posterior.get('id',0)
nombre=posterior.get('nombre','')
inve.posterior=id
else:
inve.posterior=0
inve.save()
res= "{\"id\":\"" + str(inve.id) + "\"}"
return res
|
[
"mmejia_mmm@hotmail.com"
] |
mmejia_mmm@hotmail.com
|
f2d39ed43e60f086c5bb5287b29282c5a8d53124
|
cf455d62eeb4a1e4364b87067d0c9c9040242339
|
/performance_model/utils.py
|
241f53ba44797148727e25021baeec9168bdcfb1
|
[] |
no_license
|
WenqiJiang/SIGMOD-2022-submission-Fanns
|
fb3c33336197a9e732f901106166e9e3e1284fb7
|
36e947985c7f446b63821c3238f7785bca5241fb
|
refs/heads/main
| 2023-06-12T12:46:57.835530
| 2021-07-01T23:14:14
| 2021-07-01T23:14:14
| 381,019,262
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,087
|
py
|
""" Helper functions & Unit test """
from constants import *
def max_of_two(a, b):
if a >= b:
return a
else:
return b
def max_of_three(a, b, c):
if a >= b:
if a >= c:
return a
else:
return c
else:
if b >= c:
return b
else:
return c
def get_bottleneck(perf_resource_dict_list, PE_num_list):
"""
Given a list of stages (each stage is a perf_resource_dict),
return (a) which stage (ID in the list) is the bottleneck
(b) the overall accelerator QPS
"""
min_QPS = 9999999999
min_QPS_ID = 0
for i, perf_resource_dict in enumerate(perf_resource_dict_list):
if perf_resource_dict["QPS"] * PE_num_list[i] < min_QPS:
min_QPS = perf_resource_dict["QPS"] * PE_num_list[i]
min_QPS_ID = i
assert min_QPS != 9999999999
accelerator_QPS = min_QPS
return min_QPS_ID, accelerator_QPS
def resource_consumption_A_less_than_B(
perf_resource_dict_list_A, PE_num_list_A,
perf_resource_dict_list_B, PE_num_list_B):
consumed_HBM_bank_A = 0
consumed_BRAM_18K_A = 0
consumed_DSP48E_A = 0
consumed_FF_A = 0
consumed_LUT_A = 0
consumed_URAM_A = 0
consumed_HBM_bank_B = 0
consumed_BRAM_18K_B = 0
consumed_DSP48E_B = 0
consumed_FF_B = 0
consumed_LUT_B = 0
consumed_URAM_B = 0
for i, perf_resource_dict in enumerate(perf_resource_dict_list_A):
consumed_HBM_bank_A = consumed_HBM_bank_A + perf_resource_dict["HBM_bank"] * PE_num_list_A[i]
consumed_BRAM_18K_A = consumed_BRAM_18K_A + perf_resource_dict["BRAM_18K"] * PE_num_list_A[i]
consumed_DSP48E_A = consumed_DSP48E_A + perf_resource_dict["DSP48E"] * PE_num_list_A[i]
consumed_FF_A = consumed_FF_A + perf_resource_dict["FF"] * PE_num_list_A[i]
consumed_LUT_A = consumed_LUT_A + perf_resource_dict["LUT"] * PE_num_list_A[i]
consumed_URAM_A = consumed_URAM_A + perf_resource_dict["URAM"] * PE_num_list_A[i]
for i, perf_resource_dict in enumerate(perf_resource_dict_list_B):
consumed_HBM_bank_B = consumed_HBM_bank_B + perf_resource_dict["HBM_bank"] * PE_num_list_B[i]
consumed_BRAM_18K_B = consumed_BRAM_18K_B + perf_resource_dict["BRAM_18K"] * PE_num_list_B[i]
consumed_DSP48E_B = consumed_DSP48E_B + perf_resource_dict["DSP48E"] * PE_num_list_B[i]
consumed_FF_B = consumed_FF_B + perf_resource_dict["FF"] * PE_num_list_B[i]
consumed_LUT_B = consumed_LUT_B + perf_resource_dict["LUT"] * PE_num_list_B[i]
consumed_URAM_B = consumed_URAM_B + perf_resource_dict["URAM"] * PE_num_list_B[i]
# Priority: LUT is the most important one
if consumed_LUT_A < consumed_LUT_B:
return True
else:
return False
def fit_resource_constraints(perf_resource_dict_list, PE_num_list, count_shell=False):
"""
Given a list of stages (each stage is a perf_resource_dict),
return whether it is within the resource constraint
"""
consumed_HBM_bank = 0
consumed_BRAM_18K = 0
consumed_DSP48E = 0
consumed_FF = 0
consumed_LUT = 0
consumed_URAM = 0
for i, perf_resource_dict in enumerate(perf_resource_dict_list):
consumed_HBM_bank = consumed_HBM_bank + perf_resource_dict["HBM_bank"] * PE_num_list[i]
consumed_BRAM_18K = consumed_BRAM_18K + perf_resource_dict["BRAM_18K"] * PE_num_list[i]
consumed_DSP48E = consumed_DSP48E + perf_resource_dict["DSP48E"] * PE_num_list[i]
consumed_FF = consumed_FF + perf_resource_dict["FF"] * PE_num_list[i]
consumed_LUT = consumed_LUT + perf_resource_dict["LUT"] * PE_num_list[i]
consumed_URAM = consumed_URAM + perf_resource_dict["URAM"] * PE_num_list[i]
if count_shell:
consumed_HBM_bank += shell_consumption["HBM_bank"]
consumed_BRAM_18K += shell_consumption["BRAM_18K"]
consumed_DSP48E += shell_consumption["DSP48E"]
consumed_FF += shell_consumption["FF"]
consumed_LUT += shell_consumption["LUT"]
consumed_URAM += shell_consumption["URAM"]
if consumed_HBM_bank <= MAX_HBM_bank and consumed_BRAM_18K <= MAX_BRAM_18K and \
consumed_DSP48E <= MAX_DSP48E and consumed_FF <= MAX_FF and \
consumed_LUT < MAX_LUT and consumed_URAM < MAX_URAM:
return True
else:
return False
def get_resource_consumption(perf_resource_dict_list, PE_num_list, count_shell=False):
"""
Given a list of stages (each stage is a perf_resource_dict),
return the resource consumption dictionary
"""
consumed_HBM_bank = 0
consumed_BRAM_18K = 0
consumed_DSP48E = 0
consumed_FF = 0
consumed_LUT = 0
consumed_URAM = 0
for i, perf_resource_dict in enumerate(perf_resource_dict_list):
consumed_HBM_bank = consumed_HBM_bank + perf_resource_dict["HBM_bank"] * PE_num_list[i]
consumed_BRAM_18K = consumed_BRAM_18K + perf_resource_dict["BRAM_18K"] * PE_num_list[i]
consumed_DSP48E = consumed_DSP48E + perf_resource_dict["DSP48E"] * PE_num_list[i]
consumed_FF = consumed_FF + perf_resource_dict["FF"] * PE_num_list[i]
consumed_LUT = consumed_LUT + perf_resource_dict["LUT"] * PE_num_list[i]
consumed_URAM = consumed_URAM + perf_resource_dict["URAM"] * PE_num_list[i]
perf_resource_dict = dict()
if not count_shell:
perf_resource_dict["HBM_bank"] = consumed_HBM_bank
perf_resource_dict["BRAM_18K"] = consumed_BRAM_18K
perf_resource_dict["DSP48E"] = consumed_DSP48E
perf_resource_dict["FF"] = consumed_FF
perf_resource_dict["LUT"] = consumed_LUT
perf_resource_dict["URAM"] = consumed_URAM
else:
perf_resource_dict["HBM_bank"] = consumed_HBM_bank + shell_consumption["HBM_bank"]
perf_resource_dict["BRAM_18K"] = consumed_BRAM_18K + shell_consumption["BRAM_18K"]
perf_resource_dict["DSP48E"] = consumed_DSP48E + shell_consumption["DSP48E"]
perf_resource_dict["FF"] = consumed_FF + shell_consumption["FF"]
perf_resource_dict["LUT"] = consumed_LUT + shell_consumption["LUT"]
perf_resource_dict["URAM"] = consumed_URAM + shell_consumption["URAM"]
return perf_resource_dict
def get_utilization_rate(perf_resource_dict):
utilization_rate = dict()
utilization_rate["BRAM_18K"] = "{}%".format(perf_resource_dict["BRAM_18K"] / TOTAL_BRAM_18K * 100)
utilization_rate["DSP48E"] = "{}%".format(perf_resource_dict["DSP48E"] / TOTAL_DSP48E * 100)
utilization_rate["FF"] = "{}%".format(perf_resource_dict["FF"] / TOTAL_FF * 100)
utilization_rate["LUT"] = "{}%".format(perf_resource_dict["LUT"] / TOTAL_LUT * 100)
utilization_rate["URAM"] = "{}%".format(perf_resource_dict["URAM"] / TOTAL_URAM * 100)
return utilization_rate
def unit_test():
""" Print the options of each function unit """
print("\nget_priority_queue_info:\n")
perf_resource_dict = get_priority_queue_info(queue_len=32, N_insertion=8192)
print(perf_resource_dict)
print("\nget_bitonic_sort_16_info:\n")
perf_resource_dict = get_bitonic_sort_16_info(N_insertion=1e8/8192/64*32)
print(perf_resource_dict)
print("\nget_parallel_merge_32_to_16_info:\n")
perf_resource_dict = get_parallel_merge_32_to_16_info(N_insertion=1e8/8192/64*32)
print(perf_resource_dict)
option_list = get_options_preprocessing_OPQ()
print("\nget_options_preprocessing_OPQ:\n")
for option in option_list:
print(option)
print("\nget_options_stage_1_cluster_distance_computation:\n")
nlist_options = [2**10, 2**11, 2**12, 2**13, 2**14, 2**15, 2**16, 2**17, 2**18]
for nlist in nlist_options:
option_list = get_options_stage_1_cluster_distance_computation(nlist)
print("nlist={}".format(nlist))
for option in option_list:
print(option)
print("\nget_options_stage_2_select_Voronoi_cells:\n")
option_list = get_options_stage_2_select_Voronoi_cells(nlist=8192, nprobe=32)
for option in option_list:
print(option)
print("\nget_options_stage_3_distance_LUT_construction:\n")
option_list = get_options_stage_3_distance_LUT_construction(nprobe=32)
for option in option_list:
print(option)
print("\nget_options_stage_4_distance_estimation_by_LUT:\n")
option_list = get_options_stage_4_distance_estimation_by_LUT(
PE_num=63, nprobe=32, N_compute_per_nprobe=int(1e8/8192/63))
for option in option_list:
print(option)
# for a small amount of number being scanned, hierachical priority queue is not
# really an option
print("\nget_options_stage_5_sort_reduction:\n")
# for small number of Voronoi cells, only 2 level is required
print("nlist=8192, nprobe=32, nstreams=64")
option_list = get_options_stage_5_sort_reduction(
input_stream_num=64,
N_insertion_per_stream=int(1e8/8192*32/64))
for option in option_list:
print(option)
# for large number of Voronoi cells, 4 level is required
print("nlist=262144, nprobe=32, nstreams=64")
option_list = get_options_stage_5_sort_reduction(
input_stream_num=64,
N_insertion_per_stream=int(1e8/262144*32/64))
for option in option_list:
print(option)
# try different stream num
print("nlist=8192, nprobe=32, nstreams=48")
option_list = get_options_stage_5_sort_reduction(
input_stream_num=48,
N_insertion_per_stream=int(1e8/8192*32/64))
for option in option_list:
print(option)
print("nlist=8192, nprobe=32, nstreams=32")
option_list = get_options_stage_5_sort_reduction(
input_stream_num=32,
N_insertion_per_stream=int(1e8/8192*32/64))
for option in option_list:
print(option)
print("nlist=8192, nprobe=32, nstreams=16")
option_list = get_options_stage_5_sort_reduction(
input_stream_num=16,
N_insertion_per_stream=int(1e8/8192*32/64))
for option in option_list:
print(option)
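
# Minimal usage sketch (added for illustration; the numbers below are made up,
# real perf_resource_dicts come from the option-generator functions exercised
# in unit_test above):
if __name__ == "__main__":
    stage_a = {"QPS": 5000, "HBM_bank": 4, "BRAM_18K": 100, "DSP48E": 200,
               "FF": 30000, "LUT": 20000, "URAM": 0}
    stage_b = {"QPS": 1200, "HBM_bank": 8, "BRAM_18K": 300, "DSP48E": 500,
               "FF": 80000, "LUT": 60000, "URAM": 16}
    stages, pe_nums = [stage_a, stage_b], [2, 4]
    bottleneck_id, qps = get_bottleneck(stages, pe_nums)
    print("bottleneck stage:", bottleneck_id, "accelerator QPS:", qps)
    print("fits on device:", fit_resource_constraints(stages, pe_nums, count_shell=True))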
|
[
"wejiang@r630-03.ethz.ch"
] |
wejiang@r630-03.ethz.ch
|
bdb48e86eb17c48178b7ed1e9f44a9a4d7f89034
|
40d84c19e1e5ef0d12656e5288ad5c9667d52f41
|
/accounts/tests/test_auth_api.py
|
c14d7ee3eb71f1c573a3120e8f63f311c5bd3488
|
[] |
no_license
|
Michael-Spirit/StarNavi
|
be6b727dfe689308c9db7497c05810bc2ee7f7d3
|
0a1e2994d9a7e96837c9a0842977658d507cbb00
|
refs/heads/master
| 2023-01-08T12:08:58.028321
| 2019-10-15T14:10:06
| 2019-10-15T14:10:06
| 215,303,284
| 0
| 0
| null | 2023-01-04T22:47:05
| 2019-10-15T13:16:24
|
Vue
|
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
from rest_framework.test import APITestCase
from rest_framework.reverse import reverse
from accounts.tests.factories import UserFactory, TEST_USER_PASSWORD
from allauth.account.models import EmailAddress
class TestAuth(APITestCase):
def test_registration(self):
data = {
'email': 'test@mail.com',
'password1': 'test-password123',
'password2': 'test-password123'
}
response = self.client.post(reverse('rest_register'), data=data)
self.assertEqual(response.status_code, 201)
def test_login_success(self):
user = UserFactory()
data = {
'email': user.email,
'password': TEST_USER_PASSWORD
}
response = self.client.post(reverse('rest_login'), data=data)
self.assertIsNotNone(response.data['token'])
def test_login_unverified_email(self):
user = UserFactory()
EmailAddress.objects.filter(email=user.email).update(verified=False)
data = {
'email': user.email,
'password': TEST_USER_PASSWORD
}
response = self.client.post(reverse('rest_login'), data=data)
self.assertContains(response, data['email'], status_code=200)
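
    def test_login_wrong_password(self):
        # Added sketch: assumes the rest_login endpoint rejects bad credentials
        # with a 400, the default django-rest-auth behaviour; not part of the
        # original suite.
        user = UserFactory()
        data = {
            'email': user.email,
            'password': 'definitely-not-the-password'
        }
        response = self.client.post(reverse('rest_login'), data=data)
        self.assertEqual(response.status_code, 400)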
|
[
"MSpiridonov94@gmail.com"
] |
MSpiridonov94@gmail.com
|
e75f7bb601e0c23858b263f18e3f92a930c7e303
|
0f80565b3d15fbb493dfda1f8522600b6bb69c74
|
/BasicPolling/BasicPolling/urls.py
|
60cd34f9519a8cb754a8235fd8f779f9b77ff134
|
[] |
no_license
|
chimpansiets/BasicPolling
|
ccbdabd817bb07919f346bdc15972f49a779adaf
|
6ad6ad27dfd3fb09b26de8240afe15134bbb5a5d
|
refs/heads/main
| 2023-03-24T09:42:27.990896
| 2021-03-23T09:07:14
| 2021-03-23T09:07:14
| 350,048,103
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
"""BasicPolling URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('polls/', include('polls.urls')),
path('admin/', admin.site.urls),
]
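
# For reference (added sketch, assumed names): include('polls.urls') above
# expects the polls app to define its own urlpatterns, e.g. in polls/urls.py:
#
#     from django.urls import path
#     from . import views
#
#     app_name = 'polls'
#     urlpatterns = [
#         path('', views.index, name='index'),
#     ]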
|
[
"svoort1@live.nl"
] |
svoort1@live.nl
|
f00b018ae4c08271e7b5558c629a0ab780474a45
|
a42be7826de86dd3f961b0486a30f8305ceb2b94
|
/src/common/Molecule.py
|
6a108463d0f20645886fa6a94ae9bca790ac96e4
|
[
"MIT"
] |
permissive
|
catenate15/Pilgrim
|
9199bc50714c60abbc7f97e239a2cbee1e48b573
|
5dd261c0f3fb89732a8a57681d7f6cc3a41a8085
|
refs/heads/master
| 2022-08-04T04:54:22.513185
| 2020-05-25T20:16:14
| 2020-05-25T20:16:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,606
|
py
|
#!/usr/bin/python3.6
'''
---------------------------
Licensing and Distribution
---------------------------
Program name: Pilgrim
Version : 2020.2
License : MIT/x11
Copyright (c) 2020, David Ferro Costas (david.ferro@usc.es) and
Antonio Fernandez Ramos (qf.ramos@usc.es)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
---------------------------
*----------------------------------*
| Module : common |
| Sub-module : Molecule |
| Last Update: 2020/02/03 (Y/M/D) |
| Main Author: David Ferro-Costas |
*----------------------------------*
This module contains the Molecule class
'''
#--------------------------------------------------#
import os
import numpy as np
#--------------------------------------------------#
import common.fncs as fncs
import common.partfns as pf
import common.internal as intl
import common.Exceptions as Exc
#--------------------------------------------------#
from common.criteria import EPS_IC
from common.dicts import dpt_im
from common.files import read_gtsfile
from common.files import read_fchk
from common.files import read_gauout
from common.files import write_gtsfile
from common.files import write_xyz, write_molden
from common.pgs import get_pgs
#--------------------------------------------------#
from common.physcons import AMU
from common.physcons import KCALMOL
from common.physcons import EV
from common.physcons import ANGSTROM
from common.physcons import H2CM
#--------------------------------------------------#
class Molecule():
# Initialization method
def __init__(self,label=None):
self._label = label
# Unidimensional
self._mform = "-"
self._mu = None
self._ch = None
self._mtp = None
self._V0 = None
self._pgroup = None
self._rotsigma = None
self._nel = None # number of electrons
self._rtype = None
self._linear = None
# Multi-dimensional
self._atnums = None
self._symbols = None
self._masses = None
self._les = None # list of electronic states
self._itensor = None
self._imoms = None
self._rotTs = None
# Arrays of importance
self._xcc = None
self._gcc = None
self._Fcc = None
self._xms = None
self._gms = None
self._Fms = None
# related to frequencies
self._fscal = 1.0
self._nvdof = None
self._cczpe = None
self._ccfreqs = None
self._ccFevals = None
self._ccFevecs = None
self._iczpe = None
self._icfreqs = None
self._icFevals = None
self._icFevecs = None
# other stuff for very particular occasion
self._gts = None
def __str__(self): return self._mform
def setvar(self,xcc=None,gcc=None,Fcc=None,\
atonums=None,symbols=None,masses=None,\
ch=None,mtp=None, V0=None,\
pgroup=None,rotsigma=None,\
fscal=None,les=None):
if xcc is not None: self._xcc = xcc
if gcc is not None: self._gcc = gcc
if Fcc is not None: self._Fcc = Fcc
if atonums is not None: self._atnums = atonums
if symbols is not None: self._symbols = symbols
if masses is not None: self._masses = masses
if ch is not None: self._ch = int(ch)
if mtp is not None: self._mtp = int(mtp)
if V0 is not None: self._V0 = V0
if pgroup is not None: self._pgroup = pgroup
if rotsigma is not None: self._rotsigma = rotsigma
if fscal is not None: self._fscal = fscal
if les is not None: self._les = les
def genderivates(self):
self._mform = fncs.get_molformula(self._symbols)
self._natoms = len(self._atnums)
self._mass = sum(self._masses)
self._nel = sum(self._atnums)-self._ch
if self._les is None: self._les = [ (self._mtp,0.0) ]
def prepare(self):
# check atnums
if self._atnums is not None and type(self._atnums[0]) == str:
self._symbols = list(self._atnums)
# check symbols
if self._symbols is not None and type(self._symbols[0]) == int:
self._atnums = list(self._symbols)
# Get both atnums and symbols if None
if self._atnums is None: self._atnums = fncs.symbols2atonums(self._symbols)
if self._symbols is None: self._symbols = fncs.atonums2symbols(self._atnums)
# check masses
if self._masses is None:
self._masses = fncs.atonums2masses(self._atnums)
# derivated magnitudes
self.genderivates()
# check Fcc
if self._Fcc not in (None,[]) and len(self._Fcc) != 3*self._natoms:
self._Fcc = fncs.lowt2matrix(self._Fcc)
def calc_pgroup(self,force=False):
calculate = False
if force : calculate = True
if self._pgroup is None: calculate = True
if self._rotsigma is None: calculate = True
if calculate: self._pgroup,self._rotsigma = get_pgs(self._atnums,self._masses,self._xcc)
def remove_frozen(self):
frozen = fncs.detect_frozen(self._Fcc,self._natoms)
if len(frozen) == 0: return [],[]
# coordinates and symbols of frozen moiety
bN = [at in frozen for at in range(self._natoms)]
b3N = [at in frozen for at in range(self._natoms) for ii in range(3)]
frozen_xcc = np.array(self._xcc)[b3N]
frozen_symbols = np.array(self._symbols)[bN]
# now system is just the flexible moiety
bN = [at not in frozen for at in range(self._natoms)]
b3N = [at not in frozen for at in range(self._natoms) for ii in range(3)]
self._xcc = np.array(self._xcc)[b3N].tolist()
self._symbols = np.array(self._symbols)[bN].tolist()
self._atnums = np.array(self._atnums)[bN].tolist()
self._masses = np.array(self._masses)[bN].tolist()
self._pgroup = None
self._rotsigma = None
# Gradient and hessian
if self._gcc is not None and len(self._gcc) != 0:
self._gcc = np.array(self._gcc)[b3N].tolist()
if self._Fcc is not None and len(self._Fcc) != 0:
n3 = self._natoms*3
self._Fcc = [[self._Fcc[idx1][idx2] for idx1 in range(n3) if b3N[idx1]]\
for idx2 in range(n3) if b3N[idx2]]
# set origin for frozen moiety
com = fncs.get_com(self._xcc,self._masses)
frozen_xcc = fncs.set_origin(frozen_xcc,com)
# prepare system
self.prepare()
return frozen_xcc, frozen_symbols
def mod_masses(self,masses):
self._masses = list(masses)
self._mass = sum(self._masses)
# re-calculate point group
self.calc_pgroup(force=True)
def apply_imods(self,imods,imasses):
'''
example: imods = ["H2(4,5)","C13(all_C)"]
imasses = {"H2":2.0141/AMU, "C13":13.0034/AMU}
'''
if imods is None: return
for imod in imods:
isymbol = imod.split("(")[0]
if isymbol in imasses.keys(): imass = imasses[isymbol]
elif isymbol in dpt_im.keys(): imass = dpt_im[isymbol]
else:
exception = Exc.WrongInIsomass
exception._var = isymbol
raise exception
atoms = imod.split("(")[1].split(")")[0]
if "all_" in atoms:
atype = atoms.split("all_")[1].strip()
for idx,symbol in enumerate(self._symbols):
if symbol == atype: self._masses[idx] = imass
else:
list_of_atoms = []
for atom in atoms.split(","):
if "-" in atom:
at1,atn = atom.split("-")
list_of_atoms += range(int(at1),int(atn)+1)
else: list_of_atoms.append(int(atom))
list_of_atoms = sorted(list(set(list_of_atoms)))
for idx in list_of_atoms: self._masses[idx-1] = imass
# re-calculate total mass and point group
self.mod_masses(self._masses)
def setup(self,mu=1.0/AMU,projgrad=False):
self._mu = mu
# shift to center of mass and reorientate molecule
idata = (self._xcc,self._gcc,self._Fcc,self._masses)
self._xcc, self._gcc, self._Fcc = fncs.center_and_orient(*idata)
# symmetry
self.calc_pgroup(force=False)
# Generate mass-scaled arrays
self._xms = fncs.cc2ms_x(self._xcc,self._masses,self._mu)
self._gms = fncs.cc2ms_g(self._gcc,self._masses,self._mu)
self._Fms = fncs.cc2ms_F(self._Fcc,self._masses,self._mu)
#-------------#
# Atomic case #
#-------------#
if self._natoms == 1:
self._nvdof = 0
self._linear = False
#self._xms = list(self._xcc)
#self._gms = list(self._gcc)
#self._Fms = list(self._Fcc)
self._ccfreqs = []
self._ccFevals = []
self._ccFevecs = []
#----------------#
# Molecular case #
#----------------#
else:
# Calculate inertia
self._itensor = fncs.get_itensor_matrix(self._xcc,self._masses)
self._imoms, self._rotTs, self._rtype, self._linear = \
fncs.get_itensor_evals(self._itensor)
# Vibrational degrees of freedom
if self._linear: self._nvdof = 3*self._natoms - 5
else : self._nvdof = 3*self._natoms - 6
# calculate frequencies
if self._Fcc is None : return
if len(self._Fcc) == 0 : return
if self._ccfreqs is not None: return
v0 = self._gms if projgrad else None
data = fncs.calc_ccfreqs(self._Fcc,self._masses,self._xcc,self._mu,v0=v0)
self._ccfreqs, self._ccFevals, self._ccFevecs = data
# Scale frequencies
self._ccfreqs = fncs.scale_freqs(self._ccfreqs,self._fscal)
def get_imag_main_dir(self):
ic, fwsign = intl.ics_idir(self._xcc,self._symbols,\
self._masses,self._ccfreqs,self._ccFevecs)
return ic, fwsign
def icfreqs(self,ics,bool_pg=False):
#----------------#
# Molecular case #
#----------------#
if self._natoms != 1:
ituple = (self._Fcc,self._masses,self._xcc,self._gcc,ics,bool_pg)
self._icfreqs, self._icFevals, self._icFevecs = intl.calc_icfreqs(*ituple)
#-------------#
# Atomic case #
#-------------#
else:
self._icfreqs = []
self._icFevals = []
self._icFevecs = []
# scale frequencies
self._icfreqs = [freq*self._fscal for freq in self._icfreqs]
def ana_freqs(self,case="cc"):
if case == "cc":
# Keep record of imaginary frequencies
if self._ccFevecs is not None:
self._ccimag = [ (frq,self._ccFevecs[idx]) for idx,frq in enumerate(self._ccfreqs)\
if frq < 0.0]
else:
self._ccimag = [ (frq,None) for idx,frq in enumerate(self._ccfreqs)\
if frq < 0.0]
# Calculate zpe
self._cczpes = [fncs.afreq2zpe(frq) for frq in self._ccfreqs]
self._cczpe = sum(self._cczpes)
self._ccV1 = self._V0 + self._cczpe
if case == "ic":
# Keep record of imaginary frequencies
if self._icFevecs is not None:
self._icimag = [ (frq,self._icFevecs[idx]) for idx,frq in enumerate(self._icfreqs)\
if frq < 0.0]
else:
self._icimag = [ (frq,None) for idx,frq in enumerate(self._icfreqs)\
if frq < 0.0]
# Calculate zpe
self._iczpes = [fncs.afreq2zpe(frq) for frq in self._icfreqs]
self._iczpe = sum(self._iczpes)
self._icV1 = self._V0 + self._iczpe
def clean_freqs(self,case="cc"):
# select case
if case == "cc": freqs = self._ccfreqs
else : freqs = self._icfreqs
# keep track of those to save
keep = []
for idx,freq in enumerate(freqs):
if abs(fncs.afreq2cm(freq)) < EPS_IC: continue
keep.append(idx)
# keep only those > EPS_IC
if case == "cc":
self._ccfreqs = [self._ccfreqs[idx] for idx in keep]
if self._ccFevals is not None:
self._ccFevals = [self._ccFevals[idx] for idx in keep]
if self._ccFevecs is not None:
self._ccFevecs = [self._ccFevecs[idx] for idx in keep]
if case == "ic":
self._icfreqs = [self._icfreqs[idx] for idx in keep]
if self._icFevals is not None:
self._icFevals = [self._icFevals[idx] for idx in keep]
if self._icFevecs is not None:
self._icFevecs = [self._icFevecs[idx] for idx in keep]
def deal_lowfq(self,lowfq={},case="cc"):
# for Cartesian Coordinates
if case == "cc":
# frequencies were not projected along MEP
if self._nvdof - len(self._ccfreqs) == 0:
for idx,newfreq in lowfq.items():
self._ccfreqs[idx] = max(self._ccfreqs[idx],newfreq)
# frequencies were projected along MEP
elif self._nvdof - len(self._ccfreqs) == 1:
for idx,newfreq in lowfq.items():
self._ccfreqs[idx-1] = max(self._ccfreqs[idx-1],newfreq)
# for Internal Coordinates
elif case == "ic":
# frequencies were not projected along MEP
if self._nvdof - len(self._icfreqs) == 0:
for idx,newfreq in lowfq.items():
self._icfreqs[idx] = max(self._icfreqs[idx],newfreq)
# frequencies were projected along MEP
elif self._nvdof - len(self._icfreqs) == 1:
for idx,newfreq in lowfq.items():
self._icfreqs[idx-1] = max(self._icfreqs[idx-1],newfreq)
def calc_pfns(self,temps,case="cc",fmode=0):
'''
fmode = -1 or 0 (0 is default)
'''
# Calculate translational partition function (per unit volume)
ph_tra = np.array([pf.pf_partinbox(self._mass,T) for T in temps])
# Calculate rotational partition function (Rigid-Rotor)
if self._natoms > 1:
pf_rot = np.array([pf.pf_rigidrotor(self._imoms,T,self._rotsigma) for T in temps])
else:
pf_rot = np.array([1.0 for T in temps])
# Calculate vibrational partition function (Harmonic-Oscillator)
if self._nvdof != 0:
# remove freq if required
nf = self._nvdof + fmode
if case == "cc": afreqs = list(self._ccfreqs)
if case == "ic": afreqs = list(self._icfreqs)
while len(afreqs) > nf: afreqs = afreqs[1:]
# Calculate vib pfn
pf_vib = np.array([pf.pf_harmosc(afreqs,T,imag=1E10) for T in temps])
else:
pf_vib = np.array([1.0 for T in temps])
# Calculate electronic partition function
pf_ele = np.array([pf.pf_electr(self._les,T) for T in temps])
# Total partition function
qtot = ph_tra * pf_rot * pf_vib * pf_ele
if case == "cc": return qtot, self._ccV1, (ph_tra,pf_rot,pf_vib,pf_ele)
if case == "ic": return qtot, self._icV1, (ph_tra,pf_rot,pf_vib,pf_ele)
def info_string(self,ib=0):
root_mass = sum(fncs.symbols2masses(self._symbols))
string = "Molecular formula : %s\n"%self._mform
string += "Number of atoms : %i\n"%self._natoms
string += "Number of electrons : %i\n"%self._nel
string += "Vibrational DOFs : %i\n"%self._nvdof
string += "Charge : %i\n"%self._ch
string += "Multiplicity : %i\n"%self._mtp
string += "Electronic energy (V0): %.8f hartree\n"%self._V0
string += "Total mass [root] : %.4f amu\n"%(root_mass *AMU)
string += "Total mass : %.4f amu\n"%(self._mass*AMU)
if self._pgroup is not None: string += "Point group symmetry : %s\n"%(self._pgroup)
if self._rotsigma is not None: string += "Rotational sym num : %i\n"%(self._rotsigma)
string += "Cartesian coordinates (Angstrom):\n"
for at,symbol in enumerate(self._symbols):
mass = self._masses[at]*AMU
x,y,z = fncs.xyz(self._xcc,at)
x *= ANGSTROM
y *= ANGSTROM
z *= ANGSTROM
string += " %2s %+10.6f %+10.6f %+10.6f [%7.3f amu]\n"%(symbol,x,y,z,mass)
try:
str2 = "Moments and product of inertia (au):\n"
if len(self._imoms) == 1:
str2 += " %+10.3E\n"%self._imoms[0]
if len(self._imoms) == 3:
prodinert = self._imoms[0]*self._imoms[1]*self._imoms[2]
dataline = (self._imoms[0],self._imoms[1],self._imoms[2],prodinert)
str2 += " %+10.3E %+10.3E %+10.3E [%10.3E]\n"%dataline
string += str2
except: pass
try:
str2 = "Vibrational frequencies [1/cm] (scaled by %.3f):\n"%self._fscal
for idx in range(0,len(self._ccfreqs),6):
str2 += " %s\n"%(" ".join("%8.2f"%fncs.afreq2cm(freq) \
for freq in self._ccfreqs[idx:idx+6]))
if len(self._ccfreqs) != 0: string += str2
except: pass
try:
str2 = "Vibrational zero-point energies [kcal/mol]:\n"
for idx in range(0,len(self._cczpes),6):
str2 += " %s\n"%(" ".join("%8.2f"%(zpe*KCALMOL) \
for zpe in self._cczpes[idx:idx+6]))
zpe_au = self._cczpe
zpe_kcal = self._cczpe * KCALMOL
zpe_eV = self._cczpe * EV
zpe_cm = self._cczpe * H2CM
str2 += "Vibrational zero-point energy: %+14.8f hartree = \n"%zpe_au
str2 += " %+14.2f kcal/mol = \n"%zpe_kcal
str2 += " %+14.2f eV = \n"%zpe_eV
str2 += " %+14.2f cm^-1 \n"%zpe_cm
str2 += "V0 + zero-point energy (V1) : %+14.8f hartree\n"%self._ccV1
if self._cczpe != 0.0: string += str2
except: pass
# add blank spaces
string = "\n".join([" "*ib+line for line in string.split("\n")])
return string
#=======================================#
# Set variables from external files #
#=======================================#
def set_from_gts(self,gtsfile):
if not os.path.exists(gtsfile): return
# read file
self._gts = gtsfile
xcc,atonums,ch,mtp,E,gcc,Fcc,masses,pgroup,rotsigma,freq_list = read_gtsfile(self._gts)
# set variables
self.setvar(xcc=xcc,gcc=gcc,Fcc=Fcc)
self.setvar(atonums=atonums,masses=masses)
self.setvar(ch=ch,mtp=mtp,V0=E,pgroup=pgroup,rotsigma=rotsigma)
# Prepare system
self.prepare()
# only for developers: freq list
if freq_list is not None and len(freq_list) != 0: self._ccfreqs = freq_list
def set_from_fchk(self,fchk):
if not os.path.exists(fchk): return
# read file
xcc, atonums, ch, mtp, E, gcc, Fcc, masses, calclevel = read_fchk(fchk)
# set variables
self.setvar(xcc=xcc,gcc=gcc,Fcc=Fcc)
self.setvar(atonums=atonums,masses=masses)
self.setvar(ch=ch,mtp=mtp,V0=E)
# Prepare system
self.prepare()
def set_from_gauout(self,gauout):
if not os.path.exists(gauout): return
# read file
xcc, atonums, ch, mtp, E, gcc, Fcc, masses, calclevel = read_gauout(gauout)
# set variables
self.setvar(xcc=xcc,gcc=gcc,Fcc=Fcc)
self.setvar(atonums=atonums,masses=masses)
self.setvar(ch=ch,mtp=mtp,V0=E)
# Prepare system
self.prepare()
#=======================================#
# Generation of different kind of files #
#=======================================#
def genfile_xyz(self,filename):
try : write_xyz(filename,self._xcc,self._symbols)
except: return 0
return 1
#---------------------------------------#
def genfile_molden(self,filename):
try : write_molden(filename,self._xcc,self._symbols,self._ccfreqs,self._ccFevecs)
except: return 0
return 1
#---------------------------------------#
    def genfile_gts(self,filename,level=""):
        try   : write_gtsfile(self._xcc,self._atnums,self._ch,self._mtp,\
                           self._V0,self._pgroup,self._rotsigma,self._gcc,\
                           self._Fcc,filename,level=level)
        except: return 0
        return 1
#=======================================#
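# --- Editor's illustration (not part of the original module) ---------------------------
# calc_pfns above multiplies four factors, qtot(T) = q_trans * q_rot * q_vib * q_elec.
# The sketch below spells out the vibrational factor under the usual harmonic-oscillator
# convention with energies measured from the zero-point level; the exact convention used
# by pf.pf_harmosc and the unit handling are assumptions made only for this example.
def _example_q_vib_harmonic(freqs_au, T, kB_au=3.166811563e-6):
    """Illustrative q_vib = prod_i 1/(1 - exp(-E_i/(kB*T))), mode energies E_i in hartree."""
    import math
    beta = 1.0 / (kB_au * T)   # 1/hartree; kB_au is Boltzmann's constant in hartree/K
    q = 1.0
    for e in freqs_au:         # one factor per vibrational mode
        q *= 1.0 / (1.0 - math.exp(-beta * e))
    return q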
|
[
"cathedralpkg@gmail.com"
] |
cathedralpkg@gmail.com
|
77eb8e1fa8124381406bb991d72844b5849b75c9
|
5cfaa7709237e4fd0bdc2f00a5bde6b063324580
|
/tests/test_features.py
|
09359dca7a96226ce0d9d9edf939634cf73beaaa
|
[] |
no_license
|
khamv/ml_investment
|
6f1f0fe0e0cc4a7fc0b84524b519b21a4582a85a
|
9251847dcd5e88427106eed3c9952902210d645c
|
refs/heads/main
| 2023-03-22T05:52:12.537206
| 2021-03-17T20:24:15
| 2021-03-17T20:24:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,636
|
py
|
import pytest
import hashlib
import pandas as pd
import numpy as np
from data import SF1Data
from features import calc_series_stats, QuarterlyFeatures, BaseCompanyFeatures,\
QuarterlyDiffFeatures, FeatureMerger, \
DailyAggQuarterFeatures
from utils import load_json
config = load_json('config.json')
@pytest.mark.parametrize(
["series", "norm", "expected"],
[([10, 0, 1], False,
{'_mean': 3.6666666666666665,
'_median': 1.0,
'_max': 10.0,
'_min': 0.0,
'_std': 4.4969125210773475}),
([10, -30, 1, 4, 15.2], False,
{'_mean': 0.039999999999999855,
'_median': 4.0,
'_max': 15.2,
'_min': -30.0,
'_std': 15.798936673080249}),
([1], False,
{'_mean': 1.0,
'_median': 1.0,
'_max': 1.0,
'_min': 1.0,
'_std': 0.0} ),
([10, -30, 1, 4, 15.2], True,
{'_mean': 0.0039999999999999855,
'_median': .4,
'_max': 1.52,
'_min': -3.0,
'_std': 1.5798936673080249})]
)
def test_calc_series_stats(series, norm, expected):
result = calc_series_stats(series, norm=norm)
assert type(result) == dict
assert len(result) == len(expected)
assert result.keys() == expected.keys()
for key in result:
assert np.isclose(result[key], expected[key])
if norm == False:
np.random.seed(0)
np.random.shuffle(series)
result = calc_series_stats(series, norm=norm)
for key in result:
assert np.isclose(result[key], expected[key])
def test_calc_series_stats_nans():
assert calc_series_stats([np.nan, 10, 0, 1]) == calc_series_stats([10, 0, 1])
assert calc_series_stats([None, 10, 0, 1]) == calc_series_stats([10, 0, 1])
assert calc_series_stats([10, 0, np.nan, 1]) == calc_series_stats([10, 0, 1])
result = calc_series_stats([])
for key in result:
assert np.isnan(result[key])
result = calc_series_stats([np.nan, None])
for key in result:
assert np.isnan(result[key])
def int_hash(text):
return int(hashlib.md5(text.encode('utf-8')).hexdigest()[:8], 16)
class Data:
def __init__(self, columns, cat_columns=None, tickers=None):
self.columns = columns
self.cat_columns = cat_columns
self.tickers = tickers
def load_quarterly_data(self, tickers, quarter_count=None):
size=50
df = pd.DataFrame()
df['ticker'] = tickers * size
df['date'] = np.nan
np.random.seed(int_hash(str(tickers)))
for col in self.columns:
df[col] = np.random.uniform(-1e5, 1e5, size)
return df
def load_daily_data(self, tickers):
size=500
df = pd.DataFrame()
df['ticker'] = tickers * size
df['date'] = np.datetime64('now')
np.random.seed(int_hash(str(tickers)))
for col in self.columns:
df[col] = np.random.uniform(-1e5, 1e5, size)
return df
def load_base_data(self):
df = pd.DataFrame()
df['ticker'] = self.tickers
for col in self.cat_columns:
np.random.seed(0)
df[col] = np.random.randint(-2, 2, len(self.tickers))
return df
class TestQuarterlyFeatures:
@pytest.mark.parametrize(
["tickers", "columns", "quarter_counts", "max_back_quarter"],
[(['AAPL', 'TSLA'], ['ebit'], [2], 10),
(['NVDA', 'TSLA'], ['ebit'], [2, 4], 5),
(['AAPL', 'NVDA', 'TSLA', 'WORK'], ['ebit', 'debt'], [2, 4, 10], 10),
(['AAPL', 'ZLG'], ['ebit', 'debt'], [2, 4, 10], 5)]
)
def test_calculate(self, tickers, columns,
quarter_counts, max_back_quarter):
fc = QuarterlyFeatures(columns=columns,
quarter_counts=quarter_counts,
max_back_quarter=max_back_quarter)
loaders = [Data(columns), SF1Data(config['sf1_data_path'])]
for data_loader in loaders:
X = fc.calculate(data_loader, tickers)
assert type(X) == pd.DataFrame
assert 'ticker' in X.index.names
assert 'date' in X.index.names
if type(data_loader) == Data:
assert X.shape[0] == max_back_quarter * len(tickers)
else:
assert X.shape[0] <= max_back_quarter * len(tickers)
assert X.shape[1] == 2 * len(calc_series_stats([])) * \
len(columns) * len(quarter_counts)
            # The minimum over more quarters cannot be higher than the minimum over fewer quarters
sorted_quarter_counts = np.sort(quarter_counts)
for col in columns:
for k in range(len(sorted_quarter_counts) - 1):
lower_count = sorted_quarter_counts[k]
higher_count = sorted_quarter_counts[k + 1]
l_col = 'quarter{}_{}_min'.format(lower_count, col)
h_col = 'quarter{}_{}_min'.format(higher_count, col)
assert (X[h_col] <= X[l_col]).min()
            # The maximum over more quarters cannot be lower than the maximum over fewer quarters
sorted_quarter_counts = np.sort(quarter_counts)
for col in columns:
for k in range(len(sorted_quarter_counts) - 1):
lower_count = sorted_quarter_counts[k]
higher_count = sorted_quarter_counts[k + 1]
l_col = 'quarter{}_{}_max'.format(lower_count, col)
h_col = 'quarter{}_{}_max'.format(higher_count, col)
assert (X[h_col] >= X[l_col]).min()
std_cols = [x for x in X.columns if '_std' in x]
for col in std_cols:
assert X[col].min() >= 0
for col in columns:
for count in quarter_counts:
min_col = 'quarter{}_{}_min'.format(count, col)
max_col = 'quarter{}_{}_max'.format(count, col)
mean_col = 'quarter{}_{}_mean'.format(count, col)
median_col = 'quarter{}_{}_median'.format(count, col)
assert (X[max_col] >= X[min_col]).min()
assert (X[max_col] >= X[mean_col]).min()
assert (X[max_col] >= X[median_col]).min()
assert (X[mean_col] >= X[min_col]).min()
assert (X[median_col] >= X[min_col]).min()
class TestQuarterlyDiffFeatures:
@pytest.mark.parametrize(
["tickers", "columns", "compare_quarter_idxs", "max_back_quarter"],
[(['AAPL', 'TSLA'], ['ebit'], [1], 10),
(['NVDA', 'TSLA'], ['ebit'], [1, 4], 5),
(['AAPL', 'NVDA', 'TSLA', 'WORK'], ['ebit', 'debt'], [1, 4, 10], 10),
(['AAPL', 'ZLG'], ['ebit', 'debt'], [1, 4, 10], 5)]
)
def test_calculate(self, tickers, columns,
compare_quarter_idxs, max_back_quarter):
fc = QuarterlyDiffFeatures(columns=columns,
compare_quarter_idxs=compare_quarter_idxs,
max_back_quarter=max_back_quarter)
loaders = [Data(columns), SF1Data(config['sf1_data_path'])]
for data_loader in loaders:
X = fc.calculate(data_loader, tickers)
assert type(X) == pd.DataFrame
assert 'ticker' in X.index.names
assert 'date' in X.index.names
if type(data_loader) == Data:
assert X.shape[0] == max_back_quarter * len(tickers)
else:
assert X.shape[0] <= max_back_quarter * len(tickers)
assert X.shape[1] == len(compare_quarter_idxs) * len(columns)
class WrapData:
def __init__(self, data_loader, tickers):
self.data_loader = data_loader
self.tickers = tickers
def load_base_data(self):
df = pd.DataFrame()
df['ticker'] = self.tickers
df = pd.merge(df, self.data_loader.load_base_data(), how='left')
return df
class TestBaseCompanyFeatures:
@pytest.mark.parametrize(
["tickers", "cat_columns"],
[(['AAPL', 'TSLA'], ['sector']),
(['NVDA', 'TSLA'], ['sector', 'sicindustry']),
(['AAPL', 'NVDA', 'TSLA', 'WORK'], ['sector', 'sicindustry']),
(['AAPL', 'ZLG'], ['sector', 'sicindustry'])]
)
def test_calculate(self, tickers, cat_columns):
loaders = [Data(columns=[], cat_columns=cat_columns,
tickers=tickers),
SF1Data(config['sf1_data_path'])]
for data_loader in loaders[:]:
fc = BaseCompanyFeatures(cat_columns=cat_columns)
X = fc.calculate(data_loader, tickers)
assert type(X) == pd.DataFrame
assert 'ticker' in X.index.names
base_data = data_loader.load_base_data()
for col in cat_columns:
assert len(base_data[col].unique()) ==\
len(fc.col_to_encoder[col].classes_)
# Reuse fitted after first calculate fc
for col in cat_columns:
assert col in fc.col_to_encoder
new_X = fc.calculate(data_loader, tickers)
for col in cat_columns:
assert (new_X[col] == X[col]).min()
wd = WrapData(data_loader, tickers)
new_X = fc.calculate(wd, tickers)
for col in cat_columns:
assert (new_X[col] == X[col]).min()
class TestFeatureMerger:
@pytest.mark.parametrize(
"tickers",
[['AAPL', 'TSLA'], ['NVDA', 'TSLA'],
['AAPL', 'NVDA', 'TSLA', 'WORK'], ['AAPL', 'ZLG']]
)
def test_calculate(self, tickers):
data_loader = SF1Data(config['sf1_data_path'])
fc1 = QuarterlyFeatures(columns=['ebit'],
quarter_counts=[2],
max_back_quarter=10)
fc2 = QuarterlyDiffFeatures(columns=['ebit', 'debt'],
compare_quarter_idxs=[1, 4],
max_back_quarter=10)
fc3 = BaseCompanyFeatures(cat_columns=['sector', 'sicindustry'])
X1 = fc1.calculate(data_loader, tickers)
X2 = fc2.calculate(data_loader, tickers)
X3 = fc3.calculate(data_loader, tickers)
fm1 = FeatureMerger(fc1, fc2, on=['ticker', 'date'])
Xm1 = fm1.calculate(data_loader, tickers)
fm2 = FeatureMerger(fc1, fc3, on='ticker')
Xm2 = fm2.calculate(data_loader, tickers)
assert Xm1.shape[0] == X1.shape[0]
assert Xm2.shape[0] == X1.shape[0]
assert Xm1.shape[1] == X1.shape[1] + X2.shape[1]
assert Xm2.shape[1] == X1.shape[1] + X3.shape[1]
assert (Xm1.index == X1.index).min()
assert (Xm2.index == X1.index).min()
new_cols = Xm1.columns[:X1.shape[1]]
old_cols = X1.columns
for nc, oc in zip(new_cols, old_cols):
assert (Xm1[nc] == X1[oc]).min()
new_cols = Xm2.columns[:X1.shape[1]]
old_cols = X1.columns
for nc, oc in zip(new_cols, old_cols):
assert (Xm2[nc] == X1[oc]).min()
class TestDailyAggQuarterFeatures:
@pytest.mark.parametrize(
["tickers", "columns", "agg_day_counts", "max_back_quarter"],
[(['AAPL', 'TSLA'], ['marketcap'], [100], 10),
(['NVDA', 'TSLA'], ['marketcap'], [100, 200], 5),
(['AAPL', 'NVDA', 'TSLA', 'WORK'], ['marketcap', 'pe'], [50, 200], 10),
(['AAPL', 'ZLG'], ['marketcap', 'pe'], [50, 200], 5)]
)
def test_calculate(self, tickers, columns,
agg_day_counts, max_back_quarter):
fc = DailyAggQuarterFeatures(columns=columns,
agg_day_counts=agg_day_counts,
max_back_quarter=max_back_quarter)
data_loader = SF1Data(config['sf1_data_path'])
X = fc.calculate(data_loader, tickers)
assert type(X) == pd.DataFrame
assert 'ticker' in X.index.names
assert 'date' in X.index.names
assert X.shape[0] <= max_back_quarter * len(tickers)
assert X.shape[1] == len(calc_series_stats([])) * \
len(columns) * len(agg_day_counts)
for col in columns:
for count in agg_day_counts:
min_col = 'days{}_{}_min'.format(count, col)
max_col = 'days{}_{}_max'.format(count, col)
mean_col = 'days{}_{}_mean'.format(count, col)
median_col = 'days{}_{}_median'.format(count, col)
assert (X[max_col] >= X[min_col]).min()
assert (X[max_col] >= X[mean_col]).min()
assert (X[max_col] >= X[median_col]).min()
assert (X[mean_col] >= X[min_col]).min()
assert (X[median_col] >= X[min_col]).min()
|
[
"fartuk@pop-os.localdomain"
] |
fartuk@pop-os.localdomain
|
1e5ba37e5632d9af328796803bb460d84fe8af12
|
cb0e7d6493b23e870aa625eb362384a10f5ee657
|
/solutions/python3/1330.py
|
a628f22bd8f3702332e9e5888502f2c30940ce64
|
[] |
no_license
|
sweetpand/LeetCode-1
|
0acfa603af254a3350d457803449a91322f2d1a7
|
65f4ef26cb8b2db0b4bf8c42bfdc76421b479f94
|
refs/heads/master
| 2022-11-14T07:01:42.502172
| 2020-07-12T12:25:56
| 2020-07-12T12:25:56
| 279,088,171
| 1
| 0
| null | 2020-07-12T15:03:20
| 2020-07-12T15:03:19
| null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
class Solution:
def maxValueAfterReverse(self, nums: List[int]) -> int:
mini = float('inf')
maxi = float('-inf')
for a, b in zip(nums, nums[1:]):
mini = min(mini, max(a, b))
maxi = max(maxi, min(a, b))
diff = max(0, (maxi - mini) * 2)
for a, b in zip(nums, nums[1:]):
headDiff = -abs(a - b) + abs(nums[0] - b)
tailDiff = -abs(a - b) + abs(nums[-1] - a)
diff = max(diff, headDiff, tailDiff)
return sum(abs(a - b) for a, b in zip(nums, nums[1:])) + diff
|
[
"walkccray@gmail.com"
] |
walkccray@gmail.com
|
44e407c8917cf9421fad88dac6961fbb22179b78
|
fef6ed0cee947f753cf8a34b244726bfdeedb079
|
/exercises/05_basic_scripts/task_5_2a.py
|
1fbea70ee085898ee942deef5240481f9c528704
|
[] |
no_license
|
mirsadm82/pyneng-examples-exercises-en
|
c11bc3d4181c7e79807bfab46587ecb5a182d54a
|
9a5b34bc460005d8b04dfd636790bf53a1916efc
|
refs/heads/main
| 2023-05-10T22:26:43.348567
| 2021-06-01T12:18:54
| 2021-06-01T12:20:20
| 360,168,419
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,386
|
py
|
# -*- coding: utf-8 -*-
"""
Task 5.2a
Copy and modify the script from task 5.2 so that, if the user entered a host address
rather than a network address, convert the host address to a network address
and print the network address and mask, as in task 5.2.
An example of a network address (all host bits are equal to zero):
* 10.0.1.0/24
* 190.1.0.0/16
Host address example:
* 10.0.1.1/24 - host from network 10.0.1.0/24
* 10.0.5.195/28 - host from network 10.0.5.192/28
If the user entered the address 10.0.1.1/24, the output should look like this:
Network:
10 0 1 0
00001010 00000000 00000001 00000000
Mask:
/24
255 255 255 0
11111111 11111111 11111111 00000000
Check the script work on different host/mask combinations, for example:
10.0.5.195/28, 10.0.1.1/24
Hint:
The network address can be calculated from the binary host address and the netmask.
If the mask is 28, then the network address is the first 28 bits host addresses + 4 zeros.
For example, the host address 10.1.1.195/28 in binary will be:
bin_ip = "00001010000000010000000111000011"
Then the network address will be the first 28 characters from bin_ip + 0000
(4 because in total there can be 32 bits in the address, and 32 - 28 = 4)
00001010000000010000000111000000
Restriction: All tasks must be done using the topics covered in this and previous chapters.
"""
net = input("Enter the IP network in the format e.g. 10.1.1.0/24: ")
ip, mask = net.split("/")
ip_list = ip.split(".")
mask = int(mask)
oct1, oct2, oct3, oct4 = [
int(ip_list[0]),
int(ip_list[1]),
int(ip_list[2]),
int(ip_list[3]),
]
bin_ip_str = "{:08b}{:08b}{:08b}{:08b}".format(oct1, oct2, oct3, oct4)
bin_network_str = bin_ip_str[:mask] + "0" * (32 - mask)
net1, net2, net3, net4 = [
int(bin_network_str[0:8], 2),
int(bin_network_str[8:16], 2),
int(bin_network_str[16:24], 2),
int(bin_network_str[24:32], 2),
]
bin_mask = "1" * mask + "0" * (32 - mask)
m1, m2, m3, m4 = [
int(bin_mask[0:8], 2),
int(bin_mask[8:16], 2),
int(bin_mask[16:24], 2),
int(bin_mask[24:32], 2),
]
ip_output = """
Network:
{0:<8} {1:<8} {2:<8} {3:<8}
{0:08b} {1:08b} {2:08b} {3:08b}"""
mask_output = """
Mask:
/{0}
{1:<8} {2:<8} {3:<8} {4:<8}
{1:08b} {2:08b} {3:08b} {4:08b}
"""
print(ip_output.format(net1, net2, net3, net4))
print(mask_output.format(mask, m1, m2, m3, m4))
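# --- Editor's cross-check (illustration only; the exercise itself restricts solutions to
# --- basic string and integer operations) -----------------------------------------------
# The standard-library ipaddress module computes the same network address from a host
# address, which makes a convenient sanity check for the manual bit manipulation above.
import ipaddress
_check = ipaddress.ip_network(net, strict=False)
print("ipaddress cross-check:", _check.network_address, "/", mask)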
|
[
"mirsad.muratagic@gmail.com"
] |
mirsad.muratagic@gmail.com
|
0dd4cf4810bd0213389b3262df85aa7df66ccec0
|
df57b91c423626791bc9d482d5c2e6a17339e348
|
/Detect emotions of your favorite toons/Final_model.py
|
40e7097560ea8888da43066e94235e5ebcaf03d7
|
[] |
no_license
|
VigneshwaraChinnadurai/Competitions
|
58640819c24666402b16830ed9837921190bafb8
|
7a2c29463d2969b62bd134756d4b583626ae378e
|
refs/heads/master
| 2023-02-02T10:14:01.514646
| 2020-12-20T16:06:53
| 2020-12-20T16:06:53
| 259,121,022
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,864
|
py
|
import keras
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.layers import Dropout, Flatten, Dense
from keras.models import Sequential
model = Sequential()
model.add(Conv2D(16, (3, 3), input_shape = (128, 128, 3),kernel_initializer='normal', activation='relu'))
model.add(Conv2D(16, (3, 3),activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Conv2D(32, (3, 3), padding='valid', activation='relu'))
model.add(Conv2D(32, (3, 3),activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Conv2D(128, (3, 3), padding='valid', activation='relu'))
model.add(Conv2D(128, (3, 3),activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.4))
model.add(Conv2D(128, (3, 3), padding='valid', activation='relu'))
model.add(Conv2D(128, (3, 3),activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(5,activation='softmax'))
ada = keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)
sgd = keras.optimizers.SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss="mean_squared_error", optimizer=sgd, metrics=['accuracy'])
model.compile(loss="categorical_crossentropy", optimizer='adam', metrics=['accuracy'])
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(shear_range = 0.2,
channel_shift_range = 0.2,
zoom_range = 0.2,
rotation_range=10,
validation_split=0.9,
horizontal_flip = True)
test_datagen = ImageDataGenerator(validation_split=0.9)
training_set = train_datagen.flow_from_directory('Train',
target_size = (128, 128),
batch_size = 16,
shuffle=True,
seed=101,
#save_to_dir='Augumented/Train',
#save_format='jpeg',
interpolation='nearest',
class_mode = 'categorical')
test_set = test_datagen.flow_from_directory('Test',
target_size = (128, 128),
batch_size = 16,
shuffle=True,
seed=101,
#save_to_dir='Augumented/Test',
#save_format='jpeg',
interpolation='nearest',
class_mode = 'categorical')
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
es = EarlyStopping(monitor='val_accuracy', mode='max', patience=20)
mc = ModelCheckpoint('best_model.h5', monitor='val_accuracy', mode='max', save_best_only=True)
model.fit_generator(training_set,
steps_per_epoch = 228,
epochs = 100,
validation_data = test_set,
validation_steps = 70,
class_weight={0:5,4:9,2:15,3:15,1:15},
callbacks=[es,mc])
from keras.models import load_model
saved_model = load_model('best_model.h5')
import joblib  # sklearn.externals.joblib was removed in newer scikit-learn releases
joblib.dump(model, 'Detect_emotions_of_your_favorite_cartoon.joblib')
d=training_set.class_indices
d= {v:k for k,v in d.items()}
import numpy as np
import pandas as pd
from keras.preprocessing import image
test_predict=pd.read_csv('Test.csv')
for n in test_predict['Frame_ID']:
test_image = image.load_img('predict/'+n, target_size = (128, 128))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
result = saved_model.predict(test_image)
training_set.class_indices
if result[0][0] >= 0.5:
prediction = 'Unknown'
elif result[0][1] >= 0.5:
prediction = 'angry'
elif result[0][2] >= 0.5:
prediction = 'happy'
elif result[0][3] >= 0.5:
prediction = 'sad'
else:
prediction = 'surprised'
test_predict.loc[test_predict['Frame_ID']==n,'Results']=prediction
test_predict.to_csv(r'Test_final.csv')
|
[
"noreply@github.com"
] |
VigneshwaraChinnadurai.noreply@github.com
|
89563112cf76730b6df18b18db7070c2e47fb257
|
842b3dd5461c6c1dbad1812262cbc583c234a564
|
/getSubdomains.py
|
5852707a2fc40df2e73af8afd4baa85cb903c3ec
|
[] |
no_license
|
RealAsianNoodles/pihole-blocklists
|
b17e3c47c16265e3e4fbc5bc9d6f43cd64ba9b16
|
f6adeafc1db171d3c9000819543602de04375385
|
refs/heads/master
| 2023-08-19T21:55:00.034954
| 2021-10-29T21:17:32
| 2021-10-29T21:17:32
| 422,589,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,282
|
py
|
##
## 0. Install python3+ and the requests library if required
## 1. Get an account at SecurityTrails.com (free ones are available for infrequent users)
## 2. Get your APIKEY from the dashboard and replace the text "Your Security Trails APIKey here" below
## 3. Replace the text "addYourDomain.com" below with the domain you want to block
## 4. The domains with "0.0.0.0 on the front will be added to a text file in your working directory (domain.com.txt)
## 5. Add the content of the output file to a new or existing blocklist
## 6. Repeat from step 3 for each domain
##
import requests
import os
domain = "addYourDomain.com"
url = "https://api.securitytrails.com/v1/domain/"+domain+"/subdomains"
querystring = {"children_only":"false"}
headers = {
"Accept": "application/json",
"APIKEY": "Your Security Trails APIKey here"
}
response = requests.request("GET", url, headers=headers, params=querystring)
lstSubs = response.json()["subdomains"]
count = 0
with open(os.path.join(os.getcwd(), domain + ".txt"), "w") as f:
f.write("0.0.0.0 " + domain + "\n")
for sub in lstSubs:
f.write("0.0.0.0 " + sub + "." + domain+"\n")
count = count + 1
print ("Blocklist with "+str(count)+" domains written to: "+f.name)
|
[
"noreply@github.com"
] |
RealAsianNoodles.noreply@github.com
|
a0cdc7991d44eccf066101dc4f15fa22848ce276
|
f1b113d89f81bffc79caec8baf9214881813048f
|
/gui.py
|
831cd26b6406b68a846c90eebecf7bb0fc622efc
|
[] |
no_license
|
FreeLike76/kpi_pa_5
|
a3784e9454d7702d29f44f230919c0e5c0102e61
|
064224504be1041c8cdce4803580f63e56300ce9
|
refs/heads/master
| 2023-01-27T19:13:46.762262
| 2020-12-07T07:35:18
| 2020-12-07T07:35:18
| 317,866,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,625
|
py
|
from tkinter import *
class Gui:
def __init__(self):
self.root = Tk()
self.root.geometry("340x440")
headerFont = ("Times New Roman", 16, "bold")
simpleFont = ("Times New Roman", 16)
playerLabel = Label(self.root, text="Player", width=8, height=1, font=headerFont)
playerLabel.grid(row=0, column=0)
totalLabel = Label(self.root, text=":", width=4, height=1, font=headerFont)
totalLabel.grid(row=0, column=1)
botLabel = Label(self.root, text="AI", width=8, height=1, font=headerFont)
botLabel.grid(row=0, column=2)
self.playerHist = Text(self.root, width=8, height=9, font=simpleFont, state="disabled")
self.playerHist.grid(row=1, column=0)
        self.scoreHist = Text(self.root, width=4, height=9, font=headerFont, state="disabled")
self.scoreHist.grid(row=1, column=1)
self.botHist = Text(self.root, width=8, height=9, font=simpleFont, state="disabled")
self.botHist.grid(row=1, column=2)
self.playerButtonRoll = Button(self.root, text="Roll Dice", font=simpleFont, width=21)
self.playerButtonRoll.grid(row=2, column=0, columnspan=3)
self.playerButtonAccept = Button(self.root, width=21, font=simpleFont, text="Accept Roll", state="disabled")
self.playerButtonAccept.grid(row=3, column=0, columnspan=3)
def pushPlayerHist(self, array):
self.playerHist.configure(state="normal")
self.playerHist.insert(END, "{}-{}-{}-{}-{}"
.format(array[0], array[1], array[2], array[3], array[4])+'\n')
self.playerHist.configure(state="disabled")
def pushBotHist(self, array):
self.botHist.configure(state="normal")
self.botHist.insert(END, "{}-{}-{}-{}-{}"
.format(array[0], array[1], array[2], array[3], array[4])+'\n')
self.botHist.configure(state="disabled")
def pushBotNewLine(self, amount):
self.botHist.configure(state="normal")
for i in range(0, amount):
self.botHist.insert(END, " - - - - -\n")
self.botHist.configure(state="disabled")
def pushScoreHist(self, score, rollCount):
scoreStr = ""
for i in range(1, rollCount):
scoreStr += " - -\n"
if score > 0:
scoreStr += " +"
elif score == 0:
scoreStr += " "
else:
scoreStr += " "
scoreStr += "{}\n".format(score)
self.scoreHist.configure(state="normal")
self.scoreHist.insert(END, scoreStr)
self.scoreHist.configure(state="disabled")
|
[
"dmytro.geleshko01@gmail.com"
] |
dmytro.geleshko01@gmail.com
|
c31ba1aebca5d0a8f3ab48d99843d4bac08458c9
|
94356d117bb3d8d5fc94a0cd4f8828e0c190ef81
|
/first_file.py
|
d06f7f1a1dd704c8cc9fbd4f9adf6ed85ff9fe50
|
[] |
no_license
|
chenyipeng1/507
|
3ba8a3872dade61588af517a9b590d1e42100ae3
|
c475f75fb8278dbec71234c96654a4fddce171b3
|
refs/heads/master
| 2020-03-28T23:35:57.054821
| 2018-09-18T14:05:52
| 2018-09-18T14:05:52
| 149,299,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39
|
py
|
for i in range(0,5):
print (i)
a = 10
|
[
"chenyipeng@chendeMacBook-Pro.local"
] |
chenyipeng@chendeMacBook-Pro.local
|
95c6c2bd783fa32482815e82abb9c3fd6935152a
|
be8b4d0956223c895f64bc91a03ea56e7427abb0
|
/1.py
|
f30d15d4ae14dd4a00ced6b7ab0a6375504a5be4
|
[] |
no_license
|
Ashutos-h/python-L10
|
4b1fb3e5a00e38dd6f7d52b583648e2c57a46d79
|
cb05e48c658fb62a75c749a7cd7d08056409f0eb
|
refs/heads/master
| 2020-03-19T06:41:57.274817
| 2018-06-04T15:36:11
| 2018-06-04T15:36:11
| 136,046,876
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
#Answer 1
from threading import *
from time import *
def display():
print("Starting thread:")
sleep(5)
print("Ending Thread")
t=Thread(target=display)
t.start()
|
[
"ashhutoshsharma@gmail.com"
] |
ashhutoshsharma@gmail.com
|
cb06742c848779fd6fef3a4df5f1c0a8c803c34c
|
35a2c7af44e172038a28dedf869b813027a2e029
|
/cumulusci/robotframework/tests/test_template_util.py
|
3eecfa91711f836c66c1e7d4075061b669d3e81f
|
[] |
permissive
|
Julian88Tex/CumulusCI
|
b82ba31f82ac64911f446592ff79e26b80be1421
|
ea01e5d3523cc174d4a60af93584df7f4486c9f3
|
refs/heads/master
| 2023-08-04T03:57:51.847477
| 2023-02-05T05:18:17
| 2023-02-05T05:18:17
| 202,443,680
| 1
| 0
|
BSD-3-Clause
| 2023-02-05T05:18:18
| 2019-08-15T00:07:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,872
|
py
|
import unittest
from cumulusci.core import template_utils
class TemplateUtils(unittest.TestCase):
def test_string_generator(self):
x = 100
y = template_utils.StringGenerator(lambda: str(x))
assert str(y) == "100"
x = 200
assert str(y) == "200"
def test_faker_library(self):
fake = template_utils.FakerTemplateLibrary()
assert fake.first_name
assert "example.com" in fake.email(domain="example.com")
def test_faker_languages(self):
fake = template_utils.FakerTemplateLibrary("no_NO")
assert fake.first_name
assert "example.com" in fake.email(domain="example.com")
def test_format_str(self):
assert template_utils.format_str("abc") == "abc"
assert template_utils.format_str("{{abc}}", {"abc": 5}) == "5"
assert len(template_utils.format_str("{{fake.first_name}}"))
assert "15" in template_utils.format_str(
"{{fake.first_name}} {{count}}", {"count": 15}
)
assert "15" in template_utils.format_str(
"{{fake.first_name}} {{count}}", {"count": "15"}
)
assert (
template_utils.format_str("{% raw %}{}{% endraw %}", {"count": "15"})
== "{}"
)
def test_format_str_languages(self):
norwegian_faker = template_utils.FakerTemplateLibrary("no_NO")
val = template_utils.format_str(
"{{vikingfake.first_name}} {{abc}}",
{"abc": 5, "vikingfake": norwegian_faker},
)
assert "5" in val
def cosmopolitan_faker(language):
return template_utils.FakerTemplateLibrary(language)
val = template_utils.format_str(
"{{fakei18n('ne_NP').first_name}} {{abc}}",
{"abc": 5, "fakei18n": cosmopolitan_faker, "type": type},
)
assert "5" in val
|
[
"pprescod@salesforce.com"
] |
pprescod@salesforce.com
|
6a47246536fcce134f154fd6db26a1bff7b8ffe6
|
a8a2dfd74454db8e81b3d6fabd801534ffb3e9bb
|
/foobar/models/foo/models.py
|
b4457a83212aa32c6cb1b04a2668dfe6832f098f
|
[] |
no_license
|
s-zhao/foobar
|
b48c8f13951fefcfd1f8c3998e8909f113def61a
|
34bb4fa4e9d34f1c290f6664ac049326953b469d
|
refs/heads/master
| 2021-01-23T11:33:41.581696
| 2013-09-06T01:18:48
| 2013-09-06T01:18:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,072
|
py
|
""" corporate directory
[shichang-scz.rhcloud.com foobar]\> python manage-dev.py syncdb
CommandError: One or more models did not validate:
foo.person: Accessor for field 'contact' clashes with related field 'PersonalContact.person'. Add a related_name argument to the definition for 'contact'.
foo.person: Reverse query name for field 'contact' clashes with related field 'PersonalContact.person'. Add a related_name argument to the definition for 'contact'.
"""
"""
#
# organization contact directory
#
from django.db import models
class Address(models.Model):
line1 = models.CharField(max_length=50, blank=False)
line2 = models.CharField(max_length=50, blank=True, null=False)
city = models.CharField(max_length=50, blank=False)
state = models.CharField(max_length=2, blank=False)
zip_code = models.CharField(max_length=32, blank=False)
country = models.CharField(max_length=3, blank=False)
def __unicode__(self):
return u'%s, %s %s' % (self.line1, self.city, self.state)
class PersonalContact(Address):
home_phone = models.CharField(max_length=32, blank=True, null=False)
work_phone = models.CharField(max_length=32, blank=True, null=False)
mobile = models.CharField(max_length=32, blank=True, null=False)
email = models.EmailField(blank=True, null=False) #max_length=75,
url = models.URLField(blank=True, null=False) #max_length=200
notes = models.TextField(blank=True, null=False)
class BusinessContact(Address):
phone = models.CharField(max_length=32, blank=True, null=False)
email = models.EmailField(blank=True, null=False) #max_length=75,
url = models.URLField(blank=True, null=False) #max_length=200
notes = models.TextField(blank=True, null=False)
#
# entity - in nature how we define a person
# data ownership - organization
# therefore, a real person may have multiple records in the system
#
# API edit not allowed
#
class Person(models.Model):
GENDER = (
('', 'No Reply'),
('M', 'Male'),
('F', 'Female'),
('O', 'Other')
)
first_name = models.CharField(max_length=30, blank=False)
last_name = models.CharField(max_length=30, blank=False)
middle_name = models.CharField("middle and initials", max_length=30, blank=True, null=False)
gender = models.CharField(max_length=1, choices=GENDER, blank=True, null=False)
dob = models.DateField("birth date", blank=True, null=True)
#
# OneToOneField => ForeignKey(model, unique=True)
# so if intention is, each one shall have its own record, then use OneToOne
# if intention is, multiple may share the same, use ForeignKey
#
# each person shall provide and maintain one's own birth place record
#
birth_place = models.OneToOneField(Address, blank=True, null=True)
ssn = models.CharField(max_length=9, blank=True, null=False)
#
# up to date best contact information
#
contact = models.OneToOneField(PersonalContact, blank=True, null=True)
def __unicode__(self):
return u"%s %s" % (self.first_name, self.last_name)
class Organization(models.Model):
name = models.CharField(max_length=50, unique=True, blank=False)
description = models.TextField(blank=True, null=False)
#
#legal/public
#
contact = models.OneToOneField(BusinessContact, blank=False)
def __unicode__(self):
return u'%s' % self.name
class OfficeType(models.Model):
class Meta:
unique_together = ('sym', 'organization')
name = models.CharField(max_length=50, unique=True, blank=False, null=False)
sym = models.CharField(max_length=32, unique=True, blank=False, null=False)
description = models.TextField(blank=True, null=False)
organization = models.ForeignKey(Organization, blank=False)
def __unicode__(self):
return u'%s - %s' % (self.sym, self.name)
class DepartmentType(models.Model):
class Meta:
unique_together = ('sym', 'organization')
name = models.CharField(max_length=50, unique=True, blank=False, null=False)
sym = models.CharField(max_length=32, unique=True, blank=False, null=False)
description = models.TextField(blank=True, null=False)
organization = models.ForeignKey(Organization, blank=False)
def __unicode__(self):
return u'%s - %s' % (self.sym, self.name)
#
# POST - per organization settings
#
class Position(models.Model):
class Meta:
unique_together = ('sym', 'organization')
name = models.CharField(max_length=50, unique=True, blank=False, null=False)
sym = models.CharField(max_length=32, unique=True, blank=False, null=False)
description = models.TextField(blank=True, null=False)
organization = models.ForeignKey(Organization, blank=False)
def __unicode__(self):
return u'%s - %s' % (self.sym, self.name)
class Level(models.Model):
class Meta:
unique_together = ('sym', 'position')
name = models.CharField(max_length=50, unique=True, blank=False, null=False)
sym = models.CharField(max_length=32, unique=True, blank=False, null=False)
description = models.TextField(blank=True, null=False)
position = models.ForeignKey(Position)
def __unicode__(self):
return u'%s - %s' % (self.sym, self.name)
class Title(models.Model):
class Meta:
unique_together = ('sym', 'position')
name = models.CharField(max_length=50, unique=True, blank=False, null=False)
sym = models.CharField(max_length=32, unique=True, blank=False, null=False)
description = models.TextField(blank=True, null=False)
position = models.ForeignKey(Position)
def __unicode__(self):
return u'%s - %s' % (self.sym, self.name)
#full-time, contract, consultant ...
class EmploymentType(models.Model):
class Meta:
unique_together = ('sym', 'organization')
name = models.CharField(max_length=50, unique=True, blank=False, null=False)
sym = models.CharField(max_length=32, unique=True, blank=False, null=False)
description = models.TextField(blank=True, null=False)
organization = models.ForeignKey(Organization, blank=False)
def __unicode__(self):
return u'%s - %s' % (self.sym, self.name)
class Office(models.Model):
class Meta:
unique_together = ('name', 'organization')
name = models.CharField(max_length=50, unique=True, blank=False, null=False)
description = models.TextField(blank=True, null=False)
organization = models.ForeignKey(Organization, blank=False, null=True)
office_type = models.ForeignKey(OfficeType, blank=False)
contact = models.OneToOneField(BusinessContact)
work_hours = models.CharField(max_length=250, blank=True, null=False)
weekend_hours = models.CharField(max_length=250, blank=True, null=False)
holiday_hours = models.CharField(max_length=250, blank=True, null=False)
#
# regional primary office or head quarter
#
is_primary = models.BooleanField(default=False)
def __unicode__(self):
return u'%s - %s' % (self.name, self.organization.name)
class Department(models.Model):
class Meta:
unique_together = ('name', 'office')
name = models.CharField(max_length=50, unique=True, blank=False, null=False)
description = models.TextField(blank=True, null=False)
office = models.ForeignKey(Office, blank=False, null=True)
department_type = models.ForeignKey(DepartmentType, blank=False)
contact = models.OneToOneField(BusinessContact)
work_hours = models.CharField(max_length=250, blank=True, null=False)
weekend_hours = models.CharField(max_length=250, blank=True, null=False)
holiday_hours = models.CharField(max_length=250, blank=True, null=False)
def __unicode__(self):
return u'%s - %s' % (self.name, self.office.name)
class Worker(models.Model):
class Meta:
#
# the same person may be associated with mutliple departments
#
unique_together = ('person', 'department')
#
# corporate owned 'Person' record
# access to person record requires: department | department.office | department.office.organization
# and function permission
#
person = models.ForeignKey(Person)
#
# contact information given to and shared by this employment
#
contact = models.OneToOneField(PersonalContact)
department = models.ForeignKey(Department)
start_date = models.DateField(blank=True, null=True)
conversion_date = models.DateField(blank=True, null=True)
end_date = models.DateField(blank=True, null=True)
position = models.ForeignKey(Position)
title = models.ForeignKey(Title)
level = models.ForeignKey(Level)
employment = models.ForeignKey(EmploymentType)
"""
|
[
"52005ab75973ca0d4e000061@ex-std-node61.prod.rhcloud.com"
] |
52005ab75973ca0d4e000061@ex-std-node61.prod.rhcloud.com
|
965db33f8a04be8071c225a2464639b4ef8e8833
|
bcb722f17cf273a37b1696be20e3dcfb82bdf443
|
/lib/SoftwareSettings.py
|
b3b2c1ca4126300379ffb84a9791fbb459bb3788
|
[
"Apache-2.0"
] |
permissive
|
swdotcom/swdc-sublime-music-time
|
e76bc2b30fd6fb4c0148b8554a026b2f34f0359c
|
a32d5490b8990510ef2776f2a077f69d8185ead0
|
refs/heads/master
| 2020-08-07T04:39:46.206978
| 2020-03-30T09:16:04
| 2020-03-30T09:16:04
| 213,299,287
| 2
| 2
|
Apache-2.0
| 2020-03-15T04:53:56
| 2019-10-07T05:05:55
|
Python
|
UTF-8
|
Python
| false
| false
| 360
|
py
|
import sublime_plugin
import sublime
def getValue(key, defaultValue):
SETTINGS = sublime.load_settings("Software.sublime_settings")
# log("Got value!")
return SETTINGS.get(key, defaultValue)
def setValue(key, value):
SETTINGS = sublime.load_settings("Software.sublime_settings")
# log("Set value!")
return SETTINGS.set(key, value)
|
[
"aidlmldeveloper.com"
] |
aidlmldeveloper.com
|
bea6f718a6ab4c50f6e4ceb6493de32591ebe8eb
|
f1e53710468220d0b33335c323131b9928758011
|
/backbones/RepVGG.py
|
a5ffb99ea18328965bf3acbfd9ff0e8812d6920c
|
[
"Apache-2.0"
] |
permissive
|
Scorpio-i/RepVGG-openMMLab
|
821fba8bc253593956c725a01942cf0e764810f5
|
ca6f4110281681b575400f973d28a9e55347c431
|
refs/heads/main
| 2023-07-14T11:23:48.818987
| 2021-08-09T21:49:08
| 2021-08-09T21:49:08
| 394,575,335
| 3
| 0
|
Apache-2.0
| 2021-08-10T08:12:09
| 2021-08-10T08:12:08
| null |
UTF-8
|
Python
| false
| false
| 7,401
|
py
|
from mim.utils import exit_with_error
try:
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcls.models.builder import BACKBONES
except ImportError:
exit_with_error('Please install mmcls, mmcv, torch to run this example.')
class SEBlock(nn.Module):
"""Squeeze Excitation Block
the “Squeeze-and-Excitation” (SE) block, that adaptively recalibrates channel-wise
feature responses by explicitly modelling interdependencies between channels.
Args:
input_channels: down/up sampling channels
internal_neurons: internal sampling channels
"""
def __init__(self, input_channels, internal_neurons):
super(SEBlock, self).__init__()
self.down = nn.Conv2d(in_channels=input_channels,
out_channels=internal_neurons,
kernel_size=1,
stride=1,
bias=True)
self.up = nn.Conv2d(in_channels=internal_neurons,
out_channels=input_channels,
kernel_size=1,
stride=1,
bias=True)
self.input_channels = input_channels
def forward(self, inputs):
x = F.avg_pool2d(inputs, kernel_size=inputs.size(3))
x = self.down(x)
x = F.relu(x)
x = self.up(x)
x = torch.sigmoid(x)
x = x.view(-1, self.input_channels, 1, 1)
return inputs * x
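# Editor's example (illustrative only, assuming torch is importable): an SEBlock keeps the
# spatial dimensions and only rescales channels, e.g.
#     se = SEBlock(input_channels=64, internal_neurons=4)   # 64 // 16 = 4
#     y = se(torch.randn(2, 64, 32, 32))                    # y.shape == (2, 64, 32, 32)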
def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
result = nn.Sequential()
result.add_module(
'conv',
nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=groups,
bias=False))
result.add_module('bn', nn.BatchNorm2d(num_features=out_channels))
return result
class RepVGGBlock(nn.Module):
"""RepVGG BLock Module
Args:
in_channels: input channels
out_channels: output channels
kernel_size: kernel size
use_se: use SEBlock or not
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
padding_mode='zeros',
use_se=False):
super(RepVGGBlock, self).__init__()
        assert kernel_size == 3, "RepVGGBlock only supports a 3x3 kernel (the 1x1 branch is built internally)"
        assert padding == 1, "padding must be 1 so that the output keeps the input spatial size"
padding11 = padding - kernel_size // 2
if use_se:
self.se = SEBlock(out_channels,
internal_neurons=out_channels // 16)
else:
self.se = nn.Identity()
self.nonlinearity = nn.ReLU()
self.rbr_identity = nn.BatchNorm2d(num_features=in_channels) \
if out_channels == in_channels and stride == 1 else None
self.rbr_dense = conv_bn(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=groups)
self.rbr_1x1 = conv_bn(in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=stride,
padding=padding11,
groups=groups)
def forward(self, inputs):
if self.rbr_identity is None:
id_out = 0
else:
id_out = self.rbr_identity(inputs)
return self.nonlinearity(
self.se(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out))
@BACKBONES.register_module()
class RepVGG(nn.Module):
"""VGG backbone
Example:
model = RepVGG(numclasses = 1000,
num_blocks=[4, 6, 16, 1],
width_multiplier=[2.5, 2.5, 2.5, 5],
override_groups_map=g4_map)
use model..
Args:
num_blocks: Depth of RepVGG, from [4, 6, 16, 1] .
width_multiplier : stage width ,from [2.5, 2.5, 2.5, 5] ,default None
override_groups_map:.... ,default None
use_se: use SEBlock or not ,default False
"""
def __init__(self,
num_blocks,
num_classes,
use_se=False,
width_multiplier=None,
override_groups_map=None):
super(RepVGG, self).__init__()
assert len(width_multiplier) == 4, " "
self.override_groups_map = override_groups_map or dict()
assert 0 not in self.override_groups_map, " "
self.use_se = use_se
self.cur_layer_idx = 1
self.in_planes = min(64, int(64 * width_multiplier[0]))
self.stage0 = RepVGGBlock(in_channels=3,
out_channels=self.in_planes,
kernel_size=3,
stride=2,
padding=1,
use_se=self.use_se)
self.stage1 = self._make_stage(int(64 * width_multiplier[0]),
num_blocks[0],
stride=2)
self.stage2 = self._make_stage(int(128 * width_multiplier[1]),
num_blocks[1],
stride=2)
self.stage3 = self._make_stage(int(256 * width_multiplier[2]),
num_blocks[2],
stride=2)
self.stage4 = self._make_stage(int(512 * width_multiplier[3]),
num_blocks[3],
stride=2)
self.gap = nn.AdaptiveAvgPool2d(output_size=1)
self.linear = nn.Linear(int(512 * width_multiplier[3]), num_classes)
def _make_stage(self, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
blocks = []
for stride in strides:
cur_groups = self.override_groups_map.get(self.cur_layer_idx, 1)
blocks.append(
RepVGGBlock(in_channels=self.in_planes,
out_channels=planes,
kernel_size=3,
stride=stride,
padding=1,
groups=cur_groups,
use_se=self.use_se))
self.in_planes = planes
self.cur_layer_idx += 1
return nn.Sequential(*blocks)
def forward(self, x):
assert x.shape[1] == 3, "first input channel equal 3"
out = self.stage0(x)
out = self.stage1(out)
out = self.stage2(out)
out = self.stage3(out)
out = self.stage4(out)
out = self.gap(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
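# --- Editor's usage sketch (not part of the original file) ------------------------------
# Mirrors the Example in the RepVGG docstring; override_groups_map is left as None here
# because the g4_map mentioned there is defined elsewhere in the original repository.
if __name__ == '__main__':
    model = RepVGG(num_blocks=[4, 6, 16, 1],
                   num_classes=1000,
                   width_multiplier=[2.5, 2.5, 2.5, 5],
                   override_groups_map=None)
    dummy = torch.randn(1, 3, 224, 224)
    print(model(dummy).shape)   # expected: torch.Size([1, 1000])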
|
[
"bofeng1997@gamil.com"
] |
bofeng1997@gamil.com
|
5c56cf821dbcef0ee849ddf312e67880ed652bd0
|
50e3a7f59de70835b2e1ccccde79aeee8be13abc
|
/code/Wikipedia Dataset XML to html page converter/WikiExtractor.py
|
e1bd5e8b52fd23fdb886041ccfd5a3347b532758
|
[] |
no_license
|
nilayc2012/Topic-Recommendation-System-using-LDA
|
ba27d98a37ec854a4d6a3a52f4c5a4044ac4a929
|
6c50d1cb250627b1f227ec9ee8df2ac3b43626b7
|
refs/heads/master
| 2020-07-04T08:40:41.161229
| 2017-05-05T07:53:38
| 2017-05-05T07:53:38
| 73,864,571
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 109,374
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Version: 2.66 (Oct 29, 2016)
# Author: Giuseppe Attardi (attardi@di.unipi.it), University of Pisa
#
# Contributors:
# Antonio Fuschetto (fuschett@aol.com)
# Leonardo Souza (lsouza@amtera.com.br)
# Juan Manuel Caicedo (juan@cavorite.com)
# Humberto Pereira (begini@gmail.com)
# Siegfried-A. Gevatter (siegfried@gevatter.com)
# Pedro Assis (pedroh2306@gmail.com)
# Wim Muskee (wimmuskee@gmail.com)
# Radics Geza (radicsge@gmail.com)
# orangain (orangain@gmail.com)
# Seth Cleveland (scleveland@turnitin.com)
#
# =============================================================================
# Copyright (c) 2011-2016. Giuseppe Attardi (attardi@di.unipi.it).
# =============================================================================
# This file is part of Tanl.
#
# Tanl is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License, version 3,
# as published by the Free Software Foundation.
#
# Tanl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
"""Wikipedia Extractor:
Extracts and cleans text from a Wikipedia database dump and stores output in a
number of files of similar size in a given directory.
Each file will contain several documents in the format:
<doc id="" revid="" url="" title="">
...
</doc>
Template expansion requires first preprocessing the whole dump and
collecting template definitions.
"""
from __future__ import unicode_literals, division
import numpy as np
import sys
import argparse
import bz2
import codecs
import cgi
import fileinput
import logging
import os.path
import re # TODO use regex when it will be standard
import time
from io import StringIO
from multiprocessing import Queue, Process, Value, cpu_count
from timeit import default_timer
PY2 = sys.version_info[0] == 2
if PY2:
from urllib import quote
from htmlentitydefs import name2codepoint
from itertools import izip as zip, izip_longest as zip_longest
range = xrange # Overwrite by Python 3 name
chr = unichr # Overwrite by Python 3 name
text_type = unicode
else:
from urllib.parse import quote
from html.entities import name2codepoint
from itertools import zip_longest
text_type = str
# ===========================================================================
# Program version
version = '2.66'
## PARAMS ####################################################################
##
# Defined in <siteinfo>
# We include as default Template, when loading external template file.
knownNamespaces = set(['Template'])
##
# Keys for Template and Module namespaces
templateKeys = set(['10', '828'])
##
# The namespace used for template definitions
# It is the name associated with namespace key=10 in the siteinfo header.
templateNamespace = ''
templatePrefix = ''
##
# The namespace used for module definitions
# It is the name associated with namespace key=828 in the siteinfo header.
moduleNamespace = ''
##
# Recognize only these namespaces in links
# w: Internal links to the Wikipedia
# wiktionary: Wiki dictionary
# wikt: shortcut for Wiktionary
#
acceptedNamespaces = ['w', 'wiktionary', 'wikt']
##
# Drop these elements from article text
#
discardElements = [
'gallery', 'timeline', 'noinclude', 'pre',
'table', 'tr', 'td', 'th', 'caption', 'div',
'form', 'input', 'select', 'option', 'textarea',
'ul', 'li', 'ol', 'dl', 'dt', 'dd', 'menu', 'dir',
'ref', 'references', 'img', 'imagemap', 'source', 'small',
'sub', 'sup', 'indicator'
]
# This is obtained from <siteinfo>
urlbase = ''
##
# Filter disambiguation pages
filter_disambig_pages = False
filter_disambig_page_pattern = re.compile("{{disambig(uation)?(\|[^}]*)?}}")
##
# page filtering logic -- remove templates, undesired xml namespaces, and disambiguation pages
def keepPage(ns, page):
    if ns != '0':  # Article
return False
# remove disambig pages if desired
if filter_disambig_pages:
for line in page:
if filter_disambig_page_pattern.match(line):
return False
return True
def get_url(uid):
return "%s?curid=%s" % (urlbase, uid)
# =========================================================================
#
# MediaWiki Markup Grammar
# https://www.mediawiki.org/wiki/Preprocessor_ABNF
# xml-char = %x9 / %xA / %xD / %x20-D7FF / %xE000-FFFD / %x10000-10FFFF
# sptab = SP / HTAB
# ; everything except ">" (%x3E)
# attr-char = %x9 / %xA / %xD / %x20-3D / %x3F-D7FF / %xE000-FFFD / %x10000-10FFFF
# literal = *xml-char
# title = wikitext-L3
# part-name = wikitext-L3
# part-value = wikitext-L3
# part = ( part-name "=" part-value ) / ( part-value )
# parts = [ title *( "|" part ) ]
# tplarg = "{{{" parts "}}}"
# template = "{{" parts "}}"
# link = "[[" wikitext-L3 "]]"
# comment = "<!--" literal "-->"
# unclosed-comment = "<!--" literal END
# ; the + in the line-eating-comment rule was absent between MW 1.12 and MW 1.22
# line-eating-comment = LF LINE-START *SP +( comment *SP ) LINE-END
# attr = *attr-char
# nowiki-element = "<nowiki" attr ( "/>" / ( ">" literal ( "</nowiki>" / END ) ) )
# wikitext-L2 = heading / wikitext-L3 / *wikitext-L2
# wikitext-L3 = literal / template / tplarg / link / comment /
# line-eating-comment / unclosed-comment / xmlish-element /
# *wikitext-L3
# ------------------------------------------------------------------------------
selfClosingTags = ('br', 'hr', 'nobr', 'ref', 'references', 'nowiki')
# These tags are dropped, keeping their content.
# handle 'a' separately, depending on keepLinks
ignoredTags = (
'abbr', 'b', 'big', 'blockquote', 'center', 'cite', 'em',
'font', 'h1', 'h2', 'h3', 'h4', 'hiero', 'i', 'kbd',
'p', 'plaintext', 's', 'span', 'strike', 'strong',
'tt', 'u', 'var'
)
placeholder_tags = {'math': 'formula', 'code': 'codice'}
def normalizeTitle(title):
"""Normalize title"""
# remove leading/trailing whitespace and underscores
title = title.strip(' _')
# replace sequences of whitespace and underscore chars with a single space
title = re.sub(r'[\s_]+', ' ', title)
m = re.match(r'([^:]*):(\s*)(\S(?:.*))', title)
if m:
prefix = m.group(1)
if m.group(2):
optionalWhitespace = ' '
else:
optionalWhitespace = ''
rest = m.group(3)
ns = normalizeNamespace(prefix)
if ns in knownNamespaces:
# If the prefix designates a known namespace, then it might be
# followed by optional whitespace that should be removed to get
# the canonical page name
# (e.g., "Category: Births" should become "Category:Births").
title = ns + ":" + ucfirst(rest)
else:
# No namespace, just capitalize first letter.
# If the part before the colon is not a known namespace, then we
# must not remove the space after the colon (if any), e.g.,
# "3001: The_Final_Odyssey" != "3001:The_Final_Odyssey".
# However, to get the canonical page name we must contract multiple
# spaces into one, because
# "3001: The_Final_Odyssey" != "3001: The_Final_Odyssey".
title = ucfirst(prefix) + ":" + optionalWhitespace + ucfirst(rest)
else:
# no namespace, just capitalize first letter
title = ucfirst(title)
return title
def unescape(text):
"""
Removes HTML or XML character references and entities from a text string.
:param text The HTML (or XML) source text.
:return The plain text, as a Unicode string, if necessary.
"""
def fixup(m):
text = m.group(0)
code = m.group(1)
try:
if text[1] == "#": # character reference
if text[2] == "x":
return chr(int(code[1:], 16))
else:
return chr(int(code))
else: # named entity
return chr(name2codepoint[code])
except:
return text # leave as is
return re.sub("&#?(\w+);", fixup, text)
# Match HTML comments
# The buggy template {{Template:T}} has a comment terminating with just "->"
comment = re.compile(r'<!--.*?-->', re.DOTALL)
# Match <nowiki>...</nowiki>
nowiki = re.compile(r'<nowiki>.*?</nowiki>')
# Match ignored tags
ignored_tag_patterns = []
def ignoreTag(tag):
left = re.compile(r'<%s\b.*?>' % tag, re.IGNORECASE | re.DOTALL) # both <ref> and <reference>
right = re.compile(r'</\s*%s>' % tag, re.IGNORECASE)
ignored_tag_patterns.append((left, right))
for tag in ignoredTags:
ignoreTag(tag)
# Match selfClosing HTML tags
selfClosing_tag_patterns = [
re.compile(r'<\s*%s\b[^>]*/\s*>' % tag, re.DOTALL | re.IGNORECASE) for tag in selfClosingTags
]
# Match HTML placeholder tags
placeholder_tag_patterns = [
(re.compile(r'<\s*%s(\s*| [^>]+?)>.*?<\s*/\s*%s\s*>' % (tag, tag), re.DOTALL | re.IGNORECASE),
repl) for tag, repl in placeholder_tags.items()
]
# Match preformatted lines
preformatted = re.compile(r'^ .*?$')
# Match external links (space separates second optional parameter)
externalLink = re.compile(r'\[\w+[^ ]*? (.*?)]')
externalLinkNoAnchor = re.compile(r'\[\w+[&\]]*\]')
# Matches bold/italic
bold_italic = re.compile(r"'''''(.*?)'''''")
bold = re.compile(r"'''(.*?)'''")
italic_quote = re.compile(r"''\"([^\"]*?)\"''")
italic = re.compile(r"''(.*?)''")
quote_quote = re.compile(r'""([^"]*?)""')
# Matches space
spaces = re.compile(r' {2,}')
# Matches dots
dots = re.compile(r'\.{4,}')
# ======================================================================
class Template(list):
"""
A Template is a list of TemplateText or TemplateArgs
"""
@classmethod
def parse(cls, body):
tpl = Template()
# we must handle nesting, s.a.
# {{{1|{{PAGENAME}}}
# {{{italics|{{{italic|}}}
# {{#if:{{{{{#if:{{{nominee|}}}|nominee|candidate}}|}}}|
#
start = 0
for s, e in findMatchingBraces(body, 3):
tpl.append(TemplateText(body[start:s]))
tpl.append(TemplateArg(body[s + 3:e - 3]))
start = e
tpl.append(TemplateText(body[start:])) # leftover
return tpl
def subst(self, params, extractor, depth=0):
# We perform parameter substitutions recursively.
# We also limit the maximum number of iterations to avoid too long or
# even endless loops (in case of malformed input).
# :see: http://meta.wikimedia.org/wiki/Help:Expansion#Distinction_between_variables.2C_parser_functions.2C_and_templates
#
# Parameter values are assigned to parameters in two (?) passes.
# Therefore a parameter name in a template can depend on the value of
# another parameter of the same template, regardless of the order in
# which they are specified in the template call, for example, using
# Template:ppp containing "{{{{{{p}}}}}}", {{ppp|p=q|q=r}} and even
# {{ppp|q=r|p=q}} gives r, but using Template:tvvv containing
# "{{{{{{{{{p}}}}}}}}}", {{tvvv|p=q|q=r|r=s}} gives s.
# logging.debug('&*ssubst tpl %d %s', extractor.frame.length, '', depth, self)
if depth > extractor.maxParameterRecursionLevels:
extractor.recursion_exceeded_3_errs += 1
return ''
return ''.join([tpl.subst(params, extractor, depth) for tpl in self])
def __str__(self):
return ''.join([text_type(x) for x in self])
class TemplateText(text_type):
"""Fixed text of template"""
def subst(self, params, extractor, depth):
return self
class TemplateArg(object):
"""
parameter to a template.
Has a name and a default value, both of which are Templates.
"""
def __init__(self, parameter):
"""
:param parameter: the parts of a tplarg.
"""
# the parameter name itself might contain templates, e.g.:
# appointe{{#if:{{{appointer14|}}}|r|d}}14|
# 4|{{{{{subst|}}}CURRENTYEAR}}
# any parts in a tplarg after the first (the parameter default) are
# ignored, and an equals sign in the first part is treated as plain text.
# logging.debug('TemplateArg %s', parameter)
parts = splitParts(parameter)
self.name = Template.parse(parts[0])
if len(parts) > 1:
# This parameter has a default value
self.default = Template.parse(parts[1])
else:
self.default = None
def __str__(self):
if self.default:
return '{{{%s|%s}}}' % (self.name, self.default)
else:
return '{{{%s}}}' % self.name
def subst(self, params, extractor, depth):
"""
Substitute value for this argument from dict :param params:
Use :param extractor: to evaluate expressions for name and default.
        Limit substitution to the maximum :param depth:.
"""
# the parameter name itself might contain templates, e.g.:
# appointe{{#if:{{{appointer14|}}}|r|d}}14|
paramName = self.name.subst(params, extractor, depth + 1)
paramName = extractor.transform(paramName)
res = ''
if paramName in params:
res = params[paramName] # use parameter value specified in template invocation
elif self.default: # use the default value
defaultValue = self.default.subst(params, extractor, depth + 1)
res = extractor.transform(defaultValue)
# logging.debug('subst arg %d %s -> %s' % (depth, paramName, res))
return res
class Frame(object):
def __init__(self, title='', args=[], prev=None):
self.title = title
self.args = args
self.prev = prev
self.depth = prev.depth + 1 if prev else 0
def push(self, title, args):
return Frame(title, args, self)
def pop(self):
return self.prev
def __str__(self):
res = ''
        prev = self.prev
while prev:
if res: res += ', '
res += '(%s, %s)' % (prev.title, prev.args)
prev = prev.prev
return '<Frame [' + res + ']>'
# ======================================================================
substWords = 'subst:|safesubst:'
class Extractor(object):
"""
An extraction task on a article.
"""
##
# Whether to preserve links in output
keepLinks = False
##
# Whether to preserve section titles
keepSections = True
##
# Whether to preserve lists
keepLists = False
##
# Whether to output HTML instead of text
toHTML = False
##
# Whether to expand templates
expand_templates = True
##
    # Whether to escape doc content
escape_doc = False
##
# Print the wikipedia article revision
print_revision = False
##
# Minimum expanded text length required to print document
min_text_length = 0
def __init__(self, id, revid, title, lines):
"""
:param id: id of page.
        :param title: title of page.
:param lines: a list of lines.
"""
self.id = id
self.revid = revid
self.title = title
self.text = ''.join(lines)
self.magicWords = MagicWords()
self.frame = Frame()
self.recursion_exceeded_1_errs = 0 # template recursion within expand()
self.recursion_exceeded_2_errs = 0 # template recursion within expandTemplate()
self.recursion_exceeded_3_errs = 0 # parameter recursion
self.template_title_errs = 0
def extract(self, out):
"""
:param out: a memory file.
"""
logging.info('%s\t%s', self.id, self.title)
url = get_url(self.id)
        header = '<!DOCTYPE html><html><head><title>%s</title></head><body>\n' % (self.title)
        # Separate header from text with a newline.
        header += '<h1>' + self.title + '</h1>\n'
        header += "<a href='https://en.wikipedia.org/" + url + "'>Wikipedia Link</a>\n <p>"
# https://www.mediawiki.org/wiki/Help:Magic_words
self.magicWords['PAGENAME'] = self.title
self.magicWords['FULLPAGENAME'] = self.title
self.magicWords['CURRENTYEAR'] = time.strftime('%Y')
self.magicWords['CURRENTMONTH'] = time.strftime('%m')
self.magicWords['CURRENTDAY'] = time.strftime('%d')
self.magicWords['CURRENTHOUR'] = time.strftime('%H')
self.magicWords['CURRENTTIME'] = time.strftime('%H:%M:%S')
text = self.text
self.text = '' # save memory
#
# @see https://doc.wikimedia.org/mediawiki-core/master/php/classParser.html
# This does the equivalent of internalParse():
#
# $dom = $this->preprocessToDom( $text, $flag );
# $text = $frame->expand( $dom );
#
text = self.transform(text)
text = self.wiki2text(text)
text = compact(self.clean(text))
footer = "\n </p></body></html>\n"
if out == sys.stdout: # option -a or -o -
header = header.encode('utf-8')
out.write(header)
for line in text: # option -a or -o -
if out == sys.stdout:
line = line.encode('utf-8')
out.write(line)
            out.write('\n')
out.write(footer)
errs = (self.template_title_errs,
self.recursion_exceeded_1_errs,
self.recursion_exceeded_2_errs,
self.recursion_exceeded_3_errs)
if any(errs):
logging.warn("Template errors in article '%s' (%s): title(%d) recursion(%d, %d, %d)",
self.title, self.id, *errs)
def transform(self, wikitext):
"""
Transforms wiki markup.
@see https://www.mediawiki.org/wiki/Help:Formatting
"""
# look for matching <nowiki>...</nowiki>
res = ''
cur = 0
for m in nowiki.finditer(wikitext, cur):
res += self.transform1(wikitext[cur:m.start()]) + wikitext[m.start():m.end()]
cur = m.end()
# leftover
res += self.transform1(wikitext[cur:])
return res
def transform1(self, text):
"""Transform text not containing <nowiki>"""
if Extractor.expand_templates:
# expand templates
# See: http://www.mediawiki.org/wiki/Help:Templates
return self.expand(text)
else:
# Drop transclusions (template, parser functions)
return dropNested(text, r'{{', r'}}')
def wiki2text(self, text):
#
# final part of internalParse().)
#
# $text = $this->doTableStuff( $text );
# $text = preg_replace( '/(^|\n)-----*/', '\\1<hr />', $text );
# $text = $this->doDoubleUnderscore( $text );
# $text = $this->doHeadings( $text );
# $text = $this->replaceInternalLinks( $text );
# $text = $this->doAllQuotes( $text );
# $text = $this->replaceExternalLinks( $text );
# $text = str_replace( self::MARKER_PREFIX . 'NOPARSE', '', $text );
# $text = $this->doMagicLinks( $text );
# $text = $this->formatHeadings( $text, $origText, $isMain );
# Drop tables
# first drop residual templates, or else empty parameter |} might look like end of table.
text = dropNested(text, r'{{', r'}}')
text = dropNested(text, r'{\|', r'\|}')
# Handle bold/italic/quote
if self.toHTML:
text = bold_italic.sub(r'<b>\1</b>', text)
text = bold.sub(r'<b>\1</b>', text)
text = italic.sub(r'<i>\1</i>', text)
else:
text = bold_italic.sub(r'\1', text)
text = bold.sub(r'\1', text)
text = italic_quote.sub(r'"\1"', text)
text = italic.sub(r'"\1"', text)
text = quote_quote.sub(r'"\1"', text)
# residuals of unbalanced quotes
text = text.replace("'''", '').replace("''", '"')
# replace internal links
text = replaceInternalLinks(text)
# replace external links
text = replaceExternalLinks(text)
# drop MagicWords behavioral switches
text = magicWordsRE.sub('', text)
# ############### Process HTML ###############
# turn into HTML, except for the content of <syntaxhighlight>
res = ''
cur = 0
for m in syntaxhighlight.finditer(text):
res += unescape(text[cur:m.start()]) + m.group(1)
cur = m.end()
text = res + unescape(text[cur:])
return text
def clean(self, text):
"""
Removes irrelevant parts from :param: text.
"""
# Collect spans
spans = []
# Drop HTML comments
for m in comment.finditer(text):
spans.append((m.start(), m.end()))
# Drop self-closing tags
for pattern in selfClosing_tag_patterns:
for m in pattern.finditer(text):
spans.append((m.start(), m.end()))
# Drop ignored tags
for left, right in ignored_tag_patterns:
for m in left.finditer(text):
spans.append((m.start(), m.end()))
for m in right.finditer(text):
spans.append((m.start(), m.end()))
# Bulk remove all spans
text = dropSpans(spans, text)
# Drop discarded elements
for tag in discardElements:
text = dropNested(text, r'<\s*%s\b[^>/]*>' % tag, r'<\s*/\s*%s>' % tag)
if not self.toHTML:
# Turn into text what is left (&nbsp;) and <syntaxhighlight>
text = unescape(text)
# Expand placeholders
for pattern, placeholder in placeholder_tag_patterns:
index = 1
for match in pattern.finditer(text):
text = text.replace(match.group(), '%s_%d' % (placeholder, index))
index += 1
text = text.replace('<<', '«').replace('>>', '»')
#############################################
# Cleanup text
text = text.replace('\t', ' ')
text = spaces.sub(' ', text)
text = dots.sub('...', text)
text = re.sub(' (,:\.\)\]»)', r'\1', text)
text = re.sub('(\[\(«) ', r'\1', text)
text = re.sub(r'\n\W+?\n', '\n', text, flags=re.U) # lines with only punctuations
text = text.replace(',,', ',').replace(',.', '.')
if Extractor.toHTML:
text = cgi.escape(text)
return text
# ----------------------------------------------------------------------
# Expand templates
maxTemplateRecursionLevels = 30
maxParameterRecursionLevels = 10
# check for template beginning
reOpen = re.compile('(?<!{){{(?!{)', re.DOTALL)
def expand(self, wikitext):
"""
:param wikitext: the text to be expanded.
Templates are frequently nested. Occasionally, parsing mistakes may
cause template insertion to enter an infinite loop, for instance when
trying to instantiate Template:Country
{{country_{{{1}}}|{{{2}}}|{{{2}}}|size={{{size|}}}|name={{{name|}}}}}
which is repeatedly trying to insert template 'country_', which is
again resolved to Template:Country. The straightforward solution of
keeping track of templates that were already inserted for the current
article would not work, because the same template may legally be used
more than once, with different parameters in different parts of the
article. Therefore, we limit the number of iterations of nested
template inclusion.
"""
# Test template expansion at:
# https://en.wikipedia.org/wiki/Special:ExpandTemplates
# https://it.wikipedia.org/wiki/Speciale:EspandiTemplate
res = ''
if self.frame.depth >= self.maxTemplateRecursionLevels:
self.recursion_exceeded_1_errs += 1
return res
# logging.debug('%*s<expand', self.frame.depth, '')
cur = 0
# look for matching {{...}}
for s, e in findMatchingBraces(wikitext, 2):
res += wikitext[cur:s] + self.expandTemplate(wikitext[s + 2:e - 2])
cur = e
# leftover
res += wikitext[cur:]
# logging.debug('%*sexpand> %s', self.frame.depth, '', res)
return res
def templateParams(self, parameters):
"""
Build a dictionary with positional or name key to expanded parameters.
:param parameters: the parts[1:] of a template, i.e. all except the title.
"""
templateParams = {}
if not parameters:
return templateParams
# logging.debug('%*s<templateParams: %s', self.frame.length, '', '|'.join(parameters))
# Parameters can be either named or unnamed. In the latter case, their
# name is defined by their ordinal position (1, 2, 3, ...).
unnamedParameterCounter = 0
# It's legal for unnamed parameters to be skipped, in which case they
# will get default values (if available) during actual instantiation.
# That is {{template_name|a||c}} means parameter 1 gets
# the value 'a', parameter 2 value is not defined, and parameter 3 gets
# the value 'c'. This case is correctly handled by function 'split',
# and does not require any special handling.
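        # Illustrative example (assumed behaviour):
        #   templateParams(['a', 'name=v']) -> {'1': 'a', 'name': 'v'}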
for param in parameters:
# Spaces before or after a parameter value are normally ignored,
# UNLESS the parameter contains a link (to prevent possible gluing
# the link to the following text after template substitution)
# Parameter values may contain "=" symbols, hence the parameter
# name extends up to the first such symbol.
# It is legal for a parameter to be specified several times, in
# which case the last assignment takes precedence. Example:
# "{{t|a|b|c|2=B}}" is equivalent to "{{t|a|B|c}}".
# Therefore, we don't check if the parameter has been assigned a
# value before, because anyway the last assignment should override
# any previous ones.
# FIXME: Don't use DOTALL here since parameters may be tags with
# attributes, e.g. <div class="templatequotecite">
# Parameters may span several lines, like:
# {{Reflist|colwidth=30em|refs=
# <ref name="Goode">Title</ref>
            # The '=' might occur within an HTML attribute:
# "<ref name=value"
# but we stop at first.
m = re.match(' *([^=]*?) *?=(.*)', param, re.DOTALL)
if m:
# This is a named parameter. This case also handles parameter
# assignments like "2=xxx", where the number of an unnamed
# parameter ("2") is specified explicitly - this is handled
# transparently.
parameterName = m.group(1).strip()
parameterValue = m.group(2)
if ']]' not in parameterValue: # if the value does not contain a link, trim whitespace
parameterValue = parameterValue.strip()
templateParams[parameterName] = parameterValue
else:
# this is an unnamed parameter
unnamedParameterCounter += 1
if ']]' not in param: # if the value does not contain a link, trim whitespace
param = param.strip()
templateParams[str(unnamedParameterCounter)] = param
# logging.debug('%*stemplateParams> %s', self.frame.length, '', '|'.join(templateParams.values()))
return templateParams
def expandTemplate(self, body):
"""Expands template invocation.
:param body: the parts of a template.
:see http://meta.wikimedia.org/wiki/Help:Expansion for an explanation
of the process.
See in particular: Expansion of names and values
http://meta.wikimedia.org/wiki/Help:Expansion#Expansion_of_names_and_values
For most parser functions all names and values are expanded,
regardless of what is relevant for the result. The branching functions
(#if, #ifeq, #iferror, #ifexist, #ifexpr, #switch) are exceptions.
All names in a template call are expanded, and the titles of the
tplargs in the template body, after which it is determined which
values must be expanded, and for which tplargs in the template body
the first part (default) [sic in the original doc page].
In the case of a tplarg, any parts beyond the first are never
expanded. The possible name and the value of the first part is
expanded if the title does not match a name in the template call.
:see code for braceSubstitution at
https://doc.wikimedia.org/mediawiki-core/master/php/html/Parser_8php_source.html#3397:
"""
# template = "{{" parts "}}"
# Templates and tplargs are decomposed in the same way, with pipes as
# separator, even though eventually any parts in a tplarg after the first
# (the parameter default) are ignored, and an equals sign in the first
# part is treated as plain text.
# Pipes inside inner templates and tplargs, or inside double rectangular
# brackets within the template or tplargs are not taken into account in
# this decomposition.
# The first part is called title, the other parts are simply called parts.
# If a part has one or more equals signs in it, the first equals sign
# determines the division into name = value. Equals signs inside inner
# templates and tplargs, or inside double rectangular brackets within the
# part are not taken into account in this decomposition. Parts without
# equals sign are indexed 1, 2, .., given as attribute in the <name> tag.
if self.frame.depth >= self.maxTemplateRecursionLevels:
self.recursion_exceeded_2_errs += 1
# logging.debug('%*sEXPAND> %s', self.frame.depth, '', body)
return ''
logging.debug('%*sEXPAND %s', self.frame.depth, '', body)
parts = splitParts(body)
# title is the portion before the first |
title = parts[0].strip()
title = self.expand(title)
# SUBST
# Apply the template tag to parameters without
# substituting into them, e.g.
# {{subst:t|a{{{p|q}}}b}} gives the wikitext start-a{{{p|q}}}b-end
# @see https://www.mediawiki.org/wiki/Manual:Substitution#Partial_substitution
subst = False
if re.match(substWords, title, re.IGNORECASE):
title = re.sub(substWords, '', title, 1, re.IGNORECASE)
subst = True
if title in self.magicWords.values:
ret = self.magicWords[title]
logging.debug('%*s<EXPAND %s %s', self.frame.depth, '', title, ret)
return ret
# Parser functions.
# For most parser functions all names and values are expanded,
# regardless of what is relevant for the result. The branching
# functions (#if, #ifeq, #iferror, #ifexist, #ifexpr, #switch) are
# exceptions: for #if, #iferror, #ifexist, #ifexp, only the part that
# is applicable is expanded; for #ifeq the first and the applicable
# part are expanded; for #switch, expanded are the names up to and
# including the match (or all if there is no match), and the value in
# the case of a match or if there is no match, the default, if any.
# The first argument is everything after the first colon.
# It has been evaluated above.
colon = title.find(':')
if colon > 1:
funct = title[:colon]
parts[0] = title[colon + 1:].strip() # side-effect (parts[0] not used later)
# arguments after first are not evaluated
ret = callParserFunction(funct, parts, self)
logging.debug('%*s<EXPAND %s %s', self.frame.depth, '', funct, ret)
return ret
title = fullyQualifiedTemplateTitle(title)
if not title:
self.template_title_errs += 1
return ''
redirected = redirects.get(title)
if redirected:
title = redirected
# get the template
if title in templateCache:
template = templateCache[title]
elif title in templates:
template = Template.parse(templates[title])
# add it to cache
templateCache[title] = template
del templates[title]
else:
# The page being included could not be identified
logging.debug('%*s<EXPAND %s %s', self.frame.depth, '', title, '')
return ''
logging.debug('%*sTEMPLATE %s: %s', self.frame.depth, '', title, template)
# tplarg = "{{{" parts "}}}"
# parts = [ title *( "|" part ) ]
# part = ( part-name "=" part-value ) / ( part-value )
# part-name = wikitext-L3
# part-value = wikitext-L3
# wikitext-L3 = literal / template / tplarg / link / comment /
# line-eating-comment / unclosed-comment /
# xmlish-element / *wikitext-L3
# A tplarg may contain other parameters as well as templates, e.g.:
# {{{text|{{{quote|{{{1|{{error|Error: No text given}}}}}}}}}}}
# hence no simple RE like this would work:
# '{{{((?:(?!{{{).)*?)}}}'
# We must use full CF parsing.
# the parameter name itself might be computed, e.g.:
# {{{appointe{{#if:{{{appointer14|}}}|r|d}}14|}}}
# Because of the multiple uses of double-brace and triple-brace
# syntax, expressions can sometimes be ambiguous.
        # Precedence rules specified here:
# http://www.mediawiki.org/wiki/Preprocessor_ABNF#Ideal_precedence
# resolve ambiguities like this:
# {{{{ }}}} -> { {{{ }}} }
# {{{{{ }}}}} -> {{ {{{ }}} }}
#
# :see: https://en.wikipedia.org/wiki/Help:Template#Handling_parameters
params = parts[1:]
# Order of evaluation.
# Template parameters are fully evaluated before they are passed to the template.
# :see: https://www.mediawiki.org/wiki/Help:Templates#Order_of_evaluation
if not subst:
# Evaluate parameters, since they may contain templates, including
# the symbol "=".
# {{#ifexpr: {{{1}}} = 1 }}
params = [self.transform(p) for p in params]
# build a dict of name-values for the parameter values
params = self.templateParams(params)
# Perform parameter substitution.
# Extend frame before subst, since there may be recursion in default
# parameter value, e.g. {{OTRS|celebrative|date=April 2015}} in article
# 21637542 in enwiki.
self.frame = self.frame.push(title, params)
instantiated = template.subst(params, self)
value = self.transform(instantiated)
self.frame = self.frame.pop()
logging.debug('%*s<EXPAND %s %s', self.frame.depth, '', title, value)
return value
# ----------------------------------------------------------------------
# parameter handling
def splitParts(paramsList):
"""
:param paramsList: the parts of a template or tplarg.
    Split template parameters at the separator "|"; within each part, the
    first "=" separates a parameter name from its value.
Template parameters often contain URLs, internal links, text or even
template expressions, since we evaluate templates outside in.
This is required for cases like:
{{#if: {{{1}}} | {{lc:{{{1}}} | "parameter missing"}}
Parameters are separated by "|" symbols. However, we
cannot simply split the string on "|" symbols, since these
also appear inside templates and internal links, e.g.
{{if:|
|{{#if:the president|
|{{#if:|
[[Category:Hatnote templates|A{{PAGENAME}}]]
}}
}}
}}
We split parts at the "|" symbols that are not inside any pair
{{{...}}}, {{...}}, [[...]], {|...|}.
"""
# Must consider '[' as normal in expansion of Template:EMedicine2:
# #ifeq: ped|article|[http://emedicine.medscape.com/article/180-overview|[http://www.emedicine.com/ped/topic180.htm#{{#if: |section~}}
# as part of:
# {{#ifeq: ped|article|[http://emedicine.medscape.com/article/180-overview|[http://www.emedicine.com/ped/topic180.htm#{{#if: |section~}}}} ped/180{{#if: |~}}]
# should handle both tpl arg like:
# 4|{{{{{subst|}}}CURRENTYEAR}}
# and tpl parameters like:
# ||[[Category:People|{{#if:A|A|{{PAGENAME}}}}]]
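    # Illustrative example (assumed behaviour):
    #   splitParts('1=a|2={{x|y}}') -> ['1=a', '2={{x|y}}']
    # (the "|" inside the nested {{...}} does not split the parameter).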
sep = '|'
parameters = []
cur = 0
for s, e in findMatchingBraces(paramsList):
par = paramsList[cur:s].split(sep)
if par:
if parameters:
# portion before | belongs to previous parameter
parameters[-1] += par[0]
if len(par) > 1:
# rest are new parameters
parameters.extend(par[1:])
else:
parameters = par
elif not parameters:
parameters = [''] # create first param
# add span to last previous parameter
parameters[-1] += paramsList[s:e]
cur = e
# leftover
par = paramsList[cur:].split(sep)
if par:
if parameters:
# portion before | belongs to previous parameter
parameters[-1] += par[0]
if len(par) > 1:
# rest are new parameters
parameters.extend(par[1:])
else:
parameters = par
# logging.debug('splitParts %s %s\nparams: %s', sep, paramsList, str(parameters))
return parameters
def findMatchingBraces(text, ldelim=0):
"""
:param ldelim: number of braces to match. 0 means match [[]], {{}} and {{{}}}.
"""
# Parsing is done with respect to pairs of double braces {{..}} delimiting
# a template, and pairs of triple braces {{{..}}} delimiting a tplarg.
# If double opening braces are followed by triple closing braces or
# conversely, this is taken as delimiting a template, with one left-over
# brace outside it, taken as plain text. For any pattern of braces this
# defines a set of templates and tplargs such that any two are either
# separate or nested (not overlapping).
# Unmatched double rectangular closing brackets can be in a template or
# tplarg, but unmatched double rectangular opening brackets cannot.
# Unmatched double or triple closing braces inside a pair of
# double rectangular brackets are treated as plain text.
# Other formulation: in ambiguity between template or tplarg on one hand,
# and a link on the other hand, the structure with the rightmost opening
# takes precedence, even if this is the opening of a link without any
# closing, so not producing an actual link.
# In the case of more than three opening braces the last three are assumed
# to belong to a tplarg, unless there is no matching triple of closing
    # braces, in which case the last two opening braces are assumed to
# belong to a template.
# We must skip individual { like in:
# {{#ifeq: {{padleft:|1|}} | { | | }}
# We must resolve ambiguities like this:
# {{{{ }}}} -> { {{{ }}} }
# {{{{{ }}}}} -> {{ {{{ }}} }}
# {{#if:{{{{{#if:{{{nominee|}}}|nominee|candidate}}|}}}|...}}
# {{{!}} {{!}}}
# Handle:
# {{{{{|safesubst:}}}#Invoke:String|replace|{{{1|{{{{{|safesubst:}}}PAGENAME}}}}}|%s+%([^%(]-%)$||plain=false}}
# as well as expressions with stray }:
# {{{link|{{ucfirst:{{{1}}}}}} interchange}}}
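    # Illustrative example (assumed behaviour):
    #   list(findMatchingBraces('a{{b}}c', 2)) -> [(1, 6)], the span of '{{b}}'.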
if ldelim: # 2-3
reOpen = re.compile('[{]{%d,}' % ldelim) # at least ldelim
reNext = re.compile('[{]{2,}|}{2,}') # at least 2
else:
reOpen = re.compile('{{2,}|\[{2,}')
reNext = re.compile('{{2,}|}{2,}|\[{2,}|]{2,}') # at least 2
cur = 0
while True:
m1 = reOpen.search(text, cur)
if not m1:
return
lmatch = m1.end() - m1.start()
if m1.group()[0] == '{':
stack = [lmatch] # stack of opening braces lengths
else:
stack = [-lmatch] # negative means [
end = m1.end()
while True:
m2 = reNext.search(text, end)
if not m2:
return # unbalanced
end = m2.end()
brac = m2.group()[0]
lmatch = m2.end() - m2.start()
if brac == '{':
stack.append(lmatch)
elif brac == '}':
while stack:
openCount = stack.pop() # opening span
if openCount == 0: # illegal unmatched [[
continue
if lmatch >= openCount:
lmatch -= openCount
if lmatch <= 1: # either close or stray }
break
else:
# put back unmatched
stack.append(openCount - lmatch)
break
if not stack:
yield m1.start(), end - lmatch
cur = end
break
elif len(stack) == 1 and 0 < stack[0] < ldelim:
# ambiguous {{{{{ }}} }}
#yield m1.start() + stack[0], end
cur = end
break
elif brac == '[': # [[
stack.append(-lmatch)
else: # ]]
while stack and stack[-1] < 0: # matching [[
openCount = -stack.pop()
if lmatch >= openCount:
lmatch -= openCount
if lmatch <= 1: # either close or stray ]
break
else:
# put back unmatched (negative)
stack.append(lmatch - openCount)
break
if not stack:
yield m1.start(), end - lmatch
cur = end
break
# unmatched ]] are discarded
cur = end
def findBalanced(text, openDelim=['[['], closeDelim=[']]']):
"""
Assuming that text contains a properly balanced expression using
:param openDelim: as opening delimiters and
:param closeDelim: as closing delimiters.
:return: an iterator producing pairs (start, end) of start and end
positions in text containing a balanced expression.
"""
openPat = '|'.join([re.escape(x) for x in openDelim])
# pattern for delimiters expected after each opening delimiter
afterPat = {o: re.compile(openPat + '|' + c, re.DOTALL) for o, c in zip(openDelim, closeDelim)}
stack = []
start = 0
cur = 0
# end = len(text)
startSet = False
startPat = re.compile(openPat)
nextPat = startPat
while True:
next = nextPat.search(text, cur)
if not next:
return
if not startSet:
start = next.start()
startSet = True
delim = next.group(0)
if delim in openDelim:
stack.append(delim)
nextPat = afterPat[delim]
else:
opening = stack.pop()
# assert opening == openDelim[closeDelim.index(next.group(0))]
if stack:
nextPat = afterPat[stack[-1]]
else:
yield start, next.end()
nextPat = startPat
start = next.end()
startSet = False
cur = next.end()
# ----------------------------------------------------------------------
# Modules
# Only minimal support
# FIXME: import Lua modules.
def if_empty(*rest):
"""
This implements If_empty from English Wikipedia module:
<title>Module:If empty</title>
<ns>828</ns>
<text>local p = {}
function p.main(frame)
local args = require('Module:Arguments').getArgs(frame, {wrappers = 'Template:If empty', removeBlanks = false})
-- For backwards compatibility reasons, the first 8 parameters can be unset instead of being blank,
-- even though there's really no legitimate use case for this. At some point, this will be removed.
local lowestNil = math.huge
for i = 8,1,-1 do
if args[i] == nil then
args[i] = ''
lowestNil = i
end
end
for k,v in ipairs(args) do
if v ~= '' then
if lowestNil < k then
-- If any uses of this template depend on the behavior above, add them to a tracking category.
-- This is a rather fragile, convoluted, hacky way to do it, but it ensures that this module's output won't be modified
-- by it.
frame:extensionTag('ref', '[[Category:Instances of Template:If_empty missing arguments]]', {group = 'TrackingCategory'})
frame:extensionTag('references', '', {group = 'TrackingCategory'})
end
return v
end
end
end
return p </text>
"""
for arg in rest:
if arg:
return arg
return ''
# ----------------------------------------------------------------------
# String module emulation
# https://it.wikipedia.org/wiki/Modulo:String
def functionParams(args, vars):
"""
Build a dictionary of var/value from :param: args.
Parameters can be either named or unnamed. In the latter case, their
    name is taken from :param: vars.
"""
params = {}
index = 1
for var in vars:
value = args.get(var)
if value is None:
value = args.get(str(index))
if value is None:
value = ''
else:
index += 1
params[var] = value
return params
def string_sub(args):
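    # Emulates Lua string.sub; illustrative example (assumed behaviour):
    #   string_sub({'s': 'abcdef', 'i': '2', 'j': '4'}) -> 'bcd'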
params = functionParams(args, ('s', 'i', 'j'))
s = params.get('s', '')
i = int(params.get('i', 1) or 1) # or handles case of '' value
j = int(params.get('j', -1) or -1)
if i > 0: i -= 1 # lua is 1-based
if j < 0: j += 1
if j == 0: j = len(s)
return s[i:j]
def string_len(args):
    params = functionParams(args, ('s',))
s = params.get('s', '')
return len(s)
def string_find(args):
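    # Emulates Lua string.find; illustrative example (assumed behaviour):
    #   string_find({'source': 'abcabc', 'target': 'b', 'start': '1', 'plain': '1'}) -> 2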
params = functionParams(args, ('source', 'target', 'start', 'plain'))
source = params.get('source', '')
pattern = params.get('target', '')
start = int('0'+params.get('start', 1)) - 1 # lua is 1-based
plain = int('0'+params.get('plain', 1))
if source == '' or pattern == '':
return 0
if plain:
return source.find(pattern, start) + 1 # lua is 1-based
else:
        m = re.compile(pattern).search(source, start)
        return m.start() + 1 if m else 0  # lua is 1-based; 0 means not found
# ----------------------------------------------------------------------
# Module:Roman
# http://en.wikipedia.org/w/index.php?title=Module:Roman
# Modulo:Numero_romano
# https://it.wikipedia.org/wiki/Modulo:Numero_romano
def roman_main(args):
"""Convert first arg to roman numeral if <= 5000 else :return: second arg."""
num = int(float(args.get('1')))
# Return a message for numbers too big to be expressed in Roman numerals.
if 0 > num or num >= 5000:
return args.get('2', 'N/A')
def toRoman(n, romanNumeralMap):
"""convert integer to Roman numeral"""
result = ""
for integer, numeral in romanNumeralMap:
while n >= integer:
result += numeral
n -= integer
return result
# Find the Roman numerals for numbers 4999 or less.
smallRomans = (
(1000, "M"),
(900, "CM"), (500, "D"), (400, "CD"), (100, "C"),
(90, "XC"), (50, "L"), (40, "XL"), (10, "X"),
(9, "IX"), (5, "V"), (4, "IV"), (1, "I")
)
return toRoman(num, smallRomans)
# ----------------------------------------------------------------------
modules = {
'convert': {
'convert': lambda x, u, *rest: x + ' ' + u, # no conversion
},
'If empty': {
'main': if_empty
},
'String': {
'sub': string_sub,
'len': string_len,
'find': string_find
},
'Roman': {
'main': roman_main
},
'Numero romano': {
'main': roman_main
}
}
# ----------------------------------------------------------------------
# variables
class MagicWords(object):
"""
One copy in each Extractor.
@see https://doc.wikimedia.org/mediawiki-core/master/php/MagicWord_8php_source.html
"""
names = [
'!',
'currentmonth',
'currentmonth1',
'currentmonthname',
'currentmonthnamegen',
'currentmonthabbrev',
'currentday',
'currentday2',
'currentdayname',
'currentyear',
'currenttime',
'currenthour',
'localmonth',
'localmonth1',
'localmonthname',
'localmonthnamegen',
'localmonthabbrev',
'localday',
'localday2',
'localdayname',
'localyear',
'localtime',
'localhour',
'numberofarticles',
'numberoffiles',
'numberofedits',
'articlepath',
'pageid',
'sitename',
'server',
'servername',
'scriptpath',
'stylepath',
'pagename',
'pagenamee',
'fullpagename',
'fullpagenamee',
'namespace',
'namespacee',
'namespacenumber',
'currentweek',
'currentdow',
'localweek',
'localdow',
'revisionid',
'revisionday',
'revisionday2',
'revisionmonth',
'revisionmonth1',
'revisionyear',
'revisiontimestamp',
'revisionuser',
'revisionsize',
'subpagename',
'subpagenamee',
'talkspace',
'talkspacee',
'subjectspace',
'subjectspacee',
'talkpagename',
'talkpagenamee',
'subjectpagename',
'subjectpagenamee',
'numberofusers',
'numberofactiveusers',
'numberofpages',
'currentversion',
'rootpagename',
'rootpagenamee',
'basepagename',
'basepagenamee',
'currenttimestamp',
'localtimestamp',
'directionmark',
'contentlanguage',
'numberofadmins',
'cascadingsources',
]
def __init__(self):
self.values = {'!': '|'}
def __getitem__(self, name):
return self.values.get(name)
def __setitem__(self, name, value):
self.values[name] = value
switches = (
'__NOTOC__',
'__FORCETOC__',
'__TOC__',
'__NEWSECTIONLINK__',
'__NONEWSECTIONLINK__',
'__NOGALLERY__',
'__HIDDENCAT__',
'__NOCONTENTCONVERT__',
'__NOCC__',
'__NOTITLECONVERT__',
'__NOTC__',
'__START__',
'__END__',
'__INDEX__',
'__NOINDEX__',
'__STATICREDIRECT__',
'__DISAMBIG__'
)
magicWordsRE = re.compile('|'.join(MagicWords.switches))
# ----------------------------------------------------------------------
# parser functions utilities
def ucfirst(string):
""":return: a string with just its first character uppercase
    We can't use title() since it converts all words.
"""
if string:
return string[0].upper() + string[1:]
else:
return ''
def lcfirst(string):
""":return: a string with its first character lowercase"""
if string:
if len(string) > 1:
return string[0].lower() + string[1:]
else:
return string.lower()
else:
return ''
def fullyQualifiedTemplateTitle(templateTitle):
"""
Determine the namespace of the page being included through the template
mechanism
"""
if templateTitle.startswith(':'):
# Leading colon by itself implies main namespace, so strip this colon
return ucfirst(templateTitle[1:])
else:
m = re.match('([^:]*)(:.*)', templateTitle)
if m:
# colon found but not in the first position - check if it
# designates a known namespace
prefix = normalizeNamespace(m.group(1))
if prefix in knownNamespaces:
return prefix + ucfirst(m.group(2))
# The title of the page being included is NOT in the main namespace and
# lacks any other explicit designation of the namespace - therefore, it
# is resolved to the Template namespace (that's the default for the
# template inclusion mechanism).
# This is a defense against pages whose title only contains UTF-8 chars
# that are reduced to an empty string. Right now I can think of one such
# case - <C2><A0> which represents the non-breaking space.
    # In this particular case, this page is a redirect to [[Non-breaking
# space]], but having in the system a redirect page with an empty title
# causes numerous problems, so we'll live happier without it.
if templateTitle:
return templatePrefix + ucfirst(templateTitle)
else:
return '' # caller may log as error
def normalizeNamespace(ns):
return ucfirst(ns)
# ----------------------------------------------------------------------
# Parser functions
# see http://www.mediawiki.org/wiki/Help:Extension:ParserFunctions
# https://github.com/Wikia/app/blob/dev/extensions/ParserFunctions/ParserFunctions_body.php
class Infix:
"""Infix operators.
The calling sequence for the infix is:
x |op| y
"""
def __init__(self, function):
self.function = function
def __ror__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __or__(self, other):
return self.function(other)
def __rlshift__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __rshift__(self, other):
return self.function(other)
def __call__(self, value1, value2):
return self.function(value1, value2)
ROUND = Infix(lambda x, y: round(x, y))
from math import floor, ceil, pi, e, trunc, exp, log as ln, sin, cos, tan, asin, acos, atan
def sharp_expr(extr, expr):
"""Tries converting a lua expr into a Python expr."""
try:
expr = extr.expand(expr)
expr = re.sub('(?<![!<>])=', '==', expr) # negative lookbehind
expr = re.sub('mod', '%', expr) # no \b here
        expr = re.sub(r'\bdiv\b', '/', expr)        # raw string: \b is a word boundary
        expr = re.sub(r'\bround\b', '|ROUND|', expr)
return text_type(eval(expr))
except:
return '<span class="error">%s</span>' % expr
def sharp_if(extr, testValue, valueIfTrue, valueIfFalse=None, *args):
# In theory, we should evaluate the first argument here,
# but it was evaluated while evaluating part[0] in expandTemplate().
if testValue.strip():
# The {{#if:}} function is an if-then-else construct.
# The applied condition is: "The condition string is non-empty".
valueIfTrue = extr.expand(valueIfTrue.strip()) # eval
if valueIfTrue:
return valueIfTrue
elif valueIfFalse:
return extr.expand(valueIfFalse.strip()) # eval
return ""
def sharp_ifeq(extr, lvalue, rvalue, valueIfTrue, valueIfFalse=None, *args):
rvalue = rvalue.strip()
if rvalue:
# lvalue is always evaluated
if lvalue.strip() == rvalue:
# The {{#ifeq:}} function is an if-then-else construct. The
# applied condition is "is rvalue equal to lvalue". Note that this
# does only string comparison while MediaWiki implementation also
            # supports numerical comparisons.
if valueIfTrue:
return extr.expand(valueIfTrue.strip())
else:
if valueIfFalse:
return extr.expand(valueIfFalse.strip())
return ""
def sharp_iferror(extr, test, then='', Else=None, *args):
if re.match('<(?:strong|span|p|div)\s(?:[^\s>]*\s+)*?class="(?:[^"\s>]*\s+)*?error(?:\s[^">]*)?"', test):
return extr.expand(then.strip())
elif Else is None:
return test.strip()
else:
return extr.expand(Else.strip())
def sharp_switch(extr, primary, *params):
# FIXME: we don't support numeric expressions in primary
# {{#switch: comparison string
# | case1 = result1
# | case2
# | case4 = result2
# | 1 | case5 = result3
# | #default = result4
# }}
primary = primary.strip()
found = False # for fall through cases
default = None
rvalue = None
lvalue = ''
for param in params:
# handle cases like:
# #default = [http://www.perseus.tufts.edu/hopper/text?doc=Perseus...]
pair = param.split('=', 1)
lvalue = extr.expand(pair[0].strip())
rvalue = None
if len(pair) > 1:
# got "="
rvalue = extr.expand(pair[1].strip())
# check for any of multiple values pipe separated
if found or primary in [v.strip() for v in lvalue.split('|')]:
# Found a match, return now
return rvalue
elif lvalue == '#default':
default = rvalue
rvalue = None # avoid defaulting to last case
elif lvalue == primary:
# If the value matches, set a flag and continue
found = True
# Default case
# Check if the last item had no = sign, thus specifying the default case
if rvalue is not None:
return lvalue
elif default is not None:
return default
return ''
# Extension Scribunto: https://www.mediawiki.org/wiki/Extension:Scribunto
def sharp_invoke(module, function, args):
functions = modules.get(module)
if functions:
funct = functions.get(function)
if funct:
return str(funct(args))
return ''
parserFunctions = {
'#expr': sharp_expr,
'#if': sharp_if,
'#ifeq': sharp_ifeq,
'#iferror': sharp_iferror,
'#ifexpr': lambda *args: '', # not supported
'#ifexist': lambda *args: '', # not supported
'#rel2abs': lambda *args: '', # not supported
'#switch': sharp_switch,
'#language': lambda *args: '', # not supported
'#time': lambda *args: '', # not supported
'#timel': lambda *args: '', # not supported
'#titleparts': lambda *args: '', # not supported
# This function is used in some pages to construct links
# http://meta.wikimedia.org/wiki/Help:URL
'urlencode': lambda string, *rest: quote(string.encode('utf-8')),
'lc': lambda string, *rest: string.lower() if string else '',
'lcfirst': lambda string, *rest: lcfirst(string),
'uc': lambda string, *rest: string.upper() if string else '',
'ucfirst': lambda string, *rest: ucfirst(string),
'int': lambda string, *rest: str(int(string)),
}
def callParserFunction(functionName, args, extractor):
"""
Parser functions have similar syntax as templates, except that
the first argument is everything after the first colon.
:return: the result of the invocation, None in case of failure.
:param: args not yet expanded (see branching functions).
https://www.mediawiki.org/wiki/Help:Extension:ParserFunctions
"""
try:
# https://it.wikipedia.org/wiki/Template:Str_endswith has #Invoke
functionName = functionName.lower()
if functionName == '#invoke':
module, fun = args[0].strip(), args[1].strip()
logging.debug('%*s#invoke %s %s %s', extractor.frame.depth, '', module, fun, args[2:])
# special handling of frame
if len(args) == 2:
# find parameters in frame whose title is the one of the original
# template invocation
templateTitle = fullyQualifiedTemplateTitle(module)
if not templateTitle:
logging.warn("Template with empty title")
params = None
frame = extractor.frame
while frame:
if frame.title == templateTitle:
params = frame.args
break
frame = frame.prev
else:
params = [extractor.transform(p) for p in args[2:]] # evaluates them
params = extractor.templateParams(params)
ret = sharp_invoke(module, fun, params)
logging.debug('%*s<#invoke %s %s %s', extractor.frame.depth, '', module, fun, ret)
return ret
if functionName in parserFunctions:
# branching functions use the extractor to selectively evaluate args
return parserFunctions[functionName](extractor, *args)
except:
return "" # FIXME: fix errors
return ""
# ----------------------------------------------------------------------
# Expand using WikiMedia API
# import json
# def expand(text):
# """Expand templates invoking MediaWiki API"""
# text = urlib.urlencodew(text.encode('utf-8'))
# base = urlbase[:urlbase.rfind('/')]
# url = base + "/w/api.php?action=expandtemplates&format=json&text=" + text
# exp = json.loads(urllib.urlopen(url))
# return exp['expandtemplates']['*']
# ----------------------------------------------------------------------
# Extract Template definition
reNoinclude = re.compile(r'<noinclude>(?:.*?)</noinclude>', re.DOTALL)
reIncludeonly = re.compile(r'<includeonly>|</includeonly>', re.DOTALL)
# These are built before spawning processes, hence they are shared.
templates = {}
redirects = {}
# cache of parser templates
# FIXME: sharing this with a Manager slows down.
templateCache = {}
def define_template(title, page):
"""
Adds a template defined in the :param page:.
@see https://en.wikipedia.org/wiki/Help:Template#Noinclude.2C_includeonly.2C_and_onlyinclude
"""
global templates
global redirects
# title = normalizeTitle(title)
# check for redirects
m = re.match('#REDIRECT.*?\[\[([^\]]*)]]', page[0], re.IGNORECASE)
if m:
redirects[title] = m.group(1) # normalizeTitle(m.group(1))
return
text = unescape(''.join(page))
# We're storing template text for future inclusion, therefore,
# remove all <noinclude> text and keep all <includeonly> text
# (but eliminate <includeonly> tags per se).
# However, if <onlyinclude> ... </onlyinclude> parts are present,
# then only keep them and discard the rest of the template body.
# This is because using <onlyinclude> on a text fragment is
# equivalent to enclosing it in <includeonly> tags **AND**
# enclosing all the rest of the template body in <noinclude> tags.
# remove comments
text = comment.sub('', text)
# eliminate <noinclude> fragments
text = reNoinclude.sub('', text)
# eliminate unterminated <noinclude> elements
text = re.sub(r'<noinclude\s*>.*$', '', text, flags=re.DOTALL)
text = re.sub(r'<noinclude/>', '', text)
onlyincludeAccumulator = ''
for m in re.finditer('<onlyinclude>(.*?)</onlyinclude>', text, re.DOTALL):
onlyincludeAccumulator += m.group(1)
if onlyincludeAccumulator:
text = onlyincludeAccumulator
else:
text = reIncludeonly.sub('', text)
if text:
if title in templates:
logging.warn('Redefining: %s', title)
templates[title] = text
# ----------------------------------------------------------------------
def dropNested(text, openDelim, closeDelim):
"""
A matching function for nested expressions, e.g. namespaces and tables.
"""
openRE = re.compile(openDelim, re.IGNORECASE)
closeRE = re.compile(closeDelim, re.IGNORECASE)
# partition text in separate blocks { } { }
spans = [] # pairs (s, e) for each partition
nest = 0 # nesting level
start = openRE.search(text, 0)
if not start:
return text
end = closeRE.search(text, start.end())
next = start
while end:
next = openRE.search(text, next.end())
if not next: # termination
while nest: # close all pending
nest -= 1
end0 = closeRE.search(text, end.end())
if end0:
end = end0
else:
break
spans.append((start.start(), end.end()))
break
while end.end() < next.start():
# { } {
if nest:
nest -= 1
# try closing more
last = end.end()
end = closeRE.search(text, end.end())
if not end: # unbalanced
if spans:
span = (spans[0][0], last)
else:
span = (start.start(), last)
spans = [span]
break
else:
spans.append((start.start(), end.end()))
# advance start, find next close
start = next
end = closeRE.search(text, next.end())
break # { }
if next != start:
# { { }
nest += 1
# collect text outside partitions
return dropSpans(spans, text)
def dropSpans(spans, text):
"""
Drop from text the blocks identified in :param spans:, possibly nested.
"""
spans.sort()
res = ''
offset = 0
for s, e in spans:
if offset <= s: # handle nesting
if offset < s:
res += text[offset:s]
offset = e
res += text[offset:]
return res
# ----------------------------------------------------------------------
# WikiLinks
# May be nested [[File:..|..[[..]]..|..]], [[Category:...]], etc.
# Also: [[Help:IPA for Catalan|[andora]]]
def replaceInternalLinks(text):
"""
Replaces internal links of the form:
[[title |...|label]]trail
with title concatenated with trail, when present, e.g. 's' for plural.
See https://www.mediawiki.org/wiki/Help:Links#Internal_links
"""
# call this after removal of external links, so we need not worry about
# triple closing ]]].
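    # Illustrative example (assumed behaviour, with Extractor.keepLinks == False):
    #   replaceInternalLinks('see [[Foo|bar]]s here') -> 'see bars here'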
cur = 0
res = ''
for s, e in findBalanced(text):
m = tailRE.match(text, e)
if m:
trail = m.group(0)
end = m.end()
else:
trail = ''
end = e
inner = text[s + 2:e - 2]
# find first |
pipe = inner.find('|')
if pipe < 0:
title = inner
label = title
else:
title = inner[:pipe].rstrip()
# find last |
curp = pipe + 1
for s1, e1 in findBalanced(inner):
last = inner.rfind('|', curp, s1)
if last >= 0:
pipe = last # advance
curp = e1
label = inner[pipe + 1:].strip()
res += text[cur:s] + makeInternalLink(title, label) + trail
cur = end
return res + text[cur:]
# the official version is a method in class Parser, similar to this:
# def replaceInternalLinks2(text):
# global wgExtraInterlanguageLinkPrefixes
# # the % is needed to support urlencoded titles as well
# tc = Title::legalChars() + '#%'
# # Match a link having the form [[namespace:link|alternate]]trail
# e1 = re.compile("([%s]+)(?:\\|(.+?))?]](.*)" % tc, re.S | re.D)
# # Match cases where there is no "]]", which might still be images
# e1_img = re.compile("([%s]+)\\|(.*)" % tc, re.S | re.D)
# holders = LinkHolderArray(self)
# # split the entire text string on occurrences of [[
# iterBrackets = re.compile('[[').finditer(text)
# m in iterBrackets.next()
# # get the first element (all text up to first [[)
# s = text[:m.start()]
# cur = m.end()
# line = s
# useLinkPrefixExtension = self.getTargetLanguage().linkPrefixExtension()
# e2 = None
# if useLinkPrefixExtension:
# # Match the end of a line for a word that is not followed by whitespace,
# # e.g. in the case of "The Arab al[[Razi]]", "al" will be matched
# global wgContLang
# charset = wgContLang.linkPrefixCharset()
# e2 = re.compile("((?>.*[^charset]|))(.+)", re.S | re.D | re.U)
# if self.mTitle is None:
# raise MWException(__METHOD__ + ": \self.mTitle is null\n")
# nottalk = not self.mTitle.isTalkPage()
# if useLinkPrefixExtension:
# m = e2.match(s)
# if m:
# first_prefix = m.group(2)
# else:
# first_prefix = false
# else:
# prefix = ''
# useSubpages = self.areSubpagesAllowed()
# for m in iterBrackets:
# line = text[cur:m.start()]
# cur = m.end()
# # TODO: Check for excessive memory usage
# if useLinkPrefixExtension:
# m = e2.match(e2)
# if m:
# prefix = m.group(2)
# s = m.group(1)
# else:
# prefix = ''
# # first link
# if first_prefix:
# prefix = first_prefix
# first_prefix = False
# might_be_img = False
# m = e1.match(line)
# if m: # page with normal label or alt
# label = m.group(2)
# # If we get a ] at the beginning of m.group(3) that means we have a link that is something like:
# # [[Image:Foo.jpg|[http://example.com desc]]] <- having three ] in a row fucks up,
# # the real problem is with the e1 regex
# # See bug 1300.
# #
# # Still some problems for cases where the ] is meant to be outside punctuation,
# # and no image is in sight. See bug 2095.
# #
# if label and m.group(3)[0] == ']' and '[' in label:
# label += ']' # so that replaceExternalLinks(label) works later
# m.group(3) = m.group(3)[1:]
# # fix up urlencoded title texts
# if '%' in m.group(1):
# # Should anchors '#' also be rejected?
# m.group(1) = str_replace(array('<', '>'), array('<', '>'), rawurldecode(m.group(1)))
# trail = m.group(3)
# else:
# m = e1_img.match(line):
# if m:
# # Invalid, but might be an image with a link in its caption
# might_be_img = true
# label = m.group(2)
# if '%' in m.group(1):
# m.group(1) = rawurldecode(m.group(1))
# trail = ""
# else: # Invalid form; output directly
# s += prefix + '[[' + line
# continue
# origLink = m.group(1)
# # Dont allow internal links to pages containing
# # PROTO: where PROTO is a valid URL protocol these
# # should be external links.
# if (preg_match('/^(?i:' + self.mUrlProtocols + ')/', origLink)) {
# s += prefix + '[[' + line
# continue
# }
# # Make subpage if necessary
# if useSubpages:
# link = self.maybeDoSubpageLink(origLink, label)
# else:
# link = origLink
# noforce = origLink[0] != ':'
# if not noforce:
# # Strip off leading ':'
# link = link[1:]
# nt = Title::newFromText(self.mStripState.unstripNoWiki(link))
# if nt is None:
# s += prefix + '[[' + line
# continue
# ns = nt.getNamespace()
# iw = nt.getInterwiki()
# if might_be_img { # if this is actually an invalid link
# if (ns == NS_FILE and noforce) { # but might be an image
# found = False
# while True:
# # look at the next 'line' to see if we can close it there
# next_line = iterBrakets.next()
# if not next_line:
# break
# m = explode(']]', next_line, 3)
# if m.lastindex == 3:
# # the first ]] closes the inner link, the second the image
# found = True
# label += "[[%s]]%s" % (m.group(0), m.group(1))
# trail = m.group(2)
# break
# elif m.lastindex == 2:
# # if there is exactly one ]] that is fine, we will keep looking
# label += "[[{m[0]}]]{m.group(1)}"
# else:
# # if next_line is invalid too, we need look no further
# label += '[[' + next_line
# break
# if not found:
# # we couldnt find the end of this imageLink, so output it raw
# # but dont ignore what might be perfectly normal links in the text we ve examined
# holders.merge(self.replaceInternalLinks2(label))
# s += "{prefix}[[%s|%s" % (link, text)
# # note: no trail, because without an end, there *is* no trail
# continue
# } else: # it is not an image, so output it raw
# s += "{prefix}[[%s|%s" % (link, text)
# # note: no trail, because without an end, there *is* no trail
# continue
# }
# wasblank = (text == '')
# if wasblank:
# text = link
# else:
# # Bug 4598 madness. Handle the quotes only if they come from the alternate part
# # [[Lista d''e paise d''o munno]] . <a href="...">Lista d''e paise d''o munno</a>
# # [[Criticism of Harry Potter|Criticism of ''Harry Potter'']]
# # . <a href="Criticism of Harry Potter">Criticism of <i>Harry Potter</i></a>
# text = self.doQuotes(text)
# # Link not escaped by : , create the various objects
# if noforce and not nt.wasLocalInterwiki():
# # Interwikis
# if iw and mOptions.getInterwikiMagic() and nottalk and (
# Language::fetchLanguageName(iw, None, 'mw') or
# in_array(iw, wgExtraInterlanguageLinkPrefixes)):
# # Bug 24502: filter duplicates
# if iw not in mLangLinkLanguages:
# self.mLangLinkLanguages[iw] = True
# self.mOutput.addLanguageLink(nt.getFullText())
# s = rstrip(s + prefix)
# s += strip(trail, "\n") == '' ? '': prefix + trail
# continue
# if ns == NS_FILE:
# if not wfIsBadImage(nt.getDBkey(), self.mTitle):
# if wasblank:
# # if no parameters were passed, text
# # becomes something like "File:Foo.png",
# # which we dont want to pass on to the
# # image generator
# text = ''
# else:
# # recursively parse links inside the image caption
# # actually, this will parse them in any other parameters, too,
# # but it might be hard to fix that, and it doesnt matter ATM
# text = self.replaceExternalLinks(text)
# holders.merge(self.replaceInternalLinks2(text))
# # cloak any absolute URLs inside the image markup, so replaceExternalLinks() wont touch them
# s += prefix + self.armorLinks(
# self.makeImage(nt, text, holders)) + trail
# else:
# s += prefix + trail
# continue
# if ns == NS_CATEGORY:
# s = rstrip(s + "\n") # bug 87
# if wasblank:
# sortkey = self.getDefaultSort()
# else:
# sortkey = text
# sortkey = Sanitizer::decodeCharReferences(sortkey)
# sortkey = str_replace("\n", '', sortkey)
# sortkey = self.getConverterLanguage().convertCategoryKey(sortkey)
# self.mOutput.addCategory(nt.getDBkey(), sortkey)
# s += strip(prefix + trail, "\n") == '' ? '' : prefix + trail
# continue
# }
# }
# # Self-link checking. For some languages, variants of the title are checked in
# # LinkHolderArray::doVariants() to allow batching the existence checks necessary
# # for linking to a different variant.
# if ns != NS_SPECIAL and nt.equals(self.mTitle) and !nt.hasFragment():
# s += prefix + Linker::makeSelfLinkObj(nt, text, '', trail)
# continue
# # NS_MEDIA is a pseudo-namespace for linking directly to a file
# # @todo FIXME: Should do batch file existence checks, see comment below
# if ns == NS_MEDIA:
# # Give extensions a chance to select the file revision for us
# options = []
# descQuery = False
# Hooks::run('BeforeParserFetchFileAndTitle',
# [this, nt, &options, &descQuery])
# # Fetch and register the file (file title may be different via hooks)
# file, nt = self.fetchFileAndTitle(nt, options)
# # Cloak with NOPARSE to avoid replacement in replaceExternalLinks
# s += prefix + self.armorLinks(
# Linker::makeMediaLinkFile(nt, file, text)) + trail
# continue
# # Some titles, such as valid special pages or files in foreign repos, should
# # be shown as bluelinks even though they are not included in the page table
# #
# # @todo FIXME: isAlwaysKnown() can be expensive for file links; we should really do
# # batch file existence checks for NS_FILE and NS_MEDIA
# if iw == '' and nt.isAlwaysKnown():
# self.mOutput.addLink(nt)
# s += self.makeKnownLinkHolder(nt, text, array(), trail, prefix)
# else:
# # Links will be added to the output link list after checking
# s += holders.makeHolder(nt, text, array(), trail, prefix)
# }
# return holders
def makeInternalLink(title, label):
colon = title.find(':')
if colon > 0 and title[:colon] not in acceptedNamespaces:
return ''
if colon == 0:
# drop also :File:
colon2 = title.find(':', colon + 1)
if colon2 > 1 and title[colon + 1:colon2] not in acceptedNamespaces:
return ''
if Extractor.keepLinks:
return '<a href="%s">%s</a>' % (quote(title.encode('utf-8')), label)
else:
return label
# ----------------------------------------------------------------------
# External links
# from: https://doc.wikimedia.org/mediawiki-core/master/php/DefaultSettings_8php_source.html
wgUrlProtocols = [
'bitcoin:', 'ftp://', 'ftps://', 'geo:', 'git://', 'gopher://', 'http://',
'https://', 'irc://', 'ircs://', 'magnet:', 'mailto:', 'mms://', 'news:',
'nntp://', 'redis://', 'sftp://', 'sip:', 'sips:', 'sms:', 'ssh://',
'svn://', 'tel:', 'telnet://', 'urn:', 'worldwind://', 'xmpp:', '//'
]
# from: https://doc.wikimedia.org/mediawiki-core/master/php/Parser_8php_source.html
# Constants needed for external link processing
# Everything except bracket, space, or control characters
# \p{Zs} is unicode 'separator, space' category. It covers the space 0x20
# as well as U+3000 is IDEOGRAPHIC SPACE for bug 19052
EXT_LINK_URL_CLASS = r'[^][<>"\x00-\x20\x7F\s]'
ANCHOR_CLASS = r'[^][\x00-\x08\x0a-\x1F]'
ExtLinkBracketedRegex = re.compile(
'\[(((?i)' + '|'.join(wgUrlProtocols) + ')' + EXT_LINK_URL_CLASS + r'+)' +
r'\s*((?:' + ANCHOR_CLASS + r'|\[\[' + ANCHOR_CLASS + r'+\]\])' + r'*?)\]',
re.S | re.U)
# A simpler alternative:
# ExtLinkBracketedRegex = re.compile(r'\[(.*?)\](?!])')
EXT_IMAGE_REGEX = re.compile(
r"""^(http://|https://)([^][<>"\x00-\x20\x7F\s]+)
/([A-Za-z0-9_.,~%\-+&;#*?!=()@\x80-\xFF]+)\.((?i)gif|png|jpg|jpeg)$""",
re.X | re.S | re.U)
def replaceExternalLinks(text):
"""
https://www.mediawiki.org/wiki/Help:Links#External_links
[URL anchor text]
"""
s = ''
cur = 0
for m in ExtLinkBracketedRegex.finditer(text):
s += text[cur:m.start()]
cur = m.end()
url = m.group(1)
label = m.group(3)
# # The characters '<' and '>' (which were escaped by
# # removeHTMLtags()) should not be included in
# # URLs, per RFC 2396.
# m2 = re.search('&(lt|gt);', url)
# if m2:
# link = url[m2.end():] + ' ' + link
# url = url[0:m2.end()]
# If the link text is an image URL, replace it with an <img> tag
# This happened by accident in the original parser, but some people used it extensively
m = EXT_IMAGE_REGEX.match(label)
if m:
label = makeExternalImage(label)
# Use the encoded URL
# This means that users can paste URLs directly into the text
# Funny characters like ö aren't valid in URLs anyway
# This was changed in August 2004
s += makeExternalLink(url, label) # + trail
return s + text[cur:]
def makeExternalLink(url, anchor):
"""Function applied to wikiLinks"""
if Extractor.keepLinks:
return '<a href="%s">%s</a>' % (quote(url.encode('utf-8')), anchor)
else:
return anchor
def makeExternalImage(url, alt=''):
if Extractor.keepLinks:
return '<img src="%s" alt="%s">' % (url, alt)
else:
return alt
# ----------------------------------------------------------------------
# match tail after wikilink
tailRE = re.compile('\w+')
syntaxhighlight = re.compile('<syntaxhighlight .*?>(.*?)</syntaxhighlight>', re.DOTALL)
# skip level 1, it is page name level
section = re.compile(r'(==+)\s*(.*?)\s*\1')
listOpen = {'*': '<ul>', '#': '<ol>', ';': '<dl>', ':': '<dl>'}
listClose = {'*': '</ul>', '#': '</ol>', ';': '</dl>', ':': '</dl>'}
listItem = {'*': '<li>%s</li>', '#': '<li>%s</li>', ';': '<dt>%s</dt>',
':': '<dd>%s</dd>'}
def compact(text):
"""Deal with headers, lists, empty sections, residuals of tables.
:param text: convert to HTML.
"""
page = [] # list of paragraph
headers = {} # Headers for unfilled sections
emptySection = False # empty sections are discarded
listLevel = [] # nesting of lists
for line in text.split('\n'):
if not line:
continue
# Handle section titles
m = section.match(line)
if m:
title = m.group(2)
lev = len(m.group(1)) # header level
if Extractor.toHTML:
page.append("<h%d>%s</h%d>" % (lev, title, lev))
if title and title[-1] not in '!?':
title += '.' # terminate sentence.
headers[lev] = title
# drop previous headers
for i in list(headers.keys()):
if i > lev:
del headers[i]
emptySection = True
listLevel = []
continue
# Handle page title
elif line.startswith('++'):
title = line[2:-2]
if title:
if title[-1] not in '!?':
title += '.'
page.append(title)
# handle indents
elif line[0] == ':':
# page.append(line.lstrip(':*#;'))
continue
# handle lists
elif line[0] in '*#;:':
i = 0
# c: current level char
# n: next level char
for c, n in zip_longest(listLevel, line, fillvalue=''):
if not n or n not in '*#;:': # shorter or different
if c:
if Extractor.toHTML:
page.append(listClose[c])
listLevel = listLevel[:-1]
continue
else:
break
# n != ''
if c != n and (not c or (c not in ';:' and n not in ';:')):
if c:
# close level
if Extractor.toHTML:
page.append(listClose[c])
listLevel = listLevel[:-1]
listLevel += n
if Extractor.toHTML:
page.append(listOpen[n])
i += 1
n = line[i - 1] # last list char
line = line[i:].strip()
if line: # FIXME: n is '"'
if Extractor.keepLists:
# emit open sections
items = sorted(headers.items())
for i, v in items:
page.append(v)
headers.clear()
# FIXME: use item count for #-lines
bullet = '1. ' if n == '#' else '- '
page.append('{0:{1}s}'.format(bullet, len(listLevel)) + line)
elif Extractor.toHTML:
page.append(listItem[n] % line)
elif len(listLevel):
page.append(line)
if Extractor.toHTML:
for c in reversed(listLevel):
page.append(listClose[c])
listLevel = []
# Drop residuals of lists
elif line[0] in '{|' or line[-1] == '}':
continue
# Drop irrelevant lines
elif (line[0] == '(' and line[-1] == ')') or line.strip('.-') == '':
continue
elif len(headers):
if Extractor.keepSections:
items = sorted(headers.items())
for i, v in items:
page.append(v)
headers.clear()
page.append(line) # first line
emptySection = False
elif not emptySection:
# Drop preformatted
if line[0] != ' ': # dangerous
page.append(line)
return page
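# Illustrative behaviour (assuming Extractor.keepSections=True and toHTML=False):
#   compact('== History ==\nFounded in 1990.')
#   -> ['History.', 'Founded in 1990.']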
def handle_unicode(entity):
numeric_code = int(entity[2:-1])
if numeric_code >= 0x10000: return ''
return chr(numeric_code)
# ------------------------------------------------------------------------------
# Output
class NextFile(object):
"""
Synchronous generation of next available file name.
"""
filesPerDir = 9223372036854775807
def __init__(self, path_name):
self.path_name = path_name
self.dir_index = -1
self.file_index = -1
def __next__(self):
self.file_index = (self.file_index + 1) % NextFile.filesPerDir
if self.file_index == 0:
self.dir_index += 1
dirname = self._dirname()
if not os.path.isdir(dirname):
os.makedirs(dirname)
return self._filepath()
next = __next__
def _dirname(self):
char1 = self.dir_index % 26
char2 = self.dir_index // 26 % 26
return os.path.join(self.path_name, '%c%c' % (ord('A') + char2, ord('A') + char1))
def _filepath(self):
return '%s/wiki_%02d.html' % (self._dirname(), self.file_index)
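# Illustrative behaviour: because filesPerDir is effectively unbounded here,
# successive next() calls on NextFile('out') yield
#   'out/AA/wiki_00.html', 'out/AA/wiki_01.html', ...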
class OutputSplitter(object):
"""
File-like object, that splits output to multiple files of a given max size.
"""
def __init__(self, nextFile, max_file_size=0, compress=True):
"""
:param nextFile: a NextFile object from which to obtain filenames
to use.
:param max_file_size: the maximum size of each file.
:param compress: whether to write data with bzip compression.
"""
self.nextFile = nextFile
self.compress = compress
self.max_file_size = max_file_size
self.file = self.open(next(self.nextFile))
def reserve(self, size):
#if self.file.tell() + size > self.max_file_size:
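# NOTE: with the size check above commented out, every write() rolls over to a
# fresh output file, i.e. one document per file regardless of max_file_size.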
self.close()
self.file = self.open(next(self.nextFile))
def write(self, data):
self.reserve(len(data))
self.file.write(data)
def close(self):
self.file.close()
def open(self, filename):
if self.compress:
return bz2.BZ2File(filename + '.bz2', 'w')
else:
return open(filename, 'wb')
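# Minimal usage sketch (hypothetical output directory 'out', no compression):
#   writer = OutputSplitter(NextFile('out'), compress=False)
#   writer.write('<doc>...</doc>\n'.encode('utf-8'))
#   writer.close()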
# ----------------------------------------------------------------------
# READER
tagRE = re.compile(r'(.*?)<(/?\w+)[^>]*>(?:([^<]*)(<.*?>)?)?')
# 1 2 3 4
def load_templates(file, output_file=None):
"""
Load templates from :param file:.
:param output_file: file where to save templates and modules.
"""
global templateNamespace, templatePrefix
templatePrefix = templateNamespace + ':'
global moduleNamespace, modulePrefix
modulePrefix = moduleNamespace + ':'
if output_file:
output = codecs.open(output_file, 'wb', 'utf-8')
for page_count, page_data in enumerate(pages_from(file)):
id, revid, title, ns, page = page_data
if not output_file and (not templateNamespace or
not moduleNamespace): # do not know it yet
# reconstruct templateNamespace and moduleNamespace from the first title
if ns in templateKeys:
colon = title.find(':')
if colon > 1:
if ns == '10':
templateNamespace = title[:colon]
templatePrefix = title[:colon + 1]
elif ns == '828':
moduleNamespace = title[:colon]
modulePrefix = title[:colon + 1]
if ns in templateKeys:
text = ''.join(page)
define_template(title, text)
# save templates and modules to file
if output_file:
output.write('<page>\n')
output.write(' <title>%s</title>\n' % title)
output.write(' <ns>%s</ns>\n' % ns)
output.write(' <id>%s</id>\n' % id)
output.write(' <text>')
for line in page:
output.write(line)
output.write(' </text>\n')
output.write('</page>\n')
if page_count and page_count % 100000 == 0:
logging.info("Preprocessed %d pages", page_count)
if output_file:
output.close()
logging.info("Saved %d templates to '%s'", len(templates), output_file)
def pages_from(input):
"""
Scans input extracting pages.
:return: (id, revid, title, namespace key, page), page is a list of lines.
"""
# we collect individual lines, since str.join() is significantly faster
# than concatenation
page = []
id = None
ns = '0'
last_id = None
revid = None
inText = False
redirect = False
title = None
for line in input:
line = line.decode('utf-8')
if '<' not in line: # faster than doing re.search()
if inText:
page.append(line)
continue
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'page':
page = []
redirect = False
elif tag == 'id' and not id:
id = m.group(3)
elif tag == 'id' and id:
revid = m.group(3)
elif tag == 'title':
title = m.group(3)
elif tag == 'ns':
ns = m.group(3)
#elif tag == 'redirect':
# redirect = True
elif tag == 'text':
if m.lastindex == 3 and line[m.start(3)-2] == '/': # self closing
# <text xml:space="preserve" />
continue
inText = True
line = line[m.start(3):m.end(3)]
page.append(line)
if m.lastindex == 4: # open-close
inText = False
elif tag == '/text':
if m.group(1):
page.append(m.group(1))
inText = False
elif inText:
page.append(line)
elif tag == '/page':
if id != last_id and not redirect:
yield (id, revid, title, ns, page)
last_id = id
ns = '0'
id = None
revid = None
title = None
page = []
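# Each yielded tuple is (page id, revision id, title, namespace key, text lines),
# e.g. ('12', '34567', 'Anarchism', '0', ['Anarchism is a political philosophy ...']).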
def process_dump(input_file, template_file, out_file, file_size, file_compress,
process_count):
"""
:param input_file: name of the wikipedia dump file; '-' to read from stdin
:param template_file: optional file with template definitions.
:param out_file: directory where to store extracted data, or '-' for stdout
:param file_size: max size of each extracted file, or None for no max (one file)
:param file_compress: whether to compress files with bzip.
:param process_count: number of extraction processes to spawn.
"""
global urlbase
global knownNamespaces
global templateNamespace, templatePrefix
global moduleNamespace, modulePrefix
if input_file == '-':
input = sys.stdin
else:
input = fileinput.FileInput(input_file, openhook=fileinput.hook_compressed)
# collect siteinfo
for line in input:
line = line.decode('utf-8')
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'base':
# discover urlbase from the xml dump file
# /mediawiki/siteinfo/base
base = m.group(3)
urlbase = base[:base.rfind("/")]
elif tag == 'namespace':
knownNamespaces.add(m.group(3))
if re.search('key="10"', line):
templateNamespace = m.group(3)
templatePrefix = templateNamespace + ':'
elif re.search('key="828"', line):
moduleNamespace = m.group(3)
modulePrefix = moduleNamespace + ':'
elif tag == '/siteinfo':
break
if Extractor.expand_templates:
# preprocess
template_load_start = default_timer()
if template_file:
if os.path.exists(template_file):
logging.info("Preprocessing '%s' to collect template definitions: this may take some time.", template_file)
# can't use with here:'
file = fileinput.FileInput(template_file,
openhook=fileinput.hook_compressed)
load_templates(file)
file.close()
else:
if input_file == '-':
# can't scan then reset stdin; must error w/ suggestion to specify template_file
raise ValueError("to use templates with stdin dump, must supply explicit template-file")
logging.info("Preprocessing '%s' to collect template definitions: this may take some time.", input_file)
load_templates(input, template_file)
input.close()
input = fileinput.FileInput(input_file, openhook=fileinput.hook_compressed)
template_load_elapsed = default_timer() - template_load_start
logging.info("Loaded %d templates in %.1fs", len(templates), template_load_elapsed)
# process pages
logging.info("Starting page extraction from %s.", input_file)
extract_start = default_timer()
# Parallel Map/Reduce:
# - pages to be processed are dispatched to workers
# - a reduce process collects the results, sort them and print them.
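# i.e. pages_from(input) -> jobs_queue -> extract_process (x N) -> output_queue -> reduce_process -> output files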
maxsize = 10 * process_count
# output queue
output_queue = Queue(maxsize=maxsize)
if out_file == '-':
out_file = None
worker_count = max(1, process_count)
# load balancing
max_spool_length = 10000
spool_length = Value('i', 0, lock=False)
# reduce job that sorts and prints output
reduce = Process(target=reduce_process,
args=(output_queue, spool_length,
out_file, file_size, file_compress))
reduce.start()
# initialize jobs queue
jobs_queue = Queue(maxsize=maxsize)
# start worker processes
logging.info("Using %d extract processes.", worker_count)
workers = []
for i in range(worker_count):
extractor = Process(target=extract_process,
args=(i, jobs_queue, output_queue))
extractor.daemon = True # only live while parent process lives
extractor.start()
workers.append(extractor)
# fp_i = open('output.npy', 'rb')
# arr = np.load(fp_i);
#sorted_arr=arr[:,0].sort()
ignored_list=[]
with open('ignored_docs.txt') as f:
for line in f:
ignored_list.append(int(line));
count=1;
listcount=0;
# Mapper process
page_num = 0
for page_data in pages_from(input):
if listcount >= len(ignored_list) or count != ignored_list[listcount]:
id, revid, title, ns, page = page_data
if keepPage(ns, page):
# slow down
delay = 0
if spool_length.value > max_spool_length:
# reduce to 10%
while spool_length.value > max_spool_length/10:
time.sleep(10)
delay += 10
if delay:
logging.info('Delay %ds', delay)
job = (id, revid, title, page, page_num)
jobs_queue.put(job) # goes to any available extract_process
page_num += 1
page = None # free memory
else:
listcount=listcount+1
count=count+1
input.close()
# signal termination
for _ in workers:
jobs_queue.put(None)
# wait for workers to terminate
for w in workers:
w.join()
# signal end of work to reduce process
output_queue.put(None)
# wait for it to finish
reduce.join()
extract_duration = default_timer() - extract_start
extract_rate = page_num / extract_duration
logging.info("Finished %d-process extraction of %d articles in %.1fs (%.1f art/s)",
process_count, page_num, extract_duration, extract_rate)
# ----------------------------------------------------------------------
# Multiprocess support
def extract_process(i, jobs_queue, output_queue):
"""Pull tuples of raw page content, do CPU/regex-heavy fixup, push finished text
:param i: process id.
:param jobs_queue: where to get jobs.
:param output_queue: where to queue extracted text for output.
"""
out = StringIO() # memory buffer
while True:
job = jobs_queue.get() # job is (id, title, page, page_num)
if job:
id, revid, title, page, page_num = job
try:
e = Extractor(*job[:4]) # (id, revid, title, page)
page = None # free memory
e.extract(out)
text = out.getvalue()
except:
text = ''
logging.exception('Processing page: %s %s', id, title)
output_queue.put((page_num, text))
out.truncate(0)
out.seek(0)
else:
logging.debug('Quit extractor')
break
out.close()
report_period = 10000 # progress report period
def reduce_process(output_queue, spool_length,
out_file=None, file_size=0, file_compress=True):
"""Pull finished article text, write series of files (or stdout)
:param output_queue: text to be output.
:param spool_length: spool length.
:param out_file: filename where to print.
:param file_size: max file size.
:param file_compress: whether to compress output.
"""
if out_file:
nextFile = NextFile(out_file)
output = OutputSplitter(nextFile, file_size, file_compress)
else:
output = sys.stdout if PY2 else sys.stdout.buffer
if file_compress:
logging.warn("writing to stdout, so no output compression (use an external tool)")
interval_start = default_timer()
# FIXME: use a heap
spool = {} # collected pages
next_page = 0 # sequence numbering of page
while True:
if next_page in spool:
output.write(spool.pop(next_page).encode('utf-8'))
next_page += 1
# tell mapper our load:
spool_length.value = len(spool)
# progress report
if next_page % report_period == 0:
interval_rate = report_period / (default_timer() - interval_start)
logging.info("Extracted %d articles (%.1f art/s)",
next_page, interval_rate)
interval_start = default_timer()
else:
# mapper puts None to signal finish
pair = output_queue.get()
if not pair:
break
page_num, text = pair
spool[page_num] = text
# tell mapper our load:
spool_length.value = len(spool)
# FIXME: if an extractor dies, process stalls; the other processes
# continue to produce pairs, filling up memory.
if len(spool) > 200:
logging.debug('Collected %d, waiting: %d, %d', len(spool),
next_page, next_page == page_num)
if output != sys.stdout:
output.close()
# ----------------------------------------------------------------------
# Minimum size of output files
minFileSize = 200 * 1024
def main():
global urlbase, acceptedNamespaces, filter_disambig_pages
global templateCache
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__)
parser.add_argument("input",
help="XML wiki dump file")
groupO = parser.add_argument_group('Output')
groupO.add_argument("-o", "--output", default="text",
help="directory for extracted files (or '-' for dumping to stdout)")
groupO.add_argument("-b", "--bytes", default="1M",
help="maximum bytes per output file (default %(default)s)",
metavar="n[KMG]")
groupO.add_argument("-c", "--compress", action="store_true",
help="compress output files using bzip")
groupP = parser.add_argument_group('Processing')
groupP.add_argument("--html", action="store_true",
help="produce HTML output, subsumes --links")
groupP.add_argument("-l", "--links", action="store_true",
help="preserve links")
groupP.add_argument("-s", "--sections", action="store_true",
help="preserve sections")
groupP.add_argument("--lists", action="store_true",
help="preserve lists")
groupP.add_argument("-ns", "--namespaces", default="", metavar="ns1,ns2",
help="accepted namespaces in links")
groupP.add_argument("--templates",
help="use or create file containing templates")
groupP.add_argument("--no-templates", action="store_false",
help="Do not expand templates")
groupP.add_argument("-r", "--revision", action="store_true", default=Extractor.print_revision,
help="Include the document revision id (default=%(default)s)")
groupP.add_argument("--min_text_length", type=int, default=Extractor.min_text_length,
help="Minimum expanded text length required to write document (default=%(default)s)")
groupP.add_argument("--filter_disambig_pages", action="store_true", default=filter_disambig_pages,
help="Remove pages from output that contain disabmiguation markup (default=%(default)s)")
default_process_count = cpu_count() - 1
parser.add_argument("--processes", type=int, default=default_process_count,
help="Number of processes to use (default %(default)s)")
groupS = parser.add_argument_group('Special')
groupS.add_argument("-q", "--quiet", action="store_true",
help="suppress reporting progress info")
groupS.add_argument("--debug", action="store_true",
help="print debug info")
groupS.add_argument("-a", "--article", action="store_true",
help="analyze a file containing a single article (debug option)")
groupS.add_argument("-v", "--version", action="version",
version='%(prog)s ' + version,
help="print program version")
args = parser.parse_args()
Extractor.keepLinks = args.links
Extractor.keepSections = args.sections
Extractor.keepLists = args.lists
Extractor.toHTML = args.html
Extractor.print_revision = args.revision
Extractor.min_text_length = args.min_text_length
if args.html:
Extractor.keepLinks = True
Extractor.expand_templates = args.no_templates
filter_disambig_pages = args.filter_disambig_pages
try:
power = 'kmg'.find(args.bytes[-1].lower()) + 1
file_size = int(args.bytes[:-1]) * 1024 ** power
if file_size < minFileSize:
raise ValueError()
except ValueError:
logging.error('Insufficient or invalid size: %s', args.bytes)
return
if args.namespaces:
acceptedNamespaces = set(args.namespaces.split(','))
FORMAT = '%(levelname)s: %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger()
if not args.quiet:
logger.setLevel(logging.INFO)
if args.debug:
logger.setLevel(logging.DEBUG)
input_file = args.input
if not Extractor.keepLinks:
ignoreTag('a')
# sharing cache of parser templates is too slow:
# manager = Manager()
# templateCache = manager.dict()
if args.article:
if args.templates:
if os.path.exists(args.templates):
with open(args.templates) as file:
load_templates(file)
file = fileinput.FileInput(input_file, openhook=fileinput.hook_compressed)
# fp_i = open('output.npy', 'rb')
# arr = np.load(fp_i);
#sorted_arr=arr[:,0].sort()
ignored_list=[]
with open('ignored_docs.txt') as f:
for line in f:
ignored_list.append(int(line));
count=1;
listcount=0;
reduced_page_data=[]
for page_data in pages_from(file):
if listcount >= len(ignored_list) or count != ignored_list[listcount]:
reduced_page_data.append(page_data)
else:
listcount=listcount+1
count=count+1
for page_data in reduced_page_data:
id, revid, title, ns, page = page_data
Extractor(id, revid, title, page).extract(sys.stdout)
file.close()
return
output_path = args.output
if output_path != '-' and not os.path.isdir(output_path):
try:
os.makedirs(output_path)
except:
logging.error('Could not create: %s', output_path)
return
process_dump(input_file, args.templates, output_path, file_size,
args.compress, args.processes)
if __name__ == '__main__':
main()
|
[
"nilay.chakraborty@rutgers.edu"
] |
nilay.chakraborty@rutgers.edu
|
16eb84857e1e7a3ba4508db0565d50a1a3503b54
|
f3e165d4334708b39808f7cc0b86313e134b050d
|
/versionmonitor/models.py
|
6dc71afa30decc5b9e264e95b554184d951c226f
|
[] |
no_license
|
SammyVimes/versionmonitor
|
6c88940f12ea04bff75be69e06d86963dc5a8e49
|
9cae81fc67d139f23713fc959b4ed2a412879856
|
refs/heads/master
| 2016-08-12T13:26:30.126486
| 2015-11-12T18:55:21
| 2015-11-12T18:55:21
| 44,018,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
from versionmonitor.model.application import *
from versionmonitor.model.project import *
from versionmonitor.model.usr import *
|
[
"samvimes@yandex.ru"
] |
samvimes@yandex.ru
|
6cad4e365e7a673e4e080a6c30b59a28a0df3daa
|
618beb0cacd5de155497904df97b27873619b5cc
|
/homeworks/hw3.py
|
2897fe1429ad1a1b2eee4c82fa37dcb8b1f2804d
|
[] |
no_license
|
fatmaoz7/GlobalAIHubPythonCourse
|
354f4549d3d6fbe9a38e63235efbe89f737eb5cc
|
dbfa5e40d02bf10a0b5dfe4495ec39052b7eb6f7
|
refs/heads/main
| 2023-04-02T00:18:12.209147
| 2021-04-11T10:16:44
| 2021-04-11T10:16:44
| 355,261,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,111
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 9 21:41:36 2021
@author: fatma oz
"""
students={}
students_with_passing_grade={} #creating empty dictionaries
i=1;
n=5
students_grade_list_sorted=[]
while i<=n:
name=input("Please enter student name") #getting inputs from user
midterm=float(input("Please enter midterm grade of student"))
project=float(input("Please enter project grade of student"))
final=float(input("Please enter final grade of student"))
students[name]=midterm,project,final #placing into dictionary
passing_grade=students[name][0]*0.3+students[name][1]*0.3+students[name][2]*0.4;
students_with_passing_grade[name]="Passing grade", passing_grade
i+=1
# passingGrades=[passing_grade for passing_grade in students_with_passing_grade.items()]
students_grade_list_sorted.append(students_with_passing_grade[name][1])
print(students_with_passing_grade)
print(students)
# print(passingGrades)
print(sorted(students_grade_list_sorted,reverse=True))
|
[
"noreply@github.com"
] |
fatmaoz7.noreply@github.com
|
c383ebbd7b622a6e4b2d75f1785dbbb02fe57f50
|
a992faa128728011deb2aacbb38d2e2de042e6d0
|
/feedforward/modelsv3/test_rejected.py
|
e4af5f6b1faec186a4764aac49de8c9610acbaa6
|
[] |
no_license
|
nigroup/binaural_audition
|
e157693f5a3b524d9c9f79d00663a785ab348f7c
|
e03b9ee7961d3df347c3f7b1cbe15a51ad47f530
|
refs/heads/master
| 2021-06-06T04:05:00.243227
| 2021-01-14T12:57:06
| 2021-01-14T12:57:06
| 116,171,234
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,983
|
py
|
import os
from sys import exit
from sys import path
import pdb
from tqdm import tqdm
import tensorflow as tf
import numpy as np
import train_utils as tr_utils
# import plotting as plot
import hyperparams
import cnn_model
import dataloader
import ldataloader
import settings
import test_compare
def test_ldl(ldl_buffer_rows):
h = hyperparams.Hyperparams()
hyp = h.getworkingHyperparams()
import time
from tqdm import tqdm
TRAIN_FOLDS = [1, 2, 3, 4, 5, 6]
train_loader = ldataloader.LineDataLoader('train', h.LABEL_MODE, TRAIN_FOLDS, h.TRAIN_SCENES,
ldl_timesteps=settings.ldl_timesteps,
ldl_blocks_per_batch=hyp["ldl_blocks_per_batch"],
ldl_overlap=settings.ldl_overlap, ldl_buffer_rows=ldl_buffer_rows,
epochs=1, features=h.NFEATURES, classes=h.NCLASSES, seed_by_epoch=False, seed=time.time())
_, d_timesteps, __ = test_compare.measure_directly(train_loader)
batches = 0
while (True):
batches = batches + 1
_x, _y = train_loader.next_batch()
if _x is None:
break
'''
print("batches: " + str(batches))
print("Memory:" + str(train_loader.buffer_add_memory))
print("Timestepsy:" + str(train_loader.buffer_add_timesteps))
print("Buffer Iterations:" + str(train_loader.buffer_add_iterations))
#print(train_loader.buffer_x.shape)
print(train_loader.buffer_x.shape)
print("Buffer Timesteps:" + str(train_loader.buffer_add_timesteps))
print("Real Timesteps:" + str(d_timesteps))
print("bad:")
print(train_loader.bad_sliced_lines)
print("good")
print(train_loader.good_sliced_lines)
print("lin self.count_line_positions_taken_total" + str(train_loader.count_line_positions_taken_total))
print("max position sum" + str(train_loader.add_max_positions))
print("slices y" + str(train_loader.check_slices_y))
print("slices x" + str(train_loader.check_slices_xy))
print("count batches: " + str(train_loader.count_batches))
'''
slices_with_all_data = (d_timesteps / train_loader.buffer_x.shape[0]) /25
slices_in_buffer = train_loader.add_max_positions / 25
loss_due_to_buffer = 1 - (slices_in_buffer/slices_with_all_data)
loss_due_to_rejeceted_data = (train_loader.bad_sliced_lines / slices_in_buffer)
#print(slices_with_all_data)
#print(loss_due_to_buffer)
#print(loss_due_to_rejeceted_data)
return slices_with_all_data, loss_due_to_buffer, loss_due_to_rejeceted_data
if __name__ == '__main__':
iterations_per_block = 10
data = np.zeros((3,iterations_per_block,3))
for e, block_row in tqdm(enumerate(np.array([8, 16,32]))):
for i in range(0,iterations_per_block):
data[e,i,:] = test_ldl(block_row)
np.save("rejected_data.npy", data)
pdb.set_trace()
|
[
"alessandro.schneider.privat@gmail.com"
] |
alessandro.schneider.privat@gmail.com
|
3c050bc8e8b4d6dccd727483ed6631a18acd28c2
|
d5e3ecb3e79006a6c59c787ebd8ee4a321cd5dfd
|
/pythonproject/div.py
|
28ebc9f012d63c49456a35421274c4663217edd7
|
[
"Apache-2.0"
] |
permissive
|
sangampatel/pythonproject
|
f4551e760aac02648d736f09365062eab4634027
|
c6c690fd5d4fa62023db15cf10bdedeeb8b44a41
|
refs/heads/main
| 2023-07-08T10:02:34.387218
| 2021-08-11T09:25:15
| 2021-08-11T09:25:15
| 393,037,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 71
|
py
|
a = int(input())
b = int(input())
c = a/b
print(int(c))
print(float(c))
|
[
"noreply@github.com"
] |
sangampatel.noreply@github.com
|
e86711a24b545be310ffb5b0903c3b629e84b179
|
2befe9a69a976b304e57afb5a32797d8a85db6ff
|
/tutorial/tutorial/settings.py
|
79ac68aff082019f63dfc78cd9bba1cb856fcc45
|
[] |
no_license
|
IgorEngControle/djnago
|
a2f9217ab91788e43eea3ca0c3c53eaf51009a30
|
6649d79ffa3d921ee54173a7459b8b85cd69f507
|
refs/heads/master
| 2020-08-21T18:14:09.538592
| 2019-10-19T14:16:45
| 2019-10-19T14:16:45
| 216,216,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,110
|
py
|
"""
Django settings for tutorial project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+zxj0xk)-8r@d1*(757&ef*ui_4h4xnwe)k5_)-_z)xv+7%1+9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'accounts',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tutorial.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"igor@tacoma.dcc.ufmg.br"
] |
igor@tacoma.dcc.ufmg.br
|
712d19d0246374006b8c8715c3ea651c1575118a
|
d113bfc092f3465d93d14a5420c3af4a23bf6246
|
/lambda.py
|
a5d87d3956b855f531d3985e2c9f96b73c5f9eba
|
[] |
no_license
|
joyvai/Python-
|
8d9de9e7146541966a237e549aaa7448ea9d8855
|
3222c36c7b0cd054b03bc775528a797ff1e9b25d
|
refs/heads/master
| 2021-01-16T23:37:55.719482
| 2017-10-02T11:02:37
| 2017-10-02T11:02:37
| 55,305,032
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
# lambda is key word which is used for annonymous function.
# Small anonymous functions can be created with the lambda keyword
def make_incrementor(n):
return lambda x:x+n
f = make_incrementor(42)
print f(0)
print f(2)
a = [(2, 'b'), (1, 'a'), (3, 'c')] # example list of pairs (hypothetical data so the sort demo runs)
a.sort(key=lambda pair: pair[0])
print a
|
[
"noreply@github.com"
] |
joyvai.noreply@github.com
|
c8e90da54de4f0eef7bbb58a590acd4e21bbdb53
|
6fb302872dbd0fb689cd33be0f777e7f97e44da6
|
/data acquisition/env_vars.py
|
b061b86441f805e66c42b4233fa36347f60a696d
|
[] |
no_license
|
davidanagy/Spotify-Song-Suggester-DS
|
65aa8d5a7494e5efa43308cfbf5334de04cfa949
|
c9b3aec9af313e21bfff76fcdcf71e268d8411e8
|
refs/heads/master
| 2022-04-28T19:18:29.929949
| 2020-05-04T21:15:51
| 2020-05-04T21:15:51
| 261,292,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
SPOTIFYUSERNAME = 'basslaughter' #your spotify username
CLIENT_ID = '63594c9b2f99411a8cbd18df04851fc4' #set at your developer account
CLIENT_SECRET = '096168b2bd1f4378ae410726955c9ed8' #set at your developer account
REDIRECT_URI = 'http://google.com/' #set at your developer account, usually "http://localhost:8000"
SCOPE = 'user-library-read' # or else
GENIUS_CLIENT_ACCESS_TOKEN = 'p0TX__gy-p5bD4UG4D0ASZ3tiLvVNqlD0iStYgOPR3OPakUewNwHlMBkodfkCv1V'
|
[
"zwarshavsky@gmail.com"
] |
zwarshavsky@gmail.com
|
fbf3a11ee73399b2b88c3a02438615c08e7783fd
|
8a02308635ac5d00993094afd71cc8ba8695ca81
|
/ch06_iteration/iteration.py
|
2c3381eeaefca8cde77c6b17a56245366da99c78
|
[] |
no_license
|
suptaphilip/how-to-think-like-a-computer-scientist
|
c219c341037a663b567477be5b0218681b25d747
|
390258201257c93cd1752272e9ff4fb15e424756
|
refs/heads/master
| 2021-01-10T21:19:46.799334
| 2012-08-08T20:17:37
| 2012-08-08T20:17:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
def sqrt(n):
approx = n / 2.0
better = (approx + n / approx) / 2.0
while better != approx:
approx = better
better = (approx + n / approx) / 2.0
print better
return approx
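# Example: sqrt(25.0) iterates Newton's method (x -> (x + n/x) / 2) until the
# estimate stabilises and returns 5.0.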
def print_triangular_numbers(n):
result = 0
index = 1
while index <= n:
result += index
print '%-5d %-5d' % (index, result)
index += 1
|
[
"helena.josol@gmail.com"
] |
helena.josol@gmail.com
|
59d57a4cd3691c235a5cc4a77d1735c3b0645f04
|
088b072bf4021e7b48bc458297e470a576f893d7
|
/main.py
|
3b2f2d46d93176daf976d81efb15b0de9aa0b83d
|
[] |
no_license
|
Guybachar210/python-pi
|
db53b18edb350ca6376108abc7fd3a8de3543614
|
88397f7d06487df050a20e90fc4f293afcd6ed0d
|
refs/heads/master
| 2020-03-13T06:15:45.300351
| 2018-04-25T13:11:32
| 2018-04-25T13:11:32
| 131,001,205
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 54
|
py
|
#!/usr/bin/python3
# This is a comment
print("guy")
|
[
"guy.bachar@mail.huji.ac.il"
] |
guy.bachar@mail.huji.ac.il
|
974df18b41b3a6861168c136ec7244f28c7b5aca
|
ff477a586b946c575441b6189123ab86c175e5ae
|
/linker_tests/link_pre_489/PysamTestModule_link_pre_489/__init__.py
|
e441021180c1280a017ba4118871c577c6661fcc
|
[
"MIT"
] |
permissive
|
pysam-developers/pysam
|
5552e4903106fc253869a405f4a2c068c6bd65c5
|
0663ca85739877e5dd05c0eb2512a8bcaa515b39
|
refs/heads/master
| 2023-08-16T19:10:17.566296
| 2023-08-15T10:06:59
| 2023-08-15T12:28:29
| 16,557,526
| 678
| 332
|
MIT
| 2023-09-14T10:40:22
| 2014-02-05T20:38:10
|
C
|
UTF-8
|
Python
| false
| false
| 84
|
py
|
from PysamTestModule_link_pre_489.BuildRead import build_read
all = ["build_read"]
|
[
"jacobs@bioinoformed.com"
] |
jacobs@bioinoformed.com
|
2755146e8b7d934435fb6a6f933999355302b995
|
35045d4883ee369de5d8e8544b69af5711a937ca
|
/main.py
|
e8e64698903e0178ed9f50dfc0c5de3b8c8c9845
|
[] |
no_license
|
matsui23/blogz
|
0b12f904692e0d4f74922c8cd2957784cb9c07e3
|
adfd7062cbd035eb88d8a7ffdb47f587e922ceae
|
refs/heads/master
| 2020-04-01T15:58:53.049625
| 2018-10-24T00:18:45
| 2018-10-24T00:18:45
| 153,359,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,683
|
py
|
from flask import Flask, request, redirect, render_template, session, flash, url_for
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://blogz:pass@localhost:8889/blogz'
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
app.secret_key = 'secretkey'
class Blog(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(120))
body = db.Column(db.String(600))
owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __init__(self, title, body, owner):
self.title = title
self.body = body
self.owner = owner
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(120))
password = db.Column(db.String(120))
posts = db.relationship('Blog', backref = 'owner')
def __init__(self, username, password):
self.username = username
self.password = password
@app.before_request
def require_login():
print('()()()()()()()()()()()()()()()')
allowed_routes = ['login', 'register']
if request.endpoint not in allowed_routes and 'email' not in session:
return redirect('/login')
@app.route('/', methods=['POST', 'GET'])
def index():
title = 'Build-a-blog'
if request.method == 'POST':
return render_template('blog.html')
if request.method == 'GET':
titles = Blog.query.all()
return render_template('blog.html', titles = titles)
@app.route('/blog', methods=['POST','GET'])
def blog():
title = 'Build-a-blog'
if request.method == 'POST':
return render_template('blog.html')
if request.method == 'GET':
current_user = session['email']
print('================')
print(session['email'])
print('================')
titles = Blog.query.all()
#titles_id = Blog.query.get(id)
print('++++++++++++++++++++++++++++')
users = User.query.all()
#print(titles_id)
print(users)
print('++++++++++++++++++++++++++++')
return render_template('blog.html', titles = titles, current_user = current_user, users = users)
@app.route('/newpost', methods=['POST', 'GET'])
def newpost():
owner = User.query.filter_by(username = session['email']).first()
if request.method == 'POST':
title = request.form['title']
post = request.form['post']
error_title = False
error_post = False
if title == '':
error_title = True
print(error_title)
return render_template('newpost.html', error_title = error_title, error_post = error_post,
title = title, post = post)
if post == '':
print(error_post)
error_post = True
return render_template('newpost.html', error_title = error_title, error_post = error_post,
title = title, post = post)
new_post = Blog(title, post, owner)
db.session.add(new_post)
db.session.commit()
current_id = new_post.id
user_id = new_post.owner_id
return redirect('/post?id={0}&user_id={1}'.format(current_id, user_id))
titles = Blog.query.all()
return render_template('blog.html', titles = titles, user_id = user_id)
if request.method == 'GET':
return render_template('newpost.html')
@app.route('/post', methods = ['POST', 'GET'])
def see_post():
# if request.method == 'POST'
# pass
print('----------------------')
if request.method == 'GET':
current_id = request.args.get('id')
user_id = request.args.get('user_id')
post = Blog.query.get(current_id)
user = User.query.get(user_id)
# need to query for the id to pass to the html
return render_template('post.html', post = post, user = user)
@app.route('/register', methods = ['POST', 'GET'])
def register():
print('-------------------------------')
if request.method == 'GET':
print('-------------------------------')
return render_template('register.html')
if request.method == 'POST':
email = request.form['email']
password = request.form['password']
verify = request.form['verify']
duplicate_user = False
error_email = False
error_password = False
error_pass_match = False
existing_user = User.query.filter_by(username = email).first()
if existing_user:
duplicate_user = True
return render_template('register.html', email = email, duplicate_user = duplicate_user, error_email = error_email, error_password = error_password, error_pass_match = error_pass_match)
if email =='' and password == '' and verify == '':
error_email = True
error_password = True
return render_template('register.html', email = email, duplicate_user = duplicate_user, error_email = error_email, error_password = error_password, error_pass_match = error_pass_match)
if password == '' or verify =='':
error_password = True
return render_template('register.html', email = email, duplicate_user = duplicate_user, error_email = error_email, error_password = error_password, error_pass_match = error_pass_match)
if password != verify:
error_pass_match = True
return render_template('register.html', email = email, duplicate_user = duplicate_user, error_email = error_email, error_password = error_password, error_pass_match = error_pass_match)
if not existing_user and password == verify and password:
new_user = User(email, password)
db.session.add(new_user)
db.session.commit()
session['email'] = email
print('-----------------------')
return redirect('/blog')
else:
print('***********************')
return redirect('/register')
@app.route('/login', methods = ['POST', 'GET'])
def login():
print('-------------------------------')
if request.method == 'GET':
print('-------------------------------')
return render_template('login.html')
if request.method == 'POST':
email = request.form['email']
password = request.form['password']
error_email = False
error_password = False
error_email_empty = False
error_password_empty = False
user = User.query.filter_by(username = email).first()
if user and user.password == password:
session['email'] = email
return redirect('/newpost')
if email == '' and password == '':
error_email_empty = True
error_password_empty = True
return render_template('login.html', email = email, error_email = error_email, error_password = error_password, error_email_empty = error_email_empty, error_password_empty = error_password_empty)
if not user:
error_email = True
return render_template('login.html', email = email, error_email = error_email, error_password = error_password, error_email_empty = error_email_empty, error_password_empty = error_password_empty)
if user.password != password:
error_password = True
return render_template('login.html', email = email, error_email = error_email, error_password = error_password, error_email_empty = error_email_empty, error_password_empty = error_password_empty)
return render_template('login.html')
@app.route('/logout')
def logout():
print('----------------------------')
print(session['email'])
print('----------------------------')
del session['email']
return redirect('/')
@app.route('/userlist', methods = ['POST', 'GET'])
def display_users():
if request.method == 'GET':
users = User.query.all()
return render_template('user_list.html', users = users)
@app.route('/userposts', methods = ['POST', 'GET'])
def display_users_posts():
if request.method == 'GET':
owner_id = request.args.get('id')
print('**********************')
print(owner_id)
print('**********************')
posts = Blog.query.filter_by(owner_id = owner_id)
print('[][][][[][][][][]')
print(posts)
print('[][][][[][][][][]')
return render_template('users_posts.html', posts = posts)
if __name__ == '__main__':
app.run()
|
[
"mattgamedev2313@gmail.com"
] |
mattgamedev2313@gmail.com
|
cdec30010c2923fa9b8c544e8bdf200916434ee6
|
5cc4f84574e98c3182baa8049e307a54f0ce0eba
|
/Image.py
|
f9321317b32ef706fa9a8aedef2c4f37143304df
|
[
"MIT"
] |
permissive
|
leonardodalcin/cvbootstrap
|
fbc6f4c7115844b90adac54231c26a1060adb92d
|
72b0b4bc8433f47ed78378895e23ba385cf1ecc3
|
refs/heads/master
| 2020-03-28T03:59:13.325705
| 2018-09-06T14:46:10
| 2018-09-06T14:46:10
| 147,687,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 841
|
py
|
from datetime import datetime
from matplotlib import pyplot as plt
import cv2
import os
class Image:
image = None
def show(self):
plt.imshow(self.image, cmap="gray", interpolation='bicubic')
plt.show()
def save(self):
print("Saving photo")
now = datetime.now()
dirName = now.strftime("%d-%m-%Y")
fileName = now.strftime("%X")
if not os.path.exists(dirName):
os.makedirs(dirName)
cv2.imwrite(dirName + "/" + fileName + ".png", self.image)
def rotate(self, degrees):
(height, width) = self.image.shape[:2]
center = (height / 2, width / 2)
rotationMatrix = cv2.getRotationMatrix2D(center, degrees, scale=1)
self.image = cv2.warpAffine(self.image, rotationMatrix, (width, height))
def __init__(self, image = None, path = None):
if (path):
self.image = cv2.imread(path, 0)
else:
self.image = image
|
[
"noreply@github.com"
] |
leonardodalcin.noreply@github.com
|
6059f19d35052a833fe121731afddf53ecd51c70
|
1b3454e03d5f07bd9739c8fa51aef53576234970
|
/publish_preflight/forms.py
|
812367022ef16f4da49c7b8f7faa99228bbe4966
|
[
"BSD-3-Clause"
] |
permissive
|
cityofaustin/wagtail-publish-preflight
|
6b4f171713b31b39fbda3cecf128b38c8278790b
|
5a40d20a7811c67d2c2e085c8127c8d35103a5c8
|
refs/heads/master
| 2022-12-15T13:31:29.861322
| 2019-12-23T19:13:24
| 2019-12-23T19:13:24
| 221,550,274
| 1
| 0
|
BSD-3-Clause
| 2022-12-08T03:16:34
| 2019-11-13T20:57:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,727
|
py
|
from wagtail.admin.forms import WagtailAdminPageForm
from wagtail.core.models import Page, PageRevision
from django.core.exceptions import ValidationError
from wagtail.admin import messages
import logging
class PublishPreflightForm(WagtailAdminPageForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def clean(self):
"""
ways to limit scope:
changed fields,
then exclude fields that are required
self.changed_data = list of fields changed
self[field_name].data or as_text (might be useful for streamfields)
looks like this is working, atm tho it just wont let you publish any empty fields :-D
"""
def check_for_empties():
"""
adds an error to each field if it is empty
"""
if hasattr(self.instance, 'fields_required_for_publish'):
errors_for_empties = {
field_name: try_adding_error_to_field(
field_name, field_value)
for (field_name, field_value) in self.data.items()
if (len(field_value) == 0 or field_value == 'null') and field_name in self.instance.fields_required_for_publish
}
def try_adding_error_to_field(field_name, field_value):
try:
self.add_error(field_name, f'{field_name} is empty!')
except ValueError as e:
logging.error(e)
try:
field_value.non_form_errors().append(
f'{field_name} not selected!')
self.add_error(None, f'{field_name} is missing!')
except AttributeError as e:
logging.error(e)
pass
pass
def check_for_missing_relations():
relations = self.formsets
# relation_value.cleaned_data
if hasattr(self.instance, 'fields_required_for_publish'):
errors_for_missing_relations = {
relation_name: try_adding_error_to_field(
relation_name, relation_value)
for (relation_name, relation_value) in relations.items()
if not relation_value.forms and relation_name in self.instance.fields_required_for_publish
}
cleaned_data = super().clean()
if 'action-publish' in self.data:
# TODO: we'll probably want a good way to check a managed subset
all_keys = list(self.fields.keys())
check_all = check_for_empties()
missing_relations = check_for_missing_relations()
return cleaned_data
|
[
"ericandrewsherman@gmail.com"
] |
ericandrewsherman@gmail.com
|
f188de2692770cbb987a93c8b00f449e44b9a4d7
|
df9f069a0186cf38df8a592ca561c5cb4eb02aef
|
/src/Indeed.py
|
f80663d7f132d040ffdbefb9eaba85b3cee71925
|
[] |
no_license
|
SeekrLabs/JobScraper
|
6f018d6915f9cb30454e5c905117647f7df07426
|
3e490b5e22d3593f3951dbd62aabd57c1dde107c
|
refs/heads/master
| 2020-07-30T08:59:42.171708
| 2019-08-29T19:18:08
| 2019-08-29T19:18:08
| 210,164,839
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,102
|
py
|
import json
import botocore.vendored.requests as requests
import time
from bs4 import BeautifulSoup
import boto3
import datetime
# Run this once a day, it gets completely refreshed once per day
BASE_LINK = 'https://www.indeed.ca/jobs?l=Toronto,+ON&sort=date&fromage=1&limit=50'
sqs = boto3.resource('sqs')
QUEUE_MESSAGE_SIZE = 50
queue = sqs.get_queue_by_name(
QueueName='JobsIngestionQueue'
)
def scrape(event, context):
scrape_start_time = int(time.time())
search = IndeedSearch(BASE_LINK, scrape_start_time)
num_pages = event['pages']
early_stop = event['ads_per_page']
job_ads = []
for _ in range(num_pages):
results = search.process_visit_link(early_stop)
job_ads += [vars(res) for res in results]
search.update_visit_link()
print("Sending message to SQS queue.")
sqs_batch_send_message(job_ads)
def sqs_batch_send_message(content_list):
if content_list == []:
return
num_batches = len(content_list) // 50 + 1
for i in range(num_batches):
batch_send = content_list[i * QUEUE_MESSAGE_SIZE: (i + 1) * QUEUE_MESSAGE_SIZE]
queue.send_message(
MessageBody=json.dumps(batch_send)
)
class IndeedSearch:
# base_link is the base query without indexing the pages of the search
# visit_link is indexed pages, it's updated
def __init__(self, base_link, scrape_start_time):
self.base_link = base_link
self.num_ads = 0
self.visit_link = base_link + '&start=' + str(self.num_ads)
self.scrape_start_time = scrape_start_time
def update_visit_link(self):
self.num_ads += 20
self.visit_link = self.base_link + '&start=' + str(self.num_ads)
def process_visit_link(self, ads_to_visit=999):
jobs_data = []
print('Issuing GET: ' + self.visit_link)
search_query = requests.get(self.visit_link)
print('GET Success, Parsing...')
search_soup = BeautifulSoup(search_query.text, 'html.parser')
print('Finding advertisement cards...')
ad_card_soups = search_soup.find_all('div', {'class': 'jobsearch-SerpJobCard'})
print('Found ' + str(len(ad_card_soups)) + ' ad cards.')
for ad_card_soup in ad_card_soups:
job_ad = IndeedJobAd(ad_card_soup, self.scrape_start_time)
valid_card = job_ad.extract_card()
if valid_card:
jobs_data.append(job_ad)
if len(jobs_data) > ads_to_visit:
break
# Visiting each link in ad card
for ad in jobs_data:
ad.visit_link_to_extract_description()
return jobs_data
class IndeedJobAd:
# Constants
BASE_INDEED = 'https://www.indeed.com'
# Initialize with a BeautifulSoup Card element
def __init__(self, ad_soup, scrape_start_time):
self.ad_soup = ad_soup
self.scrape_start_time = scrape_start_time
# Returns false if Job Posting is sponsored
def extract_card(self):
title_soup = find_element_from_soup(self.ad_soup,
[{'el': 'a',
'tag': 'class',
'attr': 'jobtitle'}])
metadata_soup = find_element_from_soup(self.ad_soup,
[{'el': 'div',
'tag': 'class',
'attr': 'sjcl'}])
post_date_soup = find_element_from_soup(self.ad_soup,
[{'el': 'span',
'tag': 'class',
'attr': 'date'}])
del self.ad_soup
if title_soup:
self.title = title_soup.text.strip()
self.url = self.BASE_INDEED + title_soup['href']
self.source = 'INDEED'
if title_soup['href'].startswith('/pagead'):
return False
if metadata_soup:
company_soup = find_element_from_soup(metadata_soup,
[{'el': 'span',
'tag': 'class',
'attr': 'company'}])
location_soup = find_element_from_soup(metadata_soup,
[{'el': 'span',
'tag': 'class',
'attr': 'location'},
{'el': 'div',
'tag': 'class',
'attr': 'location'}])
if company_soup:
self.company = company_soup.text.strip()
if location_soup:
self.location = location_soup.text.strip()
if post_date_soup:
self.get_post_date_and_time(post_date_soup.text.strip())
return True
def visit_link_to_extract_description(self):
if self.url:
job_url = self.url  # self.url already includes BASE_INDEED (set in extract_card)
print('Issuing GET: ' + job_url)
job_response = requests.get(job_url)
print('GET Success, Parsing...')
specific_job_soup = BeautifulSoup(job_response.text, 'html.parser')
description = find_element_from_soup(specific_job_soup,
[{'el': 'div',
'tag': 'class',
'attr': 'jobsearch-JobComponent-description'}])
if description:
self.description = str(description)
def get_post_date_and_time(self, post_time):
post_time_epoch = self.scrape_start_time
if 'hour' in post_time:
num_hours = int(post_time[0:2].strip())
post_time_epoch -= num_hours * 60 * 60
elif 'minute' in post_time:
num_hours = int(post_time[0:2].strip())
post_time_epoch -= num_hours * 60
self.post_date = datetime.datetime.utcfromtimestamp(post_time_epoch).strftime('%Y-%m-%d %H:%M:%S')
def find_element_from_soup(soup, specs):
for spec in specs:
print('Looking for ' + spec['el'] + ' ' + spec['tag']
+ ' ' + spec['attr'] + '... Found if not otherwise stated.')
result = soup.find(spec['el'], {spec['tag']: spec['attr']})
if result:
return result
print('NOT FOUND ' + specs[0]['attr'] + '... ' + str(soup.attrs))
return None
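# Illustrative spec format, mirroring the calls above:
#   find_element_from_soup(card_soup, [{'el': 'span', 'tag': 'class', 'attr': 'company'}])
# returns the first matching element, or None if no spec matches.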
|
[
"waltonwang1922@gmail.com"
] |
waltonwang1922@gmail.com
|
5d1eece352fc8c5f68bf09f0c53324cb86162250
|
12cb18dc1c101482381144eb97f12321aded8fc6
|
/ActSent/src/pythoncode/stat/filterRelation.py
|
1ea77a68438ef2104a08fedb4818d975a0fea784
|
[] |
no_license
|
yulongp/ActSent
|
5525f81dc7ba92d24826cd6fc09e33a41c383bbe
|
d2e521ee85093712ab2dbddf3edb90da80ebc1b0
|
refs/heads/master
| 2016-09-02T00:13:46.036602
| 2014-12-10T15:51:57
| 2014-12-10T15:51:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 596
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 25 Nov 2014
@author: Yulong Pei
'''
fin = open('E:/Courses/11742/data/Sentiment/tweet/user_filter.txt', 'r')
users = set()
for line in fin.readlines():
users.add(line.strip())
fin.close()
print len(users)
fin = open('E:/Courses/11742/data/Sentiment/relation.txt', 'r')
fout = open('E:/Courses/11742/data/Sentiment/tweet/relation_filter.txt', 'w')
for line in fin.readlines():
tmp = line.strip().split()
if tmp[0] in users and tmp[1] in users:
fout.write(line)
fin.close()
fout.close()
|
[
"Think@Think-PC"
] |
Think@Think-PC
|
3f3844063992b68f39585095d648b061af440896
|
7c731762df05f017e464801c1aa0ad34c5cde722
|
/2ndLeariningPhase/prepareTrainingData.py
|
5d3246e30f3eb05b19131d01c7108a441e7ce8e5
|
[] |
no_license
|
WaleedRagheb/tmv-dmem
|
57d4521be18f2e37f2c3c7337dd2c507d74b3c4a
|
0a63f1397dae9e1c26272b512bb72032598dd8c1
|
refs/heads/master
| 2023-01-02T04:39:06.372426
| 2020-10-30T09:10:50
| 2020-10-30T09:10:50
| 308,577,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,048
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 22 12:50:10 2018
@author: HP
"""
import sys
sys.path.insert(0, r'..\ChunkDealer')
import chunksProcessor
import sys
sys.path.insert(0, r'..\DeepModeling')
import testScoring
import gensim
import numpy as np
import os
##############################################################
def pos_neg_count(prob,prob_Thr_Diff):
pos_cntr = 0
neg_cntr = 0
for index, row in prob.iterrows():
pos_Prob = row[0]
neg_Prob = row[1]
diff = np.abs((pos_Prob-neg_Prob))
if diff >= prob_Thr_Diff:
if pos_Prob > neg_Prob:
pos_cntr = pos_cntr + 1
else:
neg_cntr = neg_cntr + 1
return pos_cntr, neg_cntr
# print(row['c1'], row['c2'])
##############################################################
def giveDecision(prob, prob_Thr_Diff):
cnt_p,cnt_n = pos_neg_count(prob, prob_Thr_Diff)
sum_P_N = cnt_p + cnt_n
if sum_P_N == 0:
return 0, 0
return cnt_p/sum_P_N, cnt_n/sum_P_N
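# e.g. with 6 confidently-positive and 4 confidently-negative sentences this
# returns (0.6, 0.4); it returns (0, 0) when no sentence clears prob_Thr_Diff.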
##############################################################
prob_Thr_Diff = 0.0
for numberOfChunks in range(1,11):
if numberOfChunks == 0:
continue
for model_ChunkN in range(10,11):
if model_ChunkN == 0:
continue
Positive_chunkDir = r'C:\Users\HP\Downloads\clpsych16-data\2018\training\task1\eRisk@CLEF2018-task1-releasedtrainingdata\eRisk 2018 - training\2017 train\positive_examples_anonymous_chunks'
Negative_chunkDir = r'C:\Users\HP\Downloads\clpsych16-data\2018\training\task1\eRisk@CLEF2018-task1-releasedtrainingdata\eRisk 2018 - training\2017 train\negative_examples_anonymous_chunks'
model_Positive_path = r'../DeepModeling/Models_arch_6-plus/Positive/Chunk_' + str(model_ChunkN) + r'_300_c_40.word2vec'
model_Negative_path = r'../DeepModeling/Models_arch_6-plus/Negative/Chunk_' + str(model_ChunkN) + r'_300_c_40.word2vec'
sDic_pos = chunksProcessor.process_chunk_N_NotAcc(Positive_chunkDir,numberOfChunks)
sDic_neg = chunksProcessor.process_chunk_N_NotAcc(Negative_chunkDir,numberOfChunks)
modelPositive = gensim.models.Word2Vec.load(model_Positive_path)
modelNegative = gensim.models.Word2Vec.load(model_Negative_path)
if not os.path.exists(r'.\TempRes\Positive'):
os.makedirs(r'.\TempRes\Positive')
if not os.path.exists(r'.\TempRes\Negative'):
os.makedirs(r'.\TempRes\Negative')
fileName_pos = r'.\TempRes\Positive\Initial Results_' + str(numberOfChunks) + '_' + str(model_ChunkN) + '.txt'
fileName_neg = r'.\TempRes\Negative\Initial Results_' + str(numberOfChunks) + '_' + str(model_ChunkN) + '.txt'
for k, v_txt in sDic_pos.items():
sents = testScoring.txtToSen(v_txt)
sentsList = list(sents)
if len(sentsList) < 1:
with open(fileName_pos, "a") as f:
f.write(k + "\t None\n")
continue
prob = testScoring.docprob(sentsList,[modelPositive,modelNegative])
# d_subj should be 0,1,2
cnt_p, cnt_n = giveDecision(prob, prob_Thr_Diff)
with open(fileName_pos, "a") as f:
f.write(k + "\t " + str(cnt_p) + "\t " + str(cnt_n) + "\n")
###################################################################
for k, v_txt in sDic_neg.items():
sents = testScoring.txtToSen(v_txt)
sentsList = list(sents)
if len(sentsList) < 1:
with open(fileName_neg, "a") as f:
f.write(k + "\t None\n")
continue
prob = testScoring.docprob(sentsList,[modelPositive,modelNegative])
# d_subj should be 0,1,2
cnt_p, cnt_n = giveDecision(prob, prob_Thr_Diff)
with open(fileName_neg, "a") as f:
f.write(k + "\t " + str(cnt_p) + "\t " + str(cnt_n) + "\n")
#######################################################################
from os import listdir
from os.path import isfile, join
import numpy as np
import re
import matplotlib.pyplot as plt
res_matrix_pos = np.zeros((10,10,83,2),float)
subj_ID_array = []
resultsPath =r'.\TempRes\Positive'
onlyfiles = [f for f in listdir(resultsPath) if isfile(join(resultsPath, f))]
for res_f_name in onlyfiles:
print(res_f_name)
m = re.search('_(.+?)\.',res_f_name)
chunk_idx, model_idx = m.group(1).split('_')
subj_ID_array = np.loadtxt(resultsPath + '\\' + res_f_name, delimiter='\t', usecols=(0), unpack=True,dtype=np.str)
try:
Pos_Neg_mtrx = np.loadtxt(resultsPath + '\\' + res_f_name, delimiter='\t', usecols=(1, 2), unpack=True).transpose()
except IndexError:
Pos_Neg_mtrx = np.zeros((83,2),float)
with open(resultsPath + '\\' + res_f_name) as fi:
l_ctr = 0
for line in fi:
fields = line.split('\t')
if len(fields) > 2:
Pos_Neg_mtrx[l_ctr][0] = fields[1]
Pos_Neg_mtrx[l_ctr][1] = fields[2]
l_ctr += 1
res_matrix_pos[int(chunk_idx)-1,int(model_idx)-1,:,:] = Pos_Neg_mtrx
############################################################################
res_matrix_neg = np.zeros((10,10,403,2),float)
subj_ID_array = []
resultsPath =r'.\TempRes\Negative'
onlyfiles = [f for f in listdir(resultsPath) if isfile(join(resultsPath, f))]
for res_f_name in onlyfiles:
print(res_f_name)
m = re.search('_(.+?)\.',res_f_name)
chunk_idx, model_idx = m.group(1).split('_')
subj_ID_array = np.loadtxt(resultsPath + '\\' + res_f_name, delimiter='\t', usecols=(0), unpack=True,dtype=np.str)
try:
Pos_Neg_mtrx = np.loadtxt(resultsPath + '\\' + res_f_name, delimiter='\t', usecols=(1, 2), unpack=True).transpose()
except IndexError:
Pos_Neg_mtrx = np.zeros((403,2),float)
with open(resultsPath + '\\' + res_f_name) as fi:
l_ctr = 0
for line in fi:
fields = line.split('\t')
if len(fields) > 2:
Pos_Neg_mtrx[l_ctr][0] = fields[1]
Pos_Neg_mtrx[l_ctr][1] = fields[2]
l_ctr += 1
res_matrix_neg[int(chunk_idx)-1,int(model_idx)-1,:,:] = Pos_Neg_mtrx
with open(r'.\TempRes\TrainingData.csv', 'a') as trainFile:
for itr in range(402):
lstToWrite = res_matrix_neg[:,9,itr,0]
for ii in range(len(lstToWrite)):
trainFile.write(str(lstToWrite[ii]) + ",")
trainFile.write("NEG\n")
for itr in range(82):
lstToWrite = res_matrix_pos[:,9,itr,0]
for ii in range(len(lstToWrite)):
trainFile.write(str(lstToWrite[ii]) + ",")
trainFile.write("POS\n")
trainFile.close()
|
[
"azmy1@hotmail.com"
] |
azmy1@hotmail.com
|
38d1a8b5525bafefdf414bc8772df462ea1b9e46
|
4217beb675f8d985130316bc85e54d33f003603c
|
/04/skuska.py
|
6fdf0db77acd63106558b4c379902bdf87e5c1a3
|
[] |
no_license
|
Katrinkarus/pyladies
|
b997e39fef91eb4aad91efb878746945baf3f31f
|
75727d1050b985df8c73aa3a97c41e55d9b5be32
|
refs/heads/master
| 2020-05-04T12:42:17.533842
| 2019-04-02T18:10:59
| 2019-04-02T18:10:59
| 179,130,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
while True:
rodne_cislo = input('Zadaj rodne cislo: ')
datum = rodne_cislo[0:6]
poradie = rodne_cislo[7:11]
if rodne_cislo == datum + "/" + poradie:
print('Spravne zadane rodne cislo.')
break
print("Zle zadane rodne cislo. Zadaj rodne cislo: ")
|
[
"katrinkarusnak@gmail.com"
] |
katrinkarusnak@gmail.com
|
49189603cb3bc1b7fcce4713ced166ee1da90037
|
6b05aed1fd6ab434b2db37108f442e1949f1279b
|
/venv/lib/python3.9/site-packages/keyring/errors.py
|
b2df613b8779d456e4d7b7343162d25c97f31922
|
[
"Apache-2.0",
"OFL-1.1"
] |
permissive
|
storskegg/Qlikr
|
830512debdb69fb9c2fa0a7d022027b54a8e07f0
|
e63cb6401d0c49cc4ff7c2a1cb604c6ba9acbfc3
|
refs/heads/master
| 2023-08-17T13:02:41.737491
| 2022-09-27T17:31:52
| 2022-09-27T17:31:52
| 241,127,869
| 1
| 0
|
Apache-2.0
| 2023-08-09T19:24:57
| 2020-02-17T14:29:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,453
|
py
|
import sys
__metaclass__ = type
class KeyringError(Exception):
"""Base class for exceptions in keyring"""
class PasswordSetError(KeyringError):
"""Raised when the password can't be set."""
class PasswordDeleteError(KeyringError):
"""Raised when the password can't be deleted."""
class InitError(KeyringError):
"""Raised when the keyring could not be initialised"""
class KeyringLocked(KeyringError):
"""Raised when the keyring failed unlocking"""
class NoKeyringError(KeyringError, RuntimeError):
"""Raised when there is no keyring backend"""
class ExceptionRaisedContext:
"""
An exception-trapping context that indicates whether an exception was
raised.
"""
def __init__(self, ExpectedException=Exception):
self.ExpectedException = ExpectedException
self.exc_info = None
def __enter__(self):
self.exc_info = object.__new__(ExceptionInfo)
return self.exc_info
def __exit__(self, *exc_info):
self.exc_info.__init__(*exc_info)
return self.exc_info.type and issubclass(
self.exc_info.type, self.ExpectedException
)
class ExceptionInfo:
def __init__(self, *info):
if not info:
info = sys.exc_info()
self.type, self.value, _ = info
def __bool__(self):
"""
Return True if an exception occurred
"""
return bool(self.type)
__nonzero__ = __bool__
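# Editor's note: the block below is an illustrative usage sketch added to this
# copy, not part of the upstream keyring module. It shows the intended use of
# ExceptionRaisedContext: the context manager swallows the expected exception
# type, and its truth value reports whether one was raised inside the block.
if __name__ == "__main__":
    with ExceptionRaisedContext(KeyringError) as exc:
        raise KeyringError("backend unavailable")  # trapped by __exit__
    assert exc, "a KeyringError was raised, so exc is truthy"

    with ExceptionRaisedContext(KeyringError) as exc:
        pass  # nothing raised inside the block
    assert not exc, "no exception occurred, so exc is falsy"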
|
[
"liam@storskegg.org"
] |
liam@storskegg.org
|
1fe82a003bc84d91e8c6db00b500c06f66884aff
|
00dcecec4d2cacc1c4d6d8457bbed15c3df4c6ea
|
/31.py
|
ec29f45d30b30264d190d234d4308d161b23304a
|
[] |
no_license
|
asadoughi/euler
|
375010163de2d174a20561b89bf240a0de34098f
|
93fdcb29a037f30898a017d5d41e8fa20d2de4a9
|
refs/heads/master
| 2021-01-18T14:05:37.747124
| 2014-08-16T20:42:35
| 2014-08-16T20:42:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
c = (200, 100, 50, 20, 10, 5, 2, 1)
def updated_path(path, i):
if path is None:
path = (0,) * len(c)
return tuple(x + 1
if j == i else x
for j, x in enumerate(path))
solution_set = set()
def ways_to_make(y, tab=0, previous_coin=None, path=None):
if y <= 0:
return
for i, coin in enumerate(c):
if previous_coin is not None:
if coin > previous_coin:
continue
if coin == y:
# print '%s%s' % ('\t'*tab, coin)
solution_set.add(updated_path(path, i))
elif coin < y:
# print '%s%s' % ('\t'*tab, coin)
ways_to_make(y - coin, tab + 1, coin, updated_path(path, i))
ways_to_make(200)
print(len(solution_set))
|
[
"amir.sadoughi@gmail.com"
] |
amir.sadoughi@gmail.com
|
ecf39a5c705c8db5316fd9b8f2f2a10e55b0f3b7
|
13be2f1034dd5cf54142ffd587b963ad82b994a2
|
/examples/sliay/human_recording.py
|
e336ff5fb3951b36d5515a8541c7745e947360ea
|
[
"MIT"
] |
permissive
|
flyers/ViZDoom
|
a20f3bbca0f28d62c67fad70a6ab879d8b2f945c
|
721bda34d5605e2e074b573a13c45c169138c237
|
refs/heads/master
| 2020-07-16T17:52:29.746759
| 2017-06-22T06:56:20
| 2017-06-22T06:56:20
| 94,313,186
| 0
| 0
| null | 2017-06-14T09:21:43
| 2017-06-14T09:21:43
| null |
UTF-8
|
Python
| false
| false
| 3,786
|
py
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
import cv2
from vizdoom import *
parser = argparse.ArgumentParser(
description='record human training data in CIG')
parser.add_argument('--config', type=str, default='/home/sliay/Documents/ViZDoom/scenarios/cig.cfg')
parser.add_argument('--map', type=str, default='map01')
parser.add_argument('--bots', type=int, default=7)
parser.add_argument('--frame-skip', type=int, default=4)
parser.add_argument('--ip', type=str, default=None)
parser.add_argument('--dir', type=str, required=True)
args = parser.parse_args()
save_dir = os.path.join(args.dir, 'screens')
os.makedirs(save_dir)
f_frag = open(os.path.join(save_dir, 'frags.txt'), 'w')
f_health = open(os.path.join(save_dir, 'health.txt'), 'w')
f_ammo = open(os.path.join(save_dir, 'ammo.txt'), 'w')
f_action = open(os.path.join(save_dir, 'action.txt'), 'w')
frame_skip = args.frame_skip
game = DoomGame()
game.load_config(args.config)
game.set_doom_map(args.map) # Limited deathmatch.
# game.set_doom_map("map02") # Full deathmatch.
game.add_game_args("-host 1 -deathmatch +sv_spawnfarthest 1 "
"+timelimit 10.0 +sv_forcerespawn 1 +sv_noautoaim 1 +sv_respawnprotect 1 +sv_nocrouch 1 "
"+viz_respawn_delay 2 +viz_nocheat 1 ")
if args.ip is not None:
game.add_game_args("-join " + args.ip + " ")
game.add_game_args("+name AI +colorset 0")
game.set_screen_format(ScreenFormat.RGB24)
# game.set_screen_format(ScreenFormat.ARGB32)
# game.set_screen_format(ScreenFormat.GRAY8)
# game.set_screen_format(ScreenFormat.BGR24)
# game.set_screen_format(ScreenFormat.RGBA32)
# game.set_screen_format(ScreenFormat.BGRA32)
# game.set_screen_format(ScreenFormat.ABGR32)
# Raw Doom buffer with palette's values. This one makes no sense in particular
# game.set_screen_format(ScreenFormat.DOOM_256_COLORS)
# Sets resolution for all buffers.
game.set_screen_resolution(ScreenResolution.RES_640X480)
# Enables depth buffer.
game.set_depth_buffer_enabled(True)
# Enables labeling of in-game objects.
game.set_labels_buffer_enabled(True)
# Enables a buffer with a top-down map of the current episode/level.
game.set_automap_buffer_enabled(True)
game.set_automap_mode(AutomapMode.OBJECTS)
game.set_automap_rotate(False)
game.set_automap_render_textures(False)
game.set_render_hud(True)
game.set_render_minimal_hud(False)
game.set_mode(Mode.ASYNC_SPECTATOR)
game.init()
print("Episode Started")
print('Available Buttons:', game.get_available_buttons())
game.send_game_command("removebots")
for i in range(args.bots):
game.send_game_command("addbot")
# Play until the game (episode) is over.
cnt = 0
while not game.is_episode_finished():
s = game.get_state()
if cnt % frame_skip == 0:
cv2.imwrite(os.path.join(save_dir, 'screen_%05d.png' % cnt), s.screen_buffer)
cnt += 1
game.advance_action()
if game.is_player_dead():
game.respawn_player()
print("Frags:", game.get_game_variable(GameVariable.FRAGCOUNT))
print("Health:", game.get_game_variable(GameVariable.HEALTH))
print("Ammo:", game.get_game_variable(GameVariable.AMMO5))
print("Performed Action:", game.get_last_action())
f_frag.write('%d\n' % game.get_game_variable(GameVariable.FRAGCOUNT))
f_health.write('%d\n' % game.get_game_variable(GameVariable.HEALTH))
f_ammo.write('%d\n' % game.get_game_variable(GameVariable.AMMO5))
action = game.get_last_action()
for i in range(len(action)):
if i != len(action)-1:
f_action.write('%f ' % action[i])
else:
f_action.write('%f\n' % action[i])
print("Episode finished.")
print("************************")
f_frag.close()
f_health.close()
f_ammo.close()
f_action.close()
game.close()
|
[
"lisiyi.zju@gmail.com"
] |
lisiyi.zju@gmail.com
|
a417559139fa6528356016ba2b62917e0ff2b559
|
b4bd4ce04253ce3b68a11a3c479676e3e6b90f9f
|
/src/com/zobar/rosalind/GC.py
|
ed1bc449f0f8a85f1f9c91504b4a974961a13a44
|
[] |
no_license
|
zo-bar/rosalind
|
2af34c55bfa3da2da3a69af5b0b5ea9abd398d29
|
f6a232d086f596f0bf964cc1415cddfc3af79b31
|
refs/heads/master
| 2021-04-26T03:47:02.052410
| 2017-10-23T03:04:08
| 2017-10-23T03:04:08
| 107,925,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,286
|
py
|
'''
Created on Feb 28, 2013
@author: Zoya
'''
from __future__ import division
import io
def gcCount(dna):
stream = io.StringIO(unicode(dna));
nchar = stream.read(1)
gc = 0
total = 0
while nchar:
if nchar.upper() == 'C' or nchar.upper() == 'G':
gc += 1
total += 1
elif nchar.upper() == 'A' or nchar.upper() == 'T':
total += 1
nchar = stream.read(1)
result = 100 * gc / total
return result
def gcCountLines(fileName):
bestGCcontent = 0
bestID = 0
with open(fileName) as inputFile:
# skip first >
inputFile.read(1)
dnaID = inputFile.readline()
while dnaID:
            char = 'start'
dna = ''
while char:
char = inputFile.read(1)
if char == '>':
break
dna = dna + char
gcContent = gcCount(dna)
if bestGCcontent < gcContent:
bestGCcontent = gcContent
bestID = dnaID
dnaID = inputFile.readline()
print "%s%f" % (bestID, bestGCcontent)
gcCountLines("data/rosalind_gc.txt")
|
[
"zoya.barabanova@gmail.com"
] |
zoya.barabanova@gmail.com
|
741c7ed84e776d3b296496e8a03cb9505ed6c707
|
ae8680fdf9c0ccef372a28c6e21c5bc88b759e28
|
/user/views.py
|
031474d21848b45c13842cf5b1f4257e15b59d86
|
[] |
no_license
|
LevchenCoM/django-hotels
|
3b1a03a6c5e08e11f0daa5d78f81267dac287b39
|
66c448c15296bc105d5e2461f52028f9fb7d4ebc
|
refs/heads/master
| 2020-04-02T15:41:25.522697
| 2019-01-14T22:07:45
| 2019-01-14T22:07:45
| 154,578,974
| 0
| 1
| null | 2020-04-21T11:36:20
| 2018-10-24T22:49:18
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,734
|
py
|
from django.shortcuts import render, redirect
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import UserCreationForm
from django import forms
from django.http import HttpResponse
def sign_up(request):
user_creation_form = UserCreationForm()
if request.method=='POST':
user_creation_form = UserCreationForm(request.POST)
# user_form = UserSignUpForm(request.POST)
if user_creation_form.is_valid():
username=user_creation_form.cleaned_data['username']
password=user_creation_form.cleaned_data['password1']
new_user=User.objects.create_user(username=username,password=password)
login(request, authenticate(username=username,password=password))
return redirect('/home')
return render(request, "user_auth/sign-up.html", {'user_form':user_creation_form})
# def sign_in(request):
# # user_form=UserSignUpForm()
# # # user_form = UserCreationForm()
# # dir(user_form)
# # if request.method=='POST':
# # user_form = UserSignUpForm(request.POST)
# # # user_form = UserCreationForm(request.POST)
# # if user_form.is_valid():
# # username=user_form.cleaned_data['username']
# # password=user_form.cleaned_data['password1']
# # new_user=User.objects.create_user(username=username,password=password)
# # login(request, authenticate(username=username,password=password))
# # return redirect('/home')
# return render(request, "user_auth/sign-up.html", {'user_form':user_form})
|
[
"qupadlive@gmail.com"
] |
qupadlive@gmail.com
|
3b5387d6906c0f8e3d571ef855877e7f3122fa9a
|
e69f36986b40051aa1b79f57c82e0c1d1092e845
|
/blog/migrations/0003_posts_status.py
|
c6e12d009ad82213f0f1bf2a6a23a891d0ee07d2
|
[
"MIT"
] |
permissive
|
p3dr0migue1/back_and_beyond
|
58b782104ec22846d5703e31dc2e519d733c9db9
|
a1926124b67793e002801eb4da7ff5e55c12e885
|
refs/heads/main
| 2022-12-10T09:22:54.475648
| 2021-03-16T16:00:19
| 2021-03-16T16:00:19
| 48,260,489
| 1
| 0
|
MIT
| 2022-12-07T23:21:32
| 2015-12-18T23:07:41
|
Python
|
UTF-8
|
Python
| false
| false
| 520
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-22 08:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20151220_2103'),
]
operations = [
migrations.AddField(
model_name='posts',
name='status',
field=models.IntegerField(choices=[(1, b'Draft'), (2, b'Published'), (3, b'Private'), (4, b'Archived')], default=1),
),
]
|
[
"pedro.miguel@live.co.uk"
] |
pedro.miguel@live.co.uk
|
e487919518bf88bc71d350791ee008c305bac7e9
|
ccf62dacb53df805f142c0e07f83b6a5ce871acf
|
/lesson_1 2/task_1_3.py
|
a6436f237f3ed571c6dd5c51ab75e4b22f4655f2
|
[] |
no_license
|
LEVON-332125/python-homework
|
fa1452beaa149ccc96c37e730d7de62196416968
|
88f658accd453e2a8a885d78f4ab2f5f78b6177f
|
refs/heads/main
| 2023-02-17T03:56:31.918184
| 2021-01-14T14:20:01
| 2021-01-14T14:20:01
| 329,627,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
# Write following program: calculate how much money you
# should pay for 1500$ bank loan if annual percentage is 16%
value = 1500
percent = 16
annual_interest = (value * percent) / 100   # 1500 * 16 / 100 = 240 interest per year
total_to_pay = value + annual_interest      # principal plus one year of interest = 1740
print(total_to_pay)
|
[
"levon.yeghshatyan1994@gmail.com"
] |
levon.yeghshatyan1994@gmail.com
|
5ad5ba54ab183a3b8d8547dc36fe13a182ff5fb7
|
15e308ddb4d03767900a7ae52b3d01e009dd5bcb
|
/yapily/models/site.py
|
ed15e46528817bde7d29d29e5dd30a26eaf8734c
|
[] |
no_license
|
MedatechUK/yapily
|
be6d01df2a4d8a5f4ce8c79dcb098bd297fb884c
|
e4440d67a504dc6a8ec9f314e1362d5f6cf424a4
|
refs/heads/main
| 2023-07-07T16:53:41.059013
| 2021-08-23T06:57:23
| 2021-08-23T06:57:23
| 381,364,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,967
|
py
|
# coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
The version of the OpenAPI document: 0.0.359
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from yapily.configuration import Configuration
class Site(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'identification': 'str',
'name': 'str'
}
attribute_map = {
'identification': 'Identification',
'name': 'Name'
}
def __init__(self, identification=None, name=None, local_vars_configuration=None): # noqa: E501
"""Site - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._identification = None
self._name = None
self.discriminator = None
if identification is not None:
self.identification = identification
if name is not None:
self.name = name
@property
def identification(self):
"""Gets the identification of this Site. # noqa: E501
:return: The identification of this Site. # noqa: E501
:rtype: str
"""
return self._identification
@identification.setter
def identification(self, identification):
"""Sets the identification of this Site.
:param identification: The identification of this Site. # noqa: E501
:type: str
"""
self._identification = identification
@property
def name(self):
"""Gets the name of this Site. # noqa: E501
:return: The name of this Site. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Site.
:param name: The name of this Site. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Site):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Site):
return True
return self.to_dict() != other.to_dict()
|
[
"simonbarnett@emerge-it.co.uk"
] |
simonbarnett@emerge-it.co.uk
|
985a0181777a667c351bdcecfb2f163f8ca20c93
|
21b719411b6a6a18e169eab506c95b8e4c2ca0de
|
/mysite/main/admin.py
|
ab004f6a11edf10b6fbf3fe07ededabb02b087c6
|
[] |
no_license
|
Techdavee/Myfiles
|
5525d1f93dbcc7961b12725ddb91b54223853cf7
|
1bfc755e0c1332f69006679393b76e5b08258bb1
|
refs/heads/main
| 2023-04-29T20:40:48.225158
| 2021-02-04T15:04:41
| 2021-02-04T15:04:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
from django.contrib import admin
from .models import Tutorial
from tinymce.widgets import TinyMCE
from django.db import models
# Register your models here.
class TutorialAdmin(admin.ModelAdmin):
fieldsets = [
("Title/date", {"fields": ["tutorial_title", "tutorial_published"]}),
("Content", {"fields":["tutorial_content"]})
]
    formfield_overrides = {
models.TextField: {'widget': TinyMCE()},
}
admin.site.register(Tutorial, TutorialAdmin)
|
[
"noreply@github.com"
] |
Techdavee.noreply@github.com
|
fbe74fd7cfe43e90db48ece7aa26a52868248dc4
|
9d6706d64836ca032101fb4a01a5a5b3762e9778
|
/school_summary.py
|
35123ead34007c2030fdd47d979a628c58b8b556
|
[] |
no_license
|
johnwchoo/school_district_analysis
|
a34b2bb325d5d0b441916fc0d1eed1cb904158c2
|
691a14550d299350fc519a933f7c7e64508e34dd
|
refs/heads/master
| 2021-02-14T02:58:04.087393
| 2020-03-03T23:07:13
| 2020-03-03T23:07:13
| 244,464,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,283
|
py
|
#School Summary
#Create an overview table that summarizes key metrics about each school, including:
#School Name +
#School Type +
#Total Students +
#Total School Budget +
#Per Student Budget +
#Average Math Score +
#Average Reading Score +
#% Passing Math +
#% Passing Reading +
#Overall Passing Rate (Average of the above two)
# Set Up ------------------------------------------------------------------------------------
import pandas as pd
import numpy as np
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"])
# School Name ------------------------------------------------------------------------------------
#skip
# School Type ------------------------------------------------------------------------------------
school_type = school_data.set_index(['school_name'])['type']
#print(school_type)
# Total Students per school ------------------------------------------------------------------------------------
total_students_per_school = school_data_complete["school_name"].value_counts()
#print(students_per_school)
# Total School & Student Budget ------------------------------------------------------------------------------------
per_school_budget = school_data_complete.groupby(['school_name']).mean()['budget']
per_student_budget = per_school_budget/ total_students_per_school
#print(per_student_budget)
# Average Math Score per School ------------------------------------------------------------------------------------
avg_math_score_per_school = school_data_complete.groupby(['school_name']).mean()['math_score']
#print(avg_math_score_per_school)
# Average Reading Score per School ------------------------------------------------------------------------------------
avg_reading_score_per_school = school_data_complete.groupby(['school_name']).mean()['reading_score']
#print(avg_reading_score_per_school)
#% Passing Math ------------------------------------------------------------------------------------
school_passing_math = school_data_complete[(school_data_complete["math_score"] > 70)]
per_school_passing_math = school_passing_math.groupby(["school_name"]).count()["student_name"] / total_students_per_school * 100
# 1 - total students passing math
# 2 - total students passing math grouped by school name and turned into percentage
#% Passing Reading ------------------------------------------------------------------------------------
school_passing_reading = school_data_complete[(school_data_complete["reading_score"] > 70)]
per_school_passing_reading = school_passing_reading.groupby(["school_name"]).count()["student_name"] / total_students_per_school * 100
#% Overall Passing Rate ------------------------------------------------------------------------------------
overall_passing_rate = (per_school_passing_reading + per_school_passing_math)/ 2
#print(overall_passing_rate)
# School Summary ------------------------------------------------------------------------------------
per_school_summary = pd.DataFrame({"School Type": school_type,
"Total Students": total_students_per_school,
"Total School Budget": per_school_budget,
"Per Student Budget": per_student_budget,
"Average Math Score": avg_math_score_per_school,
"Average Reading Score": avg_reading_score_per_school,
"% Passing Math": per_school_passing_math,
"% Passing Reading": per_school_passing_reading,
"% Overall Passing Rate": overall_passing_rate})
#print(per_school_summary)
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Top Performing Schools (By Passing Rate) ------------------------------------------------------------------------------------
top_schools = per_school_summary.sort_values(["% Overall Passing Rate"], ascending=False)
#Bottom Performing Schools (By Passing Rate) ------------------------------------------------------------------------------------
bottom_schools = per_school_summary.sort_values(["% Overall Passing Rate"], ascending=True)
#Math Scores by Grade ------------------------------------------------------------------------------------
ninth_graders = school_data_complete[(school_data_complete["grade"] == "9th")]
tenth_graders = school_data_complete[(school_data_complete["grade"] == "10th")]
eleventh_graders = school_data_complete[(school_data_complete["grade"] == "11th")]
twelfth_graders = school_data_complete[(school_data_complete["grade"] == "12th")]
ninth_graders_scores = ninth_graders.groupby(["school_name"]).mean()["math_score"]
tenth_graders_scores = tenth_graders.groupby(["school_name"]).mean()["math_score"]
eleventh_graders_scores = eleventh_graders.groupby(["school_name"]).mean()["math_score"]
twelfth_graders_scores = twelfth_graders.groupby(["school_name"]).mean()["math_score"]
#categorize by grade
#Group each grade by school
scores_by_grade = pd.DataFrame({"9th": ninth_graders_scores, "10th": tenth_graders_scores,
"11th": eleventh_graders_scores, "12th": twelfth_graders_scores})
#Reading Scores by Grade ------------------------------------------------------------------------------------
ninth_graders = school_data_complete[(school_data_complete["grade"] == "9th")]
tenth_graders = school_data_complete[(school_data_complete["grade"] == "10th")]
eleventh_graders = school_data_complete[(school_data_complete["grade"] == "11th")]
twelfth_graders = school_data_complete[(school_data_complete["grade"] == "12th")]
# Group each by school name
ninth_graders_scores = ninth_graders.groupby(["school_name"]).mean()["reading_score"]
tenth_graders_scores = tenth_graders.groupby(["school_name"]).mean()["reading_score"]
eleventh_graders_scores = eleventh_graders.groupby(["school_name"]).mean()["reading_score"]
twelfth_graders_scores = twelfth_graders.groupby(["school_name"]).mean()["reading_score"]
scores_by_grade = pd.DataFrame({"9th": ninth_graders_scores, "10th": tenth_graders_scores,
"11th": eleventh_graders_scores, "12th": twelfth_graders_scores})
#****Scores by School Spending ------------------------------------------------------------------------------------
spending_bins = [0, 250, 500, 750]
group_names = ['<250','250-500','500-750']
per_school_summary["Spending Ranges (Per Student)"] = pd.cut(per_student_budget, spending_bins, labels=group_names)
print(per_school_summary)
spending_math_scores = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["Average Math Score"]
spending_reading_scores = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["Average Reading Score"]
spending_passing_math = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Math"]
spending_passing_reading = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Reading"]
overall_passing_rate = (spending_math_scores + spending_reading_scores) / 2
spending_summary = pd.DataFrame({"Average Math Score" : spending_math_scores,
"Average Reading Score": spending_reading_scores,
"% Passing Math": spending_passing_math,
"% Passing Reading": spending_passing_reading,
"% Overall Passing Rate": overall_passing_rate})
#print(spending_summary)
#****Scores by School Size ------------------------------------------------------------------------------------
size_bins = [0, 1000, 2000, 5000]
label_names = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
# Categorize the spending based on the bins
per_school_summary["School Size"] = pd.cut(per_school_summary["Total Students"], size_bins, labels=label_names)
# Calculate the scores based on bins
size_math_scores = per_school_summary.groupby(["School Size"]).mean()["Average Math Score"]
size_reading_scores = per_school_summary.groupby(["School Size"]).mean()["Average Reading Score"]
size_passing_math = per_school_summary.groupby(["School Size"]).mean()["% Passing Math"]
size_passing_reading = per_school_summary.groupby(["School Size"]).mean()["% Passing Reading"]
overall_passing_rate = (size_passing_math + size_passing_reading) / 2
# Assemble into data frame
size_summary = pd.DataFrame({"Average Math Score" : size_math_scores,
"Average Reading Score": size_reading_scores,
"% Passing Math": size_passing_math,
"% Passing Reading": size_passing_reading,
"% Overall Passing Rate": overall_passing_rate})
#print(size_summary)
#print(per_school_summary)
#****Scores by School Type ------------------------------------------------------------------------------------
type_math_score = per_school_summary.groupby(['School Type']).mean()["Average Math Score"]
type_reading_score = per_school_summary.groupby(['School Type']).mean()["Average Reading Score"]
type_passing_math = per_school_summary.groupby(['School Type']).mean()['% Passing Math']
type_passing_reading = per_school_summary.groupby(['School Type']).mean()['% Passing Reading']
overall_passing_rate = (type_passing_math + type_passing_reading) / 2
type_summary = pd.DataFrame({"Average Math Score": type_math_score,
"Average Reading Score": type_reading_score,
"% Passing Math": type_passing_math,
"% Passing Reading": type_passing_reading,
"% Overall Passing Rate": overall_passing_rate})
print(type_summary)
|
[
"johnwchoo@gmail.com"
] |
johnwchoo@gmail.com
|
cc97825789e2626dee928f0438ffb14d11c4338f
|
6782c1b6b1a472846417b82c0089a935ba0cd13b
|
/examples/node2vec/multi_class.py
|
e01f7b39d0697941b2337f7c6d218828090f75f8
|
[
"Apache-2.0"
] |
permissive
|
Liwb5/PGL
|
291d7d23a2f56060de8afffb6180476c86a81279
|
1253335c602709fb93061d8a64b2e4ba1049bfff
|
refs/heads/main
| 2022-04-26T10:17:27.937508
| 2022-04-19T12:20:33
| 2022-04-19T12:20:33
| 216,797,608
| 1
| 0
|
Apache-2.0
| 2020-10-09T07:49:14
| 2019-10-22T11:31:31
|
Python
|
UTF-8
|
Python
| false
| false
| 9,706
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import time
import os
import math
import glob
import numpy as np
import paddle
from easydict import EasyDict as edict
import pgl
import yaml
from paddle.optimizer import Adam
import tqdm
from pgl.utils.logger import log
from sklearn.metrics import f1_score
from dataset import ShardedDataset
def load(name):
if name == 'cora':
dataset = pgl.dataset.CoraDataset()
elif name == "pubmed":
dataset = pgl.dataset.CitationDataset("pubmed", symmetry_edges=True)
elif name == "citeseer":
dataset = pgl.dataset.CitationDataset("citeseer", symmetry_edges=True)
elif name == "BlogCatalog":
dataset = pgl.dataset.BlogCatalogDataset()
else:
raise ValueError(name + " dataset doesn't exists")
dataset.graph.indegree()
dataset.graph.outdegree()
dataset.graph = dataset.graph.to_mmap()
return dataset
class Model(paddle.nn.Layer):
def __init__(self, num_nodes, embed_size=16, num_classes=39):
super(Model, self).__init__()
self.num_nodes = num_nodes
embed_init = paddle.nn.initializer.Uniform(
low=-1. / math.sqrt(embed_size), high=1. / math.sqrt(embed_size))
emb_attr = paddle.ParamAttr(name="node_embedding")
self.emb = paddle.nn.Embedding(
num_nodes, embed_size, weight_attr=emb_attr)
self.linear = paddle.nn.Linear(embed_size, num_classes)
def forward(self, node_ids):
node_emb = self.emb(node_ids)
node_emb.stop_gradient = True
logits = self.linear(node_emb)
return logits
def node_classify_generator(graph,
all_nodes=None,
batch_size=512,
epoch=1,
shuffle=True):
if all_nodes is None:
all_nodes = np.arange(graph.num_nodes)
def batch_nodes_generator(shuffle=shuffle):
perm = np.arange(len(all_nodes), dtype=np.int64)
if shuffle:
np.random.shuffle(perm)
start = 0
while start < len(all_nodes):
yield all_nodes[perm[start:start + batch_size]]
start += batch_size
def wrapper():
for _ in range(epoch):
for batch_nodes in batch_nodes_generator():
# batch_nodes_expanded = np.expand_dims(batch_nodes,
# -1).astype(np.int64)
batch_labels = graph.node_feat['group_id'][batch_nodes].astype(
np.float32)
yield [batch_nodes, batch_labels]
return wrapper
def topk_f1_score(labels,
probs,
topk_list=None,
average="macro",
threshold=None):
assert topk_list is not None or threshold is not None, "one of topklist and threshold should not be None"
if threshold is not None:
preds = probs > threshold
else:
preds = np.zeros_like(labels, dtype=np.int64)
for idx, (prob, topk) in enumerate(zip(np.argsort(probs), topk_list)):
preds[idx][prob[-int(topk):]] = 1
return f1_score(labels, preds, average=average)
def train(model, data_loader, optim, log_per_step=1000, threshold=0.3):
model.train()
total_loss = 0.
total_sample = 0
bce_loss = paddle.nn.BCEWithLogitsLoss()
test_probs_vals, test_labels_vals, test_topk_vals = [], [], []
for batch, (node, labels) in enumerate(data_loader):
num_samples = len(node)
node = paddle.to_tensor(node)
labels = paddle.to_tensor(labels)
logits = model(node)
probs = paddle.nn.functional.sigmoid(logits)
loss = bce_loss(logits, labels)
loss.backward()
optim.step()
optim.clear_grad()
topk = labels.sum(-1)
test_probs_vals.append(probs.numpy())
test_labels_vals.append(labels.numpy())
test_topk_vals.append(topk.numpy())
total_loss += loss.numpy()[0] * num_samples
total_sample += num_samples
test_probs_array = np.concatenate(test_probs_vals)
test_labels_array = np.concatenate(test_labels_vals)
test_topk_array = np.concatenate(test_topk_vals)
test_macro_f1 = topk_f1_score(test_labels_array, test_probs_array,
test_topk_array, "macro", threshold)
test_micro_f1 = topk_f1_score(test_labels_array, test_probs_array,
test_topk_array, "micro", threshold)
test_loss_val = total_loss / total_sample
log.info("Train Loss: %f " % test_loss_val + "Train Macro F1: %f " %
test_macro_f1 + "Train Micro F1: %f " % test_micro_f1)
return total_loss / total_sample
@paddle.no_grad()
def test(model, data_loader, log_per_step=1000, threshold=0.3):
model.eval()
total_loss = 0.
total_sample = 0
bce_loss = paddle.nn.BCEWithLogitsLoss()
test_probs_vals, test_labels_vals, test_topk_vals = [], [], []
for batch, (node, labels) in enumerate(data_loader):
num_samples = len(node)
node = paddle.to_tensor(node)
labels = paddle.to_tensor(labels)
logits = model(node)
probs = paddle.nn.functional.sigmoid(logits)
loss = bce_loss(logits, labels)
topk = labels.sum(-1)
test_probs_vals.append(probs.numpy())
test_labels_vals.append(labels.numpy())
test_topk_vals.append(topk.numpy())
total_loss += loss.numpy()[0] * num_samples
total_sample += num_samples
test_probs_array = np.concatenate(test_probs_vals)
test_labels_array = np.concatenate(test_labels_vals)
test_topk_array = np.concatenate(test_topk_vals)
test_macro_f1 = topk_f1_score(test_labels_array, test_probs_array,
test_topk_array, "macro", threshold)
test_micro_f1 = topk_f1_score(test_labels_array, test_probs_array,
test_topk_array, "micro", threshold)
test_loss_val = total_loss / total_sample
log.info("\t\tTest Loss: %f " % test_loss_val + "Test Macro F1: %f " %
test_macro_f1 + "Test Micro F1: %f " % test_micro_f1)
return test_loss_val, test_macro_f1, test_micro_f1
def load_from_files(model_dir):
files = glob.glob(
os.path.join(model_dir, "node_embedding_txt",
"node_embedding.block*.txt"))
emb_table = dict()
for filename in files:
for line in open(filename):
key, value = line.strip(",\n").split("\t")
key = int(key)
value = [float(v) for v in value.split(",")]
emb_table[key] = value
emb_list = [emb_table[key] for key in range(len(emb_table))]
emb_arr = np.array(emb_list, dtype=np.float32)
emb_arr = emb_arr[:, :(emb_arr.shape[1] - 3) // 3]
return {'emb.weight': emb_arr}
def main(args):
if not args.use_cuda:
paddle.set_device("cpu")
if paddle.distributed.get_world_size() > 1:
paddle.distributed.init_parallel_env()
dataset = load(args.dataset)
graph = dataset.graph
model = Model(graph.num_nodes, args.embed_size, dataset.num_groups)
model = paddle.DataParallel(model)
batch_size = len(dataset.train_index)
train_steps = int(len(dataset.train_index) / batch_size) * args.epoch
scheduler = paddle.optimizer.lr.PolynomialDecay(
learning_rate=args.multiclass_learning_rate,
decay_steps=train_steps,
end_lr=0.0001)
optim = Adam(learning_rate=scheduler, parameters=model.parameters())
if args.load_from_static:
model.set_state_dict(load_from_files("./model"))
else:
model.set_state_dict(paddle.load("model.pdparams"))
train_data_loader = node_classify_generator(
graph, dataset.train_index, batch_size=batch_size, epoch=1)
test_data_loader = node_classify_generator(
graph, dataset.test_index, batch_size=batch_size, epoch=1)
best_test_macro_f1 = -1
for epoch in tqdm.tqdm(range(args.epoch)):
train_loss = train(model, train_data_loader(), optim)
test_loss, test_macro_f1, test_micro_f1 = test(model,
test_data_loader())
best_test_macro_f1 = max(best_test_macro_f1, test_macro_f1)
log.info("Best test macro f1 is %s." % best_test_macro_f1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Deepwalk')
parser.add_argument(
"--dataset",
type=str,
default="BlogCatalog",
help="dataset (cora, pubmed, BlogCatalog)")
parser.add_argument("--use_cuda", action='store_true', help="use_cuda")
parser.add_argument(
"--conf",
type=str,
default="./config.yaml",
help="config file for models")
parser.add_argument("--epoch", type=int, default=1000, help="Epoch")
parser.add_argument(
"--load_from_static", action='store_true', help="use_cuda")
args = parser.parse_args()
# merge user args and config file
config = edict(yaml.load(open(args.conf), Loader=yaml.FullLoader))
config.update(vars(args))
main(config)
|
[
"weiyue.su@gmail.com"
] |
weiyue.su@gmail.com
|
a1624b94b297bc87a48ad16e0cdcb21d81e82e0b
|
cc32a607aedba612412dab77886fda45f9796abb
|
/erfelijkheid.py
|
5f7a37594cd0ea9125f529909152085f9576bafe
|
[
"MIT"
] |
permissive
|
Smartz-Wesley/Smartz-Test
|
59fe0f7c092f631ed3689862634f07a715f6aab7
|
8b34849ec8933a9715cf3c28d81bd211c3dc30e4
|
refs/heads/master
| 2020-03-15T16:18:55.279708
| 2018-05-13T10:09:06
| 2018-05-13T10:09:06
| 132,232,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 741
|
py
|
class Person ():
def lopen(self):
print("rechtervoet, linkervoer, rechtervoet, wat was het ook alweer...")
def praten(self):
print("Het enige dat ik kan is praten")
def werken(self):
print("Ben ik aan het werk?")
class Wesley (Person):
def usa(self):
print("Oh say can you see....")
class Jeffrey (Person):
def antiajax(self):
print("Bah, bah Ajax: joden, joden")
class Tim (Person):
def best(self):
print("Zoals mijn grote voorbeeld altijd aangaf, ik ben de beste!")
class Dennis (Person):
def fortnite(self):
print("Van het weekend zal ik eens gaan spelen")
w = Wesley()
j = Jeffrey()
t = Tim()
d = Dennis()
w.lopen()
w.praten()
w.werken()
w.usa()
|
[
"w.speetgens@smartz.eu"
] |
w.speetgens@smartz.eu
|
61a09c18c8b630245e154ae30f0dcbb89579d77a
|
33c674fb0209804aa715276201c9966e2b050ab3
|
/wksp/devel/bin/view_frames
|
76057371ca6f360e6f00925d656e9fe8905003ac
|
[] |
no_license
|
ese-519/teamcaravan-rpi
|
74d4d45adb00b5acc48e0f7f3f6689ee4d477001
|
88fd59055758b0c39c0e00816e2bc3e06be695b3
|
refs/heads/master
| 2016-08-11T07:09:00.863406
| 2015-12-09T16:42:39
| 2015-12-09T16:42:39
| 46,686,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# creates a relay to a python script source file, acting as that file.
# The purpose is that of a symlink
with open("/home/ubuntu/hemo_code/new_code/wksp/src/geometry/tf/scripts/groovy_compatibility/view_frames", 'r') as fh:
exec(fh.read())
|
[
"avanaken92@gmail.com"
] |
avanaken92@gmail.com
|
|
1ce95d475c7088d7b805a6c7e62d93c4986fce10
|
cfb474d8b9aa6fd268d322e0aeae8598d0d40383
|
/user/logsitic.py
|
d8022176535ce4f98a2f59df6adb9ed9a5ce7d8b
|
[] |
no_license
|
aomenxinpujing/swiper
|
b90fd6f439e3afdf0d0c89160b7b83b7b6e1c8fc
|
af16546ce15cfa68e9c416e9acc5d3ea25f3577d
|
refs/heads/master
| 2020-05-03T02:21:22.655293
| 2019-04-02T14:06:02
| 2019-04-02T14:06:02
| 178,367,068
| 0
| 1
| null | 2019-04-02T14:06:03
| 2019-03-29T08:44:43
|
Python
|
UTF-8
|
Python
| false
| false
| 629
|
py
|
import os
from urllib.parse import urljoin
from django.conf import settings
from lib import qiniuyun
from swiper import config
from commen.keys import ICON_KEY
from user.models import User
from worker import celery_app
@celery_app.task
def upload_qn(uid,icon):
filename = ICON_KEY % uid
filepath = os.path.join(settings.BASE_DIR, settings.MEDIA_ROOT, filename)
with open(filepath, 'wb') as fp:
for chunk in icon.chunks():
fp.write(chunk)
qiniuyun.upload_qiniu(filename,filepath)
user = User.objects.get(id=uid)
user.avatar = urljoin(config.QINIU_CLOUD_URL, filename)
user.save()
|
[
"noreply@github.com"
] |
aomenxinpujing.noreply@github.com
|
e2347380c65993b481e6ce5d1830967a3fdc5e84
|
ef4e6257040fde6dc73898c967f8d7005f293a20
|
/DS6.py
|
8580174f69860b4ef0785661ce4bf38cf2b3b099
|
[] |
no_license
|
AnandKrishnamoorthy1/ML-and-NLP-Basics
|
1ad4b6f84aa468654fc62050838fe30923702966
|
65f26e044789f3611026ac716b6cc188d8728af7
|
refs/heads/master
| 2020-08-25T06:43:49.375453
| 2019-12-19T16:08:06
| 2019-12-19T16:08:06
| 216,977,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,025
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 29 10:26:06 2018
@author: anand
"""
from sklearn import datasets
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
load_data=datasets.load_boston()
X=pd.DataFrame(data=load_data.data,columns=load_data.feature_names)
Y=load_data.target
X_train,X_test,y_train,y_test=train_test_split(X,Y,random_state=0)
model=LinearRegression()
model.fit(X_train,y_train)
model_coeff=pd.DataFrame(data={'Features':load_data.feature_names,'Coefficients':model.coef_})
"""
print('Coefficients:')
print(model_coeff)
print('Intercept:')
print(model.intercept_)
"""
predict_val=model.predict(X_test)
price_compare=pd.DataFrame(data={'Actual Value':y_test,'Predicted Value':predict_val})
print(price_compare.round(1))
predict_train_val=model.predict(X_train)
price_compare=pd.DataFrame(data={'Actual Value':y_train,'Predicted Value':predict_train_val})
print(price_compare.round(1))
print('Model Train Score: ',model.score(X_train,y_train).round(2))
print('Model Test Score: ',model.score(X_test,y_test).round(2))
#####################################################################
from sklearn import datasets
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Ridge
from sklearn.preprocessing import MinMaxScaler
load_data=datasets.load_boston()
X=pd.DataFrame(data=load_data.data,columns=load_data.feature_names)
Y=load_data.target
X_train,X_test,y_train,y_test=train_test_split(X,Y,random_state=0)
scaler=MinMaxScaler()
X_train_scaled=scaler.fit_transform(X_train)
X_test_scaled=scaler.transform(X_test)
model=Ridge(alpha=2)
model.fit(X_train_scaled,y_train)
model_coeff=pd.DataFrame(data={'Features':load_data.feature_names,'Coefficients':model.coef_})
print('Coefficients:')
print(model_coeff)
print('Intercept:')
print(model.intercept_)
predict_val=model.predict(X_test_scaled)
price_compare=pd.DataFrame(data={'Actual Value':y_test,'Predicted Value':predict_val})
print(price_compare.round(1))
print('Model Train Score: ',model.score(X_train_scaled,y_train).round(2))
print('Model Test Score: ',model.score(X_test_scaled,y_test).round(2))
#####################################################################
from sklearn import datasets
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Lasso
from sklearn.preprocessing import MinMaxScaler
load_data=datasets.load_boston()
X=pd.DataFrame(data=load_data.data,columns=load_data.feature_names)
Y=load_data.target
X_train,X_test,y_train,y_test=train_test_split(X,Y,random_state=0)
scaler=MinMaxScaler()
X_train_scaled=scaler.fit_transform(X_train)
X_test_scaled=scaler.transform(X_test)
model=Lasso(alpha=.2)
model.fit(X_train_scaled,y_train)
model_coeff=pd.DataFrame(data={'Features':load_data.feature_names,'Coefficients':model.coef_})
print('Coefficients:')
print(model_coeff)
print('Intercept:')
print(model.intercept_)
predict_val=model.predict(X_test_scaled)
price_compare=pd.DataFrame(data={'Actual Value':y_test,'Predicted Value':predict_val})
print(price_compare.round(1))
print('Model Train Score: ',model.score(X_train_scaled,y_train).round(2))
print('Model Test Score: ',model.score(X_test_scaled,y_test).round(2))
from sklearn import datasets
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
load_data=datasets.load_boston()
X=pd.DataFrame(data=load_data.data,columns=load_data.feature_names)
Y=load_data.target
X_train,X_test,y_train,y_test=train_test_split(X,Y,random_state=0)
poly=PolynomialFeatures()
X_train_scaled=poly.fit_transform(X_train)
X_test_scaled=poly.fit_transform(X_test)
model=Ridge(alpha=5)
model.fit(X_train_scaled,y_train)
#model_coeff=pd.DataFrame(data={'Features':load_data.feature_names,'Coefficients':model.coef_})
"""
print('Coefficients:')
print(model_coeff)
print('Intercept:')
print(model.intercept_)
"""
predict_val=model.predict(X_test_scaled)
print('Model Train Score Polynomial: ',model.score(X_train_scaled,y_train).round(2))
print('Model Test Score Polynomial: ',model.score(X_test_scaled,y_test).round(2))
#####################################################################
from sklearn import datasets
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
load_data=datasets.load_breast_cancer()
X=pd.DataFrame(data=load_data.data,columns=load_data.feature_names)
Y=load_data.target
X_train,X_test,y_train,y_test=train_test_split(X,Y,random_state=0)
model=LogisticRegression(C=2)
model.fit(X_train,y_train)
predict_val=model.predict(X_test)
price_compare=pd.DataFrame(data={'Actual Value':y_test,'Predicted Value':predict_val})
print(price_compare.round(1))
print('Model Train Score: ',model.score(X_train,y_train).round(2))
print('Model Test Score: ',model.score(X_test,y_test).round(2))
#####################################################################
from sklearn import datasets
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
load_data=datasets.load_breast_cancer()
X=pd.DataFrame(data=load_data.data,columns=load_data.feature_names)
Y=load_data.target
X_train,X_test,y_train,y_test=train_test_split(X,Y,random_state=0)
model=LinearSVC(C=5)
model.fit(X_train,y_train)
predict_val=model.predict(X_test)
price_compare=pd.DataFrame(data={'Actual Value':y_test,'Predicted Value':predict_val})
print(price_compare.round(1))
print('Model Train Score: ',model.score(X_train,y_train).round(2))
print('Model Test Score: ',model.score(X_test,y_test).round(2))
#####################################################################
|
[
"noreply@github.com"
] |
AnandKrishnamoorthy1.noreply@github.com
|
50218bb2ebd05266a714ea763bac56c767829696
|
09af7fb5619a882fe10236c22930cd36080a5725
|
/NumPy_TutorialPoint.py
|
ff82784400e9ee0ee09c0abb353074df9867fafd
|
[] |
no_license
|
sireeshadaruvuri/Python-NumPy
|
f1536c04805b0757d19c1ea820b0391a009e76ae
|
a357d3b347850b3715956cde25559c37dd0f1d07
|
refs/heads/master
| 2022-12-22T22:08:05.238517
| 2020-09-18T23:18:12
| 2020-09-18T23:18:12
| 296,747,641
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,339
|
py
|
#array syntax
import numpy as np
a = np.array([1,2,3,4])
print(a)
#two dimensions (result shows with two square brackets)
b = np.array([[1,2,3],[4,5,6]])
print(b)
#ndmin specifies the minimum dimensions of resultant array
c = np.array([1,2,3,4], ndmin=2) #If I give ndmin = 3(3 dimensions)here it gives result with 3 square brackets
print(c)
#dtype is a desired datatype of an array
a = np.array([1,2,3,4], dtype=complex)
print(a)
a = np.array([1,2,3,4], dtype=str)
print(a)
#Data Type Objects (dtype)
d = np.dtype(np.float)
print(d)
##int8, int16, int32, int64 can be replaced by equivalent string 'i1', 'i2','i4', etc.
dt = np.dtype('i8')
print(dt)
# using endian notation
dt = np.dtype('>i4')
print(dt)
dt = np.dtype([('age',np.int8)])
print(dt)
## first create structured data type
#In case of structured type, the names of fields,
# data type of each field and part of the memory block taken by each field.
dto = np.dtype([('name','S5')])
ax = np.array([('Siri'),('purni')],dtype=dto)
print(ax)
print(ax['name'])
#The following examples define a structured data type called student with a string field 'name',
# an integer field 'age' and a float field 'marks'
dt = np.dtype([('name','S10'),('age','i8'),('marks','f8')])
a = np.array([('siri',20,30),('purni',10,50)], dtype=dt)
print(a)
#---------------------------------------------
#array attributes
a = np.array([[1,2,3,4],[3,4,5,6]])
print(a.shape)
#array can be reshaped using a.shape function
a.shape = (4,2)
print(a)
#NumPy also provides a reshape function to resize an array.
b = a.reshape(4,2)
print(b)
#ndarray.ndim
#This array attribute returns the number of array dimensions.
a = np.arange(24)
print(a)
print(a.ndim)
b = a.reshape(4,2,3)
print(b)
b = a.reshape(2,4,3)
print(b)
print(b.ndim)
#numpy.itemsize This array attribute returns the length of each element of array in bytes.
c = np.array([1,2,3,4,5],dtype=np.int8)
print(c.itemsize)
#numpy.flags
#The ndarray object has the following attributes. Its current values are returned by this function.
print(c.flags)
#numpy.empty
#The following code shows an example of an empty array
x = np.empty([3,2],dtype=np.int8)
print(x)
#np.zeros
x = np.zeros((5,),dtype=np.int)
print(x)
#convert a list to ndarray(n dimentional array) using asarray function
x = [1,2,3,4]
a = np.asarray(x,dtype=int)
print(a)
#convert a tuple
x = (1,2,3,4)
a = np.asarray(x,dtype=float)
print(a)
#list of tuples
x = [(1,2,3,4),(4,5,6,8)]
a = np.asarray(x,dtype=int)
print(a)
#numpy.frombuffer
#This function interprets a buffer as one-dimensional array.
"""
s = 'Hello World'
a = np.frombuffer(s,dtype=int)
print(a)
"""
a = np.arange(20)
print(a[1:20:2])
print(a[1:])
print(a[:5:2])
a = np.array([[1,2,3,4],[12,8,9,20],[34,56,78,90]])
print(a[1:])
print(a[:1])
print(a[1,3])
print(a)
#Advanced slicing of arrays
#column1
print(a[...,1] )
#column3
print(a[...,3])
#row1
print(a[1,...])
#from column 1
print(a[...,1:])
print(a)
b = a[[0,1,2],[0,1,0]] #it takes as 0,0 and 1,1 and 2,0
print(b)
#----------
x = np.array([[ 0, 1, 2],[ 3, 4, 5],[ 6, 7, 8],[ 9, 10, 11]])
print(x)
rows = np.array([[0, 0], [3, 3]])
cols = np.array([[0, 2], [0, 2]])
y = x[rows, cols]
print(y)
#-----------
#advanced indexing for column
x = np.array([[ 0, 1, 2],[ 3, 4, 5],[ 6, 7, 8],[ 9, 10, 11]])
print('after slicing array becomes')
print(x[1:4])
print(x[1:3])
print(x[1:2,1:3])
print(x[1:4,1:3])
print(x[1,2])
print(x[1:4,[1,2]])
#boolean
print('greater than 5 are ', +x[x>5])
#remove NaN numbers(Not a Number) using ~ (complement operator)
y = np.array([np.nan,1,2,np.nan,4,5])
print('elements in y are', +y)
print(y[~np.isnan(y)])
#to filterout non complex elements
z = np.array([1, 2+6j, 5, 6+7j])
print(z[np.iscomplex(z)])
#self-exercise of indexing and advanced indexing
n = np.array([[5,4,10,15,6],[20,25,16,18,21],[23,32,42,45,46],[48,50,55,60,62]])
print(n)
print(n[0,2])
print(n[0:2])
print(n[2:3])
print(n[1,4])
print(n[2,1])
print(n[3,2])
print(n[3,4])
print(n[1:])
print(n[:1])
print(n[1,])
print(n[...,1])
print(n[...,2])
print(n[...,3])
print(n[...,1:3])
print(n[[1,1,1],[0,1,2]],n[[2,2,2],[0,2,2]])
a = np.array([[1,1,1],[2,2,2]])
b = np.array([[0,1,2],[0,2,2]])
c = n[a,b]
print(c)
|
[
"noreply@github.com"
] |
sireeshadaruvuri.noreply@github.com
|
3a19964e213dc6bb061ab5e5ed5357a4c1f603f5
|
5785d7ed431b024dd910b642f10a6781df50e4aa
|
/craft-demo/.venv_python/bin/easy_install
|
617f968d4148a4d9a1ea945031c47c0d7bbad2ce
|
[] |
no_license
|
kashyapa/interview-prep
|
45d77324446da34d99bf8efedb3544b367b5523e
|
7060c090c40602fb9c4778eace2078e1b51e235b
|
refs/heads/master
| 2023-07-28T13:12:49.515299
| 2021-09-06T14:33:25
| 2021-09-06T14:33:25
| 403,706,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
#!/Users/schandra2/Developer/coding-problems/craft-demo/.venv_python/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"schandra2@godaddy.com"
] |
schandra2@godaddy.com
|
|
5eb38b81802aa7a6c84f0f34a1b62b50a3056eb1
|
c01ab71f681efdeb9f4e7d52ed083745b6d42590
|
/old/6th sem/competetive_c/COMPILER/test_modules/testCases.py
|
94facbbde057509de80d4825991aefabf070de5a
|
[] |
no_license
|
anant-pushkar/competetive_programming_codes
|
398a39c85a761c8d242f42f368933239a438ac06
|
127c67d7d4e2cef2d1f25189b6535606f4523af6
|
refs/heads/master
| 2021-01-20T11:57:07.528790
| 2014-11-14T08:29:21
| 2014-11-14T08:29:21
| 23,577,655
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 585
|
py
|
import testTemplate
'''
class customTester(testInstance):
def __init__(self , inStr , outStr , dec):
testInstance.__init__(self , inStr , outStr , dec)
def test(self,txt,ref):
#write test logic here
'''
def getTests():
tests = []
suite=testTemplate.testSuite("Sample Test Suite1")
testcase = testTemplate.regexTester("<<>>" , "4" , "sample1")
suite.add(testcase)
testcase = testTemplate.regexTester("><" , "0" , "sample2")
suite.add(testcase)
testcase = testTemplate.regexTester("<>>>" , "2" , "sample3")
suite.add(testcase)
tests.append(suite)
return tests
|
[
"anantpushkar009@gmail.com"
] |
anantpushkar009@gmail.com
|
b527f234d750265bae3dbe7f5038130ef20fa5c2
|
57c54ab35d84ce3021eb304af37e8393c4d50036
|
/data/utils.py
|
7c52df4d98a1b6a099963a0539365dc054b9a585
|
[] |
no_license
|
arryon/ml2014
|
89f9fc6d28622238518f26038bcc0251caeb2e18
|
136b8e32e8cf4859bc881b7d2a2b52186a3df41a
|
refs/heads/master
| 2020-06-06T02:05:13.268466
| 2013-10-22T17:39:37
| 2013-10-22T17:39:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
import numpy as np
def get_largest_std(nested_list):
tuples = [(np.std(a),idx) for idx,a in zip(range(len(nested_list)),nested_list)]
_max = max(tuples)
return nested_list[_max[1]]
|
[
"arryon@gmail.com"
] |
arryon@gmail.com
|
28d438cfc97620ca95904678fa4f47a290b9c081
|
2e23286fd6bbb26cf2262ac8588040ffd207f372
|
/app/recipy/tests/test_tags_api.py
|
848861229f7e475d24618f8ca2a67a0ee835a0f2
|
[
"MIT"
] |
permissive
|
Zoki92/Vue-recipe-api-frontend
|
470a8ba0fc96f0788fddd9e24c9e86d6e393ab5c
|
96a3505f3a451e5a9402b20c10261e4d7e3effcb
|
refs/heads/master
| 2020-05-19T23:12:44.846021
| 2019-05-17T19:04:54
| 2019-05-17T19:04:54
| 185,262,656
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,213
|
py
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipy.serializers import TagSerializer
TAGS_URL = reverse('recipy:tag-list')
class PublicTagsApiTests(TestCase):
# Test the publicly available tags API
def setUp(self):
self.client = APIClient()
def test_login_required(self):
# Test that login is required for retrieving tags
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
# Test the authorized user tags API
def setUp(self):
self.user = get_user_model().objects.create_user(
'test@test.com',
'123testing'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
# Test retrieving tags
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
# Test that tags returned are for the authenticated user
user2 = get_user_model().objects.create_user(
'other@test.com',
'123test123'
)
Tag.objects.create(user=user2, name="Fruity")
tag = Tag.objects.create(user=self.user, name="Comfort Food")
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name)
def test_create_tag_successful(self):
# Test creating a new tag
payload = {'name': 'Test tag'}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_tag_invalid(self):
# Test creating a new tag with invalid payload
payload = {'name': ''}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_tags_assigned_to_recipes(self):
# Test filtering tags by those assigned to recipes
tag1 = Tag.objects.create(user=self.user, name='Breakfast')
tag2 = Tag.objects.create(user=self.user, name='Lunch')
recipe = Recipe.objects.create(
title='Coriander eggs on toast',
time_minutes=10,
price=5.00,
user=self.user
)
recipe.tags.add(tag1)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
|
[
"zoranstoilov@yahoo.com"
] |
zoranstoilov@yahoo.com
|
0b9582b0e90d5b58ba6e6981d5158237639bfa9b
|
bdfc8ce53719cf5de27bbc4dc51b336b46644800
|
/QUESTION_3.PY
|
78a3ba2208d4e4328189cc4269486ef71012e6cb
|
[] |
no_license
|
gauripatil20/DEBUGGING
|
74a72241689cb7b9af6dd7e25ee574ef3cffde50
|
d171dfc558cc8c1d7830cda43e89b249cd85c0ad
|
refs/heads/master
| 2023-04-11T04:11:00.233625
| 2021-04-18T13:00:13
| 2021-04-18T13:00:13
| 359,143,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,980
|
py
|
# Changing a plain message so that no one else can read it is called encryption. There are many ways to encrypt; here we use a cipher wheel, which shifts every character forward by a fixed number of positions. In this cipher wheel every character's value is increased by 2, implemented with the chars and shifted_chars arrays. Example:
# chars = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
# shifted_chars = ['c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','a','b']
# Topics covered * function returning the wrong value
# argument passed to the function but never used.
# for loop iterates over wrong string
# semantic/syntactic problems in if/else
# The program is given below; correct it and submit the fixed version in a new file.
# Cipher wheel with a function for finding an element in a list
# find_in_list function defined here but not called
def find_in_list(query, mainlist):
# this function is used to find the position of the "query" in the "mainlist". If "query" is in the list then it returns its position, otherwise it returns None
mainlist_len = len(mainlist)
range_for_loop = range(mainlist_len)
index =None
for i in range_for_loop:
element = mainlist[i]
if element == query:
index = i
return index
# this should return the position of the "query" in the "mainlist"
chars = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
shifted_chars = ['c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','a','b']
# encrypt_message function defined here but not called
def encrypted_message(plain_msg):
# this function takes "plain_msg" as an argument and returns the encrypted message. The "plain_msg" is converted into "encrypted_msg" using the "shifted_chars" list. Example: if plain_msg = "ng" then n => p, g => i and hence encrypted_msg = "pi"
encrypted_msg = ""
for character in plain_msg:
# for character in msg
if character in chars:
chars_index = find_in_list(character,chars)
new_char = shifted_chars[chars_index]
encrypted_msg = encrypted_msg + new_char
else:
encrypted_msg = encrypted_msg + character
return encrypted_msg
# decrypt_message function defined here but not called
def decrypt_message(encrypted_msg):
# this function takes "encrypted_msg" as an argument and returns the decrypted message. The "encrypted_msg" is converted into "decrypted_msg" using the "shifted_chars" list. Example: if encrypted_msg = "pi" then p => n, i => g and hence decrypted_msg = "ng"
decrypted_msg = ""
for character in encrypted_msg:
if character in shifted_chars:
char_index = find_in_list(character, shifted_chars)
new_char = chars[char_index]
decrypted_msg = decrypted_msg + new_char
else:
decrypted_msg = decrypted_msg + character
return decrypted_msg
# methods should return or print the new messages.
############################################### Code starts from here ##################################################
flag = True
while flag == True:
    choice = input("What do you want to do? 1. Encrypt a message 2. Decrypt a message. Enter `e` or `d` respectively!")
if choice == 'e':
plain_message = input("Enter message to encrypt??")
print(encrypted_message(plain_message))
elif choice == 'd':
encrypted_msg = input("Enter message to decrypt?")
print(decrypt_message(encrypted_msg))
    play_again = input("Do you want to try again or do you want to exit? (Y/N)")
if play_again == 'Y':
continue
elif play_again == 'N':
break
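# --- Added usage sketch (illustration only, not part of the original exercise file) ---
# A quick round trip with the helpers defined above; the sample text below is an
# arbitrary assumption. Every letter is shifted forward by two places.
sample = "hello world"
secret = encrypted_message(sample)        # -> "jgnnq yqtnf"
assert decrypt_message(secret) == sample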
|
[
"you@example.com"
] |
you@example.com
|
2cce7d2367c4e8e437d7c9be2660c7dd224b85d1
|
498e792e16ab1a74ac034c53177c4cccbeef2749
|
/segmentation/RTDA/scripts/train.py
|
0da7a5ee94e79c06adb2d30e32adcbd526f81a68
|
[] |
no_license
|
ydwisroad/imageprocessingpytorch
|
f97bec4469c087f6bbbca5d42da180c95be8b13f
|
bd8d1af228619c9c6c9c1a2b880422f7d5048dd5
|
refs/heads/master
| 2023-07-29T05:05:11.145832
| 2022-02-21T23:32:03
| 2022-02-21T23:32:03
| 284,976,501
| 7
| 3
| null | 2023-07-24T01:08:22
| 2020-08-04T12:43:24
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 45,758
|
py
|
import os
import torch
import torch.optim as optim
from tqdm import tqdm
from utils.scheduler import PolyLR
from utils.losses import CrossEntropy2d, FocalLoss, get_target_tensor, DiceLoss, get_target_tensor_mc
from torch.nn import BCEWithLogitsLoss
from scripts.eval import validate
from models.utils import save_model, load_model, save_da_model, load_da_model, freeze_model
import torch.nn.functional as F
from dataset.utils import source_to_target, source_to_target_np
import torch.backends.cudnn as cudnn
from dataset.utils import find_dataset_using_name
from torch.utils import data
import copy
import numpy as np
from PIL import Image
import torch.cuda.amp as amp
palette = [128, 64, 128, # Road, 0
244, 35, 232, # Sidewalk, 1
70, 70, 70, # Building, 2
102, 102, 156, # Wall, 3
190, 153, 153, # Fence, 4
153, 153, 153, # pole, 5
250, 170, 30, # traffic light, 6
220, 220, 0, # traffic sign, 7
107, 142, 35, # vegetation, 8
152, 251, 152, # terrain, 9
70, 130, 180, # sky, 10
220, 20, 60, # person, 11
255, 0, 0, # rider, 12
0, 0, 142, # car, 13
0, 0, 70, # truck, 14
0, 60, 100, # bus, 15
0, 80, 100, # train, 16
0, 0, 230, # motor-bike, 17
119, 11, 32] # bike, 18]
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
palette.append(0)
def colorize_mask(mask):
# mask: numpy array of the mask
new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
new_mask.putpalette(palette)
return new_mask
def compute_ce_weights(num_classes, predictions):
z = np.zeros((num_classes,))
predictions = predictions.clone().detach().cpu().numpy()
for i, (label) in enumerate(predictions):
y = label
# y = y.detach().cpu().numpy()
mask = (y >= 0) & (y < num_classes)
labels = y[mask].astype(np.uint8)
count_l = np.bincount(labels, minlength=num_classes)
z += count_l
total_frequency = np.sum(z)
class_weights = []
for frequency in z:
class_weight = 1 / (np.log(1.02 + (frequency / total_frequency)))
class_weights.append(class_weight)
ret = np.array(class_weights)
return ret
def train_source_only(args,
model,
source_train_loader,
val_loader,
metrics,
iter_counter,
visualizer):
start_iter = 0
train_loss = 0.0
scaler = amp.GradScaler()
# Define optimizer
optimizer = optim.SGD(model.parameters(),
lr=args.seg_lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# Define scheduler
scheduler = PolyLR(optimizer,
max_iters=args.max_iters,
power=args.power)
# Define loss criterion
if args.seg_loss == 'focal':
criterion_seg = FocalLoss(num_class=args.num_classes, ignore_label=args.ignore_index)
elif args.seg_loss == 'dice':
criterion_seg = DiceLoss(num_classes=args.num_classes, ignore_index=args.ignore_index)
else:
criterion_seg = CrossEntropy2d(ignore_label=args.ignore_index)
# Resume model if continuing training
if args.continue_train:
model, optimizer, scheduler, start_iter = load_model(args, model, optimizer, scheduler)
# Start training
source_train_loader_it = iter(source_train_loader)
iter_counter.record_training_start(start_iter)
for i_iter in tqdm(iter_counter.training_steps()):
# Set model to train
model.train()
# Get images/labels and move them to GPUs
try:
images, labels, _, _ = next(source_train_loader_it)
except:
source_train_loader_it = iter(source_train_loader)
images, labels, _, _ = next(source_train_loader_it)
iter_counter.record_one_epoch()
if args.use_st:
mean = torch.reshape(torch.from_numpy(args.mean), (1, 3, 1, 1))
B, C, H, W = images.shape
mean = mean.repeat(B, 1, H, W)
images = images - mean
        images, labels = images.to(args.gpu_ids[0], dtype=torch.float32), labels.to(args.gpu_ids[0], dtype=torch.long)
# Zero-grad the optimizer
optimizer.zero_grad()
# Train source
with amp.autocast():
preds, preds_sup1, preds_sup2 = model(images)
loss1 = criterion_seg(preds, labels)
loss2 = criterion_seg(preds_sup1, labels)
loss3 = criterion_seg(preds_sup2, labels)
loss_seg = loss1 + loss2 + loss3
# Update gradients
scaler.scale(loss_seg).backward()
# Update model
scaler.step(optimizer)
if scheduler is not None:
scheduler.step()
scaler.update()
# Update logging information
train_loss += loss_seg.item()
# Print losses
if iter_counter.needs_printing():
# Print log and visualize on tensorboard
visualizer.info(f'Training loss at iter {iter_counter.total_steps_so_far}: {train_loss / args.print_freq}')
visualizer.add_scalar('Training_Loss', train_loss / args.print_freq, iter_counter.total_steps_so_far)
train_loss = 0.0
# Validation phase
if iter_counter.needs_validating():
# Set model to eval
visualizer.info('Validating model at step %d' % iter_counter.total_steps_so_far)
validate(args, model, val_loader, metrics, visualizer, i_iter)
# Save model
if iter_counter.needs_saving():
save_model(args, model, optimizer, scheduler, iter_counter)
iter_counter.record_one_iteration()
iter_counter.record_training_end()
save_model(args, model, optimizer, scheduler, iter_counter)
validate(args, model, val_loader, metrics, visualizer, i_iter)
def train_da(args, model, model_d, source_train_loader, target_train_loader, val_loader, metrics, iter_counter,
visualizer):
if args.ft is not None:
model = load_model(args, model, ft=args.ft)
validate(args, model, val_loader, metrics, visualizer, 0)
exit()
# Initialize variables
cudnn.benchmark = True
cudnn.enabled = True
scaler = amp.GradScaler()
start_iter = 0
parser_source_loss = 0.0
parser_target_loss = 0.0
parser_d_loss = 0.0
discriminator_source_loss = 0.0
discriminator_target_loss = 0.0
metrics.reset()
# Define optimizers
optimizer = optim.SGD(model.parameters(),
lr=args.seg_lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
optimizer_d = optim.Adam(model_d.parameters(),
lr=args.d_lr,
betas=(args.beta1_d, args.beta2_d))
# Define schedulers
scheduler = PolyLR(optimizer,
max_iters=args.max_iters,
power=args.power)
scheduler_d = PolyLR(optimizer_d,
max_iters=args.max_iters,
power=args.power)
# Define losses criterion
if args.seg_loss == 'focal':
criterion_seg = FocalLoss(num_class=args.num_classes,
ignore_label=args.ignore_index)
elif args.seg_loss == 'dice':
criterion_seg = DiceLoss(num_classes=args.num_classes, ignore_index=args.ignore_index)
else:
criterion_seg = CrossEntropy2d(ignore_label=args.ignore_index)
criterion_d = BCEWithLogitsLoss()
# Resume model if continuing training
if args.continue_train:
model, model_d, model_d2, optimizer, optimizer_d, \
optimizer_d2, scheduler, scheduler_d, scheduler_d2, start_iter = load_da_model(args,
model,
model_d,
optimizer,
optimizer_d,
scheduler,
scheduler_d
)
# Start training
iter_counter.record_training_start(start_iter)
source_train_loader_it = iter(source_train_loader)
target_train_loader_it = iter(target_train_loader)
visualizer.info(f'Lambda: {args.lambda_adv} at epoch: {iter_counter.total_epochs()}')
iter_counter.record_training_start(start_iter)
for i_iter in tqdm(iter_counter.training_steps()):
# Set model to train
model.train()
model_d.train()
# Zero-grad the optimizers
optimizer.zero_grad()
optimizer_d.zero_grad()
# Get source/target images and labels and move them to GPUs
try:
source_images, source_labels, _, _ = next(source_train_loader_it)
except:
source_train_loader_it = iter(source_train_loader)
source_images, source_labels, _, _ = next(source_train_loader_it)
try:
target_images, target_labels, _, name = next(target_train_loader_it)
except:
target_train_loader_it = iter(target_train_loader)
target_images, target_labels, _, name = next(target_train_loader_it)
#validate(args, model, val_loader, metrics, visualizer, i_iter)
#model.train()
if args.use_st:
src_in_trg = source_to_target(source_images, target_images, L=0.01)
mean = torch.reshape(torch.from_numpy(args.mean), (1, 3, 1, 1))
B, C, H, W = source_images.shape
mean = mean.repeat(B, 1, H, W)
source_images = src_in_trg.clone() - mean
target_images = target_images - mean
source_images, source_labels = source_images.to(args.gpu_ids[0], dtype=torch.float32), source_labels.to(
args.gpu_ids[0], dtype=torch.long)
target_images, target_labels = target_images.to(args.gpu_ids[0], dtype=torch.float32), target_labels.to(
args.gpu_ids[0], dtype=torch.long)
# TRAIN SCENE PARSER
# Don't accumulate gradients in discriminator
for param in model_d.parameters():
param.requires_grad = False
# Train Source
if args.model == "bisenetv2":
with amp.autocast():
spreds = model(source_images)
loss_seg_source = criterion_seg(spreds, source_labels)
else:
with amp.autocast():
spreds, spreds_sup1, spreds_sup2 = model(source_images)
loss1 = criterion_seg(spreds, source_labels)
loss2 = criterion_seg(spreds_sup1, source_labels)
loss3 = criterion_seg(spreds_sup2, source_labels)
loss_seg_source = loss1 + loss2 + loss3
scaler.scale(loss_seg_source).backward()
# Train Target
if args.model == "bisenetv2":
with amp.autocast():
tpreds = model(target_images)
loss_seg_source = criterion_seg(spreds, source_labels)
if args.ssl == "ssl" or args.ssl == "ssl_st":
loss_seg_target = criterion_seg(tpreds, target_labels)
else:
loss_seg_target = 0.0
else:
with amp.autocast():
tpreds, tpreds_sup1, tpreds_sup2 = model(target_images)
if args.ssl == "ssl" or args.ssl == "ssl_st":
losst1 = criterion_seg(tpreds, target_labels)
losst2 = criterion_seg(tpreds_sup1, target_labels)
losst3 = criterion_seg(tpreds_sup2, target_labels)
loss_seg_target = losst1 + losst2 + losst3
else:
loss_seg_target = 0.0
# Fool the discriminator
with amp.autocast():
d_output = model_d(F.softmax(tpreds, dim=1))
loss_fool = criterion_d(d_output,
get_target_tensor(d_output, "source").to(args.gpu_ids[0], dtype=torch.float))
loss_target = loss_fool * args.lambda_adv + loss_seg_target
scaler.scale(loss_target).backward()
# TRAIN DISCRIMINATOR
for param in model_d.parameters():
param.requires_grad = True
source_predictions = spreds.detach()
target_predictions = tpreds.detach()
with amp.autocast():
d_output_source = model_d(F.softmax(source_predictions, dim=1))
target_tensor = get_target_tensor(d_output_source, "source")
source_d_loss = criterion_d(d_output_source, target_tensor.to(args.gpu_ids[0], dtype=torch.float)) / 2
scaler.scale(source_d_loss).backward()
with amp.autocast():
d_output_target = model_d(F.softmax(target_predictions, dim=1))
target_tensor = get_target_tensor(d_output_target, "target")
target_d_loss = criterion_d(d_output_target, target_tensor.to(args.gpu_ids[0], dtype=torch.float)) / 2
scaler.scale(target_d_loss).backward()
scaler.step(optimizer)
scaler.step(optimizer_d)
if scheduler is not None:
scheduler.step()
scheduler_d.step()
scaler.update()
# Update logging information
parser_source_loss += loss_seg_source.item()
if loss_seg_target != 0.0:
parser_target_loss += loss_seg_target.item()
else:
parser_target_loss += loss_seg_target
parser_d_loss += loss_fool.item()
discriminator_source_loss += source_d_loss.item()
discriminator_target_loss += target_d_loss.item()
# Print losses
if iter_counter.needs_printing():
# Print log and visualize on tensorboard
visualizer.info(
f'Parser source loss at iter {iter_counter.total_steps_so_far}: {parser_source_loss / args.print_freq}')
visualizer.add_scalar('Parser_Source_Loss', parser_source_loss / args.print_freq,
iter_counter.total_steps_so_far)
visualizer.info(
f'Parser target loss at iter {iter_counter.total_steps_so_far}: {parser_target_loss / args.print_freq}')
visualizer.add_scalar('Parser_Target_Loss', parser_target_loss / args.print_freq,
iter_counter.total_steps_so_far)
visualizer.info(
f'Parser discriminator loss at iter {iter_counter.total_steps_so_far}: {parser_d_loss / args.print_freq}')
visualizer.add_scalar('Parser_Discriminator_Loss', parser_d_loss / args.print_freq,
iter_counter.total_steps_so_far)
visualizer.info(
f'Discriminator Source loss at iter {iter_counter.total_steps_so_far}: {discriminator_source_loss / args.print_freq}')
visualizer.add_scalar('Discriminator_Source_Loss', discriminator_source_loss / args.print_freq,
iter_counter.total_steps_so_far)
visualizer.info(
f'Discriminator Target loss at iter {iter_counter.total_steps_so_far}: {discriminator_target_loss / args.print_freq}')
visualizer.add_scalar('Discriminator_Target_Loss', discriminator_target_loss / args.print_freq,
iter_counter.total_steps_so_far)
parser_source_loss = 0.0
parser_target_loss = 0.0
parser_d_loss = 0.0
discriminator_source_loss = 0.0
discriminator_target_loss = 0.0
# Validation phase
if iter_counter.needs_validating():
# Set model to eval
visualizer.info('Validating model at step %d' % iter_counter.total_steps_so_far)
validate(args, model, val_loader, metrics, visualizer, i_iter)
# Save model
if iter_counter.needs_saving():
save_da_model(args, model, model_d, optimizer, optimizer_d, scheduler, scheduler_d, iter_counter)
iter_counter.record_one_iteration()
iter_counter.record_training_end()
visualizer.info('End training')
save_da_model(args, model, model_d, optimizer, optimizer_d, scheduler, scheduler_d, iter_counter)
validate(args, model, val_loader, metrics, visualizer, i_iter)
def train_mcda(args, model, model_d, source_train_loader, target_train_loader, val_loader, metrics, iter_counter,
visualizer):
# Initialize variables
cudnn.benchmark = True
cudnn.enabled = True
scaler = amp.GradScaler()
start_iter = 0
parser_source_loss = 0.0
parser_target_loss = 0.0
parser_d_loss = 0.0
discriminator_source_loss = 0.0
discriminator_target_loss = 0.0
metrics.reset()
# Define optimizers
optimizer = optim.SGD(model.parameters(),
lr=args.seg_lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
optimizer_d = optim.Adam(model_d.parameters(),
lr=args.d_lr,
betas=(args.beta1_d, args.beta2_d))
# Define schedulers
scheduler = PolyLR(optimizer,
max_iters=args.max_iters,
power=args.power)
scheduler_d = PolyLR(optimizer_d,
max_iters=args.max_iters,
power=args.power)
# Define losses criterion
if args.seg_loss == 'focal':
criterion_seg = FocalLoss(num_class=args.num_classes,
ignore_label=args.ignore_index)
else:
criterion_seg = CrossEntropy2d(ignore_label=args.ignore_index)
criterion_d = BCEWithLogitsLoss()
# Resume model if continuing training
if args.continue_train:
model, model_d, model_d2, optimizer, optimizer_d, \
optimizer_d2, scheduler, scheduler_d, scheduler_d2, start_iter = load_da_model(args,
model,
model_d,
optimizer,
optimizer_d,
scheduler,
scheduler_d
)
# Start training
iter_counter.record_training_start(start_iter)
source_train_loader_it = iter(source_train_loader)
target_train_loader_it = iter(target_train_loader)
visualizer.info(f'Lambda: {args.lambda_adv} at epoch: {iter_counter.total_epochs()}')
iter_counter.record_training_start(start_iter)
for i_iter in tqdm(iter_counter.training_steps()):
# Set model to train
model.train()
model_d.train()
# Zero-grad the optimizers
optimizer.zero_grad()
optimizer_d.zero_grad()
# Get source/target images and labels and move them to GPUs
try:
source_images, source_labels, _, _ = next(source_train_loader_it)
except:
source_train_loader_it = iter(source_train_loader)
source_images, source_labels, _, _ = next(source_train_loader_it)
try:
target_images, target_labels, _, name = next(target_train_loader_it)
except:
target_train_loader_it = iter(target_train_loader)
target_images, target_labels, _, name = next(target_train_loader_it)
validate(args, model, val_loader, metrics, visualizer, i_iter)
model.train()
source_images, source_labels = source_images.to(args.gpu_ids[0], dtype=torch.float32), source_labels.to(
args.gpu_ids[0], dtype=torch.long)
target_images, target_labels = target_images.to(args.gpu_ids[0], dtype=torch.float32), target_labels.to(
args.gpu_ids[0], dtype=torch.long)
# TRAIN SCENE PARSER
# Don't accumulate gradients in discriminator
for param in model_d.parameters():
param.requires_grad = False
# Train Source
with amp.autocast():
spreds, spreds_sup1, spreds_sup2 = model(source_images)
loss1 = criterion_seg(spreds, source_labels)
loss2 = criterion_seg(spreds_sup1, source_labels)
loss3 = criterion_seg(spreds_sup2, source_labels)
loss_seg_source = loss1 + loss2 + loss3
scaler.scale(loss_seg_source).backward()
# Train Target
with amp.autocast():
tpreds, _, _ = model(target_images)
# tsp = tpreds.clone().detach()
#_, tsp = tsp.max(dim=1)
# tsp = tsp.cpu().numpy()
if args.use_weights:
classes_weights = compute_ce_weights(args.num_classes, source_labels)
classes_weights = (classes_weights - np.min(classes_weights)) / (np.max(classes_weights) - np.min(classes_weights))
classes_weights = torch.from_numpy(classes_weights.astype(np.float32)).to(args.gpu_ids[0])
loss_seg_target = 0.0
# Fool the discriminator
d_output = model_d(F.softmax(tpreds, dim=1))
target_tensor = get_target_tensor_mc(d_output, "source").to(args.gpu_ids[0], dtype=torch.long)
if args.use_weights:
loss_fool = criterion_seg(d_output, target_tensor, weight=classes_weights)
else:
loss_fool = criterion_seg(d_output, target_tensor)
loss_target = loss_fool * args.lambda_adv + loss_seg_target
scaler.scale(loss_target).backward()
# TRAIN DISCRIMINATOR
for param in model_d.parameters():
param.requires_grad = True
source_predictions = spreds.detach()
target_predictions = tpreds.detach()
with amp.autocast():
d_output_source = model_d(F.softmax(source_predictions, dim=1))
target_tensor = get_target_tensor_mc(d_output_source, "source")
source_d_loss = criterion_seg(d_output_source, target_tensor.to(args.gpu_ids[0], dtype=torch.long)) / 2
scaler.scale(source_d_loss).backward()
with amp.autocast():
d_output_target = model_d(F.softmax(target_predictions, dim=1))
target_tensor = get_target_tensor_mc(d_output_target, "target")
target_d_loss = criterion_seg(d_output_target, target_tensor.to(args.gpu_ids[0], dtype=torch.long)) / 2
scaler.scale(target_d_loss).backward()
scaler.step(optimizer)
scaler.step(optimizer_d)
if scheduler is not None:
scheduler.step()
scheduler_d.step()
scaler.update()
# Update logging information
parser_source_loss += loss_seg_source.item()
if loss_seg_target != 0.0:
parser_target_loss += loss_seg_target.item()
else:
parser_target_loss += loss_seg_target
parser_d_loss += loss_fool.item()
discriminator_source_loss += source_d_loss.item()
discriminator_target_loss += target_d_loss.item()
# Print losses
if iter_counter.needs_printing():
# Print log and visualize on tensorboard
visualizer.info(
f'Parser source loss at iter {iter_counter.total_steps_so_far}: {parser_source_loss / args.print_freq}')
visualizer.add_scalar('Parser_Source_Loss', parser_source_loss / args.print_freq,
iter_counter.total_steps_so_far)
visualizer.info(
f'Parser target loss at iter {iter_counter.total_steps_so_far}: {parser_target_loss / args.print_freq}')
visualizer.add_scalar('Parser_Target_Loss', parser_target_loss / args.print_freq,
iter_counter.total_steps_so_far)
visualizer.info(
f'Parser discriminator loss at iter {iter_counter.total_steps_so_far}: {parser_d_loss / args.print_freq}')
visualizer.add_scalar('Parser_Discriminator_Loss', parser_d_loss / args.print_freq,
iter_counter.total_steps_so_far)
visualizer.info(
f'Discriminator Source loss at iter {iter_counter.total_steps_so_far}: {discriminator_source_loss / args.print_freq}')
visualizer.add_scalar('Discriminator_Source_Loss', discriminator_source_loss / args.print_freq,
iter_counter.total_steps_so_far)
visualizer.info(
f'Discriminator Target loss at iter {iter_counter.total_steps_so_far}: {discriminator_target_loss / args.print_freq}')
visualizer.add_scalar('Discriminator_Target_Loss', discriminator_target_loss / args.print_freq,
iter_counter.total_steps_so_far)
parser_source_loss = 0.0
parser_target_loss = 0.0
parser_d_loss = 0.0
discriminator_source_loss = 0.0
discriminator_target_loss = 0.0
# Validation phase
if iter_counter.needs_validating():
# Set model to eval
visualizer.info('Validating model at step %d' % iter_counter.total_steps_so_far)
validate(args, model, val_loader, metrics, visualizer, i_iter)
# Save model
if iter_counter.needs_saving():
save_da_model(args, model, model_d, optimizer, optimizer_d, scheduler, scheduler_d, iter_counter)
iter_counter.record_one_iteration()
iter_counter.record_training_end()
visualizer.info('End training')
save_da_model(args, model, model_d, optimizer, optimizer_d, scheduler, scheduler_d, iter_counter)
validate(args, model, val_loader, metrics, visualizer, i_iter)
def train_dda(args, model, model_d, model_d_sp, source_train_loader, target_train_loader, val_loader, metrics, iter_counter,
visualizer):
if args.ft is not None:
model = load_model(args, model, ft=args.ft)
# Initialize variables
cudnn.benchmark = True
cudnn.enabled = True
scaler = amp.GradScaler()
start_iter = 0
parser_source_loss = 0.0
parser_target_loss = 0.0
parser_d_loss = 0.0
discriminator_source_loss = 0.0
discriminator_target_loss = 0.0
metrics.reset()
# Define optimizers
optimizer = optim.SGD(model.parameters(),
lr=args.seg_lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
optimizer_d = optim.Adam(model_d.parameters(),
lr=args.d_lr,
betas=(args.beta1_d, args.beta2_d))
optimizer_d_sp = optim.Adam(model_d_sp.parameters(),
lr=args.d_lr,
betas=(args.beta1_d, args.beta2_d))
# Define schedulers
scheduler = PolyLR(optimizer,
max_iters=args.max_iters,
power=args.power)
scheduler_d = PolyLR(optimizer_d,
max_iters=args.max_iters,
power=args.power)
scheduler_d_sp = PolyLR(optimizer_d_sp,
max_iters=args.max_iters,
power=args.power)
# Define losses criterion
if args.seg_loss == 'focal':
criterion_seg = FocalLoss(num_class=args.num_classes,
ignore_label=args.ignore_index)
elif args.seg_loss == 'dice':
criterion_seg = DiceLoss(num_classes=args.num_classes, ignore_index=args.ignore_index)
else:
criterion_seg = CrossEntropy2d(ignore_label=args.ignore_index)
criterion_d = BCEWithLogitsLoss()
# Resume model if continuing training
if args.continue_train:
model, model_d, model_d_sp, optimizer, optimizer_d, \
optimizer_d_sp, scheduler, scheduler_d, scheduler_d_sp, start_iter = load_da_model(args,
model,
model_d,
optimizer,
optimizer_d,
scheduler,
scheduler_d,
model_d_sp,
optimizer_d_sp,
scheduler_d_sp)
# Start training
iter_counter.record_training_start(start_iter)
source_train_loader_it = iter(source_train_loader)
target_train_loader_it = iter(target_train_loader)
visualizer.info(f'Lambda: {args.lambda_adv} at epoch: {iter_counter.total_epochs()}')
iter_counter.record_training_start(start_iter)
for i_iter in tqdm(iter_counter.training_steps()):
# Set model to train
model.train()
model_d.train()
model_d_sp.train()
# Zero-grad the optimizers
optimizer.zero_grad()
optimizer_d.zero_grad()
optimizer_d_sp.zero_grad()
# Get source/target images and labels and move them to GPUs
try:
source_images, source_labels, _, _ = next(source_train_loader_it)
except:
source_train_loader_it = iter(source_train_loader)
source_images, source_labels, _, _ = next(source_train_loader_it)
try:
target_images, target_labels, _, name = next(target_train_loader_it)
except:
target_train_loader_it = iter(target_train_loader)
target_images, target_labels, _, name = next(target_train_loader_it)
validate(args, model, val_loader, metrics, visualizer, i_iter)
model.train()
source_images, source_labels = source_images.to(args.gpu_ids[0], dtype=torch.float32), source_labels.to(
args.gpu_ids[0], dtype=torch.long)
target_images, target_labels = target_images.to(args.gpu_ids[0], dtype=torch.float32), target_labels.to(
args.gpu_ids[0], dtype=torch.long)
# TRAIN SCENE PARSER
# Don't accumulate gradients in discriminator
for param in model_d.parameters():
param.requires_grad = False
for param in model_d_sp.parameters():
param.requires_grad = False
# Train Source
with amp.autocast():
spreds, spreds_sup1, spreds_sup2, ssp = model(source_images)
loss1 = criterion_seg(spreds, source_labels)
loss2 = criterion_seg(spreds_sup1, source_labels)
loss3 = criterion_seg(spreds_sup2, source_labels)
loss_seg_source = loss1 + loss2 + loss3
scaler.scale(loss_seg_source).backward()
# Train Target
with amp.autocast():
tpreds, _, _, tsp = model(target_images)
loss_seg_target = 0.0
# Fool the discriminator
d_output = model_d(F.softmax(tpreds, dim=1))
loss_fool = criterion_d(d_output,
get_target_tensor(d_output, "source").to(args.gpu_ids[0], dtype=torch.float))
output_sp = model_d_sp(F.softmax(tsp, dim=1))
loss_fool_sp = criterion_d(output_sp,
get_target_tensor(output_sp, "source").to(args.gpu_ids[0], dtype=torch.float))
loss_target = loss_fool * args.lambda_adv + loss_fool_sp * args.lambda_adv + loss_seg_target
scaler.scale(loss_target).backward()
# TRAIN DISCRIMINATOR
for param in model_d.parameters():
param.requires_grad = True
for param in model_d_sp.parameters():
param.requires_grad = True
source_predictions = spreds.detach()
target_predictions = tpreds.detach()
source_predictions_sp = ssp.detach()
target_predictions_sp = tsp.detach()
with amp.autocast():
d_output_source = model_d(F.softmax(source_predictions, dim=1))
target_tensor = get_target_tensor(d_output_source, "source")
source_d_loss = criterion_d(d_output_source, target_tensor.to(args.gpu_ids[0], dtype=torch.float)) / 2
scaler.scale(source_d_loss).backward()
with amp.autocast():
d_output_target = model_d(F.softmax(target_predictions, dim=1))
target_tensor = get_target_tensor(d_output_target, "target")
target_d_loss = criterion_d(d_output_target, target_tensor.to(args.gpu_ids[0], dtype=torch.float)) / 2
scaler.scale(target_d_loss).backward()
with amp.autocast():
d_output_source_sp = model_d_sp(F.softmax(source_predictions_sp, dim=1))
target_tensor_sp = get_target_tensor(d_output_source_sp, "source")
source_d_loss_sp = criterion_d(d_output_source_sp, target_tensor_sp.to(args.gpu_ids[0], dtype=torch.float)) / 2
scaler.scale(source_d_loss_sp).backward()
with amp.autocast():
d_output_target_sp = model_d_sp(F.softmax(target_predictions_sp, dim=1))
target_tensor_sp = get_target_tensor(d_output_target_sp, "target")
target_d_loss_sp = criterion_d(d_output_target_sp, target_tensor_sp.to(args.gpu_ids[0], dtype=torch.float)) / 2
scaler.scale(target_d_loss_sp).backward()
scaler.step(optimizer)
scaler.step(optimizer_d)
scaler.step(optimizer_d_sp)
if scheduler is not None:
scheduler.step()
scheduler_d.step()
scheduler_d_sp.step()
scaler.update()
# Update logging information
parser_source_loss += loss_seg_source.item()
if loss_seg_target != 0.0:
parser_target_loss += loss_seg_target.item()
else:
parser_target_loss += loss_seg_target
parser_d_loss += loss_fool.item()
discriminator_source_loss += source_d_loss.item()
discriminator_target_loss += target_d_loss.item()
# Print losses
if iter_counter.needs_printing():
# Print log and visualize on tensorboard
visualizer.info(
f'Parser source loss at iter {iter_counter.total_steps_so_far}: {parser_source_loss / args.print_freq}')
visualizer.add_scalar('Parser_Source_Loss', parser_source_loss / args.print_freq,
iter_counter.total_steps_so_far)
visualizer.info(
f'Parser target loss at iter {iter_counter.total_steps_so_far}: {parser_target_loss / args.print_freq}')
visualizer.add_scalar('Parser_Target_Loss', parser_target_loss / args.print_freq,
iter_counter.total_steps_so_far)
visualizer.info(
f'Parser discriminator loss at iter {iter_counter.total_steps_so_far}: {parser_d_loss / args.print_freq}')
visualizer.add_scalar('Parser_Discriminator_Loss', parser_d_loss / args.print_freq,
iter_counter.total_steps_so_far)
visualizer.info(
f'Discriminator Source loss at iter {iter_counter.total_steps_so_far}: {discriminator_source_loss / args.print_freq}')
visualizer.add_scalar('Discriminator_Source_Loss', discriminator_source_loss / args.print_freq,
iter_counter.total_steps_so_far)
visualizer.info(
f'Discriminator Target loss at iter {iter_counter.total_steps_so_far}: {discriminator_target_loss / args.print_freq}')
visualizer.add_scalar('Discriminator_Target_Loss', discriminator_target_loss / args.print_freq,
iter_counter.total_steps_so_far)
parser_source_loss = 0.0
parser_target_loss = 0.0
parser_d_loss = 0.0
discriminator_source_loss = 0.0
discriminator_target_loss = 0.0
# Validation phase
if iter_counter.needs_validating():
# Set model to eval
visualizer.info('Validating model at step %d' % iter_counter.total_steps_so_far)
validate(args, model, val_loader, metrics, visualizer, i_iter)
# Save model
if iter_counter.needs_saving():
save_da_model(args, model, model_d, optimizer, optimizer_d, scheduler, scheduler_d, iter_counter, model_d_sp, optimizer_d_sp, scheduler_d_sp)
iter_counter.record_one_iteration()
iter_counter.record_training_end()
visualizer.info('End training')
save_da_model(args, model, model_d, optimizer, optimizer_d, scheduler, scheduler_d, iter_counter, model_d_sp,
optimizer_d_sp, scheduler_d_sp)
validate(args, model, val_loader, metrics, visualizer, i_iter)
def ssl(args, model, target_train_loader, visualizer):
if args.ft is not None:
model = load_model(args, model, ft=args.ft)
model.eval()
target_train_loader_it = iter(target_train_loader)
index = 0
for i_iter in range(len(target_train_loader)):
target_images, _, _, target_names = next(target_train_loader_it)
if index % 100 == 0:
visualizer.info(f'Processed {index} images')
image_name = []
predicted_label = np.zeros((target_images.shape[0], 1024, 2048))
predicted_prob = np.zeros((target_images.shape[0], 1024, 2048))
for index, (timage, tname) in enumerate(zip(target_images, target_names)):
if timage is not None:
timage = timage.unsqueeze(0)
timage = timage.to(args.gpu_ids[0], dtype=torch.float32)
predictions = model(timage)
target_predictions = predictions[0].unsqueeze(0)
output = torch.nn.functional.softmax(target_predictions, dim=1)
output = torch.nn.functional.upsample(output, (1024, 2048), mode='bilinear', align_corners=True).cpu().data[0].numpy()
output = output.transpose(1, 2, 0)
label, prob = np.argmax(output, axis=2), np.max(output, axis=2)
predicted_label[index] = label.copy()
predicted_prob[index] = prob.copy()
image_name.append(tname)
        # choose the per-class thresholds by looking at the whole batch
thres = []
for i in range(19):
x = predicted_prob[predicted_label == i]
if len(x) == 0:
thres.append(0)
continue
x = np.sort(x)
thres.append(x[np.int(np.round(len(x) * 0.5))])
thres = np.array(thres)
thres[thres > 0.9] = 0.9
        # create the pseudo labels
for idx in range(target_images.shape[0]):
name = image_name[idx]
label = predicted_label[idx]
prob = predicted_prob[idx]
            # build the label accordingly
for i in range(19):
label[(prob < thres[i]) * (label == i)] = 255
output = np.asarray(label, dtype=np.uint8)
mask_img = colorize_mask(output)
output = Image.fromarray(output)
name = name.split('/')[-1]
os.makedirs("Cityscapes/pseudolabels", exist_ok=True)
os.makedirs("Cityscapes/pseudolabels_rgb", exist_ok=True)
output.save('%s/%s' % ("Cityscapes/pseudolabels", name))
mask_img.save('%s/%s' % ("Cityscapes/pseudolabels_rgb", name))
def ssl_st(args, model, source_train_loader, target_train_loader, visualizer):
if args.ft is not None:
model = load_model(args, model, ft=args.ft)
model.eval()
source_train_loader_it = iter(source_train_loader)
target_train_loader_it = iter(target_train_loader)
index = 0
for i_iter in range(len(target_train_loader)):
try:
source_images, source_labels, _, _ = next(source_train_loader_it)
except:
source_train_loader_it = iter(source_train_loader)
source_images, source_labels, _, _ = next(source_train_loader_it)
target_images, _, _, target_names = next(target_train_loader_it)
if index % 100 == 0:
visualizer.info(f'Processed {index} images')
target_in_source = target_images.clone()
for cnt, (trg_img, src_img) in enumerate(zip(target_images, source_images)):
trg_in_src = torch.from_numpy(source_to_target_np(trg_img, src_img, L=0.01))
target_in_source[cnt] = trg_in_src.clone()
target_images[cnt] = trg_img.clone()
mean = torch.reshape(torch.from_numpy(args.mean), (1, 3, 1, 1))
B, C, H, W = target_images.shape
mean = mean.repeat(B, 1, H, W)
target_in_source = target_in_source - mean
target_images = target_images - mean
image_name = []
predicted_label = np.zeros((target_images.shape[0], 1024, 2048))
predicted_prob = np.zeros((target_images.shape[0], 1024, 2048))
predicted_label_st = np.zeros((target_images.shape[0], 1024, 2048))
predicted_prob_st = np.zeros((target_images.shape[0], 1024, 2048))
for index, (timage, image_st, tname) in enumerate(zip(target_images, target_in_source, target_names)):
if timage is not None:
timage = timage.unsqueeze(0)
image_st = image_st.unsqueeze(0)
img = torch.cat((timage, image_st), dim=0)
img = img.to(args.gpu_ids[0], dtype=torch.float32)
predictions = model(img)
target_predictions, target_predictions_st = predictions[0].unsqueeze(0), predictions[1].unsqueeze(0)
output = torch.nn.functional.softmax(target_predictions, dim=1)
output = torch.nn.functional.upsample(output, (1024, 2048), mode='bilinear', align_corners=True).cpu().data[0].numpy()
output = output.transpose(1, 2, 0)
label, prob = np.argmax(output, axis=2), np.max(output, axis=2)
predicted_label[index] = label.copy()
predicted_prob[index] = prob.copy()
image_name.append(tname)
output_st = torch.nn.functional.softmax(target_predictions_st, dim=1)
output_st = torch.nn.functional.upsample(output_st, (1024, 2048), mode='bilinear', align_corners=True).cpu().data[0].numpy()
output_st = output_st.transpose(1, 2, 0)
label_st, prob_st = np.argmax(output_st, axis=2), np.max(output_st, axis=2)
predicted_label_st[index] = label_st.copy()
predicted_prob_st[index] = prob_st.copy()
        # choose the per-class thresholds by looking at the whole batch
thres, thres_st = [], []
for i in range(19):
cont, cont_st = False, False
x = predicted_prob[predicted_label == i]
x_st = predicted_prob_st[predicted_label_st == i]
if len(x) == 0:
thres.append(0)
cont = True
if len(x_st) == 0:
thres_st.append(0)
cont_st = True
if not cont:
x = np.sort(x)
thres.append(x[np.int(np.round(len(x) * 0.5))])
if not cont_st:
x_st = np.sort(x_st)
thres_st.append(x_st[np.int(np.round(len(x_st) * 0.5))])
thres, thres_st = np.array(thres), np.array(thres_st)
for cls in range(len(thres)):
thres[cls] = max(thres[cls], thres_st[cls])
thres[thres > 0.9] = 0.9
        # create the pseudo labels
for idx in range(target_images.shape[0]):
name = image_name[idx]
label = predicted_label[idx]
prob = predicted_prob[idx]
label_st = predicted_label_st[idx]
prob_st = predicted_prob_st[idx]
# max
label_new = np.maximum(label, label_st)
prob_new = np.maximum(prob, prob_st)
            # build the label accordingly
for i in range(19):
label_new[(prob_new < thres[i]) * (label_new == i)] = 255
output = np.asarray(label_new, dtype=np.uint8)
mask_img = colorize_mask(output)
output = Image.fromarray(output)
name = name.split('/')[-1]
os.makedirs("Cityscapes/pseudolabels_st", exist_ok=True)
os.makedirs("Cityscapes/pseudolabels_st_rgb", exist_ok=True)
output.save('%s/%s' % ("Cityscapes/pseudolabels_st", name))
mask_img.save('%s/%s' % ("Cityscapes/pseudolabels_st_rgb", name))
|
[
"wandf12345@163.com"
] |
wandf12345@163.com
|
f371ba254c8755397bda9195488aa778687e20d1
|
09f8b619e0e351b653db31ea9d65a97767e92a1c
|
/tests/test_style.py
|
1e3741754f0bcd250119ecde1ac64505350a8bc5
|
[
"BSD-2-Clause"
] |
permissive
|
sudlab/cgat
|
793e5ec8503db399ad9a741293bf3683828bf149
|
4261368393195b8dc75c3ad544d5556a27280633
|
refs/heads/master
| 2021-01-21T18:00:16.097868
| 2017-05-17T08:03:47
| 2017-05-17T08:03:47
| 45,106,248
| 0
| 1
| null | 2017-04-05T13:56:50
| 2015-10-28T10:53:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,458
|
py
|
'''test_style - test coding style conformance of CGAT code
===========================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
This script runs pep8 on all scripts in the CGAT
code collection.
This script is best run within nosetests::
nosetests tests/test_style.py
'''
import pep8
import glob
import os
from nose.tools import ok_
# DIRECTORIES to examine for python modules/scripts
EXPRESSIONS = (
('tests', 'tests/*.py'),
('scripts', 'scripts/*.py'),
('optic', 'scripts/optic/*.py'),
('gpipe', 'scripts/gpipe/*.py'),
('CGAT', 'CGAT/*.py'),
('CGATPipelines', 'CGATPipelines/*.py'),
('trackers', 'CGATPipelines/pipeline_docs/*/trackers/*.py'))
# Codes to ignore in the pep8 BaseReport
IGNORE = set(('E101', # indentation contains mixed spaces and tabs
'E201', # whitespace after '('
'E202', # whitespace before ')'
'E122', # continuation line missing indentation or outdented
'E265', # block comment should start with '# '
'E501', # line too long (82 > 79 characters)
'E502', # the backslash is redundant between brackets
'E731', # do not assign a lambda expression, use a def
'W191',
'W291',
'W293',
'W391',
'W503', # line break before binary operator
'W601',
'W602',
'files',
'directories',
'physical lines',
'logical lines',))
def check_style(filename):
'''check style of filename.
'''
p = pep8.StyleGuide(quiet=True)
report = p.check_files([filename])
# count errors/warning excluding
# those to ignore
take = [y for x, y
in report.counters.items() if x not in IGNORE]
found = ['%s:%i' % (x, y) for x, y
in report.counters.items() if x not in IGNORE]
total = sum(take)
ok_(total == 0,
'pep8 style violations in %s: %s' % (filename, ','.join(found)))
def test_style():
'''test style of scripts
'''
for label, expression in EXPRESSIONS:
files = glob.glob(expression)
files.sort()
for f in files:
if os.path.isdir(f):
continue
check_style.description = os.path.abspath(f)
yield(check_style, os.path.abspath(f))
|
[
"andreas.heger@gmail.com"
] |
andreas.heger@gmail.com
|
24af616887fef4a04a39dfd9e1971ddb5961b424
|
7b6b60ef46b712ee3e76b044bc5085c631abfee5
|
/task3/src/my_utils.py
|
48cda3c65ca8369e080cbda37051dc33c254cea9
|
[
"MIT"
] |
permissive
|
shreeshiv/ICDAR-2019-SROIE
|
f5f317616e24d8ba77d0e4c918f3c9c35808be02
|
94c5d951f9c117abf21ee340ff6ba848d51b70a8
|
refs/heads/master
| 2022-04-12T16:37:22.805662
| 2020-04-11T19:05:59
| 2020-04-11T19:05:59
| 255,050,752
| 1
| 0
|
MIT
| 2020-04-12T09:39:47
| 2020-04-12T09:39:46
| null |
UTF-8
|
Python
| false
| false
| 2,175
|
py
|
import random
from difflib import SequenceMatcher
from string import ascii_uppercase, digits, punctuation
import numpy
import regex
def pred_to_dict(text, pred, prob):
res = {"company": ("", 0), "date": ("", 0), "address": ("", 0), "total": ("", 0)}
keys = list(res.keys())
seps = [0] + (numpy.nonzero(numpy.diff(pred))[0] + 1).tolist() + [len(pred)]
for i in range(len(seps) - 1):
pred_class = pred[seps[i]] - 1
if pred_class == -1:
continue
new_key = keys[pred_class]
new_prob = prob[seps[i] : seps[i + 1]].max()
if new_prob > res[new_key][1]:
res[new_key] = (text[seps[i] : seps[i + 1]], new_prob)
return {k: regex.sub(r"[\t\n]", " ", v[0].strip()) for k, v in res.items()}
def compare_truth(pred_dict, truth_dict):
ratio = 0
for k in truth_dict.keys():
ratio += SequenceMatcher(None, truth_dict[k], pred_dict[k]).ratio()
return ratio / len(truth_dict.keys())
def robust_padding(texts, labels):
maxlen = max(len(t) for t in texts)
for i, text in enumerate(texts):
if len(text) == maxlen:
continue
pad_before = random.randint(0, maxlen - len(text))
pad_after = maxlen - pad_before - len(text)
texts[i] = random_string(pad_before) + text + random_string(pad_after)
labels[i] = numpy.pad(
labels[i], (pad_before, pad_after), "constant", constant_values=0
)
def random_string(n):
if n == 0:
return ""
x = random.random()
if x > 0.5:
pad = " " * n
elif x > 0.3:
pad = "".join(random.choices(digits + " \t\n", k=n))
elif x > 0.2:
pad = "".join(random.choices(ascii_uppercase + " \t\n", k=n))
elif x > 0.1:
pad = "".join(random.choices(ascii_uppercase + digits + " \t\n", k=n))
else:
pad = "".join(
random.choices(ascii_uppercase + digits + punctuation + " \t\n", k=n)
)
return pad
if __name__ == "__main__":
pred = {"a": "qwertyuiop", "b": "asdfghjkl", "c": "zxcvbnm"}
truth = {"a": "qwertyu iop", "b": "ascfghjkl ", "c": ""}
print(compare_truth(pred, truth))
|
[
"zhangns@mail2.sysu.edu.cn"
] |
zhangns@mail2.sysu.edu.cn
|
cbedde86fd5dc5b284b7b7c2f3e06658e773ca97
|
aa63224b1ee06f5377ad27191493e48081c467a1
|
/Task2.py
|
d790bed03ce14e83487c40486d1341a5d29d1ad6
|
[] |
no_license
|
RachnaReddyM/Inverted-Index-Generation
|
0237d238606bba6c42e72d4e673864cc5e3b5df5
|
5899a08825bbb63a93e3bb20297a1e3145166943
|
refs/heads/master
| 2021-05-03T08:38:46.809500
| 2018-02-07T05:32:06
| 2018-02-07T05:32:06
| 120,567,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,064
|
py
|
import os
import operator
index_unigram= {} # Inverted Index for Unigrams
"""index_bigram={} # Inverted Index for Bigrams
index_trigram={} # Inverted Index for Trigrams
unigram_tf={} # Unigrams term frequencies
bigram_tf={} # Bigrams term frequencies
trigram_tf={} # Trigrams term frequencies
unigram_df={} # Unigrams Document frequencies
bigram_df={} # Bigrams Document frequencies
trigram_df={} # Trigrams Document frequencies
uni_tokens={} # Unigrams- no. of tokens in each document
bi_tokens={} # Bigrams- no. of tokens in each document
tri_tokens={} # Trigrams- no. of tokens in each document"""
# fetch corpus from the below path
docs_source_path = "D:\IR\dummypro\corpus"
# filenamelist contains the list of raw HTML file names
filenamelist = os.listdir(docs_source_path)
# to generate an inverted index
def create_inverted_index():
for filename in filenamelist:
content=[]
name_of_file=filename.split('.txt')
docID=name_of_file[0]
print docID
f = open(docs_source_path + '\\' + filename, 'r+')
raw_data=f.read()
content=raw_data.split()
## Unigram-Inverted-Index generation
unigram_token=[]
for c in content:
#if c not in unigram_token:
# to generate the number of tokens in each document
#unigram_token.append(c)
if index_unigram.has_key(c):
# when this term occurs for the first time in a particular document
if docID not in index_unigram[c]:
index_unigram[c].update({docID:1})
else:
index_unigram[c][docID]+=1
# if term does not exist in the dictionary yet
else:
index_unigram[c]={docID:1}
#uni_tokens[docID]=len(unigram_token)
#Bigram-Inverted_Index creation
""" bigrams_token=[]
for i,c in enumerate(content):
next=i+1
if next < len(content):
term=c+' '+content[i+1]
if term not in bigrams_token:
# to generate the number of tokens in each document
bigrams_token.append(term)
if index_bigram.has_key(term):
# when this term occurs for the first time in a particular document
if docID not in index_bigram[term]:
index_bigram[term].update({docID:1})
else:
index_bigram[term][docID]+=1
# if term does not exist in the dictionary yet
else:
index_bigram[term]={docID:1}
bi_tokens[docID]=len(bigrams_token)
#Trigram_Inverted_Index creation
trigram_token=[]
for i,c in enumerate(content):
next=i+2
if next < len(content):
term=c+' '+content[i+1]+' '+content[i+2]
if term not in trigram_token:
# to generate the number of tokens in each document
trigram_token.append(term)
if index_trigram.has_key(term):
# when this term occurs for the first time in a particular document
if docID not in index_trigram[term]:
index_trigram[term].update({docID:1})
else:
index_trigram[term][docID]+=1
# if term does not exist in the dictionary yet
else:
index_trigram[term]={docID:1}
tri_tokens[docID]=len(trigram_token)
# to generate the term frequency and document frequency table
def generate_tf_df(index_gram,gram_tf,gram_df):
for key,value in index_gram.iteritems():
term_frequency=0
docString=""
i=0
for k,v in value.iteritems():
i+=1
term_frequency+=v
docString+=k
if(i<len(value)):
docString+=" "
no_of_docs=len(value)
# dictionary to hold the term frequencies
gram_tf[key]=term_frequency
# dictionary to hold the document frequencies
gram_df[key]={docString:no_of_docs}
# to sort and write the term frequency table to a file
def generate_tf_table(gram_tf,nof):
sort_dict=sorted(sorted(gram_tf.iteritems()), key=operator.itemgetter(1), reverse=True)
f=open(nof,"w+")
for k,v in sort_dict:
f.write(str(k)+" -------------> "+str(v)+"\n")
f.close()
# to sort and write document frequency table to file
def generate_df_table(gram_df,nof):
sort_dict=sorted(gram_df.iteritems(), key=operator.itemgetter(0))
f=open(nof,"w+")
for k,v in sort_dict:
df_value=""
for key,value in v.iteritems():
df_value=str(key)+" -> "+str(value)
final_string=str(k)+" ------------> "+df_value+"\n"
f.write(final_string)
f.close()"""
# to write the inverted index to a file
def write_index_to_file(dict_gram,nof):
f=open(nof,"w+")
for k,v in dict_gram.iteritems():
value_term=""
i=0
for key,val in v.iteritems():
i+=1
value_term+="("+str(key)+","+str(val)+")"
if(i<len(v)):
value_term+=","
final_term=str(k)+" -----> "+value_term+"\n"
f.write(final_term)
f.close()
"""# to write ti no. of tokens dictionary to file
def write_tokens_to_file(grams_tokens,nof):
token_string=""
f=open(nof,"w+")
for k,v in grams_tokens.iteritems():
token_string+=str(k)+"->"+str(v)+"\n"
f.write(token_string)
f.close()"""
def _mainindexer():
create_inverted_index()
write_index_to_file(index_unigram,"Task_2_Index_Unigram.txt")
"""write_index_to_file(index_bigram,"Task_2_Index_Bigram.txt")
write_index_to_file(index_trigram,"Task_2_Index_Trigram.txt")
write_tokens_to_file(uni_tokens,"Task_2_Unigram_tokens.txt")
write_tokens_to_file(bi_tokens,"Task_2_Bigram_tokens.txt")
write_tokens_to_file(tri_tokens,"Task_2_Trigram_tokens.txt")
generate_tf_df(index_unigram,unigram_tf,unigram_df)
generate_tf_df(index_bigram,bigram_tf,bigram_df)
generate_tf_df(index_trigram,trigram_tf,trigram_df)
generate_tf_table(unigram_tf,"Task 3_Unigram_TF.txt")
generate_tf_table(bigram_tf,"Task 3_Bigram_TF.txt")
generate_tf_table(trigram_tf,"Task 3_Trigram_TF.txt")
generate_df_table(unigram_df,"Task 3_Unigram_DF.txt")
generate_df_table(bigram_df,"Task 3_Bigram_DF.txt")
generate_df_table(trigram_df,"Task 3_Trigram_DF.txt")"""
_mainindexer()
|
[
"rachna_reddy93@yahoo.in"
] |
rachna_reddy93@yahoo.in
|
d8f3e2e8da6c3e45068109576849095485f36831
|
21790d64e67a705e3cba12facc7a5132fd4b42db
|
/ex7.py
|
2537fcca39593db65657de4cc344d4f2a1715bb4
|
[] |
no_license
|
jiaoweiming215/ABOUT-OF-PYTHON
|
7d325be6f8c09af3bc04a1d696651c0e43bdba4e
|
4416be8f0c67ba0a4293e945c352ffd2d7f03768
|
refs/heads/master
| 2021-01-21T21:06:46.228383
| 2018-03-13T06:16:59
| 2018-03-13T06:16:59
| 92,308,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
# print the current time
#-*-coding:utf-8-*-
import time;
ticks = time.time()
localtime = time.localtime(time.time())
print "Time:", ticks
print "Now:", localtime
|
[
"jiaoweimng215@163.com"
] |
jiaoweimng215@163.com
|
100c7b9715f2d3d1d5e5fcd0b8a906106e8912a9
|
9b5842b300bba0cf34babfe6ace235af4f5abcc2
|
/AlphaZero_Gomoku/mcts_alphaZero.py
|
92f90619a982f2ac6f49665e33b8bd609f4cef94
|
[] |
no_license
|
RanZhu1989/Py.RL
|
cec5e6abdad5988fa1fe5f61112728bc5b0e3334
|
9abb025ced8fc7e51f61790301efd7aee31826cb
|
refs/heads/master
| 2022-12-07T10:38:16.095433
| 2020-08-27T14:47:41
| 2020-08-27T14:47:41
| 284,641,756
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,877
|
py
|
# -*- coding: utf-8 -*-
"""
Monte Carlo Tree Search in AlphaGo Zero style, which uses a policy-value
network to guide the tree search and evaluate the leaf nodes
@author: Junxiao Song
"""
import numpy as np
import copy
def softmax(x):
probs = np.exp(x - np.max(x))
probs /= np.sum(probs)
return probs
class TreeNode(object):
"""A node in the MCTS tree."""
def __init__(self, parent, prior_p):
self._parent = parent
self._children = {}
self._n_visits = 0
self._Q = 0
self._u = 0
self._P = prior_p
def select(self, c_puct):
"""Return: A tuple of (action, next_node)"""
return max(self._children.items(),
key=lambda act_node: act_node[1].get_value(c_puct))
def get_value(self, c_puct):
self._u = (c_puct * self._P *
np.sqrt(self._parent._n_visits) / (1 + self._n_visits))
return self._Q + self._u
def expand(self, action_priors):
for action, prob in action_priors:
if action not in self._children:
self._children[action] = TreeNode(self, prob)
def update(self, leaf_value):
self._n_visits += 1
self._Q += 1.0*(leaf_value - self._Q) / self._n_visits
def update_recursive(self, leaf_value):
if self._parent:
self._parent.update_recursive(-leaf_value)
self.update(leaf_value)
def is_leaf(self):
return self._children == {}
def is_root(self):
return self._parent is None
class MCTS(object):
"""An implementation of Monte Carlo Tree Search."""
def __init__(self, policy_value_fn, c_puct=5, n_playout=10000):
self._root = TreeNode(None, 1.0)
self._policy = policy_value_fn
self._c_puct = c_puct
self._n_playout = n_playout
    def _playout(self, state):
        """Run a single playout: select down to a leaf, expand and evaluate it, then back-propagate the leaf value."""
node = self._root
        # Selection
while(1):
if node.is_leaf():
break
action, node = node.select(self._c_puct)
state.do_move(action)
        # Expansion and evaluation
action_probs, leaf_value = self._policy(state)
end, winner = state.game_end()
if not end:
node.expand(action_probs)
else:
            if winner == -1:  # draw (tie game)
leaf_value = 0.0
else:
leaf_value = (
1.0 if winner == state.get_current_player() else -1.0
)
        # Back-propagate and update the values along the path
node.update_recursive(-leaf_value)
def get_move_probs(self, state, temp=1e-3):
for n in range(self._n_playout):
state_copy = copy.deepcopy(state)
self._playout(state_copy)
act_visits = [(act, node._n_visits)
for act, node in self._root._children.items()]
acts, visits = zip(*act_visits)
act_probs = softmax(1.0/temp * np.log(np.array(visits) + 1e-10))
return acts, act_probs
def update_with_move(self, last_move):
if last_move in self._root._children:
self._root = self._root._children[last_move]
self._root._parent = None
else:
self._root = TreeNode(None, 1.0)
# def __str__(self):
# return "MCTS"
class MCTSPlayer(object):
"""AI player based on MCTS"""
def __init__(self, policy_value_function,
c_puct=5, n_playout=2000, is_selfplay=0):
self.mcts = MCTS(policy_value_function, c_puct, n_playout)
self._is_selfplay = is_selfplay
def get_action(self, board, temp=1e-3, return_prob=0):
sensible_moves = board.availables
move_probs = np.zeros(board.width*board.height)
if len(sensible_moves) > 0:
acts, probs = self.mcts.get_move_probs(board, temp)
move_probs[list(acts)] = probs
if self._is_selfplay:
move = np.random.choice(
acts, p=0.75*probs +
0.25*np.random.dirichlet(0.3*np.ones(len(probs)))
)
                # update the root node and reuse the search subtree
self.mcts.update_with_move(move)
else:
move = np.random.choice(acts, p=probs)
                # reset the root node
self.mcts.update_with_move(-1)
location = board.move_to_location(move)
print("AI move: %d,%d\n" % (location[0], location[1]))
if return_prob:
return move, move_probs
else:
return move
else:
print("WARNING: the board is full")
def set_player_ind(self, p):
self.player = p
def reset_player(self):
self.mcts.update_with_move(-1)
def __str__(self):
return "MCTS {}".format(self.player)
|
[
"gemina_cat@163.com"
] |
gemina_cat@163.com
|
4a3b6296ac88b566d5c90fdce51aa1335eb0806a
|
2cd10b2d480954eae373322308d07e5ba094db1e
|
/FILTER.py
|
7d0ec1b53f948a08010d1b6f0383bf3df339dd0c
|
[] |
no_license
|
dasing/CBIR
|
fd103ee32ffa5db8580875a7a733f5eb81012f2a
|
c882db4ea04a4f340bfbd5f47d68611923881807
|
refs/heads/master
| 2021-08-16T10:34:17.802624
| 2017-11-19T16:16:22
| 2017-11-19T16:16:22
| 111,275,956
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 861
|
py
|
import numpy as np
import cv2
from PARAMETER import *
def buildFilters():
filters = []
ksize = 31
orientation = 4
scale = 4
bandWidth = 4
for theta in np.arange(0, np.pi, np.pi/orientation):
for sigma in np.arange(1, scale+1, 1 ):
for lamda in np.arange(0, np.pi, np.pi/bandWidth):
params = {'ksize':(ksize, ksize), 'sigma':sigma, 'theta':theta, 'lambd':lamda, 'gamma':0.02, 'psi':0, 'ktype': cv2.CV_32F}
kern = cv2.getGaborKernel(**params)
kern /= 1.5*kern.sum()
filters.append(kern)
return filters
def buildRandomMatrix( featureDim ):
reducedDim = int(round(featureDim * PROJECT_RATIO))
size = reducedDim * featureDim
genMatrix = np.random.normal( 0, 0.1, size ).reshape((reducedDim, featureDim))
# print("reducedDim is {}".format(reducedDim))
# print("size is {}".format(size))
# print(genMatrix)
return genMatrix
|
[
"f2283689@gmail.com"
] |
f2283689@gmail.com
|
ad482575e6fc07931a01a486236fdad23538b5d3
|
58a9984d8ceb274ddd2e35a4c1416be22fb3081f
|
/cnn_baseline.py
|
a8a3310e8def303ad6ca874f1c4eed45df10ed6e
|
[] |
no_license
|
KaranKaur/mnist
|
a694920420ca4650a950b25bec8aa3d98d779279
|
91b0776bdbf83a08bdd800f94e575da855414581
|
refs/heads/master
| 2020-03-28T07:10:21.621632
| 2018-09-08T00:50:29
| 2018-09-08T00:50:29
| 147,884,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,576
|
py
|
# from keras.datasets import mnist
import matplotlib.pyplot as plt
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
import pandas as pd
from sklearn.model_selection import train_test_split
# load the data files
def plot_figure():
plt.subplot(221)
plt.imshow(X_train[0], cmap=plt.get_cmap('gray'))
plt.subplot(222)
plt.imshow(X_train[1], cmap=plt.get_cmap('gray'))
plt.subplot(223)
plt.imshow(X_train[2], cmap=plt.get_cmap('gray'))
plt.subplot(224)
plt.imshow(X_train[3], cmap=plt.get_cmap('gray'))
plt.show()
np.random.seed(7)
def get_data():
#read training data
mnist_train = pd.read_csv('.')
x_train = mnist_train.iloc[:, 1:]
y_train = mnist_train.iloc[:, 0]
#read test data
mnist_test = pd.read_csv('.')
x_test = mnist_test.iloc[:, :]
return x_train.values, y_train.values, x_test.values
def baseline_model():
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(1, 28, 28)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
def main():
X_train, Y_train, X_test = get_data()
print(X_train.shape)
# reshape to be [samples][pixels][width][height]
x_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')
x_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')
# normalize
x_train /= 255
x_test /= 255
    y_train = np_utils.to_categorical(Y_train)  # one-hot encode the training labels
num_classes = y_train.shape[1]
model = baseline_model()
model.fit(x_train, y_train,batch_size=128, nb_epoch=10, verbose=1)
#scores = model.evaluate(x_test, y_test, verbose=0)
#print("CNN Error: %.2f%%" % (100 - scores[1] * 100))
print('Fitted the model!')
predict = model.predict_classes(x_test, batch_size=128, verbose=1)
return predict
if __name__ == '__main__':
predictions = main()
df = pd.DataFrame(predictions)
df.to_csv("predictions_base_cnn.csv")
print('saved the predictions!')
|
[
"kkaur89@umd.edu"
] |
kkaur89@umd.edu
|
a96134858b3badb524e61aed9c77e9b7fcefa628
|
09dd88f7bc37a8d396af6c10b33c14caac00defb
|
/crypto/PhaseStream 3/find_aes_blocks.py
|
77a13ced4119ff2e647ed333bd7ec04b92ee073d
|
[] |
no_license
|
NU-Cyber-Clinic/Cyber-Apocalypse-2021
|
8d80343c11d80e00914ea7522d8919e2b555bf68
|
ae032a074208f8a740885faacdcdcdff32f396c7
|
refs/heads/master
| 2023-04-11T01:42:08.483075
| 2021-04-28T15:58:15
| 2021-04-28T16:01:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 932
|
py
|
from bitstring import BitArray
# CTR explainer: https://pycryptodome.readthedocs.io/en/latest/src/util/util.html#crypto-util-counter-module
test = b"No right of private conversation was enumerated in the Constitution. I don't suppose it occurred to anyone at the time that it could be prevented."
test_bin = BitArray(test).bin
encrypted = "464851522838603926f4422a4ca6d81b02f351b454e6f968a324fcc77da30cf979eec57c8675de3bb92f6c21730607066226780a8d4539fcf67f9f5589d150a6c7867140b5a63de2971dc209f480c270882194f288167ed910b64cf627ea6392456fa1b648afd0b239b59652baedc595d4f87634cf7ec4262f8c9581d7f56dc6f836cfe696518ce434ef4616431d4d1b361c"
encrypted_bin = BitArray(hex=encrypted).bin
print(test_bin)
print(encrypted_bin)
print()
print()
print()
block = []
test_block = []
for i in range(146*8):
block.append( '1' if test_bin[i] != encrypted_bin[i] else '0')
block_hex = BitArray(bin=''.join(block)).hex
print(block_hex)
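# The hex string above is the AES-CTR keystream recovered by XORing the known plaintext
# with the ciphertext. A minimal follow-up sketch, assuming another ciphertext `other_hex`
# (hypothetical, not part of this challenge file) was produced with the same key/counter,
# so the keystream can be reused to decrypt it byte-wise:
#   keystream = bytes.fromhex(block_hex)
#   other_ct = bytes.fromhex(other_hex)
#   recovered = bytes(c ^ k for c, k in zip(other_ct, keystream))
#   print(recovered)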
|
[
"54325599+cosmin-bianu@users.noreply.github.com"
] |
54325599+cosmin-bianu@users.noreply.github.com
|
8d32669cedc944fe8c0857c4eb88d72ec93d589a
|
5dfc16c0e22c5e1c5f04adbc5a382354801b5a35
|
/5-seq2seq/utils/optim.py
|
e96dce79a5a9b0850f80f16bc22fc91292a373fb
|
[
"MIT"
] |
permissive
|
gothxx/Machine-Learning-Theory-and-Application
|
af632782887e53f2d5d4a63127c52160a4e036ff
|
e20cb4e2623786d9dc7540e3252b8bbf7a0b2553
|
refs/heads/master
| 2023-08-17T18:12:30.886054
| 2021-10-17T07:17:48
| 2021-10-17T07:17:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,567
|
py
|
# %% [markdown]
# ## Optimizer: Adam + lr scheduling
# Inverse square root scheduling is important for stable Transformer training, and was later also used for RNNs.
# The learning rate is updated according to the formula below: it grows linearly during warm-up, then decays with the inverse square root of the update step.
# $$lrate = d_{\text{model}}^{-0.5}\cdot\min({step\_num}^{-0.5},{step\_num}\cdot{warmup\_steps}^{-1.5})$$
# code [source](https://nlp.seas.harvard.edu/2018/04/03/attention.html)
class NoamOpt:
"Optim wrapper that implements rate."
def __init__(self, model_size, factor, warmup, optimizer):
self.optimizer = optimizer
self._step = 0
self.warmup = warmup
self.factor = factor
self.model_size = model_size
self._rate = 0
@property
def param_groups(self):
return self.optimizer.param_groups
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
for group in self.param_groups:
for p in group['params']:
if p.grad is not None:
p.grad.data.mul_(c)
def step(self):
"Update parameters and rate"
self._step += 1
rate = self.rate()
for p in self.param_groups:
p['lr'] = rate
self._rate = rate
self.optimizer.step()
def rate(self, step=None):
"Implement `lrate` above"
if step is None:
step = self._step
return 0 if not step else self.factor * (self.model_size**(-0.5) * min(
step**(-0.5), step * self.warmup**(-1.5)))
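# A minimal usage sketch, assuming PyTorch is available, `model` is an nn.Module with
# d_model = 512 and `loader` yields training batches (these names are assumptions, not
# part of this module):
#   import torch
#   base = torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9)
#   opt = NoamOpt(model_size=512, factor=1, warmup=4000, optimizer=base)
#   for batch in loader:
#       loss = compute_loss(model, batch)
#       loss.backward()
#       opt.step()                    # sets the scheduled lr, then steps Adam
#       opt.optimizer.zero_grad()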
|
[
"silencejiang12138@gmail.com"
] |
silencejiang12138@gmail.com
|
ad008fd9022a57c2af0750b1b2aba2d6d91e46b7
|
eda721786c726373f9c8ae748ec9af61df27900b
|
/igep_qa/helpers/common.py
|
dcab3a6e8c35c21c71eaf8085206a199af8b95ff
|
[
"MIT"
] |
permissive
|
eballetbo/igep_qa
|
60f66a136a26af1a5daa3f0e2f2278bb9d30f57e
|
d5a58a974250e435cd2e9a76514cd3eb6002c5f0
|
refs/heads/master
| 2020-03-29T13:16:30.695466
| 2014-02-19T14:53:51
| 2014-02-19T14:53:51
| 13,930,291
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,355
|
py
|
#!/usr/bin/env python
"""
This provides various generic helper functions and classes
"""
import fcntl
import mmap
import os
import struct
import socket
def is_in_path(name):
""" Return True if name refers to an existing file in path, otherwise
returns False.
Keyword arguments:
- name: The path name.
"""
for p in os.environ["PATH"].split(os.pathsep):
if os.path.exists(os.path.join(p, name)):
return True
# File not found in path
return False
def is_nfsroot():
""" Return True if rootfs is mounted via nfs, otherwise returns False
"""
fd = open("/proc/mounts", "r")
content = fd.read()
if content.find("/ nfs") == -1 :
return False
return True
def get_hwaddr(ifname):
""" Return the MAC address for a specific local interface.
Keyword arguments:
- ifname: The interface name, e.g. eth0, wlan0.
"""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
return ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
# exception
except:
return ""
class QCpuinfo:
""" Helper class to parse the /proc/cpuinfo
On Linux systems various information about the CPU ( or CPUs ) in the
computer can be gleaned from /proc/cpuinfo.
Known issues:
1. This class doesn't work as expected with more than one core
"""
def __init__(self):
""" Parse /proc/cpuinfo file and store into map.
"""
fd = open("/proc/cpuinfo", "r")
self.data = { "processor" : "0" }
lines = fd.readlines()
for line in lines:
keymap = line.split(": ")
if len(keymap) == 2:
key = keymap[0].strip("\t")
value = keymap[1].rstrip("\n")
self.data[key] = value
fd.close()
def __getitem__(self, index):
""" Overload [] operator, access to the data like a mamp
Example 1: Is easy to get the "Processor" value with,
cpu = TCpuinfo()
print cpu["Processor"]
The result should be like this
ARMv7 Processor rev 2 (v7l)
"""
return self.data[index]
class QCmdline:
""" Helper class to parse the /proc/cmdline
On Linux systems information about the kernel command line in the
computer can be gleaned from /proc/cmdline.
"""
def __init__(self):
fd = open("/proc/cmdline", "r")
self.cmdline = fd.read()
fd.close()
def checkparam(self, param):
""" Check for the existence of a kernel parameter
Return True if parameter is in cmdline, otherwise returns False
Keyword arguments:
            - param: The parameter to be found.
"""
if (self.cmdline.find(param) == -1) :
return False
return True
class QDeviceTree:
""" Helper class to parse the /proc/device-tree
On Linux systems information about device-tree in the
computer can be gleaned from /proc/device-tree directory.
"""
def compatible(self, name):
""" Check for the existence of a kernel parameter
Return True if name is in compatible, otherwise returns False
Keyword arguments:
- name: The compatible string to be found.
"""
fd = open("/proc/device-tree/compatible", "r")
return (name in fd.read())
class QMmap:
""" Simple helper class to read/write from/to any location in memory
References:
http://www.lartmaker.nl/lartware/port/devmem2.c
"""
MAP_MASK = mmap.PAGESIZE - 1
WORD = 4
def read(self, addr):
""" Read from any location in memory
Returns the readed value in hexadecimal format
Keyword arguments:
- addr: The memory address to be readed.
"""
fd = os.open("/dev/mem", os.O_RDWR | os.O_SYNC)
# Map one page
mm = mmap.mmap(fd, mmap.PAGESIZE, mmap.MAP_SHARED, mmap.PROT_WRITE
| mmap.PROT_READ, offset=addr & ~self.MAP_MASK)
mm.seek(addr & self.MAP_MASK)
retval = struct.unpack('I', mm.read(self.WORD))
mm.close()
os.close(fd)
return "%08X" % retval[0]
|
[
"eballetbo@iseebcn.com"
] |
eballetbo@iseebcn.com
|
d6331878b4a8aeaaa369f97edecc824266ec6da0
|
a005085fbd9a938f79f330cdf380104aa373c944
|
/domainbed/munit/core/utils.py
|
c8baabda8b85c077469befd0f17ef4db9a9e65fc
|
[
"MIT"
] |
permissive
|
iostream11/mbdg
|
46775318cfefed4eba2517a0bdffd0dc3c206cad
|
b4e768a6d31ab1e2cb0f0a3aad76832895068876
|
refs/heads/main
| 2023-06-03T14:24:34.594919
| 2021-06-16T16:48:29
| 2021-06-16T16:48:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,820
|
py
|
"""
Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
# from torch.utils.serialization import load_lua
from torch.utils.data import DataLoader
from core.networks import Vgg16
from torch.autograd import Variable
from torch.optim import lr_scheduler
from torchvision import transforms
# from data import ImageFilelist, ImageFolder
import torch
import torch.nn as nn
import os
import math
import torchvision.utils as vutils
import yaml
import numpy as np
import torch.nn.init as init
import time
from torchvision.models import inception_v3
# from datasets.mnist2colormnist.MNIST_Color import MNISTDataset
# from datasets.cure_tsr.CURE_TSR_Datasets import CURE_StreetSigns
# from datasets.loaders import get_model_loaders
# Methods
# get_config : load yaml file
# eformat :
# write_2images : save output image
# prepare_sub_folder : create checkpoints and images folders for saving outputs
# write_one_row_html : write one row of the html file for output images
# write_html : create the html file.
# write_loss
# slerp
# get_slerp_interp
# get_model_list
# load_vgg16
# load_inception
# vgg_preprocess
# get_scheduler
# weights_init
# def get_all_data_loaders(conf):
# train_A, train_B, test_A, test_B = get_model_loaders(conf)
#
# return train_A, train_B, test_A, test_B
def get_config(config):
with open(config, 'r') as stream:
return yaml.load(stream, Loader=yaml.FullLoader)
def eformat(f, prec):
s = "%.*e"%(prec, f)
mantissa, exp = s.split('e')
# add 1 to digits as 1 is taken by sign +/-
return "%se%d"%(mantissa, int(exp))
def __write_images(image_outputs, display_image_num, file_name):
image_outputs = [images.expand(-1, 3, -1, -1) for images in image_outputs] # expand gray-scale images to 3 channels
image_tensor = torch.cat([images[:display_image_num] for images in image_outputs], 0)
image_grid = vutils.make_grid(image_tensor.data, nrow=display_image_num, padding=0, normalize=True)
vutils.save_image(image_grid, file_name, nrow=1)
def write_2images(image_outputs, display_image_num, image_directory, postfix):
n = len(image_outputs)
__write_images(image_outputs[0:n//2], display_image_num, '%s/gen_a2b_%s.jpg' % (image_directory, postfix))
__write_images(image_outputs[n//2:n], display_image_num, '%s/gen_b2a_%s.jpg' % (image_directory, postfix))
def prepare_sub_folder(output_directory):
image_directory = os.path.join(output_directory, 'images')
if not os.path.exists(image_directory):
print("Creating directory: {}".format(image_directory))
os.makedirs(image_directory)
checkpoint_directory = os.path.join(output_directory, 'checkpoints')
if not os.path.exists(checkpoint_directory):
print("Creating directory: {}".format(checkpoint_directory))
os.makedirs(checkpoint_directory)
return checkpoint_directory, image_directory
def write_one_row_html(html_file, iterations, img_filename, all_size):
html_file.write("<h3>iteration [%d] (%s)</h3>" % (iterations,img_filename.split('/')[-1]))
html_file.write("""
<p><a href="%s">
<img src="%s" style="width:%dpx">
</a><br>
<p>
""" % (img_filename, img_filename, all_size))
return
def write_html(filename, iterations, image_save_iterations, image_directory, all_size=1536):
html_file = open(filename, "w")
html_file.write('''
<!DOCTYPE html>
<html>
<head>
<title>Experiment name = %s</title>
<meta http-equiv="refresh" content="30">
</head>
<body>
''' % os.path.basename(filename))
html_file.write("<h3>current</h3>")
write_one_row_html(html_file, iterations, '%s/gen_a2b_train_current.jpg' % (image_directory), all_size)
write_one_row_html(html_file, iterations, '%s/gen_b2a_train_current.jpg' % (image_directory), all_size)
for j in range(iterations, image_save_iterations-1, -1):
if j % image_save_iterations == 0:
write_one_row_html(html_file, j, '%s/gen_a2b_test_%08d.jpg' % (image_directory, j), all_size)
write_one_row_html(html_file, j, '%s/gen_b2a_test_%08d.jpg' % (image_directory, j), all_size)
write_one_row_html(html_file, j, '%s/gen_a2b_train_%08d.jpg' % (image_directory, j), all_size)
write_one_row_html(html_file, j, '%s/gen_b2a_train_%08d.jpg' % (image_directory, j), all_size)
html_file.write("</body></html>")
html_file.close()
def write_loss(iterations, trainer, train_writer):
members = [attr for attr in dir(trainer) \
if not callable(getattr(trainer, attr)) and not attr.startswith("__") and ('loss' in attr or 'grad' in attr or 'nwd' in attr)]
for m in members:
train_writer.add_scalar(m, getattr(trainer, m), iterations + 1)
def slerp(val, low, high):
"""
original: Animating Rotation with Quaternion Curves, Ken Shoemake
https://arxiv.org/abs/1609.04468
Code: https://github.com/soumith/dcgan.torch/issues/14, Tom White
"""
omega = np.arccos(np.dot(low / np.linalg.norm(low), high / np.linalg.norm(high)))
so = np.sin(omega)
return np.sin((1.0 - val) * omega) / so * low + np.sin(val * omega) / so * high
def get_slerp_interp(nb_latents, nb_interp, z_dim):
"""
modified from: PyTorch inference for "Progressive Growing of GANs" with CelebA snapshot
https://github.com/ptrblck/prog_gans_pytorch_inference
"""
latent_interps = np.empty(shape=(0, z_dim), dtype=np.float32)
for _ in range(nb_latents):
low = np.random.randn(z_dim)
high = np.random.randn(z_dim) # low + np.random.randn(512) * 0.7
interp_vals = np.linspace(0, 1, num=nb_interp)
latent_interp = np.array([slerp(v, low, high) for v in interp_vals],
dtype=np.float32)
latent_interps = np.vstack((latent_interps, latent_interp))
return latent_interps[:, :, np.newaxis, np.newaxis]
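# A minimal usage sketch: two spherical interpolations of 10 steps each in an
# 8-dimensional style space (the dimension 8 is only an example value):
#   z = get_slerp_interp(nb_latents=2, nb_interp=10, z_dim=8)
#   z.shape == (20, 8, 1, 1); convert to a tensor to use the rows as style codes.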
# Get model list for resume
def get_model_list(dirname, key):
if os.path.exists(dirname) is False:
return None
gen_models = [os.path.join(dirname, f) for f in os.listdir(dirname) if
os.path.isfile(os.path.join(dirname, f)) and key in f and ".pt" in f]
if gen_models is None:
return None
gen_models.sort()
last_model_name = gen_models[-1]
return last_model_name
def load_vgg16(model_dir):
""" Use the model from https://github.com/abhiskk/fast-neural-style/blob/master/neural_style/utils.py """
if not os.path.exists(model_dir):
os.mkdir(model_dir)
if not os.path.exists(os.path.join(model_dir, 'vgg16.weight')):
if not os.path.exists(os.path.join(model_dir, 'vgg16.t7')):
os.system('wget https://www.dropbox.com/s/76l3rt4kyi3s8x7/vgg16.t7?dl=1 -O ' + os.path.join(model_dir, 'vgg16.t7'))
vgglua = load_lua(os.path.join(model_dir, 'vgg16.t7'))
vgg = Vgg16()
for (src, dst) in zip(vgglua.parameters()[0], vgg.parameters()):
dst.data[:] = src
torch.save(vgg.state_dict(), os.path.join(model_dir, 'vgg16.weight'))
vgg = Vgg16()
vgg.load_state_dict(torch.load(os.path.join(model_dir, 'vgg16.weight')))
return vgg
def load_inception(model_path):
state_dict = torch.load(model_path)
model = inception_v3(pretrained=False, transform_input=True)
model.aux_logits = False
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, state_dict['fc.weight'].size(0))
model.load_state_dict(state_dict)
for param in model.parameters():
param.requires_grad = False
return model
def vgg_preprocess(batch):
tensortype = type(batch.data)
(r, g, b) = torch.chunk(batch, 3, dim = 1)
batch = torch.cat((b, g, r), dim = 1) # convert RGB to BGR
batch = (batch + 1) * 255 * 0.5 # [-1, 1] -> [0, 255]
mean = tensortype(batch.data.size()).cuda()
mean[:, 0, :, :] = 103.939
mean[:, 1, :, :] = 116.779
mean[:, 2, :, :] = 123.680
batch = batch.sub(Variable(mean)) # subtract mean
return batch
def get_scheduler(optimizer, hyperparameters, iterations=-1):
if 'lr_policy' not in hyperparameters or hyperparameters['lr_policy'] == 'constant':
scheduler = None # constant scheduler
elif hyperparameters['lr_policy'] == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=hyperparameters['step_size'],
gamma=hyperparameters['gamma'], last_epoch=iterations)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', hyperparameters['lr_policy'])
return scheduler
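# A minimal usage sketch: `hyperparameters` normally comes from the YAML config loaded
# with get_config; the values below are only an example.
#   hp = {'lr_policy': 'step', 'step_size': 100000, 'gamma': 0.5}
#   scheduler = get_scheduler(optimizer, hp)
#   if scheduler is not None:
#       scheduler.step()          # typically called once per training iteration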
def weights_init(init_type='gaussian'):
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find('Linear') == 0) and hasattr(m, 'weight'):
# print m.__class__.__name__
if init_type == 'gaussian':
init.normal_(m.weight.data, 0.0, 0.02)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=math.sqrt(2))
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=math.sqrt(2))
elif init_type == 'default':
pass
else:
assert 0, "Unsupported initialization: {}".format(init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
return init_fun
class Timer:
def __init__(self, msg):
self.msg = msg
self.start_time = None
def __enter__(self):
self.start_time = time.time()
def __exit__(self, exc_type, exc_value, exc_tb):
print(self.msg % (time.time() - self.start_time))
def pytorch03_to_pytorch04(state_dict_base, trainer_name):
    def __conversion_core(state_dict_base, trainer_name):
        state_dict = state_dict_base.copy()
        if trainer_name == 'MUNIT':
            # MUNIT checkpoints: drop the running statistics of the content encoder norms
            for key, value in state_dict_base.items():
                if key.endswith(('enc_content.model.0.norm.running_mean',
                                 'enc_content.model.0.norm.running_var',
                                 'enc_content.model.1.norm.running_mean',
                                 'enc_content.model.1.norm.running_var',
                                 'enc_content.model.2.norm.running_mean',
                                 'enc_content.model.2.norm.running_var',
                                 'enc_content.model.3.model.0.model.1.norm.running_mean',
                                 'enc_content.model.3.model.0.model.1.norm.running_var',
                                 'enc_content.model.3.model.0.model.0.norm.running_mean',
                                 'enc_content.model.3.model.0.model.0.norm.running_var',
                                 'enc_content.model.3.model.1.model.1.norm.running_mean',
                                 'enc_content.model.3.model.1.model.1.norm.running_var',
                                 'enc_content.model.3.model.1.model.0.norm.running_mean',
                                 'enc_content.model.3.model.1.model.0.norm.running_var',
                                 'enc_content.model.3.model.2.model.1.norm.running_mean',
                                 'enc_content.model.3.model.2.model.1.norm.running_var',
                                 'enc_content.model.3.model.2.model.0.norm.running_mean',
                                 'enc_content.model.3.model.2.model.0.norm.running_var',
                                 'enc_content.model.3.model.3.model.1.norm.running_mean',
                                 'enc_content.model.3.model.3.model.1.norm.running_var',
                                 'enc_content.model.3.model.3.model.0.norm.running_mean',
                                 'enc_content.model.3.model.3.model.0.norm.running_var',
                                 )):
                    del state_dict[key]
        else:
            # Other trainers: drop the running statistics of encoder and decoder norms
            for key, value in state_dict_base.items():
                if key.endswith(('enc.model.0.norm.running_mean',
                                 'enc.model.0.norm.running_var',
                                 'enc.model.1.norm.running_mean',
                                 'enc.model.1.norm.running_var',
                                 'enc.model.2.norm.running_mean',
                                 'enc.model.2.norm.running_var',
                                 'enc.model.3.model.0.model.1.norm.running_mean',
                                 'enc.model.3.model.0.model.1.norm.running_var',
                                 'enc.model.3.model.0.model.0.norm.running_mean',
                                 'enc.model.3.model.0.model.0.norm.running_var',
                                 'enc.model.3.model.1.model.1.norm.running_mean',
                                 'enc.model.3.model.1.model.1.norm.running_var',
                                 'enc.model.3.model.1.model.0.norm.running_mean',
                                 'enc.model.3.model.1.model.0.norm.running_var',
                                 'enc.model.3.model.2.model.1.norm.running_mean',
                                 'enc.model.3.model.2.model.1.norm.running_var',
                                 'enc.model.3.model.2.model.0.norm.running_mean',
                                 'enc.model.3.model.2.model.0.norm.running_var',
                                 'enc.model.3.model.3.model.1.norm.running_mean',
                                 'enc.model.3.model.3.model.1.norm.running_var',
                                 'enc.model.3.model.3.model.0.norm.running_mean',
                                 'enc.model.3.model.3.model.0.norm.running_var',
                                 'dec.model.0.model.0.model.1.norm.running_mean',
                                 'dec.model.0.model.0.model.1.norm.running_var',
                                 'dec.model.0.model.0.model.0.norm.running_mean',
                                 'dec.model.0.model.0.model.0.norm.running_var',
                                 'dec.model.0.model.1.model.1.norm.running_mean',
                                 'dec.model.0.model.1.model.1.norm.running_var',
                                 'dec.model.0.model.1.model.0.norm.running_mean',
                                 'dec.model.0.model.1.model.0.norm.running_var',
                                 'dec.model.0.model.2.model.1.norm.running_mean',
                                 'dec.model.0.model.2.model.1.norm.running_var',
                                 'dec.model.0.model.2.model.0.norm.running_mean',
                                 'dec.model.0.model.2.model.0.norm.running_var',
                                 'dec.model.0.model.3.model.1.norm.running_mean',
                                 'dec.model.0.model.3.model.1.norm.running_var',
                                 'dec.model.0.model.3.model.0.norm.running_mean',
                                 'dec.model.0.model.3.model.0.norm.running_var',
                                 )):
                    del state_dict[key]
        return state_dict
    state_dict = dict()
    state_dict['a'] = __conversion_core(state_dict_base['a'], trainer_name)
    state_dict['b'] = __conversion_core(state_dict_base['b'], trainer_name)
    return state_dict
|
[
"arobey1@seas.upenn.edu"
] |
arobey1@seas.upenn.edu
|
982cd66cd05486ce807cf58a7913b9abf4656f7a
|
dc2f1cf081b1c08907e2572ac9a484574b841ca5
|
/test_polygon_rect.py
|
435b2c17d882c260bc5adba99396cdc4f58a5f2a
|
[] |
no_license
|
meekrob/merge-overlapping-svg-paths
|
52ede1136a771d7ed6cadfbb25d77b2942253a0b
|
04375132b7834494d8f900d531764cc451320e24
|
refs/heads/master
| 2020-09-27T18:34:05.522538
| 2019-12-08T21:04:14
| 2019-12-08T21:04:14
| 226,580,303
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,422
|
py
|
#!/usr/bin/env python3
def list_to_svg_path_d(l):
command = 'm'
d = ''
for point in l:
d += "%s %.2f,%.2f " % (command,point[0],point[1])
command = 'l'
return d + 'z'
def merge_rect_into_polygon(rect, polygon):
# for intersecting points between the rect and polygon, replace with the rect's non-intersecting points
match_point = None
points_to_insert = []
for corner in rect:
try:
i = polygon.index( corner )
print('found:', corner, "==", polygon[i])
except:
points_to_insert.append( corner )
continue
if match_point == None:
match_point = i
print('match_point:', match_point)
polygon.pop(i)
for new_point in points_to_insert:
polygon.insert( i, new_point )
i += 1
return polygon
rect1 = [ (0,0), (0,1), (1,1), (1,0) ]
rect2 = [ (1,0), (1,1), (2,1), (2,0) ]
rect3 = [ (0,1), (0,2), (1,2), (1,1) ]
rect4 = [ (1,1), (1,2), (2,2), (2,1) ]
p = rect1
print("polygon", p)
print("###############\n")
print("rect2", rect2)
p = merge_rect_into_polygon(rect2, p)
print("polygon", p)
print("###############\n")
print("rect3", rect3)
p = merge_rect_into_polygon(rect3, p)
print("polygon", p)
print("###############\n")
print("rect4", rect4)
p = merge_rect_into_polygon(rect4, p)
print("polygon", p)
print( list_to_svg_path_d(p) )
|
[
"davidcking.inbox@gmail.com"
] |
davidcking.inbox@gmail.com
|
100eb310ce1ac3e3f2486d6d9618a3bf5a705b8e
|
beab958fd74261f2598818b44666d4613de65774
|
/test_create_negative.py
|
748f78374128c19f7f12e6b8fc62d5056d3d0b47
|
[
"Apache-2.0"
] |
permissive
|
ArtemVavilov88/test2-Se-Python-14
|
cb3ead45f144a1b08dea6fcd66a1764572534f00
|
522791b4fcde1d26b8e56244b10ebaf2e7146e57
|
refs/heads/master
| 2021-01-13T00:53:54.313505
| 2015-11-18T17:28:06
| 2015-11-18T17:28:06
| 46,283,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,398
|
py
|
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class php4dvd_create_film_negative(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(20)
def test_create_film_negative(self):
driver = self.driver
driver.get("http://localhost/php4dvd/")
driver.find_element_by_id("username").clear()
driver.find_element_by_id("username").send_keys("admin")
driver.find_element_by_name("password").clear()
driver.find_element_by_name("password").send_keys("admin")
driver.find_element_by_name("submit").click()
driver.find_element_by_link_text("Add movie").click()
driver.find_element_by_name("imdbid").send_keys("0003")
driver.find_element_by_name("aka").send_keys("Test-information")
driver.find_element_by_name("year").send_keys("2015")
driver.find_element_by_name("duration").send_keys("0000")
driver.find_element_by_id("own_no").click()
driver.find_element_by_name("plots").send_keys("test-test")
driver.find_element_by_id("submit").click()
#checking error message
getText_error = driver.find_element_by_tag_name("label").text
        print(getText_error)
def tearDown(self):
self.driver.quit()
if __name__=="__main__":
unittest.main()
|
[
"avlov88@gmail.com"
] |
avlov88@gmail.com
|
4bc7aecd3876eb2d91e7d6880e5c13737562dca5
|
e6e20cd07e12679fd3085ab5d9a75cb60353a96a
|
/apps/login_app/migrations/0004_auto_20170927_1201.py
|
6a51b738cfa5e227fa02e22f97e87c1c0f39e65e
|
[] |
no_license
|
sneihaasawant/Friends
|
47b948658b6706ca1a7963c882423ad3d63befab
|
c74c85995b982b5e968142cb09853574849414d2
|
refs/heads/master
| 2021-08-27T23:01:46.158186
| 2017-12-10T16:21:11
| 2017-12-10T16:21:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-27 19:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login_app', '0003_user_friends_with'),
]
operations = [
migrations.AlterField(
model_name='user',
name='friends_with',
field=models.ManyToManyField(blank=True, null=True, related_name='_user_friends_with_+', to='login_app.User'),
),
]
|
[
"sneha.s.sawant25@gmail.com"
] |
sneha.s.sawant25@gmail.com
|
1d1b0213e352a561d37417353719711b31bd3de4
|
b51535db8031ce51babcc1d86073ff8e74504ef2
|
/primenumber.py
|
82078f0aba6378c1115906dabe6f1a7bd2bfd52f
|
[] |
no_license
|
chriskok/PythonLearningWorkspace
|
d5750b3cbf651538e58b97ae3f9889f157a3ca33
|
2852f47b5eb6e52d549ded0324418b2d492cef29
|
refs/heads/master
| 2021-01-24T08:44:22.412024
| 2018-03-19T14:15:13
| 2018-03-19T14:15:13
| 122,990,979
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
# Note: a prime can only be divided evenly by 1 and itself.
# 5 is prime because its only positive factors are 1 and 5.
# 6 is not prime because it is divisible by 1, 2, 3 and 6.
# Use a for loop and check whether the modulus == 0 for any candidate divisor.
def is_prime(num):
for i in range(2, num):
if (num % i) == 0:
return False
return True
def get_prime(max_number):
list_of_primes = []
for num1 in range(2, max_number):
if is_prime(num1):
list_of_primes.append(num1)
return list_of_primes
# Ask the user to type in the maximum prime
max_prime = input("Insert max prime: ")
max_prime = int(max_prime)
primes_list = get_prime(max_prime)
for prime in primes_list:
print(prime)
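# A faster variant is possible because trial division only needs to test divisors up to
# the square root of num; this sketch is not used by the script above.
def is_prime_fast(num):
    # Explicitly reject numbers below 2, which is_prime above does not do.
    if num < 2:
        return False
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            return False
    return True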
|
[
"ckok@purdue.edu"
] |
ckok@purdue.edu
|